id (int64) | label (string) | text (string) |
---|---|---|
3,100 |
transient electric dipole whole space
|
import numpy as np
from scipy.constants import mu_0, pi
from scipy.special import erf
from SimPEG import utils
def hzAnalyticDipoleT(r, t, sigma):
theta = np.sqrt((sigma * mu_0) / (4 * t))
tr = theta * r
etr = erf(tr)
t1 = (9 / (2 * tr**2) - 1) * etr
t2 = (1 / np.sqrt(pi)) * (9 / tr + 4 * tr) * np.exp(-(tr**2))
hz = (t1 - t2) / (4 * pi * r**3)
return hz
def hzAnalyticCentLoopT(a, t, sigma):
theta = np.sqrt((sigma * mu_0) / (4 * t))
ta = theta * a
eta = erf(ta)
t1 = (3 / (np.sqrt(pi) * ta)) * np.exp(-(ta**2))
t2 = (1 - (3 / (2 * ta**2))) * eta
hz = (t1 + t2) / (2 * a)
return hz
def TransientMagneticDipoleWholeSpace(
XYZ, srcLoc, sig, t, moment, fieldType="h", mu_r=1
):
"""
Analytical solution for a dipole in a whole-space.
"""
mu = 4 * np.pi * 1e-7 * mu_r
if isinstance(moment, str):
if moment == "X":
mx, my, mz = 1.0, 0.0, 0.0
elif moment == "Y":
mx, my, mz = 0.0, 1.0, 0.0
elif moment == "Z":
mx, my, mz = 0.0, 0.0, 1.0
else:
raise NotImplementedError("String type for moment not recognized")
else:
mx, my, mz = moment[0], moment[1], moment[2]
XYZ = utils.as_array_n_by_dim(XYZ, 3)
dx = XYZ[:, 0] - srcLoc[0]
dy = XYZ[:, 1] - srcLoc[1]
dz = XYZ[:, 2] - srcLoc[2]
r = np.sqrt(dx**2.0 + dy**2.0 + dz**2.0)
k = np.sqrt(mu * sig / (4 * t))
kr = k * r
if fieldType == "h":
front = 1 / (4.0 * pi * r**3.0)
mid = 3 * erf(kr) - (4 * kr**3 + 6 * kr) * np.exp(-(kr**2)) / np.sqrt(pi)
end = -erf(kr) + (4 * kr**3 + 2 * kr) * np.exp(-(kr**2)) / np.sqrt(pi)
Fx = front * (
mx * ((dx / r) ** 2.0 * mid + end)
+ my * ((dy * dx / r**2.0) * mid)
+ mz * ((dx * dz / r**2.0) * mid)
)
Fy = front * (
mx * ((dx * dy / r**2.0) * mid)
+ my * ((dy / r) ** 2.0 * mid + end)
+ mz * ((dy * dz / r**2.0) * mid)
)
Fz = front * (
mx * ((dx * dz / r**2.0) * mid)
+ my * ((dy * dz / r**2.0) * mid)
+ mz * ((dz / r) ** 2.0 * mid + end)
)
elif fieldType == "dhdt":
front = (4 * k**5 / (pi**1.5 * mu * sig)) * np.exp(-(kr**2))
mid = kr**2
end = 1 - kr**2
Fx = front * (
mx * ((dx / r) ** 2.0 * mid + end)
+ my * ((dy * dx / r**2.0) * mid)
+ mz * ((dx * dz / r**2.0) * mid)
)
Fy = front * (
mx * ((dx * dy / r**2.0) * mid)
+ my * ((dy / r) ** 2.0 * mid + end)
+ mz * ((dy * dz / r**2.0) * mid)
)
Fz = front * (
mx * ((dx * dz / r**2.0) * mid)
+ my * ((dy * dz / r**2.0) * mid)
+ mz * ((dz / r) ** 2.0 * mid + end)
)
elif fieldType == "e":
front = (2 * k**5 / (pi**1.5 * sig)) * np.exp(-(kr**2))
Fx = front * (my * (-dz / r) + mz * (dy / r))
Fy = front * (mx * (dz / r) + mz * (-dx / r))
Fz = front * (mx * (-dy / r) + my * (dx / r))
return Fx, Fy, Fz
def METHOD_NAME(
XYZ, srcLoc, sig, t, moment, fieldType="h", mu_r=1
):
mu = 4 * np.pi * 1e-7 * mu_r
if isinstance(moment, str):
if moment.upper() == "X":
mx, my, mz = 1.0, 0.0, 0.0
elif moment.upper() == "Y":
mx, my, mz = 0.0, 1.0, 0.0
elif moment.upper() == "Z":
mx, my, mz = 0.0, 0.0, 1.0
else:
raise NotImplementedError("String type for moment not recognized")
else:
mx, my, mz = moment[0], moment[1], moment[2]
XYZ = utils.as_array_n_by_dim(XYZ, 3)
dx = XYZ[:, 0] - srcLoc[0]
dy = XYZ[:, 1] - srcLoc[1]
dz = XYZ[:, 2] - srcLoc[2]
r = np.sqrt(dx**2.0 + dy**2.0 + dz**2.0)
k = np.sqrt(mu * sig / (4 * t))
kr = k * r
if fieldType == "e":
front = 1 / (4.0 * np.pi * sig * r**3)
mid = 3 * erf(kr) - (4 * kr**3 + 6 * kr) * np.exp(-(kr**2)) / np.sqrt(pi)
end = -erf(kr) + (4 * kr**3 + 2 * kr) * np.exp(-(kr**2)) / np.sqrt(pi)
Fx = front * (
mx * ((dx**2 / r**2) * mid + end)
+ my * (dy * dx / r**2) * mid
+ mz * (dz * dx / r**2) * mid
)
Fy = front * (
mx * (dx * dy / r**2) * mid
+ my * ((dy**2 / r**2) * mid + end)
+ mz * (dz * dy / r**2) * mid
)
Fz = front * (
mx * (dx * dz / r**2) * mid
+ my * (dy * dz / r**2) * mid
+ mz * ((dz**2 / r**2) * mid + end)
)
elif fieldType == "h":
front = (1 / (4.0 * pi * r**3)) * (
erf(kr) - 2 * kr * np.exp(-(kr**2)) / np.sqrt(pi)
)
Fx = front * (my * -dz + mz * dy)
Fy = front * (mx * dz + mz * -dx)
Fz = front * (mx * -dy + my * dx)
elif fieldType == "dhdt":
front = -(2 * k**5 / (pi**1.5 * mu * sig)) * np.exp(-(kr**2))
Fx = front * (my * -dz + mz * dy)
Fy = front * (mx * dz + mz * -dx)
Fz = front * (mx * -dy + my * dx)
return Fx, Fy, Fz
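# --- Illustrative usage (added sketch, not part of the original sample) ---
# The offsets, loop radius, time and conductivity below are arbitrary choices.
if __name__ == "__main__":
    r = np.array([20.0, 50.0, 100.0])  # source-receiver offsets [m]
    print(hzAnalyticDipoleT(r, t=1e-4, sigma=1e-2))
    print(hzAnalyticCentLoopT(a=13.0, t=1e-4, sigma=1e-2))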
|
3,101 |
get display info
|
# Created By: Virgil Dupras
# Created On: 2009-10-23
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
import mutagen
from hscommon.util import get_file_ext, format_size, format_time
from core.util import format_timestamp, format_perc, format_words, format_dupe_count
from core import fs
TAG_FIELDS = {
"audiosize",
"duration",
"bitrate",
"samplerate",
"title",
"artist",
"album",
"genre",
"year",
"track",
"comment",
}
# Temporary workaround for the can_handle method during the migration from hsaudiotag
SUPPORTED_EXTS = {"mp3", "wma", "m4a", "m4p", "ogg", "flac", "aif", "aiff", "aifc"}
class MusicFile(fs.File):
INITIAL_INFO = fs.File.INITIAL_INFO.copy()
INITIAL_INFO.update(
{
"audiosize": 0,
"bitrate": 0,
"duration": 0,
"samplerate": 0,
"artist": "",
"album": "",
"title": "",
"genre": "",
"comment": "",
"year": "",
"track": 0,
}
)
__slots__ = fs.File.__slots__ + tuple(INITIAL_INFO.keys())
@classmethod
def can_handle(cls, path):
if not fs.File.can_handle(path):
return False
return get_file_ext(path.name) in SUPPORTED_EXTS
def METHOD_NAME(self, group, delta):
size = self.size
duration = self.duration
bitrate = self.bitrate
samplerate = self.samplerate
mtime = self.mtime
m = group.get_match_of(self)
if m:
percentage = m.percentage
dupe_count = 0
if delta:
r = group.ref
size -= r.size
duration -= r.duration
bitrate -= r.bitrate
samplerate -= r.samplerate
mtime -= r.mtime
else:
percentage = group.percentage
dupe_count = len(group.dupes)
dupe_folder_path = getattr(self, "display_folder_path", self.folder_path)
return {
"name": self.name,
"folder_path": str(dupe_folder_path),
"size": format_size(size, 2, 2, False),
"duration": format_time(duration, with_hours=False),
"bitrate": str(bitrate),
"samplerate": str(samplerate),
"extension": self.extension,
"mtime": format_timestamp(mtime, delta and m),
"title": self.title,
"artist": self.artist,
"album": self.album,
"genre": self.genre,
"year": self.year,
"track": str(self.track),
"comment": self.comment,
"percentage": format_perc(percentage),
"words": format_words(self.words) if hasattr(self, "words") else "",
"dupe_count": format_dupe_count(dupe_count),
}
def _read_info(self, field):
fs.File._read_info(self, field)
if field in TAG_FIELDS:
# The various conversions here are to make this look like the previous implementation
file = mutagen.File(str(self.path), easy=True)
self.audiosize = self.path.stat().st_size
self.bitrate = file.info.bitrate / 1000
self.duration = file.info.length
self.samplerate = file.info.sample_rate
self.artist = ", ".join(file.tags.get("artist") or [])
self.album = ", ".join(file.tags.get("album") or [])
self.title = ", ".join(file.tags.get("title") or [])
self.genre = ", ".join(file.tags.get("genre") or [])
self.comment = ", ".join(file.tags.get("comment") or [""])
self.year = ", ".join(file.tags.get("date") or [])
self.track = (file.tags.get("tracknumber") or [""])[0]
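# Hedged usage sketch (the file path is an assumption, not part of dupeGuru):
# mutagen's "easy" interface, as used in _read_info above, exposes tags as
# lists of strings, e.g.
#   f = mutagen.File("song.mp3", easy=True)
#   f.info.length        -> duration in seconds (float)
#   f.tags.get("artist") -> ["Artist Name"] or None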
|
3,102 |
sample region cov
|
"""Estimate reasonable bin sizes from BAM read counts or depths."""
import logging
import os
import tempfile
import numpy as np
import pandas as pd
from skgenome import tabio, GenomicArray as GA
from . import coverage, samutil
from .antitarget import compare_chrom_names
from .descriptives import weighted_median
def midsize_file(fnames):
"""Select the median-size file from several given filenames.
If an even number of files is given, selects the file just below the median.
"""
assert fnames, "No files provided to calculate the median size."
return sorted(fnames, key=lambda f: os.stat(f).st_size)[(len(fnames) - 1) // 2]
def do_autobin(
bam_fname,
method,
targets=None,
access=None,
bp_per_bin=100000.0,
target_min_size=20,
target_max_size=50000,
antitarget_min_size=500,
antitarget_max_size=1000000,
fasta=None,
):
"""Quickly calculate reasonable bin sizes from BAM read counts.
Parameters
----------
bam_fname : string
BAM filename.
method : string
One of: 'wgs' (whole-genome sequencing), 'amplicon' (targeted amplicon
capture), 'hybrid' (hybridization capture).
targets : GenomicArray
Targeted genomic regions (for 'hybrid' and 'amplicon').
access : GenomicArray
Sequencing-accessible regions of the reference genome (for 'hybrid' and
'wgs').
bp_per_bin : int
Desired number of sequencing read nucleotide bases mapped to each bin.
Returns
-------
2-tuple of 2-tuples:
((target depth, target avg. bin size),
(antitarget depth, antitarget avg. bin size))
"""
if method in ("amplicon", "hybrid"):
if targets is None:
raise ValueError(
f"Target regions are required for method {method!r} but were "
"not provided."
)
if not len(targets):
raise ValueError(
f"Target regions are required for method {method!r} but were "
"not provided."
)
# Closes over bp_per_bin
def depth2binsize(depth, min_size, max_size):
if not depth:
return None
bin_size = int(round(bp_per_bin / depth))
if bin_size < min_size:
logging.info(
"Limiting est. bin size %d to given min. %d", bin_size, min_size
)
bin_size = min_size
elif bin_size > max_size:
logging.info(
"Limiting est. bin size %d to given max. %d", bin_size, max_size
)
bin_size = max_size
return bin_size
samutil.ensure_bam_index(bam_fname)
rc_table = samutil.idxstats(bam_fname, drop_unmapped=True, fasta=fasta)
read_len = samutil.get_read_length(bam_fname, fasta=fasta)
logging.info("Estimated read length %s", read_len)
# Dispatch
if method == "amplicon":
# From BAM index
# rc_table = update_chrom_length(rc_table, targets)
# tgt_depth = average_depth(rc_table, read_len)
# By sampling
tgt_depth = METHOD_NAME(bam_fname, targets, fasta=fasta)
anti_depth = None
elif method == "hybrid":
tgt_depth, anti_depth = hybrid(
rc_table, read_len, bam_fname, targets, access, fasta
)
elif method == "wgs":
if access is not None and len(access):
rc_table = update_chrom_length(rc_table, access)
tgt_depth = average_depth(rc_table, read_len)
anti_depth = None
# Clip bin sizes to specified ranges
tgt_bin_size = depth2binsize(tgt_depth, target_min_size, target_max_size)
anti_bin_size = depth2binsize(anti_depth, antitarget_min_size, antitarget_max_size)
return ((tgt_depth, tgt_bin_size), (anti_depth, anti_bin_size))
def hybrid(rc_table, read_len, bam_fname, targets, access=None, fasta=None):
"""Hybrid capture sequencing."""
# Identify off-target regions
if access is None:
access = idxstats2ga(rc_table, bam_fname)
# Verify BAM chromosome names match those in target BED
compare_chrom_names(access, targets)
antitargets = access.subtract(targets)
# Only examine chromosomes present in all 2-3 input datasets
rc_table, targets, antitargets = shared_chroms(rc_table, targets, antitargets)
# Deal with targets
target_depth = METHOD_NAME(bam_fname, targets, fasta=fasta)
# Antitargets: subtract captured reads from total
target_length = region_size_by_chrom(targets)["length"]
target_reads = (target_length * target_depth / read_len).values
anti_table = update_chrom_length(rc_table, antitargets)
anti_table = anti_table.assign(mapped=anti_table.mapped - target_reads)
anti_depth = average_depth(anti_table, read_len)
return target_depth, anti_depth
# ---
def average_depth(rc_table, read_length):
"""Estimate the average read depth across the genome.
Returns
-------
float
Median of the per-chromosome mean read depths, weighted by chromosome
size.
"""
mean_depths = read_length * rc_table.mapped / rc_table.length
return weighted_median(mean_depths, rc_table.length)
def idxstats2ga(table, bam_fname):
return GA(
table.assign(start=0, end=table.length).loc[:, ("chromosome", "start", "end")],
meta_dict={"filename": bam_fname},
)
def METHOD_NAME(bam_fname, regions, max_num=100, fasta=None):
"""Calculate read depth in a randomly sampled subset of regions."""
midsize_regions = sample_midsize_regions(regions, max_num)
with tempfile.NamedTemporaryFile(suffix=".bed", mode="w+t") as f:
tabio.write(regions.as_dataframe(midsize_regions), f, "bed4")
f.flush()
table = coverage.bedcov(f.name, bam_fname, 0, fasta)
# Mean read depth across all sampled regions
return table.basecount.sum() / (table.end - table.start).sum()
def sample_midsize_regions(regions, max_num):
"""Randomly select a subset of up to `max_num` regions."""
sizes = regions.end - regions.start
lo_size, hi_size = np.percentile(sizes[sizes > 0], [25, 75])
midsize_regions = regions.data[(sizes >= lo_size) & (sizes <= hi_size)]
if len(midsize_regions) > max_num:
midsize_regions = midsize_regions.sample(max_num, random_state=0xA5EED)
return midsize_regions
def shared_chroms(*tables):
"""Intersection of DataFrame .chromosome values."""
chroms = tables[0].chromosome.drop_duplicates()
for tab in tables[1:]:
if tab is not None:
new_chroms = tab.chromosome.drop_duplicates()
chroms = chroms[chroms.isin(new_chroms)]
return [None if tab is None else tab[tab.chromosome.isin(chroms)] for tab in tables]
def update_chrom_length(rc_table, regions):
if regions is not None and len(regions):
chrom_sizes = region_size_by_chrom(regions)
rc_table = rc_table.merge(chrom_sizes, on="chromosome", how="inner")
rc_table["length"] = rc_table["length_y"] # ?
rc_table = rc_table.drop(["length_x", "length_y"], axis=1)
return rc_table
def region_size_by_chrom(regions):
chromgroups = regions.data.groupby("chromosome", sort=False)
# sizes = chromgroups.apply(total_region_size) # XXX
sizes = [total_region_size(g) for _key, g in chromgroups]
return pd.DataFrame(
{"chromosome": regions.chromosome.drop_duplicates(), "length": sizes}
)
def total_region_size(regions):
"""Aggregate area of all genomic ranges in `regions`."""
return (regions.end - regions.start).sum()
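# Illustrative check of the bin-size arithmetic used inside do_autobin's
# depth2binsize helper (the depth value is an arbitrary assumption):
if __name__ == "__main__":
    depth, bp_per_bin = 30.0, 100000.0
    print(int(round(bp_per_bin / depth)))  # ~3333 bp, before min/max clipping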
|
3,103 |
impl
|
"""
Core Implementations for Generator/BitGenerator Models.
"""
from llvmlite import ir
from numba.core import cgutils, types
from numba.core.extending import (intrinsic, make_attribute_wrapper, models,
overload, register_jitable,
register_model)
from numba import float32
@register_model(types.NumPyRandomBitGeneratorType)
class NumPyRngBitGeneratorModel(models.StructModel):
def __init__(self, dmm, fe_type):
members = [
('parent', types.pyobject),
('state_address', types.uintp),
('state', types.uintp),
('fnptr_next_uint64', types.uintp),
('fnptr_next_uint32', types.uintp),
('fnptr_next_double', types.uintp),
('bit_generator', types.uintp),
]
super(NumPyRngBitGeneratorModel, self).__init__(dmm, fe_type, members)
_bit_gen_type = types.NumPyRandomBitGeneratorType('bit_generator')
@register_model(types.NumPyRandomGeneratorType)
class NumPyRandomGeneratorTypeModel(models.StructModel):
def __init__(self, dmm, fe_type):
members = [
('bit_generator', _bit_gen_type),
('meminfo', types.MemInfoPointer(types.voidptr)),
('parent', types.pyobject)
]
super(
NumPyRandomGeneratorTypeModel,
self).__init__(
dmm,
fe_type,
members)
# The Generator instances have a bit_generator attr
make_attribute_wrapper(
types.NumPyRandomGeneratorType,
'bit_generator',
'bit_generator')
def _generate_next_binding(overloadable_function, return_type):
"""
Generate the overloads for "next_(some type)" functions.
"""
@intrinsic
def intrin_NumPyRandomBitGeneratorType_next_ty(tyctx, inst):
sig = return_type(inst)
def codegen(cgctx, builder, sig, llargs):
name = overloadable_function.__name__
struct_ptr = cgutils.create_struct_proxy(inst)(cgctx, builder,
value=llargs[0])
# Get the 'state' and 'fnptr_next_(type)' members of the struct
state = struct_ptr.state
next_double_addr = getattr(struct_ptr, f'fnptr_{name}')
# LLVM IR types needed
ll_void_ptr_t = cgctx.get_value_type(types.voidptr)
ll_return_t = cgctx.get_value_type(return_type)
ll_uintp_t = cgctx.get_value_type(types.uintp)
# Convert the stored Generator function address to a pointer
next_fn_fnptr = builder.inttoptr(
next_double_addr, ll_void_ptr_t)
# Add the function to the module
fnty = ir.FunctionType(ll_return_t, (ll_uintp_t,))
next_fn = cgutils.get_or_insert_function(
builder.module, fnty, name)
# Bit cast the function pointer to the function type
fnptr_as_fntype = builder.bitcast(next_fn_fnptr, next_fn.type)
# call it with the "state" address as the arg
ret = builder.call(fnptr_as_fntype, (state,))
return ret
return sig, codegen
@overload(overloadable_function)
def ol_next_ty(bitgen):
if isinstance(bitgen, types.NumPyRandomBitGeneratorType):
def METHOD_NAME(bitgen):
return intrin_NumPyRandomBitGeneratorType_next_ty(bitgen)
return METHOD_NAME
# Some function stubs for "next(some type)", these will be overloaded
def next_double(bitgen):
return bitgen.ctypes.next_double(bitgen.ctypes.state)
def next_uint32(bitgen):
return bitgen.ctypes.next_uint32(bitgen.ctypes.state)
def next_uint64(bitgen):
return bitgen.ctypes.next_uint64(bitgen.ctypes.state)
_generate_next_binding(next_double, types.double)
_generate_next_binding(next_uint32, types.uint32)
_generate_next_binding(next_uint64, types.uint64)
# See: https://github.com/numpy/numpy/pull/20314
@register_jitable
def next_float(bitgen):
return float32(float32(next_uint32(bitgen) >> 8)
* float32(1.0) / float32(16777216.0))
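# Hedged illustration of the next_float formula above: the top 24 bits of a
# uint32 draw are scaled into [0, 1) in single precision (2**24 == 16777216).
# NumPy stands in for the jitted float32 arithmetic here; the draw is invented.
if __name__ == "__main__":
    u = 0xFFFFFFFF  # largest possible uint32 draw
    print(np.float32(u >> 8) * np.float32(1.0) / np.float32(16777216.0))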
|
3,104 |
test basic
|
from contextlib import closing
from urllib.parse import parse_qs
from urllib.parse import urlparse
from saml2 import BINDING_HTTP_POST
from saml2 import BINDING_SOAP
__author__ = "rolandh"
from saml2.authn_context import INTERNETPROTOCOLPASSWORD
from saml2.authn_context import requested_authn_context
from saml2.client import Saml2Client
from saml2.saml import NAMEID_FORMAT_TRANSIENT
from saml2.saml import NameID
from saml2.saml import Subject
from saml2.samlp import AuthnQuery
from saml2.server import Server
TAG1 = 'name="SAMLRequest" value='
AUTHN = {"class_ref": INTERNETPROTOCOLPASSWORD, "authn_auth": "http://www.example.com/login"}
def get_msg(hinfo, binding):
if binding == BINDING_SOAP:
xmlstr = hinfo["data"]
elif binding == BINDING_HTTP_POST:
_inp = hinfo["data"]
i = _inp.find(TAG1)
i += len(TAG1) + 1
j = _inp.find('"', i)
xmlstr = _inp[i:j]
else: # BINDING_HTTP_REDIRECT
parts = urlparse(hinfo["headers"][0][1])
xmlstr = parse_qs(parts.query)["SAMLRequest"][0]
return xmlstr
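# Hedged worked example of the BINDING_HTTP_POST branch above (the form snippet
# and base64 value are invented): given
#   '... name="SAMLRequest" value="PHNhbWxwOkF1dGhuUXVlcnk+" ...'
# the slice between TAG1 plus the opening quote and the next quote yields
# 'PHNhbWxwOkF1dGhuUXVlcnk+'.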
# ------------------------------------------------------------------------
def METHOD_NAME():
sp = Saml2Client(config_file="servera_conf")
with closing(Server(config_file="idp_all_conf")) as idp:
srvs = sp.metadata.authn_query_service(idp.config.entityid)
destination = srvs[0]["location"]
authn_context = requested_authn_context(INTERNETPROTOCOLPASSWORD)
subject = Subject(text="abc", name_id=NameID(format=NAMEID_FORMAT_TRANSIENT))
_id, aq = sp.create_authn_query(subject, destination, authn_context)
print(aq)
assert isinstance(aq, AuthnQuery)
def test_flow():
sp = Saml2Client(config_file="servera_conf")
with closing(Server(config_file="idp_all_conf")) as idp:
relay_state = "FOO"
# == Create an AuthnRequest response
name_id = idp.ident.transient_nameid(sp.config.entityid, "id12")
binding, destination = idp.pick_binding("assertion_consumer_service", entity_id=sp.config.entityid)
resp = idp.create_authn_response(
{
"eduPersonEntitlement": "Short stop",
"surName": "Jeter",
"givenName": "Derek",
"mail": "[email protected]",
"title": "The man",
},
"id-123456789",
destination,
sp.config.entityid,
name_id=name_id,
authn=AUTHN,
)
hinfo = idp.apply_binding(binding, f"{resp}", destination, relay_state)
# ------- @SP ----------
xmlstr = get_msg(hinfo, binding)
# Explicitly allow unsigned responses for this test
sp.want_response_signed = False
aresp = sp.parse_authn_request_response(xmlstr, binding, {resp.in_response_to: "/"})
binding, destination = sp.pick_binding("authn_query_service", entity_id=idp.config.entityid)
authn_context = requested_authn_context(INTERNETPROTOCOLPASSWORD)
subject = aresp.assertion.subject
aq_id, aq = sp.create_authn_query(subject, destination, authn_context)
print(aq)
assert isinstance(aq, AuthnQuery)
binding = BINDING_SOAP
hinfo = sp.apply_binding(binding, f"{aq}", destination, "state2")
# -------- @IDP ----------
xmlstr = get_msg(hinfo, binding)
pm = idp.parse_authn_query(xmlstr, binding)
msg = pm.message
assert msg.id == aq.id
p_res = idp.create_authn_query_response(msg.subject, msg.session_index, msg.requested_authn_context)
print(p_res)
hinfo = idp.apply_binding(binding, f"{p_res}", "", "state2", response=True)
# ------- @SP ----------
xmlstr = get_msg(hinfo, binding)
final = sp.parse_authn_query_response(xmlstr, binding)
print(final)
assert final.response.id == p_res.id
if __name__ == "__main__":
test_flow()
|
3,105 |
run on image
|
# Copyright (c) Facebook, Inc. and its affiliates.
import atexit
import bisect
import multiprocessing as mp
from collections import deque
import cv2
import torch
from detectron2.data import MetadataCatalog
from detectron2.engine.defaults import DefaultPredictor
from detectron2.utils.video_visualizer import VideoVisualizer
from detectron2.utils.visualizer import ColorMode, Visualizer
class VisualizationDemo:
def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False):
"""
Args:
cfg (CfgNode):
instance_mode (ColorMode):
parallel (bool): whether to run the model in different processes from visualization.
Useful since the visualization logic can be slow.
"""
self.metadata = MetadataCatalog.get(
cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
)
self.cpu_device = torch.device("cpu")
self.instance_mode = instance_mode
self.parallel = parallel
if parallel:
num_gpu = torch.cuda.device_count()
self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu)
else:
self.predictor = DefaultPredictor(cfg)
def METHOD_NAME(self, image):
"""
Args:
image (np.ndarray): an image of shape (H, W, C) (in BGR order).
This is the format used by OpenCV.
Returns:
predictions (dict): the output of the model.
vis_output (VisImage): the visualized image output.
"""
vis_output = None
predictions = self.predictor(image)
# Convert image from OpenCV BGR format to Matplotlib RGB format.
image = image[:, :, ::-1]
visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode)
if "panoptic_seg" in predictions:
panoptic_seg, segments_info = predictions["panoptic_seg"]
vis_output = visualizer.draw_panoptic_seg_predictions(
panoptic_seg.to(self.cpu_device), segments_info
)
else:
if "sem_seg" in predictions:
vis_output = visualizer.draw_sem_seg(
predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
)
if "instances" in predictions:
instances = predictions["instances"].to(self.cpu_device)
vis_output = visualizer.draw_instance_predictions(predictions=instances)
return predictions, vis_output
def _frame_from_video(self, video):
while video.isOpened():
success, frame = video.read()
if success:
yield frame
else:
break
def run_on_video(self, video):
"""
Visualizes predictions on frames of the input video.
Args:
video (cv2.VideoCapture): a :class:`VideoCapture` object, whose source can be
either a webcam or a video file.
Yields:
ndarray: BGR visualizations of each video frame.
"""
video_visualizer = VideoVisualizer(self.metadata, self.instance_mode)
def process_predictions(frame, predictions):
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
if "panoptic_seg" in predictions:
panoptic_seg, segments_info = predictions["panoptic_seg"]
vis_frame = video_visualizer.draw_panoptic_seg_predictions(
frame, panoptic_seg.to(self.cpu_device), segments_info
)
elif "instances" in predictions:
predictions = predictions["instances"].to(self.cpu_device)
vis_frame = video_visualizer.draw_instance_predictions(frame, predictions)
elif "sem_seg" in predictions:
vis_frame = video_visualizer.draw_sem_seg(
frame, predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
)
# Converts Matplotlib RGB format to OpenCV BGR format
vis_frame = cv2.cvtColor(vis_frame.get_image(), cv2.COLOR_RGB2BGR)
return vis_frame
frame_gen = self._frame_from_video(video)
if self.parallel:
buffer_size = self.predictor.default_buffer_size
frame_data = deque()
for cnt, frame in enumerate(frame_gen):
frame_data.append(frame)
self.predictor.put(frame)
if cnt >= buffer_size:
frame = frame_data.popleft()
predictions = self.predictor.get()
yield process_predictions(frame, predictions)
while len(frame_data):
frame = frame_data.popleft()
predictions = self.predictor.get()
yield process_predictions(frame, predictions)
else:
for frame in frame_gen:
yield process_predictions(frame, self.predictor(frame))
class AsyncPredictor:
"""
A predictor that runs the model asynchronously, possibly on >1 GPUs.
    Because rendering the visualization takes a considerable amount of time,
this helps improve throughput a little bit when rendering videos.
"""
class _StopToken:
pass
class _PredictWorker(mp.Process):
def __init__(self, cfg, task_queue, result_queue):
self.cfg = cfg
self.task_queue = task_queue
self.result_queue = result_queue
super().__init__()
def run(self):
predictor = DefaultPredictor(self.cfg)
while True:
task = self.task_queue.get()
if isinstance(task, AsyncPredictor._StopToken):
break
idx, data = task
result = predictor(data)
self.result_queue.put((idx, result))
def __init__(self, cfg, num_gpus: int = 1):
"""
Args:
cfg (CfgNode):
num_gpus (int): if 0, will run on CPU
"""
num_workers = max(num_gpus, 1)
self.task_queue = mp.Queue(maxsize=num_workers * 3)
self.result_queue = mp.Queue(maxsize=num_workers * 3)
self.procs = []
for gpuid in range(max(num_gpus, 1)):
cfg = cfg.clone()
cfg.defrost()
cfg.MODEL.DEVICE = "cuda:{}".format(gpuid) if num_gpus > 0 else "cpu"
self.procs.append(
AsyncPredictor._PredictWorker(cfg, self.task_queue, self.result_queue)
)
self.put_idx = 0
self.get_idx = 0
self.result_rank = []
self.result_data = []
for p in self.procs:
p.start()
atexit.register(self.shutdown)
def put(self, image):
self.put_idx += 1
self.task_queue.put((self.put_idx, image))
def get(self):
self.get_idx += 1 # the index needed for this request
if len(self.result_rank) and self.result_rank[0] == self.get_idx:
res = self.result_data[0]
del self.result_data[0], self.result_rank[0]
return res
while True:
# make sure the results are returned in the correct order
idx, res = self.result_queue.get()
if idx == self.get_idx:
return res
insert = bisect.bisect(self.result_rank, idx)
self.result_rank.insert(insert, idx)
self.result_data.insert(insert, res)
def __len__(self):
return self.put_idx - self.get_idx
def __call__(self, image):
self.put(image)
return self.get()
def shutdown(self):
for _ in self.procs:
self.task_queue.put(AsyncPredictor._StopToken())
@property
def default_buffer_size(self):
return len(self.procs) * 5
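# Hedged usage sketch (the config construction and file names are assumptions):
#   demo = VisualizationDemo(cfg, parallel=False)
#   predictions, vis_output = demo.METHOD_NAME(cv2.imread("input.jpg"))
#   cv2.imwrite("output.jpg", vis_output.get_image()[:, :, ::-1])  # RGB -> BGR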
|
3,106 |
load report from tsv
|
#!/usr/bin/env python3
# Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
# This source code is licensed under both the GPLv2 (found in the
# COPYING file in the root directory) and Apache 2.0 License
# (found in the LICENSE.Apache file in the root directory).
"""Access the results of benchmark runs
Send these results on to OpenSearch graphing service
"""
import argparse
import itertools
import logging
import os
import re
import sys
import requests
from dateutil import parser
logging.basicConfig(level=logging.DEBUG)
class Configuration:
opensearch_user = os.environ["ES_USER"]
opensearch_pass = os.environ["ES_PASS"]
class BenchmarkResultException(Exception):
def __init__(self, message, content):
super().__init__(self, message)
self.content = content
class BenchmarkUtils:
expected_keys = [
"ops_sec",
"mb_sec",
"lsm_sz",
"blob_sz",
"c_wgb",
"w_amp",
"c_mbps",
"c_wsecs",
"c_csecs",
"b_rgb",
"b_wgb",
"usec_op",
"p50",
"p99",
"p99.9",
"p99.99",
"pmax",
"uptime",
"stall%",
"Nstall",
"u_cpu",
"s_cpu",
"rss",
"test",
"date",
"version",
"job_id",
]
    @staticmethod
    def sanity_check(row):
if "test" not in row:
logging.debug(f"not 'test' in row: {row}")
return False
if row["test"] == "":
logging.debug(f"row['test'] == '': {row}")
return False
if "date" not in row:
logging.debug(f"not 'date' in row: {row}")
return False
if "ops_sec" not in row:
logging.debug(f"not 'ops_sec' in row: {row}")
return False
try:
_ = int(row["ops_sec"])
except (ValueError, TypeError):
logging.debug(f"int(row['ops_sec']): {row}")
return False
try:
(_, _) = parser.parse(row["date"], fuzzy_with_tokens=True)
except (parser.ParserError):
logging.error(
f"parser.parse((row['date']): not a valid format for date in row: {row}"
)
return False
return True
    @staticmethod
    def conform_opensearch(row):
(dt, _) = parser.parse(row["date"], fuzzy_with_tokens=True)
        # Create a test_date field, which is what was previously expected.
        # Also repair the date field, which can arrive in a malformed ISO format
        # (no leading zero on a single-digit day of month),
        # e.g. 2022-07-1T00:14:55 should be 2022-07-01T00:14:55.
row["test_date"] = dt.isoformat()
row["date"] = dt.isoformat()
return {key.replace(".", "_"): value for key, value in row.items()}
class ResultParser:
def __init__(self, field="(\w|[+-:.%])+", intrafield="(\s)+", separator="\t"):
self.field = re.compile(field)
self.intra = re.compile(intrafield)
self.sep = re.compile(separator)
def ignore(self, l_in: str):
if len(l_in) == 0:
return True
if l_in[0:1] == "#":
return True
return False
def line(self, line_in: str):
"""Parse a line into items
Being clever about separators
"""
line = line_in
row = []
while line != "":
match_item = self.field.match(line)
if match_item:
item = match_item.group(0)
row.append(item)
line = line[len(item) :]
else:
match_intra = self.intra.match(line)
if match_intra:
intra = match_intra.group(0)
# Count the separators
# If there are >1 then generate extra blank fields
# White space with no true separators fakes up a single separator
tabbed = self.sep.split(intra)
sep_count = len(tabbed) - 1
if sep_count == 0:
sep_count = 1
for _ in range(sep_count - 1):
row.append("")
line = line[len(intra) :]
else:
raise BenchmarkResultException(
"Invalid TSV line", f"{line_in} at {line}"
)
return row
def parse(self, lines):
"""Parse something that iterates lines"""
rows = [self.line(line) for line in lines if not self.ignore(line)]
header = rows[0]
width = len(header)
records = [
{k: v for (k, v) in itertools.zip_longest(header, row[:width])}
for row in rows[1:]
]
return records
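    # Hedged mini-example (values invented):
    #   ResultParser().parse(["ops_sec\ttest\tdate",
    #                         "12345\tfillseq\t2022-07-01T00:14:55"])
    # returns [{"ops_sec": "12345", "test": "fillseq",
    #           "date": "2022-07-01T00:14:55"}].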
def METHOD_NAME(filename: str):
file = open(filename, "r")
contents = file.readlines()
file.close()
parser = ResultParser()
report = parser.parse(contents)
logging.debug(f"Loaded TSV Report: {report}")
return report
def push_report_to_opensearch(report, esdocument):
sanitized = [
BenchmarkUtils.conform_opensearch(row)
for row in report
if BenchmarkUtils.sanity_check(row)
]
logging.debug(
f"upload {len(sanitized)} sane of {len(report)} benchmarks to opensearch"
)
for single_benchmark in sanitized:
logging.debug(f"upload benchmark: {single_benchmark}")
response = requests.post(
esdocument,
json=single_benchmark,
auth=(os.environ["ES_USER"], os.environ["ES_PASS"]),
)
logging.debug(
f"Sent to OpenSearch, status: {response.status_code}, result: {response.text}"
)
response.raise_for_status()
def push_report_to_null(report):
for row in report:
if BenchmarkUtils.sanity_check(row):
logging.debug(f"row {row}")
conformed = BenchmarkUtils.conform_opensearch(row)
logging.debug(f"conformed row {conformed}")
def main():
"""Tool for fetching, parsing and uploading benchmark results to OpenSearch / ElasticSearch
This tool will
(1) Open a local tsv benchmark report file
(2) Upload to OpenSearch document, via https/JSON
"""
parser = argparse.ArgumentParser(description="CircleCI benchmark scraper.")
# --tsvfile is the name of the file to read results from
# --esdocument is the ElasticSearch document to push these results into
#
parser.add_argument(
"--tsvfile",
default="build_tools/circle_api_scraper_input.txt",
help="File from which to read tsv report",
)
parser.add_argument(
"--esdocument",
help="ElasticSearch/OpenSearch document URL to upload report into",
)
parser.add_argument(
"--upload", choices=["opensearch", "none"], default="opensearch"
)
args = parser.parse_args()
logging.debug(f"Arguments: {args}")
reports = METHOD_NAME(args.tsvfile)
if args.upload == "opensearch":
push_report_to_opensearch(reports, args.esdocument)
else:
push_report_to_null(reports)
if __name__ == "__main__":
sys.exit(main())
|
3,107 |
initialize
|
import math
import random
from collections import defaultdict
from typing import Union, Tuple, Optional, Dict, Callable, cast, Sequence
import torch
import torch.nn.functional as F
from allenact.algorithms.onpolicy_sync.policy import ObservationType
from allenact.algorithms.onpolicy_sync.storage import (
MiniBatchStorageMixin,
ExperienceStorage,
)
from allenact.base_abstractions.misc import (
GenericAbstractLoss,
ModelType,
Memory,
LossOutput,
)
from allenact.utils.misc_utils import unzip, partition_sequence
def _index_recursive(d: Dict, key: Union[str, Tuple[str, ...]]):
if isinstance(key, str):
return d[key]
for k in key:
d = d[k]
return d
class InverseDynamicsVDRLoss(GenericAbstractLoss):
def __init__(
self,
compute_action_logits_fn: Callable,
img0_key: str,
img1_key: str,
action_key: str,
):
self.compute_action_logits_fn = compute_action_logits_fn
self.img0_key = img0_key
self.img1_key = img1_key
self.action_key = action_key
def loss(
self,
*,
model: ModelType,
batch: ObservationType,
batch_memory: Memory,
stream_memory: Memory,
) -> LossOutput:
action_logits = self.compute_action_logits_fn(
model=model, img0=batch[self.img0_key], img1=batch[self.img1_key],
)
loss = F.cross_entropy(action_logits, target=batch[self.action_key])
return LossOutput(
value=loss,
info={"cross_entropy": loss.item()},
per_epoch_info={},
batch_memory=batch_memory,
stream_memory=stream_memory,
bsize=int(batch[self.img0_key].shape[0]),
)
class DiscreteVisualDynamicsReplayStorage(ExperienceStorage, MiniBatchStorageMixin):
def __init__(
self,
image_uuid: Union[str, Tuple[str, ...]],
action_success_uuid: Optional[Union[str, Tuple[str, ...]]],
nactions: int,
num_to_store_per_action: int,
max_to_save_per_episode: int,
target_batch_size: int,
extra_targets: Optional[Sequence] = None,
):
self.image_uuid = image_uuid
self.action_success_uuid = action_success_uuid
self.nactions = nactions
self.num_to_store_per_action = num_to_store_per_action
self.max_to_save_per_episode = max_to_save_per_episode
self.target_batch_size = target_batch_size
self.extra_targets = extra_targets if extra_targets is not None else []
self._prev_imgs: Optional[torch.Tensor] = None
self.action_to_saved_transitions = {i: [] for i in range(nactions)}
self.action_to_num_seen = {i: 0 for i in range(nactions)}
self.task_sampler_to_actions_already_sampled = defaultdict(lambda: set())
self.device = torch.device("cpu")
self._total_samples_returned_in_batches = 0
@property
def total_experiences(self):
return self._total_samples_returned_in_batches
def set_partition(self, index: int, num_parts: int):
self.num_to_store_per_action = math.ceil(
self.num_to_store_per_action / num_parts
)
self.target_batch_size = math.ceil(self.target_batch_size / num_parts)
def METHOD_NAME(self, *, observations: ObservationType, **kwargs):
self._prev_imgs = None
self.add(observations=observations, actions=None, masks=None)
def batched_experience_generator(self, num_mini_batch: int):
triples = [
(i0, a, i1)
for a, v in self.action_to_saved_transitions.items()
for (i0, i1) in v
]
random.shuffle(triples)
if len(triples) == 0:
return
parts = partition_sequence(
triples, math.ceil(len(triples) / self.target_batch_size)
)
for part in parts:
img0s, actions, img1s = unzip(part, n=3)
img0 = torch.stack([i0.to(self.device) for i0 in img0s], 0)
action = torch.tensor(actions, device=self.device)
img1 = torch.stack([i1.to(self.device) for i1 in img1s], 0)
self._total_samples_returned_in_batches += img0.shape[0]
yield {"img0": img0, "action": action, "img1": img1}
def add(
self,
*,
observations: ObservationType,
actions: Optional[torch.Tensor],
masks: Optional[torch.Tensor],
**kwargs,
):
cur_imgs = cast(
torch.Tensor, _index_recursive(d=observations, key=self.image_uuid).cpu()
)
if self._prev_imgs is not None:
actions = actions.view(-1).cpu().numpy()
masks = masks.view(-1).cpu().numpy()
if self.action_success_uuid is not None:
action_successes = (
observations[self.action_success_uuid].cpu().view(-1).numpy()
)
else:
action_successes = [True] * actions.shape[0]
extra = {}
for et in self.extra_targets:
extra[et] = observations[et][0].cpu().numpy()
nsamplers = actions.shape[0]
assert nsamplers == masks.shape[0]
for i, (a, m, action_success) in enumerate(
zip(actions, masks, action_successes)
):
actions_already_sampled_in_ep = self.task_sampler_to_actions_already_sampled[
i
]
if (
m != 0
and action_success
and (
len(actions_already_sampled_in_ep)
<= self.max_to_save_per_episode
)
and a not in actions_already_sampled_in_ep
): # Not the start of a new episode/task -> self._prev_imgs[i] corresponds to cur_imgs[i]
saved_transitions = self.action_to_saved_transitions[a]
if len(saved_transitions) < self.num_to_store_per_action:
saved_transitions.append((self._prev_imgs[i], cur_imgs[i]))
else:
saved_transitions[
random.randint(0, len(saved_transitions) - 1)
] = (
self._prev_imgs[i],
cur_imgs[i],
)
# Reservoir sampling transitions
# a = int(a)
# saved_transitions = self.action_to_saved_transitions[a]
# num_seen = self.action_to_num_seen[a]
# if num_seen < self.triples_to_save_per_action:
# saved_transitions.append((self._prev_imgs[i], cur_imgs[i]))
# else:
# index = random.randint(0, num_seen)
# if index < self.triples_to_save_per_action:
# saved_transitions[index] = (self._prev_imgs[i], cur_imgs[i])
actions_already_sampled_in_ep.add(a)
self.action_to_num_seen[a] += 1
else:
actions_already_sampled_in_ep.clear()
self._prev_imgs = cur_imgs
def before_updates(self, **kwargs):
pass
def after_updates(self, **kwargs):
pass
def to(self, device: torch.device):
self.device = device
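# Hedged construction sketch (the callable and observation keys are assumptions,
# not taken from an AllenAct experiment config):
#   loss = InverseDynamicsVDRLoss(
#       compute_action_logits_fn=my_inverse_dynamics_forward,  # hypothetical
#       img0_key="img0", img1_key="img1", action_key="action",
#   )
# The keys match the batch dict yielded by
# DiscreteVisualDynamicsReplayStorage.batched_experience_generator above.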
|
3,108 |
numba funcify qr full
|
import warnings
import numba
import numpy as np
from aesara.link.numba.dispatch import basic as numba_basic
from aesara.link.numba.dispatch.basic import (
_numba_funcify,
get_numba_type,
int_to_float_fn,
)
from aesara.tensor.nlinalg import (
SVD,
Det,
Eig,
Eigh,
Inv,
MatrixInverse,
MatrixPinv,
QRFull,
)
@_numba_funcify.register(SVD)
def numba_funcify_SVD(op, node, **kwargs):
full_matrices = op.full_matrices
compute_uv = op.compute_uv
if not compute_uv:
warnings.warn(
(
"Numba will use object mode to allow the "
"`compute_uv` argument to `numpy.linalg.svd`."
),
UserWarning,
)
ret_sig = get_numba_type(node.outputs[0].type, node.outputs[0])
@numba_basic.numba_njit
def svd(x):
with numba.objmode(ret=ret_sig):
ret = np.linalg.svd(x, full_matrices, compute_uv)
return ret
else:
out_dtype = node.outputs[0].type.numpy_dtype
inputs_cast = int_to_float_fn(node.inputs, out_dtype)
@numba_basic.numba_njit(inline="always")
def svd(x):
return np.linalg.svd(inputs_cast(x), full_matrices)
return svd
@_numba_funcify.register(Det)
def numba_funcify_Det(op, node, **kwargs):
out_dtype = node.outputs[0].type.numpy_dtype
inputs_cast = int_to_float_fn(node.inputs, out_dtype)
@numba_basic.numba_njit(inline="always")
def det(x):
return numba_basic.direct_cast(np.linalg.det(inputs_cast(x)), out_dtype)
return det
@_numba_funcify.register(Eig)
def numba_funcify_Eig(op, node, **kwargs):
out_dtype_1 = node.outputs[0].type.numpy_dtype
out_dtype_2 = node.outputs[1].type.numpy_dtype
inputs_cast = int_to_float_fn(node.inputs, out_dtype_1)
@numba_basic.numba_njit
def eig(x):
out = np.linalg.eig(inputs_cast(x))
return (out[0].astype(out_dtype_1), out[1].astype(out_dtype_2))
return eig
@_numba_funcify.register(Eigh)
def numba_funcify_Eigh(op, node, **kwargs):
uplo = op.UPLO
if uplo != "L":
warnings.warn(
(
"Numba will use object mode to allow the "
"`UPLO` argument to `numpy.linalg.eigh`."
),
UserWarning,
)
out_dtypes = tuple(o.type.numpy_dtype for o in node.outputs)
ret_sig = numba.types.Tuple(
[
get_numba_type(node.outputs[0].type, node.outputs[0]),
get_numba_type(node.outputs[1].type, node.outputs[1]),
]
)
@numba_basic.numba_njit
def eigh(x):
with numba.objmode(ret=ret_sig):
out = np.linalg.eigh(x, UPLO=uplo)
ret = (out[0].astype(out_dtypes[0]), out[1].astype(out_dtypes[1]))
return ret
else:
@numba_basic.numba_njit(inline="always")
def eigh(x):
return np.linalg.eigh(x)
return eigh
@_numba_funcify.register(Inv)
def numba_funcify_Inv(op, node, **kwargs):
out_dtype = node.outputs[0].type.numpy_dtype
inputs_cast = int_to_float_fn(node.inputs, out_dtype)
@numba_basic.numba_njit(inline="always")
def inv(x):
return np.linalg.inv(inputs_cast(x)).astype(out_dtype)
return inv
@_numba_funcify.register(MatrixInverse)
def numba_funcify_MatrixInverse(op, node, **kwargs):
out_dtype = node.outputs[0].type.numpy_dtype
inputs_cast = int_to_float_fn(node.inputs, out_dtype)
@numba_basic.numba_njit(inline="always")
def matrix_inverse(x):
return np.linalg.inv(inputs_cast(x)).astype(out_dtype)
return matrix_inverse
@_numba_funcify.register(MatrixPinv)
def numba_funcify_MatrixPinv(op, node, **kwargs):
out_dtype = node.outputs[0].type.numpy_dtype
inputs_cast = int_to_float_fn(node.inputs, out_dtype)
@numba_basic.numba_njit(inline="always")
def matrixpinv(x):
return np.linalg.pinv(inputs_cast(x)).astype(out_dtype)
return matrixpinv
@_numba_funcify.register(QRFull)
def METHOD_NAME(op, node, **kwargs):
mode = op.mode
if mode != "reduced":
warnings.warn(
(
"Numba will use object mode to allow the "
"`mode` argument to `numpy.linalg.qr`."
),
UserWarning,
)
if len(node.outputs) > 1:
ret_sig = numba.types.Tuple(
[get_numba_type(o.type, o) for o in node.outputs]
)
else:
ret_sig = get_numba_type(node.outputs[0].type, node.outputs[0])
@numba_basic.numba_njit
def qr_full(x):
with numba.objmode(ret=ret_sig):
ret = np.linalg.qr(x, mode=mode)
return ret
else:
out_dtype = node.outputs[0].type.numpy_dtype
inputs_cast = int_to_float_fn(node.inputs, out_dtype)
@numba_basic.numba_njit(inline="always")
def qr_full(x):
return np.linalg.qr(inputs_cast(x))
return qr_full
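# Hedged note on the dispatch pattern above: when QRFull.mode == "reduced",
# np.linalg.qr is compiled directly under njit; any other mode is unsupported
# in nopython mode, so the call falls back to numba.objmode with an explicit
# return signature, mirroring the SVD and Eigh overloads earlier in this file.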
|
3,109 |
annotate with queryset
|
"""Base classes for metric annotators for the standings generator.
Each metric annotator is responsible for computing a particular metric for each
item and annotating the standings with them, for example, in the case of teams,
number of wins (points), or draw strength. Subclasses should be defined in
context-specific files, e.g., teams.py or speakers.py.
"""
import logging
from django.db.models import Case, F, When
logger = logging.getLogger(__name__)
def metricgetter(items, negate=None):
"""Returns a callable object that fetches each item in `items` from its
operand's `metrics` attribute, and returns a tuple containing the results.
The tuple will have the same number for elements as `items`.
For example:
- After `f = metricgetter(("a",))`, the call `f(x)` returns `(x.metrics["a"],)`.
- After `g = metricgetter((4, 9))`, the call `g(x)` returns `(x.metrics[4], x.metrics[9])`.
If the metric is None (e.g. no scores so can't calculate stdev), use 0 instead to sort.
"""
if negate is None:
def metricitemgetter(x):
return tuple(x.metrics[item] or 0 for item in items)
else:
assert len(items) == len(negate), "items had %d items but negate had %d" % (len(items), len(negate))
coeffs = [-1 if neg else 1 for neg in negate]
def metricitemgetter(x):
return tuple(coeff * (x.metrics[item] or 0) for (coeff, item) in zip(coeffs, items))
return metricitemgetter
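# Hedged example of the `negate` option above (metric keys are illustrative):
#   f = metricgetter(("points", "speaks"), negate=(False, True))
# f(x) returns (x.metrics["points"] or 0, -(x.metrics["speaks"] or 0)), so a
# descending sort on the tuple effectively ranks "speaks" in ascending order.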
class BaseMetricAnnotator:
"""Base class for all metric annotators.
A metric annotator is a class that adds a metric to a Standings object.
Subclasses must implement the method `annotate()`. Every annotator
must add precisely one metric.
Subclasses must set the `key`, `name` and `abbr` attributes.
The default constructor does nothing, but subclasses may have constructors
that initialise themselves with parameters."""
key = None # must be set by subclasses
name = None # must be set by subclasses
abbr = None # must be set by subclasses
icon = None
ranked_only = False
repeatable = False
listed = True
ascending = False # if True, this metric is sorted in ascending order, not descending
combinable = False # if True, use single query with all combinable metrics
def run(self, queryset, standings, round=None):
standings.record_added_metric(self.key, self.name, self.abbr, self.icon, self.ascending)
self.annotate(queryset, standings, round)
def annotate(self, queryset, standings, round=None):
"""Annotates the given `standings` by calling `add_metric()` on every
`StandingInfo` object in `standings`.
`queryset` is the queryset on which the standings are produced.
`standings` is a `Standings` object.
`round`, if specified, is a `Round` object that is assumed to be in the
relevant tournament.
"""
raise NotImplementedError("BaseMetricAnnotator subclasses must implement annotate()")
class RepeatedMetricAnnotator(BaseMetricAnnotator):
"""Base class for metric annotators that can be used multiple times.
Subclasses should set the `key_prefix`, `name_prefix` and `abbr_prefix`
class attributes, and use the `key` attribute when adding metrics in
implementing `annotate()`."""
ranked_only = True # Repeated metrics don't make sense outside the precedence
repeatable = True
def __init__(self, index, keys):
self.index = index
self.key = self.key_prefix + str(index)
self.name = self.name_prefix + " " + str(index)
self.abbr = self.abbr_prefix + str(index)
self.keys = keys
class QuerySetMetricAnnotator(BaseMetricAnnotator):
"""Base class for annotators that metrics based on conditional aggregations."""
combinable = True
def get_annotation(self, round):
raise NotImplementedError("Subclasses of QuerySetMetricAnnotator must implement get_annotation().")
def get_annotated_queryset(self, queryset, round=None):
"""Returns a QuerySet annotated with the metric given."""
annotation = self.get_annotation(round=round)
logger.info("Annotation in %s: %s", self.__class__.__name__, str(annotation))
self.queryset_annotated = True
return queryset.annotate(**{self.key: annotation})
def get_ranking_annotation(self, min_field, min_rounds):
if min_rounds is None:
return F(self.key)
return Case(When(**{min_field + "__gte": min_rounds, "then": F(self.key)}))
def METHOD_NAME(self, queryset, standings):
"""Annotates items with the given QuerySet."""
for item in queryset:
standings.add_metric(item, self.key, getattr(item, self.key))
def annotate(self, queryset, standings, round=None):
if self.combinable:
assert self.queryset_annotated, "get_annotated_queryset() must be run before annotate()"
self.METHOD_NAME(queryset, standings)
else:
self.METHOD_NAME(self.get_annotated_queryset(queryset, round), standings)
|
3,110 |
test add weekly schedule with invalid options
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
from types import SimpleNamespace
# from azure.cli.core.util import CLIError
from azure.cli.core.azclierror import (
InvalidArgumentValueError,
RequiredArgumentMissingError,
MutuallyExclusiveArgumentError,
)
from azure.cli.command_modules.acs.maintenanceconfiguration import aks_maintenanceconfiguration_update_internal
from azure.cli.command_modules.acs.tests.latest.mocks import MockCLI, MockCmd
from azure.cli.core.profiles import ResourceType
class TestAddMaintenanceConfiguration(unittest.TestCase):
def setUp(self):
self.cli_ctx = MockCLI()
self.cmd = MockCmd(self.cli_ctx)
self.resource_type = ResourceType.MGMT_CONTAINERSERVICE
def test_add_maintenance_configuration_with_invalid_name(self):
cmd = SimpleNamespace()
raw_parameters = {
"resource_group_name": "test_rg",
"cluster_name": "test_cluster",
"config_name": "something",
}
err = ("--config-name must be one of default, aksManagedAutoUpgradeSchedule or aksManagedNodeOSUpgradeSchedule, not something")
with self.assertRaises(InvalidArgumentValueError) as cm:
aks_maintenanceconfiguration_update_internal(cmd, None, raw_parameters)
self.assertEqual(str(cm.exception), err)
def test_add_default_maintenance_configuration_with_schedule_type(self):
cmd = SimpleNamespace()
raw_parameters = {
"resource_group_name": "test_rg",
"cluster_name": "test_cluster",
"config_name": "default",
"weekday": "Monday",
"start_hour": 1,
"schedule_type": "Weekly",
}
err = ("--schedule-type is not supported for default maintenance configuration.")
with self.assertRaises(MutuallyExclusiveArgumentError) as cm:
aks_maintenanceconfiguration_update_internal(cmd, None, raw_parameters)
self.assertEqual(str(cm.exception), err)
def test_add_non_default_schedule_with_weekday(self):
cmd = SimpleNamespace()
raw_parameters = {
"resource_group_name": "test_rg",
"cluster_name": "test_cluster",
"config_name": "aksManagedAutoUpgradeSchedule",
"weekday": "Monday",
}
err = ("--weekday and --start-hour are only applicable to default maintenance configuration.")
with self.assertRaises(MutuallyExclusiveArgumentError) as cm:
aks_maintenanceconfiguration_update_internal(cmd, None, raw_parameters)
self.assertEqual(str(cm.exception), err)
def test_add_daily_schedule_with_missing_options(self):
cli_ctx = MockCLI()
cmd = MockCmd(cli_ctx)
raw_parameters = {
"resource_group_name": "test_rg",
"cluster_name": "test_cluster",
"config_name": "aksManagedAutoUpgradeSchedule",
"schedule_type": "Daily",
}
err = ("Please specify --interval-days when using daily schedule.")
with self.assertRaises(RequiredArgumentMissingError) as cm:
aks_maintenanceconfiguration_update_internal(cmd, None, raw_parameters)
self.assertEqual(str(cm.exception), err)
def test_add_daily_schedule_with_invalid_options(self):
cmd = MockCmd(self.cli_ctx)
raw_parameters = {
"resource_group_name": "test_rg",
"cluster_name": "test_cluster",
"config_name": "aksManagedAutoUpgradeSchedule",
"schedule_type": "Daily",
"interval_days": 3,
"day_of_week": "Monday",
}
err = ("--interval-weeks, --interval-months, --day-of-week, --day-of-month and --week-index cannot be used for Daily schedule.")
with self.assertRaises(MutuallyExclusiveArgumentError) as cm:
aks_maintenanceconfiguration_update_internal(cmd, None, raw_parameters)
self.assertEqual(str(cm.exception), err)
def METHOD_NAME(self):
cmd = MockCmd(self.cli_ctx)
raw_parameters = {
"resource_group_name": "test_rg",
"cluster_name": "test_cluster",
"config_name": "aksManagedAutoUpgradeSchedule",
"schedule_type": "Weekly",
"day_of_week": "Monday",
"interval_weeks": 3,
"week_index": "First",
}
err = ("--interval-months, --day-of-month and --week-index cannot be used for Weekly schedule.")
with self.assertRaises(MutuallyExclusiveArgumentError) as cm:
aks_maintenanceconfiguration_update_internal(cmd, None, raw_parameters)
self.assertEqual(str(cm.exception), err)
def test_add_absolute_monthly_schedule_with_missing_options(self):
cmd = MockCmd(self.cli_ctx)
raw_parameters = {
"resource_group_name": "test_rg",
"cluster_name": "test_cluster",
"config_name": "aksManagedAutoUpgradeSchedule",
"schedule_type": "AbsoluteMonthly",
"day_of_week": "Monday",
"interval_months": 3,
}
err = ("Please specify --interval-months and --day-of-month when using absolute monthly schedule.")
with self.assertRaises(RequiredArgumentMissingError) as cm:
aks_maintenanceconfiguration_update_internal(cmd, None, raw_parameters)
self.assertEqual(str(cm.exception), err)
def test_add_absolute_monthly_schedule_with_invalid_options(self):
cmd = MockCmd(self.cli_ctx)
raw_parameters = {
"resource_group_name": "test_rg",
"cluster_name": "test_cluster",
"config_name": "aksManagedAutoUpgradeSchedule",
"schedule_type": "AbsoluteMonthly",
"day_of_month": 15,
"interval_months": 3,
"week_index": "First",
}
err = ("--interval-days, --interval-weeks, --day-of-week and --week-index cannot be used for AbsoluteMonthly schedule.")
with self.assertRaises(MutuallyExclusiveArgumentError) as cm:
aks_maintenanceconfiguration_update_internal(cmd, None, raw_parameters)
self.assertEqual(str(cm.exception), err)
def test_add_relative_monthly_schedule_with_missing_options(self):
cmd = MockCmd(self.cli_ctx)
raw_parameters = {
"resource_group_name": "test_rg",
"cluster_name": "test_cluster",
"config_name": "aksManagedAutoUpgradeSchedule",
"schedule_type": "RelativeMonthly",
"day_of_week": "Monday",
"interval_months": 3,
}
err = ("Please specify --interval-months, --day-of-week and --week-index when using relative monthly schedule.")
with self.assertRaises(RequiredArgumentMissingError) as cm:
aks_maintenanceconfiguration_update_internal(cmd, None, raw_parameters)
self.assertEqual(str(cm.exception), err)
def test_add_dedicated_schedule_with_missing_options(self):
cmd = MockCmd(self.cli_ctx)
raw_parameters = {
"resource_group_name": "test_rg",
"cluster_name": "test_cluster",
"config_name": "aksManagedAutoUpgradeSchedule",
"schedule_type": "AbsoluteMonthly",
"day_of_month": 1,
"interval_months": 3,
"start_time": "00:00",
}
err = ("Please specify --duration for maintenance window.")
with self.assertRaises(RequiredArgumentMissingError) as cm:
aks_maintenanceconfiguration_update_internal(cmd, None, raw_parameters)
self.assertEqual(str(cm.exception), err)
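# Hedged summary of the option matrix exercised by these tests: Daily schedules
# take only --interval-days; Weekly takes --day-of-week and --interval-weeks;
# AbsoluteMonthly needs --interval-months and --day-of-month; RelativeMonthly
# needs --interval-months, --day-of-week and --week-index; and every dedicated
# schedule also needs --start-time plus --duration for the maintenance window.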
|
3,111 |
build embedding
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
RoBERTa: A Robustly Optimized BERT Pretraining Approach.
"""
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.model_parallel.models.transformer import ModelParallelTransformerEncoder
from fairseq.models import register_model, register_model_architecture
from fairseq.models.roberta import (
roberta_base_architecture,
roberta_prenorm_architecture,
RobertaEncoder,
RobertaModel,
)
from fairseq.modules import LayerNorm
try:
from fairseq.model_parallel.megatron.mpu import (
copy_to_model_parallel_region,
gather_from_model_parallel_region,
ColumnParallelLinear,
VocabParallelEmbedding,
)
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
logger = logging.getLogger(__name__)
@register_model("model_parallel_roberta")
class ModelParallelRobertaModel(RobertaModel):
def __init__(self, args, encoder):
super().__init__(args, encoder)
self.classification_heads = nn.ModuleDict()
@staticmethod
def add_args(parser):
RobertaModel.add_args(parser)
parser.add_argument(
"--no-final-layer-norm",
action="store_true",
help=(
"don't add final layernorm (only applicable when "
"--encoder-normalize-before=True"
),
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present
base_architecture(args)
task.source_dictionary.pad_to_multiple_(args.model_parallel_size * 8)
task.target_dictionary.pad_to_multiple_(args.model_parallel_size * 8)
if not hasattr(args, "max_positions"):
args.max_positions = args.tokens_per_sample
if getattr(args, "untie_weights_roberta", False):
raise NotImplementedError(
"--untie-weights-roberta is not supported in model parallel mode"
)
encoder = ModelParallelRobertaEncoder(args, task.source_dictionary)
return cls(args, encoder)
def forward(
self,
src_tokens,
features_only=False,
return_all_hiddens=False,
classification_head_name=None,
**kwargs
):
if classification_head_name is not None:
features_only = True
x, extra = self.encoder(src_tokens, features_only, return_all_hiddens, **kwargs)
if classification_head_name is not None:
x = self.classification_heads[classification_head_name](x)
return x, extra
def register_classification_head(
self, name, num_classes=None, inner_dim=None, **kwargs
):
"""Register a classification head."""
if name in self.classification_heads:
prev_num_classes = self.classification_heads[name].out_proj.out_features
prev_inner_dim = self.classification_heads[name].dense.out_features
if num_classes != prev_num_classes or inner_dim != prev_inner_dim:
logger.warning(
're-registering head "{}" with num_classes {} (prev: {}) '
"and inner_dim {} (prev: {})".format(
name, num_classes, prev_num_classes, inner_dim, prev_inner_dim
)
)
self.classification_heads[name] = ModelParallelRobertaClassificationHead(
self.args.encoder_embed_dim,
inner_dim or self.args.encoder_embed_dim,
num_classes,
self.args.pooler_activation_fn,
self.args.pooler_dropout,
)
class ModelParallelRobertaLMHead(nn.Module):
"""Head for masked language modeling."""
def __init__(self, embed_dim, output_dim, activation_fn, weight=None):
super().__init__()
self.dense = ColumnParallelLinear(embed_dim, embed_dim, gather_output=True)
self.activation_fn = utils.get_activation_fn(activation_fn)
self.layer_norm = LayerNorm(embed_dim)
if weight is None:
weight = nn.Linear(embed_dim, output_dim, bias=False).weight
self.weight = weight
self.bias = nn.Parameter(torch.zeros(output_dim))
def forward(self, features, masked_tokens=None, **kwargs):
# Only project the unmasked tokens while training;
# this saves both memory and computation
if masked_tokens is not None:
features = features[masked_tokens, :]
x = self.dense(features)
x = self.activation_fn(x)
x = self.layer_norm(x)
x = copy_to_model_parallel_region(x)
# project back to size of vocabulary with bias
x = F.linear(x, self.weight)
x = gather_from_model_parallel_region(x).contiguous()
x = x + self.bias
return x
class ModelParallelRobertaClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(
self, input_dim, inner_dim, num_classes, activation_fn, pooler_dropout
):
super().__init__()
self.dense = ColumnParallelLinear(input_dim, inner_dim, gather_output=True)
self.activation_fn = utils.get_activation_fn(activation_fn)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
def forward(self, features, **kwargs):
x = features[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = self.activation_fn(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
class ModelParallelRobertaEncoder(RobertaEncoder):
"""RoBERTa encoder."""
def __init__(self, args, dictionary):
super().__init__(args, dictionary)
assert not self.args.untie_weights_roberta
def METHOD_NAME(self, vocab_size, embedding_dim, padding_idx):
return VocabParallelEmbedding(vocab_size, embedding_dim, padding_idx)
def build_encoder(self, args, dictionary, embed_tokens):
return ModelParallelTransformerEncoder(args, dictionary, embed_tokens)
def build_lm_head(self, embed_dim, output_dim, activation_fn, weight):
return ModelParallelRobertaLMHead(embed_dim, output_dim, activation_fn, weight)
@register_model_architecture("model_parallel_roberta", "model_parallel_roberta")
def base_architecture(args):
args.no_final_layer_norm = getattr(args, "no_final_layer_norm", False)
# model parallel RoBERTa defaults to "Pre-LN" formulation
roberta_prenorm_architecture(args)
# earlier versions of model parallel RoBERTa removed the final layer norm
@register_model_architecture("model_parallel_roberta", "model_parallel_roberta_v1")
def model_parallel_roberta_v1_architecture(args):
args.no_final_layer_norm = getattr(args, "no_final_layer_norm", True)
base_architecture(args)
@register_model_architecture(
"model_parallel_roberta", "model_parallel_roberta_postnorm"
)
def model_parallel_roberta_postnorm_architecture(args):
# the original BERT/RoBERTa uses the "Post-LN" formulation
roberta_base_architecture(args)
@register_model_architecture("model_parallel_roberta", "model_parallel_roberta_base")
def model_parallel_roberta_base_architecture(args):
base_architecture(args)
@register_model_architecture("model_parallel_roberta", "model_parallel_roberta_large")
def model_parallel_roberta_large_architecture(args):
args.encoder_layers = getattr(args, "encoder_layers", 24)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
base_architecture(args)
|
3,112 |
cfg filter
|
import itertools
import time
from datetime import datetime
from typing import List
import yaml
import json
import numpy as np
import argparse
from ..utils import REPO_PATH, add_path, get_output_dir, get_output_json, dump_output
with add_path(REPO_PATH):
from components._impl.workers.subprocess_rpc import UnserializableException, ChildTraceException
from torchbenchmark.util.experiment.instantiator import list_models, load_model_isolated, TorchBenchModelConfig
from torchbenchmark.util.experiment.metrics import TorchBenchModelMetrics, get_model_test_metrics
BM_NAME = "model-stableness"
# By default, use 7 percent as the threshold for stableness checking
STABLE_THRESHOLD = 0.07
# By default, run 15 iterations
DEFAULT_ITERATIONS = 15
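# Illustrative (hypothetical) stableness check: per-config median latencies of
# [100.0, 103.0, 105.0] ms across rounds give a max delta of
# (105.0 - 100.0) / 100.0 = 0.05, which is below STABLE_THRESHOLD (0.07),
# so that config would be reported as stable by reduce_results_by_device below.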
def generate_model_config(model_name: str) -> List[TorchBenchModelConfig]:
devices = ["cpu", "cuda"]
tests = ["train", "eval"]
cfgs = itertools.product(*[devices, tests])
result = [TorchBenchModelConfig(
name=model_name,
device=device,
test=test,
batch_size=None,
extra_args=[],
extra_env=None,
) for device, test in cfgs]
return result
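# Illustrative expansion (hypothetical model name): generate_model_config("resnet50")
# yields four configs covering the device/test product: (cpu, train), (cpu, eval),
# (cuda, train) and (cuda, eval), each with batch_size=None and no extra args.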
def parse_args(args: List[str]):
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--rounds", default=DEFAULT_ITERATIONS, type=int, help="Number of rounds to run to simulate measuring max delta in workflow.")
parser.add_argument("-m", "--models", default="", help="Specify the models to run, default (empty) runs all models.")
parser.add_argument("-d", "--device", default="cpu", help="Specify the device.")
parser.add_argument("-t", "--test", default="eval", help="Specify the test.")
parser.add_argument("-o", "--output", type=str, help="The default output json file.")
args = parser.parse_args(args)
return args
def _get_median_latencies(raw_metrics):
num_with_latencies = len(list(filter(lambda x: 'latencies' in x, raw_metrics)))
if num_with_latencies != len(raw_metrics):
return None
median_latencies = list(map(lambda x: np.median(x['latencies']), raw_metrics))
return median_latencies
def reduce_results(full_results):
ub_metrics = {}
latencies_by_cfg = {}
for round_result in full_results:
for result in round_result:
cfg = result['cfg']
cfg_name = f"{cfg['name']}_{cfg['device']}_{cfg['test']}_ootb_latencies"
if not cfg_name in latencies_by_cfg:
latencies_by_cfg[cfg_name] = []
latencies_by_cfg[cfg_name].append(result['raw_metrics'])
for cfg_name in latencies_by_cfg:
raw_metrics = latencies_by_cfg[cfg_name]
latencies = _get_median_latencies(raw_metrics)
if latencies:
ub_metrics[f"{cfg_name}_maxdelta"] = (max(latencies) - min(latencies)) / min(latencies)
else:
ub_metrics[f"{cfg_name}_maxdelta"] = -1.0
return ub_metrics
def reduce_results_by_device(full_results):
def _cfg_to_key(cfg):
key = {}
key["model"] = cfg["name"]
key["test"] = cfg["test"]
return frozenset(key.items())
result_by_device = {}
result_yaml_obj = {}
for round_result in full_results:
for result in round_result:
cfg = result['cfg']
device = cfg['device']
raw_metrics = result['raw_metrics']
result_by_device.setdefault(device, {})
key = _cfg_to_key(cfg)
result_by_device[device].setdefault(key, [])
result_by_device[device][key].append(raw_metrics)
for device in result_by_device:
result_yaml_obj[device] = []
for key in result_by_device[device]:
latencies = _get_median_latencies(result_by_device[device][key])
if not latencies:
continue
max_delta = (max(latencies) - min(latencies)) / min(latencies)
stable_obj = dict(key)
stable_obj["max_delta"] = str(max_delta)
if max_delta < STABLE_THRESHOLD:
stable_obj["stable"] = True
else:
stable_obj["stable"] = False
result_yaml_obj[device].append(stable_obj)
return result_yaml_obj
def generate_filter(args: argparse.Namespace):
allowed_models = args.models
if allowed_models:
allowed_models = allowed_models.split(",") if "," in allowed_models else [allowed_models]
allowed_devices = args.device
allowed_devices = allowed_devices.split(",") if "," in allowed_devices else [allowed_devices]
allowed_tests = args.test
allowed_tests = allowed_tests.split(",") if "," in allowed_tests else [allowed_tests]
def METHOD_NAME(cfg: TorchBenchModelConfig) -> bool:
if cfg.device in allowed_devices and cfg.test in allowed_tests:
if not allowed_models:
return True
else:
return cfg.name in allowed_models
return False
return METHOD_NAME
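# Illustrative use (hypothetical CLI values): with --models resnet50 --device cuda
# --test eval, the returned predicate keeps only configs whose name is "resnet50",
# device is "cuda" and test is "eval"; leaving --models empty keeps every model
# on the selected device/test combination.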
def run(args: List[str]):
args = parse_args(args)
output_dir = get_output_dir(BM_NAME)
models = list_models()
cfgs = list(itertools.chain(*map(generate_model_config, models)))
METHOD_NAME = generate_filter(args)
# run a model cfg and get latencies
full_results = []
for _round in range(args.rounds):
single_round_result = []
for cfg in filter(METHOD_NAME, cfgs):
print(f"[Round {_round}/{args.rounds}] Running {cfg}")
try:
task = load_model_isolated(cfg)
# get the model test metrics
metrics: TorchBenchModelMetrics = get_model_test_metrics(task, metrics=["latencies"])
single_round_result.append({
'cfg': cfg.__dict__,
'raw_metrics': metrics.__dict__,
})
except NotImplementedError:
# some models don't implement the test specified
single_round_result.append({
'cfg': cfg.__dict__,
'raw_metrics': "NotImplemented",
})
except ChildTraceException as exception:
single_round_result.append({
'cfg': cfg.__dict__,
'raw_metrics': str(exception),
})
except UnserializableException as exception:
single_round_result.append({
'cfg': cfg.__dict__,
'raw_metrics': exception.args_repr,
})
finally:
# Remove task reference to trigger deletion in gc
task = None
full_results.append(single_round_result)
print(full_results)
# reduce full results to metrics
ub_metrics = reduce_results(full_results)
# log detailed results in the .userbenchmark/model-stableness/logs/ directory
output_json = get_output_json(BM_NAME, ub_metrics)
log_dir = output_dir.joinpath("logs")
log_dir.mkdir(exist_ok=True, parents=True)
fname = "logs-{}.json".format(datetime.fromtimestamp(time.time()).strftime("%Y%m%d%H%M%S"))
full_fname = log_dir.joinpath(fname)
with open(full_fname, 'w') as f:
json.dump(full_results, f, indent=4)
# output userbenchmark metrics in the .userbenchmark/model-stableness directory
print(output_json)
dump_output(BM_NAME, output_json)
# output the stableness result yaml
yaml_dicts = reduce_results_by_device(full_results)
for device in yaml_dicts:
fname = f"summary-{device}.yaml"
full_fname = log_dir.joinpath(fname)
with open(full_fname, "w") as f:
f.write(yaml.safe_dump(yaml_dicts[device]))
|
3,113 |
update note context
|
# pylint: disable-msg=R0904
import json
import time
from django.urls import reverse
from mediathread.api import ClassLevelAuthentication, UserResource
from mediathread.assetmgr.models import Asset, Source
from mediathread.djangosherd.api import SherdNoteResource
from tastypie import fields
from tastypie.resources import ModelResource
def add_note_ctx_to_json(note_ctx, the_json):
if note_ctx['is_global_annotation']:
the_json['global_annotation'] = note_ctx
the_json['global_annotation_analysis'] = (
len(note_ctx['vocabulary']) > 0 or
len(note_ctx['metadata']['body']) > 0 or
len(note_ctx['metadata']['tags']) > 0)
else:
the_json['annotations'].append(note_ctx)
return the_json
class AssetResource(ModelResource):
author = fields.ForeignKey(UserResource, 'author', full=True)
class Meta:
queryset = Asset.objects.none()
excludes = ['added', 'course',
'active', 'metadata_blob']
list_allowed_methods = []
detail_allowed_methods = []
authentication = ClassLevelAuthentication()
ordering = ['added', 'modified', 'id', 'title', 'author']
def __init__(self, *args, **kwargs):
# @todo: extras is a side-effect of the Mustache templating system
# not supporting the ability to reference variables in the parent
# context. ideally, the templating system should be switched out to
# something more reasonable
self.extras = kwargs.pop('extras', {})
self.include_annotations = kwargs.pop('include_annotations', True)
super(AssetResource, self).__init__(*args, **kwargs)
def format_time(self, dt):
return dt.strftime("%m/%d/%y %I:%M %p")
def to_time(self, dtstr):
return time.strptime(dtstr, "%m/%d/%y %I:%M %p")
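# Illustrative round-trip (hypothetical timestamp): with the "%m/%d/%y %I:%M %p"
# pattern, a datetime of 2024-03-07 14:30 formats as "03/07/24 02:30 PM" via
# format_time and parses back through time.strptime in to_time above.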
def apply_filters(self, request, applicable_filters):
qs = self.get_object_list(request).filter(**applicable_filters)
return qs.distinct()
def dehydrate(self, bundle):
bundle.data['thumb_url'] = bundle.obj.thumb_url
bundle.data['primary_type'] = bundle.obj.primary.label
if bundle.obj.primary.upload:
bundle.data['primary_upload_url'] = bundle.obj.primary.upload.url
bundle.data['local_url'] = reverse(
'react_asset_detail', kwargs={
'course_pk': bundle.obj.course.pk,
'pk': bundle.obj.pk,
})
bundle.data['media_type_label'] = bundle.obj.media_type()
bundle.data['editable_title'] = (
bundle.request.user.is_staff or
bundle.obj.author == bundle.request.user)
bundle.data['editable'] = (bundle.obj.author == bundle.request.user)
bundle.data['annotations'] = []
bundle.data['annotation_count'] = 0
bundle.data['my_annotation_count'] = 0
bundle.data['added'] = self.format_time(bundle.obj.added)
bundle.data['modified'] = self.format_time(bundle.obj.modified)
sources = {}
for s in bundle.obj.source_set.all():
sources[s.label] = {'label': s.label,
'url': s.url_processed(bundle.request),
'width': s.width,
'height': s.height,
'primary': s.primary}
bundle.data['sources'] = sources
for key, value in self.extras.items():
bundle.data[key] = value
return bundle
def render_one(self, request, asset, notes=None):
self.request = request
try:
bundle = self.build_bundle(obj=asset, request=request)
dehydrated = self.full_dehydrate(bundle)
the_json = self._meta.serializer.to_simple(dehydrated, None)
try:
metadata = json.loads(bundle.obj.metadata_blob)
metadata = [{'key': k, 'value': v}
for k, v in metadata.items()]
the_json['metadata'] = metadata
except ValueError:
pass
if notes:
note_resource = SherdNoteResource()
for note in notes:
note_ctx = note_resource.render_one(request, note, "")
the_json = add_note_ctx_to_json(note_ctx, the_json)
return the_json
except Source.DoesNotExist:
return None
def render_one_context(self, request, asset, notes=None):
ctx = {
'assets': {
asset.pk: self.render_one(request, asset, notes)
}
}
return ctx
def update_asset_context(self, request, ctx, asset):
if asset.id not in ctx:
abundle = self.build_bundle(obj=asset, request=request)
dehydrated = self.full_dehydrate(abundle)
asset_ctx = self._meta.serializer.to_simple(dehydrated, None)
asset_ctx['tags'] = [tag.name for tag in asset.tags()]
ctx[asset.id] = asset_ctx
def METHOD_NAME(self, request, ctx, note_res, note, owner, viewer):
if not note.is_global_annotation:
ctx[note.asset.id]['annotation_count'] += 1
if note.author == viewer:
ctx[note.asset.id]['my_annotation_count'] += 1
if note.modified > note.asset.modified:
ctx[note.asset.id]['modified'] = self.format_time(note.modified)
if self.include_annotations:
note_ctx = note_res.render_one(request, note, "")
if note.is_global_annotation:
if note.author == owner:
ctx[note.asset.id]['global_annotation'] = note_ctx
else:
ctx[note.asset.id]['annotations'].append(note_ctx)
def render_list(self, request, record_owner, record_viewer, assets, notes):
self.request = request
note_resource = SherdNoteResource()
ctx = {}
for asset in assets.all():
self.update_asset_context(request, ctx, asset)
for note in notes.all():
try:
note.asset.primary
self.METHOD_NAME(
request, ctx, note_resource, note,
record_owner, record_viewer)
except Source.DoesNotExist:
pass # don't break in this situation
values = ctx.values()
return list(values)
def alter_list_data_to_serialize(self, request, to_be_serialized):
to_be_serialized['objects'] = sorted(
to_be_serialized['objects'],
key=lambda bundle: bundle.data['modified'],
reverse=True)
return to_be_serialized
|
3,114 |
update comment in doc
|
# Copyright (c) 2019, Frappe Technologies and contributors
# License: MIT. See LICENSE
import json
import frappe
from frappe.database.schema import add_column
from frappe.desk.notifications import notify_mentions
from frappe.exceptions import ImplicitCommitError
from frappe.model.document import Document
from frappe.model.utils import is_virtual_doctype
from frappe.website.utils import clear_cache
class Comment(Document):
# begin: auto-generated types
# This code is auto-generated. Do not modify anything in this block.
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from frappe.types import DF
comment_by: DF.Data | None
comment_email: DF.Data | None
comment_type: DF.Literal[
"Comment",
"Like",
"Info",
"Label",
"Workflow",
"Created",
"Submitted",
"Cancelled",
"Updated",
"Deleted",
"Assigned",
"Assignment Completed",
"Attachment",
"Attachment Removed",
"Shared",
"Unshared",
"Bot",
"Relinked",
"Edit",
]
content: DF.HTMLEditor | None
ip_address: DF.Data | None
published: DF.Check
reference_doctype: DF.Link | None
reference_name: DF.DynamicLink | None
reference_owner: DF.Data | None
seen: DF.Check
subject: DF.Text | None
# end: auto-generated types
def after_insert(self):
notify_mentions(self.reference_doctype, self.reference_name, self.content)
self.notify_change("add")
def validate(self):
if not self.comment_email:
self.comment_email = frappe.session.user
self.content = frappe.utils.sanitize_html(self.content)
def on_update(self):
METHOD_NAME(self)
if self.is_new():
self.notify_change("update")
def on_trash(self):
self.remove_comment_from_cache()
self.notify_change("delete")
def notify_change(self, action):
key_map = {
"Like": "like_logs",
"Assigned": "assignment_logs",
"Assignment Completed": "assignment_logs",
"Comment": "comments",
"Attachment": "attachment_logs",
"Attachment Removed": "attachment_logs",
}
key = key_map.get(self.comment_type)
if not key:
return
frappe.publish_realtime(
"docinfo_update",
{"doc": self.as_dict(), "key": key, "action": action},
doctype=self.reference_doctype,
docname=self.reference_name,
after_commit=True,
)
def remove_comment_from_cache(self):
_comments = get_comments_from_parent(self)
for c in _comments:
if c.get("name") == self.name:
_comments.remove(c)
update_comments_in_parent(self.reference_doctype, self.reference_name, _comments)
def on_doctype_update():
frappe.db.add_index("Comment", ["reference_doctype", "reference_name"])
def METHOD_NAME(doc):
"""Updates `_comments` (JSON) property in parent Document.
Creates a column `_comments` if property does not exist.
Only user created Communication or Comment of type Comment are saved.
`_comments` format
{
"comment": [String],
"by": [user],
"name": [Comment Document name]
}"""
# only comments get updates, not likes, assignments etc.
if doc.doctype == "Comment" and doc.comment_type != "Comment":
return
def get_truncated(content):
return (content[:97] + "...") if len(content) > 100 else content
if doc.reference_doctype and doc.reference_name and doc.content:
_comments = get_comments_from_parent(doc)
updated = False
for c in _comments:
if c.get("name") == doc.name:
c["comment"] = get_truncated(doc.content)
updated = True
if not updated:
_comments.append(
{
"comment": get_truncated(doc.content),
# "comment_email" for Comment and "sender" for Communication
"by": getattr(doc, "comment_email", None) or getattr(doc, "sender", None) or doc.owner,
"name": doc.name,
}
)
update_comments_in_parent(doc.reference_doctype, doc.reference_name, _comments)
def get_comments_from_parent(doc):
"""
get the list of comments cached in the document record in the column
`_comments`
"""
try:
if is_virtual_doctype(doc.reference_doctype):
_comments = "[]"
else:
_comments = frappe.db.get_value(doc.reference_doctype, doc.reference_name, "_comments") or "[]"
except Exception as e:
if frappe.db.is_missing_table_or_column(e):
_comments = "[]"
else:
raise
try:
return json.loads(_comments)
except ValueError:
return []
def update_comments_in_parent(reference_doctype, reference_name, _comments):
"""Updates `_comments` property in parent Document with given dict.
:param _comments: Dict of comments."""
if (
not reference_doctype
or not reference_name
or frappe.db.get_value("DocType", reference_doctype, "issingle")
or is_virtual_doctype(reference_doctype)
):
return
try:
# use sql, so that we do not mess with the timestamp
frappe.db.sql(
f"""update `tab{reference_doctype}` set `_comments`=%s where name=%s""", # nosec
(json.dumps(_comments[-100:]), reference_name),
)
except Exception as e:
if frappe.db.is_column_missing(e) and getattr(frappe.local, "request", None):
pass
elif frappe.db.is_data_too_long(e):
raise frappe.DataTooLongException
else:
raise
else:
if frappe.flags.in_patch:
return
# Clear route cache
if route := frappe.get_cached_value(reference_doctype, reference_name, "route"):
clear_cache(route)
|
3,115 |
field description
|
from django import template
from django.db.models import Q
from questionnaires.models import Poll
from questionnaires.utils import SkipLogicPaginator
import iogt.iogt_globals as globals_
register = template.Library()
@register.inclusion_tag('questionnaires/tags/checkbox.html')
def render_checkbox(field):
return {'field': field}
@register.inclusion_tag('questionnaires/tags/checkboxes.html')
def render_checkboxes(field):
return {'field': field}
@register.inclusion_tag("questionnaires/tags/select.html")
def render_select(field, is_multiselect=False):
return {"field": field, "is_multiselect": is_multiselect}
@register.inclusion_tag('questionnaires/tags/textarea.html')
def render_textarea(field):
return {'field': field}
@register.inclusion_tag('questionnaires/tags/radios.html')
def render_radios(field):
return {'field': field}
@register.inclusion_tag('questionnaires/tags/text_field.html')
def render_text_field(field):
return {'field': field}
@register.inclusion_tag('questionnaires/tags/field_description.html')
def METHOD_NAME(field):
return {"field": field}
@register.inclusion_tag('questionnaires/tags/render_fields.html')
def render_field(field):
return {'field': field}
@register.inclusion_tag('questionnaires/tags/field_counter.html')
def field_counter(form, forloop, form_length, fields_step, questionnaire):
if form_length is not None:
if form.errors:
counter = forloop.get("counter")
else:
counter = forloop.get("counter") + int(form_length or 0)
else:
counter = forloop.get("counter")
if hasattr(questionnaire, "multi_step") and (questionnaire.multi_step or questionnaire.has_page_breaks):
total = fields_step.paginator.count
else:
total = len(form.fields)
return {"counter": counter, "total": total}
@register.inclusion_tag('questionnaires/tags/submit_button.html')
def render_submit_button(page, fields_step=None):
return {'submit_button_text': page.get_submit_button_text(fields_step)}
@register.inclusion_tag('questionnaires/tags/action_url.html')
def get_action_url(page, self, fields_step, request, form):
return {"page": page, "self": self, "fields_step": fields_step,
"request": request, "form": form}
@register.inclusion_tag('questionnaires/tags/questionnaire_template_wrapper.html', takes_context=True)
def render_questionnaire_form(context, page, background_color=None, font_color=None):
theme_settings = globals_.theme_settings
font_color = font_color or theme_settings.section_listing_questionnaire_font_color
background_color = background_color or theme_settings.section_listing_questionnaire_background_color
request = context['request']
form_class = page.get_form_class()
if isinstance(page, Poll):
template = 'questionnaires/tags/embedded_poll.html'
context.update({
'results': page.get_results(),
'result_as_percentage': page.result_as_percentage,
})
else:
template = 'questionnaires/tags/embedded_questionnaire.html'
if hasattr(page, 'multi_step') and page.multi_step and page.get_form_fields():
paginator = SkipLogicPaginator(page.get_form_fields(), {}, {})
step = paginator.page(1)
form_class = page.get_form_class_for_step(step)
context.update({
'fields_step': step,
})
context.update({
'template': template,
'font_color': font_color,
'background_color': background_color,
'questionnaire': page,
})
multiple_submission_filter = (
Q(session_key=request.session.session_key) if request.user.is_anonymous else Q(user__pk=request.user.pk)
)
multiple_submission_check = (
not page.allow_multiple_submissions
and page.get_submission_class().objects.filter(multiple_submission_filter,
page=page).exists()
)
anonymous_user_submission_check = request.user.is_anonymous and not page.allow_anonymous_submissions
if multiple_submission_check or anonymous_user_submission_check:
context.update({
'form': None,
})
return context
form = form_class(page=page, user=context['request'].user)
context.update({
'form': form,
})
return context
@register.filter
def get_item(dictionary, key):
return dictionary.get(key)
@register.simple_tag
def subtract(value, arg):
return int(value) - int(arg)
@register.inclusion_tag('questionnaires/tags/questionnaire_wrapper.html', takes_context=True)
def render_questionnaire_wrapper(context, page, direct_display=False, background_color=None, font_color=None):
context.update({
'questionnaire': page,
'direct_display': direct_display,
'background_color': background_color,
'font_color': font_color,
})
return context
@register.simple_tag
def get_answer_options(field, field_option, fields_info):
label = field_option.choice_label
correct_answers = fields_info.get(field.name, {}).get('correct_answer_list', [])
is_selected = field_option.data.get('selected', False)
rv = ''
if is_selected and label in correct_answers:
rv = {
'class': 'success',
'aria_label': 'Checkbox with tick, indicating correct and selected',
}
elif is_selected and label not in correct_answers:
rv = {
'class': 'error',
'aria_label': 'Checkbox with X, indicating incorrect and selected',
}
elif not is_selected and label in correct_answers:
rv = {
'class': 'clear-tick',
'aria_label': 'Checkbox with tick, indicating correct but not selected',
}
elif not is_selected and label not in correct_answers:
rv = {
'class': 'clear-cross',
'aria_label': 'Checkbox with X, indicating incorrect and not selected',
}
return rv
|
3,116 |
test pruning export concat model
|
# Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from tests.torch.helpers import load_exported_onnx_version
from tests.torch.pruning.helpers import BigPruningTestModel
from tests.torch.pruning.helpers import DiffConvsModel
from tests.torch.pruning.helpers import GroupNormModel
from tests.torch.pruning.helpers import PruningTestModelConcat
from tests.torch.pruning.helpers import PruningTestModelEltwise
from tests.torch.pruning.helpers import get_basic_pruning_config
pytestmark = pytest.mark.skip(reason="Export with filters actually deleted from the model is currently disabled.")
def find_value_by_name_in_list(obj_list, name):
for obj in obj_list:
if obj.name == name:
return obj
return None
def check_bias_and_weight_shape(node_name, onnx_model_proto, weight_shape, bias_shape):
node_weight = find_value_by_name_in_list(onnx_model_proto.graph.initializer, node_name + ".weight")
node_bias = find_value_by_name_in_list(onnx_model_proto.graph.initializer, node_name + ".bias")
assert node_weight.dims == weight_shape
assert node_bias.dims == bias_shape
def test_pruning_export_simple_model(tmp_path):
model = BigPruningTestModel()
nncf_config = get_basic_pruning_config(input_sample_size=[1, 1, 8, 8])
nncf_config["compression"]["pruning_init"] = 0.5
nncf_config["compression"]["algorithm"] = "filter_pruning"
onnx_model_proto = load_exported_onnx_version(nncf_config, model, path_to_storage_dir=tmp_path)
# Check that conv2 + BN were pruned by output filters
# WARNING: starting from at least torch 1.7.0, torch.onnx.export will fuse BN into the preceding
# convs when export is done with `training=False`, so this test might fail.
check_bias_and_weight_shape("nncf_module.conv2", onnx_model_proto, [16, 16, 3, 3], [16])
check_bias_and_weight_shape("nncf_module.bn", onnx_model_proto, [16], [16])
# Check that up was pruned by input filters
check_bias_and_weight_shape("nncf_module.up", onnx_model_proto, [16, 32, 3, 3], [32])
# Check that conv3 was pruned by input filters
check_bias_and_weight_shape("nncf_module.conv3", onnx_model_proto, [1, 32, 5, 5], [1])
@pytest.mark.parametrize(
("prune_first", "ref_shapes"),
[
(False, [[[16, 1, 2, 2], [16]], [[16, 16, 2, 2], [16]], [[16, 16, 2, 2], [16]], [[16, 32, 3, 3], [16]]]),
(True, [[[8, 1, 2, 2], [8]], [[16, 8, 2, 2], [16]], [[16, 8, 2, 2], [16]], [[16, 32, 3, 3], [16]]]),
],
)
def METHOD_NAME(tmp_path, prune_first, ref_shapes):
model = PruningTestModelConcat()
nncf_config = get_basic_pruning_config(input_sample_size=[1, 1, 8, 8])
nncf_config["compression"]["algorithm"] = "filter_pruning"
nncf_config["compression"]["params"]["prune_first_conv"] = prune_first
nncf_config["compression"]["pruning_init"] = 0.5
onnx_model_proto = load_exported_onnx_version(nncf_config, model, path_to_storage_dir=tmp_path)
for i in range(1, 5):
conv_name = "nncf_module.conv{}".format(i)
check_bias_and_weight_shape(conv_name, onnx_model_proto, *ref_shapes[i - 1])
@pytest.mark.parametrize(
("prune_first", "ref_shapes"),
[
(False, [[[16, 1, 2, 2], [16]], [[16, 16, 2, 2], [16]], [[16, 16, 2, 2], [16]], [[16, 16, 3, 3], [16]]]),
(True, [[[8, 1, 2, 2], [8]], [[16, 8, 2, 2], [16]], [[16, 8, 2, 2], [16]], [[16, 16, 3, 3], [16]]]),
],
)
def test_pruning_export_eltwise_model(tmp_path, prune_first, ref_shapes):
model = PruningTestModelEltwise()
nncf_config = get_basic_pruning_config(input_sample_size=[1, 1, 8, 8])
nncf_config["compression"]["algorithm"] = "filter_pruning"
nncf_config["compression"]["params"]["prune_first_conv"] = prune_first
nncf_config["compression"]["pruning_init"] = 0.5
onnx_model_proto = load_exported_onnx_version(nncf_config, model, path_to_storage_dir=tmp_path)
for i in range(1, 5):
conv_name = "nncf_module.conv{}".format(i)
check_bias_and_weight_shape(conv_name, onnx_model_proto, *ref_shapes[i - 1])
@pytest.mark.parametrize(
("prune_first", "ref_shapes"),
[
(False, [[[32, 1, 2, 2], [32]], [[32, 1, 1, 1], [32]], [[32, 32, 3, 3], [32]], [[16, 4, 1, 1], [16]]]),
(True, [[[16, 1, 2, 2], [16]], [[16, 1, 1, 1], [16]], [[32, 16, 3, 3], [32]], [[16, 4, 1, 1], [16]]]),
],
)
def test_pruning_export_diffconvs_model(tmp_path, prune_first, ref_shapes):
model = DiffConvsModel()
nncf_config = get_basic_pruning_config(input_sample_size=[1, 1, 8, 8])
nncf_config["compression"]["algorithm"] = "filter_pruning"
nncf_config["compression"]["params"]["prune_first_conv"] = prune_first
nncf_config["compression"]["pruning_init"] = 0.5
onnx_model_proto = load_exported_onnx_version(nncf_config, model, path_to_storage_dir=tmp_path)
for i in range(1, 5):
conv_name = "nncf_module.conv{}".format(i)
check_bias_and_weight_shape(conv_name, onnx_model_proto, *ref_shapes[i - 1])
def test_pruning_export_groupnorm_model(tmp_path):
model = GroupNormModel()
nncf_config = get_basic_pruning_config(input_sample_size=[1, 1, 8, 8])
nncf_config["compression"]["algorithm"] = "filter_pruning"
nncf_config["compression"]["params"]["prune_first_conv"] = True
nncf_config["compression"]["pruning_init"] = 0.5
onnx_model_proto = load_exported_onnx_version(nncf_config, model, path_to_storage_dir=tmp_path)
check_bias_and_weight_shape("nncf_module.conv1", onnx_model_proto, [8, 1, 1, 1], [8])
check_bias_and_weight_shape("nncf_module.conv2", onnx_model_proto, [16, 8, 1, 1], [16])
|
3,117 |
get node label
|
import logging
import re
from enum import Enum
from typing import Dict, List, Optional
from hikaru.model.rel_1_26 import Node, NodeList
class ClusterProviderType(str, Enum):
GKE = "GKE"
AKS = "AKS"
EKS = "EKS"
Kind = "Kind"
Minikube = "Minikube"
RancherDesktop = "RancherDesktop"
Kapsule = "Kapsule"
Kops = "Kops"
DigitalOcean = "DigitalOcean"
Unknown = "Unknown"
# the value is a regex matched against the node hostname
HOSTNAME_MATCH: Dict[ClusterProviderType, str] = {
ClusterProviderType.Kind: ".*kind.*",
ClusterProviderType.RancherDesktop: ".*rancher-desktop.*",
}
# the value is a node label unique to the provider
NODE_LABELS: Dict[ClusterProviderType, str] = {
ClusterProviderType.Minikube: "minikube.k8s.io/name",
ClusterProviderType.DigitalOcean: "doks.digitalocean.com/version",
ClusterProviderType.Kops: "kops.k8s.io/instancegroup",
ClusterProviderType.Kapsule: "k8s.scaleway.com/kapsule",
}
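# Illustrative classification (hypothetical node data): a node whose
# "kubernetes.io/hostname" label matches ".*kind.*" resolves to ClusterProviderType.Kind,
# while a node carrying the "minikube.k8s.io/name" label resolves to
# ClusterProviderType.Minikube; node labels are only consulted after the hostname,
# providerID and kubelet-version checks below come back inconclusive.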
class ClusterProviderDiscovery:
provider: ClusterProviderType = ClusterProviderType.Unknown
def init_provider_discovery(self):
try:
self.provider = self._find_cluster_provider()
logging.info(f"{self.provider} cluster discovered.")
except Exception:
logging.error("Error detecting cluster type", exc_info=True)
def get_cluster_provider(self):
return self.provider
@staticmethod
def METHOD_NAME(node: Node, label: str) -> Optional[str]:
if node.metadata.labels:
return node.metadata.labels.get(label)
return None
@staticmethod
def _is_str_in_cluster_provider(nodes: List[Node], identifier: str) -> bool:
node = nodes[0]
try:
provider_id = node.spec.providerID
return identifier in provider_id
except (AttributeError, TypeError):
# providerID is optional, so it could be missing
return False
@staticmethod
def _is_detect_cluster_from_kubelet_version(nodes: List[Node], kubelet_substring: str) -> bool:
node = nodes[0]
try:
kubelet_version = node.status.nodeInfo.kubeletVersion
return kubelet_substring in kubelet_version
except (AttributeError, TypeError):
# missing kubeletVersion
return False
def _detect_provider_from_hostname(self, nodes: List[Node]) -> Optional[ClusterProviderType]:
nodes_host_names = [self.METHOD_NAME(node, "kubernetes.io/hostname") for node in nodes]
for host_name in nodes_host_names:
if not host_name:
continue
for cluster_type in HOSTNAME_MATCH:
cluster_hostname_regex = HOSTNAME_MATCH[cluster_type]
if re.match(cluster_hostname_regex, host_name):
return cluster_type
return ClusterProviderType.Unknown
def _detect_provider_from_node_labels(self, nodes: List[Node]) -> Optional[ClusterProviderType]:
for cluster_type, node_label in NODE_LABELS.items():
if self.METHOD_NAME(nodes[0], node_label):
return cluster_type
return ClusterProviderType.Unknown
def _find_cluster_provider(self) -> ClusterProviderType:
nodes = NodeList.listNode().obj.items
cluster_hostname_provider = self._detect_provider_from_hostname(nodes)
if cluster_hostname_provider != ClusterProviderType.Unknown:
return cluster_hostname_provider
elif self._is_str_in_cluster_provider(nodes, "aks"):
return ClusterProviderType.AKS
elif self._is_detect_cluster_from_kubelet_version(nodes, "gke"):
return ClusterProviderType.GKE
elif self._is_detect_cluster_from_kubelet_version(nodes, "eks"):
return ClusterProviderType.EKS
elif self._is_str_in_cluster_provider(nodes, "kind"):
return ClusterProviderType.Kind
return self._detect_provider_from_node_labels(nodes)
cluster_provider = ClusterProviderDiscovery()
__all__ = ["cluster_provider"]
|
3,118 |
test command line
|
#!/usr/bin/env python3
#
# Copyright (c) Bo Peng and the University of Texas MD Anderson Cancer Center
# Distributed under the terms of the 3-clause BSD License.
import getpass
import os
import subprocess
import pytest
from sos import execute_workflow
from sos._version import __version__
from sos.eval import get_config
from sos.utils import env, load_config_files
# if the test is imported under sos/test, test the interactive executor
test_cfg = '''
cut: 0.5
cut1:
- 0.5
- 2
- 3
cut2: a3
cut3:
- a
- b
- c
cut4:
A: 123
me: '{user_name}@my'
'''
def METHOD_NAME():
'''Test command line arguments'''
assert subprocess.call(
'sos config -h',
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
shell=True) == 0
assert subprocess.call(
'sos config -g --get',
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
shell=True) == 0
assert subprocess.call(
'sos config',
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
shell=True) == 0
assert subprocess.call(
'sos config --get',
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
shell=True) == 0
assert subprocess.call(
'sos config -g --set a 5',
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
shell=True) == 0
assert subprocess.call(
'sos config --get a',
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
shell=True) == 0
assert subprocess.call(
'sos config -g --unset a',
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
shell=True) == 0
def test_config_set(config_factory):
'''Test setting config values via the command line'''
myconfig = config_factory(test_cfg)
assert subprocess.call(
f'sos config --set cut 0.5 -c {myconfig}',
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
shell=True) == 0
load_config_files(myconfig)
assert env.sos_dict['CONFIG']['cut'] == 0.5
#
assert subprocess.call(
f'sos config --set cut1 0.5 2 3 -c {myconfig}',
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
shell=True) == 0
load_config_files(myconfig)
assert env.sos_dict['CONFIG']['cut1'] == [0.5, 2, 3]
#
assert subprocess.call(
f'sos config --set cut2 a3 -c {myconfig}',
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
shell=True) == 0
load_config_files(myconfig)
assert env.sos_dict['CONFIG']['cut2'] == 'a3'
#
assert subprocess.call(
f'sos config --set cut3 a b c -c {myconfig}',
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
shell=True) == 0
load_config_files(myconfig)
assert env.sos_dict['CONFIG']['cut3'] == ['a', 'b', 'c']
#
assert subprocess.call(
f'''sos config --set cut4 "{{'A': 123}}" -c {myconfig}''',
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
shell=True) == 0
load_config_files(myconfig)
assert env.sos_dict['CONFIG']['cut4'] == {'A': 123}
def test_interpolate(config_factory):
'''Test interpolation of config'''
myconfig = config_factory(test_cfg)
assert subprocess.call(
f'''sos config --set me '{{user_name}}@my' -c {myconfig}''',
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
shell=True) == 0
load_config_files(myconfig, default_config_files=False)
assert get_config('me') == f'{getpass.getuser().lower()}@my'
def test_global_vars(config_factory):
'''Test SoS defined variables'''
execute_workflow("[0]", options={'mode': 'dryrun'})
assert env.sos_dict['SOS_VERSION'] == __version__
assert isinstance(env.sos_dict['CONFIG'], dict)
cfg = config_factory({'my_config': 5})
execute_workflow("[0]", options={'config_file': cfg})
assert env.sos_dict['CONFIG']['my_config'] == 5
def test_get_config(config_factory):
myconfig = config_factory({
'val': 5,
'A': {
'B.C': '33',
'B.C1': {
'D': '34'
},
'D': '45'
},
'E': {
'F': {
'val': 6,
'val1': 10,
'G': '{val + val1}'
},
'H': '{val}'
},
'O': 'A{nonexisting}',
'X': '{os.environ.get("HOME", "no_home")}'
})
load_config_files(myconfig)
assert get_config('A', 'D') == '45'
assert get_config('A.D') == '45'
assert get_config(['A', 'D']) == '45'
assert get_config(['A', 'D']) == '45'
assert get_config('A.B.C') == '33'
assert get_config('A.B.C1.D') == '34'
assert get_config('A') == {'B.C': '33', 'B.C1': {'D': '34'}, 'D': '45'}
assert get_config('E.F') == {'val': 6, 'val1': 10, 'G': '16'}
assert get_config('E.F', val=7) == {'val': 6, 'val1': 10, 'G': '17'}
assert get_config('E.F', val=7, allowed_keys=['G']) == {'G': '17'}
assert get_config(
'E.F', val=7, val1=20) == {
'val': 6,
'val1': 10,
'G': '27'
}
assert get_config('E.F', {
'val': 8,
'val1': 30
}) == {
'val': 6,
'val1': 10,
'G': '38'
}
assert get_config('E.H', val=7) == '7'
with pytest.raises(ValueError):
get_config('O')
assert get_config('O', nonexisting=7) == 'A7'
assert get_config('X') == os.environ.get("HOME", "no_home")
|
3,119 |
chill rule11
|
# Leo colorizer control file for chill mode.
# This file is in the public domain.
# Properties for chill mode.
properties = {
"commentEnd": "*/",
"commentStart": "/*",
}
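# Illustrative effect of these delimiters: a span such as /* set up I/O */ is
# colored as comment1 by chill_rule1 below, while text wrapped in "<>" markers
# is colored as comment2 by chill_rule0.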
# Attributes dict for chill_main ruleset.
chill_main_attributes_dict = {
"default": "null",
"digit_re": "",
"escape": "",
"highlight_digits": "true",
"ignore_case": "true",
"no_word_sep": "",
}
# Dictionary of attributes dictionaries for chill mode.
attributesDictDict = {
"chill_main": chill_main_attributes_dict,
}
# Keywords dict for chill_main ruleset.
chill_main_keywords_dict = {
"and": "keyword1",
"array": "keyword2",
"begin": "keyword1",
"bin": "keyword3",
"bool": "keyword3",
"case": "keyword1",
"char": "keyword3",
"dcl": "keyword2",
"div": "keyword1",
"do": "keyword1",
"eject": "label",
"else": "keyword1",
"elsif": "keyword1",
"end": "keyword1",
"esac": "keyword1",
"exit": "keyword1",
"false": "literal2",
"fi": "keyword1",
"for": "keyword1",
"goto": "keyword1",
"grant": "keyword2",
"if": "keyword1",
"in": "keyword1",
"int": "keyword3",
"label": "keyword2",
"lio_infos": "label",
"mod": "keyword1",
"module": "keyword2",
"module_description_header": "label",
"msg_xref": "label",
"newmode": "keyword2",
"not": "keyword1",
"null": "literal2",
"od": "keyword1",
"of": "keyword1",
"on": "keyword1",
"or": "keyword1",
"out": "keyword1",
"pack": "keyword2",
"patch_infos": "label",
"powerset": "keyword2",
"proc": "keyword2",
"ptr": "keyword3",
"range": "keyword3",
"ref": "keyword3",
"result": "keyword1",
"return": "keyword1",
"seize": "keyword2",
"set": "keyword2",
"struct": "keyword2",
"swsg_infos": "label",
"syn": "keyword2",
"synmode": "keyword2",
"then": "keyword1",
"to": "keyword1",
"true": "literal2",
"type": "keyword2",
"until": "keyword1",
"uses": "keyword1",
"while": "keyword1",
"with": "keyword1",
"xor": "keyword1",
}
# Dictionary of keywords dictionaries for chill mode.
keywordsDictDict = {
"chill_main": chill_main_keywords_dict,
}
# Rules for chill_main ruleset.
def chill_rule0(colorer, s, i):
return colorer.match_span(s, i, kind="comment2", begin="<>", end="<>")
def chill_rule1(colorer, s, i):
return colorer.match_span(s, i, kind="comment1", begin="/*", end="*/")
def chill_rule2(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="'", end="'",
no_line_break=True)
def chill_rule3(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="H'", end=";",
no_line_break=True)
def chill_rule4(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq=")")
def chill_rule5(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="(")
def chill_rule6(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="]")
def chill_rule7(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="[")
def chill_rule8(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="+")
def chill_rule9(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="-")
def chill_rule10(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="/")
def METHOD_NAME(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="*")
def chill_rule12(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq=".")
def chill_rule13(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq=",")
def chill_rule14(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq=";")
def chill_rule15(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="^")
def chill_rule16(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="@")
def chill_rule17(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq=":=")
def chill_rule18(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq=":")
def chill_rule19(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="=")
def chill_rule20(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="/=")
def chill_rule21(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq=">")
def chill_rule22(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="<")
def chill_rule23(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq=">=")
def chill_rule24(colorer, s, i):
return colorer.match_plain_seq(s, i, kind="operator", seq="<=")
def chill_rule25(colorer, s, i):
return colorer.match_keywords(s, i)
# Rules dict for chill_main ruleset.
rulesDict1 = {
"'": [chill_rule2,],
"(": [chill_rule5,],
")": [chill_rule4,],
"*": [METHOD_NAME,],
"+": [chill_rule8,],
",": [chill_rule13,],
"-": [chill_rule9,],
".": [chill_rule12,],
"/": [chill_rule1, chill_rule10, chill_rule20,],
"0": [chill_rule25,],
"1": [chill_rule25,],
"2": [chill_rule25,],
"3": [chill_rule25,],
"4": [chill_rule25,],
"5": [chill_rule25,],
"6": [chill_rule25,],
"7": [chill_rule25,],
"8": [chill_rule25,],
"9": [chill_rule25,],
":": [chill_rule17, chill_rule18,],
";": [chill_rule14,],
"<": [chill_rule0, chill_rule22, chill_rule24,],
"=": [chill_rule19,],
">": [chill_rule21, chill_rule23,],
"@": [chill_rule16, chill_rule25,],
"A": [chill_rule25,],
"B": [chill_rule25,],
"C": [chill_rule25,],
"D": [chill_rule25,],
"E": [chill_rule25,],
"F": [chill_rule25,],
"G": [chill_rule25,],
"H": [chill_rule3, chill_rule25,],
"I": [chill_rule25,],
"J": [chill_rule25,],
"K": [chill_rule25,],
"L": [chill_rule25,],
"M": [chill_rule25,],
"N": [chill_rule25,],
"O": [chill_rule25,],
"P": [chill_rule25,],
"Q": [chill_rule25,],
"R": [chill_rule25,],
"S": [chill_rule25,],
"T": [chill_rule25,],
"U": [chill_rule25,],
"V": [chill_rule25,],
"W": [chill_rule25,],
"X": [chill_rule25,],
"Y": [chill_rule25,],
"Z": [chill_rule25,],
"[": [chill_rule7,],
"]": [chill_rule6,],
"^": [chill_rule15,],
"_": [chill_rule25,],
"a": [chill_rule25,],
"b": [chill_rule25,],
"c": [chill_rule25,],
"d": [chill_rule25,],
"e": [chill_rule25,],
"f": [chill_rule25,],
"g": [chill_rule25,],
"h": [chill_rule25,],
"i": [chill_rule25,],
"j": [chill_rule25,],
"k": [chill_rule25,],
"l": [chill_rule25,],
"m": [chill_rule25,],
"n": [chill_rule25,],
"o": [chill_rule25,],
"p": [chill_rule25,],
"q": [chill_rule25,],
"r": [chill_rule25,],
"s": [chill_rule25,],
"t": [chill_rule25,],
"u": [chill_rule25,],
"v": [chill_rule25,],
"w": [chill_rule25,],
"x": [chill_rule25,],
"y": [chill_rule25,],
"z": [chill_rule25,],
}
# x.rulesDictDict for chill mode.
rulesDictDict = {
"chill_main": rulesDict1,
}
# Import dict for chill mode.
importDict = {}
|
3,120 |
test parse
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.words.protocols.jabber.jid}.
"""
from twisted.trial import unittest
from twisted.words.protocols.jabber import jid
class JIDParsingTests(unittest.TestCase):
def METHOD_NAME(self) -> None:
"""
Test different forms of JIDs.
"""
# Basic forms
self.assertEqual(jid.parse("user@host/resource"), ("user", "host", "resource"))
self.assertEqual(jid.parse("user@host"), ("user", "host", None))
self.assertEqual(jid.parse("host"), (None, "host", None))
self.assertEqual(jid.parse("host/resource"), (None, "host", "resource"))
# More interesting forms
self.assertEqual(jid.parse("foo/bar@baz"), (None, "foo", "bar@baz"))
self.assertEqual(jid.parse("boo@foo/bar@baz"), ("boo", "foo", "bar@baz"))
self.assertEqual(jid.parse("boo@foo/bar/baz"), ("boo", "foo", "bar/baz"))
self.assertEqual(jid.parse("boo/foo@bar@baz"), (None, "boo", "foo@bar@baz"))
self.assertEqual(jid.parse("boo/foo/bar"), (None, "boo", "foo/bar"))
self.assertEqual(jid.parse("boo//foo"), (None, "boo", "/foo"))
def test_noHost(self) -> None:
"""
Test for failure on no host part.
"""
self.assertRaises(jid.InvalidFormat, jid.parse, "user@")
def test_doubleAt(self) -> None:
"""
Test for failure on double @ signs.
This should fail because @ is not a valid character for the host
part of the JID.
"""
self.assertRaises(jid.InvalidFormat, jid.parse, "user@@host")
def test_multipleAt(self) -> None:
"""
Test for failure on two @ signs.
This should fail because @ is not a valid character for the host
part of the JID.
"""
self.assertRaises(jid.InvalidFormat, jid.parse, "user@host@host")
# Basic tests for case mapping. These are fallback tests for the
# prepping done in twisted.words.protocols.jabber.xmpp_stringprep
def test_prepCaseMapUser(self) -> None:
"""
Test case mapping of the user part of the JID.
"""
self.assertEqual(
jid.prep("UsEr", "host", "resource"), ("user", "host", "resource")
)
def test_prepCaseMapHost(self) -> None:
"""
Test case mapping of the host part of the JID.
"""
self.assertEqual(
jid.prep("user", "hoST", "resource"), ("user", "host", "resource")
)
def test_prepNoCaseMapResource(self) -> None:
"""
Test that no case mapping is applied to the resource part of the JID.
"""
self.assertEqual(
jid.prep("user", "hoST", "resource"), ("user", "host", "resource")
)
self.assertNotEqual(
jid.prep("user", "host", "Resource"), ("user", "host", "resource")
)
class JIDTests(unittest.TestCase):
def test_noneArguments(self) -> None:
"""
Test that using no arguments raises an exception.
"""
self.assertRaises(RuntimeError, jid.JID)
def test_attributes(self) -> None:
"""
Test that the attributes correspond with the JID parts.
"""
j = jid.JID("user@host/resource")
self.assertEqual(j.user, "user")
self.assertEqual(j.host, "host")
self.assertEqual(j.resource, "resource")
def test_userhost(self) -> None:
"""
Test the extraction of the bare JID.
"""
j = jid.JID("user@host/resource")
self.assertEqual("user@host", j.userhost())
def test_userhostOnlyHost(self) -> None:
"""
Test the extraction of the bare JID of the full form host/resource.
"""
j = jid.JID("host/resource")
self.assertEqual("host", j.userhost())
def test_userhostJID(self) -> None:
"""
Test getting a JID object of the bare JID.
"""
j1 = jid.JID("user@host/resource")
j2 = jid.internJID("user@host")
self.assertIdentical(j2, j1.userhostJID())
def test_userhostJIDNoResource(self) -> None:
"""
Test getting a JID object of the bare JID when there was no resource.
"""
j = jid.JID("user@host")
self.assertIdentical(j, j.userhostJID())
def test_fullHost(self) -> None:
"""
Test giving a string representation of the JID with only a host part.
"""
j = jid.JID(tuple=(None, "host", None))
self.assertEqual("host", j.full())
def test_fullHostResource(self) -> None:
"""
Test giving a string representation of the JID with host, resource.
"""
j = jid.JID(tuple=(None, "host", "resource"))
self.assertEqual("host/resource", j.full())
def test_fullUserHost(self) -> None:
"""
Test giving a string representation of the JID with user, host.
"""
j = jid.JID(tuple=("user", "host", None))
self.assertEqual("user@host", j.full())
def test_fullAll(self) -> None:
"""
Test giving a string representation of the JID.
"""
j = jid.JID(tuple=("user", "host", "resource"))
self.assertEqual("user@host/resource", j.full())
def test_equality(self) -> None:
"""
Test JID equality.
"""
j1 = jid.JID("user@host/resource")
j2 = jid.JID("user@host/resource")
self.assertNotIdentical(j1, j2)
self.assertEqual(j1, j2)
def test_equalityWithNonJIDs(self) -> None:
"""
Test JID equality with non-JID objects.
"""
j = jid.JID("user@host/resource")
self.assertFalse(j == "user@host/resource")
def test_inequality(self) -> None:
"""
Test JID inequality.
"""
j1 = jid.JID("user1@host/resource")
j2 = jid.JID("user2@host/resource")
self.assertNotEqual(j1, j2)
def test_inequalityWithNonJIDs(self) -> None:
"""
Test JID inequality with non-JID objects.
"""
j = jid.JID("user@host/resource")
self.assertNotEqual(j, "user@host/resource")
def test_hashable(self) -> None:
"""
Test JID hashability.
"""
j1 = jid.JID("user@host/resource")
j2 = jid.JID("user@host/resource")
self.assertEqual(hash(j1), hash(j2))
def test_str(self) -> None:
"""
Test unicode representation of JIDs.
"""
j = jid.JID(tuple=("user", "host", "resource"))
self.assertEqual("user@host/resource", str(j))
def test_repr(self) -> None:
"""
Test representation of JID objects.
"""
j = jid.JID(tuple=("user", "host", "resource"))
self.assertEqual("JID(%s)" % repr("user@host/resource"), repr(j))
class InternJIDTests(unittest.TestCase):
def test_identity(self) -> None:
"""
Test that two interned JIDs yield the same object.
"""
j1 = jid.internJID("user@host")
j2 = jid.internJID("user@host")
self.assertIdentical(j1, j2)
|
3,121 |
master config
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os
from unittest.case import SkipTest
from twisted.internet import defer
from buildbot.config import BuilderConfig
from buildbot.plugins import schedulers
from buildbot.plugins import steps
from buildbot.process.factory import BuildFactory
from buildbot.process.results import SUCCESS
from buildbot.test.util.integration import RunMasterBase
from buildbot.worker.upcloud import UpcloudLatentWorker
# This integration test creates a master and upcloud worker environment. You
# need to have an upcloud account for this to work. Running this will cost money.
# If you want to run this,
# export BBTEST_UPCLOUD_CREDS=username:password
# the following environment variable can be used to stress concurrent worker startup
NUM_CONCURRENT = int(os.environ.get("BUILDBOT_TEST_NUM_CONCURRENT_BUILD", 1))
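# Illustrative invocation (hypothetical credentials; the env var names are the ones
# read here and checked in setUp below):
#   export BBTEST_UPCLOUD_CREDS=user@example.com:secret
#   export BUILDBOT_TEST_NUM_CONCURRENT_BUILD=2
# then run this module with trial as usual; each run provisions real UpCloud
# servers and therefore costs money.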
class UpcloudMaster(RunMasterBase):
# wait 5 minutes.
timeout = 300
def setUp(self):
if "BBTEST_UPCLOUD_CREDS" not in os.environ:
raise SkipTest(
"upcloud integration tests only run when environment variable BBTEST_UPCLOUD_CREDS"
" is set to valid upcloud credentials ")
@defer.inlineCallbacks
def test_trigger(self):
yield self.setup_master(METHOD_NAME(num_concurrent=1), startWorker=False)
yield self.doForceBuild()
builds = yield self.master.data.get(("builds",))
# if there are any retries, there will be more builds
self.assertEqual(len(builds), 1 + NUM_CONCURRENT)
for b in builds:
self.assertEqual(b['results'], SUCCESS)
# master configuration
def METHOD_NAME(num_concurrent, extra_steps=None):
if extra_steps is None:
extra_steps = []
c = {}
c['schedulers'] = [
schedulers.ForceScheduler(
name="force",
builderNames=["testy"])]
triggereables = []
for i in range(num_concurrent):
c['schedulers'].append(
schedulers.Triggerable(
name="trigsched" + str(i),
builderNames=["build"]))
triggereables.append("trigsched" + str(i))
f = BuildFactory()
f.addStep(steps.ShellCommand(command='echo hello'))
f.addStep(steps.Trigger(schedulerNames=triggereables,
waitForFinish=True,
updateSourceStamp=True))
f.addStep(steps.ShellCommand(command='echo world'))
f2 = BuildFactory()
f2.addStep(steps.ShellCommand(command='echo ola'))
for step in extra_steps:
f2.addStep(step)
c['builders'] = [
BuilderConfig(name="testy",
workernames=["upcloud0"],
factory=f),
BuilderConfig(name="build",
workernames=["upcloud" + str(i)
for i in range(num_concurrent)],
factory=f2)]
creds = os.environ.get('BBTEST_UPCLOUD_CREDS')
if creds is not None:
user, password = creds.split(":")
else:
raise RuntimeError("Cannot run this test without credentials")
masterFQDN = os.environ.get('masterFQDN', 'localhost')
c['workers'] = []
for i in range(num_concurrent):
upcloud_host_config = {
"user_data":
f"""
#!/usr/bin/env bash
groupadd -g 999 buildbot
useradd -u 999 -g buildbot -s /bin/bash -d /buildworker -m buildbot
passwd -l buildbot
apt update
apt install -y git python3 python3-dev python3-pip sudo gnupg curl
pip3 install buildbot-worker service_identity
chown -R buildbot:buildbot /buildworker
cat <<EOF >> /etc/hosts
127.0.1.1 upcloud{i}
EOF
cat <<EOF >/etc/sudoers.d/buildbot
buildbot ALL=(ALL) NOPASSWD:ALL
EOF
sudo -H -u buildbot bash -c "buildbot-worker create-worker /buildworker {masterFQDN} upcloud{i} pass"
sudo -H -u buildbot bash -c "buildbot-worker start /buildworker"
""" # noqa pylint: disable=line-too-long
}
c['workers'].append(UpcloudLatentWorker('upcloud' + str(i), api_username=user,
api_password=password,
image='Debian GNU/Linux 9 (Stretch)',
hostconfig=upcloud_host_config,
masterFQDN=masterFQDN))
# uncomment for debugging what happens if things look locked.
# c['www'] = {'port': 8080}
# if the masterFQDN is forced (proxy case), then we use 9989 default port
# else, we try to find a free port
if masterFQDN is not None:
c['protocols'] = {"pb": {"port": "tcp:9989"}}
else:
c['protocols'] = {"pb": {"port": "tcp:0"}}
return c
|
3,122 |
set up class
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2019-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import *
import json
import urllib.parse
import urllib.request
import edgedb
from edb.errors import base as base_errors
from edb.common import assert_data_shape
from . import server
bag = assert_data_shape.bag
class BaseHttpExtensionTest(server.QueryTestCase):
@classmethod
def get_extension_name(cls):
raise NotImplementedError
@classmethod
def get_extension_path(cls):
return cls.get_extension_name()
@classmethod
def get_api_prefix(cls):
extpath = cls.get_extension_path()
dbname = cls.get_database_name()
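        # e.g. a database named "main" with the graphql extension resolves to
        # "/db/main/graphql" (illustrative values, not taken from the tests).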
return f'/db/{dbname}/{extpath}'
@classmethod
def METHOD_NAME(cls):
super().METHOD_NAME()
extname = cls.get_extension_name()
cls.loop.run_until_complete(
cls.con.execute(f'CREATE EXTENSION {extname};')
)
@classmethod
def tearDownClass(cls):
extname = cls.get_extension_name()
cls.loop.run_until_complete(
cls.con.execute(f'DROP EXTENSION {extname};')
)
super().tearDownClass()
class ExtAuthTestCase(BaseHttpExtensionTest):
@classmethod
def get_extension_name(cls):
return 'auth'
@classmethod
def get_extension_path(cls):
return 'ext/auth'
class EdgeQLTestCase(BaseHttpExtensionTest):
@classmethod
def get_extension_name(cls):
return 'edgeql_http'
@classmethod
def get_extension_path(cls):
return 'edgeql'
def edgeql_query(
self, query, *, use_http_post=True, variables=None, globals=None):
req_data = {
'query': query
}
if use_http_post:
if variables is not None:
req_data['variables'] = variables
if globals is not None:
req_data['globals'] = globals
req = urllib.request.Request(self.http_addr, method='POST')
req.add_header('Content-Type', 'application/json')
response = urllib.request.urlopen(
req, json.dumps(req_data).encode(), context=self.tls_context
)
resp_data = json.loads(response.read())
else:
if variables is not None:
req_data['variables'] = json.dumps(variables)
if globals is not None:
req_data['globals'] = json.dumps(globals)
response = urllib.request.urlopen(
f'{self.http_addr}/?{urllib.parse.urlencode(req_data)}',
context=self.tls_context,
)
resp_data = json.loads(response.read())
if 'data' in resp_data:
return resp_data['data']
err = resp_data['error']
ex_msg = err['message'].strip()
ex_code = err['code']
raise edgedb.EdgeDBError._from_code(ex_code, ex_msg)
def assert_edgeql_query_result(self, query, result, *,
msg=None, sort=None,
use_http_post=True,
variables=None,
globals=None):
res = self.edgeql_query(
query,
use_http_post=use_http_post,
variables=variables,
globals=globals)
if sort is not None:
            # The query result is keyed by its top-level fields, so that's
            # what needs to be sorted.
for r in res.values():
assert_data_shape.sort_results(r, sort)
assert_data_shape.assert_data_shape(
res, result, self.fail, message=msg)
return res
class GraphQLTestCase(BaseHttpExtensionTest):
@classmethod
def get_extension_name(cls):
return 'graphql'
def graphql_query(self, query, *, operation_name=None,
use_http_post=True,
variables=None,
globals=None):
req_data = {
'query': query
}
if operation_name is not None:
req_data['operationName'] = operation_name
if use_http_post:
if variables is not None:
req_data['variables'] = variables
if globals is not None:
req_data['globals'] = globals
req = urllib.request.Request(self.http_addr, method='POST')
req.add_header('Content-Type', 'application/json')
response = urllib.request.urlopen(
req, json.dumps(req_data).encode(), context=self.tls_context
)
resp_data = json.loads(response.read())
else:
if variables is not None:
req_data['variables'] = json.dumps(variables)
if globals is not None:
req_data['globals'] = json.dumps(globals)
response = urllib.request.urlopen(
f'{self.http_addr}/?{urllib.parse.urlencode(req_data)}',
context=self.tls_context,
)
resp_data = json.loads(response.read())
if 'data' in resp_data:
return resp_data['data']
err = resp_data['errors'][0]
typename, msg = err['message'].split(':', 1)
msg = msg.strip()
try:
ex_type = getattr(edgedb, typename)
except AttributeError:
raise AssertionError(
f'server returned an invalid exception typename: {typename!r}'
f'\n Message: {msg}')
ex = ex_type(msg)
if 'locations' in err:
# XXX Fix this when LSP "location" objects are implemented
ex._attrs[base_errors.FIELD_LINE_START] = str(
err['locations'][0]['line']).encode()
ex._attrs[base_errors.FIELD_COLUMN_START] = str(
err['locations'][0]['column']).encode()
raise ex
def assert_graphql_query_result(self, query, result, *,
msg=None, sort=None,
operation_name=None,
use_http_post=True,
variables=None,
globals=None):
res = self.graphql_query(
query,
operation_name=operation_name,
use_http_post=use_http_post,
variables=variables,
globals=globals)
if sort is not None:
# GQL will always have a single object returned. The data is
# in the top-level fields, so that's what needs to be sorted.
for r in res.values():
assert_data_shape.sort_results(r, sort)
assert_data_shape.assert_data_shape(
res, result, self.fail, message=msg)
return res
|
3,123 |
wrapper
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
from datetime import datetime, timezone
import redis
from first import first
from limits import parse_many
from limits.storage import storage_from_string
from limits.strategies import MovingWindowRateLimiter
from zope.interface import implementer
from warehouse.metrics import IMetricsService
from warehouse.rate_limiting.interfaces import IRateLimiter
logger = logging.getLogger(__name__)
def _return_on_exception(rvalue, *exceptions):
def deco(fn):
@functools.wraps(fn)
def METHOD_NAME(self, *args, **kwargs):
try:
return fn(self, *args, **kwargs)
except exceptions as exc:
                logger.warning("Error computing rate limits: %r", exc)
self._metrics.increment(
"warehouse.ratelimiter.error", tags=[f"call:{fn.__name__}"]
)
return rvalue
return METHOD_NAME
return deco
@implementer(IRateLimiter)
class RateLimiter:
def __init__(self, storage, limit, *, identifiers=None, metrics):
if identifiers is None:
identifiers = []
self._storage = storage
self._window = MovingWindowRateLimiter(storage)
self._limits = parse_many(limit)
self._identifiers = identifiers
self._metrics = metrics
def _get_identifiers(self, identifiers):
return [str(i) for i in list(self._identifiers) + list(identifiers)]
@_return_on_exception(True, redis.RedisError)
def test(self, *identifiers):
return all(
[
self._window.test(limit, *self._get_identifiers(identifiers))
for limit in self._limits
]
)
@_return_on_exception(True, redis.RedisError)
def hit(self, *identifiers):
return all(
[
self._window.hit(limit, *self._get_identifiers(identifiers))
for limit in self._limits
]
)
@_return_on_exception(None, redis.RedisError)
def clear(self, *identifiers):
for limit in self._limits:
self._storage.clear(limit.key_for(*self._get_identifiers(identifiers)))
@_return_on_exception(None, redis.RedisError)
def resets_in(self, *identifiers):
resets = []
for limit in self._limits:
resets_at, remaining = self._window.get_window_stats(
limit, *self._get_identifiers(identifiers)
)
            # If this limit has any hits remaining, then we will skip it
            # since it doesn't need to be reset.
if remaining > 0:
continue
current = datetime.now(tz=timezone.utc)
reset = datetime.fromtimestamp(resets_at, tz=timezone.utc)
            # If our current datetime is greater than or equal to when the
            # limit resets, then we will skip it since it has either already
            # reset, or it is resetting now.
if current >= reset:
continue
# Add a timedelta that represents how long until this limit resets.
resets.append(reset - current)
# If we have any resets, then we'll go through and find whichever one
# is going to reset soonest and use that as our hint for when this
# limit might be available again.
return first(sorted(resets))
@implementer(IRateLimiter)
class DummyRateLimiter:
def test(self, *identifiers):
return True
def hit(self, *identifiers):
return True
def clear(self, *identifiers):
return None
def resets_in(self, *identifiers):
return None
class RateLimit:
def __init__(self, limit, identifiers=None, limiter_class=RateLimiter):
self.limit = limit
self.identifiers = identifiers
self.limiter_class = limiter_class
def __call__(self, context, request):
return self.limiter_class(
request.registry["ratelimiter.storage"],
limit=self.limit,
identifiers=self.identifiers,
metrics=request.find_service(IMetricsService, context=None),
)
def __repr__(self):
return (
f'RateLimit("{self.limit}", identifiers={self.identifiers}, '
f"limiter_class={self.limiter_class})"
)
def __eq__(self, other):
if not isinstance(other, RateLimit):
return NotImplemented
return (self.limit, self.identifiers, self.limiter_class) == (
other.limit,
other.identifiers,
other.limiter_class,
)
def includeme(config):
config.registry["ratelimiter.storage"] = storage_from_string(
config.registry.settings["ratelimit.url"]
)
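# A minimal wiring sketch, not part of this module: the service-factory call
# below and the "user.login" name are assumptions for illustration only.
# RateLimit is a factory -- given a limit string understood by
# limits.parse_many, it builds a RateLimiter bound to the shared storage that
# includeme() configures from the "ratelimit.url" setting.
#
#     config.include("warehouse.rate_limiting")
#     config.register_service_factory(
#         RateLimit("10 per hour", identifiers=["user.login"]),
#         IRateLimiter,
#         name="user.login",
#     )
#
# The limit string may also combine several limits, e.g. "10 per hour; 1000
# per day"; test() and hit() only succeed when every parsed limit passes.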
|
3,124 |
test create t4c instance
|
# SPDX-FileCopyrightText: Copyright DB Netz AG and the capella-collab-manager contributors
# SPDX-License-Identifier: Apache-2.0
import pytest
import responses
from fastapi import status, testclient
from sqlalchemy import orm
from capellacollab.settings.modelsources.t4c import crud as t4c_crud
from capellacollab.settings.modelsources.t4c import models as t4c_models
from capellacollab.tools import models as tools_models
from capellacollab.users import crud as users_crud
from capellacollab.users import models as users_models
@pytest.mark.usefixtures("admin_user")
def METHOD_NAME(
client: testclient.TestClient,
db: orm.Session,
test_tool_version: tools_models.DatabaseVersion,
):
response = client.post(
"/api/v1/settings/modelsources/t4c",
json={
"license": "test",
"host": "test",
"port": 2036,
"cdo_port": 12036,
"usage_api": "http://localhost:8086",
"rest_api": "http://localhost:8080",
"username": "admin",
"protocol": "tcp",
"name": "Test integration",
"version_id": test_tool_version.id,
"password": "secret-password",
},
)
assert response.status_code == 200
assert response.json()["name"] == "Test integration"
t4c_instance = t4c_crud.get_t4c_instance_by_id(db, response.json()["id"])
assert t4c_instance
assert t4c_instance.name == "Test integration"
@pytest.mark.usefixtures("t4c_server")
def test_get_t4c_instances(
client: testclient.TestClient, db: orm.Session, executor_name: str
):
users_crud.create_user(db, executor_name, users_models.Role.ADMIN)
response = client.get(
"/api/v1/settings/modelsources/t4c",
)
assert len(response.json()) == 2
assert response.json()[1]["name"] == "test server"
# Password should not be exposed via API
assert "password" not in response.json()[1]
def test_get_t4c_instance(
client: testclient.TestClient,
db: orm.Session,
executor_name: str,
t4c_server: t4c_models.DatabaseT4CInstance,
):
users_crud.create_user(db, executor_name, users_models.Role.ADMIN)
response = client.get(
f"/api/v1/settings/modelsources/t4c/{t4c_server.id}",
)
assert response.json()["name"] == "test server"
# Password should not be exposed via API
assert "password" not in response.json()
def test_patch_t4c_instance(
client: testclient.TestClient,
db: orm.Session,
executor_name: str,
t4c_server: t4c_models.DatabaseT4CInstance,
):
users_crud.create_user(db, executor_name, users_models.Role.ADMIN)
response = client.patch(
f"/api/v1/settings/modelsources/t4c/{t4c_server.id}",
json={
"name": "Patched test integration",
},
)
t4c_instance = t4c_crud.get_t4c_instance_by_id(db, response.json()["id"])
assert t4c_instance
assert response.json()["name"] == "Patched test integration"
assert t4c_instance.name == "Patched test integration"
assert response.json()["host"] == "localhost"
assert t4c_instance.host == "localhost"
def test_update_t4c_instance_password_empty_string(
client: testclient.TestClient,
db: orm.Session,
executor_name: str,
t4c_server: t4c_models.DatabaseT4CInstance,
):
users_crud.create_user(db, executor_name, users_models.Role.ADMIN)
expected_password = t4c_server.password
response = client.patch(
f"/api/v1/settings/modelsources/t4c/{t4c_server.id}",
json={
"password": "",
},
)
updated_t4c_server = t4c_crud.get_t4c_instance_by_id(
db, response.json()["id"]
)
assert updated_t4c_server
assert updated_t4c_server.password == expected_password
@responses.activate
def test_get_t4c_license_usage(
client: testclient.TestClient,
db: orm.Session,
executor_name: str,
t4c_server: t4c_models.DatabaseT4CInstance,
):
users_crud.create_user(db, executor_name, users_models.Role.ADMIN)
responses.get(
"http://localhost:8086/status/json",
status=status.HTTP_200_OK,
json={"status": {"used": 1, "free": 19, "total": 20}},
)
response = client.get(
f"/api/v1/settings/modelsources/t4c/{t4c_server.id}/licenses",
)
assert response.status_code == 200
assert response.json()["free"] == 19
assert response.json()["total"] == 20
@responses.activate
def test_get_t4c_license_usage_no_status(
client: testclient.TestClient,
db: orm.Session,
executor_name: str,
t4c_server: t4c_models.DatabaseT4CInstance,
):
users_crud.create_user(db, executor_name, users_models.Role.ADMIN)
responses.get(
"http://localhost:8086/status/json",
status=status.HTTP_200_OK,
json={"status": {"message": "No last status available."}},
)
response = client.get(
f"/api/v1/settings/modelsources/t4c/{t4c_server.id}/licenses",
)
assert response.status_code == 404
assert response.json()["detail"]["err_code"] == "NO_STATUS"
|
3,125 |
build asn
|
from __future__ import absolute_import, division, print_function
from boost_adaptbx import graph
from boost_adaptbx.graph import graph_structure_comparison
import unittest
class collector(object):
def __init__(self):
self.collected = []
def __call__(self, data):
self.collected.append( data )
return True
class TestMcGregorCommonSubgraphsUnique(unittest.TestCase):
def build_leu(self, leu):
v0 = leu.add_vertex( "CA" )
v1 = leu.add_vertex( "C" ) # CB
v2 = leu.add_vertex( "C" ) # CG
v3 = leu.add_vertex( "C" ) # CD2
v4 = leu.add_vertex( "C" ) # CD1
leu.add_edge(v0, v1, 1.53 )
leu.add_edge(v0, v2, 2.62 )
leu.add_edge(v0, v3, 3.23 )
leu.add_edge(v0, v4, 3.91 )
leu.add_edge(v1, v2, 1.53 )
leu.add_edge(v1, v3, 2.54 )
leu.add_edge(v1, v4, 2.51 )
leu.add_edge(v2, v3, 1.52 )
leu.add_edge(v2, v4, 1.52 )
leu.add_edge(v3, v4, 2.50 )
return ( v0, v1, v2 )
def METHOD_NAME(self, asn):
w0 = asn.add_vertex( "CA" )
w1 = asn.add_vertex( "C" ) # CB
w2 = asn.add_vertex( "C" ) # CG
w3 = asn.add_vertex( "C" ) # ND2
w4 = asn.add_vertex( "C" ) # OD1
asn.add_edge(w0, w1, 1.53 )
asn.add_edge(w0, w2, 2.54 )
asn.add_edge(w0, w3, 2.87 )
asn.add_edge(w0, w4, 3.59 )
asn.add_edge(w1, w2, 1.52 )
asn.add_edge(w1, w3, 2.43 )
asn.add_edge(w1, w4, 2.40 )
asn.add_edge(w2, w3, 1.33 )
asn.add_edge(w2, w4, 1.23 )
asn.add_edge(w3, w4, 2.25 )
return ( w0, w1, w2 )
def manipulate(self, leu, asn, matchings):
( leu_ca, leu_cb, leu_cg ) = self.build_leu( leu = leu )
( asn_ca, asn_cb, asn_cg ) = self.METHOD_NAME( asn = asn )
callback = collector()
import operator
graph_structure_comparison.mcgregor_common_subgraphs_unique(
graph1 = leu,
graph2 = asn,
vertex_equality = operator.eq,
edge_equality = lambda l, r: abs( l - r ) <= 0.1,
callback = callback,
)
self.assertEqual( len( callback.collected ), matchings )
largest = max( callback.collected, key = len )
self.assertEqual( len( largest ), 3 )
self.assertEqual(
sorted( [ ( leu.vertex_label( p[0] ), asn.vertex_label( p[1] ) ) for p in largest ] ),
[ ( "C", "C" ), ( "C", "C" ), ( "CA", "CA" ) ],
)
self.assertEqual(
set( largest ),
set( [ ( leu_ca, asn_ca ), ( leu_cb, asn_cb ), ( leu_cg, asn_cg ) ] ),
)
def test_adjacency_list_undirected_vector_set(self):
try:
leu = graph.adjacency_list(
graph_type = "undirected",
vertex_type = "vector",
edge_type = "set",
)
asn = graph.adjacency_list(
graph_type = "undirected",
vertex_type = "vector",
edge_type = "set",
)
except NotImplementedError:
pass
else:
self.manipulate( leu = leu, asn = asn, matchings = 13 )
def test_adjacency_list_undirected_list_set(self):
try:
leu = graph.adjacency_list(
graph_type = "undirected",
vertex_type = "list",
edge_type = "set",
)
asn = graph.adjacency_list(
graph_type = "undirected",
vertex_type = "list",
edge_type = "set",
)
except NotImplementedError:
pass
else:
self.manipulate( leu = leu, asn = asn, matchings = 13 )
def test_adjacency_list_undirected_vector_vector(self):
try:
leu = graph.adjacency_list(
graph_type = "undirected",
vertex_type = "vector",
edge_type = "vector",
)
asn = graph.adjacency_list(
graph_type = "undirected",
vertex_type = "vector",
edge_type = "vector",
)
except NotImplementedError:
pass
else:
self.manipulate( leu = leu, asn = asn, matchings = 13 )
def test_adjacency_list_undirected_list_vector(self):
try:
leu = graph.adjacency_list(
graph_type = "undirected",
vertex_type = "list",
edge_type = "vector",
)
asn = graph.adjacency_list(
graph_type = "undirected",
vertex_type = "list",
edge_type = "vector",
)
except NotImplementedError:
pass
else:
self.manipulate( leu = leu, asn = asn, matchings = 13 )
def test_adjacency_list_directed_vector_vector(self):
try:
leu = graph.adjacency_list(
graph_type = "directed",
vertex_type = "vector",
edge_type = "vector",
)
asn = graph.adjacency_list(
graph_type = "directed",
vertex_type = "vector",
edge_type = "vector",
)
except NotImplementedError:
pass
else:
self.manipulate( leu = leu, asn = asn, matchings = 8 )
suite_mcgregor_common_subgraphs_unique = unittest.TestLoader().loadTestsFromTestCase(
TestMcGregorCommonSubgraphsUnique
)
alltests = unittest.TestSuite(
[
suite_mcgregor_common_subgraphs_unique,
]
)
def load_tests(loader, tests, pattern):
return alltests
if __name__ == "__main__":
unittest.TextTestRunner( verbosity = 2 ).run( alltests )
|
3,126 |
jog
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Connect to a PI stage using the E873 controller and the Q-545.140 translation stages
using Physik Instrumente (PI) GmbH & Co. KG
sc_hardware.physikInstrumente.E873.py
Alistair Boettiger, April 2019
V1.1
Functional connection to x-y axes of PI piezo stage
V1.2
Functional connection to both xy axes as a stage and z-axis as a separate z-stage
Notes:
This module requires the PIPython library that ships with the PI controllers.
It also requires the path to this library to be added to the python path (see below).
There is probably a more elegant way to do this.
"""
from __future__ import print_function
# Update the path to the PIPython Library:
#
# Note: This might be better done by creating a xyz.pth module in the Python
# library folder?
#
import sys
sys.path.append(r'C:\Users\Scope3\Desktop\MicroscopeHardware\PI\PIPython-1.5.1.7 for E-873.3QTU\PIPython-1.5.1.7')
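# A sketch of the .pth alternative mentioned above (file name and target
# directory are assumptions): a one-line "pipython.pth" file dropped into
# site-packages makes the library importable without patching sys.path here,
# because Python appends every directory listed in *.pth files at startup.
#
#     import site, pathlib
#     pth = pathlib.Path(site.getsitepackages()[0]) / "pipython.pth"
#     pth.write_text(r'C:\Users\Scope3\Desktop\MicroscopeHardware\PI\PIPython-1.5.1.7 for E-873.3QTU\PIPython-1.5.1.7')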
from copy import deepcopy
import storm_control.sc_library.parameters as params
from pipython import GCSDevice, pitools
CONTROLLERNAME = 'E-873.3QTU' # 'C-884' will also work
STAGES = ['Q-545.140', 'Q-545.140', 'Q-545.140'] #, 'Q-545.140',
REFMODES = 'FRF' # ['FNL', 'FRF']
class piE873(object):
## __init__
#
# Connect to the PI E873 stage.
#
#
def __init__(self, serialnum = '119006811'): # should become a parameter, see other stages
print(serialnum)
# Connect to the PI E873 stage.
# with GCSDevice(CONTROLLERNAME) as pidevice:
pidevice = GCSDevice(CONTROLLERNAME)
pidevice.ConnectUSB(serialnum) # pidevice.ConnectUSB(serialnum='119006811')
print('connected: {}'.format(pidevice.qIDN().strip()))
# Show the version info which is helpful for PI support when there
# are any issues.
if pidevice.HasqVER():
print('version info:\n{}'.format(pidevice.qVER().strip()))
# In the module pipython.pitools there are some helper
# functions to make using a PI device more convenient. The "startup"
# function will initialize your system. There are controllers that
# cannot discover the connected stages hence we set them with the
# "stages" argument. The desired referencing method (see controller
# user manual) is passed as "refmode" argument. All connected axes
# will be stopped if they are moving and their servo will be enabled.
print('initialize connected stages...')
pitools.startup(pidevice, stages=STAGES, refmodes=REFMODES)
# Now we query the allowed motion range and current position of all
# connected stages. GCS commands often return an (ordered) dictionary
# with axes/channels as "keys" and the according values as "values".
self.pidevice = pidevice
self.wait = 1 # move commands wait for motion to stop
self.unit_to_um = 100.0 # needs calibration
self.um_to_unit = 1.0/self.unit_to_um
# Connect to the stage.
self.good = 1
# get min and max range
self.rangemin = pidevice.qTMN()
self.rangemax = pidevice.qTMX()
self.curpos = pidevice.qPOS()
## getStatus
#
# @return True/False if we are actually connected to the stage.
#
def getStatus(self):
return self.good
## goAbsolute
#
# @param x Stage x position in um.
# @param y Stage y position in um.
#
def goAbsolute(self, x, y):
if self.good:
# If the stage is currently moving due to a jog command
# and then you try to do a positional move everything
# will freeze, so we stop the stage first.
# self.jog(0.0,0.0)
X = x * self.um_to_unit
Y = y * self.um_to_unit
if X > self.rangemin['1'] and X < self.rangemax['1']:
self.pidevice.MOV(1, X)
else:
print('requested move outside max range!')
if Y > self.rangemin['2'] and Y < self.rangemax['2']:
self.pidevice.MOV(2, Y)
else:
print('requested move outside max range!')
## goRelative
#
# @param dx Amount to displace the stage in x in um.
# @param dy Amount to displace the stage in y in um.
#
def goRelative(self, dx, dy):
if self.good:
# self.jog(0.0,0.0)
x0 = self.pidevice.qPOS(1)[1] # query single axis [need to check units. Also, shouldn't this be zero indexed?]
y0 = self.pidevice.qPOS(2)[2] # query single axis
# position = pidevice.qPOS()[str(axis)] # query all axes
X = x0 + dx * self.um_to_unit
Y = y0 + dy * self.um_to_unit
if X > self.rangemin['1'] and X < self.rangemax['1']:
self.pidevice.MOV(1, X)
else:
print('requested move outside max range!')
if Y > self.rangemin['2'] and Y < self.rangemax['2']:
self.pidevice.MOV(2, Y)
else:
print('requested move outside max range!')
# pitools.waitontarget(self.pidevice, axes=1) # actively hold on target
# pitools.waitontarget(self.pidevice, axes=2) # actively hold on target
## position
#
# @return [stage x (um), stage y (um), stage z (um)]
#
def position(self):
if self.good:
x0 = self.pidevice.qPOS(1)[1] # query single axis
y0 = self.pidevice.qPOS(2)[2] # query single axis
return {"x" : x0,
"y" : y0}
def zMoveTo(self, z):
"""
Move the z stage to the specified position (in microns).
"""
if self.good:
Z = z * self.um_to_unit
if Z > self.rangemin['3'] and Z < self.rangemax['3']:
self.pidevice.MOV(3, Z)
else:
print('requested move outside max range!')
def zPosition(self):
"""
Query for current z position in microns.
"""
if self.good:
z0 = self.pidevice.qPOS(3)[3] # query single axis
return {"z" : z0}
def zSetVelocity(self, z_vel):
pass
def zZero(self):
if self.good:
            pitools._ref_with_pos(self, self.pidevice.axes([2])) # added axis [2], not sure this ever worked anyway
## jog
#
# @param x_speed Speed to jog the stage in x in um/s.
# @param y_speed Speed to jog the stage in y in um/s.
#
def METHOD_NAME(self, x_speed, y_speed):
pass
# figure out how to do something here
# if self.good:
# c_xs = c_double(x_speed * self.um_to_unit)
# c_ys = c_double(y_speed * self.um_to_unit)
# c_zr = c_double(0.0)
# tango.LSX_SetDigJoySpeed(self.LSID, c_xs, c_ys, c_zr, c_zr)
## joystickOnOff
#
# @param on True/False enable/disable the joystick.
#
def joystickOnOff(self, on):
pass
# No joystick used
## lockout
#
# Calls joystickOnOff.
#
# @param flag True/False.
#
def lockout(self, flag):
self.joystickOnOff(not flag)
## setVelocity
#
# FIXME: figure out how to set velocity..
#
def setVelocity(self, x_vel, y_vel):
pass
## shutDown
#
# Disconnect from the stage.
#
def shutDown(self):
# Disconnect from the stage
if self.good:
self.pidevice.StopAll(noraise=True)
pitools.waitonready(self.pidevice) # there are controllers that need some time to halt all axes
## zero
#
# Set the current position as the new zero position.
#
def zero(self):
if self.good:
pitools._ref_with_pos(self, self.pidevice.axes([0,1])) # added axes [0,1], not sure this ever worked anyway
|
3,127 |
test proc col checksum consistency same preprocessing
|
import copy
import uuid
from typing import List
from unittest import mock
import pytest
from ludwig.constants import INPUT_FEATURES, OUTPUT_FEATURES
from ludwig.data.cache.util import calculate_checksum
from ludwig.schema.model_types.base import ModelConfig
from ludwig.types import FeatureConfigDict, ModelConfigDict
from ludwig.utils.misc_utils import merge_dict
def _gen_config(input_features: List[FeatureConfigDict]) -> ModelConfigDict:
return {INPUT_FEATURES: input_features, OUTPUT_FEATURES: [{"name": "out1", "type": "binary"}]}
@pytest.mark.parametrize(
"input_features,diff,expected",
[
(
[
{
"name": "in1",
"type": "text",
"encoder": {"type": "parallel_cnn"},
}
],
[
{
"encoder": {"type": "stacked_cnn"},
}
],
True,
),
(
[
{
"name": "in1",
"type": "text",
"preprocessing": {"cache_encoder_embeddings": True},
"encoder": {"type": "bert"},
}
],
[
{
"encoder": {"type": "distilbert"},
}
],
False,
),
],
)
def test_calculate_checksum(input_features: List[FeatureConfigDict], diff: List[FeatureConfigDict], expected: bool):
config = _gen_config(input_features)
diff_features = [merge_dict(f, df) for f, df in zip(input_features, diff)]
diff_config = _gen_config(diff_features)
mock_dataset = mock.Mock()
mock_dataset.checksum = uuid.uuid4().hex
assert (
calculate_checksum(mock_dataset, ModelConfig.from_dict(config).to_dict())
== calculate_checksum(mock_dataset, ModelConfig.from_dict(diff_config).to_dict())
) == expected
def test_proc_col_checksum_consistency():
"""Tests that proc_col is equal if checksum are equal."""
config_dict1 = {
"input_features": [{"name": "txt1", "type": "text", "encoder": {"type": "bert"}}],
"output_features": [{"name": "bin1", "type": "binary"}],
}
config1 = ModelConfig.from_dict(config_dict1)
config_dict2 = copy.deepcopy(config_dict1)
config_dict2["input_features"][0]["preprocessing"] = {
"tokenizer": "bert",
}
config2 = ModelConfig.from_dict(config_dict2)
mock_dataset = mock.Mock()
mock_dataset.checksum = uuid.uuid4().hex
assert calculate_checksum(mock_dataset, config1.to_dict()) == calculate_checksum(mock_dataset, config2.to_dict())
for if1, if2 in zip(config1.input_features, config2.input_features):
assert if1.name == if2.name
assert if1.proc_column == if2.proc_column
for of1, of2 in zip(config1.output_features, config2.output_features):
assert of1.name == of2.name
assert of1.proc_column == of2.proc_column
def METHOD_NAME():
"""Tests that proc_col is different if preprocessing and names are the same but types are different."""
config = {
"input_features": [
# Same name, different types, same preprocessing
{"name": "num1", "type": "number", "preprocessing": {"missing_value_strategy": "fill_with_mode"}},
{"name": "num2", "type": "category", "preprocessing": {"missing_value_strategy": "fill_with_mode"}},
],
"output_features": [
{"name": "num3", "type": "number", "preprocessing": {"missing_value_strategy": "fill_with_mode"}}
],
}
config = ModelConfig.from_dict(config)
assert config.input_features[0].proc_column != config.input_features[1].proc_column
@pytest.mark.distributed
def test_checksum_determinism(ray_cluster_2cpu):
"""Tests that checksums are deterministic across different processes (no unordered hash maps)."""
import ray
# Generate a lot of features so the probability of a reordering of feature sets is very high.
config = {
INPUT_FEATURES: [{"name": f"in{i}", "type": "number"} for i in range(100)],
OUTPUT_FEATURES: [{"name": "out1", "type": "binary"}],
}
config = ModelConfig.from_dict(config)
mock_dataset = mock.Mock()
mock_dataset.checksum = uuid.uuid4().hex
@ray.remote(max_calls=1)
def calculate_checksum_remote(dataset, config):
return calculate_checksum(dataset, config)
# Run each checksum calculation as a remote function so it gets its own Python interpreter, as
# the hash function in Python is deterministic within a process, but not between different processes.
# See: https://docs.python.org/3/reference/datamodel.html#object.__hash__
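    # For example, hash("in1") generally differs between two interpreter runs
    # unless PYTHONHASHSEED is pinned, so any ordering derived from str hashes
    # cannot be relied on across processes.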
checksum1 = ray.get(calculate_checksum_remote.remote(mock_dataset, config.to_dict()))
checksum2 = ray.get(calculate_checksum_remote.remote(mock_dataset, config.to_dict()))
assert checksum1 == checksum2
|
3,128 |
set url
|
"""
This module implements the Response class which is used to represent HTTP
responses in Scrapy.
See documentation in docs/topics/request-response.rst
"""
from typing import Generator, Tuple
from urllib.parse import urljoin
from scrapy.exceptions import NotSupported
from scrapy.http.common import obsolete_setter
from scrapy.http.headers import Headers
from scrapy.http.request import Request
from scrapy.link import Link
from scrapy.utils.trackref import object_ref
class Response(object_ref):
"""An object that represents an HTTP response, which is usually
downloaded (by the Downloader) and fed to the Spiders for processing.
"""
attributes: Tuple[str, ...] = (
"url",
"status",
"headers",
"body",
"flags",
"request",
"certificate",
"ip_address",
"protocol",
)
"""A tuple of :class:`str` objects containing the name of all public
attributes of the class that are also keyword parameters of the
``__init__`` method.
Currently used by :meth:`Response.replace`.
"""
def __init__(
self,
url: str,
status=200,
headers=None,
body=b"",
flags=None,
request=None,
certificate=None,
ip_address=None,
protocol=None,
):
self.headers = Headers(headers or {})
self.status = int(status)
self._set_body(body)
self.METHOD_NAME(url)
self.request = request
self.flags = [] if flags is None else list(flags)
self.certificate = certificate
self.ip_address = ip_address
self.protocol = protocol
@property
def cb_kwargs(self):
try:
return self.request.cb_kwargs
except AttributeError:
raise AttributeError(
"Response.cb_kwargs not available, this response "
"is not tied to any request"
)
@property
def meta(self):
try:
return self.request.meta
except AttributeError:
raise AttributeError(
"Response.meta not available, this response "
"is not tied to any request"
)
def _get_url(self):
return self._url
def METHOD_NAME(self, url: str):
if isinstance(url, str):
self._url = url
else:
raise TypeError(
f"{type(self).__name__} url must be str, " f"got {type(url).__name__}"
)
url = property(_get_url, obsolete_setter(METHOD_NAME, "url"))
def _get_body(self):
return self._body
def _set_body(self, body):
if body is None:
self._body = b""
elif not isinstance(body, bytes):
raise TypeError(
"Response body must be bytes. "
"If you want to pass unicode body use TextResponse "
"or HtmlResponse."
)
else:
self._body = body
body = property(_get_body, obsolete_setter(_set_body, "body"))
def __repr__(self):
return f"<{self.status} {self.url}>"
def copy(self):
"""Return a copy of this Response"""
return self.replace()
def replace(self, *args, **kwargs):
"""Create a new Response with the same attributes except for those given new values"""
for x in self.attributes:
kwargs.setdefault(x, getattr(self, x))
cls = kwargs.pop("cls", self.__class__)
return cls(*args, **kwargs)
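    # Illustrative usage (not from the upstream docs): ``response.replace(status=404)``
    # returns a new object of the same class that differs only in its status,
    # since every attribute listed in ``attributes`` is carried over via setdefault.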
def urljoin(self, url):
"""Join this Response's url with a possible relative url to form an
absolute interpretation of the latter."""
return urljoin(self.url, url)
@property
def text(self):
"""For subclasses of TextResponse, this will return the body
as str
"""
raise AttributeError("Response content isn't text")
def css(self, *a, **kw):
"""Shortcut method implemented only by responses whose content
is text (subclasses of TextResponse).
"""
raise NotSupported("Response content isn't text")
def jmespath(self, *a, **kw):
"""Shortcut method implemented only by responses whose content
is text (subclasses of TextResponse).
"""
raise NotSupported("Response content isn't text")
def xpath(self, *a, **kw):
"""Shortcut method implemented only by responses whose content
is text (subclasses of TextResponse).
"""
raise NotSupported("Response content isn't text")
def follow(
self,
url,
callback=None,
method="GET",
headers=None,
body=None,
cookies=None,
meta=None,
encoding="utf-8",
priority=0,
dont_filter=False,
errback=None,
cb_kwargs=None,
flags=None,
) -> Request:
"""
Return a :class:`~.Request` instance to follow a link ``url``.
It accepts the same arguments as ``Request.__init__`` method,
but ``url`` can be a relative URL or a ``scrapy.link.Link`` object,
not only an absolute URL.
:class:`~.TextResponse` provides a :meth:`~.TextResponse.follow`
method which supports selectors in addition to absolute/relative URLs
and Link objects.
.. versionadded:: 2.0
The *flags* parameter.
"""
if isinstance(url, Link):
url = url.url
elif url is None:
raise ValueError("url can't be None")
url = self.urljoin(url)
return Request(
url=url,
callback=callback,
method=method,
headers=headers,
body=body,
cookies=cookies,
meta=meta,
encoding=encoding,
priority=priority,
dont_filter=dont_filter,
errback=errback,
cb_kwargs=cb_kwargs,
flags=flags,
)
def follow_all(
self,
urls,
callback=None,
method="GET",
headers=None,
body=None,
cookies=None,
meta=None,
encoding="utf-8",
priority=0,
dont_filter=False,
errback=None,
cb_kwargs=None,
flags=None,
) -> Generator[Request, None, None]:
"""
.. versionadded:: 2.0
Return an iterable of :class:`~.Request` instances to follow all links
in ``urls``. It accepts the same arguments as ``Request.__init__`` method,
but elements of ``urls`` can be relative URLs or :class:`~scrapy.link.Link` objects,
not only absolute URLs.
:class:`~.TextResponse` provides a :meth:`~.TextResponse.follow_all`
method which supports selectors in addition to absolute/relative URLs
and Link objects.
"""
if not hasattr(urls, "__iter__"):
raise TypeError("'urls' argument must be an iterable")
return (
self.follow(
url=url,
callback=callback,
method=method,
headers=headers,
body=body,
cookies=cookies,
meta=meta,
encoding=encoding,
priority=priority,
dont_filter=dont_filter,
errback=errback,
cb_kwargs=cb_kwargs,
flags=flags,
)
for url in urls
)
|
3,129 |
default sortoption
|
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
"""Search blueprint in order for template and static files to be loaded."""
from __future__ import absolute_import, division, print_function
import json
import six
from flask import Blueprint, current_app, jsonify, request, render_template
from inspirehep.modules.search import LiteratureSearch
blueprint = Blueprint(
'inspirehep_search',
__name__,
url_prefix='',
template_folder='templates',
static_folder='static',
)
@blueprint.route("/search")
def search():
"""Search page ui."""
collection = request.values.get('cc', 'hep', type=unicode).lower()
ctx = {}
if collection == 'conferences':
ctx['search_api'] = '/api/conferences/'
return render_template('search/search_conferences.html', **ctx)
if collection == 'authors':
ctx['search_api'] = '/api/authors/'
return render_template('search/search_authors.html', **ctx)
if collection == 'data':
ctx['search_api'] = 'https://hepdata.net/search/'
return render_template('search/search_data.html', **ctx)
if collection == 'experiments':
ctx['search_api'] = '/api/experiments/'
return render_template('search/search_experiments.html', **ctx)
if collection == 'institutions':
ctx['search_api'] = '/api/institutions/'
return render_template('search/search_institutions.html', **ctx)
if collection == 'journals':
ctx['search_api'] = '/api/journals/'
return render_template('search/search_journals.html', **ctx)
if collection == 'jobs':
ctx['search_api'] = '/api/jobs/'
return render_template('search/search_jobs.html', **ctx)
ctx['search_api'] = current_app.config['SEARCH_UI_SEARCH_API']
return render_template(current_app.config['SEARCH_UI_SEARCH_TEMPLATE'],
**ctx)
@blueprint.route('/search/suggest', methods=['GET'])
def suggest():
"""Power typeahead.js search bar suggestions."""
field = request.values.get('field')
query = request.values.get('query')
search = LiteratureSearch()
search = search.suggest(
'suggestions', query, completion={"field": field}
)
suggestions = search.execute_suggest()
if field == "authors.name_suggest":
bai_name_map = {}
for suggestion in suggestions['suggestions'][0]['options']:
bai = suggestion['_source']['bai']
if bai in bai_name_map:
bai_name_map[bai].append(
suggestion['text']
)
else:
bai_name_map[bai] = [suggestion['text']]
result = []
for key, value in six.iteritems(bai_name_map):
result.append(
{
'name': max(value, key=len),
'value': key,
'template': 'author'
}
)
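        # Illustrative mapping (hypothetical data): {"J.Smith.1": ["J Smith",
        # "John Smith"]} becomes [{"name": "John Smith", "value": "J.Smith.1",
        # "template": "author"}], i.e. the longest name variant is displayed.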
return jsonify({
'results': result
})
return jsonify({
'results': [
{'value': s['text']}
for s in suggestions['suggestions'][0]['options']
]
})
def sorted_options(sort_options):
"""Sort sort options for display."""
return [
dict(
title=v['title'],
value=('-{0}'.format(k)
if v.get('default_order', 'asc') == 'desc' else k),
)
for k, v in
sorted(sort_options.items(), key=lambda x: x[1].get('order', 0))
]
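# Worked example (hypothetical sort options, not taken from the config): given
#     {"bestmatch": {"title": "Best match", "order": 1},
#      "mostrecent": {"title": "Most recent", "order": 2, "default_order": "desc"}}
# sorted_options returns
#     [{"title": "Best match", "value": "bestmatch"},
#      {"title": "Most recent", "value": "-mostrecent"}]
# i.e. options are ordered by their "order" key and descending defaults get a
# leading "-" in the value understood by Invenio-Search-JS.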
@blueprint.app_template_filter('format_sortoptions')
def format_sortoptions(sort_options):
"""Create sort options JSON dump for Invenio-Search-JS."""
return json.dumps(dict(
options=sorted_options(sort_options)
), sort_keys=True)
@blueprint.app_template_filter('default_sortoption')
def METHOD_NAME(sort_options):
"""Get defualt sort option for Invenio-Search-JS."""
return sorted_options(sort_options)[0]['value']
|
3,130 |
delete if existing
|
from pyramid.response import Response
from pyramid.view import view_config
from c2cgeoportal_commons.models import DBSession
from geoportailv3_geoportal.userconfig import UserConfig
from pyramid.httpexceptions import HTTPUnauthorized
import json
import urllib.request
import re
import logging
log = logging.getLogger(__name__)
class Config(object):
def __init__(self, request):
self.request = request
self.config = self.request.registry.settings
self.db_userconfig = DBSession
@view_config(route_name="get_userconfig", renderer='json')
def get_userconfig(self):
user = self.request.user
key = self.request.params['key']
if user is None:
return HTTPUnauthorized()
username = user.username
user_config = self.db_userconfig.query(UserConfig) \
.filter(UserConfig.user_login == username, UserConfig.key == key).all()
output = [ { 'key': conf.key, 'value': conf.style} for conf in user_config]
return output
@view_config(route_name="save_userconfig")
def save_userconfig(self):
user = self.request.user
if user is None:
return HTTPUnauthorized()
json_body = json.loads(self.request.body)
key = json_body['key']
self.METHOD_NAME(user, key)
userConfig = UserConfig()
userConfig.key = key
userConfig.style = json_body['value']
userConfig.user_login = user.username
self.db_userconfig.add(userConfig)
return self.request.response
@view_config(route_name="delete_userconfig")
def delete_userconfig(self):
user = self.request.user
key = self.request.params['key']
if user is None:
return HTTPUnauthorized()
self.METHOD_NAME(user, key)
return self.request.response
def METHOD_NAME(self, user, key):
username = user.username
existing_user_config = self.db_userconfig.query(UserConfig) \
.filter(UserConfig.user_login == username).all()
if len(existing_user_config) > 0:
for config in existing_user_config:
self.db_userconfig.query(UserConfig).filter(
UserConfig.id == config.id,
UserConfig.key == key
).delete()
@view_config(route_name="apply_mvt_config", renderer='json')
def apply_mvt_config(self):
# Parse and make a dict from the styles config to apply
config = json.loads(self.request.params['config'])
paint_conf_dict = {}
layout_conf_dict = {}
keys = ['background', 'line', 'fill', 'fill-extrusion', 'symbol']
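        # Illustrative config item (layer names are made up): an entry such as
        #     {"color": "#ff0000", "opacity": 0.5, "visible": True, "fills": ["landcover"]}
        # fills paint_conf_dict["landcover"] with "fill-color"/"fill-opacity"
        # and layout_conf_dict["landcover"] with a "visibility" entry.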
for conf in json.loads(config):
# Roadmap layer
if 'color' in conf:
color = conf['color']
if 'opacity' in conf:
opacity = conf['opacity']
for key in keys:
if 'fill-extrusion' in key:
prop = 'fillExtrusions'
else:
prop = key + 's'
if prop in conf:
for layer in conf[prop]:
paint_conf_dict.setdefault(layer, {})[key + '-color'] = color
if 'opacity' in conf:
paint_conf_dict.setdefault(layer, {})[key + '-opacity'] = int(opacity)
if 'visible' in conf:
layout_conf_dict.setdefault(layer, {})['visibility'] = 'visible' if conf['visible'] else 'none'
# Topo layers
else:
for key in keys:
if 'fill-extrusion' in key:
prop = 'fillExtrusions'
else:
prop = key + 's'
if prop in conf:
for layer in conf[prop]:
layout_conf_dict.setdefault(layer, {})['visibility'] = 'visible' if conf['visible'] else 'none'
for layer in conf.get('hillshades', []):
layout_conf_dict.setdefault(layer, {})['visibility'] = 'visible' if conf['visible'] else 'none'
# Parse and modify the default config with the styles to apply
style_url = self.request.params['style_url']
with urllib.request.urlopen(style_url) as file:
default_styles = file.read().decode('utf-8')
myjson = json.loads(default_styles)
for layer in myjson['layers']:
for key, value in paint_conf_dict.get(layer['id'], {}).items():
layer['paint'][key] = value
for key, value in layout_conf_dict.get(layer['id'], {}).items():
layer.setdefault('layout', {})[key] = value
return myjson
|
3,131 |
bad
|
#!/usr/bin/env python
#
#
# Ravenbrook
# <https://www.ravenbrook.com/>
#
# P4-BISECT -- FIND CHANGE THAT INTRODUCED A BUG
#
# Gareth Rees, Ravenbrook Limited, 2014-04-14
#
#
# 1. INTRODUCTION
#
# This script automates (or partly automates) the process of finding,
# by binary search, the change that introduced a bug.
#
# The interface is modelled closely on git-bisect(1).
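#
# A typical session might look like this (filespec and changelevels are
# illustrative only):
#
#     p4-bisect start -f //depot/project/... 10000 10500
#     p4-bisect run ./check.sh     # exit 0 = good, 125 = skip, 1-127 = bad
#     p4-bisect reset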
import argparse
from functools import partial
import json
from os import unlink
import p4
import subprocess
import sys
BISECT_FILE = '.p4-bisect'
def error(msg):
sys.stderr.write(msg)
sys.stderr.write('\n')
exit(1)
def sync(*filespecs):
try:
p4.do('sync', *filespecs)
except p4.Error as e:
if 'file(s) up-to-date' not in e.args[0]:
raise
class State(object):
def __init__(self, **d):
self.filespec = d['filespec']
self.changes = d['changes']
if 'current' in d:
self.current = d['current']
@classmethod
def load(cls):
try:
with open(BISECT_FILE, 'r') as f:
return cls(**json.load(f))
except FileNotFoundError:
error("p4-bisect not in progress here.")
def save(self):
with open(BISECT_FILE, 'w') as f:
json.dump(vars(self), f)
def update(self):
n = len(self.changes)
if n == 0:
print("no changes remaining.".format(**vars(self)))
elif n == 1:
print("{} change remaining: {}.".format(n, self.changes[0]))
elif n == 2:
print("{} changes remaining: [{}, {}]."
.format(n, self.changes[0], self.changes[-1]))
else:
print("{} changes remaining: [{}, ..., {}]."
.format(n, self.changes[0], self.changes[-1]))
if n > 0:
self.current = self.changes[n // 2]
print("Syncing to changelevel {current}.".format(**vars(self)))
sync(*['{}@{}'.format(f, self.current) for f in self.filespec])
self.save()
def help(parser, args):
parser.print_help()
def start(args):
args.filespec = args.filespec or ['...']
changes = sorted(int(c['change']) for c in p4.run('changes', *args.filespec))
if not changes:
error("No changes for {}".format(' '.join(args.filespec)))
if args.good is None:
args.good = changes[0]
if args.METHOD_NAME is None:
args.METHOD_NAME = changes[-1]
state = State(filespec=args.filespec,
changes=[c for c in changes if args.good <= c <= args.METHOD_NAME])
state.update()
def good(args):
state = State.load()
print("Change {current} good.".format(**vars(state)))
state.changes = [c for c in state.changes if c > state.current]
state.update()
def METHOD_NAME(args):
state = State.load()
print("Change {current} bad.".format(**vars(state)))
state.changes = [c for c in state.changes if c < state.current]
state.update()
def skip(args):
state = State.load()
print("Skipping change {current}.".format(**vars(state)))
state.changes.remove(state.current)
state.update()
def reset(args):
state = State.load()
sync(*state.filespec)
unlink(BISECT_FILE)
def run(args):
while True:
state = State.load()
if not state.changes:
break
result = subprocess.call([args.cmd] + args.args)
if result == 0:
good(None)
elif result == 125:
skip(None)
elif 0 < result < 128:
METHOD_NAME(None)
else:
exit(result)
def main(argv):
parser = argparse.ArgumentParser(
prog='p4-bisect', epilog='For help on CMD, use p4-bisect CMD -h')
subparsers = parser.add_subparsers()
a = subparsers.add_parser
help_parser = a('help', help='show this help message')
help_parser.set_defaults(func=partial(help, parser))
start_parser = a('start', help='start a p4-bisect session')
start_parser.add_argument('-f', '--filespec', action='append',
help='filespec(s) to search')
start_parser.add_argument('good', nargs='?', type=int,
help='known good changelevel')
start_parser.add_argument('bad', nargs='?', type=int,
help='known bad changelevel')
start_parser.set_defaults(func=start)
good_parser = a('good', help='declare current revision good')
good_parser.set_defaults(func=good)
bad_parser = a('bad', help='declare current revision bad')
bad_parser.set_defaults(func=METHOD_NAME)
skip_parser = a('skip', help='skip current revision')
skip_parser.set_defaults(func=skip)
reset_parser = a('reset', help='finish p4-bisect session')
reset_parser.set_defaults(func=reset)
run_parser = a('run', help='run p4-bisect session automatically')
run_parser.add_argument('cmd',
help='command that determines if current '
'changelevel is good or bad')
run_parser.add_argument('args', nargs=argparse.REMAINDER,
help='arguments to pass to cmd')
run_parser.set_defaults(func=run)
args = parser.parse_args(argv[1:])
args.func(args)
if __name__ == '__main__':
main(sys.argv)
|
3,132 |
unit sim reaction wheel
|
# ISC License
#
# Copyright (c) 2016, Autonomous Vehicle Systems Lab, University of Colorado at Boulder
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# Integrated Unit Test Script
# Purpose: Run a test of the reaction wheel sim module
# Author: John Alcorn
# Creation Date: November 14, 2016
#
import inspect
import os
import numpy as np
import pytest
filename = inspect.getframeinfo(inspect.currentframe()).filename
path = os.path.dirname(os.path.abspath(filename))
from Basilisk.utilities import macros
from Basilisk.utilities import unitTestSupport
from Basilisk.simulation import reactionWheelStateEffector
from Basilisk.architecture import messaging
# methods
def listStack(vec,simStopTime,unitProcRate):
# returns a list duplicated the number of times needed to be consistent with module output
return [vec] * int(simStopTime/(float(unitProcRate)/float(macros.sec2nano(1))))
def writeNewRWCmds(self, u_cmd, numRW):
# create standard vector from SWIG template (see .i file)
NewRWCmdsVec = messaging.RWCmdMsgPayloadVector(numRW)
cmds = messaging.RWCmdMsgPayload()
for i in range(0, numRW):
cmds.u_cmd = u_cmd[i]
NewRWCmdsVec[i] = cmds # set the data
self.NewRWCmds = NewRWCmdsVec # set in module
def defaultReactionWheel():
RW = messaging.RWConfigMsgPayload()
RW.rWB_B = [[0.], [0.], [0.]]
RW.gsHat_B = [[1.], [0.], [0.]]
RW.w2Hat0_B = [[0.], [1.], [0.]]
RW.w3Hat0_B = [[0.], [0.], [1.]]
RW.RWModel = reactionWheelStateEffector.BalancedWheels
return RW
def asEigen(v):
out = []
for i in range(0, len(v)):
out.append([v[i]])
return out
# uncomment this line if this test is to be skipped in the global unit test run, adjust message as needed
# @pytest.mark.skipif(conditionstring)
# uncomment this line if this test has an expected failure, adjust message as needed
# The following 'parametrize' function decorator provides the parameters and expected results for each
# of the multiple test runs for this test.
@pytest.mark.parametrize("useFlag, testCase", [
(False, 'saturation'),
(False, 'minimum'),
(False, 'speedSaturation'),
(False, 'powerSaturation')
])
# provide a unique test method name, starting with test_
def test_unitSimReactionWheel(show_plots, useFlag, testCase):
"""Module Unit Test"""
# each test method requires a single assert method to be called
[testResults, testMessage] = METHOD_NAME(show_plots, useFlag, testCase)
assert testResults < 1, testMessage
def METHOD_NAME(show_plots, useFlag, testCase):
testFail = False
testFailCount = 0 # zero unit test result counter
testMessages = [] # create empty array to store test log messages
# configure module
ReactionWheel = reactionWheelStateEffector.ReactionWheelStateEffector()
ReactionWheel.ModelTag = "ReactionWheel"
numRW = 2
RWs = []
for i in range(0, numRW):
RWs.append(defaultReactionWheel())
expOut = dict() # expected output
print(testCase)
if testCase == 'basic':
pass
elif testCase == 'saturation':
RWs.append(defaultReactionWheel())
RWs[0].u_max = 1.
RWs[1].u_max = 2.
RWs[2].u_max = 2.
u_cmd = [-1.2, 1.5, 2.5]
writeNewRWCmds(ReactionWheel, u_cmd, len(RWs))
expOut['u_current'] = [-1., 1.5, 2.]
elif testCase == 'minimum':
RWs[0].u_min = .1
RWs[1].u_min = .0
u_cmd = [-.09, 0.0001]
writeNewRWCmds(ReactionWheel, u_cmd, len(RWs))
expOut['u_current'] = [0., 0.0001]
elif testCase == 'speedSaturation':
RWs.append(defaultReactionWheel())
RWs[0].Omega_max = 50.
RWs[1].Omega_max = 50.
RWs[2].Omega_max = 50.
RWs[0].Omega = 49.
RWs[1].Omega = 51.
RWs[2].Omega = -52.
u_cmd = [1.5, 1.5, 1.5]
writeNewRWCmds(ReactionWheel, u_cmd, len(RWs))
expOut['u_current'] = [1.5, 0.0, 1.5]
elif testCase == 'powerSaturation':
RWs.append(defaultReactionWheel())
RWs[0].P_max = 1.
RWs[1].P_max = 1.
RWs[2].P_max = 1.
RWs[0].Omega = 50.
RWs[1].Omega = 50.
RWs[2].Omega = 50.
u_cmd = [0.01, -0.04, 0.04]
writeNewRWCmds(ReactionWheel, u_cmd, len(RWs))
expOut['u_current'] = [0.01, -0.02, 0.02]
else:
raise Exception('invalid test case')
for i in range(0, len(RWs)):
ReactionWheel.addReactionWheel(RWs[i])
ReactionWheel.ConfigureRWRequests(0.)
if 'accuracy' not in vars():
accuracy = 1e-10
for outputName in list(expOut.keys()):
for i in range(0, len(RWs)):
if expOut[outputName][i] != ReactionWheel.ReactionWheelData[i].u_current:
print("expected: " + str(expOut[outputName][i]))
print("got :" + str(ReactionWheel.ReactionWheelData[i].u_current))
testFail = 1
break
if testFail:
break
if testFail:
testFailCount += 1
testMessages.append("FAILED: " + ReactionWheel.ModelTag + " Module failed " +
outputName + " unit test")
np.set_printoptions(precision=16)
# print out success message if no errors were found
if testFailCount == 0:
print("PASSED ")
colorText = 'ForestGreen'
passedText = r'\textcolor{' + colorText + '}{' + "PASSED" + '}'
else:
colorText = 'Red'
passedText = r'\textcolor{' + colorText + '}{' + "FAILED" + '}'
# Write some snippets for AutoTex
snippetName = testCase + 'PassFail'
unitTestSupport.writeTeXSnippet(snippetName, passedText, path)
# each test method requires a single assert method to be called
# this check below just makes sure no sub-test failures were found
return [testFailCount, ''.join(testMessages)]
# This statement below ensures that the unit test script can be run as a
# standalone python script
if __name__ == "__main__":
test_unitSimReactionWheel(
False, # show_plots
False, # useFlag
'speedSaturation' # testCase
)
|
3,133 |
test channel
|
import os
from asyncio import to_thread
from datetime import timedelta
from math import isclose
from typing import TYPE_CHECKING, Dict
import arrow
import pytest
from humanfriendly.testing import TemporaryDirectory
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession
from virtool.pg.utils import get_row_by_id
from virtool.tasks.client import TasksClient
from virtool.tasks.data import TasksData
from virtool.tasks.models import SQLTask
from virtool.tasks.spawner import TaskSpawnerService, PeriodicTask
from virtool.tasks.task import BaseTask
from virtool.utils import get_temp_dir
if TYPE_CHECKING:
from virtool.data.layer import DataLayer
class DummyBaseTask(BaseTask):
name = "dummmy_base_task"
def __init__(self, task_id, data, context, temp_dir):
super().__init__(task_id, data, context, temp_dir)
self.steps = [self.step_one, self.step_two]
async def step_one(self):
...
async def step_two(self):
...
class DummyTask(BaseTask):
name = "dummy_task"
def __init__(
self,
task_id: int,
data: "DataLayer",
context: Dict,
temp_dir: TemporaryDirectory,
):
super().__init__(task_id, data, context, temp_dir)
self.steps = [self.create_file, self.remove_file]
async def create_file(self):
with open(self.temp_path / "test.txt", "w") as f:
f.write("This is a test file.")
async def remove_file(self):
await to_thread(os.remove, self.temp_path / "test.txt")
@pytest.fixture
async def task(data_layer, pg: AsyncEngine, static_time) -> DummyTask:
task = SQLTask(
id=1,
complete=False,
context={"user_id": "test"},
count=0,
created_at=static_time.datetime,
progress=0,
step="create_file",
type="test_task",
)
async with AsyncSession(pg) as session:
session.add(task)
await session.commit()
return await DummyTask.from_task_id(data_layer, 1)
async def test_base_task(data_layer, pg, static_time):
task = SQLTask(
id=1,
complete=False,
context={"user_id": "test"},
count=0,
created_at=static_time.datetime,
progress=0,
step="create_file",
type="test_task",
)
async with AsyncSession(pg) as session:
session.add(task)
await session.commit()
task = DummyBaseTask(1, data_layer, {}, get_temp_dir())
await task.run()
row: SQLTask = await get_row_by_id(pg, SQLTask, 1)
assert row.id == 1
assert row.complete is True
assert row.progress == 100
assert row.step == "step_two"
@pytest.mark.parametrize("error", [None, "error"])
async def test_run(error, task, pg: AsyncEngine):
task.errored = error
await task.run()
async with AsyncSession(pg) as session:
result = (
(await session.execute(select(SQLTask).filter_by(id=task.task_id)))
.scalar()
.to_dict()
)
if error:
assert result["progress"] == 0
else:
assert result["progress"] == 100
assert not os.path.exists(task.temp_path)
@pytest.fixture
def METHOD_NAME():
return "test-task-channel"
@pytest.fixture
def tasks_client(redis):
return TasksClient(redis)
async def test_progress_handler_set_progress(task: BaseTask, pg: AsyncEngine):
task.step = task.steps[0]
tracker_1 = task.create_progress_handler()
await tracker_1.set_progress(50)
assert (await get_row_by_id(pg, SQLTask, 1)).progress == 25
await tracker_1.set_progress(100)
assert (await get_row_by_id(pg, SQLTask, 1)).progress == 50
task.step = task.steps[1]
tracker_2 = task.create_progress_handler()
await tracker_2.set_progress(100)
assert (await get_row_by_id(pg, SQLTask, 1)).progress == 100
async def test_progress_handler_set_error(task: BaseTask, pg: AsyncEngine):
task.step = task.steps[0]
tracker = task.create_progress_handler()
await tracker.set_error("GenericError")
assert (await get_row_by_id(pg, SQLTask, 1)).error == "GenericError"
async def test_register(pg: AsyncEngine, tasks_data: TasksData):
await tasks_data.create(DummyBaseTask)
await tasks_data.create(DummyTask)
await tasks_data.create(DummyBaseTask)
last_run_task = (await tasks_data.find())[0]
task_spawner_service = TaskSpawnerService(pg, tasks_data)
tasks = [(DummyBaseTask, 10), (DummyTask, 15)]
await task_spawner_service.register(tasks)
assert isclose(
(
(
task_spawner_service.registered[0].last_triggered
- last_run_task.created_at
).total_seconds()
),
0,
abs_tol=0.8,
)
async def test_check_or_spawn_task(pg: AsyncEngine, tasks_data: TasksData):
"""
First case tests that the task has spawned, second case ensures that it does not
"""
task_spawner_service = TaskSpawnerService(pg, tasks_data)
# This time should trigger a spawn as it is greater than the interval.
long_last_triggered = (arrow.utcnow() - timedelta(seconds=180)).naive
task_spawner_service.registered.append(
PeriodicTask(DummyTask, interval=60, last_triggered=long_last_triggered)
)
spawned_task = await task_spawner_service.check_or_spawn_task(
task_spawner_service.registered[0]
)
assert spawned_task.last_triggered != long_last_triggered
# This time should prevent a task being spawned as it is less than the interval.
short_last_triggered = (arrow.utcnow() - timedelta(seconds=20)).naive
task_spawner_service.registered.append(
PeriodicTask(DummyBaseTask, interval=60, last_triggered=short_last_triggered)
)
not_spawned_task = await task_spawner_service.check_or_spawn_task(
task_spawner_service.registered[1]
)
assert not_spawned_task.last_triggered == short_last_triggered
|
3,134 |
test bad authentication
|
import unittest
import base64
from django.conf.urls import include, url
from django.contrib.auth import get_user_model
from django.http import HttpResponse
from django.test import TestCase
from django.test.utils import override_settings
from oauth2_provider.models import get_application_model
from apps.dot_ext.authentication import SLSAuthentication
Application = get_application_model()
UserModel = get_user_model()
try:
from rest_framework import permissions
from rest_framework.views import APIView
class MockView(APIView):
permission_classes = (permissions.IsAuthenticated,)
def get(self, request):
return HttpResponse({"a": 1, "b": 2, "c": 3})
def post(self, request):
return HttpResponse({"a": 1, "b": 2, "c": 3})
class SLSAuthView(MockView):
authentication_classes = [SLSAuthentication]
urlpatterns = [
url(r"^oauth2/", include("oauth2_provider.urls")),
url(r"^oauth2-test/$", SLSAuthView.as_view()),
]
rest_framework_installed = True
except ImportError:
rest_framework_installed = False
@override_settings(ROOT_URLCONF=__name__)
class TestOAuth2Authentication(TestCase):
def setUp(self):
self.test_username = "0123456789abcdefghijklmnopqrstuvwxyz"
self.test_user = UserModel.objects.create_user("0123456789abcdefghijklmnopqrstuvwxyz", "[email protected]", "123456")
self.dev_user = UserModel.objects.create_user("dev_user", "[email protected]", "123456")
self.application = Application.objects.create(
name="Test Application",
redirect_uris="http://localhost http://example.com http://example.org",
user=self.dev_user,
client_type=Application.CLIENT_CONFIDENTIAL,
authorization_grant_type=Application.GRANT_AUTHORIZATION_CODE,
)
def _create_authorization_header(self, client_id, client_secret):
return "Basic {0}".format(base64.b64encode("{0}:{1}".format(client_id, client_secret).encode('utf-8')).decode('utf-8'))
def _create_authentication_header(self, username):
return "SLS {0}".format(base64.b64encode(username.encode('utf-8')).decode("utf-8"))
@unittest.skipUnless(rest_framework_installed, "djangorestframework not installed")
def test_authentication_allow(self):
auth = self._create_authorization_header(self.application.client_id, self.application.client_secret_plain)
response = self.client.get("/oauth2-test/",
HTTP_AUTHORIZATION=auth,
HTTP_X_AUTHENTICATION=self._create_authentication_header(self.test_username))
self.assertEqual(response.status_code, 200)
@unittest.skipUnless(rest_framework_installed, "djangorestframework not installed")
def test_authentication_denied(self):
auth = self._create_authorization_header(12345, "bogus")
response = self.client.get("/oauth2-test/",
HTTP_AUTHORIZATION=auth,
HTTP_X_AUTHENTICATION=self._create_authentication_header(self.test_username))
self.assertEqual(response.status_code, 403)
def test_user_dne(self):
auth = self._create_authorization_header(self.application.client_id, self.application.client_secret_plain)
response = self.client.get("/oauth2-test/",
HTTP_AUTHORIZATION=auth,
HTTP_X_AUTHENTICATION=self._create_authentication_header('bogus'))
self.assertEqual(response.status_code, 404)
def test_no_authentication(self):
auth = self._create_authorization_header(self.application.client_id, self.application.client_secret_plain)
response = self.client.get("/oauth2-test/",
HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 403)
def METHOD_NAME(self):
auth = self._create_authorization_header(self.application.client_id, self.application.client_secret_plain)
response = self.client.get("/oauth2-test/",
HTTP_AUTHORIZATION=auth,
HTTP_X_AUTHENTICATION="thisisabadheader")
self.assertEqual(response.status_code, 404)
def test_unknown_authentication(self):
auth = self._create_authorization_header(self.application.client_id, self.application.client_secret_plain)
response = self.client.get("/oauth2-test/",
HTTP_AUTHORIZATION=auth,
HTTP_X_AUTHENTICATION="UUID thisisabadheader")
self.assertEqual(response.status_code, 404)
|
3,135 |
find invitations by status
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This model manages a Invitation item in the Auth Service."""
from datetime import datetime, timedelta
from sqlalchemy import Column, DateTime, ForeignKey, Integer, String
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import relationship
from auth_api.config import get_named_config
from .base_model import BaseModel
from .db import db
from .invitation_membership import InvitationMembership
from .invite_status import InvitationStatus
class Invitation(BaseModel): # pylint: disable=too-few-public-methods # Temporarily disable until methods defined
"""Model for a Invitation record."""
__tablename__ = 'invitations'
id = Column(Integer, primary_key=True)
sender_id = Column(ForeignKey('users.id'), nullable=False)
recipient_email = Column(String(100), nullable=False)
sent_date = Column(DateTime, nullable=False)
accepted_date = Column(DateTime, nullable=True)
token = Column(String(100), nullable=True) # stores the one time invitation token
invitation_status_code = Column(ForeignKey('invitation_statuses.code'), nullable=False, default='PENDING')
type = Column(ForeignKey('invitation_types.code'), nullable=False, default='STANDARD')
invitation_status = relationship('InvitationStatus', foreign_keys=[invitation_status_code])
sender = relationship('User', foreign_keys=[sender_id])
membership = relationship('InvitationMembership', cascade='all,delete')
login_source = Column(String(20), nullable=True)
@hybrid_property
def expires_on(self):
"""Calculate the expiry date based on the config value."""
if self.invitation_status_code == 'PENDING':
return self.sent_date + timedelta(days=int(get_named_config().TOKEN_EXPIRY_PERIOD))
return None
@hybrid_property
def status(self):
"""Calculate the status based on the config value."""
current_time = datetime.now()
if self.invitation_status_code == 'PENDING':
expiry_time = self.sent_date + timedelta(days=int(get_named_config().TOKEN_EXPIRY_PERIOD))
if current_time >= expiry_time:
return 'EXPIRED'
return self.invitation_status_code
@classmethod
def create_from_dict(cls, invitation_info: dict, user_id, invitation_type):
"""Create a new Invitation from the provided dictionary."""
if invitation_info:
invitation = Invitation()
invitation.sender_id = user_id
invitation.type = invitation_type
invitation.recipient_email = invitation_info['recipientEmail']
invitation.sent_date = datetime.now()
invitation.invitation_status = InvitationStatus.get_default_status()
for member in invitation_info['membership']:
invitation_membership = InvitationMembership()
invitation_membership.org_id = member['orgId']
invitation_membership.membership_type_code = member['membershipType']
invitation.membership.append(invitation_membership)
invitation.save()
return invitation
return None
@classmethod
def find_invitations_by_user(cls, user_id):
"""Find all invitation sent by the given user."""
return cls.query.filter_by(sender_id=user_id).all()
@classmethod
def find_invitation_by_id(cls, invitation_id):
"""Find an invitation record that matches the id."""
return cls.query.filter_by(id=invitation_id).first()
@classmethod
def find_invitations_by_org(cls, org_id, status=None):
"""Find all invitations sent for specific org filtered by status."""
results = cls.query.filter(Invitation.membership.any(InvitationMembership.org_id == org_id))
return results.filter(Invitation.status == status.value).all() if status else results.all()
@staticmethod
def find_pending_invitations_by_user(user_id):
"""Find all invitations that are not in accepted state."""
return db.session.query(Invitation). \
filter(Invitation.sender_id == user_id). \
filter(Invitation.invitation_status_code != 'ACCEPTED').all()
@staticmethod
def find_pending_invitations_by_org(org_id):
"""Find all invitations that are not in accepted state."""
return db.session.query(Invitation) \
.filter(Invitation.membership.any(InvitationMembership.org_id == org_id)) \
.filter(Invitation.invitation_status_code != 'ACCEPTED').all()
@staticmethod
def METHOD_NAME(user_id, status):
"""Find all invitations that are not in accepted state."""
return db.session.query(Invitation). \
filter(Invitation.sender_id == user_id). \
filter(Invitation.invitation_status_code == status).all()
def update_invitation_as_retried(self):
"""Update this invitation with the new data."""
self.sent_date = datetime.now()
self.invitation_status = InvitationStatus.get_default_status()
self.save()
return self
|
3,136 |
set app
|
from abc import ABC, abstractmethod
from decimal import ROUND_HALF_EVEN, ROUND_UP, Decimal
from travertino.size import at_least
from toga.constants import CENTER, JUSTIFY, LEFT, RIGHT, TRANSPARENT
from ..colors import native_color
from ..libs.activity import MainActivity
from ..libs.android.graphics import PorterDuff__Mode, PorterDuffColorFilter, Rect
from ..libs.android.graphics.drawable import ColorDrawable, InsetDrawable
from ..libs.android.view import Gravity, View
from ..libs.android.widget import RelativeLayout__LayoutParams
def _get_activity(_cache=[]):
"""Android Toga widgets need a reference to the current activity to pass it as
`context` when creating Android native widgets. This may be useful at any time, so
we retain a global JNI ref.
:param _cache: List that is either empty or contains 1 item, the cached global JNI ref
"""
if _cache:
return _cache[0]
# See MainActivity.onCreate() for initialization of .singletonThis:
# https://github.com/beeware/briefcase-android-gradle-template/blob/3.7/%7B%7B%20cookiecutter.formal_name%20%7D%7D/app/src/main/java/org/beeware/android/MainActivity.java
# This can't be tested because if it isn't set, nothing else will work.
if not MainActivity.singletonThis: # pragma: no cover
raise ValueError(
"Unable to find MainActivity.singletonThis from Python. This is typically set by "
"org.beeware.android.MainActivity.onCreate()."
)
_cache.append(MainActivity.singletonThis.__global__())
return _cache[0]
class Scalable:
SCALE_DEFAULT_ROUNDING = ROUND_HALF_EVEN
def init_scale(self, context):
# The baseline DPI is 160:
# https://developer.android.com/training/multiscreen/screendensities
self.scale = context.getResources().getDisplayMetrics().densityDpi / 160
# Convert CSS pixels to native pixels
def scale_in(self, value, rounding=SCALE_DEFAULT_ROUNDING):
return self.scale_round(value * self.scale, rounding)
# Convert native pixels to CSS pixels
def scale_out(self, value, rounding=SCALE_DEFAULT_ROUNDING):
if isinstance(value, at_least):
return at_least(self.scale_out(value.value, rounding))
else:
return self.scale_round(value / self.scale, rounding)
def scale_round(self, value, rounding):
return int(Decimal(value).to_integral(rounding))
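# Illustrative sketch, not used by the backend: shows the CSS px <-> native px
# round trip that Scalable performs, assuming a hypothetical 420 dpi display
# (scale factor 420 / 160 = 2.625). The scale is assigned by hand because
# init_scale() needs a real Android context.
def _scaling_example():
    s = Scalable()
    s.scale = 420 / 160  # what init_scale() would compute for densityDpi=420
    native = s.scale_in(10)    # 10 CSS px -> 26 native px (26.25, rounded half-even)
    css = s.scale_out(native)  # back to 10 CSS px
    return native, css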
class Widget(ABC, Scalable):
# Some widgets are not generally focusable, but become focusable if there has been a
# keyboard event since the last touch event. To avoid this complicating the tests,
# these widgets disable programmatic focus entirely by setting focusable = False.
focusable = True
def __init__(self, interface):
super().__init__()
self.interface = interface
self.interface._impl = self
self._container = None
self.native = None
self._native_activity = _get_activity()
self.init_scale(self._native_activity)
self.create()
# Some widgets, e.g. TextView, may throw an exception if we call measure()
# before setting LayoutParams.
self.native.setLayoutParams(
RelativeLayout__LayoutParams(
RelativeLayout__LayoutParams.WRAP_CONTENT,
RelativeLayout__LayoutParams.WRAP_CONTENT,
)
)
# Immediately re-apply styles. Some widgets may defer style application until
# they have been added to a container.
self.interface.style.reapply()
@abstractmethod
def create(self):
...
def METHOD_NAME(self, app):
pass
def set_window(self, window):
pass
@property
def container(self):
return self._container
@container.setter
def container(self, container):
if self._container:
self._container.remove_content(self)
self._container = container
if container:
container.add_content(self)
for child in self.interface.children:
child._impl.container = container
self.refresh()
def get_enabled(self):
return self.native.isEnabled()
def set_enabled(self, value):
self.native.setEnabled(value)
def focus(self):
if self.focusable:
self.native.requestFocus()
def get_tab_index(self):
self.interface.factory.not_implemented("Widget.get_tab_index()")
def set_tab_index(self, tab_index):
self.interface.factory.not_implemented("Widget.set_tab_index()")
# APPLICATOR
def set_bounds(self, x, y, width, height):
self.container.set_content_bounds(
self, *map(self.scale_in, (x, y, width, height))
)
def set_hidden(self, hidden):
if hidden:
self.native.setVisibility(View.INVISIBLE)
else:
self.native.setVisibility(View.VISIBLE)
def set_font(self, font):
# By default, font can't be changed
pass
# Although setBackgroundColor is defined in the View base class, we can't use it as
# a default implementation because it often overwrites other aspects of the widget's
# appearance. So each widget must decide how to implement this method, possibly
# using one of the utility functions below.
def set_background_color(self, color):
pass
def set_background_simple(self, value):
if not hasattr(self, "_default_background"):
self._default_background = self.native.getBackground()
if value in (None, TRANSPARENT):
self.native.setBackground(self._default_background)
else:
background = ColorDrawable(native_color(value))
if isinstance(self._default_background, InsetDrawable):
outer_padding = Rect()
inner_padding = Rect()
self._default_background.getPadding(outer_padding)
self._default_background.getDrawable().getPadding(inner_padding)
insets = [
getattr(outer_padding, name) - getattr(inner_padding, name)
for name in ["left", "top", "right", "bottom"]
]
background = InsetDrawable(background, *insets)
self.native.setBackground(background)
def set_background_filter(self, value):
self.native.getBackground().setColorFilter(
None
if value in (None, TRANSPARENT)
else PorterDuffColorFilter(native_color(value), PorterDuff__Mode.SRC_IN)
)
def set_alignment(self, alignment):
pass # If appropriate, a widget subclass will implement this.
def set_color(self, color):
pass # If appropriate, a widget subclass will implement this.
# INTERFACE
def add_child(self, child):
child.container = self.container
def insert_child(self, index, child):
self.add_child(child)
def remove_child(self, child):
child.container = None
# TODO: consider calling requestLayout or forceLayout here
# (https://github.com/beeware/toga/issues/1289#issuecomment-1453096034)
def refresh(self):
intrinsic = self.interface.intrinsic
intrinsic.width = intrinsic.height = None
self.rehint()
assert intrinsic.width is not None, self
assert intrinsic.height is not None, self
intrinsic.width = self.scale_out(intrinsic.width, ROUND_UP)
intrinsic.height = self.scale_out(intrinsic.height, ROUND_UP)
@abstractmethod
def rehint(self):
...
def align(value):
"""Convert toga alignment values into Android alignment values."""
return {
LEFT: Gravity.LEFT,
RIGHT: Gravity.RIGHT,
CENTER: Gravity.CENTER_HORIZONTAL,
JUSTIFY: Gravity.LEFT,
}[value]
|
3,137 |
validate
|
# Copyright (c) 2022, Frappe Technologies and contributors
# License: MIT. See LICENSE
import json
import frappe
from frappe import _
from frappe.config import get_modules_from_all_apps_for_user
from frappe.model.document import Document
from frappe.modules.export_file import export_to_files
from frappe.query_builder import DocType
class Dashboard(Document):
# begin: auto-generated types
# This code is auto-generated. Do not modify anything in this block.
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from frappe.desk.doctype.dashboard_chart_link.dashboard_chart_link import DashboardChartLink
from frappe.desk.doctype.number_card_link.number_card_link import NumberCardLink
from frappe.types import DF
cards: DF.Table[NumberCardLink]
chart_options: DF.Code | None
charts: DF.Table[DashboardChartLink]
dashboard_name: DF.Data
is_default: DF.Check
is_standard: DF.Check
module: DF.Link | None
# end: auto-generated types
def on_update(self):
if self.is_default:
# make all other dashboards non-default
DashBoard = DocType("Dashboard")
frappe.qb.update(DashBoard).set(DashBoard.is_default, 0).where(
DashBoard.name != self.name
).run()
if frappe.conf.developer_mode and self.is_standard:
export_to_files(
record_list=[["Dashboard", self.name, f"{self.module} Dashboard"]], record_module=self.module
)
def METHOD_NAME(self):
if not frappe.conf.developer_mode and self.is_standard:
frappe.throw(_("Cannot edit Standard Dashboards"))
if self.is_standard:
non_standard_docs_map = {
"Dashboard Chart": get_non_standard_charts_in_dashboard(self),
"Number Card": get_non_standard_cards_in_dashboard(self),
}
if non_standard_docs_map["Dashboard Chart"] or non_standard_docs_map["Number Card"]:
message = get_non_standard_warning_message(non_standard_docs_map)
frappe.throw(message, title=_("Standard Not Set"), is_minimizable=True)
self.validate_custom_options()
def validate_custom_options(self):
if self.chart_options:
try:
json.loads(self.chart_options)
except ValueError as error:
frappe.throw(_("Invalid json added in the custom options: {0}").format(error))
def get_permission_query_conditions(user):
if not user:
user = frappe.session.user
if user == "Administrator":
return
roles = frappe.get_roles(user)
if "System Manager" in roles:
return None
allowed_modules = [
frappe.db.escape(module.get("module_name")) for module in get_modules_from_all_apps_for_user()
]
return "`tabDashboard`.`module` in ({allowed_modules}) or `tabDashboard`.`module` is NULL".format(
allowed_modules=",".join(allowed_modules)
)
@frappe.whitelist()
def get_permitted_charts(dashboard_name):
permitted_charts = []
dashboard = frappe.get_doc("Dashboard", dashboard_name)
for chart in dashboard.charts:
if frappe.has_permission("Dashboard Chart", doc=chart.chart):
chart_dict = frappe._dict()
chart_dict.update(chart.as_dict())
if dashboard.get("chart_options"):
chart_dict.custom_options = dashboard.get("chart_options")
permitted_charts.append(chart_dict)
return permitted_charts
@frappe.whitelist()
def get_permitted_cards(dashboard_name):
dashboard = frappe.get_doc("Dashboard", dashboard_name)
return [card for card in dashboard.cards if frappe.has_permission("Number Card", doc=card.card)]
def get_non_standard_charts_in_dashboard(dashboard):
non_standard_charts = [doc.name for doc in frappe.get_list("Dashboard Chart", {"is_standard": 0})]
return [
chart_link.chart for chart_link in dashboard.charts if chart_link.chart in non_standard_charts
]
def get_non_standard_cards_in_dashboard(dashboard):
non_standard_cards = [doc.name for doc in frappe.get_list("Number Card", {"is_standard": 0})]
return [card_link.card for card_link in dashboard.cards if card_link.card in non_standard_cards]
def get_non_standard_warning_message(non_standard_docs_map):
message = _("""Please set the following documents in this Dashboard as standard first.""")
def get_html(docs, doctype):
html = f"<p>{frappe.bold(doctype)}</p>"
for doc in docs:
html += '<div><a href="/app/Form/{doctype}/{doc}">{doc}</a></div>'.format(
doctype=doctype, doc=doc
)
html += "<br>"
return html
html = message + "<br>"
for doctype in non_standard_docs_map:
if non_standard_docs_map[doctype]:
html += get_html(non_standard_docs_map[doctype], doctype)
return html
|
3,138 |
tpx path
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import shlex
import sys
from typing import Optional
class Env(object):
def __init__(self, src=None) -> None:
self._dict = {}
if src is None:
self.update(os.environ)
else:
self.update(src)
def update(self, src) -> None:
for k, v in src.items():
self.set(k, v)
def copy(self) -> "Env":
return Env(self._dict)
def _key(self, key):
# The `str` cast may not appear to be needed, but without it we run
# into issues when passing the environment to subprocess. The main
# issue is that in python2 `os.environ` (which is the initial source
# of data for the environment) uses byte based strings, but this
# project uses `unicode_literals`. `subprocess` will raise an error
# if the environment that it is passed has a mixture of byte and
# unicode strings.
# It is simplest to force everything to be `str` for the sake of
# consistency.
key = str(key)
if sys.platform.startswith("win"):
# Windows env var names are case insensitive but case preserving.
# An implementation of PAR files on windows gets confused if
# the env block contains keys with conflicting case, so make a
# pass over the contents to remove any.
# While this O(n) scan is technically expensive and gross, it
# is practically not a problem because the volume of calls is
# relatively low and the cost of manipulating the env is dwarfed
# by the cost of spawning a process on windows. In addition,
# since the processes that we run are expensive anyway, this
# overhead is not the worst thing to worry about.
for k in list(self._dict.keys()):
if str(k).lower() == key.lower():
return k
elif key in self._dict:
return key
return None
def get(self, key, defval=None):
key = self._key(key)
if key is None:
return defval
return self._dict[key]
def __getitem__(self, key):
val = self.get(key)
if key is None:
raise KeyError(key)
return val
def unset(self, key) -> None:
if key is None:
raise KeyError("attempting to unset env[None]")
key = self._key(key)
if key:
del self._dict[key]
def __delitem__(self, key) -> None:
self.unset(key)
def __repr__(self):
return repr(self._dict)
def set(self, key, value) -> None:
if key is None:
raise KeyError("attempting to assign env[None] = %r" % value)
if value is None:
raise ValueError("attempting to assign env[%s] = None" % key)
# The `str` conversion is important to avoid triggering errors
# with subprocess if we pass in a unicode value; see commentary
# in the `_key` method.
key = str(key)
value = str(value)
# The `unset` call is necessary on windows where the keys are
# case insensitive. Since this dict is case sensitive, simply
# assigning the value to the new key is not sufficient to remove
# the old value. The `unset` call knows how to match keys and
# remove any potential duplicates.
self.unset(key)
self._dict[key] = value
def __setitem__(self, key, value) -> None:
self.set(key, value)
def __iter__(self):
return self._dict.__iter__()
def __len__(self) -> int:
return len(self._dict)
def keys(self):
return self._dict.keys()
def values(self):
return self._dict.values()
def items(self):
return self._dict.items()
def add_path_entry(
env, name, item, append: bool = True, separator: str = os.pathsep
) -> None:
"""Cause `item` to be added to the path style env var named
`name` held in the `env` dict. `append` specifies whether
the item is added to the end (the default) or should be
prepended if `name` already exists."""
val = env.get(name, "")
if len(val) > 0:
val = val.split(separator)
else:
val = []
if append:
val.append(item)
else:
val.insert(0, item)
env.set(name, separator.join(val))
def add_flag(env, name, flag: str, append: bool = True) -> None:
"""Cause `flag` to be added to the CXXFLAGS-style env var named
`name` held in the `env` dict. `append` specifies whether the
flag is added to the end (the default) or should be prepended if
`name` already exists."""
val = shlex.split(env.get(name, ""))
if append:
val.append(flag)
else:
val.insert(0, flag)
env.set(name, " ".join(val))
_path_search_cache = {}
_not_found = object()
def METHOD_NAME() -> str:
return "xplat/testinfra/tpx/ctp.tpx"
def path_search(env, exename: str, defval: Optional[str] = None) -> Optional[str]:
"""Search for exename in the PATH specified in env.
exename is eg: `ninja` and this function knows to append a .exe
to the end on windows.
Returns the path to the exe if found, or None if either no
PATH is set in env or no executable is found."""
path = env.get("PATH", None)
if path is None:
return defval
# The project hash computation code searches for C++ compilers (g++, clang, etc)
# repeatedly. Cache the result so we don't end up searching for these over and over
# again.
cache_key = (path, exename)
result = _path_search_cache.get(cache_key, _not_found)
if result is _not_found:
result = _perform_path_search(path, exename)
_path_search_cache[cache_key] = result
return result
def _perform_path_search(path, exename: str) -> Optional[str]:
is_win = sys.platform.startswith("win")
if is_win:
exename = "%s.exe" % exename
for bindir in path.split(os.pathsep):
full_name = os.path.join(bindir, exename)
if os.path.exists(full_name) and os.path.isfile(full_name):
if not is_win and not os.access(full_name, os.X_OK):
continue
return full_name
return None
|
3,139 |
prop descriptions
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Unselected(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scatterternary"
_path_str = "scatterternary.unselected"
_valid_props = {"marker", "textfont"}
# marker
# ------
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterternary.unselected.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
color
Sets the marker color of unselected points,
applied only when a selection exists.
opacity
Sets the marker opacity of unselected points,
applied only when a selection exists.
size
Sets the marker size of unselected points,
applied only when a selection exists.
Returns
-------
plotly.graph_objs.scatterternary.unselected.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
# textfont
# --------
@property
def textfont(self):
"""
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterternary.unselected.Textfont`
- A dict of string/value properties that will be passed
to the Textfont constructor
Supported dict properties:
color
Sets the text font color of unselected points,
applied only when a selection exists.
Returns
-------
plotly.graph_objs.scatterternary.unselected.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
# Self properties description
# ---------------------------
@property
def METHOD_NAME(self):
return """\
marker
:class:`plotly.graph_objects.scatterternary.unselected.
Marker` instance or dict with compatible properties
textfont
:class:`plotly.graph_objects.scatterternary.unselected.
Textfont` instance or dict with compatible properties
"""
def __init__(self, arg=None, marker=None, textfont=None, **kwargs):
"""
Construct a new Unselected object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatterternary.Unselected`
marker
:class:`plotly.graph_objects.scatterternary.unselected.
Marker` instance or dict with compatible properties
textfont
:class:`plotly.graph_objects.scatterternary.unselected.
Textfont` instance or dict with compatible properties
Returns
-------
Unselected
"""
super(Unselected, self).__init__("unselected")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatterternary.Unselected
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterternary.Unselected`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("marker", None)
_v = marker if marker is not None else _v
if _v is not None:
self["marker"] = _v
_v = arg.pop("textfont", None)
_v = textfont if textfont is not None else _v
if _v is not None:
self["textfont"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
3,140 |
set up
|
import json
from django.test import TestCase
from tastypie.test import ResourceTestCaseMixin
class TestPVWindProdFactor(ResourceTestCaseMixin, TestCase):
def METHOD_NAME(self):
super(TestPVWindProdFactor, self).METHOD_NAME()
self.submit_url = '/v1/job/'
self.results_url = '/v1/job/<run_uuid>/results/'
self.post = {"Scenario": {"webtool_uuid": None, "description": "", "timeout_seconds": 295,
"Site": {
"PV": {"prod_factor_series_kw": [1]*8760, "min_kw": 2.0, "max_kw": 2.0, "existing_kw": 0.0, "pbi_years": 1.0, "macrs_bonus_pct": 0.0, "pbi_max_us_dollars": 1000000000.0, "radius": 0.0, "state_ibi_pct": 0.0, "state_rebate_us_dollars_per_kw": 0.0, "installed_cost_us_dollars_per_kw": 2000.0, "utility_ibi_max_us_dollars": 10000000000.0, "tilt": 35.2468, "degradation_pct": 0.005, "gcr": 0.4, "pbi_system_max_kw": 1000000000.0, "utility_ibi_pct": 0.0, "state_ibi_max_us_dollars": 10000000000.0, "utility_rebate_max_us_dollars": 10000000000.0, "macrs_option_years": 5, "state_rebate_max_us_dollars": 10000000000.0, "dc_ac_ratio": 1.1, "federal_itc_pct": 0.3, "module_type": 0, "array_type": 0, "pbi_us_dollars_per_kwh": 0.0, "om_cost_us_dollars_per_kw": 16.0, "utility_rebate_us_dollars_per_kw": 0.0, "losses": 0.14, "macrs_itc_reduction": 0.5, "federal_rebate_us_dollars_per_kw": 0.0, "inv_eff": 0.96, "azimuth": 180.0},
"Wind": {"prod_factor_series_kw": [1]*8760, "min_kw": 2, "max_kw": 2, "installed_cost_us_dollars_per_kw": 0.0, "om_cost_us_dollars_per_kw": 0},
"Generator": {"max_kw": 0.0},
"LoadProfile": {"doe_reference_name": "MidriseApartment", "annual_kwh": None, "critical_loads_kw_is_net": False, "year": 2017, "loads_kw_is_net": True, "outage_start_time_step": None, "outage_end_time_step": None, "monthly_totals_kwh": [], "critical_load_pct": 0.5, "outage_is_major_event": True, "critical_loads_kw": []},
"address": "",
"Storage": {"max_kwh": 0.0, "max_kw": 0.0},
"land_acres": None,
"ElectricTariff": {"add_blended_rates_to_urdb_rate": False, "wholesale_rate_us_dollars_per_kwh": 0.0, "net_metering_limit_kw": 0.0, "interconnection_limit_kw": 100000000.0, "blended_monthly_demand_charges_us_dollars_per_kw": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "urdb_utility_name": "", "urdb_label": "", "wholesale_rate_above_site_load_us_dollars_per_kwh": 0.0, "urdb_rate_name": "", "urdb_response": None, "blended_annual_demand_charges_us_dollars_per_kw": 0.0, "blended_annual_rates_us_dollars_per_kwh": 0.0, "blended_monthly_rates_us_dollars_per_kwh": [0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2]},
"longitude": -91.7337,
"roof_squarefeet": None,
"latitude": 35.2468,
"Financial": {"escalation_pct": 0.026, "offtaker_discount_pct": 0.081, "value_of_lost_load_us_dollars_per_kwh": 100.0, "analysis_years": 20, "microgrid_upgrade_cost_pct": 0.3, "offtaker_tax_pct": 0.26, "om_cost_escalation_pct": 0.025},
},
"time_steps_per_hour": 1, "user_uuid": None}}
def get_response(self, data):
initial_post = self.api_client.post(self.submit_url, format='json', data=data)
uuid = json.loads(initial_post.content)['run_uuid']
response = json.loads(self.api_client.get(self.results_url.replace('<run_uuid>', str(uuid))).content)
return response
def test_custom_prod_factors(self):
"""
Pass in a PV and Wind each with min_kw and max_kw constraints set to 2 kW.
Set each prod_factor_series_kw to [1]*8760 and then expect the year_one_energy_produced_kwh to be 2 * 8760.
Tests that the custom prod_factor is being used, not the one from PVWatts or the Wind SAM SDK.
"""
response = self.get_response(self.post)
pv_out = response['outputs']['Scenario']['Site']['PV']
wind_out = response['outputs']['Scenario']['Site']['Wind']
self.assertEqual(pv_out['size_kw'], 2,
"PV size ({} kW) does not equal expected value ({} kW)."
.format(pv_out['size_kw'], 2))
self.assertEqual(pv_out['year_one_energy_produced_kwh'], 2 * 8760,
"PV energy produced ({} kWh) does not equal expected value({} kWh)."
.format(pv_out['year_one_energy_produced_kwh'], 2 * 8760))
self.assertEqual(wind_out['size_kw'], 2,
"Wind size ({} kW) does not equal expected value ({} kW)."
.format(wind_out['size_kw'], 2))
self.assertEqual(wind_out['year_one_energy_produced_kwh'], 2 * 8760,
"Wind energy produced ({} kWh) does not equal expected value({} kWh)."
.format(wind_out['year_one_energy_produced_kwh'], 2 * 8760))
|
3,141 |
notify
|
import logging
from copy import deepcopy
import requests
from redash.destinations import BaseDestination, register
from redash.models import Alert
class Webex(BaseDestination):
@classmethod
def configuration_schema(cls):
return {
"type": "object",
"properties": {
"webex_bot_token": {"type": "string", "title": "Webex Bot Token"},
"to_person_emails": {
"type": "string",
"title": "People (comma-separated)",
},
"to_room_ids": {
"type": "string",
"title": "Rooms (comma-separated)",
},
},
"secret": ["webex_bot_token"],
"required": ["webex_bot_token"],
}
@classmethod
def icon(cls):
return "fa-webex"
@property
def api_base_url(self):
return "https://webexapis.com/v1/messages"
@staticmethod
def formatted_attachments_template(subject, description, query_link, alert_link):
return [
{
"contentType": "application/vnd.microsoft.card.adaptive",
"content": {
"$schema": "http://adaptivecards.io/schemas/adaptive-card.json",
"type": "AdaptiveCard",
"version": "1.0",
"body": [
{
"type": "ColumnSet",
"columns": [
{
"type": "Column",
"width": 4,
"items": [
{
"type": "TextBlock",
"text": {subject},
"weight": "bolder",
"size": "medium",
"wrap": True,
},
{
"type": "TextBlock",
"text": {description},
"isSubtle": True,
"wrap": True,
},
{
"type": "TextBlock",
"text": f"Click [here]({query_link}) to check your query!",
"wrap": True,
"isSubtle": True,
},
{
"type": "TextBlock",
"text": f"Click [here]({alert_link}) to check your alert!",
"wrap": True,
"isSubtle": True,
},
],
},
],
}
],
},
}
]
def METHOD_NAME(self, alert, query, user, new_state, app, host, metadata, options):
# Documentation: https://developer.webex.com/docs/api/guides/cards
query_link = f"{host}/queries/{query.id}"
alert_link = f"{host}/alerts/{alert.id}"
if new_state == Alert.TRIGGERED_STATE:
subject = alert.custom_subject or f"{alert.name} just triggered"
else:
subject = f"{alert.name} went back to normal"
attachments = self.formatted_attachments_template(
subject=subject, description=alert.custom_body, query_link=query_link, alert_link=alert_link
)
template_payload = {"markdown": subject + "\n" + alert.custom_body, "attachments": attachments}
headers = {"Authorization": f"Bearer {options['webex_bot_token']}"}
api_destinations = {
"toPersonEmail": options.get("to_person_emails"),
"roomId": options.get("to_room_ids"),
}
for payload_tag, destinations in api_destinations.items():
if destinations is None:
continue
# destinations is guaranteed to be a comma-separated string
for destination_id in destinations.split(","):
payload = deepcopy(template_payload)
payload[payload_tag] = destination_id
self.post_message(payload, headers)
def post_message(self, payload, headers):
try:
resp = requests.post(
self.api_base_url,
json=payload,
headers=headers,
timeout=5.0,
)
logging.warning(resp.text)
if resp.status_code != 200:
logging.error("Webex send ERROR. status_code => {status}".format(status=resp.status_code))
except Exception as e:
logging.exception(f"Webex send ERROR: {e}")
register(Webex)
|
3,142 |
build model
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq import utils
from fairseq.models import (
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.models.fconv import FConvDecoder
@register_model("fconv_lm")
class FConvLanguageModel(FairseqLanguageModel):
def __init__(self, decoder):
super().__init__(decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension",
)
parser.add_argument(
"--decoder-layers",
type=str,
metavar="EXPR",
help="decoder layers [(dim, kernel_size), ...]",
)
parser.add_argument(
"--decoder-out-embed-dim",
type=int,
metavar="N",
help="decoder output embedding dimension",
)
parser.add_argument(
"--adaptive-softmax-cutoff",
metavar="EXPR",
help="comma separated list of adaptive softmax cutoff points. "
"Must be used with adaptive_loss criterion",
)
parser.add_argument(
"--adaptive-softmax-dropout",
type=float,
metavar="D",
help="sets adaptive softmax dropout for the tail projections",
)
parser.add_argument(
"--decoder-attention",
type=str,
metavar="EXPR",
help="decoder attention [True, ...]",
)
@classmethod
def METHOD_NAME(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_lm_architecture(args)
if hasattr(args, "max_target_positions") and not hasattr(
args, "tokens_per_sample"
):
args.tokens_per_sample = args.max_target_positions
decoder = FConvDecoder(
dictionary=task.target_dictionary,
embed_dim=args.decoder_embed_dim,
convolutions=eval(args.decoder_layers),
out_embed_dim=args.decoder_embed_dim,
attention=eval(args.decoder_attention),
dropout=args.dropout,
max_positions=args.tokens_per_sample,
share_embed=False,
positional_embeddings=False,
adaptive_softmax_cutoff=(
utils.eval_str_list(args.adaptive_softmax_cutoff, type=int)
if args.criterion == "adaptive_loss"
else None
),
adaptive_softmax_dropout=args.adaptive_softmax_dropout,
)
return FConvLanguageModel(decoder)
@register_model_architecture("fconv_lm", "fconv_lm")
def base_lm_architecture(args):
args.dropout = getattr(args, "dropout", 0.1)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 128)
args.decoder_layers = getattr(args, "decoder_layers", "[(1268, 4)] * 13")
args.decoder_attention = getattr(args, "decoder_attention", "False")
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
@register_model_architecture("fconv_lm", "fconv_lm_dauphin_wikitext103")
def fconv_lm_dauphin_wikitext103(args):
layers = "[(850, 6)] * 3"
layers += " + [(850, 1)] * 1"
layers += " + [(850, 5)] * 4"
layers += " + [(850, 1)] * 1"
layers += " + [(850, 4)] * 3"
layers += " + [(1024, 4)] * 1"
layers += " + [(2048, 4)] * 1"
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 280)
args.decoder_layers = getattr(args, "decoder_layers", layers)
args.decoder_attention = getattr(args, "decoder_attention", "False")
args.adaptive_softmax_cutoff = getattr(
args, "adaptive_softmax_cutoff", "10000,20000,200000"
)
base_lm_architecture(args)
@register_model_architecture("fconv_lm", "fconv_lm_dauphin_gbw")
def fconv_lm_dauphin_gbw(args):
layers = "[(512, 5)]"
layers += " + [(128, 1, 0), (128, 5, 0), (512, 1, 3)] * 3"
layers += " + [(512, 1, 0), (512, 5, 0), (1024, 1, 3)] * 3"
layers += " + [(1024, 1, 0), (1024, 5, 0), (2048, 1, 3)] * 6"
layers += " + [(1024, 1, 0), (1024, 5, 0), (4096, 1, 3)]"
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 128)
args.decoder_layers = getattr(args, "decoder_layers", layers)
args.decoder_attention = getattr(args, "decoder_attention", "False")
args.adaptive_softmax_cutoff = getattr(
args, "adaptive_softmax_cutoff", "10000,50000,200000"
)
base_lm_architecture(args)
|
3,143 |
min timer
|
import numpy as np
from qutip.settings import settings as qset
from timeit import default_timer as timer
def METHOD_NAME(function, *args, **kwargs):
min_time = 1e6
for kk in range(10000):
t0 = timer()
function(*args, **kwargs)
t1 = timer()
min_time = min(min_time, t1-t0)
return min_time
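# Illustrative sketch, not part of QuTiP: times a small NumPy matrix product
# with the helper above. The helper runs 10000 repetitions internally, so keep
# the operands small.
def _min_timer_example():
    a = np.random.rand(64, 64)
    b = np.random.rand(64, 64)
    return METHOD_NAME(np.dot, a, b)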
def system_bench(func, dims):
from qutip.random_objects import rand_ket
ratio = 0
ratio_old = 0
nnz_old = 0
for N in dims:
L = func(N).data
vec = rand_ket(L.shape[0], 0.25).full().ravel()
nnz = L.nnz
out = np.zeros_like(vec)
ser = METHOD_NAME(_spmvpy, L.data, L.indices, L.indptr, vec, 1, out)
out = np.zeros_like(vec)
par = METHOD_NAME(_spmvpy_openmp, L.data, L.indices, L.indptr, vec, 1, out, 2)
ratio = ser/par
if ratio > 1:
break
nnz_old = nnz
ratio_old = ratio
if ratio > 1:
rate = (ratio-ratio_old)/(nnz-nnz_old)
return int((1.0-ratio_old)/rate+nnz_old)
else:
return -1
def calculate_openmp_thresh():
# if qset.num_cpus == 1:
# return qset.openmp_thresh
jc_dims = np.unique(np.logspace(0.45, 1.78, 20, dtype=int))
jc_result = system_bench(_jc_liouvillian, jc_dims)
opto_dims = np.unique(np.logspace(0.4, 1.33, 12, dtype=int))
opto_result = system_bench(_opto_liouvillian, opto_dims)
spin_dims = np.unique(np.logspace(0.45, 1.17, 10, dtype=int))
spin_result = system_bench(_spin_hamiltonian, spin_dims)
# Double result to be conservative
thresh = 2*int(max([jc_result, opto_result, spin_result]))
if thresh < 0:
thresh = np.iinfo(np.int32).max
return thresh
def _jc_liouvillian(N):
from qutip.core import tensor, destroy, qeye, liouvillian
wc = 2*np.pi * 1.0 # cavity frequency
wa = 2*np.pi * 1.0 # atom frequency
g = 2*np.pi * 0.05 # coupling strength
kappa = 0.005 # cavity dissipation rate
gamma = 0.05 # atom dissipation rate
n_th_a = 1 # temperature in frequency units
use_rwa = 0
# operators
a = tensor(destroy(N), qeye(2))
sm = tensor(qeye(N), destroy(2))
# Hamiltonian
H = wc*a.dag()*a + wa*sm.dag()*sm + g*(a.dag()*sm + a*sm.dag())
c_op_list = []
rate = kappa * (1 + n_th_a)
if rate > 0.0:
c_op_list.append(np.sqrt(rate) * a)
rate = kappa * n_th_a
if rate > 0.0:
c_op_list.append(np.sqrt(rate) * a.dag())
rate = gamma
if rate > 0.0:
c_op_list.append(np.sqrt(rate) * sm)
return liouvillian(H, c_op_list)
def _opto_liouvillian(N):
from qutip.core import tensor, destroy, qeye, liouvillian
Nc = 5 # Number of cavity states
Nm = N # Number of mech states
kappa = 0.3 # Cavity damping rate
E = 0.1 # Driving Amplitude
g0 = 2.4*kappa # Coupling strength
Qm = 1e4 # Mech quality factor
gamma = 1/Qm # Mech damping rate
n_th = 1 # Mech bath temperature
delta = -0.43 # Detuning
a = tensor(destroy(Nc), qeye(Nm))
b = tensor(qeye(Nc), destroy(Nm))
num_b = b.dag()*b
num_a = a.dag()*a
H = -delta*(num_a)+num_b+g0*(b.dag()+b)*num_a+E*(a.dag()+a)
cc = np.sqrt(kappa)*a
cm = np.sqrt(gamma*(1.0 + n_th))*b
cp = np.sqrt(gamma*n_th)*b.dag()
c_ops = [cc, cm, cp]
return liouvillian(H, c_ops)
def _spin_hamiltonian(N):
from qutip.core import tensor, qeye, sigmax, sigmay, sigmaz
# array of spin energy splittings and coupling strengths. here we use
# uniform parameters, but in general we don't have too
h = 2*np.pi * 1.0 * np.ones(N)
Jz = 2*np.pi * 0.1 * np.ones(N)
Jx = 2*np.pi * 0.1 * np.ones(N)
Jy = 2*np.pi * 0.1 * np.ones(N)
# dephasing rate
si = qeye(2)
sx = sigmax()
sy = sigmay()
sz = sigmaz()
sx_list = []
sy_list = []
sz_list = []
for n in range(N):
op_list = [si] * N
op_list[n] = sx
sx_list.append(tensor(op_list))
op_list[n] = sy
sy_list.append(tensor(op_list))
op_list[n] = sz
sz_list.append(tensor(op_list))
# construct the hamiltonian
H = 0
# energy splitting terms
for n in range(N):
H += - 0.5 * h[n] * sz_list[n]
# interaction terms
for n in range(N-1):
H += - 0.5 * Jx[n] * sx_list[n] * sx_list[n+1]
H += - 0.5 * Jy[n] * sy_list[n] * sy_list[n+1]
H += - 0.5 * Jz[n] * sz_list[n] * sz_list[n+1]
return H
|
3,144 |
collect
|
from collections import OrderedDict
from typing import Any, Dict, Iterable, List, Tuple
import mindspore.nn as nn
from mindspore import Tensor
def _cell_list(net: nn.Cell, flatten_sequential: bool = False) -> Iterable[Tuple[str, str, nn.Cell]]:
"""Yield the partially flattened cell list from the model, together with its new name and old name
Args:
net (nn.Cell): Network need to be partially flattened
flatten_sequential (bool): Flatten the inner-layer of the sequential cell. Default: False.
Returns:
iterator[tuple[str, str, nn.Cell]]: The new name, the old name and corresponding cell
"""
for name, cell in net.name_cells().items():
if flatten_sequential and isinstance(cell, nn.SequentialCell):
for child_name, child_cell in cell.name_cells().items():
combined = [name, child_name]
yield "_".join(combined), ".".join(combined), child_cell
else:
yield name, name, cell
def _get_return_layers(feature_info: List[Dict[str, Any]], out_indices: List[int]) -> Dict[str, int]:
"""Create a dict storing the "layer_name - layer_id" pairs that need to be extracted"""
return_layers = dict()
for i, x in enumerate(feature_info):
if i in out_indices:
return_layers[x["name"]] = i
return return_layers
class FeatureExtractWrapper(nn.Cell):
"""A wrapper of the original model, aims to perform the feature extraction at each stride.
Basically, it performs 3 steps: 1. extract the return node name from the network's property
`feature_info`; 2. partially flatten the network architecture if network's attribute `flatten_sequential`
is True; 3. rebuild the forward steps and output the features based on the return node name.
It also provides a property `out_channels` in the wrapped model, returning the number of features at each output
layer. This property is usually used for downstream tasks, which require feature information at network
build stage.
It should be noted that to apply this wrapper, there is a strong assumption that each of the outermost cells
is registered in the same order as they are used. And there should be no reuse of any cell, even a `ReLU`
cell. Otherwise, the returned result may not be correct.
It should also be noted that this wrapper basically rebuilds the model, so the default checkpoint parameters cannot be
loaded correctly once the model is wrapped. To use pretrained weights, please load the weights first and then apply
this wrapper to rebuild the model.
Args:
net (nn.Cell): The model need to be wrapped.
out_indices (list[int]): The indices of the output features. Default: [0, 1, 2, 3, 4]
"""
def __init__(self, net: nn.Cell, out_indices: List[int] = [0, 1, 2, 3, 4]) -> None:
super().__init__(auto_prefix=False)
feature_info = self._get_feature_info(net)
self.is_rewritten = getattr(net, "is_rewritten", False)
flatten_sequential = getattr(net, "flatten_sequential", False)
return_layers = _get_return_layers(feature_info, out_indices)
self.return_index = list()
if not self.is_rewritten:
cells = _cell_list(net, flatten_sequential=flatten_sequential)
self.net, updated_return_layers = self._create_net(cells, return_layers)
# calculate the return index
for i, name in enumerate(self.net.name_cells().keys()):
if name in updated_return_layers:
self.return_index.append(i)
else:
self.net = net
self.return_index = out_indices
# calculate the out_channels
self._out_channels = list()
for i in return_layers.values():
self._out_channels.append(feature_info[i]["chs"])
@property
def out_channels(self):
"""The output channels of the model, filtered by the out_indices.
"""
return self._out_channels
def construct(self, x: Tensor) -> List[Tensor]:
return self.METHOD_NAME(x)
def _get_feature_info(self, net: nn.Cell) -> List[Dict[str, Any]]:
try:
feature_info = getattr(net, "feature_info")
except AttributeError:
raise
return feature_info
def _create_net(
self, cells: Iterable[Tuple[str, str, nn.Cell]], return_layers: Dict[str, int]
) -> Tuple[nn.SequentialCell, Dict[str, int]]:
layers = OrderedDict()
updated_return_layers = dict()
remaining = set(return_layers.keys())
for new_name, old_name, module in cells:
layers[new_name] = module
if old_name in remaining:
updated_return_layers[new_name] = return_layers[old_name]
remaining.remove(old_name)
if not remaining:
break
net = nn.SequentialCell(layers)
return net, updated_return_layers
def METHOD_NAME(self, x: Tensor) -> List[Tensor]:
out = list()
if self.is_rewritten:
xs = self.net(x)
for i, x in enumerate(xs):
if i in self.return_index:
out.append(x)
else:
for i, cell in enumerate(self.net.cell_list):
x = cell(x)
if i in self.return_index:
out.append(x)
return out
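# Illustrative sketch, not part of the package: a tiny hand-written backbone
# whose outermost cells are registered in usage order and which exposes
# `feature_info`, wrapped to report the channels of the requested output layers.
def _feature_extract_example():
    class TinyBackbone(nn.Cell):
        def __init__(self):
            super().__init__()
            self.stem = nn.Conv2d(3, 8, 3)
            self.stage1 = nn.Conv2d(8, 16, 3, stride=2)
            self.stage2 = nn.Conv2d(16, 32, 3, stride=2)
            self.feature_info = [
                {"name": "stem", "chs": 8},
                {"name": "stage1", "chs": 16},
                {"name": "stage2", "chs": 32},
            ]
        def construct(self, x):
            return self.stage2(self.stage1(self.stem(x)))
    wrapper = FeatureExtractWrapper(TinyBackbone(), out_indices=[1, 2])
    return wrapper.out_channels  # expected: [16, 32]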
|
3,145 |
write
|
"""
General testing utilities.
"""
import functools
import sys
from contextlib import contextmanager
from django.dispatch import Signal
from markupsafe import escape
from mock import Mock, patch
@contextmanager
def nostderr():
"""
ContextManager to suppress stderr messages
http://stackoverflow.com/a/1810086/882918
"""
savestderr = sys.stderr
class Devnull:
""" /dev/null incarnation as output-stream-like object """
def METHOD_NAME(self, _):
""" Write method - just does nothing"""
pass # lint-amnesty, pylint: disable=unnecessary-pass
sys.stderr = Devnull()
try:
yield
finally:
sys.stderr = savestderr
class XssTestMixin:
"""
Mixin for testing XSS vulnerabilities.
"""
def assert_no_xss(self, response, xss_content):
"""Assert that `xss_content` is not present in the content of
`response`, and that its escaped version is present. Uses the
same `markupsafe.escape` function as Mako templates.
Args:
response (Response): The HTTP response
xss_content (str): The Javascript code to check for.
Returns:
None
"""
self.assertContains(response, escape(xss_content))
self.assertNotContains(response, xss_content)
def disable_signal(module, signal):
"""Replace `signal` inside of `module` with a dummy signal. Can be
used as a method or class decorator, as well as a context manager."""
return patch.object(module, signal, new=Signal())
class MockSignalHandlerMixin:
"""Mixin for testing sending of signals."""
@contextmanager
def assert_signal_sent(self, module, signal, *args, **kwargs):
"""Assert that a signal was sent with the correct arguments. Since
Django calls signal handlers with the signal as an argument,
it is added to `kwargs`.
Uses `mock.patch.object`, which requires the target to be
specified as a module along with a variable name inside that
module.
Args:
module (module): The module in which to patch the given signal name.
signal (str): The name of the signal to patch.
*args, **kwargs: The arguments which should have been passed
along with the signal. If `exclude_args` is passed as a
keyword argument, its value should be a list of keyword
arguments passed to the signal whose values should be
ignored.
"""
with patch.object(module, signal, new=Signal()) as mock_signal:
def handler(*args, **kwargs): # pylint: disable=unused-argument
"""No-op signal handler."""
pass # lint-amnesty, pylint: disable=unnecessary-pass
mock_handler = Mock(spec=handler)
mock_signal.connect(mock_handler)
yield
assert mock_handler.called
mock_args, mock_kwargs = mock_handler.call_args
if 'exclude_args' in kwargs:
for key in kwargs['exclude_args']:
assert key in mock_kwargs
del mock_kwargs[key]
del kwargs['exclude_args']
assert mock_args == args
assert mock_kwargs == dict(kwargs, signal=mock_signal)
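# Illustrative sketch with assumed names, not an existing test: exercises
# assert_signal_sent() against a stand-in "module" object so the pattern is
# visible without patching a real Django app module.
def _assert_signal_sent_example():
    import types
    import unittest
    fake_module = types.SimpleNamespace(my_signal=Signal())
    class _Demo(MockSignalHandlerMixin, unittest.TestCase):
        def runTest(self):
            with self.assert_signal_sent(fake_module, 'my_signal', sender=None, value=1):
                fake_module.my_signal.send(sender=None, value=1)
    _Demo().runTest()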
@contextmanager
def skip_signal(signal, **kwargs):
"""
ContextManager to skip a signal by disconnecting it, yielding,
and then reconnecting the signal.
"""
signal.disconnect(**kwargs)
try:
yield
finally:
signal.connect(**kwargs)
class MockS3Boto3Mixin:
"""
TestCase mixin that mocks the S3Boto3Storage save method and s3 connection.
"""
def setUp(self):
super().setUp()
self._mocked_connection = patch('boto3.resource', return_value=Mock())
self.mocked_connection = self._mocked_connection.start()
self.patcher = patch('storages.backends.s3boto3.S3Boto3Storage.save')
self.patcher.start()
def tearDown(self):
self._mocked_connection.stop()
self.patcher.stop()
super().tearDown()
class reprwrapper:
"""
Wrapper class for functions that need a normalized string representation.
"""
def __init__(self, func):
self._func = func
self.repr = f'Func: {func.__name__}'
functools.update_wrapper(self, func)
def __call__(self, *args, **kw):
return self._func(*args, **kw)
def __repr__(self):
return self.repr
def normalize_repr(func):
"""
Function decorator used to normalize its string representation.
Used to wrap functions used as ddt parameters, so pytest-xdist
doesn't complain about the sequence of discovered tests differing
between worker processes.
"""
return reprwrapper(func)
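# Illustrative sketch: wrapping a function with normalize_repr gives it a
# stable, name-based repr, which keeps ddt-generated test ids identical across
# pytest-xdist workers.
def _normalize_repr_example():
    @normalize_repr
    def square(x):
        return x * x
    return repr(square), square(3)  # ('Func: square', 9)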
|
3,146 |
send rpc
|
##############################################################
# Copyright 2023 Lawrence Livermore National Security, LLC
# (c.f. AUTHORS, NOTICE.LLNS, COPYING)
#
# This file is part of the Flux resource manager framework.
# For details, see https://github.com/flux-framework.
#
# SPDX-License-Identifier: LGPL-3.0
##############################################################
import argparse
import json
import logging
import math
import sys
import flux
import flux.job
import flux.util
LOGGER = logging.getLogger("flux-update")
class JobspecUpdates:
"""
Convenience class for building a jobspec-update payload from a
set of KEY=VALUE pairs on the command line, and a method to send
the update as a request to the job manager.
"""
# Mapping of short key names, i.e. as given on the command line,
# to full dotted-path location in jobspec.
#
# Note: If a key doesn't exist in this mapping, but also does not start
# with 'attributes.', 'resources.' or 'tasks.', then 'attributes.system'
# is assumed.
#
key_aliases = {"name": "attributes.system.job.name"}
def __init__(self, jobid, flux_handle=None):
self._flux_handle = flux_handle
self.jobid = jobid
self.updates = None
self.jobspec = None
@property
def flux_handle(self):
if self._flux_handle is None:
self._flux_handle = flux.Flux()
return self._flux_handle
def _apply_jobspec_updates(self, eventlog):
"""
Apply jobspec updates from the eventlog to the internal jobspec.
"""
for entry in eventlog.splitlines():
event = flux.job.EventLogEvent(entry)
if event.name == "jobspec-update":
for key, value in event.context.items():
self.jobspec.setattr(key, value)
def _fetch_jobspec(self, key):
"""
Fetch dotted key 'key' in jobspec for this job, fetching jobspec
and eventlog (to apply jobspec-updates) if necessary.
"""
if self.jobspec is None:
lookup = flux.job.job_kvs_lookup(
self.flux_handle, jobid=self.jobid, keys=["jobspec", "eventlog"]
)
self.jobspec = flux.job.JobspecV1(**lookup["jobspec"])
self._apply_jobspec_updates(lookup["eventlog"])
return self.jobspec.getattr(key)
def update_attributes_system_duration(self, value):
"""
Handle a duration update.
If update begins with "+" or "-", then get duration from jobspec and
increase or decrease by the amount of the remaining argument. O/w,
treat value as an explicit new duration.
"""
result = None
if value.startswith(("-", "+")):
# relative update, fetch value first
duration = self._fetch_jobspec("attributes.system.duration")
if duration == 0:
raise ValueError(
f"duration for {self.jobid} is unlimited, "
f"can't update by {value}"
)
arg = flux.util.parse_fsd(value[1:])
if value.startswith("-"):
result = duration - arg
if result <= 0.0:
duration = flux.util.fsd(duration)
raise ValueError(
f"current duration for {self.jobid} ({duration})"
f" cannot be reduced by {value[1:]}"
)
else:
result = duration + arg
else:
result = flux.util.parse_fsd(value)
# An unlimited duration is represented as 0. in jobspec, so
# check for infinity here and replace with 0.
#
if math.isinf(result):
result = 0.0
return result
def add_update(self, key, value):
"""
Append an update to the current updates object.
"""
if self.updates is None:
self.updates = {}
# Handle any special keys aliases
if key in self.key_aliases:
key = self.key_aliases[key]
# If key doesn't start with attributes, resources, or tasks,
# assume 'attributes.system.' for convenience:
if not key.startswith(("attributes.", "resources.", "tasks.")):
key = f"attributes.system.{key}"
try:
# Use any function update_attributes_system_blah() if
# registered to process the value:
#
function_signature = "update_" + key.replace(".", "_")
value = getattr(self, function_signature)(value)
except AttributeError:
# Otherwise, attempt to load value as JSON:
#
try:
value = json.loads(value)
except json.decoder.JSONDecodeError:
# Otherwise, load value as string:
#
value = str(value)
self.updates[key] = value
def items(self):
"""
Convenience wrapper returning the current update dictionary's key, value pairs
"""
return self.updates.items()
def to_json(self):
return json.dumps(self.updates)
def METHOD_NAME(self):
payload = {"id": self.jobid, "updates": self.updates}
return self.flux_handle.rpc("job-manager.update", payload)
def parse_args():
parser = argparse.ArgumentParser(
prog="flux-update", formatter_class=flux.util.help_formatter()
)
parser.add_argument(
"-n",
"--dry-run",
action="store_true",
help="Do not apply any updates, just emit update payload to stdout",
)
parser.add_argument(
"-v",
"--verbose",
action="store_true",
default=0,
help="Be more verbose. Log updated items after success.",
)
parser.add_argument(
"jobid",
metavar="JOBID",
type=flux.job.JobID,
help="Target jobid",
)
parser.add_argument(
"updates",
metavar="KEY=VALUE",
type=str,
nargs="+",
help="Requested jobspec updates in KEY=VALUE form",
)
return parser.parse_args()
@flux.util.CLIMain(LOGGER)
def main():
sys.stdout = open(
sys.stdout.fileno(), "w", encoding="utf8", errors="surrogateescape"
)
sys.stderr = open(
sys.stderr.fileno(), "w", encoding="utf8", errors="surrogateescape"
)
args = parse_args()
updates = JobspecUpdates(args.jobid)
for arg in args.updates:
key, _, value = arg.partition("=")
updates.add_update(key, value)
if args.dry_run:
print(updates.to_json())
sys.exit(0)
updates.METHOD_NAME().get()
if args.verbose:
for key, value in updates.items():
LOGGER.info(f"updated {key} to {value}")
if __name__ == "__main__":
main()
# vi: ts=4 sw=4 expandtab
|
3,147 |
geodesic distance
|
import multiprocessing as mp
import warnings
from typing import Optional
import numpy as np
import torch
from torch import Tensor
def METHOD_NAME(
pos: Tensor,
face: Tensor,
src: Optional[Tensor] = None,
dst: Optional[Tensor] = None,
norm: bool = True,
max_distance: Optional[float] = None,
num_workers: int = 0,
**kwargs,
) -> Tensor:
r"""Computes (normalized) geodesic distances of a mesh given by :obj:`pos`
and :obj:`face`. If :obj:`src` and :obj:`dst` are given, this method only
computes the geodesic distances for the respective source and target
node-pairs.
.. note::
This function requires the :obj:`gdist` package.
To install, run :obj:`pip install cython && pip install gdist`.
Args:
pos (torch.Tensor): The node positions.
face (torch.Tensor): The face indices.
src (torch.Tensor, optional): If given, only compute geodesic distances
for the specified source indices. (default: :obj:`None`)
dst (torch.Tensor, optional): If given, only compute geodesic distances
for the specified target indices. (default: :obj:`None`)
norm (bool, optional): Normalizes geodesic distances by
:math:`\sqrt{\textrm{area}(\mathcal{M})}`. (default: :obj:`True`)
max_distance (float, optional): If given, only yields results for
geodesic distances less than :obj:`max_distance`. This will speed
up runtime dramatically. (default: :obj:`None`)
num_workers (int, optional): How many subprocesses to use for
calculating geodesic distances.
:obj:`0` means that computation takes place in the main process.
:obj:`-1` means that the available amount of CPU cores is used.
(default: :obj:`0`)
:rtype: :class:`Tensor`
Example:
>>> pos = torch.tensor([[0.0, 0.0, 0.0],
... [2.0, 0.0, 0.0],
... [0.0, 2.0, 0.0],
... [2.0, 2.0, 0.0]])
>>> face = torch.tensor([[0, 0],
... [1, 2],
... [3, 3]])
>>> geodesic_distance(pos, face)
[[0, 1, 1, 1.4142135623730951],
[1, 0, 1.4142135623730951, 1],
[1, 1.4142135623730951, 0, 1],
[1.4142135623730951, 1, 1, 0]]
"""
import gdist
if 'dest' in kwargs:
dst = kwargs['dest']
warnings.warn("'dest' attribute in 'geodesic_distance' is deprecated "
"and will be removed in a future release. Use the 'dst' "
"argument instead.")
max_distance = float('inf') if max_distance is None else max_distance
if norm:
area = (pos[face[1]] - pos[face[0]]).cross(pos[face[2]] - pos[face[0]])
norm = (area.norm(p=2, dim=1) / 2).sum().sqrt().item()
else:
norm = 1.0
dtype = pos.dtype
pos = pos.detach().cpu().to(torch.double).numpy()
face = face.detach().t().cpu().to(torch.int).numpy()
if src is None and dst is None:
out = gdist.local_gdist_matrix(pos, face,
max_distance * norm).toarray() / norm
return torch.from_numpy(out).to(dtype)
if src is None:
src = np.arange(pos.shape[0], dtype=np.int32)
else:
src = src.detach().cpu().to(torch.int).numpy()
dst = None if dst is None else dst.detach().cpu().to(torch.int).numpy()
def _parallel_loop(pos, face, src, dst, max_distance, norm, i, dtype):
s = src[i:i + 1]
d = None if dst is None else dst[i:i + 1]
out = gdist.compute_gdist(pos, face, s, d, max_distance * norm) / norm
return torch.from_numpy(out).to(dtype)
num_workers = mp.cpu_count() if num_workers <= -1 else num_workers
if num_workers > 0:
with mp.Pool(num_workers) as pool:
outs = pool.starmap(
_parallel_loop,
[(pos, face, src, dst, max_distance, norm, i, dtype)
for i in range(len(src))])
else:
outs = [
_parallel_loop(pos, face, src, dst, max_distance, norm, i, dtype)
for i in range(len(src))
]
out = torch.cat(outs, dim=0)
if dst is None:
out = out.view(-1, pos.shape[0])
return out
|
3,148 |
cci
|
"""Momentum Technical Analysis"""
__docformat__ = "numpy"
import logging
from typing import Tuple
import numpy as np
import pandas as pd
import pandas_ta as ta
from sklearn.linear_model import LinearRegression
from openbb_terminal.common.technical_analysis import ta_helpers
from openbb_terminal.decorators import log_start_end
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def METHOD_NAME(
data: pd.DataFrame,
window: int = 14,
scalar: float = 0.0015,
) -> pd.DataFrame:
"""Commodity channel index
Parameters
----------
    data: pd.DataFrame
        Dataframe of OHLC prices
window: int
Length of window
scalar: float
Scalar variable
Returns
-------
pd.DataFrame
Dataframe of technical indicator
"""
close_col = ta_helpers.check_columns(data)
if close_col is None:
return pd.DataFrame()
return pd.DataFrame(
ta.METHOD_NAME(
high=data["High"],
low=data["Low"],
close=data[close_col],
length=window,
scalar=scalar,
).dropna()
)
@log_start_end(log=logger)
def macd(
data: pd.Series,
n_fast: int = 12,
n_slow: int = 26,
n_signal: int = 9,
) -> pd.DataFrame:
"""Moving average convergence divergence
Parameters
----------
data: pd.Series
Values for calculation
n_fast : int
Fast period
n_slow : int
Slow period
n_signal : int
Signal period
Returns
-------
pd.DataFrame
Dataframe of technical indicator
"""
if isinstance(data, pd.DataFrame):
console.print("[red]Please send a series and not a DataFrame.[/red]\n")
return pd.DataFrame()
return pd.DataFrame(
ta.macd(data, fast=n_fast, slow=n_slow, signal=n_signal).dropna()
)
@log_start_end(log=logger)
def rsi(
data: pd.Series, window: int = 14, scalar: float = 100, drift: int = 1
) -> pd.DataFrame:
"""Relative strength index
Parameters
----------
data: pd.Series
        Series of prices
window: int
Length of window
scalar: float
Scalar variable
drift: int
Drift variable
Returns
-------
pd.DataFrame
Dataframe of technical indicator
"""
if isinstance(data, pd.DataFrame):
console.print("[red]Please send a series and not a DataFrame.[/red]\n")
return pd.DataFrame()
raw_data = ta.rsi(data, length=window, scalar=scalar, drift=drift)
if raw_data is None:
return pd.DataFrame()
if raw_data.empty:
return pd.DataFrame()
return pd.DataFrame(raw_data.dropna())
@log_start_end(log=logger)
def stoch(
data: pd.DataFrame,
fastkperiod: int = 14,
slowdperiod: int = 3,
slowkperiod: int = 3,
):
"""Stochastic oscillator
Parameters
----------
data : pd.DataFrame
Dataframe of OHLC prices
fastkperiod : int
Fast k period
slowdperiod : int
Slow d period
slowkperiod : int
Slow k period
Returns
-------
pd.DataFrame
Dataframe of technical indicator
"""
close_col = ta_helpers.check_columns(data)
if close_col is None:
return pd.DataFrame()
return pd.DataFrame(
ta.stoch(
high=data["High"],
low=data["Low"],
close=data[close_col],
k=fastkperiod,
d=slowdperiod,
smooth_k=slowkperiod,
).dropna()
)
@log_start_end(log=logger)
def fisher(data: pd.DataFrame, window: int = 14) -> pd.DataFrame:
"""Fisher Transform
Parameters
----------
data : pd.DataFrame
Dataframe of OHLC prices
window: int
Length for indicator window
Returns
-------
df_ta: pd.DataFrame
Dataframe of technical indicator
"""
# Daily
close_col = ta_helpers.check_columns(data, close=False)
if close_col is None:
return pd.DataFrame()
return pd.DataFrame(
ta.fisher(high=data["High"], low=data["Low"], length=window).dropna()
)
@log_start_end(log=logger)
def cg(values: pd.Series, window: int) -> pd.DataFrame:
"""Center of gravity
Parameters
----------
    values: pd.Series
        Series of close values
window: int
Length for indicator window
Returns
-------
pd.DataFrame
Dataframe of technical indicator
"""
return pd.DataFrame(ta.cg(close=values, length=window).dropna())
@log_start_end(log=logger)
def clenow_momentum(
values: pd.Series, window: int = 90
) -> Tuple[float, float, pd.Series]:
"""Gets the Clenow Volatility Adjusted Momentum. this is defined as the regression coefficient on log prices
multiplied by the R^2 value of the regression
Parameters
----------
values: pd.Series
Values to perform regression for
window: int
Length of lookback period
Returns
-------
float:
R2 of fit to log data
float:
Coefficient of linear regression
pd.Series:
Values for best fit line
Examples
--------
>>> from openbb_terminal.sdk import openbb
>>> df = openbb.stocks.load("AAPL")
>>> openbb.ta.clenow(df["Close"])
"""
if len(values) < window:
console.print(
f"[red]Calculation asks for at least last {window} days of data[/red]"
)
return np.nan, np.nan, pd.Series()
values = values[-window:]
y = np.log(values)
X = np.arange(len(y)).reshape(-1, 1)
lr = LinearRegression()
lr.fit(X, y)
r2 = lr.score(X, y)
coef = lr.coef_[0]
annualized_coef = (np.exp(coef) ** 252) - 1
return r2, annualized_coef, pd.Series(lr.predict(X))
@log_start_end(log=logger)
def demark_seq(values: pd.Series) -> pd.DataFrame:
"""Get the integer value for demark sequential indicator
Parameters
----------
values: pd.Series
Series of close values
Returns
-------
pd.DataFrame
Dataframe of UP and DOWN sequential indicators
Examples
--------
>>> from openbb_terminal.sdk import openbb
>>> df = openbb.stocks.load("AAPL")
>>> openbb.ta.demark(df["Close"])
"""
return ta.td_seq(values, asint=True)
|
3,149 |
user
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import object
import logging
import json
import posixpath
import threading
from desktop.conf import has_connectors
from desktop.lib.rest.http_client import HttpClient
from desktop.lib.rest.resource import Resource
from spark.conf import get_livy_server_url, SECURITY_ENABLED, SSL_CERT_CA_VERIFY, CSRF_ENABLED
LOG = logging.getLogger()
DEFAULT_USER = 'hue'
_API_VERSION = 'v1'
_JSON_CONTENT_TYPE = 'application/json'
_BINARY_CONTENT_TYPE = 'application/octet-stream'
_TEXT_CONTENT_TYPE = 'text/plain'
API_CACHE = None
API_CACHE_LOCK = threading.Lock()
def get_api(METHOD_NAME, connector=None):
if connector is not None and connector.get('options'):
client = LivyClient(connector['options']['api_url'])
client.setuser(METHOD_NAME)
return client
else:
global API_CACHE
if API_CACHE is None:
API_CACHE_LOCK.acquire()
try:
if API_CACHE is None:
API_CACHE = LivyClient(get_livy_server_url())
finally:
API_CACHE_LOCK.release()
API_CACHE.setuser(METHOD_NAME)
return API_CACHE
class LivyClient(object):
def __init__(self, livy_url):
self._url = posixpath.join(livy_url)
self._client = HttpClient(self._url, logger=LOG)
self._root = Resource(self._client)
self._security_enabled = SECURITY_ENABLED.get()
self._csrf_enabled = CSRF_ENABLED.get()
self._thread_local = threading.local()
if self.security_enabled:
self._client.set_kerberos_auth()
if self.csrf_enabled:
self._client.set_headers({'X-Requested-By': 'hue'})
self._client.set_verify(SSL_CERT_CA_VERIFY.get())
def __str__(self):
return "LivyClient at %s" % (self._url,)
@property
def url(self):
return self._url
@property
def security_enabled(self):
return self._security_enabled
@property
def csrf_enabled(self):
return self._csrf_enabled
@property
def METHOD_NAME(self):
return self._thread_local.METHOD_NAME
def setuser(self, METHOD_NAME):
if hasattr(METHOD_NAME, 'username'):
self._thread_local.METHOD_NAME = METHOD_NAME.username
else:
self._thread_local.METHOD_NAME = METHOD_NAME
def get_status(self):
return self._root.get('sessions')
def get_log(self, uuid, startFrom=None, size=None):
params = {}
if startFrom is not None:
params['from'] = startFrom
if size is not None:
params['size'] = size
response = self._root.get('sessions/%s/log' % uuid, params=params)
return '\n'.join(response['log'])
def create_session(self, **properties):
properties['proxyUser'] = self.METHOD_NAME.split('@')[0]
if has_connectors(): # Only SQL supported via connectors currently
properties['kind'] = 'sql'
return self._root.post('sessions', data=json.dumps(properties), contenttype=_JSON_CONTENT_TYPE)
def get_sessions(self):
return self._root.get('sessions')
def get_session(self, uuid):
return self._root.get('sessions/%s' % uuid)
def get_statements(self, uuid):
return self._root.get('sessions/%s/statements' % uuid)
def submit_statement(self, uuid, statement):
data = {'code': statement}
return self._root.post('sessions/%s/statements' % uuid, data=json.dumps(data), contenttype=_JSON_CONTENT_TYPE)
def inspect(self, uuid, statement):
data = {'code': statement}
return self._root.post('sessions/%s/inspect' % uuid, data=json.dumps(data), contenttype=_JSON_CONTENT_TYPE)
def fetch_data(self, session, statement):
return self._root.get('sessions/%s/statements/%s' % (session, statement))
def cancel(self, session):
return self._root.post('sessions/%s/interrupt' % session)
def close(self, uuid):
return self._root.delete('sessions/%s' % uuid)
def get_batches(self):
return self._root.get('batches')
def cancel_statement(self, session, statement_id):
return self._root.post('sessions/%s/statements/%s/cancel' % (session, statement_id))
def submit_batch(self, properties):
properties['proxyUser'] = self.METHOD_NAME
return self._root.post('batches', data=json.dumps(properties), contenttype=_JSON_CONTENT_TYPE)
def get_batch(self, uuid):
return self._root.get('batches/%s' % uuid)
def get_batch_status(self, uuid):
response = self._root.get('batches/%s/state' % uuid)
return response['state']
def get_batch_log(self, uuid, startFrom=None, size=None):
params = {}
if startFrom is not None:
params['from'] = startFrom
if size is not None:
params['size'] = size
response = self._root.get('batches/%s/log' % uuid, params=params)
return '\n'.join(response['log'])
def close_batch(self, uuid):
return self._root.delete('batches/%s' % uuid)
|
3,150 |
string list
|
__author__ = "Andre Merzky, Ole Weidner"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
''' Provides a parser class for the file transfer specification as it is
    defined in GFD.90, section 4.1.3.
'''
from ... import exceptions as se
# 4.1.3 File Transfer Specifications (GFD90 p 176-177)
#
# The syntax of a file transfer directive for the job description is modeled on
# the LSF syntax (LSF stands for Load Sharing Facility, a commercial job
# scheduler by Platform Computing), and has the general syntax:
# local_file operator remote_file
# Both the local_file and the remote_file can be URLs. If they are not URLs,
# but full or relative pathnames, then the local_file is relative to the host
# where the submission is executed, and the remote_file is evaluated on the
# execution host of the job. The operator is one of the following four:
#
# '>' copies the local file to the remote file before the job starts.
# Overwrites the remote file if it exists.
# '>>' copies the local file to the remote file before the job starts.
# Appends to the remote file if it exists.
# '<' copies the remote file to the local file after the job finishes.
# Overwrites the local file if it exists.
# '<<' copies the remote file to the local file after the job finishes.
# Appends to the local file if it exists.
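# Illustrative directive strings and how this parser classifies them (the file
# names are hypothetical, not taken from GFD.90):
#
#   "input.dat > /scratch/input.dat"    -> in_overwrite  (stage in, overwrite)
#   "log.txt >> /scratch/log.txt"       -> in_append     (stage in, append)
#   "result.h5 < /scratch/result.h5"    -> out_overwrite (stage out, overwrite)
#   "trace.log << /scratch/trace.log"   -> out_append    (stage out, append)
#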
# ------------------------------------------------------------------------------
#
class TransferDirectives(object):
# --------------------------------------------------------------------------
#
def __init__(self, directives=None):
self._in_overwrite = list()
self._in_append = list()
self._out_overwrite = list()
self._out_append = list()
if not directives:
directives = []
for d in directives:
if (d.count('>') > 2) or (d.count('<') > 2):
msg = "'%s' is not a valid transfer d string."
raise se.BadParameter(msg)
elif '>>' in d:
(loc, rem) = d.split('>>')
self._in_append.append([loc.strip(), rem.strip()])
elif '>' in d:
(loc, rem) = d.split('>')
self._in_overwrite.append([loc.strip(), rem.strip()])
elif '<<' in d:
(loc, rem) = d.split('<<')
self._out_append.append([loc.strip(), rem.strip()])
elif '<' in d:
(loc, rem) = d.split('<')
self._out_overwrite.append([loc.strip(), rem.strip()])
else:
msg = "'%s' is not a valid transfer directive string." % d
raise se.BadParameter(msg)
# --------------------------------------------------------------------------
#
def _to_string_list(self):
slist = list()
for (loc, rem) in self._in_overwrite:
slist.append('%s > %s' % (loc, rem))
for (loc, rem) in self._in_append:
slist.append('%s >> %s' % (loc, rem))
for (loc, rem) in self._out_overwrite:
slist.append('%s < %s' % (loc, rem))
for (loc, rem) in self._out_append:
slist.append('%s << %s' % (loc, rem))
return slist
# --------------------------------------------------------------------------
#
def __str__(self):
return str(self._to_string_list())
# --------------------------------------------------------------------------
#
@property
def in_overwrite(self):
return self._in_overwrite
# --------------------------------------------------------------------------
#
@property
def in_append(self):
return self._in_append
# --------------------------------------------------------------------------
#
@property
def out_overwrite(self):
return self._out_overwrite
# --------------------------------------------------------------------------
#
@property
def out_append(self):
return self._out_append
# --------------------------------------------------------------------------
#
@property
def METHOD_NAME(self):
return self._to_string_list()
# ------------------------------------------------------------------------------
#
def _test_():
tdp = TransferDirectives(["ab","a>c", "c>>d","f<a","g<<h"])
print(tdp.in_append)
print(tdp.in_overwrite)
print(tdp.out_append)
print(tdp.out_overwrite)
# ------------------------------------------------------------------------------
|
3,151 |
get next
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._usages_operations import build_list_by_location_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class UsagesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.labservices.aio.ManagedLabsClient`'s
:attr:`usages` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_location(
self, location: str, filter: Optional[str] = None, **kwargs: Any
) -> AsyncIterable["_models.Usage"]:
"""Gets the list of usages.
Returns list of usage per SKU family for the specified subscription in the specified region.
:param location: The location name. Required.
:type location: str
:param filter: The filter to apply to the operation. Default value is None.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Usage or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.labservices.models.Usage]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-08-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.ListUsagesResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_location_request(
location=location,
subscription_id=self._config.subscription_id,
filter=filter,
api_version=api_version,
template_url=self.list_by_location.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ListUsagesResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def METHOD_NAME(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(METHOD_NAME, extract_data)
list_by_location.metadata = {
"url": "/subscriptions/{subscriptionId}/providers/Microsoft.LabServices/locations/{location}/usages"
}
|
3,152 |
on cmd user joined channel
|
import TeamTalk5
from TeamTalk5 import ttstr
class TTClient():
def __init__(self, host, tcpPort=10333, udpPort=10333, nickName="", userName="", password=""):
self.host = host
self.tcpPort = tcpPort
self.udpPort = udpPort
self.nickName = nickName
self.userName = userName
self.password = password
self.tt = TeamTalk5.TeamTalk()
self.tt.onConnectSuccess = self.onConnectSuccess
self.tt.onCmdMyselfLoggedIn = self.onCmdMyselfLoggedIn
self.tt.onCmdMyselfKickedFromChannel = self.onCmdMyselfKickedFromChannel
self.tt.onCmdUserLoggedIn = self.onCmdUserLoggedIn
self.tt.onCmdUserLoggedOut = self.onCmdUserLoggedOut
self.tt.onCmdUserUpdate = self.onCmdUserUpdate
self.tt.METHOD_NAME = self.METHOD_NAME
self.tt.onCmdUserLeftChannel = self.onCmdUserLeftChannel
self.tt.onCmdChannelNew = self.onCmdChannelNew
self.tt.onCmdChannelUpdate = self.onCmdChannelUpdate
self.tt.onCmdChannelRemove = self.onCmdChannelRemove
self.tt.onCmdUserTextMessage = self.onCmdUserTextMessage
self.tt.onCmdServerUpdate = self.onCmdServerUpdate
self.tt.onCmdFileNew = self.onCmdFileNew
self.tt.onCmdFileRemove = self.onCmdFileRemove
def start(self):
self.connect()
def connect(self):
self.tt.connect(self.host, self.tcpPort, self.udpPort)
def onConnectSuccess(self):
self.tt.doLogin(self.nickName, self.userName, self.password, ttstr("ttsamplepy"))
def onConnectionLost(self):
self.connect()
def onCmdMyselfLoggedIn(self,userID, userAccount):
print(f"Hello {userAccount.szUsername}. Your User ID is {userID}")
channelID = self.tt.getChannelIDFromPath(ttstr("/testChannel/"))
self.tt.doJoinChannelByID(channelID, ttstr(""))
def onCmdMyselfKickedFromChannel(self, channelID, user):
print(f"kicked from {channelID} by {user.szUsername}")
def onCmdUserLoggedIn(self, user):
print(f"{user.szUsername} with nickname {user.szNickname} has logged in")
def onCmdUserLoggedOut(self, user):
print(f"{user.szUsername} with nickname {user.szNickname} has logged out")
def onCmdUserUpdate(self, user):
print(f"{user.szUsername}was updated")
def METHOD_NAME(self, user):
channel = self.tt.getChannel(user.nChannelID)
print(f"{user.szUsername} with nickname {user.szNickname} has joined to channel {channel.szName}")
def onCmdUserLeftChannel(self, channelID, user):
channel = self.tt.getChannel(channelID)
print(f"{user.szUsername} with nickname {user.szNickname} has left channel {channel.szName}")
def onCmdChannelNew(self, channel):
print(f"channel {channel.szName} was added")
def onCmdChannelUpdate(self, channel):
print(f"channel {channel.szName} was updated")
def onCmdChannelRemove(self, channel):
print(f"channel {channel.szName} was removed")
def onCmdUserTextMessage(self, message):
msgType = message.nMsgType
if msgType == TeamTalk5.TextMsgType.MSGTYPE_USER:
self.onUserMessage(message.nFromUserID, message.szFromUsername, message.szMessage)
if msgType == TeamTalk5.TextMsgType.MSGTYPE_CHANNEL:
self.onChannelMessage(message.nFromUserID, message.szFromUsername, message.nChannelID, message.szMessage)
if msgType == TeamTalk5.TextMsgType.MSGTYPE_BROADCAST:
self.onBroadcastMessage(message.nFromUserID, message.szFromUsername, message.szMessage)
def onUserMessage(self, fromUserID, fromUserName, msgText):
print(f"User message from userid: {fromUserID}, username: {fromUserName} {msgText}")
def onChannelMessage(self, fromUserID, fromUserName, channelID, msgText):
print(f"Channel message in channelid {channelID} from userid {fromUserID} username: {fromUserName} {msgText}")
def onBroadcastMessage(self, fromUserID, fromUserName, msgText):
print(f"Broadcast message from userid: {fromUserID}, username: {fromUserName} {msgText}")
def onCmdServerUpdate(self, serverProperties):
print(f"Welcome to server {serverProperties.szServerName}")
def onCmdFileNew(self, remoteFile):
print(f"file {remoteFile.szFileName} was added")
def onCmdFileRemove(self, remoteFile):
print(f"file {remoteFile.szFileName} was removed")
if __name__ == "__main__":
try:
ttClient = TTClient(ttstr("localhost"), 10333, 10333, ttstr("Nickname"), ttstr("username"), ttstr("password"))
ttClient.start()
while True:
ttClient.tt.runEventLoop()
except Exception as e:
print(e)
|
3,153 |
main
|
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
This installer will install mysql-server on an Ubuntu machine.
In addition to the normal installation done by apt-get, it will
also configure the new MySQL server to store it's data files in
a different location. By default, this is /mnt but that can be
configured in the [MySQL] section of the boto config file passed
to the instance.
"""
from boto.pyami.installers.ubuntu.installer import Installer
import os
import boto
from boto.utils import ShellCommand
from boto.compat import ConfigParser
import time
ConfigSection = """
[MySQL]
root_password = <will be used as MySQL root password, default none>
data_dir = <new data dir for MySQL, default is /mnt>
"""
class MySQL(Installer):
def install(self):
self.run('apt-get update')
self.run('apt-get -y install mysql-server', notify=True, exit_on_error=True)
# def set_root_password(self, password=None):
# if not password:
# password = boto.config.get('MySQL', 'root_password')
# if password:
# self.run('mysqladmin -u root password %s' % password)
# return password
def change_data_dir(self, password=None):
data_dir = boto.config.get('MySQL', 'data_dir', '/mnt')
fresh_install = False
is_mysql_running_command = ShellCommand('mysqladmin ping') # exit status 0 if mysql is running
is_mysql_running_command.run()
if is_mysql_running_command.getStatus() == 0:
# mysql is running. This is the state apt-get will leave it in. If it isn't running,
# that means mysql was already installed on the AMI and there's no need to stop it,
# saving 40 seconds on instance startup.
time.sleep(10) #trying to stop mysql immediately after installing it fails
# We need to wait until mysql creates the root account before we kill it
# or bad things will happen
i = 0
while self.run("echo 'quit' | mysql -u root") != 0 and i < 5:
time.sleep(5)
i = i + 1
self.run('/etc/init.d/mysql stop')
self.run("pkill -9 mysql")
mysql_path = os.path.join(data_dir, 'mysql')
if not os.path.exists(mysql_path):
self.run('mkdir %s' % mysql_path)
fresh_install = True
self.run('chown -R mysql:mysql %s' % mysql_path)
fp = open('/etc/mysql/conf.d/use_mnt.cnf', 'w')
fp.write('# created by pyami\n')
fp.write('# use the %s volume for data\n' % data_dir)
fp.write('[mysqld]\n')
fp.write('datadir = %s\n' % mysql_path)
fp.write('log_bin = %s\n' % os.path.join(mysql_path, 'mysql-bin.log'))
fp.close()
if fresh_install:
self.run('cp -pr /var/lib/mysql/* %s/' % mysql_path)
self.start('mysql')
else:
#get the password ubuntu expects to use:
config_parser = ConfigParser()
config_parser.read('/etc/mysql/debian.cnf')
password = config_parser.get('client', 'password')
# start the mysql deamon, then mysql with the required grant statement piped into it:
self.start('mysql')
time.sleep(10) #time for mysql to start
grant_command = "echo \"GRANT ALL PRIVILEGES ON *.* TO 'debian-sys-maint'@'localhost' IDENTIFIED BY '%s' WITH GRANT OPTION;\" | mysql" % password
while self.run(grant_command) != 0:
time.sleep(5)
# leave mysqld running
def METHOD_NAME(self):
self.install()
        # change_data_dir runs 'mysql -u root' which assumes there is no mysql password,
# and changing that is too ugly to be worth it:
#self.set_root_password()
self.change_data_dir()
|
3,154 |
address
|
from __future__ import annotations
import logging
import os
import re
from typing import List, Callable
from checkov.common.parallelizer.parallel_runner import parallel_runner
from checkov.common.util.file_utils import read_file_with_any_encoding
from checkov.terraform.module_loading.registry import module_loader_registry
MODULE_SOURCE_PATTERN = re.compile(r'[^#]*\bsource\s*=\s*"(?P<link>.*)"')
MODULE_VERSION_PATTERN = re.compile(r'[^#]*\bversion\s*=\s*"(?P<operator>=|!=|>=|>|<=|<|~>)?\s*(?P<version>[\d.]+-?\w*)"')
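# Lines these patterns are intended to match (the module source and version
# shown are illustrative, not taken from a real configuration):
#   source  = "terraform-aws-modules/vpc/aws"  -> MODULE_SOURCE_PATTERN, group 'link'
#   version = "~> 3.0"                         -> MODULE_VERSION_PATTERN, groups 'operator'/'version'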
class ModuleDownload:
def __init__(self, source_dir: str) -> None:
self.source_dir = source_dir
self.module_link: str | None = None
self.version: str | None = None
def __str__(self) -> str:
return f"{self.source_dir} -> {self.module_link} ({self.version})"
@property
def METHOD_NAME(self) -> str:
return f'{self.module_link}:{self.version}'
def find_modules(path: str) -> List[ModuleDownload]:
modules_found: list[ModuleDownload] = []
for root, _, full_file_names in os.walk(path):
for file_name in full_file_names:
if not file_name.endswith('.tf'):
continue
try:
content = read_file_with_any_encoding(file_path=os.path.join(path, root, file_name))
if "module " not in content:
# if there is no "module " ref in the whole file, then no need to search line by line
continue
curr_md = None
for line in content.splitlines():
if not curr_md:
if line.startswith('module'):
curr_md = ModuleDownload(os.path.dirname(os.path.join(root, file_name)))
continue
else:
if line.startswith('}'):
if curr_md.module_link is None:
logging.warning(f'A module at {curr_md.source_dir} had no source, skipping')
else:
modules_found.append(curr_md)
curr_md = None
continue
if "source" in line:
match = re.match(MODULE_SOURCE_PATTERN, line)
if match:
curr_md.module_link = match.group('link')
continue
if "version" in line:
match = re.match(MODULE_VERSION_PATTERN, line)
if match:
curr_md.version = f"{match.group('operator')}{match.group('version')}" if match.group('operator') else match.group('version')
except (UnicodeDecodeError, FileNotFoundError) as e:
logging.warning(f"Skipping {os.path.join(path, root, file_name)} because of {e}")
continue
return modules_found
def should_download(path: str | None) -> bool:
return path is not None and not (path.startswith('./') or path.startswith('../') or path.startswith('/'))
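# For example (module sources are illustrative): should_download('./modules/vpc')
# and should_download('../shared') return False because local paths are already
# on disk, while should_download('terraform-aws-modules/vpc/aws') returns True.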
def load_tf_modules(
path: str,
should_download_module: Callable[[str | None], bool] = should_download,
run_parallel: bool = False,
modules_to_load: List[ModuleDownload] | None = None,
stop_on_failure: bool = False
) -> None:
module_loader_registry.root_dir = path
if not modules_to_load:
modules_to_load = find_modules(path)
def _download_module(m: ModuleDownload) -> bool:
if should_download_module(m.module_link):
logging.info(f'Downloading module {m.METHOD_NAME}')
try:
content = module_loader_registry.load(m.source_dir, m.module_link,
"latest" if not m.version else m.version)
if content is None or not content.loaded():
log_message = f'Failed to download module {m.METHOD_NAME}'
if not module_loader_registry.download_external_modules:
log_message += ' (for external modules, the --download-external-modules flag is required)'
logging.warning(log_message)
return False
except Exception as e:
logging.warning(f"Unable to load module ({m.METHOD_NAME}): {e}")
return False
return True
# To avoid duplicate work, we need to get the distinct module sources
distinct_modules = list({m.METHOD_NAME: m for m in modules_to_load}.values())
if run_parallel:
list(parallel_runner.run_function(_download_module, distinct_modules))
else:
logging.info(f"Starting download of modules of length {len(distinct_modules)}")
for m in distinct_modules:
success = _download_module(m)
if not success and stop_on_failure:
logging.info(f"Stopping downloading of modules due to failed attempt on {m.METHOD_NAME}")
break
|
3,155 |
is tflite available
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te, relay, transform
from tvm.contrib.download import download_testdata
from tvm.contrib import graph_executor as runtime
import os
import pytest
from PIL import Image
import numpy as np
from test_verilator.infrastructure import (
skip_test,
compile_hardware,
compiler_opts,
offload,
clear_stats,
stats,
)
def extract(path):
"""Extract a tgz or gz file.
    Parameters
    ----------
path : Str
The path of the compressed file.
"""
import tarfile
if path.endswith("tgz") or path.endswith("gz"):
dir_path = os.path.dirname(path)
tar = tarfile.open(path)
tar.extractall(path=dir_path)
tar.close()
else:
raise RuntimeError("Could not decompress the file: " + path)
def get_real_image(im_height, im_width):
"""Get a real image.
    Parameters
    ----------
im_height : Int
The image height.
im_width : Int
The image width.
Returns
-------
data: Data
The image array.
"""
repo_base = "https://github.com/dmlc/web-data/raw/master/tensorflow/models/InceptionV1/"
img_name = "elephant-299.jpg"
image_url = os.path.join(repo_base, img_name)
img_path = download_testdata(image_url, img_name, module="data")
image = Image.open(img_path).resize((im_height, im_width))
x = np.array(image).astype("uint8")
data = np.reshape(x, (1, im_height, im_width, 3))
return data
def get_mobilenet_model():
"""Return mobilenet model."""
model_url = "https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz"
model_path = download_testdata(
model_url, "mobilenet_v1_1.0_224_quant.tgz", module=["tf", "official"]
)
model_dir = os.path.dirname(model_path)
extract(model_path)
tflite_model_file = os.path.join(model_dir, "mobilenet_v1_1.0_224_quant.tflite")
tflite_model_buf = open(tflite_model_file, "rb").read()
try:
import tflite
return tflite.Model.GetRootAsModel(tflite_model_buf, 0)
except AttributeError:
import tflite.Model
return tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)
def get_input_tensor_name():
"""Return input name."""
return "input"
def compile_model_to_relay(model):
"""Compile model to relay.
    Parameters
    ----------
model : Model
The input model.
Returns
-------
mod: Module
The relay module.
params: Parameters
The model parameters.
"""
input_tensor = get_input_tensor_name()
input_shape = (1, 224, 224, 3)
input_dtype = "uint8"
mod, params = relay.frontend.from_tflite(
model,
shape_dict={input_tensor: input_shape},
dtype_dict={input_tensor: input_dtype},
)
return mod, params
def run_model(mod, params=None, opts=None):
"""Run model.
    Parameters
    ----------
mod: Module
The relay module.
params: Parameters
The model parameters.
opts: Dict
The compiler options.
Returns
-------
out: Data
The output data.
"""
with transform.PassContext(opt_level=3, config={"relay.ext.verilator.options": opts}):
lib = relay.build(mod, target="llvm", params=params)
module = runtime.GraphModule(lib["default"](tvm.cpu()))
image_data = get_real_image(224, 224)
input_tensor = get_input_tensor_name()
module.set_input(input_tensor, image_data)
module.run()
out = module.get_output(0).numpy()
return out
def get_labels():
"""Return labels."""
label_file_url = "".join(
[
"https://raw.githubusercontent.com/",
"tensorflow/tensorflow/master/tensorflow/lite/java/demo/",
"app/src/main/assets/",
"labels_mobilenet_quant_v1_224.txt",
]
)
label_file = "labels_mobilenet_quant_v1_224.txt"
label_path = download_testdata(label_file_url, label_file, module="data")
# List of 1001 classes
with open(label_path) as f:
labels = f.readlines()
return labels
def check_result(res):
"""Check prediction."""
labels = get_labels()
predictions = np.squeeze(res)
prediction = np.argmax(predictions)
# 387 is the elephant
assert prediction == 387
def print_test_info(lanes, cycles):
"""Print test info
    Parameters
    ----------
lanes : Int
The number of vector lanes.
cycles : Int
The number of cycles.
"""
print(
"[mobilenet] vector-lanes:{} number of cycles:{} spent in nn.bias_add".format(lanes, cycles)
)
def METHOD_NAME():
"""Skip test if tensorflow-lite is not installed."""
try:
import tflite
return True
except:
return False
@pytest.mark.skipif(skip_test(), reason="Skip because Verilator codegen is not available")
def tmobilenet(lanes):
"""Mobilenet test template.
    Parameters
    ----------
lanes : Int
The number of vector lanes.
"""
if skip_test():
return
if not METHOD_NAME():
return
model = get_mobilenet_model()
mod, params = compile_model_to_relay(model)
mod = offload(mod)
lib = compile_hardware(lanes)
opts = compiler_opts(lib)
clear_stats()
res = run_model(mod, params, opts)
values = stats()
check_result(res)
print_test_info(lanes, values["cycle_counter"])
def test_mobilenet():
"""Mobilenet tests."""
tmobilenet(4)
tmobilenet(32)
|
3,156 |
codegen config
|
import contextlib
import copy
import pickle
import unittest
from types import FunctionType, ModuleType
from typing import Any, Dict, Set
from unittest import mock
# Types saved/loaded in configs
CONFIG_TYPES = (int, float, bool, type(None), str, list, set, tuple, dict)
def install_config_module(module):
"""
Converts a module-level config into a `ConfigModule()`
"""
class ConfigModuleInstance(ConfigModule):
_bypass_keys = set()
def visit(source, dest, prefix):
"""Walk the module structure and move everything to module._config"""
for key, value in list(source.__dict__.items()):
if key.startswith("__") or isinstance(value, (ModuleType, FunctionType)):
continue
name = f"{prefix}{key}"
if isinstance(value, CONFIG_TYPES):
config[name] = value
default[name] = value
if dest is module:
delattr(module, key)
elif isinstance(value, type):
assert value.__module__ == module.__name__
# a subconfig with `class Blah:` syntax
proxy = SubConfigProxy(module, f"{name}.")
visit(value, proxy, f"{name}.")
setattr(dest, key, proxy)
else:
raise AssertionError(f"Unhandled config {key}={value} ({type(value)})")
config = dict()
default = dict()
visit(module, module, "")
module._config = config
module._default = default
module._allowed_keys = set(config.keys())
module.__class__ = ConfigModuleInstance
class ConfigModule(ModuleType):
# The default values of the configuration settings. This can be used to
# determine if the config has been changed or not.
_default: Dict[str, Any]
# The actual configuration settings. E.g., torch._dynamo.config.debug
# would live as "debug" in the key, and torch._inductor.config.triton.cudagraphs
# maps as "triton.cudagraphs"
_config: Dict[str, Any]
_allowed_keys: Set[str]
_bypass_keys: Set[str]
def __init__(self):
raise NotImplementedError(
f"use {__name__}.install_config_module(sys.modules[__name__])"
)
def __setattr__(self, name, value):
if name in self._bypass_keys:
super().__setattr__(name, value)
elif name not in self._allowed_keys:
raise AttributeError(f"{self.__name__}.{name} does not exist")
else:
self._config[name] = value
def __getattr__(self, name):
try:
return self._config[name]
except KeyError:
# make hasattr() work properly
raise AttributeError(f"{self.__name__}.{name} does not exist")
def __delattr__(self, name):
# must support delete because unittest.mock.patch deletes
# then recreate things
del self._config[name]
def save_config(self):
"""Convert config to a pickled blob"""
config = dict(self._config)
for key in config.get("_save_config_ignore", ()):
config.pop(key)
return pickle.dumps(config, protocol=2)
def METHOD_NAME(self):
"""Convert config to Python statements that replicate current config.
This does NOT include config settings that are at default values.
"""
lines = []
mod = self.__name__
for k, v in self._config.items():
if k in self._config.get("_save_config_ignore", ()):
continue
if v == self._default[k]:
continue
lines.append(f"{mod}.{k} = {v!r}")
return "\n".join(lines)
def load_config(self, data):
"""Restore from a prior call to save_config()"""
self.to_dict().update(pickle.loads(data))
def to_dict(self):
return self._config
def get_config_copy(self):
return copy.deepcopy(self._config)
def patch(self, arg1=None, arg2=None, **kwargs):
"""
Decorator and/or context manager to make temporary changes to a config.
As a decorator:
@config.patch("name", val)
@config.patch(name1=val1, name2=val2):
@config.patch({"name1": val1, "name2", val2})
def foo(...):
...
As a context manager:
with config.patch("name", val):
...
"""
if arg1 is not None:
if arg2 is not None:
# patch("key", True) syntax
changes = {arg1: arg2}
else:
# patch({"key": True}) syntax
changes = arg1
assert not kwargs
else:
# patch(key=True) syntax
changes = kwargs
assert arg2 is None
assert isinstance(changes, dict), f"expected `dict` got {type(changes)}"
prior = {}
config = self
class ConfigPatch(ContextDecorator):
def __enter__(self):
assert not prior
for key in changes.keys():
# KeyError on invalid entry
prior[key] = config._config[key]
config._config.update(changes)
def __exit__(self, exc_type, exc_val, exc_tb):
config._config.update(prior)
prior.clear()
return ConfigPatch()
class ContextDecorator(contextlib.ContextDecorator):
"""
Same as contextlib.ContextDecorator, but with support for
`unittest.TestCase`
"""
def __call__(self, func):
if isinstance(func, type) and issubclass(func, unittest.TestCase):
class _TestCase(func):
@classmethod
def setUpClass(cls):
self.__enter__()
try:
super().setUpClass()
except Exception:
self.__exit__(None, None, None)
raise
@classmethod
def tearDownClass(cls):
try:
super().tearDownClass()
finally:
self.__exit__(None, None, None)
_TestCase.__name__ = func.__name__
_TestCase.__qualname__ = func.__qualname__
_TestCase.__module__ = func.__module__
return _TestCase
return super().__call__(func)
class SubConfigProxy:
"""
Shim to redirect to main config.
`config.triton.cudagraphs` maps to _config["triton.cudagraphs"]
"""
def __init__(self, config, prefix):
# `super().__setattr__` to bypass custom `__setattr__`
super().__setattr__("_config", config)
super().__setattr__("_prefix", prefix)
def __setattr__(self, name, value):
return self._config.__setattr__(self._prefix + name, value)
def __getattr__(self, name):
return self._config.__getattr__(self._prefix + name)
def __delattr__(self, name):
return self._config.__delattr__(self._prefix + name)
def patch_object(obj, name, value):
"""
Workaround `mock.patch.object` issue with ConfigModule
"""
if isinstance(obj, ConfigModule):
return obj.patch(name, value)
return mock.patch.object(obj, name, value)
|
3,157 |
model to pk
|
"""Define class based views for the various API views."""
import json
import logging
from django.db.models import Model
from django.http import Http404
from rest_framework import mixins, viewsets
from rest_framework.exceptions import NotFound
from rest_framework.response import Response
from tracker import logutil
from tracker.api.messages import GENERIC_NOT_FOUND
from tracker.api.pagination import TrackerPagination
from tracker.api.permissions import UNAUTHORIZED_OBJECT
from tracker.api.serializers import (
EventSerializer,
RunnerSerializer,
SpeedRunSerializer,
)
from tracker.models.event import Event, Runner, SpeedRun
log = logging.getLogger(__name__)
class FlatteningViewSetMixin(object):
"""Override a view set's data query methods in order to have a flat dictionary of objects
rather than the REST default of a nested tree.
"""
def list(self, request, *args, **kwargs):
"""Change the response type to be a dictionary if flat related objects have been requested."""
log.debug('query params: %s', request.query_params)
flatten = request.query_params.get('include', None)
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
serializer = self.get_serializer(page, many=True)
log.debug(serializer.data)
# if we need to flatten, it's time to walk this dictionary
if flatten:
targets = flatten.split(',')
prepared_data = self._flatten_data(serializer.data, targets)
else:
prepared_data = serializer.data
log.debug(prepared_data)
return self.get_paginated_response(prepared_data)
def retrieve(self, request, *args, **kwargs):
"""Change the response type to be a dictionary if flat related objects have been requested."""
log.debug('query params: %s', request.query_params)
instance = self.get_object()
serializer = self.get_serializer(instance)
log.debug(serializer.data)
flatten = request.query_params.get('include', None)
# if we need to flatten, it's time to walk this dictionary
if flatten:
targets = flatten.split(',')
prepared_data = self._flatten_data([serializer.data], targets)
else:
prepared_data = serializer.data
log.debug(prepared_data)
return Response(prepared_data)
@staticmethod
def _flatten_data(initial_data, targets):
log.debug('targets for flattening: %s', targets)
primary_objs = list()
obj_label = None
for item in initial_data:
obj_label = '{0:s}s'.format(item['type'])
primary_objs.append(dict(item))
prepared_data = {obj_label: primary_objs}
for which in targets:
log.debug('searching for target %s', which)
target_objs = dict()
for item in primary_objs:
log.debug('searching in %s', item)
hits = item.get(which, [])
if hits:
# winch this into a list if it isn't a many=True field)
if not isinstance(hits, list):
log.debug('winching %s into a list', hits)
hits = [hits]
new_hit_list = list()
for hit in hits:
log.debug('found a hit: %s', hit)
target_objs[hit['id']] = hit
new_hit_list.append(hit['id'])
item[which] = new_hit_list
prepared_data[which] = list(target_objs.values())
return prepared_data
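    # A rough sketch of the flattening for include=runners (field names and ids
    # are illustrative, not taken from a real response):
    #   input:  [{'type': 'speedrun', 'id': 1, 'runners': [{'id': 7, 'type': 'runner'}]}]
    #   output: {'speedruns': [{'type': 'speedrun', 'id': 1, 'runners': [7]}],
    #            'runners': [{'id': 7, 'type': 'runner'}]}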
class EventNestedMixin:
def get_queryset(self):
queryset = super().get_queryset()
event_pk = self.kwargs.get('event_pk', None)
if event_pk:
event = EventViewSet(
kwargs={'pk': event_pk}, request=self.request
).get_object()
queryset = self.get_event_filter(queryset, event)
return queryset
def get_event_filter(self, queryset, event):
return queryset.filter(event=event)
def get_event_from_request(self, request):
if 'event' in request.data:
try:
return Event.objects.filter(pk=request.data['event']).first()
except (TypeError, ValueError):
pass
return None
def is_event_locked(self, request):
event = self.get_event_from_request(request)
return event and event.locked
def generic_404(exception_handler):
def _inner(exc, context):
# override the default messaging for 404s
if isinstance(exc, Http404):
exc = NotFound(detail=GENERIC_NOT_FOUND)
if isinstance(exc, NotFound) and exc.detail == NotFound.default_detail:
exc.detail = GENERIC_NOT_FOUND
return exception_handler(exc, context)
return _inner
def METHOD_NAME(model):
if isinstance(model, Model):
return model.pk
raise TypeError
class TrackerCreateMixin(mixins.CreateModelMixin):
def perform_create(self, serializer):
super().perform_create(serializer)
logutil.addition(self.request, serializer.instance)
class TrackerUpdateMixin(mixins.UpdateModelMixin):
def perform_update(self, serializer):
old_values = {}
for key, value in serializer.initial_data.items():
if key not in serializer.fields:
continue
old_values[key] = getattr(serializer.instance, key)
if isinstance(old_values[key], Model):
old_values[key] = old_values[key].pk
super().perform_update(serializer)
changed_values = {}
for key, value in old_values.items():
if value != serializer.data[key]:
changed_values[key] = {'old': value, 'new': serializer.data[key]}
if changed_values:
logutil.change(
self.request,
serializer.instance,
json.dumps(changed_values, default=METHOD_NAME),
)
class TrackerReadViewSet(viewsets.ReadOnlyModelViewSet):
def permission_denied(self, request, message=None, code=None):
if code == UNAUTHORIZED_OBJECT:
raise Http404
else:
super().permission_denied(request, message=message, code=code)
def get_exception_handler(self):
return generic_404(super().get_exception_handler())
class EventViewSet(FlatteningViewSetMixin, viewsets.ReadOnlyModelViewSet):
queryset = Event.objects.with_annotations().all()
serializer_class = EventSerializer
pagination_class = TrackerPagination
def get_serializer(self, *args, **kwargs):
serializer_class = self.get_serializer_class()
with_totals = self.request.query_params.get('totals') is not None
return serializer_class(*args, **kwargs, with_totals=with_totals)
class RunnerViewSet(FlatteningViewSetMixin, viewsets.ReadOnlyModelViewSet):
queryset = Runner.objects.all()
serializer_class = RunnerSerializer
pagination_class = TrackerPagination
class SpeedRunViewSet(FlatteningViewSetMixin, viewsets.ReadOnlyModelViewSet):
queryset = SpeedRun.objects.select_related('event').prefetch_related(
'runners', 'hosts', 'commentators'
)
serializer_class = SpeedRunSerializer
pagination_class = TrackerPagination
|
3,158 |
initial seed
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.accelerator.abstract_accelerator import DeepSpeedAccelerator
import oneccl_bindings_for_pytorch # noqa: F401 # type: ignore
import psutil
import os
# accelerator for Intel CPU
class CPU_Accelerator(DeepSpeedAccelerator):
def __init__(self):
self._name = 'cpu'
self._communication_backend_name = 'ccl'
self.max_mem = psutil.Process().memory_info().rss
def is_synchronized_device(self):
return True
# Device APIs
def device_name(self, device_index=None):
return 'cpu'
def device(self, device_index=None):
return None
def set_device(self, device_index):
return
def current_device(self):
return os.environ.get('LOCAL_RANK', 0)
def current_device_name(self):
return 'cpu'
def device_count(self):
device_count = int(os.environ.get('LOCAL_SIZE', 0))
if device_count > 0:
return device_count
else:
from deepspeed.utils.numa import get_numa_cores
# Count NUMA node for number of cpu accelerators. On machine with HBM
# In flat mode, HBM is in separate NUMA node with no cores on this node.
# Ignore these NUMA nodes with no cores.
numa_core_lists = get_numa_cores()
numa_count = 0
prev_core_list = []
for core_list in numa_core_lists:
if len(core_list) > 0 and core_list != prev_core_list:
numa_count += 1
prev_core_list = core_list
return numa_count
def synchronize(self, device_index=None):
return
# RNG APIs
def random(self):
return torch.random
def set_rng_state(self, new_state, device_index=None):
        if device_index is None:
return torch.set_rng_state(new_state)
return torch.set_rng_state(new_state, device_index)
def get_rng_state(self, device_index=None):
return torch.get_rng_state()
def manual_seed(self, seed):
return torch.manual_seed(seed)
def manual_seed_all(self, seed):
return torch.manual_seed(seed)
def METHOD_NAME(self, seed):
return torch.METHOD_NAME(seed)
def default_generator(self, device_index):
return torch.default_generator
# Streams/Events
@property
def Stream(self):
return None
def stream(self, stream):
from deepspeed.runtime.utils import noop_context
return noop_context()
def current_stream(self, device_index=None):
return None
def default_stream(self, device_index=None):
return None
@property
def Event(self):
return None
# Memory management
def empty_cache(self):
return
def get_rss(self):
mem = psutil.Process().memory_info().rss
if mem > self.max_mem:
self.max_mem = mem
return mem
def reset_rss(self):
mem = psutil.Process().memory_info().rss
self.max_mem = mem
return mem
def memory_allocated(self, device_index=None):
return self.get_rss()
def max_memory_allocated(self, device_index=None):
self.get_rss()
return self.max_mem
def reset_max_memory_allocated(self, device_index=None):
self.reset_rss()
return
def memory_cached(self, device_index=None):
return self.get_rss()
def max_memory_cached(self, device_index=None):
self.get_rss()
return self.max_mem
def reset_max_memory_cached(self, device_index=None):
self.reset_rss()
return
def memory_stats(self, device_index=None):
mem = self.get_rss()
mem_stat = {}
mem_stat['allocated_bytes.all.current'] = mem
mem_stat['allocated_bytes.all.peak'] = self.max_mem
return mem_stat
def reset_peak_memory_stats(self, device_index=None):
self.reset_rss()
return
def memory_reserved(self, device_index=None):
return self.get_rss()
def max_memory_reserved(self, device_index=None):
self.get_rss()
return self.max_mem
def total_memory(self, device_index=None):
return psutil.virtual_memory().total
# Misc
def amp(self):
return torch.cpu.amp
def is_available(self):
return True
def range_push(self, msg):
# TODO itt is currently not supported yet
# return torch.profiler.itt.range_push(msg)
return
def range_pop(self):
# TODO itt is currently not supported yet
# return torch.profiler.itt.range_pop()
return
def lazy_call(self, callback):
return callback()
def communication_backend_name(self):
return self._communication_backend_name
# Data types
def is_bf16_supported(self):
return True
def is_fp16_supported(self):
return False
def supported_dtypes(self):
return [torch.float, torch.bfloat16]
# Tensor operations
@property
def BFloat16Tensor(self):
return torch.BFloat16Tensor
@property
def ByteTensor(self):
return torch.ByteTensor
@property
def DoubleTensor(self):
return torch.DoubleTensor
@property
def FloatTensor(self):
return torch.FloatTensor
@property
def HalfTensor(self):
return torch.HalfTensor
@property
def IntTensor(self):
return torch.IntTensor
@property
def LongTensor(self):
return torch.LongTensor
def pin_memory(self, tensor):
return tensor
def op_builder_dir(self):
try:
# is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
# if successful this also means we're doing a local install and not JIT compile path
from op_builder import __deepspeed__ # noqa: F401 # type: ignore
return "op_builder.cpu"
except ImportError:
return "deepspeed.ops.op_builder.cpu"
def on_accelerator(self, tensor):
device_str = str(tensor.device)
if device_str.startswith('cpu'):
return True
else:
return False
# create an instance of op builder and return, name specified by class_name
def create_op_builder(self, op_name):
builder_class = self.get_op_builder(op_name)
        if builder_class is not None:
return builder_class()
return None
# return an op builder class, name specified by class_name
def get_op_builder(self, class_name):
try:
# is op_builder from deepspeed or a 3p version? this should only succeed if it's deepspeed
# if successful this also means we're doing a local install and not JIT compile path
from op_builder import __deepspeed__ # noqa: F401 # type: ignore
from op_builder.cpu import CCLCommBuilder, FusedAdamBuilder, CPUAdamBuilder, NotImplementedBuilder
except ImportError:
from deepspeed.ops.op_builder.cpu import CCLCommBuilder, FusedAdamBuilder, CPUAdamBuilder, NotImplementedBuilder
if class_name == "CCLCommBuilder":
return CCLCommBuilder
elif class_name == "FusedAdamBuilder":
return FusedAdamBuilder
elif class_name == "CPUAdamBuilder":
return CPUAdamBuilder
else:
# return a NotImplementedBuilder to avoid get NoneType[Name] in unit tests
return NotImplementedBuilder
def build_extension(self):
from torch.utils.cpp_extension import BuildExtension
return BuildExtension
|
3,159 |
process row
|
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import csv
import logging
import requests
from tempfile import NamedTemporaryFile
from django.core.management.base import BaseCommand
from django.conf import settings
from aquifers.models import WaterRightsLicence, WaterRightsPurpose, Aquifer
from wells.models import Well
logger = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Downloads licences from DataBC and stores them locally.
"""
def add_arguments(self, parser):
parser.add_argument('filename', type=str, nargs="?", default=None,
help='The file to import. If not specified, download latest')
parser.add_argument('-d', '--dev-fixtures', action='store_const', const=1,
help='Set this if you do not have a full production database, and only have dev fixtures.')
def handle(self, *args, **options):
if options['filename']:
filename = options['filename']
else:
logging.info("Downloading licences from DataBC")
input_file = NamedTemporaryFile(delete=False)
url = "https://openmaps.gov.bc.ca/geo/pub/wfs?SERVICE=WFS&VERSION=2.0.0&REQUEST=GetFeature&outputFormat=csv&typeNames=WHSE_WATER_MANAGEMENT.WLS_WATER_RIGHTS_LICENCES_SV&count=10000&cql_filter=POD_SUBTYPE NOT LIKE 'POD'"
r = requests.get(url, allow_redirects=True)
input_file.write(r.content)
filename = input_file.name
input_file.close()
error_count = 0
with open(filename, newline='') as csvfile:
reader = csv.DictReader(csvfile)
# used in DEBUG mode only.
counter = 0
num_wells = Well.objects.count()
# Delete all the water rights licenses as the WLS_WRL_SYSIDs can change (re: WATER-1115)
WaterRightsLicence.objects.all().delete()
use_dev_fixtures = options.get('dev_fixtures')
# in dev envs, only import 100 licences.
for row in reader:
counter += 1
well = None
aquifer = None
                # If using the dev_fixtures option, limit the data for testing.
if use_dev_fixtures:
if counter > 100:
break
well = Well.objects.all()[counter % num_wells:][0]
# assign some wells to aquifers and leave other wells unassociated.
if not well.aquifer and counter % 2:
well.aquifer = Aquifer.objects.first()
well.save()
aquifer = well.aquifer
try:
self.METHOD_NAME(row, use_dev_fixtures=use_dev_fixtures, well=well, aquifer=aquifer)
                except Exception:
error_count += 1
logger.exception('Error processing CSV row WLS_WRL_SYSID=%s', row['WLS_WRL_SYSID'])
self.stdout.write(self.style.SUCCESS(f'Licence import complete with {error_count} errors.'))
def METHOD_NAME(self, row, use_dev_fixtures=False, well=None, aquifer=None):
if row['POD_SUBTYPE'].strip() not in ['PWD', 'PG']:
# [Nicole]: (we are only concerned with PWD and PG data – exclude any
# rows with POD. POD refers to surface water which is out of scope for GWELLS)
return
if not row['SOURCE_NAME'].strip().isdigit() and not row['WELL_TAG_NUMBER'].strip().isdigit():
# Licence must be for a well or aquifer
return
logging.info("importing licence #{}".format(row['LICENCE_NUMBER']))
# Check the Licence is for a valid Aquifer and Well
# the if check here allows this function to be called with a specific
# well or aquifer for dev/test environments.
if not aquifer and row.get('SOURCE_NAME', '').strip().isdigit():
try:
aquifer = Aquifer.objects.get(pk=row['SOURCE_NAME'])
except Aquifer.DoesNotExist:
pass
if not well and row.get('WELL_TAG_NUMBER', '').strip().isdigit():
try:
well = Well.objects.get(pk=row['WELL_TAG_NUMBER'])
except Well.DoesNotExist:
pass
well_updated = False
try:
# Maintain code table with water rights purpose.
purpose = WaterRightsPurpose.objects.get(
code=row['PURPOSE_USE_CODE'].strip())
except WaterRightsPurpose.DoesNotExist:
purpose = WaterRightsPurpose.objects.create(
code=row['PURPOSE_USE_CODE'].strip(),
description=row['PURPOSE_USE'].strip())
licence = WaterRightsLicence(wrl_sysid=row['WLS_WRL_SYSID'])
licence.licence_number = row['LICENCE_NUMBER'].strip()
licence.quantity_flag = row['QUANTITY_FLAG'].strip()
licence.purpose = purpose
# Convert quantity to m3/year
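        # e.g. 1 m3/sec corresponds to 60*60*24*365 = 31,536,000 m3/year and 1 m3/day to 365 m3/year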
quantity = float(row['QUANTITY'].strip() or '0')
if row['QUANTITY_UNITS'].strip() == "m3/sec":
quantity = quantity * 60*60*24*365
elif row['QUANTITY_UNITS'].strip() == "m3/day":
quantity = quantity * 365
elif row['QUANTITY_UNITS'].strip() == "m3/year":
quantity = quantity
else:
raise Exception('unknown quantity unit: `{}`'.format(row['QUANTITY_UNITS']))
licence.quantity = quantity
licence.save()
if aquifer and well and not well.aquifer:
well.aquifer = aquifer
well_updated = True
if well and licence not in well.licences.all():
well.licences.add(licence)
well_updated = True
if well_updated:
well.save()
        logger.info('associated well={} aquifer={} licence_sysid={}'.format(
well.pk if well else "None",
aquifer.pk if aquifer else "None",
licence.pk
))
return licence
|
3,160 |
test fv
|
# SPDX-License-Identifier: LGPL-3.0-or-later
import os
import numpy as np
from common import (
Data,
j_loader,
tests_path,
)
from deepmd.common import (
data_requirement,
j_must_have,
)
from deepmd.env import (
GLOBAL_NP_FLOAT_PRECISION,
tf,
)
from deepmd.infer.data_modifier import (
DipoleChargeModifier,
)
from deepmd.train.run_options import (
RunOptions,
)
from deepmd.train.trainer import (
DPTrainer,
)
from deepmd.utils.data_system import (
DeepmdDataSystem,
)
if GLOBAL_NP_FLOAT_PRECISION == np.float32:
global_default_fv_hh = 1e-2
global_default_dw_hh = 1e-2
global_default_places = 3
else:
global_default_fv_hh = 1e-6
global_default_dw_hh = 1e-4
global_default_places = 5
modifier_datapath = "data_modifier"
INPUT = os.path.join(modifier_datapath, "dipole.json")
class TestDataModifier(tf.test.TestCase):
def setUp(self):
# with tf.variable_scope('load', reuse = False) :
tf.reset_default_graph()
self._setUp()
def tearDown(self):
tf.reset_default_graph()
def _setUp(self):
run_opt = RunOptions(
restart=None, init_model=None, log_path=None, log_level=30, mpi_log="master"
)
jdata = j_loader(INPUT)
# init model
model = DPTrainer(jdata, run_opt=run_opt)
rcut = model.model.get_rcut()
# init data system
systems = j_must_have(jdata["training"], "systems")
# systems[0] = tests_path / systems[0]
systems = [tests_path / ii for ii in systems]
set_pfx = j_must_have(jdata["training"], "set_prefix")
batch_size = j_must_have(jdata["training"], "batch_size")
test_size = j_must_have(jdata["training"], "numb_test")
data = DeepmdDataSystem(
systems, batch_size, test_size, rcut, set_prefix=set_pfx
)
data.add_dict(data_requirement)
# clear the default graph
tf.reset_default_graph()
# build the model with stats from the first system
model.build(data)
# freeze the graph
with self.test_session() as sess:
init_op = tf.global_variables_initializer()
sess.run(init_op)
graph = tf.get_default_graph()
input_graph_def = graph.as_graph_def()
nodes = "o_dipole,o_rmat,o_rmat_deriv,o_nlist,o_rij,descrpt_attr/rcut,descrpt_attr/ntypes,descrpt_attr/sel,descrpt_attr/ndescrpt,model_attr/tmap,model_attr/sel_type,model_attr/model_type,model_attr/output_dim,model_attr/model_version"
output_graph_def = tf.graph_util.convert_variables_to_constants(
sess, input_graph_def, nodes.split(",")
)
output_graph = str(
tests_path / os.path.join(modifier_datapath, "dipole.pb")
)
with tf.gfile.GFile(output_graph, "wb") as f:
f.write(output_graph_def.SerializeToString())
def METHOD_NAME(self):
# with tf.variable_scope('load', reuse = False) :
self._test_fv()
def _test_fv(self):
dcm = DipoleChargeModifier(
str(tests_path / os.path.join(modifier_datapath, "dipole.pb")),
[-8],
[6, 1],
1,
0.25,
)
data = Data()
coord, box, atype = data.get_data()
atype = atype[0]
ve, vf, vv = dcm.eval(coord, box, atype)
hh = global_default_fv_hh
hh = 1e-4
places = global_default_places
places = 1
nframes = coord.shape[0]
ndof = coord.shape[1]
natoms = ndof // 3
vf = np.reshape(vf, [nframes, -1])
for ii in range(ndof):
coordp = np.copy(coord)
coordm = np.copy(coord)
coordp[:, ii] += hh
coordm[:, ii] -= hh
ep, _, __ = dcm.eval(coordp, box, atype, eval_fv=False)
em, _, __ = dcm.eval(coordm, box, atype, eval_fv=False)
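            # central finite difference of the energy; the analytic force should equal minus the
            # energy gradient, hence the comparison against vf below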
num_f = -(ep - em) / (2.0 * hh)
np.testing.assert_almost_equal(
vf[:, ii].ravel(),
num_f.ravel(),
places,
err_msg="dof %d does not match" % (ii),
)
box3 = np.reshape(box, [nframes, 3, 3])
rbox3 = np.linalg.inv(box3)
coord3 = np.reshape(coord, [nframes, natoms, 3])
rcoord3 = np.matmul(coord3, rbox3)
num_deriv = np.zeros([nframes, 3, 3])
for ii in range(3):
for jj in range(3):
box3p = np.copy(box3)
box3m = np.copy(box3)
box3p[:, ii, jj] = box3[:, ii, jj] + hh
box3m[:, ii, jj] = box3[:, ii, jj] - hh
boxp = np.reshape(box3p, [-1, 9])
boxm = np.reshape(box3m, [-1, 9])
coord3p = np.matmul(rcoord3, box3p)
coord3m = np.matmul(rcoord3, box3m)
coordp = np.reshape(coord3p, [nframes, -1])
coordm = np.reshape(coord3m, [nframes, -1])
ep, _, __ = dcm.eval(coordp, boxp, atype, eval_fv=False)
em, _, __ = dcm.eval(coordm, boxm, atype, eval_fv=False)
num_deriv[:, ii, jj] = -(ep - em) / (2.0 * hh)
# box3t = np.transpose(box3, [0,2,1])
# t_esti = np.matmul(num_deriv, box3t)
num_deriv = np.transpose(num_deriv, [0, 2, 1])
t_esti = np.matmul(num_deriv, box3)
# print(t_esti, '\n', vv.reshape([-1, 3, 3]))
np.testing.assert_almost_equal(
t_esti.ravel(), vv.ravel(), places, err_msg="virial component failed"
)
|
3,161 |
test content search
|
import os
import json
import shutil
import tarfile
import unittest
from click.testing import CliRunner
from rsconnect.main import cli
from rsconnect import VERSION
from rsconnect.models import BuildStatus
from rsconnect.metadata import _normalize_server_url
from .utils import apply_common_args, require_api_key, require_connect
# run these tests in the order they are defined
# because we are integration testing the state file
unittest.TestLoader.sortTestMethodsUsing = None
_bundle_download_dest = "download.tar.gz"
_content_guids = [
"015143da-b75f-407c-81b1-99c4a724341e",
"4ffc819c-065c-420c-88eb-332db1133317",
"bcc74209-3a81-4b9c-acd5-d24a597c256c",
]
_test_build_dir = "rsconnect-build-test"
class TestContentSubcommand(unittest.TestCase):
@classmethod
def tearDownClass(cls):
if os.path.exists(_bundle_download_dest):
os.remove(_bundle_download_dest)
if os.path.exists(_test_build_dir):
shutil.rmtree(_test_build_dir, ignore_errors=True)
def test_version(self):
runner = CliRunner()
result = runner.invoke(cli, ["version"])
self.assertEqual(result.exit_code, 0, result.output)
self.assertIn(VERSION, result.output)
def METHOD_NAME(self):
connect_server = require_connect()
api_key = require_api_key()
runner = CliRunner()
args = ["content", "search"]
apply_common_args(args, server=connect_server, key=api_key)
result = runner.invoke(cli, args)
self.assertEqual(result.exit_code, 0, result.output)
response = json.loads(result.output)
self.assertIsNotNone(response, result.output)
self.assertEqual(len(response), 3, result.output)
def test_content_describe(self):
connect_server = require_connect()
api_key = require_api_key()
runner = CliRunner()
args = ["content", "describe", "-g", _content_guids[0], "-g", _content_guids[1]]
apply_common_args(args, server=connect_server, key=api_key)
result = runner.invoke(cli, args)
self.assertEqual(result.exit_code, 0, result.output)
response = json.loads(result.output)
self.assertIn("id", response[0])
self.assertIn("id", response[1])
self.assertEqual(response[0]["guid"], _content_guids[0])
self.assertEqual(response[1]["guid"], _content_guids[1])
def test_content_download_bundle(self):
connect_server = require_connect()
api_key = require_api_key()
runner = CliRunner()
args = ["content", "download-bundle", "-g", _content_guids[1], "-o", _bundle_download_dest]
apply_common_args(args, server=connect_server, key=api_key)
result = runner.invoke(cli, args)
self.assertEqual(result.exit_code, 0, result.output)
with tarfile.open(_bundle_download_dest, mode="r:gz") as tgz:
self.assertIsNotNone(tgz.extractfile("manifest.json").read())
def test_build(self):
connect_server = require_connect()
api_key = require_api_key()
runner = CliRunner()
# add a content item
args = ["content", "build", "add", "-g", _content_guids[0]]
apply_common_args(args, server=connect_server, key=api_key)
result = runner.invoke(cli, args)
self.assertEqual(result.exit_code, 0, result.output)
self.assertTrue(
os.path.exists("%s/%s.json" % (_test_build_dir, _normalize_server_url(os.environ.get("CONNECT_SERVER"))))
)
# list the "tracked" content
args = ["content", "build", "ls", "-g", _content_guids[0]]
apply_common_args(args, server=connect_server, key=api_key)
result = runner.invoke(cli, args)
self.assertEqual(result.exit_code, 0, result.output)
listing = json.loads(result.output)
self.assertTrue(len(listing) == 1)
self.assertEqual(listing[0]["guid"], _content_guids[0])
self.assertEqual(listing[0]["bundle_id"], "176")
self.assertEqual(listing[0]["rsconnect_build_status"], BuildStatus.NEEDS_BUILD)
# run the build
args = ["content", "build", "run", "--debug"]
apply_common_args(args, server=connect_server, key=api_key)
result = runner.invoke(cli, args)
self.assertEqual(result.exit_code, 0, result.output)
# check that the build succeeded
args = ["content", "build", "ls", "-g", _content_guids[0]]
apply_common_args(args, server=connect_server, key=api_key)
result = runner.invoke(cli, args)
self.assertEqual(result.exit_code, 0, result.output)
listing = json.loads(result.output)
self.assertTrue(len(listing) == 1)
self.assertEqual(listing[0]["rsconnect_build_status"], BuildStatus.COMPLETE)
def test_build_rm(self):
connect_server = require_connect()
api_key = require_api_key()
runner = CliRunner()
# remove a content item
args = ["content", "build", "rm", "-g", _content_guids[0]]
apply_common_args(args, server=connect_server, key=api_key)
result = runner.invoke(cli, args)
self.assertEqual(result.exit_code, 0, result.output)
# check that it was removed
args = ["content", "build", "ls"]
apply_common_args(args, server=connect_server, key=api_key)
result = runner.invoke(cli, args)
self.assertEqual(result.exit_code, 0, result.output)
listing = json.loads(result.output)
self.assertEqual(len(listing), 0, result.output)
|
3,162 |
c z gates1
|
#!/usr/bin/env python3
import numpy as np
import qibo
from qibo import Circuit, gates
class QuantumClassifer:
def __init__(self, nclasses, nqubits, nlayers, RY=True):
"""
Class for a multi-task variational quantum classifier
Args:
            nclasses: int number of classes to be classified
            nqubits: int number of qubits employed in the quantum circuit
            nlayers: int number of layers of the variational circuit ansatz
            RY: if True, parameterized RY rotations are used in each layer;
                if False, parameterized RX,RZ,RX rotations are used (default=True)
"""
self.nclasses = nclasses
self.nqubits = nqubits
self.measured_qubits = int(np.ceil(np.log2(self.nclasses)))
if self.nqubits <= 1:
raise ValueError("nqubits must be larger than 1")
if RY:
def rotations():
for q in range(self.nqubits):
yield gates.RY(q, theta=0)
else:
def rotations():
for q in range(self.nqubits):
yield gates.RX(q, theta=0)
yield gates.RZ(q, theta=0)
yield gates.RX(q, theta=0)
self._circuit = self.ansatz(nlayers, rotations)
def METHOD_NAME(self):
"""Yields CZ gates used in the variational circuit."""
for q in range(0, self.nqubits - 1, 2):
yield gates.CZ(q, q + 1)
def _CZ_gates2(self):
"""Yields CZ gates used in the variational circuit."""
for q in range(1, self.nqubits - 1, 2):
yield gates.CZ(q, q + 1)
yield gates.CZ(0, self.nqubits - 1)
def ansatz(self, nlayers, rotations):
"""
Args:
            nlayers: int number of layers of the variational circuit ansatz
            rotations: callable that yields the parameterized rotation gates of one layer
Returns:
Circuit implementing the variational ansatz
"""
c = Circuit(self.nqubits)
for _ in range(nlayers):
c.add(rotations())
c.add(self.METHOD_NAME())
c.add(rotations())
c.add(self._CZ_gates2())
# Final rotations
c.add(rotations())
# Measurements
c.add(gates.M(*range(self.measured_qubits)))
return c
def Classifier_circuit(self, theta):
"""
Args:
theta: list or numpy.array with the biases and the angles to be used in the circuit
Returns:
Circuit implementing the variational ansatz for angles "theta"
"""
bias = np.array(theta[0 : self.measured_qubits])
angles = theta[self.measured_qubits :]
self._circuit.set_parameters(angles)
return self._circuit
def Predictions(self, circuit, theta, init_state, nshots=10000):
"""
Args:
theta: list or numpy.array with the biases to be used in the circuit
init_state: numpy.array with the quantum state to be classified
nshots: int number of runs of the circuit during the sampling process (default=10000)
Returns:
numpy.array() with predictions for each qubit, for the initial state
"""
bias = np.array(theta[0 : self.measured_qubits])
circuit = circuit(init_state, nshots)
result = circuit.frequencies(binary=False)
prediction = np.zeros(self.measured_qubits)
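        # The loop below estimates the Z expectation value of each measured qubit from the sampled
        # frequencies: outcomes with bit 0 contribute +1 and outcomes with bit 1 contribute -1.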
for qubit in range(self.measured_qubits):
for clase in range(self.nclasses):
binary = bin(clase)[2:].zfill(self.measured_qubits)
prediction[qubit] += result[clase] * (1 - 2 * int(binary[-qubit - 1]))
return prediction / nshots + bias
def square_loss(self, labels, predictions):
"""
Args:
labels: list or numpy.array with the qubit labels of the quantum states to be classified
predictions: list or numpy.array with the qubit predictions for the quantum states to be classified
Returns:
numpy.float32 with the value of the square-loss function
"""
loss = 0
for l, p in zip(labels, predictions):
for qubit in range(self.measured_qubits):
loss += (l[qubit] - p[qubit]) ** 2
return loss / len(labels)
def Cost_function(self, theta, data=None, labels=None, nshots=10000):
"""
Args:
theta: list or numpy.array with the biases and the angles to be used in the circuit
data: numpy.array data[page][word] (this is an array of kets)
labels: list or numpy.array with the labels of the quantum states to be classified
nshots: int number of runs of the circuit during the sampling process (default=10000)
Returns:
numpy.float32 with the value of the square-loss function
"""
circ = self.Classifier_circuit(theta)
Bias = np.array(theta[0 : self.measured_qubits])
predictions = np.zeros(shape=(len(data), self.measured_qubits))
for i, text in enumerate(data):
predictions[i] = self.Predictions(circ, Bias, text, nshots)
s = self.square_loss(labels, predictions)
return s
def minimize(
self, init_theta, data=None, labels=None, nshots=10000, method="Powell"
):
"""
Args:
            init_theta: list or numpy.array with the initial biases and angles to be used in the circuit
            data: numpy.array data[page][word] (this is an array of kets) to be classified
            labels: list or numpy.array with the labels of the quantum states to be classified
            nshots: int number of runs of the circuit during the sampling process (default=10000)
            method: str classical optimizer for the minimization. All methods from scipy.optimize.minimize are supported (default='Powell')
Returns:
numpy.float64 with value of the minimum found, numpy.ndarray with the optimal angles
"""
from scipy.optimize import minimize
result = minimize(
self.Cost_function, init_theta, args=(data, labels, nshots), method=method
)
loss = result.fun
optimal_angles = result.x
return loss, optimal_angles
def Accuracy(self, labels, predictions, sign=True, tolerance=1e-2):
"""
Args:
labels: numpy.array with the labels of the quantum states to be classified
predictions: numpy.array with the predictions for the quantum states classified
sign: if True, labels = np.sign(labels) and predictions = np.sign(predictions) (default=True)
tolerance: float tolerance level to consider a prediction correct (default=1e-2)
Returns:
float with the proportion of states classified successfully
"""
        if sign:
labels = [np.sign(label) for label in labels]
predictions = [np.sign(prediction) for prediction in predictions]
accur = 0
for l, p in zip(labels, predictions):
if np.allclose(l, p, rtol=0.0, atol=tolerance):
accur += 1
accur = accur / len(labels)
return accur
|
3,163 |
validate content type
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Validates responses and their security features."""
import dataclasses
from typing import Collection
from werkzeug.datastructures import Headers
from werkzeug import http
from tensorboard.util import tb_logging
logger = tb_logging.get_logger()
_HTML_MIME_TYPE = "text/html"
_CSP_DEFAULT_SRC = "default-src"
# Whitelist of allowed CSP violations.
_CSP_IGNORE = {
# Polymer-based code uses unsafe-inline.
"style-src": ["'unsafe-inline'", "data:"],
# Used in canvas
"img-src": ["blob:", "data:"],
# Used by numericjs.
# TODO(stephanwlee): remove it eventually.
"script-src": ["'unsafe-eval'"],
"font-src": ["data:"],
}
@dataclasses.dataclass(frozen=True)
class Directive:
"""Content security policy directive.
Loosely follow vocabulary from https://www.w3.org/TR/CSP/#framework-directives.
Attributes:
name: A non-empty string.
value: A collection of non-empty strings.
"""
name: str
value: Collection[str]
def _maybe_raise_value_error(error_msg):
logger.warning("In 3.0, this warning will become an error:\n%s" % error_msg)
# TODO(3.x): raise a value error.
class SecurityValidatorMiddleware:
"""WSGI middleware validating security on response.
It validates:
- responses have Content-Type
- responses have X-Content-Type-Options: nosniff
- text/html responses have CSP header. It also validates whether the CSP
headers pass basic requirement. e.g., default-src should be present, cannot
use "*" directive, and others. For more complete list, please refer to
_validate_csp_policies.
Instances of this class are WSGI applications (see PEP 3333).
"""
def __init__(self, application):
"""Initializes an `SecurityValidatorMiddleware`.
Args:
application: The WSGI application to wrap (see PEP 3333).
"""
self._application = application
def __call__(self, environ, start_response):
def start_response_proxy(status, headers, exc_info=None):
self._validate_headers(headers)
return start_response(status, headers, exc_info)
return self._application(environ, start_response_proxy)
def _validate_headers(self, headers_list):
headers = Headers(headers_list)
self.METHOD_NAME(headers)
self._validate_x_content_type_options(headers)
self._validate_csp_headers(headers)
def METHOD_NAME(self, headers):
if headers.get("Content-Type"):
return
_maybe_raise_value_error("Content-Type is required on a Response")
def _validate_x_content_type_options(self, headers):
option = headers.get("X-Content-Type-Options")
if option == "nosniff":
return
_maybe_raise_value_error(
'X-Content-Type-Options is required to be "nosniff"'
)
def _validate_csp_headers(self, headers):
mime_type, _ = http.parse_options_header(headers.get("Content-Type"))
if mime_type != _HTML_MIME_TYPE:
return
csp_texts = headers.get_all("Content-Security-Policy")
policies = []
for csp_text in csp_texts:
policies += self._parse_serialized_csp(csp_text)
self._validate_csp_policies(policies)
def _validate_csp_policies(self, policies):
has_default_src = False
violations = []
for directive in policies:
name = directive.name
for value in directive.value:
has_default_src = has_default_src or name == _CSP_DEFAULT_SRC
if value in _CSP_IGNORE.get(name, []):
# There are cases where certain directives are legitimate.
continue
# TensorBoard follows principle of least privilege. However, to make it
# easier to conform to the security policy for plugin authors,
# TensorBoard trusts request and resources originating its server. Also,
# it can selectively trust domains as long as they use https protocol.
# Lastly, it can allow 'none' directive.
# TODO(stephanwlee): allow configuration for whitelist of domains for
# stricter enforcement.
# TODO(stephanwlee): deprecate the sha-based whitelisting.
if (
value == "'self'"
or value == "'none'"
or value.startswith("https:")
or value.startswith("'sha256-")
):
continue
msg = "Illegal Content-Security-Policy for {name}: {value}".format(
name=name, value=value
)
violations.append(msg)
if not has_default_src:
violations.append(
"Requires default-src for Content-Security-Policy"
)
if violations:
_maybe_raise_value_error("\n".join(violations))
def _parse_serialized_csp(self, csp_text):
# See https://www.w3.org/TR/CSP/#parse-serialized-policy.
# Below Steps are based on the algorithm stated in above spec.
# Deviations:
# - it does not warn and ignore duplicative directive (Step 2.5)
# Step 2
csp_srcs = csp_text.split(";")
policy = []
for token in csp_srcs:
# Step 2.1
token = token.strip()
if not token:
# Step 2.2
continue
# Step 2.3
token_frag = token.split(None, 1)
name = token_frag[0]
values = token_frag[1] if len(token_frag) == 2 else ""
# Step 2.4
name = name.lower()
# Step 2.6
value = values.split()
# Step 2.7
directive = Directive(name=name, value=value)
# Step 2.8
policy.append(directive)
return policy
|
3,164 |
canonic index list to biallelic gt list
|
"""
Utility functions only used by unit tests
"""
import textwrap
from collections import defaultdict
from whatshap.core import Read, ReadSet, Genotype
def string_to_readset(s, w=None, sample_ids=None, source_id=0, scale_quality=None):
s = textwrap.dedent(s).strip()
if w is not None:
w = textwrap.dedent(w).strip().split("\n")
rs = ReadSet()
for index, line in enumerate(s.split("\n")):
if len(line) == 0:
continue
if sample_ids is None:
read = Read(f"Read {index + 1}", 50, source_id)
else:
read = Read(f"Read {index + 1}", 50, source_id, sample_ids[index])
for pos, c in enumerate(line):
if c == " ":
continue
q = 1
if w is not None:
q = int(w[index][pos])
if scale_quality is not None:
read.add_variant(position=(pos + 1) * 10, allele=int(c), quality=q * scale_quality)
else:
read.add_variant(position=(pos + 1) * 10, allele=int(c), quality=q)
assert len(read) > 1, "Reads covering less than two variants are not allowed"
rs.add(read)
print(rs)
return rs
def string_to_readset_pedigree(s, w=None, scaling_quality=None):
s = textwrap.dedent(s).strip()
read_sources = []
s2 = ""
for line in s.split("\n"):
if len(line) == 0:
continue
individual = ord(line[0]) - ord("A")
assert 0 <= individual < 26
read_sources.append(individual)
s2 += line[1:] + "\n"
rs = string_to_readset(s=s2, w=w, sample_ids=read_sources, scale_quality=scaling_quality)
print("read_sources:", read_sources)
return rs
def matrix_to_readset(lines):
rs = ReadSet()
index_tracker = 0
for line in lines:
s = line.split()
assert len(s) % 2 == 1, "Not in matrix format."
index = int(s[0])
index_tracker += 1
assert index == index_tracker, "Not in matrix format."
read = Read(f"Read {index}", 50)
for i in range(int(len(s) / 2)):
offset = int(s[2 * i + 1])
for pos, c in enumerate(s[2 * i + 2]):
read.add_variant(position=(offset + pos) * 10, allele=int(c), quality=1)
rs.add(read)
print(rs)
return rs
def flip_cost(variant, target_value):
"""Returns cost of flipping the given read variant to target_value."""
if variant.allele == target_value:
return 0
else:
return variant.quality
def is_ambiguous(assignments):
sets = [set(), set()]
for assignment in assignments:
for s, allele in zip(sets, assignment):
s.add(allele)
return [len(s) > 1 for s in sets]
def column_cost(variants, possible_assignments):
"""Compute cost for one position and return the minimum cost assignment.
Returns ('X','X') if minimum is not unique (i.e. a "tie")."""
costs = []
for allele1, allele2 in possible_assignments:
cost1 = sum(flip_cost(v, allele1) for v in variants[0])
cost2 = sum(flip_cost(v, allele2) for v in variants[1])
costs.append(cost1 + cost2)
l = [(cost, i) for i, cost in enumerate(costs)]
l.sort()
min_cost = l[0][0]
best_assignment = list(possible_assignments[l[0][1]])
# check for ties
counts = defaultdict(int)
for cost, index in l:
counts[cost] += 1
ties = counts[min_cost]
ambiguous = is_ambiguous([possible_assignments[i] for cost, i in l[:ties]])
for i in range(2):
if ambiguous[i]:
best_assignment[i] = 3
return min_cost, best_assignment
def brute_force_phase(read_set, all_heterozygous):
"""Solves MEC by enumerating all possible bipartitions."""
def print(*args):
pass
assert len(read_set) < 10, "Too many reads for brute force"
positions = read_set.get_positions()
if all_heterozygous:
possible_assignments = [(0, 1), (1, 0)]
else:
possible_assignments = [(0, 0), (0, 1), (1, 0), (1, 1)]
# bit i in "partition" encodes to which set read i belongs
best_partition = None
best_cost = None
best_haplotypes = None
solution_count = 0
for partition in range(2 ** len(read_set)):
print(f"Looking at partition {partition:0>{len(read_set)}b}")
# compute cost induced by that partition
cost = 0
haplotypes = []
for p in positions:
# find variants covering this position
variants = [[], []]
for n, read in enumerate(read_set):
i = (partition >> n) & 1
for variant in read:
if variant.position == p:
variants[i].append(variant)
c, assignment = column_cost(variants, possible_assignments)
print(f" position: {p}, variants: {str(variants)} --> cost = {c}")
cost += c
haplotypes.append(assignment)
print(" --> cost for this partitioning:", cost)
if (best_cost is None) or (cost < best_cost):
best_partition = partition
best_cost = cost
best_haplotypes = haplotypes
solution_count = 1
elif cost == best_cost:
solution_count += 1
# Each partition has its inverse with the same cost
assert solution_count % 2 == 0
haplotype1 = "".join([str(allele1) for allele1, allele2 in best_haplotypes])
haplotype2 = "".join([str(allele2) for allele1, allele2 in best_haplotypes])
return (
best_cost,
[(best_partition >> x) & 1 for x in range(len(read_set))],
solution_count // 2,
haplotype1,
haplotype2,
)
def canonic_index_to_biallelic_gt(num_alt, ploidy=2):
"""Takes the numeric VCF representation of a biallelic genotpyte and given ploidy
Diploid:
0 -> 0/0
1 -> 0/1
2 -> 1/1
    Triploid:
0 -> 0/0/0
1 -> 0/0/1
2 -> 0/1/1
3 -> 1/1/1
...
and converts it into a Genotype object
See this link for further explanation:
https://genome.sph.umich.edu/wiki/Relationship_between_Ploidy,_Alleles_and_Genotypes
"""
if 0 <= num_alt <= ploidy:
return Genotype([0] * (ploidy - num_alt) + [1] * (num_alt))
else:
return Genotype([])
def METHOD_NAME(list_int, ploidy=2):
"""Returns a list of diploid, biallelic genotype objects
according to the provided integer representation
See this link for further explanation:
https://genome.sph.umich.edu/wiki/Relationship_between_Ploidy,_Alleles_and_Genotypes
"""
return [canonic_index_to_biallelic_gt(i, ploidy) for i in list_int]
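# Illustration (assumed call): METHOD_NAME([0, 1, 2]) with the default ploidy of 2 yields
# [Genotype([0, 0]), Genotype([0, 1]), Genotype([1, 1])].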
|
3,165 |
test timeouterror deprecated
|
import eventlet
from eventlet import greenio, hubs, greenthread
from eventlet.green import ssl
import tests
def check_hub():
# Clear through the descriptor queue
eventlet.sleep(0)
eventlet.sleep(0)
hub = hubs.get_hub()
for nm in 'get_readers', 'get_writers':
dct = getattr(hub, nm)()
assert not dct, "hub.%s not empty: %s" % (nm, dct)
hub.abort(wait=True)
assert not hub.running
class TestApi(tests.LimitedTestCase):
def test_tcp_listener(self):
socket = eventlet.listen(('0.0.0.0', 0))
assert socket.getsockname()[0] == '0.0.0.0'
socket.close()
check_hub()
def test_connect_tcp(self):
def accept_once(listenfd):
try:
conn, addr = listenfd.accept()
fd = conn.makefile(mode='wb')
conn.close()
fd.write(b'hello\n')
fd.close()
finally:
listenfd.close()
server = eventlet.listen(('0.0.0.0', 0))
eventlet.spawn_n(accept_once, server)
client = eventlet.connect(('127.0.0.1', server.getsockname()[1]))
fd = client.makefile('rb')
client.close()
assert fd.readline() == b'hello\n'
assert fd.read() == b''
fd.close()
check_hub()
@tests.skip_if_no_ssl
def test_connect_ssl(self):
def accept_once(listenfd):
try:
conn, addr = listenfd.accept()
conn.write(b'hello\r\n')
greenio.shutdown_safe(conn)
conn.close()
finally:
greenio.shutdown_safe(listenfd)
listenfd.close()
server = eventlet.wrap_ssl(
eventlet.listen(('0.0.0.0', 0)),
tests.private_key_file,
tests.certificate_file,
server_side=True
)
eventlet.spawn_n(accept_once, server)
raw_client = eventlet.connect(('127.0.0.1', server.getsockname()[1]))
client = ssl.wrap_socket(raw_client)
fd = client.makefile('rb', 8192)
assert fd.readline() == b'hello\r\n'
try:
self.assertEqual(b'', fd.read(10))
except greenio.SSL.ZeroReturnError:
# if it's a GreenSSL object it'll do this
pass
greenio.shutdown_safe(client)
client.close()
check_hub()
def test_001_trampoline_timeout(self):
server_sock = eventlet.listen(('127.0.0.1', 0))
bound_port = server_sock.getsockname()[1]
def server(sock):
client, addr = sock.accept()
eventlet.sleep(0.1)
server_evt = eventlet.spawn(server, server_sock)
eventlet.sleep(0)
try:
desc = eventlet.connect(('127.0.0.1', bound_port))
hubs.trampoline(desc, read=True, write=False, timeout=0.001)
except eventlet.Timeout:
pass # test passed
else:
assert False, "Didn't timeout"
server_evt.wait()
check_hub()
def test_timeout_cancel(self):
server = eventlet.listen(('0.0.0.0', 0))
bound_port = server.getsockname()[1]
done = [False]
def client_closer(sock):
while True:
(conn, addr) = sock.accept()
conn.close()
def go():
desc = eventlet.connect(('127.0.0.1', bound_port))
try:
hubs.trampoline(desc, read=True, timeout=0.1)
except eventlet.Timeout:
assert False, "Timed out"
server.close()
desc.close()
done[0] = True
greenthread.spawn_after_local(0, go)
server_coro = eventlet.spawn(client_closer, server)
while not done[0]:
eventlet.sleep(0)
eventlet.kill(server_coro)
check_hub()
def test_killing_dormant(self):
DELAY = 0.1
state = []
def test():
try:
state.append('start')
eventlet.sleep(DELAY)
except:
state.append('except')
# catching GreenletExit
pass
# when switching to hub, hub makes itself the parent of this greenlet,
# thus after the function's done, the control will go to the parent
eventlet.sleep(0)
state.append('finished')
g = eventlet.spawn(test)
eventlet.sleep(DELAY / 2)
self.assertEqual(state, ['start'])
eventlet.kill(g)
# will not get there, unless switching is explicitly scheduled by kill
self.assertEqual(state, ['start', 'except'])
eventlet.sleep(DELAY)
self.assertEqual(state, ['start', 'except', 'finished'])
def test_nested_with_timeout(self):
def func():
return eventlet.with_timeout(0.2, eventlet.sleep, 2, timeout_value=1)
try:
eventlet.with_timeout(0.1, func)
self.fail(u'Expected Timeout')
except eventlet.Timeout:
pass
def test_wrap_is_timeout():
class A(object):
pass
obj = eventlet.wrap_is_timeout(A)()
tests.check_is_timeout(obj)
def METHOD_NAME():
# https://github.com/eventlet/eventlet/issues/378
code = '''import eventlet; eventlet.Timeout(1).cancel(); print('pass')'''
args = ['-Werror:eventlet.Timeout:DeprecationWarning', '-c', code]
tests.run_python(path=None, args=args, expect_pass=True)
|
3,166 |
scatter
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
from paddle.common_ops_import import Variable
from paddle.framework import core
from pgl.utils import op
def check_is_tensor(*data):
"""Check if the given datas have paddle.Tensor
"""
for d in data:
if isinstance(d, paddle.Tensor) or isinstance(d, Variable):
return True
return False
def to_paddle_tensor(data, uva=False):
"""Convert a numpy ndarray to paddle.Tensor.
"""
if not uva:
data = paddle.to_tensor(data)
else:
if not paddle.device.is_compiled_with_cuda() or \
not paddle.device.get_device().startswith("gpu"):
raise ValueError(
"UVA tensor should be used under GPU environment.")
data = core.to_uva_tensor(data)
return data
def METHOD_NAME(x, index, updates, overwrite=True, name=None):
"""
**Scatter Layer**
Output is obtained by updating the input on selected indices based on updates.
.. code-block:: python
import numpy as np
#input:
x = np.array([[1, 1], [2, 2], [3, 3]])
index = np.array([2, 1, 0, 1])
# shape of updates should be the same as x
# shape of updates with dim > 1 should be the same as input
updates = np.array([[1, 1], [2, 2], [3, 3], [4, 4]])
overwrite = False
# calculation:
if not overwrite:
for i in range(len(index)):
x[index[i]] = np.zeros((2))
for i in range(len(index)):
if (overwrite):
x[index[i]] = updates[i]
else:
x[index[i]] += updates[i]
# output:
out = np.array([[3, 3], [6, 6], [1, 1]])
out.shape # [3, 2]
**NOTICE**: The order in which updates are applied is nondeterministic,
so the output will be nondeterministic if index contains duplicates.
Args:
x (Tensor): The input N-D Tensor with ndim>=1. Data type can be float32, float64.
index (Tensor): The index 1-D Tensor. Data type can be int32, int64. The length of index cannot exceed updates's length, and the value in index cannot exceed input's length.
updates (Tensor): update input with updates parameter based on index. shape should be the same as input, and dim value with dim > 1 should be the same as input.
        overwrite (bool): The mode for updating the output when there are duplicate indices.
            If True, use the overwrite mode to update the output of the same index;
            if False, use the accumulate mode to update the output of the same index. Default value is True.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
Tensor: The output is a Tensor with the same shape as x.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1, 1], [2, 2], [3, 3]], dtype='float32')
index = paddle.to_tensor([2, 1, 0, 1], dtype='int64')
updates = paddle.to_tensor([[1, 1], [2, 2], [3, 3], [4, 4]], dtype='float32')
output1 = paddle.scatter(x, index, updates, overwrite=False)
# [[3., 3.],
# [6., 6.],
# [1., 1.]]
output2 = paddle.scatter(x, index, updates, overwrite=True)
# CPU device:
# [[3., 3.],
# [4., 4.],
# [1., 1.]]
            # GPU device may produce two different results because of the repeated numbers in index
# result 1:
# [[3., 3.],
# [4., 4.],
# [1., 1.]]
# result 2:
# [[3., 3.],
# [2., 2.],
# [1., 1.]]
"""
return paddle.METHOD_NAME(x, index, updates, overwrite, name)
def generate_segment_id_from_index(index):
if check_is_tensor(index):
zeros = paddle.zeros(index[-1] + 1, dtype="int32")
index = index[:-1]
segments = paddle.METHOD_NAME(
zeros, index, paddle.ones_like(
index, dtype="int32"))
segments = paddle.cumsum(segments)[:-1] - 1
return segments
else:
segments = np.zeros(index[-1] + 1, dtype="int32")
index = index[:-1]
segments[index] += 1
segments = np.cumsum(segments)[:-1] - 1
return segments
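# Illustration (assumed input): for cumulative offsets index = [0, 3, 5, 8] the returned segment
# ids are [0, 0, 0, 1, 1, 2, 2, 2], i.e. segment k covers positions index[k]:index[k+1].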
def maybe_num_nodes(edges):
"""Guess the number of nodes from edges
Args:
edges: numpy.ndarry of paddle.Tensor
Return:
An int or paddle.Tensor about the number of nodes.
"""
if isinstance(edges, Variable):
return paddle.max(edges) + 1
if len(edges) == 0:
return 0
if check_is_tensor(edges):
return paddle.max(edges) + 1
else:
return np.max(edges) + 1
def unique_segment(data, dtype="int64"):
"""Return Segment Id from data
"""
unique, index = paddle.unique(data, return_inverse=True, dtype=dtype)
return unique, index
def graph_send_recv(x, src_index, dst_index, pool_type="sum"):
"""This method combines the send and recv function in different pool_type.
Now, this method only supports default copy send function, and built-in receive pool_type
function ('sum', 'mean', 'max', 'min').
Args:
x (Tensor): The input tensor, and the available data type is float32, float64, int32, int64.
src_index (Tensor): An 1-D tensor, and the available data type is int32, int64.
dst_index (Tensor): An 1-D tensor, and should have the same shape as `src_index`.
The available data type is int32, int64.
pool_type (str): The pooling type of graph_send_recv, including `sum`, `mean`,
`max`, `min`. Default value is `sum`.
Returns:
out (Tensor): The output tensor, should have the same shape and same dtype as input tensor `x`.
"""
# TODO:@ZHUI add support for 'mean', 'max', 'min' pool_type.
assert pool_type == "sum", "Only implement 'sum' pool_type function right now. Maybe you can update PaddlePaddle version to fix this problem."
def send(message_func, src_feat):
src_feat_temp = {}
if src_feat is not None:
assert isinstance(src_feat,
dict), "The input src_feat must be a dict"
src_feat_temp.update(src_feat)
src_feat = op.RowReader(src_feat_temp, src_index)
msg = message_func(src_feat)
return msg
def _sum_recv(feat):
output_dim = feat.shape[-1]
init_output = paddle.zeros(
shape=[x.shape[0], output_dim], dtype=feat.dtype)
final_output = paddle.METHOD_NAME(
init_output, dst_index, feat, overwrite=False)
return final_output
msg = send(lambda sf: {"msg": sf["h"]}, src_feat={"h": x})
return eval("_%s_recv" % pool_type)(msg["msg"])
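# Sketch of the semantics (assumed shapes): with x of shape [num_nodes, feat_dim] and
# src_index/dst_index of shape [num_edges], the rows x[src_index] are gathered as messages and
# summed into the rows given by dst_index of a zero tensor shaped like x.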
|
3,167 |
pre process coco mobilenet
|
"""
dataset related classes and methods
"""
# pylint: disable=unused-argument,missing-docstring
import logging
import sys
import time
import cv2
import numpy as np
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("dataset")
class Item():
def __init__(self, label, img, idx):
self.label = label
self.img = img
self.idx = idx
self.start = time.time()
def usleep(sec):
if sys.platform == 'win32':
# on windows time.sleep() doesn't work to well
import ctypes
kernel32 = ctypes.windll.kernel32
timer = kernel32.CreateWaitableTimerA(ctypes.c_void_p(), True, ctypes.c_void_p())
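        # SetWaitableTimer expects the due time in 100-nanosecond units; a negative value means a
        # relative delay, hence the factor of 10 * 1000000 per second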
delay = ctypes.c_longlong(int(-1 * (10 * 1000000 * sec)))
kernel32.SetWaitableTimer(timer, ctypes.byref(delay), 0, ctypes.c_void_p(), ctypes.c_void_p(), False)
kernel32.WaitForSingleObject(timer, 0xffffffff)
else:
time.sleep(sec)
class Dataset():
def __init__(self):
self.arrival = None
self.image_list = []
self.label_list = []
self.image_list_inmemory = {}
self.last_loaded = -1
def preprocess(self, use_cache=True):
raise NotImplementedError("Dataset:preprocess")
def get_item_count(self):
return len(self.image_list)
def get_list(self):
raise NotImplementedError("Dataset:get_list")
def load_query_samples(self, sample_list):
self.image_list_inmemory = {}
for sample in sample_list:
self.image_list_inmemory[sample], _ = self.get_item(sample)
self.last_loaded = time.time()
def unload_query_samples(self, sample_list):
if sample_list:
for sample in sample_list:
                if sample in self.image_list_inmemory:
del self.image_list_inmemory[sample]
else:
self.image_list_inmemory = {}
def get_samples(self, id_list):
data = np.array([self.image_list_inmemory[id] for id in id_list])
return data, self.label_list[id_list]
def get_item_loc(self, id):
raise NotImplementedError("Dataset:get_item_loc")
#
# Post processing
#
class PostProcessCommon:
def __init__(self, offset=0):
self.offset = offset
self.good = 0
self.total = 0
def __call__(self, results, ids, expected=None, result_dict=None):
processed_results = []
n = len(results[0])
for idx in range(0, n):
result = results[0][idx] + self.offset
processed_results.append([result])
if result == expected[idx]:
self.good += 1
self.total += n
return processed_results
def add_results(self, results):
pass
def start(self):
self.good = 0
self.total = 0
def finalize(self, results, ds=False, output_dir=None):
results["good"] = self.good
results["total"] = self.total
class PostProcessArgMax:
def __init__(self, offset=0):
self.offset = offset
self.good = 0
self.total = 0
def __call__(self, results, ids, expected=None, result_dict=None):
processed_results = []
results = np.argmax(results[0], axis=1)
n = results.shape[0]
for idx in range(0, n):
result = results[idx] + self.offset
processed_results.append([result])
if result == expected[idx]:
self.good += 1
self.total += n
return processed_results
def add_results(self, results):
pass
def start(self):
self.good = 0
self.total = 0
def finalize(self, results, ds=False, output_dir=None):
results["good"] = self.good
results["total"] = self.total
#
# pre-processing
#
def center_crop(img, out_height, out_width):
height, width, _ = img.shape
left = int((width - out_width) / 2)
right = int((width + out_width) / 2)
top = int((height - out_height) / 2)
bottom = int((height + out_height) / 2)
img = img[top:bottom, left:right]
return img
def resize_with_aspectratio(img, out_height, out_width, scale=87.5, inter_pol=cv2.INTER_LINEAR):
height, width, _ = img.shape
new_height = int(100. * out_height / scale)
new_width = int(100. * out_width / scale)
if height > width:
w = new_width
h = int(new_height * height / width)
else:
h = new_height
w = int(new_width * width / height)
img = cv2.resize(img, (w, h), interpolation=inter_pol)
return img
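# e.g. with out_height = out_width = 224 and the default scale of 87.5, the shorter image side is
# resized to 100 * 224 / 87.5 = 256 pixels before the center crop is applied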
def pre_process_vgg(img, dims=None, need_transpose=False):
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
output_height, output_width, _ = dims
cv2_interpol = cv2.INTER_AREA
img = resize_with_aspectratio(img, output_height, output_width, inter_pol=cv2_interpol)
img = center_crop(img, output_height, output_width)
img = np.asarray(img, dtype='float32')
# normalize image
means = np.array([123.68, 116.78, 103.94], dtype=np.float32)
img -= means
# transpose if needed
if need_transpose:
img = img.transpose([2, 0, 1])
return img
def pre_process_mobilenet(img, dims=None, need_transpose=False):
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
output_height, output_width, _ = dims
img = resize_with_aspectratio(img, output_height, output_width, inter_pol=cv2.INTER_LINEAR)
img = center_crop(img, output_height, output_width)
img = np.asarray(img, dtype='float32')
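    # scale pixel values from [0, 255] to [-1, 1]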
img /= 255.0
img -= 0.5
img *= 2
# transpose if needed
if need_transpose:
img = img.transpose([2, 0, 1])
return img
def pre_process_imagenet_pytorch(img, dims=None, need_transpose=False):
from PIL import Image
import torchvision.transforms.functional as F
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = Image.fromarray(img)
img = F.resize(img, 256, Image.BILINEAR)
img = F.center_crop(img, 224)
img = F.to_tensor(img)
img = F.normalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], inplace=False)
if not need_transpose:
img = img.permute(1, 2, 0) # NHWC
img = np.asarray(img, dtype='float32')
return img
def maybe_resize(img, dims):
img = np.array(img, dtype=np.float32)
if len(img.shape) < 3 or img.shape[2] != 3:
# some images might be grayscale
img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if dims is not None:
im_height, im_width, _ = dims
img = cv2.resize(img, (im_width, im_height), interpolation=cv2.INTER_LINEAR)
return img
def METHOD_NAME(img, dims=None, need_transpose=False):
img = maybe_resize(img, dims)
img = np.asarray(img, dtype=np.uint8)
# transpose if needed
if need_transpose:
img = img.transpose([2, 0, 1])
return img
def pre_process_coco_pt_mobilenet(img, dims=None, need_transpose=False):
img = maybe_resize(img, dims)
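    # scale pixel values from [0, 255] to [-1, 1]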
img -= 127.5
img /= 127.5
# transpose if needed
if need_transpose:
img = img.transpose([2, 0, 1])
return img
def pre_process_coco_resnet34(img, dims=None, need_transpose=False):
img = maybe_resize(img, dims)
mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
std = np.array([0.229, 0.224, 0.225], dtype=np.float32)
img = img / 255. - mean
img = img / std
if need_transpose:
img = img.transpose([2, 0, 1])
return img
def pre_process_coco_resnet34_tf(img, dims=None, need_transpose=False):
img = maybe_resize(img, dims)
mean = np.array([123.68, 116.78, 103.94], dtype=np.float32)
img = img - mean
if need_transpose:
img = img.transpose([2, 0, 1])
return img
def pre_process_openimages_retinanet(img, dims=None, need_transpose=False):
img = maybe_resize(img, dims)
img /= 255.
# transpose if needed
if need_transpose:
img = img.transpose([2, 0, 1])
    return img
|
3,168 |
test sequence of iterables
|
from enum import auto
from importlib.metadata import version as package_version
from os.path import abspath, expanduser, sep
from pathlib import Path
import pytest
from packaging.version import parse as parse_version
from napari.utils.misc import (
StringEnum,
_is_array_type,
_quiet_array_equal,
abspath_or_url,
ensure_iterable,
ensure_list_of_layer_data_tuple,
ensure_sequence_of_iterables,
pick_equality_operator,
)
ITERABLE = (0, 1, 2)
NESTED_ITERABLE = [ITERABLE, ITERABLE, ITERABLE]
DICT = {'a': 1, 'b': 3, 'c': 5}
LIST_OF_DICTS = [DICT, DICT, DICT]
PARTLY_NESTED_ITERABLE = [ITERABLE, None, None]
REPEATED_PARTLY_NESTED_ITERABLE = [PARTLY_NESTED_ITERABLE] * 3
@pytest.mark.parametrize(
'input_data, expected',
[
[ITERABLE, NESTED_ITERABLE],
[NESTED_ITERABLE, NESTED_ITERABLE],
[(ITERABLE, (2,), (3, 1, 6)), (ITERABLE, (2,), (3, 1, 6))],
[DICT, LIST_OF_DICTS],
[LIST_OF_DICTS, LIST_OF_DICTS],
[(ITERABLE, (2,), (3, 1, 6)), (ITERABLE, (2,), (3, 1, 6))],
[None, (None, None, None)],
[PARTLY_NESTED_ITERABLE, REPEATED_PARTLY_NESTED_ITERABLE],
[[], ([], [], [])],
],
)
def METHOD_NAME(input_data, expected):
"""Test ensure_sequence_of_iterables returns a sequence of iterables."""
zipped = zip(
range(3),
ensure_sequence_of_iterables(input_data, repeat_empty=True),
expected,
)
for _i, result, expectation in zipped:
assert result == expectation
def test_sequence_of_iterables_allow_none():
input_data = [(1, 2), None]
assert (
ensure_sequence_of_iterables(input_data, allow_none=True) == input_data
)
def test_sequence_of_iterables_no_repeat_empty():
assert ensure_sequence_of_iterables([], repeat_empty=False) == []
with pytest.raises(ValueError):
ensure_sequence_of_iterables([], repeat_empty=False, length=3)
def test_sequence_of_iterables_raises():
with pytest.raises(ValueError):
# the length argument asserts a specific length
ensure_sequence_of_iterables(((0, 1),), length=4)
# BEWARE: only the first element of a nested sequence is checked.
with pytest.raises(AssertionError):
iterable = (None, (0, 1), (0, 2))
result = iter(ensure_sequence_of_iterables(iterable))
assert next(result) is None
@pytest.mark.parametrize(
'input_data, expected',
[
[ITERABLE, ITERABLE],
[DICT, DICT],
[1, [1, 1, 1]],
['foo', ['foo', 'foo', 'foo']],
[None, [None, None, None]],
],
)
def test_ensure_iterable(input_data, expected):
"""Test test_ensure_iterable returns an iterable."""
zipped = zip(range(3), ensure_iterable(input_data), expected)
for _i, result, expectation in zipped:
assert result == expectation
def test_string_enum():
# Make a test StringEnum
class TestEnum(StringEnum):
THING = auto()
OTHERTHING = auto()
# test setting by value, correct case
assert TestEnum('thing') == TestEnum.THING
# test setting by value mixed case
assert TestEnum('thInG') == TestEnum.THING
# test setting by instance of self
assert TestEnum(TestEnum.THING) == TestEnum.THING
# test setting by name correct case
assert TestEnum['THING'] == TestEnum.THING
# test setting by name mixed case
assert TestEnum['tHiNg'] == TestEnum.THING
# test setting by value with incorrect value
with pytest.raises(ValueError):
TestEnum('NotAThing')
# test setting by name with incorrect name
with pytest.raises(KeyError):
TestEnum['NotAThing']
# test creating a StringEnum with the functional API
animals = StringEnum('Animal', 'AARDVARK BUFFALO CAT DOG')
assert str(animals.AARDVARK) == 'aardvark'
assert animals('BUffALO') == animals.BUFFALO
assert animals['BUffALO'] == animals.BUFFALO
# test setting by instance of self
class OtherEnum(StringEnum):
SOMETHING = auto()
# test setting by instance of a different StringEnum is an error
with pytest.raises(ValueError):
TestEnum(OtherEnum.SOMETHING)
# test string conversion
assert str(TestEnum.THING) == 'thing'
# test direct comparison with a string
assert TestEnum.THING == 'thing'
assert 'thing' == TestEnum.THING
assert TestEnum.THING != 'notathing'
assert 'notathing' != TestEnum.THING
# test comparison with another enum with same value names
class AnotherTestEnum(StringEnum):
THING = auto()
ANOTHERTHING = auto()
assert TestEnum.THING != AnotherTestEnum.THING
# test lookup in a set
assert TestEnum.THING in {TestEnum.THING, TestEnum.OTHERTHING}
assert TestEnum.THING not in {TestEnum.OTHERTHING}
assert TestEnum.THING in {'thing', TestEnum.OTHERTHING}
assert TestEnum.THING not in {
AnotherTestEnum.THING,
AnotherTestEnum.ANOTHERTHING,
}
def test_abspath_or_url():
relpath = "~" + sep + "something"
assert abspath_or_url(relpath) == expanduser(relpath)
assert abspath_or_url('something') == abspath('something')
assert abspath_or_url(sep + 'something') == abspath(sep + 'something')
assert abspath_or_url('https://something') == 'https://something'
assert abspath_or_url('http://something') == 'http://something'
assert abspath_or_url('ftp://something') == 'ftp://something'
assert abspath_or_url('s3://something') == 's3://something'
assert abspath_or_url('file://something') == 'file://something'
with pytest.raises(TypeError):
abspath_or_url({'a', '~'})
def test_type_stable():
assert isinstance(abspath_or_url('~'), str)
assert isinstance(abspath_or_url(Path('~')), Path)
def test_equality_operator():
import operator
import dask.array as da
import numpy as np
import xarray as xr
import zarr
class MyNPArray(np.ndarray):
pass
assert pick_equality_operator(np.ones((1, 1))) == _quiet_array_equal
assert pick_equality_operator(MyNPArray([1, 1])) == _quiet_array_equal
assert pick_equality_operator(da.ones((1, 1))) == operator.is_
assert pick_equality_operator(zarr.ones((1, 1))) == operator.is_
assert (
pick_equality_operator(xr.DataArray(np.ones((1, 1))))
== _quiet_array_equal
)
@pytest.mark.skipif(
parse_version(package_version("numpy")) >= parse_version("1.25.0"),
reason="Numpy 1.25.0 return true for below comparison",
)
def test_equality_operator_silence():
import numpy as np
eq = pick_equality_operator(np.asarray([]))
# make sure this doesn't warn
assert not eq(np.asarray([]), np.asarray([], '<U32'))
def test_is_array_type_with_xarray():
import numpy as np
import xarray as xr
assert _is_array_type(xr.DataArray(), 'xarray.DataArray')
assert not _is_array_type(xr.DataArray(), 'xr.DataArray')
assert not _is_array_type(
xr.DataArray(), 'xarray.core.dataarray.DataArray'
)
assert not _is_array_type([], 'xarray.DataArray')
assert not _is_array_type(np.array([]), 'xarray.DataArray')
@pytest.mark.parametrize(
'input_data, expected',
[
([([1, 10],)], [([1, 10],)]),
([([1, 10], {'name': 'hi'})], [([1, 10], {'name': 'hi'})]),
(
[([1, 10], {'name': 'hi'}, "image")],
[([1, 10], {'name': 'hi'}, "image")],
),
([], []),
],
)
def test_ensure_list_of_layer_data_tuple(input_data, expected):
"""Ensure that when given layer data that a tuple can be generated.
When data with a name is supplied a layer should be created and named.
When an empty dataset is supplied no layer is created and no errors are produced.
"""
assert ensure_list_of_layer_data_tuple(input_data) == expected
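# Minimal sketch of the helper exercised by test_ensure_iterable above. This is an
# illustrative assumption, not napari's implementation: strings, None and other
# non-iterable scalars are repeated indefinitely, while iterables (including dicts,
# which iterate over their keys and so match the DICT -> DICT case) pass through.
import itertools
from collections.abc import Iterable as _Iterable
def _ensure_iterable_sketch(obj):
    if isinstance(obj, _Iterable) and not isinstance(obj, str):
        return obj
    return itertools.repeat(obj)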
|
3,169 |
type
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetCacheRuleResult',
'AwaitableGetCacheRuleResult',
'get_cache_rule',
'get_cache_rule_output',
]
@pulumi.output_type
class GetCacheRuleResult:
"""
An object that represents a cache rule for a container registry.
"""
def __init__(__self__, creation_date=None, credential_set_resource_id=None, id=None, name=None, provisioning_state=None, source_repository=None, system_data=None, target_repository=None, METHOD_NAME=None):
if creation_date and not isinstance(creation_date, str):
raise TypeError("Expected argument 'creation_date' to be a str")
pulumi.set(__self__, "creation_date", creation_date)
if credential_set_resource_id and not isinstance(credential_set_resource_id, str):
raise TypeError("Expected argument 'credential_set_resource_id' to be a str")
pulumi.set(__self__, "credential_set_resource_id", credential_set_resource_id)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if source_repository and not isinstance(source_repository, str):
raise TypeError("Expected argument 'source_repository' to be a str")
pulumi.set(__self__, "source_repository", source_repository)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if target_repository and not isinstance(target_repository, str):
raise TypeError("Expected argument 'target_repository' to be a str")
pulumi.set(__self__, "target_repository", target_repository)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", METHOD_NAME)
@property
@pulumi.getter(name="creationDate")
def creation_date(self) -> str:
"""
The creation date of the cache rule.
"""
return pulumi.get(self, "creation_date")
@property
@pulumi.getter(name="credentialSetResourceId")
def credential_set_resource_id(self) -> Optional[str]:
"""
The ARM resource ID of the credential store which is associated with the cache rule.
"""
return pulumi.get(self, "credential_set_resource_id")
@property
@pulumi.getter
def id(self) -> str:
"""
The resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
Provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="sourceRepository")
def source_repository(self) -> Optional[str]:
"""
Source repository pulled from upstream.
"""
return pulumi.get(self, "source_repository")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter(name="targetRepository")
def target_repository(self) -> Optional[str]:
"""
Target repository specified in docker pull command.
Eg: docker pull myregistry.azurecr.io/{targetRepository}:{tag}
"""
return pulumi.get(self, "target_repository")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetCacheRuleResult(GetCacheRuleResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetCacheRuleResult(
creation_date=self.creation_date,
credential_set_resource_id=self.credential_set_resource_id,
id=self.id,
name=self.name,
provisioning_state=self.provisioning_state,
source_repository=self.source_repository,
system_data=self.system_data,
target_repository=self.target_repository,
METHOD_NAME=self.METHOD_NAME)
def get_cache_rule(cache_rule_name: Optional[str] = None,
registry_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCacheRuleResult:
"""
Gets the properties of the specified cache rule resource.
Azure REST API version: 2023-01-01-preview.
:param str cache_rule_name: The name of the cache rule.
:param str registry_name: The name of the container registry.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
__args__ = dict()
__args__['cacheRuleName'] = cache_rule_name
__args__['registryName'] = registry_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:containerregistry:getCacheRule', __args__, opts=opts, typ=GetCacheRuleResult).value
return AwaitableGetCacheRuleResult(
creation_date=pulumi.get(__ret__, 'creation_date'),
credential_set_resource_id=pulumi.get(__ret__, 'credential_set_resource_id'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
source_repository=pulumi.get(__ret__, 'source_repository'),
system_data=pulumi.get(__ret__, 'system_data'),
target_repository=pulumi.get(__ret__, 'target_repository'),
METHOD_NAME=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_cache_rule)
def get_cache_rule_output(cache_rule_name: Optional[pulumi.Input[str]] = None,
registry_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetCacheRuleResult]:
"""
Gets the properties of the specified cache rule resource.
Azure REST API version: 2023-01-01-preview.
:param str cache_rule_name: The name of the cache rule.
:param str registry_name: The name of the container registry.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
...
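# Illustrative usage sketch (assumption; the resource names below are placeholders,
# not values from this module). Inside a Pulumi program one might write:
#
# rule = get_cache_rule(cache_rule_name="myCacheRule",
#                       registry_name="myRegistry",
#                       resource_group_name="myResourceGroup")
# pulumi.export("targetRepository", rule.target_repository)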
|
3,170 |
verify arccos 1d
|
from lpython import i32, f32, f64
from numpy import empty, arcsin, arccos, sin, cos, sqrt, arctan, tan, degrees, radians, float32, float64
from math import pi
def verify1d_same(array: f32[:], result: f32[:], size: i32):
i: i32
eps: f32
eps = f32(1e-6)
for i in range(size):
assert abs(array[i] - result[i]) <= eps
def verify_arcsin_1d(array: f32[:], result: f32[:], size: i32):
i: i32
eps: f32
eps = f32(1e-6)
for i in range(size):
assert abs(arcsin(array[i])**f32(2.0) - result[i]) <= eps
def verify_arcsin_2d(array: f64[:, :], result: f64[:, :], size1:i32, size2:i32):
i: i32
j: i32
eps: f64
eps = 1e-12
for i in range(size1):
for j in range(size2):
assert abs(arcsin(array[i, j])**2.0 - result[i, j]) <= eps
def METHOD_NAME(array: f32[:], result: f32[:], size: i32):
i: i32
eps: f32
eps = f32(1e-6)
for i in range(size):
assert abs(arccos(array[i])**f32(2.0) - result[i]) <= eps
def verify_arccos_2d(array: f64[:, :], result: f64[:, :], size1:i32, size2:i32):
i: i32
j: i32
eps: f64
eps = 1e-12
for i in range(size1):
for j in range(size2):
assert abs(arccos(array[i, j])**2.0 - result[i, j]) <= eps
def verify_arctan_1d(array: f32[:], result: f32[:], size: i32):
i: i32
eps: f32
eps = f32(1e-6)
for i in range(size):
assert abs(arctan(array[i])**f32(2.0) - result[i]) <= eps
def verify_arctan_2d(array: f64[:, :], result: f64[:, :], size1:i32, size2:i32):
i: i32
j: i32
eps: f64
eps = 1e-12
for i in range(size1):
for j in range(size2):
assert abs(arctan(array[i, j])**2.0 - result[i, j]) <= eps
def elemental_arcsin():
i: i32
j: i32
array1d: f32[201] = empty(201, dtype=float32)
arcsin1d: f32[201] = empty(201, dtype=float32)
for i in range(201):
array1d[i] = f32((i - 100)/100)
arcsin1d = arcsin(array1d) ** f32(2.0)
verify_arcsin_1d(array1d, arcsin1d, 201)
array2d: f64[64, 64] = empty((64, 64), dtype=float64)
arcsin2d: f64[64, 64] = empty((64, 64), dtype=float64)
for i in range(64):
for j in range(64): # 2048 = 64 * 32
array2d[i,j]= float((i * 64 + j - 2048 )/2048)
arcsin2d = arcsin(array2d) ** 2.0
verify_arcsin_2d(array2d, arcsin2d, 64, 64)
def elemental_arccos():
i: i32
j: i32
array1d: f32[201] = empty(201, dtype=float32)
arccos1d: f32[201] = empty(201, dtype=float32)
for i in range(201):
array1d[i] = f32((i - 100)/100)
arccos1d = arccos(array1d) ** f32(2.0)
METHOD_NAME(array1d, arccos1d, 201)
array2d: f64[64, 64] = empty((64, 64), dtype=float64)
arccos2d: f64[64, 64] = empty((64, 64), dtype=float64)
for i in range(64):
for j in range(64): # 2048 = 64 * 32
array2d[i,j]= float((i * 64 + j - 2048 )/2048)
arccos2d = arccos(array2d) ** 2.0
verify_arccos_2d(array2d, arccos2d, 64, 64)
def elemental_arctan():
i: i32
j: i32
eps: f32
eps = f32(1e-6)
array1d: f32[201] = empty(201, dtype=float32)
array1d_rec: f32[201] = empty(201, dtype=float32)
arctan1d: f32[201] = empty(201, dtype=float32)
for i in range(201):
array1d[i] = f32(i - 100)
arctan1d = arctan(array1d) ** f32(2.0)
verify_arctan_1d(array1d, arctan1d, 201)
for i in range(201):
array1d[i] = f32(i + 1)
array1d_rec[i] = f32(1.0/f64(i+1))
arctan1d = arctan(array1d) + arctan(array1d_rec)
for i in range(201):
assert abs(arctan1d[i] - f32(f64(pi) / 2.0)) <= eps
array2d: f64[64, 64] = empty((64, 64), dtype=float64)
arctan2d: f64[64, 64] = empty((64, 64), dtype=float64)
for i in range(64):
for j in range(64):
array2d[i,j]= float(64*i + j - 2048)
arctan2d = arctan(array2d) ** 2.0
verify_arctan_2d(array2d, arctan2d, 64, 64)
def elemental_trig_identity():
i: i32
eps: f32
eps = f32(1e-6)
array1d: f32[201] = empty(201, dtype=float32)
observed1d: f32[201] = empty(201, dtype=float32)
for i in range(201):
array1d[i] = f32((i - 100)/100)
observed1d = arcsin(array1d) + arccos(array1d)
for i in range(201):
assert abs(observed1d[i] - f32(pi / 2.0)) <= eps
def elemental_reverse():
i: i32
array1d: f32[201] = empty(201, dtype=float32)
observed1d: f32[201] = empty(201, dtype=float32)
for i in range(201):
array1d[i] = f32((i - 100)/100)
observed1d = sin(arcsin(array1d))
verify1d_same(observed1d, array1d, 201)
observed1d = cos(arccos(array1d))
verify1d_same(observed1d, array1d, 201)
observed1d = tan(arctan(array1d))
verify1d_same(observed1d, array1d, 201)
observed1d = degrees(radians(array1d))
verify1d_same(observed1d, array1d, 201)
def elemental_trig_identity_extra():
i: i32
array1d: f32[201] = empty(201, dtype=float32)
array_x: f32[201] = empty(201, dtype=float32)
array_y: f32[201] = empty(201, dtype=float32)
for i in range(201):
array1d[i] = f32((i - 100)/100)
array_x = sin(arccos(array1d))
array_y = cos(arcsin(array1d))
for i in range(201):
array1d[i] = f32(1.0) - array1d[i] ** f32(2.0)
array1d = sqrt(array1d)
verify1d_same(array_x, array_y, 201)
verify1d_same(array_x, array1d, 201)
def elemental_degrees():
i: i32
j: i32
eps_32: f32
eps_64: f64
eps_32 = f32(1e-6)
eps_64 = 1e-12
array1d: f32[200] = empty(200, dtype=float32)
degrees1d: f32[200] = empty(200, dtype=float32)
for i in range(200):
array1d[i] = f32(i)
degrees1d = sin(degrees(array1d))
for i in range(200):
assert abs(degrees1d[i] - sin(degrees(array1d[i]))) <= eps_32
array2d: f64[64, 64] = empty((64, 64), dtype=float64)
degrees2d: f64[64, 64] = empty((64, 64), dtype=float64)
for i in range(64):
for j in range(64):
array2d[i,j]= float(i*64+j)
degrees2d = sin(degrees(array2d))
for i in range(64):
for j in range(64):
assert abs(degrees2d[i, j] - sin(degrees(array2d[i, j]))) <= eps_64
def elemental_radians():
i: i32
j: i32
eps_32: f32
eps_64: f64
eps_32 = f32(1e-6)
eps_64 = 1e-12
array1d: f32[200] = empty(200, dtype=float32)
radians1d: f32[200] = empty(200, dtype=float32)
for i in range(200):
array1d[i] = f32(i)
radians1d = cos(radians(array1d))
for i in range(200):
assert abs(radians1d[i] - cos(radians(array1d[i]))) <= eps_32
array2d: f64[64, 64] = empty((64, 64), dtype=float64)
radians2d: f64[64, 64] = empty((64, 64), dtype=float64)
for i in range(64):
for j in range(64):
array2d[i,j]= float(i*64+j)
radians2d = cos(radians(array2d))
for i in range(64):
for j in range(64):
assert abs(radians2d[i, j] - cos(radians(array2d[i, j]))) <= eps_64
elemental_arcsin()
elemental_arccos()
elemental_arctan()
elemental_degrees()
elemental_radians()
elemental_trig_identity()
elemental_reverse()
elemental_trig_identity_extra()
|
3,171 |
test on retry
|
from typing import Type, Union
import pytest
from mock import AsyncMock, call
import mock
from opentrons.drivers.asyncio.communication.async_serial import AsyncSerial
from opentrons.drivers.asyncio.communication.serial_connection import (
SerialConnection,
AsyncResponseSerialConnection,
)
from opentrons.drivers.asyncio.communication import (
NoResponse,
AlarmResponse,
ErrorResponse,
)
@pytest.fixture
def mock_serial_port() -> AsyncMock:
return AsyncMock(spec=AsyncSerial)
@pytest.fixture
def ack() -> str:
return "ack"
SerialKind = Union[AsyncResponseSerialConnection, SerialConnection]
# Async because SerialConnection.__init__() needs an event loop,
# so this fixture needs to run in an event loop.
@pytest.fixture(
params=[AsyncResponseSerialConnection, SerialConnection], # type: ignore[return]
)
async def subject(
request: pytest.FixtureRequest, mock_serial_port: AsyncMock, ack: str
) -> SerialKind:
"""Create the test subject."""
serial_class = request.param # type: ignore[attr-defined]
serial_class.RETRY_WAIT_TIME = 0
if serial_class == AsyncResponseSerialConnection:
return serial_class( # type: ignore[no-any-return]
serial=mock_serial_port,
ack=ack,
name="name",
port="port",
retry_wait_time_seconds=0,
error_keyword="err",
alarm_keyword="alarm",
async_error_ack="async",
)
elif serial_class == SerialConnection:
return serial_class( # type: ignore[no-any-return]
serial=mock_serial_port,
ack=ack,
name="name",
port="port",
retry_wait_time_seconds=0,
error_keyword="error",
alarm_keyword="alarm",
)
@pytest.fixture
async def async_subject(
mock_serial_port: AsyncMock, ack: str
) -> AsyncResponseSerialConnection:
"""Create the test async subject."""
AsyncResponseSerialConnection.RETRY_WAIT_TIME = 0 # type: ignore[attr-defined]
return AsyncResponseSerialConnection(
serial=mock_serial_port,
ack=ack,
name="name",
port="port",
retry_wait_time_seconds=0,
error_keyword="err",
alarm_keyword="alarm",
async_error_ack="async",
)
@pytest.fixture
async def subject_raise_on_error_patched(async_subject):
raise_on_error_mock = mock.MagicMock()
with mock.patch.object(async_subject, "raise_on_error", raise_on_error_mock):
yield async_subject
async def test_send_command(
mock_serial_port: AsyncMock, subject: SerialKind, ack: str
) -> None:
"""It should send a command."""
serial_response = "response data " + ack
mock_serial_port.read_until.return_value = serial_response.encode()
await subject.send_data(data="send data")
mock_serial_port.timeout_override.assert_called_once_with("timeout", None)
mock_serial_port.write.assert_called_once_with(data=b"send data")
mock_serial_port.read_until.assert_called_once_with(match=ack.encode())
async def test_send_command_with_retry(
mock_serial_port: AsyncMock, subject: SerialKind, ack: str
) -> None:
"""It should retry sending after a read failure."""
serial_response = "response data " + ack
mock_serial_port.read_until.side_effect = (b"", serial_response.encode())
await subject.send_data(data="send data", retries=1)
mock_serial_port.timeout_override.assert_called_once_with("timeout", None)
mock_serial_port.write.assert_has_calls(
calls=[call(data=b"send data"), call(data=b"send data")]
)
mock_serial_port.read_until.assert_has_calls(
calls=[
call(match=ack.encode()),
call(match=ack.encode()),
]
)
async def test_send_command_with_retry_exhausted(
mock_serial_port: AsyncMock, subject: SerialKind
) -> None:
"""It should raise after retries exhausted."""
mock_serial_port.read_until.side_effect = (b"", b"", b"")
with pytest.raises(NoResponse):
await subject.send_data(data="send data", retries=2)
async def test_send_command_response(
mock_serial_port: AsyncMock, subject: SerialKind, ack: str
) -> None:
"""It should return response without the ack and stripped."""
response_data = "response data"
serial_response = f" {response_data} {ack}"
mock_serial_port.read_until.return_value = serial_response.encode()
response = await subject.send_data(data="send data")
assert response == response_data
@pytest.mark.parametrize(
argnames=["response", "exception_type"],
argvalues=[
["error", ErrorResponse],
["Error", ErrorResponse],
["Error: was found.", ErrorResponse],
["alarm", AlarmResponse],
["ALARM", AlarmResponse],
["This is an Alarm", AlarmResponse],
["error:Alarm lock", AlarmResponse],
["alarm:error", AlarmResponse],
["ALARM: Hard limit -X", AlarmResponse],
],
)
def test_raise_on_error(
subject: SerialKind, response: str, exception_type: Type[Exception]
) -> None:
"""It should raise an exception on error/alarm responses."""
with pytest.raises(expected_exception=exception_type, match=response):
subject.raise_on_error(response)
async def METHOD_NAME(mock_serial_port: AsyncMock, subject: SerialKind) -> None:
"""It should try to re-open connection."""
await subject.on_retry()
mock_serial_port.close.assert_called_once()
mock_serial_port.open.assert_called_once()
async def test_send_data_with_async_error_before(
mock_serial_port: AsyncMock,
subject_raise_on_error_patched: AsyncResponseSerialConnection,
ack: str,
) -> None:
"""It should return response without the ack and stripped. It should also handle the async error."""
error_response = "async ERR106:main motor:speedsensor failed"
serial_error_response = f" {error_response} {ack}"
encoded_error_response = serial_error_response.encode()
successful_response = "G28"
serial_successful_response = f" {successful_response} {ack}"
encoded_successful_response = serial_successful_response.encode()
mock_serial_port.read_until.side_effect = [
encoded_error_response,
encoded_successful_response,
]
response = await subject_raise_on_error_patched._send_data(data="G28")
assert response == successful_response
mock_serial_port.read_until.assert_has_calls(
calls=[
call(match=ack.encode()),
call(match=ack.encode()),
]
)
subject_raise_on_error_patched.raise_on_error.assert_has_calls( # type: ignore[attr-defined]
calls=[
call(response=error_response),
call(response=successful_response),
]
)
async def test_send_data_with_async_error_after(
mock_serial_port: AsyncMock,
subject_raise_on_error_patched: AsyncResponseSerialConnection,
ack: str,
) -> None:
"""It should return response without the ack and stripped. It should not handle the async error."""
error_response = "async ERR106:main motor:speedsensor failed"
serial_error_response = f" {error_response} {ack}"
encoded_error_response = serial_error_response.encode()
successful_response = "G28"
serial_successful_response = f" {successful_response} {ack}"
encoded_successful_response = serial_successful_response.encode()
mock_serial_port.read_until.side_effect = [
encoded_successful_response,
encoded_error_response,
]
response = await subject_raise_on_error_patched._send_data(data="G28")
assert response == successful_response
mock_serial_port.read_until.assert_has_calls(
calls=[
call(match=ack.encode()),
]
)
subject_raise_on_error_patched.raise_on_error.assert_has_calls( # type: ignore[attr-defined]
calls=[
call(response=successful_response),
]
)
|
3,172 |
perform
|
# SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
""" Non-blocking database connections.
"""
from typing import Callable, Any, Optional, Iterator, Sequence
import logging
import select
import time
import psycopg2
from psycopg2.extras import wait_select
# psycopg2 emits different exceptions pre and post 2.8. Detect if the new error
# module is available and adapt the error handling accordingly.
try:
import psycopg2.errors # pylint: disable=no-name-in-module,import-error
__has_psycopg2_errors__ = True
except ImportError:
__has_psycopg2_errors__ = False
from nominatim.typing import T_cursor, Query
LOG = logging.getLogger()
class DeadlockHandler:
""" Context manager that catches deadlock exceptions and calls
the given handler function. All other exceptions are passed on
normally.
"""
def __init__(self, handler: Callable[[], None], ignore_sql_errors: bool = False) -> None:
self.handler = handler
self.ignore_sql_errors = ignore_sql_errors
def __enter__(self) -> 'DeadlockHandler':
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> bool:
if __has_psycopg2_errors__:
if exc_type == psycopg2.errors.DeadlockDetected: # pylint: disable=E1101
self.handler()
return True
elif exc_type == psycopg2.extensions.TransactionRollbackError \
and exc_value.pgcode == '40P01':
self.handler()
return True
if self.ignore_sql_errors and isinstance(exc_value, psycopg2.Error):
LOG.info("SQL error ignored: %s", exc_value)
return True
return False
class DBConnection:
""" A single non-blocking database connection.
"""
def __init__(self, dsn: str,
cursor_factory: Optional[Callable[..., T_cursor]] = None,
ignore_sql_errors: bool = False) -> None:
self.dsn = dsn
self.current_query: Optional[Query] = None
self.current_params: Optional[Sequence[Any]] = None
self.ignore_sql_errors = ignore_sql_errors
self.conn: Optional['psycopg2.connection'] = None
self.cursor: Optional['psycopg2.cursor'] = None
self.connect(cursor_factory=cursor_factory)
def close(self) -> None:
""" Close all open connections. Does not wait for pending requests.
"""
if self.conn is not None:
if self.cursor is not None:
self.cursor.close() # type: ignore[no-untyped-call]
self.cursor = None
self.conn.close()
self.conn = None
def connect(self, cursor_factory: Optional[Callable[..., T_cursor]] = None) -> None:
""" (Re)connect to the database. Creates an asynchronous connection
with JIT and parallel processing disabled. If a connection was
already open, it is closed and a new connection established.
The caller must ensure that no query is pending before reconnecting.
"""
self.close()
# Use a dict to hand in the parameters because async is a reserved
# word in Python3.
self.conn = psycopg2.connect(**{'dsn': self.dsn, 'async': True}) # type: ignore
assert self.conn
self.wait()
if cursor_factory is not None:
self.cursor = self.conn.cursor(cursor_factory=cursor_factory)
else:
self.cursor = self.conn.cursor()
# Disable JIT and parallel workers as they are known to cause problems.
# Update pg_settings instead of using SET because it does not yield
# errors on older versions of Postgres where the settings are not
# implemented.
self.METHOD_NAME(
""" UPDATE pg_settings SET setting = -1 WHERE name = 'jit_above_cost';
UPDATE pg_settings SET setting = 0
WHERE name = 'max_parallel_workers_per_gather';""")
self.wait()
def _deadlock_handler(self) -> None:
LOG.info("Deadlock detected (params = %s), retry.", str(self.current_params))
assert self.cursor is not None
assert self.current_query is not None
assert self.current_params is not None
self.cursor.execute(self.current_query, self.current_params)
def wait(self) -> None:
""" Block until any pending operation is done.
"""
while True:
with DeadlockHandler(self._deadlock_handler, self.ignore_sql_errors):
wait_select(self.conn)
self.current_query = None
return
def METHOD_NAME(self, sql: Query, args: Optional[Sequence[Any]] = None) -> None:
""" Send SQL query to the server. Returns immediately without
blocking.
"""
assert self.cursor is not None
self.current_query = sql
self.current_params = args
self.cursor.execute(sql, args)
def fileno(self) -> int:
""" File descriptor to wait for. (Makes this class select()able.)
"""
assert self.conn is not None
return self.conn.fileno()
def is_done(self) -> bool:
""" Check if the connection is available for a new query.
Also checks if the previous query has run into a deadlock.
If so, then the previous query is repeated.
"""
assert self.conn is not None
if self.current_query is None:
return True
with DeadlockHandler(self._deadlock_handler, self.ignore_sql_errors):
if self.conn.poll() == psycopg2.extensions.POLL_OK:
self.current_query = None
return True
return False
class WorkerPool:
""" A pool of asynchronous database connections.
The pool may be used as a context manager.
"""
REOPEN_CONNECTIONS_AFTER = 100000
def __init__(self, dsn: str, pool_size: int, ignore_sql_errors: bool = False) -> None:
self.threads = [DBConnection(dsn, ignore_sql_errors=ignore_sql_errors)
for _ in range(pool_size)]
self.free_workers = self._yield_free_worker()
self.wait_time = 0.0
def finish_all(self) -> None:
""" Wait for all connection to finish.
"""
for thread in self.threads:
while not thread.is_done():
thread.wait()
self.free_workers = self._yield_free_worker()
def close(self) -> None:
""" Close all connections and clear the pool.
"""
for thread in self.threads:
thread.close()
self.threads = []
self.free_workers = iter([])
def next_free_worker(self) -> DBConnection:
""" Get the next free connection.
"""
return next(self.free_workers)
def _yield_free_worker(self) -> Iterator[DBConnection]:
ready = self.threads
command_stat = 0
while True:
for thread in ready:
if thread.is_done():
command_stat += 1
yield thread
if command_stat > self.REOPEN_CONNECTIONS_AFTER:
self._reconnect_threads()
ready = self.threads
command_stat = 0
else:
tstart = time.time()
_, ready, _ = select.select([], self.threads, [])
self.wait_time += time.time() - tstart
def _reconnect_threads(self) -> None:
for thread in self.threads:
while not thread.is_done():
thread.wait()
thread.connect()
def __enter__(self) -> 'WorkerPool':
return self
def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
self.finish_all()
self.close()
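# Illustrative usage sketch (assumption, not part of the original module); the DSN
# below is a placeholder. The pool hands out free connections and waits for them on exit:
#
# with WorkerPool('dbname=nominatim', pool_size=4) as pool:
#     conn = pool.next_free_worker()
#     conn.METHOD_NAME("SELECT 1")  # send the query without blocking
#     # leaving the block calls finish_all(), which waits for pending queries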
|
3,173 |
check
|
# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class NetlibLapack(CMakePackage):
"""LAPACK version 3.X is a comprehensive FORTRAN library that does
linear algebra operations including matrix inversions, least squared
solutions to linear sets of equations, eigenvector analysis, singular
value decomposition, etc. It is a very comprehensive and reputable
package that has found extensive use in the scientific community.
"""
homepage = "http://www.netlib.org/lapack/"
url = "http://www.netlib.org/lapack/lapack-3.5.0.tgz"
version(
"3.8.0",
"96591affdbf58c450d45c1daa540dbd2",
url="http://www.netlib.org/lapack/lapack-3.8.0.tar.gz",
)
version("3.7.1", md5="dcdeeed73de152c4643ccc5b1aeb453c")
version("3.7.0", md5="697bb8d67c7d336a0f339cc9dd0fa72f")
version("3.6.1", md5="421b2cb72e15f237e144428f9c460ee0")
version("3.6.0", md5="f2f6c67134e851fe189bb3ca1fbb5101")
version("3.5.0", md5="b1d3e3e425b2e44a06760ff173104bdf")
version("3.4.2", md5="61bf1a8a4469d4bdb7604f5897179478")
version("3.4.1", md5="44c3869c38c8335c2b9c2a8bb276eb55")
version("3.4.0", md5="02d5706ec03ba885fc246e5fa10d8c70")
version("3.3.1", md5="d0d533ec9a5b74933c2a1e84eedc58b4")
variant("shared", default=True, description="Build shared library version")
variant("external-blas", default=False, description="Build lapack with an external blas")
variant("lapacke", default=True, description="Activates the build of the LAPACKE C interface")
variant("xblas", default=False, description="Builds extended precision routines using XBLAS")
patch("ibm-xl.patch", when="@3.7: %xl")
patch("ibm-xl.patch", when="@3.7: %xl_r")
# https://github.com/Reference-LAPACK/lapack/issues/228
# TODO: update 'when' once the version of lapack
# containing the fix is released and added to Spack.
patch("undefined_declarations.patch", when="@3.8.0:")
# https://github.com/Reference-LAPACK/lapack/pull/268
# TODO: update 'when' once the version of lapack
# containing the fix is released and added to Spack.
patch("testing.patch", when="@3.7.0:")
# virtual dependency
provides("blas", when="~external-blas")
provides("lapack")
depends_on("blas", when="+external-blas")
depends_on("netlib-xblas+fortran+plain_blas", when="+xblas")
depends_on("[email protected]:", type="test")
# We need to run every phase twice in order to get static and shared
# versions of the libraries. When ~shared, we run the default
# implementations of the CMakePackage's phases and get only one building
# directory 'spack-build-static' with -DBUILD_SHARED_LIBS:BOOL=OFF (see
# implementations of self.build_directory and self.cmake_args() below).
# When +shared, we run the overridden methods for the phases, each
# running the default implementation twice with different values for
# self._building_shared. As a result, we get two building directories:
# 'spack-build-static' with -DBUILD_SHARED_LIBS:BOOL=OFF and
# 'spack-build-shared' with -DBUILD_SHARED_LIBS:BOOL=ON.
_building_shared = False
def patch(self):
# Fix cblas CMakeLists.txt -- has wrong case for subdirectory name.
if self.spec.satisfies("@3.6.0:"):
filter_file(
"${CMAKE_CURRENT_SOURCE_DIR}/CMAKE/",
"${CMAKE_CURRENT_SOURCE_DIR}/cmake/",
"CBLAS/CMakeLists.txt",
string=True,
)
@property
def blas_libs(self):
shared = True if "+shared" in self.spec else False
query_parameters = self.spec.last_query.extra_parameters
query2libraries = {
tuple(): ["libblas"],
("c", "fortran"): ["libcblas", "libblas"],
("c",): ["libcblas"],
("fortran",): ["libblas"],
}
key = tuple(sorted(query_parameters))
libraries = query2libraries[key]
return find_libraries(libraries, root=self.prefix, shared=shared, recursive=True)
# TUTORIAL: add a proper `lapack_lib` property, along the lines
# of the `blas_lib` property above. The library that provides
# the lapack API is called `liblapack`.
@property
def headers(self):
include_dir = self.spec.prefix.include
cblas_h = join_path(include_dir, "cblas.h")
lapacke_h = join_path(include_dir, "lapacke.h")
return HeaderList([cblas_h, lapacke_h])
@property
def build_directory(self):
return join_path(
self.stage.source_path,
"spack-build-shared" if self._building_shared else "spack-build-static",
)
def cmake_args(self):
args = ["-DBUILD_SHARED_LIBS:BOOL=" + ("ON" if self._building_shared else "OFF")]
if self.spec.satisfies("+lapacke"):
args.extend(["-DLAPACKE:BOOL=ON", "-DLAPACKE_WITH_TMG:BOOL=ON"])
else:
args.extend(["-DLAPACKE:BOOL=OFF", "-DLAPACKE_WITH_TMG:BOOL=OFF"])
if self.spec.satisfies("@3.6.0:"):
args.append("-DCBLAS=ON") # always build CBLAS
if self.spec.satisfies("%intel"):
# Intel compiler finds serious syntax issues when trying to
# build CBLAS and LapackE
args.extend(["-DCBLAS=OFF", "-DLAPACKE:BOOL=OFF"])
if self.spec.satisfies("%xl") or self.spec.satisfies("%xl_r"):
# use F77 compiler if IBM XL
args.extend(
[
"-DCMAKE_Fortran_COMPILER=" + self.compiler.f77,
"-DCMAKE_Fortran_FLAGS="
+ (" ".join(self.spec.compiler_flags["fflags"]))
+ " -O3 -qnohot",
]
)
# deprecated routines are commonly needed by, for example, suitesparse
        # Note that the OpenBLAS Spack package is built with deprecated routines
args.append("-DBUILD_DEPRECATED:BOOL=ON")
if self.spec.satisfies("+external-blas"):
args.extend(
[
"-DUSE_OPTIMIZED_BLAS:BOOL=ON",
"-DBLAS_LIBRARIES:PATH=" + self.spec["blas"].libs.joined(";"),
]
)
if self.spec.satisfies("+xblas"):
args.extend(
[
"-DXBLAS_INCLUDE_DIR=" + self.spec["netlib-xblas"].prefix.include,
"-DXBLAS_LIBRARY=" + self.spec["netlib-xblas"].libs.joined(";"),
]
)
args.append("-DBUILD_TESTING:BOOL=" + ("ON" if self.run_tests else "OFF"))
return args
# Build, install, and check both static and shared versions of the
# libraries when +shared
@when("+shared")
def cmake(self, spec, prefix):
for self._building_shared in (False, True):
super().cmake(spec, prefix)
@when("+shared")
def build(self, spec, prefix):
for self._building_shared in (False, True):
super().build(spec, prefix)
@when("+shared")
def install(self, spec, prefix):
for self._building_shared in (False, True):
super().install(spec, prefix)
@when("+shared")
def METHOD_NAME(self):
for self._building_shared in (False, True):
super().METHOD_NAME()
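# Illustrative spec (assumption): installing with `spack install netlib-lapack +shared+lapacke`
# drives each phase twice, producing the spack-build-static and spack-build-shared
# trees described in the comment above.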
|
3,174 |
fprint
|
from typing import Callable, Optional
class Colors:
red = "\033[31m"
green = "\033[32m"
yellow = "\033[33m"
blue = "\033[34m"
purple = "\033[35m"
cyan = "\033[36m"
white = "\033[37m"
underline = "\033[2m"
bold = "\033[1m"
negative = "\033[3m"
reset = "\033[0m"
def __getattr__(self, arg):
        # If we get a non-existent color, return the reset color
return self.reset
class Printer:
def __init__(self, string: Optional[str] = "", autospace: bool = False):
self._autospace = autospace
self._string = string
# Default adding text
self.text = lambda text: self + text
# Colors
self.red = lambda text: self + (Colors.red + str(text) + Colors.reset)
self.green = lambda text: self + (Colors.green + str(text) + Colors.reset)
self.yellow = lambda text: self + (Colors.yellow + str(text) + Colors.reset)
self.blue = lambda text: self + (Colors.blue + str(text) + Colors.reset)
self.purple = lambda text: self + (Colors.purple + str(text) + Colors.reset)
self.cyan = lambda text: self + (Colors.cyan + str(text) + Colors.reset)
self.white = lambda text: self + (Colors.white + str(text) + Colors.reset)
# Text effects
self.underline = lambda text: Printer(
self._string + Colors.underline + str(text) + Colors.reset,
)
self.bold = lambda text: Printer(
self._string + Colors.bold + str(text) + Colors.reset,
)
self.negative = lambda text: Printer(
self._string + Colors.negative + str(text) + Colors.reset,
)
# For passing in custom formatting
self.custom = lambda text, effect: self + (effect + str(text) + Colors.reset)
def __repr__(self):
return self._string + Colors.reset
__str__ = __repr__
def __add__(self, other):
extra_space = " " if self._autospace and self._string != "" else ""
return Printer(self._string + extra_space + str(other), self._autospace)
@property
def set_red(self):
return Printer(self._string + Colors.red)
@property
def set_green(self):
return Printer(self._string + Colors.green)
@property
def set_yellow(self):
return Printer(self._string + Colors.yellow)
@property
def set_blue(self):
return Printer(self._string + Colors.blue)
@property
def set_purple(self):
return Printer(self._string + Colors.purple)
@property
def set_cyan(self):
return Printer(self._string + Colors.cyan)
@property
def set_white(self):
return Printer(self._string + Colors.white)
@property
def reset(self):
return Printer(self._string + Colors.reset)
def space(self, count=1):
return Printer(self._string + " " * count)
def newline(self, count=1):
return Printer(self._string + "\n" * count)
    def enable_autospaces(self):
        self._autospace = True
    def disable_autospaces(self):
        self._autospace = False
class FprintFactory:
"""
Factory method for producing a printer with the specified characteristics.
Args:
title (Optional[str]): The title to produce with each printed
message.
time (Optional[Callable]): A method for getting the time to produce
with each method. If ``None``, then no time is sent with a message.
msg_color (Optional[str]): The color of each message. If ``None``,
defaults to white.
auto_bold (bool): Automatically bolds each method. Defaults to ``True``.
newline (int): The number of newlines to print after each method.
"""
def __init__(
self,
title: Optional[str] = None,
time: Optional[Callable] = None,
msg_color: Optional[str] = None,
auto_bold: bool = True,
newline: int = 1,
):
assert time is None or callable(
time,
), "`time` should be `None` for no printing or a function that generates a timestamp."
assert msg_color is None or isinstance(
msg_color,
str,
), "`msg_color` should be `None` for default printing or a string color."
assert isinstance(
auto_bold,
bool,
), "`auto_bold` should be true or false if messages should be printed\
as bold by default or not"
assert newline is None or isinstance(
newline,
int,
), "`newline` should be the number of newlines after the text (default 1)"
# All these can be overwritten if not specified here
self.title = title # Title to print with each message
# Either `None` for no printing or a function that generates a
# timestamp
self.time = time
self.msg_color = (
msg_color # Either `None` for default printing or a string color
)
self.auto_bold = auto_bold # Should each message be bolded by default
self.newline = newline # The number of newlines characters to add to the end
self.printer = Printer()
def METHOD_NAME(self, text: str, **kwargs):
"""
Prints some text with the specified characteristics. Characteristics
can be passed through the kwargs argument or through the class' constructor.
Args:
text (str): The text to format and then print
kwargs: Any characteristics to print with the text. All keyword arguments
are the same as the arguments specified in the constructor.
"""
title = kwargs.get("title", self.title)
time = kwargs.get("time", self.time)
msg_color = kwargs.get("msg_color", self.msg_color)
auto_bold = kwargs.get("auto_bold", self.auto_bold)
newline = kwargs.get("newline", self.newline)
message = self.printer
if title is not None:
message = message.set_blue.bold(title).reset.space()
if time is not None:
t = time()
message = message.bold(t).space()
message += ": "
if auto_bold:
text = str(self.printer.bold(text))
if msg_color is not None:
message = message.custom(text, getattr(Colors, msg_color))
else:
message = message.text(text)
if newline == 1:
print(message)
else:
print(message.newline(newline - 1))
# Standard instantiation
METHOD_NAME = FprintFactory().METHOD_NAME
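# Illustrative usage sketch (assumption, not part of the original module); the title
# and message below are placeholders:
#
# log = FprintFactory(title="nav", msg_color="green", newline=2).METHOD_NAME
# log("Waypoint reached", auto_bold=False)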
|
3,175 |
test get subscripts of
|
# -----------------------------------------------------------------------------
# BSD 3-Clause License
#
# Copyright (c) 2021-2022, Science and Technology Facilities Council.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
# Author: Joerg Henrichs, Bureau of Meteorology
'''This module tests the ComponentIndices class in psyclone/core.'''
from __future__ import absolute_import
import pytest
from psyclone.core import ComponentIndices, VariablesAccessInfo
from psyclone.errors import InternalError
def test_component_indices():
'''Test the ComponentIndices class.
'''
component_indices = ComponentIndices()
assert component_indices.indices_lists == [[]]
assert str(component_indices) == "[[]]"
assert len(component_indices) == 1
component_indices = ComponentIndices(["a"])
assert component_indices.indices_lists == [["a"]]
assert str(component_indices) == "[['a']]"
assert component_indices[0] == ['a']
component_indices = ComponentIndices([["a", "b"], ["c"]]).indices_lists
assert component_indices == [["a", "b"], ["c"]]
assert component_indices[0] == ["a", "b"]
assert component_indices[1] == ["c"]
assert len(component_indices) == 2
# -----------------------------------------------------------------------------
def test_is_array():
'''Test if arrays are correctly detected.
'''
component_indices = ComponentIndices([["a", "b"], ["c"]])
assert component_indices.is_array()
component_indices = ComponentIndices([[], []])
assert not component_indices.is_array()
# -----------------------------------------------------------------------------
def test_component_indices_exceptions():
'''Test that the right exceptions are raised.
'''
with pytest.raises(InternalError) as err:
_ = ComponentIndices(123)
assert "Index object in ComponentIndices constructor must be None, a " \
"list or list of lists, got '123'" in str(err.value)
with pytest.raises(InternalError) as err:
_ = ComponentIndices([[], 123])
assert "ComponentIndices: Invalid list parameter '[[], 123]' - some " \
"elements but not all are lists" in str(err.value)
# -----------------------------------------------------------------------------
def test_iterating():
'''Tests that iterating works, and that the returned values from the
iterator can be used in dictionary-like accesses.
'''
component_indices = ComponentIndices([["a", "b"], ["c"]])
correct_index_pairs = [(0, 0), (0, 1), (1, 0)]
correct = ["a", "b", "c"]
for count, indx in enumerate(component_indices.iterate()):
assert correct_index_pairs[count] == indx
assert correct[count] == component_indices[indx]
# -----------------------------------------------------------------------------
def test_component_indices_getitem_exceptions():
'''Tests useful error messages are provided if a tuple is provided that
is out of bounds.
'''
component_indices = ComponentIndices([["a", "b"], ["c"]])
with pytest.raises(IndexError) as err:
_ = component_indices[(2, 0)]
assert "First index (2) of (2, 0) is out of range." in str(err.value)
with pytest.raises(IndexError) as err:
_ = component_indices[(1, 2)]
assert "Second index (2) of (1, 2) is out of range." in str(err.value)
with pytest.raises(IndexError) as err:
_ = component_indices[(-1, 0)]
assert "First index (-1) of (-1, 0) is out of range." in str(err.value)
with pytest.raises(IndexError) as err:
_ = component_indices[(1, -1)]
assert "Second index (-1) of (1, -1) is out of range." in str(err.value)
# -----------------------------------------------------------------------------
@pytest.mark.parametrize("expression, correct",
# We look for i, j and k; l will be ignored
[("a1(i+i+j+l)", [set(("i", "j"))]),
("a1(1)", [set()]),
("a2(i+j,2*j+k+1)", [set(("i", "j")),
set(("j", "k"))]),
("a3(i,j,i)", [set("i"), set("j"), set("i")]),
("dv(i)%a(j)%b(k)", [set("i"), set("j"),
set("k")])])
def METHOD_NAME(expression, correct, fortran_reader):
'''Tests that getting the indices of an array expressions
works as expected.
'''
source = f'''program test
use my_mod, only: my_type
type(my_type) :: dv(10)
integer i, j, k, l
integer, parameter :: n=10
real, dimension(n) :: a1
real, dimension(n,n) :: a2
real, dimension(n,n,n) :: a3
{expression} = 1
end program test'''
psyir = fortran_reader.psyir_from_source(source)
assign = psyir.children[0].children[0]
# Get all access info for the expression
access_info = VariablesAccessInfo(assign)
# Find the access that is not to i,j, or k --> this must be
# the 'main' array variable we need to check for:
sig = None
loop_vars = set(["i", "j", "k"])
for sig in access_info:
if str(sig) not in loop_vars:
break
# Get all accesses to the array variable. It has only one
# access
access = access_info[sig][0]
result = access.component_indices.get_subscripts_of(loop_vars)
assert result == correct
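# Worked example (assumption, for illustration): for the access dv(i)%a(j)%b(k) the
# component indices are [['i'], ['j'], ['k']], so get_subscripts_of({'i', 'j', 'k'})
# yields [{'i'}, {'j'}, {'k'}], matching the parametrised 'correct' value above.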
|
3,176 |
db project event
|
# Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
from logging import getLogger
from typing import Optional, Dict
from ogr.abstract import GitProject
from ogr.parsing import RepoUrl
from packit.config import PackageConfig, JobConfigTriggerType
from packit_service.config import ServiceConfig, PackageConfigGetter
from packit_service.models import ProjectReleaseModel, ProjectEventModel
from packit_service.worker.events import Event
from packit_service.worker.events.event import use_for_job_config_trigger
logger = getLogger(__name__)
# the decorator is needed in case the DB project event is not created (not valid arguments)
# but we still want to report from pre_check of the PullFromUpstreamHandler
@use_for_job_config_trigger(trigger_type=JobConfigTriggerType.release)
class NewHotnessUpdateEvent(Event):
def __init__(
self,
package_name: str,
version: str,
distgit_project_url: str,
):
super().__init__()
self.package_name = package_name
self.version = version
self.distgit_project_url = distgit_project_url
self._repo_url: Optional[RepoUrl] = None
self._db_project_object: Optional[ProjectReleaseModel]
self._db_project_event: Optional[ProjectEventModel]
@property
def project(self):
if not self._project:
self._project = self.get_project()
return self._project
def get_project(self) -> Optional[GitProject]:
if not self.distgit_project_url:
return None
return ServiceConfig.get_service_config().get_project(
url=self.distgit_project_url
)
@property
def base_project(self):
return None
def _add_release_and_event(self):
if not self._db_project_object or not self._db_project_event:
if not (
self.tag_name
and self.repo_name
and self.repo_namespace
and self.project_url
):
logger.info(
"Not going to create the DB project event, not valid arguments."
)
return None
(
self._db_project_object,
self._db_project_event,
) = ProjectEventModel.add_release_event(
tag_name=self.tag_name,
namespace=self.repo_namespace,
repo_name=self.repo_name,
project_url=self.project_url,
commit_hash=None,
)
@property
def db_project_object(self) -> Optional[ProjectReleaseModel]:
if not self._db_project_object:
self._add_release_and_event()
return self._db_project_object
@property
def METHOD_NAME(self) -> Optional[ProjectEventModel]:
if not self._db_project_event:
self._add_release_and_event()
return self._db_project_event
@property
def packages_config(self):
if not self._package_config_searched and not self._package_config:
self._package_config = self.get_packages_config()
self._package_config_searched = True
return self._package_config
def get_packages_config(self) -> Optional[PackageConfig]:
logger.debug(f"Getting package_config:\n" f"\tproject: {self.project}\n")
package_config = PackageConfigGetter.get_package_config_from_repo(
base_project=None,
project=self.project,
pr_id=None,
reference=None,
fail_when_missing=False,
)
return package_config
@property
def project_url(self) -> Optional[str]:
return (
self.packages_config.upstream_project_url if self.packages_config else None
)
@property
def repo_url(self) -> Optional[RepoUrl]:
if not self._repo_url:
self._repo_url = RepoUrl.parse(self.project_url)
return self._repo_url
@property
def repo_namespace(self) -> Optional[str]:
return self.repo_url.namespace if self.repo_url else None
@property
def repo_name(self) -> Optional[str]:
return self.repo_url.repo if self.repo_url else None
@property
def tag_name(self):
if not (self.packages_config and self.packages_config.upstream_tag_template):
return self.version
return self.packages_config.upstream_tag_template.format(version=self.version)
def get_dict(self, default_dict: Optional[Dict] = None) -> dict:
d = self.__dict__
d["project_url"] = self.project_url
d["tag_name"] = self.tag_name
d["repo_name"] = self.repo_name
d["repo_namespace"] = self.repo_namespace
result = super().get_dict(d)
result.pop("_repo_url")
return result
|
3,177 |
test anydbm modification
|
"""Test script for the dbm.open function based on testdumbdbm.py"""
import unittest
import dbm
import os
from test.support import import_helper
from test.support import os_helper
try:
from dbm import ndbm
except ImportError:
ndbm = None
dirname = os_helper.TESTFN
_fname = os.path.join(dirname, os_helper.TESTFN)
#
# Iterates over every database module supported by dbm currently available.
#
def dbm_iterator():
for name in dbm._names:
try:
mod = __import__(name, fromlist=['open'])
except ImportError:
continue
dbm._modules[name] = mod
yield mod
#
# Clean up all scratch databases we might have created during testing
#
def cleaunup_test_dir():
os_helper.rmtree(dirname)
def setup_test_dir():
cleaunup_test_dir()
os.mkdir(dirname)
class AnyDBMTestCase:
_dict = {'a': b'Python:',
'b': b'Programming',
'c': b'the',
'd': b'way',
'f': b'Guido',
'g': b'intended',
}
def init_db(self):
f = dbm.open(_fname, 'n')
for k in self._dict:
f[k.encode("ascii")] = self._dict[k]
f.close()
def keys_helper(self, f):
keys = sorted(k.decode("ascii") for k in f.keys())
dkeys = sorted(self._dict.keys())
self.assertEqual(keys, dkeys)
return keys
def test_error(self):
self.assertTrue(issubclass(self.module.error, OSError))
def test_anydbm_not_existing(self):
self.assertRaises(dbm.error, dbm.open, _fname)
def test_anydbm_creation(self):
f = dbm.open(_fname, 'c')
self.assertEqual(list(f.keys()), [])
for key in self._dict:
f[key.encode("ascii")] = self._dict[key]
self.read_helper(f)
f.close()
def test_anydbm_creation_n_file_exists_with_invalid_contents(self):
# create an empty file
os_helper.create_empty_file(_fname)
with dbm.open(_fname, 'n') as f:
self.assertEqual(len(f), 0)
def METHOD_NAME(self):
self.init_db()
f = dbm.open(_fname, 'c')
self._dict['g'] = f[b'g'] = b"indented"
self.read_helper(f)
# setdefault() works as in the dict interface
self.assertEqual(f.setdefault(b'xxx', b'foo'), b'foo')
self.assertEqual(f[b'xxx'], b'foo')
f.close()
def test_anydbm_read(self):
self.init_db()
f = dbm.open(_fname, 'r')
self.read_helper(f)
# get() works as in the dict interface
self.assertEqual(f.get(b'a'), self._dict['a'])
self.assertEqual(f.get(b'xxx', b'foo'), b'foo')
self.assertIsNone(f.get(b'xxx'))
with self.assertRaises(KeyError):
f[b'xxx']
f.close()
def test_anydbm_keys(self):
self.init_db()
f = dbm.open(_fname, 'r')
keys = self.keys_helper(f)
f.close()
def test_empty_value(self):
if getattr(dbm._defaultmod, 'library', None) == 'Berkeley DB':
self.skipTest("Berkeley DB doesn't distinguish the empty value "
"from the absent one")
f = dbm.open(_fname, 'c')
self.assertEqual(f.keys(), [])
f[b'empty'] = b''
self.assertEqual(f.keys(), [b'empty'])
self.assertIn(b'empty', f)
self.assertEqual(f[b'empty'], b'')
self.assertEqual(f.get(b'empty'), b'')
self.assertEqual(f.setdefault(b'empty'), b'')
f.close()
def test_anydbm_access(self):
self.init_db()
f = dbm.open(_fname, 'r')
key = "a".encode("ascii")
self.assertIn(key, f)
assert(f[key] == b"Python:")
f.close()
def test_open_with_bytes(self):
dbm.open(os.fsencode(_fname), "c").close()
def test_open_with_pathlib_path(self):
dbm.open(os_helper.FakePath(_fname), "c").close()
def test_open_with_pathlib_path_bytes(self):
dbm.open(os_helper.FakePath(os.fsencode(_fname)), "c").close()
def read_helper(self, f):
keys = self.keys_helper(f)
for key in self._dict:
self.assertEqual(self._dict[key], f[key.encode("ascii")])
def test_keys(self):
with dbm.open(_fname, 'c') as d:
self.assertEqual(d.keys(), [])
a = [(b'a', b'b'), (b'12345678910', b'019237410982340912840198242')]
for k, v in a:
d[k] = v
self.assertEqual(sorted(d.keys()), sorted(k for (k, v) in a))
for k, v in a:
self.assertIn(k, d)
self.assertEqual(d[k], v)
self.assertNotIn(b'xxx', d)
self.assertRaises(KeyError, lambda: d[b'xxx'])
def setUp(self):
self.addCleanup(setattr, dbm, '_defaultmod', dbm._defaultmod)
dbm._defaultmod = self.module
self.addCleanup(cleaunup_test_dir)
setup_test_dir()
class WhichDBTestCase(unittest.TestCase):
def test_whichdb(self):
self.addCleanup(setattr, dbm, '_defaultmod', dbm._defaultmod)
_bytes_fname = os.fsencode(_fname)
fnames = [_fname, os_helper.FakePath(_fname),
_bytes_fname, os_helper.FakePath(_bytes_fname)]
for module in dbm_iterator():
# Check whether whichdb correctly guesses module name
# for databases opened with "module" module.
name = module.__name__
setup_test_dir()
dbm._defaultmod = module
# Try with empty files first
with module.open(_fname, 'c'): pass
for path in fnames:
self.assertEqual(name, self.dbm.whichdb(path))
# Now add a key
with module.open(_fname, 'w') as f:
f[b"1"] = b"1"
# and test that we can find it
self.assertIn(b"1", f)
# and read it
self.assertEqual(f[b"1"], b"1")
for path in fnames:
self.assertEqual(name, self.dbm.whichdb(path))
@unittest.skipUnless(ndbm, reason='Test requires ndbm')
def test_whichdb_ndbm(self):
# Issue 17198: check that ndbm which is referenced in whichdb is defined
with open(_fname + '.db', 'wb'): pass
_bytes_fname = os.fsencode(_fname)
fnames = [_fname, os_helper.FakePath(_fname),
_bytes_fname, os_helper.FakePath(_bytes_fname)]
for path in fnames:
self.assertIsNone(self.dbm.whichdb(path))
def setUp(self):
self.addCleanup(cleaunup_test_dir)
setup_test_dir()
self.dbm = import_helper.import_fresh_module('dbm')
for mod in dbm_iterator():
assert mod.__name__.startswith('dbm.')
suffix = mod.__name__[4:]
testname = f'TestCase_{suffix}'
globals()[testname] = type(testname,
(AnyDBMTestCase, unittest.TestCase),
{'module': mod})
if __name__ == "__main__":
unittest.main()
|
3,178 |
write registry
|
"""
Copyright (C) 2023 Intel Corporation
Part of the Unified-Runtime Project, under the Apache License v2.0 with LLVM Exceptions.
See LICENSE.TXT
SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
Generates a unique id for each spec function that doesn't have it.
"""
from fileinput import FileInput
import util
import yaml
import re
import copy
ENUM_NAME = '$x_function_t'
class quoted(str):
pass
def quoted_presenter(dumper, data):
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='"')
def get_registry_header():
return {'type': 'header', 'desc': quoted('Intel $OneApi Unified Runtime function registry'), 'ordinal': quoted(-1)}
def METHOD_NAME(data, path):
with open(path, 'w') as fout:
yaml.add_representer(quoted, quoted_presenter)
yaml.dump_all(data, fout,
default_flow_style=False,
sort_keys=False,
explicit_start=True)
def find_type_in_specs(specs, type):
return [obj for s in specs for obj in s['objects'] if obj['name'] == type][0]
def get_max_enum(enum):
return int(max(enum['etors'], key=lambda x : int(x['value']))['value'])
def copy_and_strip_prefix_from_enums(enum, prefix):
cpy = copy.deepcopy(enum)
for etor in cpy['etors']:
etor['name'] = etor['name'][len(prefix):]
return cpy
def generate_function_type(specs, meta, update_fn) -> dict:
existing_function_type = find_type_in_specs(specs, '$x_function_t')
existing_etors = {etor['name'] : etor['value'] for etor in existing_function_type['etors']}
max_etor = get_max_enum(existing_function_type)
functions = [obj['class'][len('$x'):] + obj['name'] for s in specs for obj in s['objects'] if obj['type'] == 'function']
registry = list()
for fname in functions:
etor_name = "$X_FUNCTION_" + util.to_snake_case(fname).upper()
id = existing_etors.get(etor_name)
if id is None:
max_etor += 1
id = max_etor
registry.append({
'name': etor_name,
'desc': f'Enumerator for $x{fname}',
'value': str(id)}
)
registry = sorted(registry, key=lambda x : int(x['value']))
existing_function_type['etors'] = registry
update_fn(existing_function_type, meta)
## create a copy to write back to registry.yml
return copy_and_strip_prefix_from_enums(existing_function_type, '$X_FUNCTION_')
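# Illustrative mapping (hypothetical spec object, assuming util.to_snake_case
# converts CamelCase to snake_case): a function object with class "$xContext"
# and name "Create" yields fname "ContextCreate" and the enumerator
# "$X_FUNCTION_CONTEXT_CREATE"; the copy written back to registry.yml has the
# "$X_FUNCTION_" prefix stripped, leaving "CONTEXT_CREATE".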
def generate_structure_type(specs, meta, refresh_fn) -> dict:
structure_type = find_type_in_specs(specs, '$x_structure_type_t')
extended_structs = [obj for s in specs for obj in s['objects'] if re.match(r"struct|union", obj['type']) and 'base' in obj]
max_enum = get_max_enum(structure_type)
structure_type_etors = list()
for struct in extended_structs:
# skip experimental enumerations
if struct['name'].startswith('$x_exp_'):
continue
etor = [mem for mem in struct['members'] if mem['name'] == 'stype'][0]['init']
# try and match the etor
matched_etor = [e for e in structure_type['etors'] if e['name'] == etor]
out_etor = {
'name': etor,
'desc': struct['name']
}
# if no match exists we assign it a new value
if len(matched_etor) == 0:
max_enum += 1
out_etor['value'] = str(max_enum)
else:
out_etor['value'] = matched_etor[0]['value']
structure_type_etors.append(out_etor)
structure_type_etors = sorted(structure_type_etors, key = lambda x : int(x['value']))
structure_type['etors'] = structure_type_etors
refresh_fn(structure_type, meta)
## create a copy to write back to registry.yml
return copy_and_strip_prefix_from_enums(structure_type, '$X_STRUCTURE_TYPE_')
def generate_registry(path, specs, meta, update_fn):
try:
METHOD_NAME([
get_registry_header(),
generate_function_type(specs, meta, update_fn),
generate_structure_type(specs, meta, update_fn)
], path)
except BaseException as e:
print("Failed to generate registry.yml... %s", e)
raise e
|
3,179 |
get frozen defaults
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""The `BuildFileDefaultsParserState.set_defaults` is used by the pants.engine.internals.Parser,
exposed as the `__defaults__` BUILD file symbol.
When parsing a BUILD file (from the rule `pants.engine.internals.build_files.parse_address_family`) the
defaults from the closest parent BUILD file are passed as input to the parser, and the new defaults
resulting after the BUILD file has been parsed are returned in the `AddressFamily`.
These defaults are then applied when creating the `TargetAdaptor` targets by the `Registrar` in the
parser.
"""
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Callable, Iterable, Mapping, Tuple, Union
from pants.engine.addresses import Address
from pants.engine.internals.parametrize import Parametrize
from pants.engine.target import (
Field,
ImmutableValue,
InvalidFieldException,
RegisteredTargetTypes,
Target,
TargetGenerator,
)
from pants.engine.unions import UnionMembership
from pants.util.frozendict import FrozenDict
SetDefaultsValueT = Mapping[str, Any]
SetDefaultsKeyT = Union[str, Tuple[str, ...]]
SetDefaultsT = Mapping[SetDefaultsKeyT, SetDefaultsValueT]
class BuildFileDefaults(FrozenDict[str, FrozenDict[str, ImmutableValue]]):
"""Map target types to default field values."""
class ParametrizeDefault(Parametrize):
"""Parametrize for default field values.
This provides eager validation of the field values, rather than erroring only when applied to
an actual target.
"""
@classmethod
def create(
cls, freeze: Callable[[Any], ImmutableValue], parametrize: Parametrize
) -> ParametrizeDefault:
return cls(
*map(freeze, parametrize.args),
**{kw: freeze(arg) for kw, arg in parametrize.kwargs.items()},
)
@dataclass
class BuildFileDefaultsParserState:
address: Address
defaults: dict[str, Mapping[str, Any]]
registered_target_types: RegisteredTargetTypes
union_membership: UnionMembership
@classmethod
def create(
cls,
path: str,
defaults: BuildFileDefaults,
registered_target_types: RegisteredTargetTypes,
union_membership: UnionMembership,
) -> BuildFileDefaultsParserState:
return cls(
address=Address(path, generated_name="__defaults__"),
defaults=dict(defaults),
registered_target_types=registered_target_types,
union_membership=union_membership,
)
def _freeze_field_value(self, field_type: type[Field], value: Any) -> ImmutableValue:
if isinstance(value, ParametrizeDefault):
return value
elif isinstance(value, Parametrize):
def freeze(v: Any) -> ImmutableValue:
return self._freeze_field_value(field_type, v)
return ParametrizeDefault.create(freeze, value)
else:
return field_type.compute_value(raw_value=value, address=self.address)
def METHOD_NAME(self) -> BuildFileDefaults:
types = self.registered_target_types.aliases_to_types
return BuildFileDefaults(
{
target_alias: FrozenDict(
{
field_type.alias: self._freeze_field_value(field_type, default)
for field_alias, default in fields.items()
for field_type in self._target_type_field_types(types[target_alias])
if field_alias in (field_type.alias, field_type.deprecated_alias)
}
)
for target_alias, fields in self.defaults.items()
}
)
def get(self, target_alias: str) -> Mapping[str, Any]:
# Used by `pants.engine.internals.parser.Parser._generate_symbols.Registrar.__call__`
return self.defaults.get(target_alias, {})
def set_defaults(
self,
*args: SetDefaultsT,
all: SetDefaultsValueT | None = None,
extend: bool = False,
ignore_unknown_fields: bool = False,
ignore_unknown_targets: bool = False,
) -> None:
defaults: dict[str, dict[str, Any]] = (
{} if not extend else {k: dict(v) for k, v in self.defaults.items()}
)
if all is not None:
self._process_defaults(
defaults,
{tuple(self.registered_target_types.aliases): all},
ignore_unknown_fields=True,
ignore_unknown_targets=ignore_unknown_targets,
)
for arg in args:
self._process_defaults(
defaults,
arg,
ignore_unknown_fields=ignore_unknown_fields,
ignore_unknown_targets=ignore_unknown_targets,
)
# Update with new defaults, dropping targets without any default values.
for tgt, default in defaults.items():
if not default:
self.defaults.pop(tgt, None)
else:
self.defaults[tgt] = default
def _target_type_field_types(self, target_type: type[Target]) -> tuple[type[Field], ...]:
return (
*target_type.class_field_types(self.union_membership),
*(target_type.moved_fields if issubclass(target_type, TargetGenerator) else ()),
)
def _process_defaults(
self,
defaults: dict[str, dict[str, Any]],
targets_defaults: SetDefaultsT,
ignore_unknown_fields: bool = False,
ignore_unknown_targets: bool = False,
):
if not isinstance(targets_defaults, dict):
raise ValueError(
f"Expected dictionary mapping targets to default field values for {self.address} "
f"but got: {type(targets_defaults).__name__}."
)
types = self.registered_target_types.aliases_to_types
for target, default in targets_defaults.items():
if not isinstance(default, dict):
raise ValueError(
f"Invalid default field values in {self.address} for target type {target}, "
f"must be an `dict` but was {default!r} with type `{type(default).__name__}`."
)
targets: Iterable[str]
targets = target if isinstance(target, tuple) else (target,)
for target_alias in map(str, targets):
if target_alias in types:
target_type = types[target_alias]
elif ignore_unknown_targets:
continue
else:
raise ValueError(f"Unrecognized target type {target_alias} in {self.address}.")
# Copy default dict if we may mutate it.
raw_values = dict(default) if ignore_unknown_fields else default
# Validate that field exists on target
valid_field_aliases = set(
target_type._get_field_aliases_to_field_types(
self._target_type_field_types(target_type)
).keys()
)
for field_alias in default.keys():
if field_alias not in valid_field_aliases:
if ignore_unknown_fields:
del raw_values[field_alias]
else:
raise InvalidFieldException(
f"Unrecognized field `{field_alias}` for target {target_type.alias}. "
f"Valid fields are: {', '.join(sorted(valid_field_aliases))}.",
)
# Merge all provided defaults for this call.
defaults.setdefault(target_type.alias, {}).update(raw_values)
|
3,180 |
write binary proto
|
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tensorflow as tf
from distutils.version import LooseVersion
from google.protobuf import text_format
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import tensor_util
from tf_nndct.graph import dtypes as nndct_dtypes
from tf_nndct.utils.convert_to_constants import convert_variables_to_constants_v2 as convert_to_constants
from tf_nndct.utils import generic_utils
def node_name_parts_from_input(input_name):
prefix = ''
node_name = ''
suffix = ''
if input_name.startswith('^'):
prefix = '^'
input_name = input_name[1:]
input_parts = input_name.split(':')
if len(input_parts) < 2:
suffix = ''
else:
suffix = ':' + input_parts[1]
node_name = input_parts[0]
return prefix, node_name, suffix
def node_name_from_input(input_name):
"""Strips off ports and other decorations to get the underlying node name."""
prefix, node_name, suffix = node_name_parts_from_input(input_name)
return node_name
def canonical_output_name(input_name):
prefix, node_name, suffix = node_name_parts_from_input(input_name)
if not suffix:
suffix = ':0'
return ''.join([prefix, node_name, suffix])
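# Worked examples for the helpers above:
#   node_name_parts_from_input("^foo/bar:1") -> ("^", "foo/bar", ":1")
#   node_name_from_input("^foo/bar:1")       -> "foo/bar"
#   canonical_output_name("foo/bar")         -> "foo/bar:0"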
def dtype_to_tf_string(dtype):
if type(dtype) == nndct_dtypes.DType:
tf_dtype = nndct_dtypes.to_tf(dtype)
elif type(dtype) == tf_dtypes.DType:
tf_dtype = dtype
return ".".join(["tf", tf_dtypes._TYPE_TO_STRING[tf_dtype]])
def parse_tf_tensor(tensor):
"""Parse data from given `tensor`."""
if not isinstance(tensor, tensor_pb2.TensorProto):
raise TypeError("TensorProto required, but given {}".format(type(tensor)))
return tensor_util.MakeNdarray(tensor)
def values_from_tf_const(node_def):
"""Extracts the values from a const NodeDef as a numpy ndarray.
Args:
node_def: Const NodeDef that has the values we want to access.
Returns:
Numpy ndarray containing the values.
Raises:
ValueError: If the node isn't a Const.
"""
if node_def.op != "Const":
raise ValueError("Node '%s' should be a Const op." % node_def.name)
input_tensor = node_def.attr["value"].tensor
tensor_value = tensor_util.MakeNdarray(input_tensor)
return tensor_value
def parse_attr_proto(attr_proto):
"""Convert a list of AttributeProto to a dict, with names as keys."""
attrs = {}
for key, value in attr_proto.items():
attrs[key] = get_attr_proto_value(value)
return attrs
def get_attr_proto_value(attr_value):
"""Returns the value of the attr of this buf with the given `name`.
Args:
attr_value: attrvalue protobuf.
Returns:
The value of the attr, as a Python object.
Raises:
ValueError: If this op does not have an attr with the given `name`.
"""
fields = ["s", "i", "f", "b", "type", "shape", "tensor", "func"]
x = attr_value
ret = []
# Treat an empty oneof value as an empty list.
if not x.WhichOneof("value"):
return ret
if x.HasField("list"):
for f in fields:
if getattr(x.list, f):
if f == "type":
ret += [tf_dtypes.as_dtype(x) for x in list(getattr(x.list, f))]
else:
ret += list(getattr(x.list, f))
else:
for f in fields:
if x.HasField(f):
if f == "type":
ret = tf_dtypes.as_dtype(getattr(x, f))
else:
ret = getattr(x, f)
return ret
def tf_shape_to_list(shape):
"""Get shape from tensorflow attr 'shape'."""
dims = None
try:
if not shape.unknown_rank:
dims = [int(d.size) for d in shape.dim]
except: # pylint: disable=bare-except
pass
return dims
def tf_tensor_shape(tensor):
shape = []
try:
shape = tensor.get_shape().as_list()
except Exception: # pylint: disable=broad-except
shape = None
return shape
def write_proto(path, message, as_text=False):
dir_name = os.path.dirname(path)
generic_utils.mkdir_if_not_exist(dir_name)
if dir_name:
os.makedirs(dir_name, exist_ok=True)
if as_text:
with open(path, "w") as f:
f.write(text_format.MessageToString(message))
else:
with open(path, "wb") as f:
f.write(message.SerializeToString())
def write_text_proto(path, message):
write_proto(path, message, as_text=True)
def METHOD_NAME(path, message):
write_proto(path, message, as_text=False)
def tf_version():
return tf.__version__
def is_tf_version_equal(version: str):
return tf_version() == LooseVersion(version)
def is_tf_version_greater_than(version: str):
return tf_version() > LooseVersion(version)
def is_tf_version_greater_equal(version: str):
return tf_version() >= LooseVersion(version)
def is_tf_version_less_than(version: str):
return tf_version() < LooseVersion(version)
def is_tf_version_less_equal(version: str):
return tf_version() <= LooseVersion(version)
def is_tf_concat(op):
return op.type in ("Concat", "ConcatV2", "ConcatV3")
def is_tf_const(op):
return op.type in ["Const", "ConstV2"]
def is_tf_identity(op):
return op.type == "Identity" or op.type == "IdentityN"
def is_tf_placeholder(op):
return op.type == "Placeholder"
def is_tf_biasadd(op):
return op.type == "BiasAdd"
|
3,181 |
unflatten
|
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Contains the BasisEmbedding template.
"""
# pylint: disable-msg=too-many-branches,too-many-arguments,protected-access
import pennylane as qml
import pennylane.numpy as np
from pennylane.operation import Operation, AnyWires
from pennylane.wires import Wires
class BasisEmbedding(Operation):
r"""Encodes :math:`n` binary features into a basis state of :math:`n` qubits.
For example, for ``features=np.array([0, 1, 0])`` or ``features=2`` (binary 10), the
quantum system will be prepared in state :math:`|010 \rangle`.
.. warning::
``BasisEmbedding`` calls a circuit whose architecture depends on the binary features.
The ``features`` argument is therefore not differentiable when using the template, and
gradients with respect to the argument cannot be computed by PennyLane.
Args:
features (tensor_like): binary input of shape ``(len(wires), )``
wires (Any or Iterable[Any]): wires that the template acts on
Example:
Basis embedding encodes the binary feature vector into a basis state.
.. code-block:: python
dev = qml.device('default.qubit', wires=3)
@qml.qnode(dev)
def circuit(feature_vector):
qml.BasisEmbedding(features=feature_vector, wires=range(3))
return qml.state()
X = [1,1,1]
The resulting circuit is:
>>> print(qml.draw(circuit, expansion_strategy="device")(X))
0: ──X─┤ State
1: ──X─┤ State
2: ──X─┤ State
And, the output state is:
>>> print(circuit(X))
[0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 1.+0.j]
Thus, ``[1,1,1]`` is mapped to :math:`|111 \rangle`.
"""
num_wires = AnyWires
grad_method = None
def _flatten(self):
basis_state = self.hyperparameters["basis_state"]
basis_state = tuple(basis_state) if isinstance(basis_state, list) else basis_state
return tuple(), (self.wires, basis_state)
@classmethod
def METHOD_NAME(cls, _, metadata) -> "BasisEmbedding":
return cls(features=metadata[1], wires=metadata[0])
def __init__(self, features, wires, id=None):
if isinstance(features, list):
features = qml.math.stack(features)
tracing = qml.math.is_abstract(features)
if qml.math.shape(features) == ():
if not tracing and features >= 2 ** len(wires):
raise ValueError(
f"Features must be of length {len(wires)}, got features={features} which is >= {2 ** len(wires)}"
)
bin = 2 ** np.arange(len(wires))[::-1]
features = qml.math.where((features & bin) > 0, 1, 0)
wires = Wires(wires)
shape = qml.math.shape(features)
if len(shape) != 1:
raise ValueError(f"Features must be one-dimensional; got shape {shape}.")
n_features = shape[0]
if n_features != len(wires):
raise ValueError(
f"Features must be of length {len(wires)}; got length {n_features} (features={features})."
)
if not tracing:
features = list(qml.math.toarray(features))
if not set(features).issubset({0, 1}):
raise ValueError(f"Basis state must only consist of 0s and 1s; got {features}")
self._hyperparameters = {"basis_state": features}
super().__init__(wires=wires, id=id)
@property
def num_params(self):
return 0
@staticmethod
def compute_decomposition(wires, basis_state): # pylint: disable=arguments-differ
r"""Representation of the operator as a product of other operators.
.. math:: O = O_1 O_2 \dots O_n.
.. seealso:: :meth:`~.BasisEmbedding.decomposition`.
Args:
features (tensor-like): binary input of shape ``(len(wires), )``
wires (Any or Iterable[Any]): wires that the operator acts on
Returns:
list[.Operator]: decomposition of the operator
**Example**
>>> features = torch.tensor([1, 0, 1])
>>> qml.BasisEmbedding.compute_decomposition(features, wires=["a", "b", "c"])
[PauliX(wires=['a']),
PauliX(wires=['c'])]
"""
if not qml.math.is_abstract(basis_state):
ops_list = []
for wire, bit in zip(wires, basis_state):
if bit == 1:
ops_list.append(qml.PauliX(wire))
return ops_list
ops_list = []
for wire, state in zip(wires, basis_state):
ops_list.append(qml.PhaseShift(state * np.pi / 2, wire))
ops_list.append(qml.RX(state * np.pi, wire))
ops_list.append(qml.PhaseShift(state * np.pi / 2, wire))
return ops_list
|
3,182 |
test create temp table
|
from __future__ import annotations
import pathlib
from contextlib import _GeneratorContextManager, contextmanager
from typing import TYPE_CHECKING, Any, Callable, Generator, Optional
import pytest
from pydantic import ValidationError
from great_expectations.datasource.fluent import SqliteDatasource
from tests.datasource.fluent.conftest import sqlachemy_execution_engine_mock_cls
if TYPE_CHECKING:
from great_expectations.data_context import AbstractDataContext
@pytest.fixture
def sqlite_datasource_name() -> str:
return "sqlite_datasource"
@pytest.fixture
def sqlite_database_path() -> pathlib.Path:
relative_path = pathlib.Path(
"..",
"..",
"test_sets",
"taxi_yellow_tripdata_samples",
"sqlite",
"yellow_tripdata.db",
)
return pathlib.Path(__file__).parent.joinpath(relative_path).resolve(strict=True)
@pytest.fixture
def sqlite_datasource(
empty_data_context, sqlite_database_path, sqlite_datasource_name
) -> SqliteDatasource:
connection_string = f"sqlite:///{sqlite_database_path}"
return SqliteDatasource(
name=sqlite_datasource_name,
connection_string=connection_string, # type: ignore[arg-type] # pydantic will coerce
)
@pytest.mark.unit
def test_connection_string_starts_with_sqlite(
sqlite_datasource, sqlite_database_path, sqlite_datasource_name
):
# The actual file doesn't matter, only its existence, since SQLAlchemy does a check
# when it creates the database engine.
assert sqlite_datasource.name == sqlite_datasource_name
assert sqlite_datasource.connection_string == f"sqlite:///{sqlite_database_path}"
@pytest.mark.unit
def test_connection_string_that_does_not_start_with_sqlite():
name = "sqlite_datasource"
connection_string = "stuff+sqlite:///path/to/database/file.db"
with pytest.raises(ValidationError) as e:
SqliteDatasource(
name=name,
connection_string=connection_string,
)
# the first error is due to missing a config template string
assert e.value.errors()[1]["msg"] == "URL scheme not permitted"
assert e.value.errors()[1].get("ctx") == {
"allowed_schemes": {
"sqlite",
"sqlite+aiosqlite",
"sqlite+pysqlcipher",
"sqlite+pysqlite",
}
}
@pytest.mark.unit
def test_non_select_query_asset(sqlite_datasource):
with pytest.raises(ValueError):
sqlite_datasource.add_query_asset(name="query_asset", query="* from table")
# Test double used to return canned responses for splitter queries.
@contextmanager
def _create_sqlite_source(
data_context: Optional[AbstractDataContext] = None,
splitter_query_response: Optional[list[tuple[str]]] = None,
create_temp_table: bool = True,
) -> Generator[Any, Any, Any]:
execution_eng_cls = sqlachemy_execution_engine_mock_cls(
validate_batch_spec=lambda _: None,
dialect="sqlite",
splitter_query_response=splitter_query_response,
)
# The type ignores on execution_engine_override are needed because it is a
# generic. We don't care about the exact type since we swap it out with our
# mock for the purpose of this test and then restore the original.
original_override = SqliteDatasource.execution_engine_override # type: ignore[misc]
try:
SqliteDatasource.execution_engine_override = execution_eng_cls # type: ignore[misc]
sqlite_datasource = SqliteDatasource(
name="sqlite_datasource",
connection_string="sqlite://", # type: ignore[arg-type] # pydantic will coerce
create_temp_table=create_temp_table,
)
if data_context:
sqlite_datasource._data_context = data_context
yield sqlite_datasource
finally:
SqliteDatasource.execution_engine_override = original_override # type: ignore[misc]
@pytest.fixture
def create_sqlite_source() -> (
Callable[
[Optional[AbstractDataContext], list[tuple[str]]], _GeneratorContextManager[Any]
]
):
return _create_sqlite_source
@pytest.mark.unit
@pytest.mark.parametrize(
[
"add_splitter_method_name",
"splitter_kwargs",
"splitter_query_responses",
"sorter_args",
"all_batches_cnt",
"specified_batch_request",
"specified_batch_cnt",
"last_specified_batch_metadata",
],
[
pytest.param(
"add_splitter_hashed_column",
{"column_name": "passenger_count", "hash_digits": 3},
[("abc",), ("bcd",), ("xyz",)],
["hash"],
3,
{"hash": "abc"},
1,
{"hash": "abc"},
id="hash",
),
pytest.param(
"add_splitter_converted_datetime",
{"column_name": "pickup_datetime", "date_format_string": "%Y-%m-%d"},
[("2019-02-01",), ("2019-02-23",)],
["datetime"],
2,
{"datetime": "2019-02-23"},
1,
{"datetime": "2019-02-23"},
id="converted_datetime",
),
],
)
def test_sqlite_specific_splitter(
empty_data_context,
create_sqlite_source,
add_splitter_method_name,
splitter_kwargs,
splitter_query_responses,
sorter_args,
all_batches_cnt,
specified_batch_request,
specified_batch_cnt,
last_specified_batch_metadata,
):
with create_sqlite_source(
data_context=empty_data_context,
splitter_query_response=[response for response in splitter_query_responses],
) as source:
asset = source.add_query_asset(name="query_asset", query="SELECT * from table")
getattr(asset, add_splitter_method_name)(**splitter_kwargs)
asset.add_sorters(sorter_args)
# Test getting all batches
all_batches = asset.get_batch_list_from_batch_request(
asset.build_batch_request()
)
assert len(all_batches) == all_batches_cnt
# Test getting specified batches
specified_batches = asset.get_batch_list_from_batch_request(
asset.build_batch_request(specified_batch_request)
)
assert len(specified_batches) == specified_batch_cnt
assert specified_batches[-1].metadata == last_specified_batch_metadata
@pytest.mark.unit
def METHOD_NAME(empty_data_context, create_sqlite_source):
with create_sqlite_source(
data_context=empty_data_context, create_temp_table=False
) as source:
assert source.create_temp_table is False
asset = source.add_query_asset(name="query_asset", query="SELECT * from table")
_ = asset.get_batch_list_from_batch_request(asset.build_batch_request())
assert source._execution_engine._create_temp_table is False
|
3,183 |
name
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetRouteMapResult',
'AwaitableGetRouteMapResult',
'get_route_map',
'get_route_map_output',
]
@pulumi.output_type
class GetRouteMapResult:
"""
The RouteMap child resource of a Virtual hub.
"""
def __init__(__self__, associated_inbound_connections=None, associated_outbound_connections=None, etag=None, id=None, METHOD_NAME=None, provisioning_state=None, rules=None, type=None):
if associated_inbound_connections and not isinstance(associated_inbound_connections, list):
raise TypeError("Expected argument 'associated_inbound_connections' to be a list")
pulumi.set(__self__, "associated_inbound_connections", associated_inbound_connections)
if associated_outbound_connections and not isinstance(associated_outbound_connections, list):
raise TypeError("Expected argument 'associated_outbound_connections' to be a list")
pulumi.set(__self__, "associated_outbound_connections", associated_outbound_connections)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", METHOD_NAME)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if rules and not isinstance(rules, list):
raise TypeError("Expected argument 'rules' to be a list")
pulumi.set(__self__, "rules", rules)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(METHOD_NAME="associatedInboundConnections")
def associated_inbound_connections(self) -> Optional[Sequence[str]]:
"""
List of connections which have this RouteMap associated for inbound traffic.
"""
return pulumi.get(self, "associated_inbound_connections")
@property
@pulumi.getter(METHOD_NAME="associatedOutboundConnections")
def associated_outbound_connections(self) -> Optional[Sequence[str]]:
"""
List of connections which have this RouteMap associated for outbound traffic.
"""
return pulumi.get(self, "associated_outbound_connections")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(METHOD_NAME="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the RouteMap resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def rules(self) -> Optional[Sequence['outputs.RouteMapRuleResponse']]:
"""
List of RouteMap rules to be applied.
"""
return pulumi.get(self, "rules")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetRouteMapResult(GetRouteMapResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetRouteMapResult(
associated_inbound_connections=self.associated_inbound_connections,
associated_outbound_connections=self.associated_outbound_connections,
etag=self.etag,
id=self.id,
METHOD_NAME=self.METHOD_NAME,
provisioning_state=self.provisioning_state,
rules=self.rules,
type=self.type)
def get_route_map(resource_group_name: Optional[str] = None,
route_map_name: Optional[str] = None,
virtual_hub_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRouteMapResult:
"""
Retrieves the details of a RouteMap.
Azure REST API version: 2023-02-01.
:param str resource_group_name: The resource group name of the RouteMap's resource group.
:param str route_map_name: The name of the RouteMap.
:param str virtual_hub_name: The name of the VirtualHub containing the RouteMap.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['routeMapName'] = route_map_name
__args__['virtualHubName'] = virtual_hub_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:network:getRouteMap', __args__, opts=opts, typ=GetRouteMapResult).value
return AwaitableGetRouteMapResult(
associated_inbound_connections=pulumi.get(__ret__, 'associated_inbound_connections'),
associated_outbound_connections=pulumi.get(__ret__, 'associated_outbound_connections'),
etag=pulumi.get(__ret__, 'etag'),
id=pulumi.get(__ret__, 'id'),
METHOD_NAME=pulumi.get(__ret__, 'name'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
rules=pulumi.get(__ret__, 'rules'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_route_map)
def get_route_map_output(resource_group_name: Optional[pulumi.Input[str]] = None,
route_map_name: Optional[pulumi.Input[str]] = None,
virtual_hub_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetRouteMapResult]:
"""
Retrieves the details of a RouteMap.
Azure REST API version: 2023-02-01.
:param str resource_group_name: The resource group name of the RouteMap's resource group.
:param str route_map_name: The name of the RouteMap.
:param str virtual_hub_name: The name of the VirtualHub containing the RouteMap.
"""
...
|
3,184 |
do template
|
"""
Base class for TIM plugin server. THIS IS DEPRECATED, DO NOT USE IN NEW CODE!
Serving from local port 5000.
"""
import http.server
import json
import logging
import os
import socketserver
from tim_common.fileParams import (
get_template,
file_to_string,
do_headers,
multi_post_params,
get_param,
QueryClass,
get_params,
post_params,
)
PORT = 5000
PROGDIR = "."
class TimServer(http.server.BaseHTTPRequestHandler):
"""Base class for TIM-server. THIS IS DEPRECATED, DO NOT USE IN NEW CODE!"""
def __init__(self, request, client_address, _server):
super().__init__(request, client_address, _server)
self.user_id = "--"
def do_OPTIONS(self):
"""Do needed things for OPTIONS request.
:return: nothing
"""
print("do_OPTIONS ==============================================")
do_headers(self, "text/plain")
print(self.path)
print(self.headers)
def do_GET(self):
"""Do needed things for GET request.
:return: nothing
"""
# print("do_GET ==================================================")
if self.path.find("/reqs") >= 0:
return self.do_reqs()
if self.path.find("/favicon.ico") >= 0:
return self.send_response(404)
if self.path.find("/template") >= 0:
return self.send_text(self.METHOD_NAME(get_params(self)), "text/plain")
fname = self.path.split("?")[0]
if fname.find(".css") >= 0:
return self.send_text_file(fname, "css", "text/css")
if fname.find(".js") >= 0:
return self.send_text_file(fname, "js", "application/javascript")
if fname.find(".html") >= 0:
return self.send_text_file(fname, "html", "text/html")
return self.do_all(get_params(self))
def do_POST(self):
"""Do needed things for POST request This may be a f.ex a request single html-plugin or multiple plugins.
:return: nothing
"""
# print("do_POST =================================================")
if self.path.find("/multihtml") < 0:
return self.do_all(post_params(self))
print("do_POST MULTIHTML ==========================================")
queries = multi_post_params(self)
do_headers(self, "application/json")
htmls = []
self.user_id = get_param(queries[0], "user_id", "--")
print("UserId:", self.user_id)
log(self)
# print(queries)
for query in queries:
# print(query.jso)
# print(str(query))
s = self.get_html(query)
# print(s)
htmls.append(s)
# print(htmls)
sresult = json.dumps(htmls)
self.wout(sresult + "\n")
log(self) # to measure time spend in doing all the html
def do_PUT(self):
"""Do needed things for PUT request.
:return: nothing
"""
# print("do_PUT =================================================")
self.do_all(post_params(self))
def wout(self, s: str):
"""Write s to servers output stream as UTF8.
:rtype : object
:param s: string to write
:return: nothing
"""
self.wfile.write(s.encode("UTF-8"))
def send_text_file(self, name: str, ftype: str, content_type: str):
"""Sends a file to server from directory ftype with content_type.
:param name: files name part, possible extra directories
:param ftype: files type (js, html, css), specifies also the directory where to get the file
:param content_type: the file's content type
:return: nothing
"""
# fname = re.sub(".*/", "", name)
fname = os.path.basename(name)
do_headers(self, content_type)
return self.wout(file_to_string(ftype + "/" + fname))
def send_text(self, txt: str, content_type: str):
"""Sends a txt to server.
:param txt: text to send
:param content_type: the file's content type
:return: nothing
"""
# fname = re.sub(".*/", "", name)
do_headers(self, content_type)
return self.wout(txt)
def get_html(self, query: QueryClass) -> str:
"""Return the html for this query. Params are dumbed as hexstring to avoid problems with html input and so on.
:rtype : str
:param query: get or put params
:return : html string for this markup
"""
return ""
def get_reqs_result(self) -> dict:
"""
:return: reqs result as json
"""
return {}
def do_reqs(self):
"""Answer to /reqs route.
:type self: TimServer
"""
do_headers(self, "application/json")
result_json = self.get_reqs_result()
result_str = json.dumps(result_json)
return self.wout(result_str)
def METHOD_NAME(self, query: QueryClass):
"""Gets a template.
:rtype : str
:param query: get or put params
:return: template result as json
"""
tempfile = get_param(query, "file", "")
tidx = get_param(query, "idx", "0")
return get_template("templates", tidx, tempfile)
def do_all(self, query: QueryClass):
"""Do all other routes.
:param query: post and get params
:return: nothing
"""
if self.path.find("/html") >= 0:
do_headers(self, "text/html; charset=utf-8")
s = self.get_html(query)
return self.wout(s)
if self.path.find("/answer") >= 0:
return self.do_answer(query)
do_headers(self, "text/plain")
return self.wout("Unknown query: " + self.path)
def do_answer(self, query: QueryClass):
"""Do answer route.
:param query: post and get params
:return: nothing
"""
def log(request: TimServer):
"""Log the time and user.
:param request:
:return: Nothing
"""
agent = " :AG: " + request.headers["User-Agent"]
if agent.find("ython") >= 0:
agent = ""
logging.info(request.path + agent + " u:" + request.user_id)
# When debugging on Windows, you have to switch to ThreadingMixIn
# If ThreadingMixIn is run on Linux, does chdir change the directory for everyone?
# The problem was fixed so that all run commands get prgpath for their own use
# if __debug__:
# if True:
class ThreadedHTTPServer(socketserver.ThreadingMixIn, http.server.HTTPServer):
"""Handle requests in a separate thread."""
print("Debug mode/ThreadingMixIn")
# else:
# class ThreadedHTTPServer(socketserver.ForkingMixIn, http.server.HTTPServer):
# """Handle requests in a separate thread."""
# print("Normal mode/ForkingMixIn")
def start_server(http_server):
if not os.path.exists("/var/log"):
os.makedirs("/var/log")
# Logging to file is disabled for now because Docker redirects stdin to an internal JSON file automatically
# and setting ownership to volumes via Docker is not possible.
# logging.basicConfig(filename='/var/log/' + logname + '.log', level=logging.INFO, format='%(asctime)s %(message)s')
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
server = ThreadedHTTPServer(("", PORT), http_server)
print("Starting server, use <Ctrl-C> to stop")
logging.info("Starting server")
server.serve_forever()
|
3,185 |
post operations
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"network route-table route delete",
confirmation="Are you sure you want to perform this operation?",
)
class Delete(AAZCommand):
"""Delete a route from a route table.
:example: Delete a route from a route table.
az network route-table route delete -g MyResourceGroup --route-table-name MyRouteTable -n MyRoute
"""
_aaz_info = {
"version": "2015-06-15",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/routetables/{}/routes/{}", "2015-06-15"],
]
}
AZ_SUPPORT_NO_WAIT = True
def _handler(self, command_args):
super()._handler(command_args)
return self.build_lro_poller(self._execute_operations, None)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.name = AAZStrArg(
options=["-n", "--name"],
help="Route name.",
required=True,
id_part="child_name_1",
)
_args_schema.route_table_name = AAZStrArg(
options=["--route-table-name"],
help="Route table name.",
required=True,
id_part="name",
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
yield self.RoutesDelete(ctx=self.ctx)()
self.METHOD_NAME()
@register_callback
def pre_operations(self):
pass
@register_callback
def METHOD_NAME(self):
pass
class RoutesDelete(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [202]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [200]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [204]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_204,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}/routes/{routeName}",
**self.url_parameters
)
@property
def method(self):
return "DELETE"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"routeName", self.ctx.args.name,
required=True,
),
**self.serialize_url_param(
"routeTableName", self.ctx.args.route_table_name,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2015-06-15",
required=True,
),
}
return parameters
def on_200(self, session):
pass
def on_204(self, session):
pass
class _DeleteHelper:
"""Helper class for Delete"""
__all__ = ["Delete"]
|
3,186 |
get sim station property options
|
import json
import plotly
import numpy as np
from NuRadioReco.utilities import units
from NuRadioReco.framework.parameters import stationParameters as stnp
from NuRadioReco.framework.parameters import showerParameters as shp
import radiotools.helper as hp
from dash import html
from dash import dcc
from dash.dependencies import Input, Output, State
from NuRadioReco.eventbrowser.app import app
import NuRadioReco.eventbrowser.dataprovider
import logging
logger = logging.getLogger('traces')
provider = NuRadioReco.eventbrowser.dataprovider.DataProvider()
layout = [
dcc.Graph(id='sim-event-3d', style={'flex': '1'}),
html.Div([
dcc.Dropdown(id='sim-station-properties-dropdown', options=[], multi=True, value=[]),
html.Div(id='sim-station-properties-table', className='table table-striped')
], style={'flex': '1', 'min-height': '500px'})
]
@app.callback(
Output('sim-event-3d', 'figure'),
[Input('event-counter-slider', 'value'),
Input('filename', 'value'),
Input('station-id-dropdown', 'value')],
[State('user_id', 'children')]
)
def update_sim_event_3d(i_event, filename, station_id, juser_id):
if filename is None or station_id is None:
return {}
user_id = json.loads(juser_id)
nurio = provider.get_file_handler(user_id, filename)
evt = nurio.get_event_i(i_event)
station = evt.get_station(station_id)
det = nurio.get_detector()
det.update(station.get_station_time())
sim_station = station.get_sim_station()
sim_showers = [sim_shower for sim_shower in evt.get_sim_showers()]
if sim_station is None:
logger.info("No simulated station for selected event and station")
return {}
data = [plotly.graph_objs.Scatter3d(
x=[0],
y=[0],
z=[0],
mode='markers',
name='Station'
)]
vertex, neutrino_path = None, None
if sim_station.has_parameter(stnp.nu_vertex): # look for the neutrino vertex, direction in the sim_station
vertex = sim_station.get_parameter(stnp.nu_vertex)
event_type = 'Neutrino'
if sim_station.has_parameter(stnp.nu_zenith) and sim_station.has_parameter(stnp.nu_azimuth):
neutrino_path = hp.spherical_to_cartesian(sim_station.get_parameter(stnp.nu_zenith),
sim_station.get_parameter(stnp.nu_azimuth))
elif len(sim_showers) > 0: # look in the event sim_showers
if sim_station.is_neutrino():
event_type = 'Neutrino'
vertices = np.unique([ss.get_parameter(shp.vertex) for ss in sim_showers], axis=0)
else:
event_type = 'Cosmic Ray'
vertices = np.unique([ss.get_parameter(shp.core) for ss in sim_showers], axis=0)
zeniths = np.unique([ss.get_parameter(shp.zenith) for ss in sim_showers], axis=0)
azimuths = np.unique([ss.get_parameter(shp.azimuth) for ss in sim_showers], axis=0)
if any([len(k) > 1 for k in [vertices, zeniths, azimuths]]):
logger.warning("Event contains more than one shower. Only the first shower will be shown.")
if len(vertices):
vertex = vertices[0] - det.get_absolute_position(station_id) # shower vertex coordinates are global coordinates
if len(zeniths) & len(azimuths):
neutrino_path = hp.spherical_to_cartesian(zeniths[0], azimuths[0])
else:
logger.info("Simulated neutrino vertex not found.")
plot_range = 1 * units.km
if vertex is not None:
data.append(plotly.graph_objs.Scatter3d(
x=[vertex[0]],
y=[vertex[1]],
z=[vertex[2]],
mode='markers',
name='Interaction Vertex'
))
plot_range = 1.5 * np.max(np.abs(vertex))
if neutrino_path is not None:
data.append(plotly.graph_objs.Scatter3d(
x=[vertex[0], vertex[0] + .25 * plot_range * neutrino_path[0]],
y=[vertex[1], vertex[1] + .25 * plot_range * neutrino_path[1]],
z=[vertex[2], vertex[2] + .25 * plot_range * neutrino_path[2]],
name='{} Direction'.format(event_type),
mode='lines'
))
fig = plotly.graph_objs.Figure(
data=data,
layout=plotly.graph_objs.Layout(
width=1000,
height=1000,
legend={
'orientation': 'h',
'y': 1.1
},
scene={
'aspectmode': 'manual',
'aspectratio': {
'x': 2,
'y': 2,
'z': 1
},
'xaxis': {
'range': [-plot_range, plot_range]
},
'yaxis': {
'range': [-plot_range, plot_range]
},
'zaxis': {
'range': [-plot_range, np.max([0, vertex[2] + .3 * plot_range * neutrino_path[2]])]
}
}
)
)
return fig
@app.callback(Output('sim-station-properties-dropdown', 'options'),
[Input('event-counter-slider', 'value'),
Input('filename', 'value'),
Input('station-id-dropdown', 'value')],
[State('user_id', 'children')])
def METHOD_NAME(i_event, filename, station_id, juser_id):
if filename is None or station_id is None:
logger.info('No file or station selected')
return []
user_id = json.loads(juser_id)
nurio = provider.get_file_handler(user_id, filename)
evt = nurio.get_event_i(i_event)
station = evt.get_station(station_id).get_sim_station()
if station is None:
logger.info('No simulated station found')
return []
options = []
all_params = []
for parameter in stnp:
all_params.append(parameter.name)
if station.has_parameter(parameter):
options.append({'label': parameter.name, 'value': parameter.value})
found_params = [option['label'] for option in options]
not_found_params = [param for param in all_params if param not in found_params]
logger.info('Simulated station has the following parameters:\n {}\nNot defined are:\n {}'.format(found_params, not_found_params))
return options
@app.callback(Output('sim-station-properties-table', 'children'),
[Input('event-counter-slider', 'value'),
Input('filename', 'value'),
Input('sim-station-properties-dropdown', 'value'),
Input('station-id-dropdown', 'value')],
[State('user_id', 'children')])
def get_sim_station_property_table(i_event, filename, properties, station_id, juser_id):
if filename is None or station_id is None:
return []
user_id = json.loads(juser_id)
nurio = provider.get_file_handler(user_id, filename)
evt = nurio.get_event_i(i_event)
station = evt.get_station(station_id).get_sim_station()
reply = []
for prop in properties:
reply.append(
html.Div([
html.Div(str(stnp(prop).name), className='custom-table-td'),
html.Div(str(station.get_parameter(stnp(prop))), className='custom-table-td custom-table-td-last')
], className='custom-table-row')
)
return reply
|
3,187 |
test eq
|
import pickle
from bluesky.plans import count
import pytest
from ..queries import (
Contains,
FullText,
In,
Key,
NotIn,
TimeRange,
Regex,
ScanID,
ScanIDRange,
)
from ..tests.utils import get_uids
def test_time_range():
# TODO Test an actual search.
# RE does not let us spoof time, so this is not straightforward to do cleanly.
with pytest.raises(ValueError):
# since must not be greater than until
TimeRange(since="2021", until="2020")
def test_pickle():
"Ensure that query objects are pickle-able."
q = TimeRange(since="2020-01-01 9:52", timezone="US/Eastern")
serialized = pickle.dumps(q)
deserialized = pickle.loads(serialized)
assert q == deserialized
def test_full_text(c, RE, hw):
RE.subscribe(c.v1.insert)
(should_match,) = get_uids(RE(count([hw.det]), foo="some words"))
(should_not_match,) = get_uids(RE(count([hw.det])))
results = c.search(FullText("some words"))
assert should_match in results
assert should_not_match not in results
def test_regex(c, RE, hw):
RE.subscribe(c.v1.insert)
(should_match1,) = get_uids(RE(count([hw.det]), foo="a1"))
(should_match2,) = get_uids(RE(count([hw.det]), foo="a2"))
(should_not_match,) = get_uids(RE(count([hw.det]), foo="a3", bar="a1"))
results = c.search(Regex("foo", "a[1-2]"))
assert should_match1 in results
assert should_match2 in results
assert should_not_match not in results
def METHOD_NAME(c, RE, hw):
RE.subscribe(c.v1.insert)
(should_match,) = get_uids(RE(count([hw.det]), foo="a"))
(should_not_match,) = get_uids(RE(count([hw.det]), foo="b"))
results = c.search(Key("foo") == "a")
assert should_match in results
assert should_not_match not in results
def test_not_eq(c, RE, hw):
RE.subscribe(c.v1.insert)
(should_match,) = get_uids(RE(count([hw.det]), foo="a"))
(should_not_match,) = get_uids(RE(count([hw.det]), foo="b"))
results = c.search(Key("foo") != "b")
assert should_match in results
assert should_not_match not in results
def test_scan_id(c, RE, hw):
RE.subscribe(c.v1.insert)
(should_match,) = get_uids(RE(count([hw.det])))
(should_not_match,) = get_uids(RE(count([hw.det])))
scan_id = c[should_match].start["scan_id"]
results = c.search(ScanID(scan_id))
assert scan_id == results[0].start["scan_id"]
def test_scan_id_range(c, RE, hw):
RE.subscribe(c.v1.insert)
(scan1,) = get_uids(RE(count([hw.det])))
scan_id1 = c[scan1].start["scan_id"]
(scan2,) = get_uids(RE(count([hw.det])))
scan_id2 = c[scan2].start["scan_id"]
(scan3,) = get_uids(RE(count([hw.det])))
scan_id3 = c[scan3].start["scan_id"]
results = c.search(ScanIDRange(scan_id1, scan_id3))
scan_id_results = [run.start["scan_id"] for uid, run in results.items()]
assert scan_id_results == [scan_id1, scan_id2]
assert scan_id3 not in scan_id_results
def test_in(c, RE, hw):
RE.subscribe(c.v1.insert)
(should_match,) = get_uids(RE(count([hw.det]), foo="a"))
(should_not_match,) = get_uids(RE(count([hw.det]), foo="b"))
results = c.search(In("foo", ["a", "z"]))
assert should_match in results
assert should_not_match not in results
def test_not_in(c, RE, hw):
RE.subscribe(c.v1.insert)
(should_match,) = get_uids(RE(count([hw.det]), foo="a"))
(should_not_match,) = get_uids(RE(count([hw.det]), foo="b"))
results = c.search(NotIn("foo", ["b", "z"]))
assert should_match in results
assert should_not_match not in results
def test_comparison(c, RE, hw):
RE.subscribe(c.v1.insert)
(should_match,) = get_uids(RE(count([hw.det]), foo=5))
(should_not_match,) = get_uids(RE(count([hw.det]), foo=15))
results = c.search(Key("foo") < 10)
assert should_match in results
assert should_not_match not in results
def test_contains(c, RE, hw):
RE.subscribe(c.v1.insert)
(should_match,) = get_uids(RE(count([hw.det]), foo=[1, 3, 5, 7, 9]))
(should_not_match,) = get_uids(RE(count([hw.det]), foo=[2, 4, 6, 8, 10]))
results = c.search(Contains("foo", 3))
assert should_match in results
assert should_not_match not in results
def test_distinct(c, RE, hw):
RE.subscribe(c.v1.insert)
(should_match,) = get_uids(RE(count([hw.det]), foo="a"))
(should_not_match,) = get_uids(RE(count([hw.det]), foo="b"))
expected = {
"metadata": {
"start.foo": [{"value": "a", "count": 1}, {"value": "b", "count": 1}]
},
"structure_families": [{"value": "container", "count": 2}],
"specs": [{"value": [{"name": "BlueskyRun", "version": "1"}], "count": 2}],
}
results = c.distinct("foo", structure_families=True, specs=True, counts=True)
assert results["metadata"] == expected["metadata"]
assert results["specs"] == expected["specs"]
assert results["structure_families"] == expected["structure_families"]
|
3,188 |
get table line
|
import sys
import numpy as np
def sci_not(num):
exp = int(np.log10(num))
mant = num/10.0**exp
if mant < 1:
mant *= 10.0
exp -= 1
return r"${:5.3f} \times 10^{{{}}}$".format(round(mant, 3), exp)
class Variable():
def __init__(self, name, lo, o1, med, o2, hi):
self.name = name
self.lo = float(lo)
self.o1 = float(o1)
self.med = float(med)
self.o2 = float(o2)
self.hi = float(hi)
def METHOD_NAME(self, pretty_name=None):
if pretty_name is not None:
name = pretty_name
else:
name = self.name
_str = r" {:27} & {:23} & {:5.3f} & {:23} & {:5.3f} & {:23} \\"
return _str.format(name, sci_not(self.lo), round(self.o1, 3), sci_not(self.med), round(self.o2, 3), sci_not(self.hi))
class ConvergenceData():
def __init__(self):
self.data = []
def add_variable(self, name, lo, order1, med, order2, hi):
self.data.append(Variable(name, lo, order1, med, order2, hi))
def read_convergence(file_lo, file_hi):
lines_lo = []
found_l1 = False
with open(file_lo, "r") as flo:
for line in flo:
# skip everything until we see the L1 norm
if line.find("L1 norm") > 0:
found_l1 = True
continue
elif not found_l1:
continue
if line.startswith("#") or len(line.strip()) == 0:
continue
if line.startswith("Variable"):
continue
if len(line.replace(r"\\", "").split("&")) != 4:
continue
lines_lo.append(line.replace(r"\\", "").strip())
lines_hi = []
found_l1 = False
with open(file_hi, "r") as fhi:
for line in fhi:
# skip everything until we see the L1 norm
if line.find("L1 norm") > 0:
found_l1 = True
continue
elif not found_l1:
continue
if line.startswith("#") or len(line.strip()) == 0:
continue
if line.startswith("Variable"):
continue
if len(line.replace(r"\\", "").split("&")) != 4:
continue
lines_hi.append(line.replace(r"\\", "").strip())
cd = ConvergenceData()
for llo, lhi in zip(lines_lo, lines_hi):
vlo, elo, o1, emed1 = llo.split("&")
vhi, emed2, o2, ehi = lhi.split("&")
if "---" in o1 or "---" in o2:
print("skipping {}".format(vlo))
continue
if vlo != vhi:
sys.exit("error: variable mismatch")
if emed1.strip() != emed2.strip():
print(emed1, emed2)
sys.exit("error: error mismatch")
cd.add_variable(vlo, elo, o1, emed1, o2, ehi)
return cd
if __name__ == "__main__":
good_vars = {"density": r"$\rho$",
"xmom": r"$\rho u$",
"ymom": r"$\rho v$",
"rho_E": r"$\rho E$",
"rho_e": r"$\rho e$",
"Temp": r"$T$",
"rho_He4": r"$\rho X(\isotm{He}{4})$",
"rho_C12": r"$\rho X(\isotm{C}{12})$",
"rho_O16": r"$\rho X(\isotm{O}{16})$",
"rho_Fe56": r"$\rho X(\isotm{Fe}{56})$"}
# sdc4
file_lo = "convergence.2d.lo.sdc4.out"
file_hi = "convergence.2d.hi.sdc4.out"
sdc4 = read_convergence(file_lo, file_hi)
print("\n SDC 4 \n\n")
for v in sdc4.data:
if v.name in good_vars.keys():
print(v.METHOD_NAME(pretty_name=good_vars[v.name]))
# sdc2
file_lo = "convergence.2d.lo.sdc.out"
file_hi = "convergence.2d.hi.sdc.out"
sdc4 = read_convergence(file_lo, file_hi)
print("\n SDC 2 \n\n")
for v in sdc4.data:
if v.name in good_vars.keys():
print(v.METHOD_NAME(pretty_name=good_vars[v.name]))
# strang
file_lo = "convergence.2d.lo.strang.out"
file_hi = "convergence.2d.hi.strang.out"
sdc4 = read_convergence(file_lo, file_hi)
print("\n strang 4 \n\n")
for v in sdc4.data:
if v.name in good_vars.keys():
print(v.METHOD_NAME(pretty_name=good_vars[v.name]))
|
3,189 |
kronecker product
|
# Copyright (C) Unitary Fund
# Portions of this code have been adapted from PennyLane's tutorial
# on Classical Shadows.
# Original authors: PennyLane developers: Brian Doolittle, Roeland Wiersema
# Tutorial link: https://pennylane.ai/qml/demos/tutorial_classical_shadows
#
# This source code is licensed under the GPL license (v3) found in the
# LICENSE file in the root directory of this source tree.
"""Defines utility functions for classical shadows protocol."""
from itertools import product
from typing import Any, List, Tuple
import cirq
import numpy as np
from numpy.typing import NDArray
from scipy.linalg import sqrtm
import mitiq
PAULIS = [
cirq.I._unitary_(),
cirq.X._unitary_(),
cirq.Y._unitary_(),
cirq.Z._unitary_(),
]
def METHOD_NAME(matrices: List[NDArray[Any]]) -> NDArray[Any]:
"""
Returns the Kronecker product of a list of matrices.
Args:
matrices: A list of matrices.
Returns:
The Kronecker product of the matrices in the list.
"""
result = matrices[0]
for matrix in matrices[1:]:
result = np.kron(result, matrix)
return result
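# Worked example: METHOD_NAME([PAULIS[3], PAULIS[3]]) equals np.kron(Z, Z),
# i.e. the two-qubit diagonal operator diag(1, -1, -1, 1).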
def operator_ptm_vector_rep(opt: NDArray[Any]) -> NDArray[Any]:
r"""
Returns the PTM vector representation of an operator.
:math:`\mathcal{L}(\mathcal{H}_{2^n})\ni \mathtt{opt}\rightarrow
|\mathtt{opt}\rangle\!\rangle\in \mathcal{H}_{4^n}`.
Args:
opt: A square matrix representing an operator.
Returns:
A Pauli transfer matrix (PTM) representation of the operator.
"""
# The i-th entry of the vector is :math:`d^{-1/2}\mathrm{Tr}(\mathtt{opt}\,P_i)`,
# where P_i is the i-th Pauli matrix.
if not (len(opt.shape) == 2 and opt.shape[0] == opt.shape[1]):
raise TypeError("Input must be a square matrix")
num_qubits = int(np.log2(opt.shape[0]))
opt_vec = []
for pauli_combination in product(PAULIS, repeat=num_qubits):
kron_product = METHOD_NAME(pauli_combination)
opt_vec.append(
np.trace(opt @ kron_product) * np.sqrt(1 / 2**num_qubits)
)
return np.array(opt_vec)
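# Worked example (single qubit): for opt = np.eye(2) only the identity
# component survives, so operator_ptm_vector_rep(np.eye(2)) is
# [sqrt(2), 0, 0, 0], since each entry is Tr(opt @ P_i) / sqrt(2).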
def eigenvalues_to_bitstring(values: List[int]) -> str:
"""Converts eigenvalues to bitstring. e.g., [-1,1,1] -> '100'
Args:
values: A list of eigenvalues.
Returns:
A string of 1s and 0s corresponding to the states associated to
eigenvalues.
"""
return "".join(["1" if v == -1 else "0" for v in values])
def bitstring_to_eigenvalues(bitstring: str) -> List[int]:
"""Converts bitstring to eigenvalues. e.g., '100' -> [-1,1,1]
Args:
bitstring: A string of 1s and 0s.
Returns:
A list of eigenvalues corresponding to the bitstring.
"""
return [1 if b == "0" else -1 for b in bitstring]
def create_string(str_len: int, loc_list: List[int]) -> str:
"""
This function returns a string of length str_len with 1s at the locations
specified by loc_list and 0s elsewhere.
Args:
str_len: The length of the string.
loc_list: A list of integers specifying the locations of 1s in the
string.
Returns:
A string of length str_len with 1s at the locations specified by
loc_list and 0s elsewhere.
e.g. if str_len = 5, loc_list = [1,3], return '01010'
"""
return "".join(
map(lambda i: "1" if i in set(loc_list) else "0", range(str_len))
)
def n_measurements_tomography_bound(epsilon: float, num_qubits: int) -> int:
"""
This function returns the minimum number of classical shadows required
for state reconstruction for achieving the desired accuracy.
Args:
epsilon: The error on the estimator.
num_qubits: The number of qubits in the system.
Returns:
An integer that gives the number of snapshots required to satisfy the
shadow bound.
"""
return int(34 * (4**num_qubits) * epsilon ** (-2))
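# Hedged numeric example (not part of the original module):
#     n_measurements_tomography_bound(epsilon=0.1, num_qubits=2)
#     -> int(34 * 4**2 * 0.1**-2) = 54400 snapshots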
def local_clifford_shadow_norm(obs: mitiq.PauliString) -> float:
"""
Calculate shadow norm of an operator with random unitary sampled from local
Clifford group.
Args:
        obs: A self-adjoint operator, i.e. a mitiq.PauliString with real coefficients.
Returns:
Shadow norm when unitary ensemble is local Clifford group.
"""
opt = obs.matrix()
norm = (
np.linalg.norm(
opt - np.trace(opt) / 2 ** int(np.log2(opt.shape[0])),
ord=np.inf,
)
** 2
)
return float(norm)
def n_measurements_opts_expectation_bound(
error: float,
observables: List[mitiq.PauliString],
failure_rate: float,
) -> Tuple[int, int]:
"""
This function returns the minimum number of classical shadows required and
the number of groups "k" into which we need to split the shadows for
achieving the desired accuracy and failure rate in operator expectation
value estimation.
Args:
error: The error on the estimator.
observables: List of mitiq.PauliString corresponding to the
observables we intend to measure.
failure_rate: Rate of failure for the bound to hold.
Returns:
Integers quantifying the number of snapshots required to satisfy
the shadow bound and the chunk size required to attain the specified
failure rate.
"""
M = len(observables)
K = 2 * np.log(2 * M / failure_rate)
N = (
34
* max(local_clifford_shadow_norm(o) for o in observables)
/ error**2
)
return int(np.ceil(N * K)), int(K)
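# Hedged numeric example (not part of the original module): with 3 observables
# and failure_rate=0.01, K = 2 * ln(2 * 3 / 0.01) is about 12.8, so the shadows
# are split into int(K) = 12 groups, while N scales with the largest shadow
# norm divided by error**2.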
def fidelity(
sigma: NDArray[np.complex64],
rho: NDArray[np.complex64],
) -> float:
"""
Calculate the fidelity between two states.
Args:
        sigma: A state given as a density matrix (square matrix) or a state vector.
        rho: A state given as a density matrix (square matrix) or a state vector.
Returns:
Scalar corresponding to the fidelity.
"""
if sigma.ndim == 1 and rho.ndim == 1:
val = np.abs(np.dot(sigma.conj(), rho)) ** 2.0
elif sigma.ndim == 1 and rho.ndim == 2:
val = np.abs(sigma.conj().T @ rho @ sigma)
elif sigma.ndim == 2 and rho.ndim == 1:
val = np.abs(rho.conj().T @ sigma @ rho)
elif sigma.ndim == 2 and rho.ndim == 2:
val = np.abs(np.trace(sqrtm(sigma) @ rho @ sqrtm(sigma)))
else:
raise ValueError("Invalid input dimensions")
return float(val)
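# Hedged sanity checks (not part of the original module):
#     fidelity(np.array([1.0, 0.0]), np.array([1.0, 0.0]))  # -> 1.0, identical pure states
#     fidelity(np.array([1.0, 0.0]), np.array([0.0, 1.0]))  # -> 0.0, orthogonal pure states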
|
3,190 |
assert raises unavailable solver
|
import pyomo.common.unittest as unittest
import pyomo.environ as pyo
from pyomo.contrib.iis import write_iis
from pyomo.contrib.iis.iis import _supported_solvers
from pyomo.common.tempfiles import TempfileManager
import os
def _get_infeasible_model():
m = pyo.ConcreteModel()
m.x = pyo.Var(within=pyo.Binary)
m.y = pyo.Var(within=pyo.NonNegativeReals)
m.c1 = pyo.Constraint(expr=m.y <= 100.0 * m.x)
m.c2 = pyo.Constraint(expr=m.y <= -100.0 * m.x)
m.c3 = pyo.Constraint(expr=m.x >= 0.5)
m.o = pyo.Objective(expr=-m.y)
return m
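# Note (added for clarity, not in the original source): c3 forces the binary x
# to 1, c2 then requires y <= -100 while y is nonnegative, so constraints c2
# and c3 (together with the variable bounds) form an irreducible infeasible set.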
class TestIIS(unittest.TestCase):
@unittest.skipUnless(
pyo.SolverFactory("cplex_persistent").available(exception_flag=False),
"CPLEX not available",
)
def test_write_iis_cplex(self):
_test_iis("cplex")
@unittest.skipUnless(
pyo.SolverFactory("gurobi_persistent").available(exception_flag=False),
"Gurobi not available",
)
def test_write_iis_gurobi(self):
_test_iis("gurobi")
@unittest.skipUnless(
pyo.SolverFactory("xpress_persistent").available(exception_flag=False),
"Xpress not available",
)
def test_write_iis_xpress(self):
_test_iis("xpress")
@unittest.skipUnless(
(
pyo.SolverFactory("cplex_persistent").available(exception_flag=False)
or pyo.SolverFactory("gurobi_persistent").available(exception_flag=False)
or pyo.SolverFactory("xpress_persistent").available(exception_flag=False)
),
"Persistent solver not available",
)
def test_write_iis_any_solver(self):
_test_iis(None)
@unittest.skipIf(
pyo.SolverFactory("cplex_persistent").available(exception_flag=False),
"CPLEX available",
)
def test_exception_cplex_not_available(self):
self.METHOD_NAME("cplex")
@unittest.skipIf(
pyo.SolverFactory("gurobi_persistent").available(exception_flag=False),
"Gurobi available",
)
def test_exception_gurobi_not_available(self):
self.METHOD_NAME("gurobi")
@unittest.skipIf(
pyo.SolverFactory("xpress_persistent").available(exception_flag=False),
"Xpress available",
)
def test_exception_xpress_not_available(self):
self.METHOD_NAME("xpress")
@unittest.skipIf(
(
pyo.SolverFactory("cplex_persistent").available(exception_flag=False)
or pyo.SolverFactory("gurobi_persistent").available(exception_flag=False)
or pyo.SolverFactory("xpress_persistent").available(exception_flag=False)
),
"Persistent solver available",
)
def test_exception_iis_no_solver_available(self):
with self.assertRaises(
RuntimeError,
msg=f"Could not find a solver to use, supported solvers are {_supported_solvers}",
):
_test_iis(None)
def METHOD_NAME(self, solver_name):
with self.assertRaises(
RuntimeError,
msg=f"The Pyomo persistent interface to {solver_name} could not be found.",
):
_test_iis(solver_name)
def _test_iis(solver_name):
m = _get_infeasible_model()
TempfileManager.push()
tmp_path = TempfileManager.create_tempdir()
file_name = os.path.join(tmp_path, f"{solver_name}_iis.ilp")
file_name = write_iis(m, solver=solver_name, iis_file_name=str(file_name))
_validate_ilp(file_name)
TempfileManager.pop()
def _validate_ilp(file_name):
lines_found = {"c2: 100 x + y <= 0": False, "c3: x >= 0.5": False}
with open(file_name, "r") as f:
for line in f.readlines():
for k, v in lines_found.items():
if (not v) and k in line:
lines_found[k] = True
if not all(lines_found.values()):
raise Exception(
f"The file {file_name} is not as expected. Missing constraints:\n"
+ "\n".join(k for k, v in lines_found.items() if not v)
)
if __name__ == "__main__":
unittest.main()
|
3,191 |
test llvm objcopy
|
import unittest
from unittest import mock
from pythonforandroid.androidndk import AndroidNDK
class TestAndroidNDK(unittest.TestCase):
"""
    An inherited class of `unittest.TestCase` to test the module
:mod:`~pythonforandroid.androidndk`.
"""
def setUp(self):
"""Configure a :class:`~pythonforandroid.androidndk.AndroidNDK` so we can
perform our unittests"""
self.ndk = AndroidNDK("/opt/android/android-ndk")
@mock.patch("sys.platform", "linux")
def test_host_tag_linux(self):
"""Test the `host_tag` property of the :class:`~pythonforandroid.androidndk.AndroidNDK`
class when the host is Linux."""
self.assertEqual(self.ndk.host_tag, "linux-x86_64")
@mock.patch("sys.platform", "darwin")
def test_host_tag_darwin(self):
"""Test the `host_tag` property of the :class:`~pythonforandroid.androidndk.AndroidNDK`
class when the host is Darwin."""
self.assertEqual(self.ndk.host_tag, "darwin-x86_64")
def test_llvm_prebuilt_dir(self):
"""Test the `llvm_prebuilt_dir` property of the
:class:`~pythonforandroid.androidndk.AndroidNDK` class."""
self.assertEqual(
self.ndk.llvm_prebuilt_dir,
f"/opt/android/android-ndk/toolchains/llvm/prebuilt/{self.ndk.host_tag}",
)
def test_llvm_bin_dir(self):
"""Test the `llvm_bin_dir` property of the
:class:`~pythonforandroid.androidndk.AndroidNDK` class."""
self.assertEqual(
self.ndk.llvm_bin_dir,
f"/opt/android/android-ndk/toolchains/llvm/prebuilt/{self.ndk.host_tag}/bin",
)
def test_clang(self):
"""Test the `clang` property of the
:class:`~pythonforandroid.androidndk.AndroidNDK` class."""
self.assertEqual(
self.ndk.clang,
f"/opt/android/android-ndk/toolchains/llvm/prebuilt/{self.ndk.host_tag}/bin/clang",
)
def test_clang_cxx(self):
"""Test the `clang_cxx` property of the
:class:`~pythonforandroid.androidndk.AndroidNDK` class."""
self.assertEqual(
self.ndk.clang_cxx,
f"/opt/android/android-ndk/toolchains/llvm/prebuilt/{self.ndk.host_tag}/bin/clang++",
)
def test_llvm_ar(self):
"""Test the `llvm_ar` property of the
:class:`~pythonforandroid.androidndk.AndroidNDK` class."""
self.assertEqual(
self.ndk.llvm_ar,
f"/opt/android/android-ndk/toolchains/llvm/prebuilt/{self.ndk.host_tag}/bin/llvm-ar",
)
def test_llvm_ranlib(self):
"""Test the `llvm_ranlib` property of the
:class:`~pythonforandroid.androidndk.AndroidNDK` class."""
self.assertEqual(
self.ndk.llvm_ranlib,
f"/opt/android/android-ndk/toolchains/llvm/prebuilt/{self.ndk.host_tag}/bin/llvm-ranlib",
)
def METHOD_NAME(self):
"""Test the `llvm_objcopy` property of the
:class:`~pythonforandroid.androidndk.AndroidNDK` class."""
self.assertEqual(
self.ndk.llvm_objcopy,
f"/opt/android/android-ndk/toolchains/llvm/prebuilt/{self.ndk.host_tag}/bin/llvm-objcopy",
)
def test_llvm_objdump(self):
"""Test the `llvm_objdump` property of the
:class:`~pythonforandroid.androidndk.AndroidNDK` class."""
self.assertEqual(
self.ndk.llvm_objdump,
f"/opt/android/android-ndk/toolchains/llvm/prebuilt/{self.ndk.host_tag}/bin/llvm-objdump",
)
def test_llvm_readelf(self):
"""Test the `llvm_readelf` property of the
:class:`~pythonforandroid.androidndk.AndroidNDK` class."""
self.assertEqual(
self.ndk.llvm_readelf,
f"/opt/android/android-ndk/toolchains/llvm/prebuilt/{self.ndk.host_tag}/bin/llvm-readelf",
)
def test_llvm_strip(self):
"""Test the `llvm_strip` property of the
:class:`~pythonforandroid.androidndk.AndroidNDK` class."""
self.assertEqual(
self.ndk.llvm_strip,
f"/opt/android/android-ndk/toolchains/llvm/prebuilt/{self.ndk.host_tag}/bin/llvm-strip",
)
def test_sysroot(self):
"""Test the `sysroot` property of the
:class:`~pythonforandroid.androidndk.AndroidNDK` class."""
self.assertEqual(
self.ndk.sysroot,
f"/opt/android/android-ndk/toolchains/llvm/prebuilt/{self.ndk.host_tag}/sysroot",
)
def test_sysroot_include_dir(self):
"""Test the `sysroot_include_dir` property of the
:class:`~pythonforandroid.androidndk.AndroidNDK` class."""
self.assertEqual(
self.ndk.sysroot_include_dir,
f"/opt/android/android-ndk/toolchains/llvm/prebuilt/{self.ndk.host_tag}/sysroot/usr/include",
)
def test_sysroot_lib_dir(self):
"""Test the `sysroot_lib_dir` property of the
:class:`~pythonforandroid.androidndk.AndroidNDK` class."""
self.assertEqual(
self.ndk.sysroot_lib_dir,
f"/opt/android/android-ndk/toolchains/llvm/prebuilt/{self.ndk.host_tag}/sysroot/usr/lib",
)
def test_libcxx_include_dir(self):
"""Test the `libcxx_include_dir` property of the
:class:`~pythonforandroid.androidndk.AndroidNDK` class."""
self.assertEqual(
self.ndk.libcxx_include_dir,
f"/opt/android/android-ndk/toolchains/llvm/prebuilt/{self.ndk.host_tag}/sysroot/usr/include/c++/v1",
)
|
3,192 |
test roc curve
|
from unittest.mock import patch
import numpy as np
import pytest
import sklearn
import torch
from sklearn.metrics import roc_curve
from ignite import distributed as idist
from ignite.contrib.metrics.roc_auc import RocCurve
from ignite.engine import Engine
from ignite.exceptions import NotComputableError
from ignite.metrics.epoch_metric import EpochMetricWarning
def test_wrong_setup():
def compute_fn(y_preds, y_targets):
return 0.0
with pytest.raises(NotComputableError, match="RocCurve must have at least one example before it can be computed"):
metric = RocCurve(compute_fn)
metric.compute()
@pytest.fixture()
def mock_no_sklearn():
with patch.dict("sys.modules", {"sklearn.metrics": None}):
yield sklearn
def test_no_sklearn(mock_no_sklearn):
with pytest.raises(ModuleNotFoundError, match=r"This contrib module requires scikit-learn to be installed"):
RocCurve()
def METHOD_NAME():
size = 100
np_y_pred = np.random.rand(size, 1)
np_y = np.zeros((size,))
np_y[size // 2 :] = 1
sk_fpr, sk_tpr, sk_thresholds = roc_curve(np_y, np_y_pred)
roc_curve_metric = RocCurve()
y_pred = torch.from_numpy(np_y_pred)
y = torch.from_numpy(np_y)
roc_curve_metric.update((y_pred, y))
fpr, tpr, thresholds = roc_curve_metric.compute()
assert np.array_equal(fpr, sk_fpr)
assert np.array_equal(tpr, sk_tpr)
# assert thresholds almost equal, due to numpy->torch->numpy conversion
np.testing.assert_array_almost_equal(thresholds, sk_thresholds)
def test_integration_roc_curve_with_output_transform():
np.random.seed(1)
size = 100
np_y_pred = np.random.rand(size, 1)
np_y = np.zeros((size,))
np_y[size // 2 :] = 1
np.random.shuffle(np_y)
sk_fpr, sk_tpr, sk_thresholds = roc_curve(np_y, np_y_pred)
batch_size = 10
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return idx, torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
roc_curve_metric = RocCurve(output_transform=lambda x: (x[1], x[2]))
roc_curve_metric.attach(engine, "roc_curve")
data = list(range(size // batch_size))
fpr, tpr, thresholds = engine.run(data, max_epochs=1).metrics["roc_curve"]
assert np.array_equal(fpr, sk_fpr)
assert np.array_equal(tpr, sk_tpr)
# assert thresholds almost equal, due to numpy->torch->numpy conversion
np.testing.assert_array_almost_equal(thresholds, sk_thresholds)
def test_integration_roc_curve_with_activated_output_transform():
np.random.seed(1)
size = 100
np_y_pred = np.random.rand(size, 1)
np_y_pred_sigmoid = torch.sigmoid(torch.from_numpy(np_y_pred)).numpy()
np_y = np.zeros((size,))
np_y[size // 2 :] = 1
np.random.shuffle(np_y)
sk_fpr, sk_tpr, sk_thresholds = roc_curve(np_y, np_y_pred_sigmoid)
batch_size = 10
def update_fn(engine, batch):
idx = (engine.state.iteration - 1) * batch_size
y_true_batch = np_y[idx : idx + batch_size]
y_pred_batch = np_y_pred[idx : idx + batch_size]
return idx, torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
engine = Engine(update_fn)
roc_curve_metric = RocCurve(output_transform=lambda x: (torch.sigmoid(x[1]), x[2]))
roc_curve_metric.attach(engine, "roc_curve")
data = list(range(size // batch_size))
fpr, tpr, thresholds = engine.run(data, max_epochs=1).metrics["roc_curve"]
assert np.array_equal(fpr, sk_fpr)
assert np.array_equal(tpr, sk_tpr)
# assert thresholds almost equal, due to numpy->torch->numpy conversion
np.testing.assert_array_almost_equal(thresholds, sk_thresholds)
def test_check_compute_fn():
y_pred = torch.zeros((8, 13))
y_pred[:, 1] = 1
y_true = torch.zeros_like(y_pred)
output = (y_pred, y_true)
em = RocCurve(check_compute_fn=True)
em.reset()
with pytest.warns(EpochMetricWarning, match=r"Probably, there can be a problem with `compute_fn`"):
em.update(output)
em = RocCurve(check_compute_fn=False)
em.update(output)
def test_distrib_integration(distributed):
rank = idist.get_rank()
torch.manual_seed(41 + rank)
n_batches, batch_size = 5, 10
y = torch.randint(0, 2, size=(n_batches * batch_size,))
y_pred = torch.rand((n_batches * batch_size,))
def update(engine, i):
return (
y_pred[i * batch_size : (i + 1) * batch_size],
y[i * batch_size : (i + 1) * batch_size],
)
engine = Engine(update)
device = torch.device("cpu") if idist.device().type == "xla" else idist.device()
metric = RocCurve(device=device)
metric.attach(engine, "roc_curve")
data = list(range(n_batches))
engine.run(data=data, max_epochs=1)
fpr, tpr, thresholds = engine.state.metrics["roc_curve"]
assert isinstance(fpr, torch.Tensor) and fpr.device == device
assert isinstance(tpr, torch.Tensor) and tpr.device == device
assert isinstance(thresholds, torch.Tensor) and thresholds.device == device
y = idist.all_gather(y)
y_pred = idist.all_gather(y_pred)
sk_fpr, sk_tpr, sk_thresholds = roc_curve(y.cpu().numpy(), y_pred.cpu().numpy())
np.testing.assert_array_almost_equal(fpr.cpu().numpy(), sk_fpr)
np.testing.assert_array_almost_equal(tpr.cpu().numpy(), sk_tpr)
np.testing.assert_array_almost_equal(thresholds.cpu().numpy(), sk_thresholds)
|
3,193 |
re emit values
|
"""
[Name] BeamInfo
[Description]
BeamInfo hardware object is used to define final beam size and shape.
It can include aperture, slits and/or other beam definer (lenses or other eq.)
[Emitted signals]
beamInfoChanged
[Included Hardware Objects]
-----------------------------------------------------------------------
| name | signals | functions
-----------------------------------------------------------------------
aperture_HO apertureChanged
slits_HO
beam_definer_HO
-----------------------------------------------------------------------
"""
import logging
from mxcubecore.BaseHardwareObjects import Equipment
from mxcubecore import HardwareRepository as HWR
class BeamInfo(Equipment):
def __init__(self, *args):
Equipment.__init__(self, *args)
self.beam_size_slits = [9999, 9999]
self.beam_size_aperture = [9999, 9999]
self.beam_size_definer = [9999, 9999]
self.beam_position = [0, 0]
self.aperture_HO = None
self.slits_HO = None
self.beam_info_dict = {}
def init(self):
try:
self.aperture_HO = HWR.get_hardware_repository().get_hardware_object(
self.get_property("aperture")
)
self.connect(self.aperture_HO, "apertureChanged", self.aperture_pos_changed)
except Exception:
logging.getLogger("HWR").debug("BeamInfo: aperture not defined correctly")
try:
self.slits_HO = HWR.get_hardware_repository().get_hardware_object(
self.get_property("slits")
)
self.connect(self.slits_HO, "gapSizeChanged", self.slits_gap_changed)
except Exception:
logging.getLogger("HWR").debug("BeamInfo: slits not defined correctly")
try:
self.connect(
HWR.beamline.beam.beam_definer,
"definerPosChanged",
self.definer_pos_changed,
)
except Exception:
logging.getLogger("HWR").debug(
"BeamInfo: beam definer not defined correctly"
)
self.beam_position_hor = self.get_channel_object("beam_position_hor")
self.beam_position_hor.connect_signal("update", self.beam_pos_hor_changed)
self.beam_position_ver = self.get_channel_object("beam_position_ver")
self.beam_position_ver.connect_signal("update", self.beam_pos_ver_changed)
self.chan_beam_size_microns = self.get_channel_object("beam_size_microns")
self.chan_beam_shape_ellipse = self.get_channel_object("beam_shape_ellipse")
def beam_pos_hor_changed(self, value):
self.beam_position[0] = value
self.emit("beamPosChanged", (self.beam_position,))
def beam_pos_ver_changed(self, value):
self.beam_position[1] = value
self.emit("beamPosChanged", (self.beam_position,))
def get_beam_position(self):
return self.beam_position
def set_beam_position(self, beam_x, beam_y):
self.beam_position = [beam_x, beam_y]
self.beam_position_hor.set_value(int(beam_x))
self.beam_position_ver.set_value(int(beam_y))
def aperture_pos_changed(self, nameList, name, size):
self.beam_size_aperture = size
self.evaluate_beam_info()
self.METHOD_NAME()
def slits_gap_changed(self, size):
self.beam_size_slits = size
self.evaluate_beam_info()
self.METHOD_NAME()
def definer_pos_changed(self, name, size):
self.beam_size_definer = size
self.evaluate_beam_info()
self.METHOD_NAME()
def get_beam_info(self):
return self.evaluate_beam_info()
def get_beam_size(self):
"""
Description: returns beam size in microns
        Returns: list with two integers
"""
self.evaluate_beam_info()
return (
int(self.beam_info_dict["size_x"] * 1000),
int(self.beam_info_dict["size_y"] * 1000),
)
def get_beam_shape(self):
self.evaluate_beam_info()
return self.beam_info_dict["shape"]
def get_slits_gap(self):
self.evaluate_beam_info()
return self.beam_size_slits
def evaluate_beam_info(self):
"""
Description: called if aperture, slits or focusing has been changed
Returns: dictionary, {size_x: 0.1, size_y: 0.1, shape: "rectangular"}
"""
size_x = min(
self.beam_size_aperture[0],
self.beam_size_slits[0],
self.beam_size_definer[0],
)
size_y = min(
self.beam_size_aperture[1],
self.beam_size_slits[1],
self.beam_size_definer[1],
)
self.beam_info_dict["size_x"] = size_x
self.beam_info_dict["size_y"] = size_y
if self.beam_size_aperture < self.beam_size_slits:
self.beam_info_dict["shape"] = "ellipse"
else:
self.beam_info_dict["shape"] = "rectangular"
return self.beam_info_dict
def METHOD_NAME(self):
if (
self.beam_info_dict["size_x"] != 9999
and self.beam_info_dict["size_y"] != 9999
):
self.emit(
"beamSizeChanged",
((self.beam_info_dict["size_x"], self.beam_info_dict["size_y"]),),
)
self.emit("beamInfoChanged", (self.beam_info_dict,))
if self.chan_beam_size_microns is not None:
self.chan_beam_size_microns.set_value(
(
self.beam_info_dict["size_x"] * 1000,
self.beam_info_dict["size_y"] * 1000,
)
)
if self.chan_beam_shape_ellipse is not None:
self.chan_beam_shape_ellipse.set_value(
self.beam_info_dict["shape"] == "ellipse"
)
|
3,194 |
to dict
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.28
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V2ContainerResourceMetricStatus(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'container': 'str',
'current': 'V2MetricValueStatus',
'name': 'str'
}
attribute_map = {
'container': 'container',
'current': 'current',
'name': 'name'
}
def __init__(self, container=None, current=None, name=None, local_vars_configuration=None): # noqa: E501
"""V2ContainerResourceMetricStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._container = None
self._current = None
self._name = None
self.discriminator = None
self.container = container
self.current = current
self.name = name
@property
def container(self):
"""Gets the container of this V2ContainerResourceMetricStatus. # noqa: E501
container is the name of the container in the pods of the scaling target # noqa: E501
:return: The container of this V2ContainerResourceMetricStatus. # noqa: E501
:rtype: str
"""
return self._container
@container.setter
def container(self, container):
"""Sets the container of this V2ContainerResourceMetricStatus.
container is the name of the container in the pods of the scaling target # noqa: E501
:param container: The container of this V2ContainerResourceMetricStatus. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and container is None: # noqa: E501
raise ValueError("Invalid value for `container`, must not be `None`") # noqa: E501
self._container = container
@property
def current(self):
"""Gets the current of this V2ContainerResourceMetricStatus. # noqa: E501
:return: The current of this V2ContainerResourceMetricStatus. # noqa: E501
:rtype: V2MetricValueStatus
"""
return self._current
@current.setter
def current(self, current):
"""Sets the current of this V2ContainerResourceMetricStatus.
:param current: The current of this V2ContainerResourceMetricStatus. # noqa: E501
:type: V2MetricValueStatus
"""
if self.local_vars_configuration.client_side_validation and current is None: # noqa: E501
raise ValueError("Invalid value for `current`, must not be `None`") # noqa: E501
self._current = current
@property
def name(self):
"""Gets the name of this V2ContainerResourceMetricStatus. # noqa: E501
name is the name of the resource in question. # noqa: E501
:return: The name of this V2ContainerResourceMetricStatus. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V2ContainerResourceMetricStatus.
name is the name of the resource in question. # noqa: E501
:param name: The name of this V2ContainerResourceMetricStatus. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
def METHOD_NAME(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.METHOD_NAME() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.METHOD_NAME()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].METHOD_NAME())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.METHOD_NAME())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V2ContainerResourceMetricStatus):
return False
return self.METHOD_NAME() == other.METHOD_NAME()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V2ContainerResourceMetricStatus):
return True
return self.METHOD_NAME() != other.METHOD_NAME()
|
3,195 |
get client
|
import json
import cv2
import base64
import threading
import time
from datetime import datetime
from websocket_server import WebsocketServer
# Graphical User Interface Class
class GUI:
    # Initialization function: sets up the payloads, image locks and state,
    # and starts the websocket server thread
def __init__(self, host, car):
t = threading.Thread(target=self.run_server)
self.payload = {'image': ''}
self.left_payload = {'image': ''}
self.server = None
self.client = None
self.host = host
# Image variables
self.image_to_be_shown = None
self.image_to_be_shown_updated = False
self.image_show_lock = threading.Lock()
self.left_image_to_be_shown = None
self.left_image_to_be_shown_updated = False
self.left_image_show_lock = threading.Lock()
self.acknowledge = False
self.acknowledge_lock = threading.Lock()
# Take the console object to set the same websocket and client
self.car = car
t.start()
# Explicit initialization function
# Class method, so user can call it without instantiation
@classmethod
def initGUI(cls, host):
# self.payload = {'image': '', 'shape': []}
new_instance = cls(host)
return new_instance
# Function to prepare image payload
# Encodes the image as a JSON string and sends through the WS
def payloadImage(self):
self.image_show_lock.acquire()
image_to_be_shown_updated = self.image_to_be_shown_updated
image_to_be_shown = self.image_to_be_shown
self.image_show_lock.release()
image = image_to_be_shown
payload = {'image': '', 'shape': ''}
if not image_to_be_shown_updated:
return payload
shape = image.shape
frame = cv2.imencode('.JPEG', image)[1]
encoded_image = base64.b64encode(frame)
payload['image'] = encoded_image.decode('utf-8')
payload['shape'] = shape
self.image_show_lock.acquire()
self.image_to_be_shown_updated = False
self.image_show_lock.release()
return payload
# Function to prepare image payload
# Encodes the image as a JSON string and sends through the WS
def payloadLeftImage(self):
self.left_image_show_lock.acquire()
left_image_to_be_shown_updated = self.left_image_to_be_shown_updated
left_image_to_be_shown = self.left_image_to_be_shown
self.left_image_show_lock.release()
image = left_image_to_be_shown
payload = {'image': '', 'shape': ''}
if not left_image_to_be_shown_updated:
return payload
shape = image.shape
frame = cv2.imencode('.JPEG', image)[1]
encoded_image = base64.b64encode(frame)
payload['image'] = encoded_image.decode('utf-8')
payload['shape'] = shape
self.left_image_show_lock.acquire()
self.left_image_to_be_shown_updated = False
self.left_image_show_lock.release()
return payload
# Function for student to call
def showImage(self, image):
self.image_show_lock.acquire()
self.image_to_be_shown = image
self.image_to_be_shown_updated = True
self.image_show_lock.release()
# Function for student to call
def showLeftImage(self, image):
self.left_image_show_lock.acquire()
self.left_image_to_be_shown = image
self.left_image_to_be_shown_updated = True
self.left_image_show_lock.release()
# Function to get the client
# Called when a new client is received
def METHOD_NAME(self, client, server):
self.client = client
# Function to get value of Acknowledge
def get_acknowledge(self):
self.acknowledge_lock.acquire()
acknowledge = self.acknowledge
self.acknowledge_lock.release()
return acknowledge
# Function to get value of Acknowledge
def set_acknowledge(self, value):
self.acknowledge_lock.acquire()
self.acknowledge = value
self.acknowledge_lock.release()
# Update the gui
def update_gui(self):
# Payload Image Message
payload = self.payloadImage()
self.payload["image"] = json.dumps(payload)
message = "#gui" + json.dumps(self.payload)
self.server.send_message(self.client, message)
# Payload Left Image Message
left_payload = self.payloadLeftImage()
self.left_payload["image"] = json.dumps(left_payload)
message = "#gul" + json.dumps(self.left_payload)
self.server.send_message(self.client, message)
# Function to read the message from websocket
# Gets called when there is an incoming message from the client
def get_message(self, client, server, message):
# Acknowledge Message for GUI Thread
if message[:4] == "#ack":
self.set_acknowledge(True)
elif message[:4] == "#car":
self.car.start_car(int(message[4:5]))
elif message[:4] == "#stp":
self.car.stop_car()
elif message[:4] == "#rst":
self.car.reset_car()
# Activate the server
def run_server(self):
self.server = WebsocketServer(port=2303, host=self.host)
self.server.set_fn_new_client(self.METHOD_NAME)
self.server.set_fn_message_received(self.get_message)
logged = False
while not logged:
try:
f = open("/ws_gui.log", "w")
f.write("websocket_gui=ready")
f.close()
logged = True
except:
time.sleep(0.1)
self.server.run_forever()
# Function to reset
def reset_gui(self):
pass
# This class decouples the user thread
# and the GUI update thread
class ThreadGUI:
def __init__(self, gui):
self.gui = gui
# Time variables
self.ideal_cycle = 80
self.measured_cycle = 80
self.iteration_counter = 0
# Function to start the execution of threads
def start(self):
self.measure_thread = threading.Thread(target=self.measure_thread)
self.thread = threading.Thread(target=self.run)
self.measure_thread.start()
self.thread.start()
print("GUI Thread Started!")
# The measuring thread to measure frequency
def measure_thread(self):
while self.gui.client is None:
pass
previous_time = datetime.now()
while True:
# Sleep for 2 seconds
time.sleep(2)
# Measure the current time and subtract from previous time to get real time interval
current_time = datetime.now()
dt = current_time - previous_time
ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
previous_time = current_time
# Get the time period
try:
# Division by zero
self.measured_cycle = ms / self.iteration_counter
except:
self.measured_cycle = 0
# Reset the counter
self.iteration_counter = 0
# The main thread of execution
def run(self):
while self.gui.client is None:
pass
while True:
start_time = datetime.now()
self.gui.update_gui()
acknowledge_message = self.gui.get_acknowledge()
while not acknowledge_message:
acknowledge_message = self.gui.get_acknowledge()
self.gui.set_acknowledge(False)
finish_time = datetime.now()
self.iteration_counter = self.iteration_counter + 1
dt = finish_time - start_time
ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
if ms < self.ideal_cycle:
time.sleep((self.ideal_cycle-ms) / 1000.0)
|
3,196 |
get subcommand entry points
|
# Copyright 2020 pydicom authors. See LICENSE file for details.
"""Pydicom command line interface program
Each subcommand is a module within pydicom.cli, which
defines an add_subparser(subparsers) function to set argparse
attributes, and calls set_defaults(func=callback_function)
"""
import argparse
from importlib.metadata import entry_points
import re
import sys
from typing import cast, Any
from collections.abc import Callable
from pydicom import dcmread
from pydicom.data.data_manager import get_charset_files, get_testdata_file
from pydicom.dataset import Dataset
from pathlib import Path
subparsers: argparse._SubParsersAction | None = None
# Restrict the allowed syntax tightly, since we use Python `eval`
# on the expression. Do not allow callables, or assignment, for example.
re_kywd_or_item = (
r"\w+" # Keyword (\w allows underscore, needed for file_meta)
r"(\[(-)?\d+\])?" # Optional [index] or [-index]
)
re_file_spec_object = re.compile(re_kywd_or_item + r"(\." + re_kywd_or_item + r")*$")
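# Hedged examples of what the pattern accepts (not part of the original module):
# "StudyDate", "file_meta.TransferSyntaxUID" and "BeamSequence[0].BeamNumber"
# match, while calls or assignments such as "pixel_array()" or
# "StudyDate='20200101'" are rejected.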
filespec_help = (
"File specification, in format [pydicom::]filename[::element]. "
"If `pydicom::` prefix is present, then use the pydicom "
"test file with that name. If `element` is given, "
"use only that data element within the file. "
"Examples: "
"path/to/your_file.dcm, "
"your_file.dcm::StudyDate, "
"pydicom::rtplan.dcm::BeamSequence[0], "
"yourplan.dcm::BeamSequence[0].BeamNumber"
)
def eval_element(ds: Dataset, element: str) -> Any:
try:
return eval("ds." + element, {"ds": ds})
except AttributeError:
raise argparse.ArgumentTypeError(
f"Data element '{element}' is not in the dataset"
)
except IndexError as e:
raise argparse.ArgumentTypeError(f"'{element}' has an index error: {str(e)}")
def filespec_parts(filespec: str) -> tuple[str, str, str]:
"""Parse the filespec format into prefix, filename, element
Format is [prefix::filename::element]
Note that ':' can also exist in valid filename, e.g. r'c:\temp\test.dcm'
"""
*prefix_file, last = filespec.split("::")
if not prefix_file: # then only the filename component
return "", last, ""
prefix = "pydicom" if prefix_file[0] == "pydicom" else ""
if prefix:
prefix_file.pop(0)
# If list empty after pop above, then have pydicom::filename
if not prefix_file:
return prefix, last, ""
return prefix, "".join(prefix_file), last
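# Hedged example parses (not part of the original module):
#     filespec_parts("rtplan.dcm")                  -> ("", "rtplan.dcm", "")
#     filespec_parts("pydicom::rtplan.dcm")         -> ("pydicom", "rtplan.dcm", "")
#     filespec_parts("rtplan.dcm::BeamSequence[0]") -> ("", "rtplan.dcm", "BeamSequence[0]")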
def filespec_parser(filespec: str) -> list[tuple[Dataset, Any]]:
"""Utility to return a dataset and an optional data element value within it
Note: this is used as an argparse 'type' for adding parsing arguments.
Parameters
----------
filespec: str
A filename with optional `pydicom::` prefix and optional data element,
in format:
[pydicom::]<filename>[::<element>]
If an element is specified, it must be a path to a data element,
sequence item (dataset), or a sequence.
Examples:
your_file.dcm
your_file.dcm::StudyDate
pydicom::rtplan.dcm::BeamSequence[0]
pydicom::rtplan.dcm::BeamSequence[0].BeamLimitingDeviceSequence
Returns
-------
List[Tuple[Dataset, Any]]
Matching pairs of (dataset, data element value)
This usually is a single pair, but a list is returned for future
ability to work across multiple files.
Note
----
This function is meant to be used in a call to an `argparse` library's
`add_argument` call for subparsers, with name="filespec" and
`type=filespec_parser`. When used that way, the resulting args.filespec
will contain the return values of this function
(e.g. use `ds, element_val = args.filespec` after parsing arguments)
See the `pydicom.cli.show` module for an example.
Raises
------
argparse.ArgumentTypeError
If the filename does not exist in local path or in pydicom test files,
or if the optional element is not a valid expression,
or if the optional element is a valid expression but does not exist
within the dataset
"""
prefix, filename, element = filespec_parts(filespec)
# Get the pydicom test filename even without prefix, in case user forgot it
try:
pydicom_filename = cast(str, get_testdata_file(filename))
except NotImplementedError: # will get this if absolute path passed
pydicom_filename = ""
# Check if filename is in charset files
if not pydicom_filename:
try:
char_filenames = get_charset_files(filename)
if char_filenames:
pydicom_filename = char_filenames[0]
except NotImplementedError: # will get this if absolute path passed
pass
if prefix == "pydicom":
filename = pydicom_filename
# Check element syntax first to avoid unnecessary load of file
if element and not re_file_spec_object.match(element):
raise argparse.ArgumentTypeError(
f"Component '{element}' is not valid syntax for a "
"data element, sequence, or sequence item"
)
# Read DICOM file
try:
ds = dcmread(filename, force=True)
except FileNotFoundError:
extra = (
(f", \nbut 'pydicom::{filename}' test data file is available")
if pydicom_filename
else ""
)
raise argparse.ArgumentTypeError(f"File '{filename}' not found{extra}")
except Exception as e:
raise argparse.ArgumentTypeError(f"Error reading '{filename}': {str(e)}")
if not element:
return [(ds, None)]
data_elem_val = eval_element(ds, element)
return [(ds, data_elem_val)]
def help_command(args: argparse.Namespace) -> None:
if subparsers is None:
print("No subcommands are available")
return
subcommands: list[str] = list(subparsers.choices.keys())
if args.subcommand and args.subcommand in subcommands:
subparsers.choices[args.subcommand].print_help()
else:
print("Use pydicom help [subcommand] to show help for a subcommand")
subcommands.remove("help")
print(f"Available subcommands: {', '.join(subcommands)}")
SubCommandType = dict[str, Callable[[argparse._SubParsersAction], None]]
def METHOD_NAME() -> SubCommandType:
subcommands = {}
for entry_point in entry_points(group="pydicom_subcommands"):
subcommands[entry_point.name] = entry_point.load()
return subcommands
def main(args: list[str] | None = None) -> None:
"""Entry point for 'pydicom' command line interface
Parameters
----------
args : List[str], optional
Command-line arguments to parse. If ``None``, then :attr:`sys.argv`
is used.
"""
global subparsers
py_version = sys.version.split()[0]
parser = argparse.ArgumentParser(
prog="pydicom",
description=f"pydicom command line utilities (Python {py_version})",
)
subparsers = parser.add_subparsers(help="subcommand help")
help_parser = subparsers.add_parser("help", help="display help for subcommands")
help_parser.add_argument(
"subcommand", nargs="?", help="Subcommand to show help for"
)
help_parser.set_defaults(func=help_command)
# Get subcommands to register themselves as a subparser
subcommands = METHOD_NAME()
for subcommand in subcommands.values():
subcommand(subparsers)
ns = parser.parse_args(args)
if not vars(ns):
parser.print_help()
else:
ns.func(ns)
|
3,197 |
show mesh
|
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from porespy.tools import get_tqdm
__all__ = [
'bar',
'imshow',
'show_mesh',
'show_panels',
]
tqdm = get_tqdm()
def show_panels(im, rc=[3, 3], axis=0):
r"""
Show slices of a 3D image as a 2D array of panels.
Parameters
----------
im : ndarray
The 3D image to visualize
    rc : list of ints
The number of rows and columns to create
axis : int
The axis along which to create the slices
Returns
-------
fig, ax : Matplotlib figure and axis handles
"""
from porespy.visualization import prep_for_imshow
i, j = rc
im = np.swapaxes(im, axis, 2)
slices = np.linspace(0, im.shape[2], i*j, endpoint=False).astype(int)
fig, ax = plt.subplots(i, j)
s = 0
for row in range(i):
for col in range(j):
temp = prep_for_imshow(im[..., slices[s]])
ax[row][col].imshow(**temp)
ax[row][col].text(
0, 1,
f"Slice {slices[s]}",
# ha="center", va="center",
bbox=dict(boxstyle="square,pad=0.3",
fc="white", ec="white", lw=1, alpha=0.75))
s += 1
return fig, ax
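# Hedged usage sketch (the porespy generator call below is an assumption, not
# taken from this module):
#     im = porespy.generators.blobs(shape=[64, 64, 64])
#     fig, ax = show_panels(im, rc=[2, 2], axis=2)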
def bar(results, h='pdf', **kwargs): # pragma: no cover
r"""
Convenience wrapper for matplotlib's ``bar``.
This automatically:
* fetches the ``bin_centers``
* fetches the bin heights from the specified ``h``
* sets the bin widths
* sets the edges to black
Parameters
----------
results : object
The objects returned by various functions in the
``porespy.metrics`` submodule, such as ``chord_length_distribution``.
h : str
The value to use for bin heights. The default is ``pdf``, but
``cdf`` is another option. Depending on the function the named-tuple
may have different options.
kwargs : keyword arguments
All other keyword arguments are passed to ``bar``, including
``edgecolor`` if you wish to overwrite the default black.
Returns
-------
fig: Matplotlib figure handle
Examples
--------
`Click here
<https://porespy.org/examples/visualization/reference/bar.html>`_
to view online example.
"""
if 'edgecolor' not in kwargs:
kwargs['edgecolor'] = 'k'
fig = plt.bar(x=results.bin_centers, height=getattr(results, h),
width=results.bin_widths, **kwargs)
xlab = [attr for attr in results.__dir__() if not attr.startswith('_')][0]
plt.xlabel(xlab)
plt.ylabel(h)
return fig
def imshow(*im, ind=None, axis=None, **kwargs): # pragma: no cover
r"""
Convenience wrapper for matplotlib's ``imshow``.
This automatically:
* slices a 3D image in the middle of the last axis
* uses a masked array to make 0's white
* sets the origin to 'lower' so bottom-left corner is [0, 0]
* disables interpolation
Parameters
----------
im : ndarray
The 2D or 3D image (or images) to show. If 2D then all other
arguments are ignored.
ind : int
The slice to show if ``im`` is 3D. If not given then the middle of
the image is used.
axis : int
The axis to show if ``im`` is 3D. If not given, then the last
        axis of the image is used, so an x-y slice is shown.
**kwargs
All other keyword arguments are passed to ``plt.imshow``
Note
----
``im`` can also be a series of unnamed arguments, in which case all
received images will be shown using ``subplot``.
Examples
--------
`Click here
<https://porespy.org/examples/visualization/reference/imshow.html>`_
to view online example.
"""
if 'origin' not in kwargs.keys():
kwargs['origin'] = 'lower'
if 'interpolation' not in kwargs.keys():
kwargs['interpolation'] = 'none'
if not isinstance(im, tuple):
im = tuple([im])
for i, image in enumerate(im):
if image.ndim == 3:
if axis is None:
axis = 2
if ind is None:
ind = int(image.shape[axis]/2)
image = image.take(indices=ind, axis=axis)
image = np.ma.array(image, mask=image == 0)
fig = plt.subplot(1, len(im), i+1)
plt.imshow(image, **kwargs)
return fig
def METHOD_NAME(mesh): # pragma: no cover
r"""
Visualizes the mesh of a region as obtained by ``get_mesh`` function in
the ``metrics`` submodule.
Parameters
----------
mesh : tuple
A mesh returned by ``skimage.measure.marching_cubes``
Returns
-------
fig : Matplotlib figure
A handle to a matplotlib 3D axis
Examples
--------
`Click here
<https://porespy.org/examples/visualization/reference/show_mesh.html>`_
to view online example.
"""
try:
verts = mesh.vertices
except AttributeError:
verts = mesh.verts
lim_max = np.amax(verts, axis=0)
lim_min = np.amin(verts, axis=0)
# Display resulting triangular mesh using Matplotlib.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Fancy indexing: `verts[faces]` to generate a collection of triangles
mesh = Poly3DCollection(verts[mesh.faces])
mesh.set_edgecolor('k')
ax.add_collection3d(mesh)
ax.set_xlabel("x-axis")
ax.set_ylabel("y-axis")
ax.set_zlabel("z-axis")
ax.set_xlim(lim_min[0], lim_max[0])
ax.set_ylim(lim_min[1], lim_max[1])
ax.set_zlim(lim_min[2], lim_max[2])
return fig
|
3,198 |
class browser
|
"""Class browser.
XXX TO DO:
- reparse when source changed (maybe just a button would be OK?)
(or recheck on window popup)
- add popup menu with more options (e.g. doc strings, base classes, imports)
- show function argument list? (have to do pattern matching on source)
- should the classes and methods lists also be in the module's menu bar?
- add base classes to class browser tree
"""
import os
import sys
import pyclbr
from idlelib import PyShell
from idlelib.WindowList import ListedToplevel
from idlelib.TreeWidget import TreeNode, TreeItem, ScrolledCanvas
from idlelib.configHandler import idleConf
file_open = None # Method...Item and Class...Item use this.
# Normally PyShell.flist.open, but there is no PyShell.flist for htest.
class ClassBrowser:
def __init__(self, flist, name, path, _htest=False):
# XXX This API should change, if the file doesn't end in ".py"
# XXX the code here is bogus!
"""
        _htest - bool, change box location when running htest.
"""
global file_open
if not _htest:
file_open = PyShell.flist.open
self.name = name
self.file = os.path.join(path[0], self.name + ".py")
self._htest = _htest
self.init(flist)
def close(self, event=None):
self.top.destroy()
self.node.destroy()
def init(self, flist):
self.flist = flist
# reset pyclbr
pyclbr._modules.clear()
# create top
self.top = top = ListedToplevel(flist.root)
top.protocol("WM_DELETE_WINDOW", self.close)
top.bind("<Escape>", self.close)
if self._htest: # place dialog below parent if running htest
top.geometry("+%d+%d" %
(flist.root.winfo_rootx(), flist.root.winfo_rooty() + 200))
self.settitle()
top.focus_set()
# create scrolled canvas
theme = idleConf.CurrentTheme()
background = idleConf.GetHighlight(theme, 'normal')['background']
sc = ScrolledCanvas(top, bg=background, highlightthickness=0, takefocus=1)
sc.frame.pack(expand=1, fill="both")
item = self.rootnode()
self.node = node = TreeNode(sc.canvas, None, item)
node.update()
node.expand()
def settitle(self):
self.top.wm_title("Class Browser - " + self.name)
self.top.wm_iconname("Class Browser")
def rootnode(self):
return ModuleBrowserTreeItem(self.file)
class ModuleBrowserTreeItem(TreeItem):
def __init__(self, file):
self.file = file
def GetText(self):
return os.path.basename(self.file)
def GetIconName(self):
return "python"
def GetSubList(self):
sublist = []
for name in self.listclasses():
item = ClassBrowserTreeItem(name, self.classes, self.file)
sublist.append(item)
return sublist
def OnDoubleClick(self):
if os.path.normcase(self.file[-3:]) != ".py":
return
if not os.path.exists(self.file):
return
PyShell.flist.open(self.file)
def IsExpandable(self):
return os.path.normcase(self.file[-3:]) == ".py"
def listclasses(self):
dir, file = os.path.split(self.file)
name, ext = os.path.splitext(file)
if os.path.normcase(ext) != ".py":
return []
try:
dict = pyclbr.readmodule_ex(name, [dir] + sys.path)
except ImportError:
return []
items = []
self.classes = {}
for key, cl in dict.items():
if cl.module == name:
s = key
if hasattr(cl, 'super') and cl.super:
supers = []
for sup in cl.super:
if type(sup) is type(''):
sname = sup
else:
sname = sup.name
if sup.module != cl.module:
sname = "%s.%s" % (sup.module, sname)
supers.append(sname)
s = s + "(%s)" % ", ".join(supers)
items.append((cl.lineno, s))
self.classes[s] = cl
items.sort()
list = []
for item, s in items:
list.append(s)
return list
class ClassBrowserTreeItem(TreeItem):
def __init__(self, name, classes, file):
self.name = name
self.classes = classes
self.file = file
try:
self.cl = self.classes[self.name]
except (IndexError, KeyError):
self.cl = None
self.isfunction = isinstance(self.cl, pyclbr.Function)
def GetText(self):
if self.isfunction:
return "def " + self.name + "(...)"
else:
return "class " + self.name
def GetIconName(self):
if self.isfunction:
return "python"
else:
return "folder"
def IsExpandable(self):
if self.cl:
try:
return not not self.cl.methods
except AttributeError:
return False
def GetSubList(self):
if not self.cl:
return []
sublist = []
for name in self.listmethods():
item = MethodBrowserTreeItem(name, self.cl, self.file)
sublist.append(item)
return sublist
def OnDoubleClick(self):
if not os.path.exists(self.file):
return
edit = file_open(self.file)
if hasattr(self.cl, 'lineno'):
lineno = self.cl.lineno
edit.gotoline(lineno)
def listmethods(self):
if not self.cl:
return []
items = []
for name, lineno in self.cl.methods.items():
items.append((lineno, name))
items.sort()
list = []
for item, name in items:
list.append(name)
return list
class MethodBrowserTreeItem(TreeItem):
def __init__(self, name, cl, file):
self.name = name
self.cl = cl
self.file = file
def GetText(self):
return "def " + self.name + "(...)"
def GetIconName(self):
return "python" # XXX
def IsExpandable(self):
return 0
def OnDoubleClick(self):
if not os.path.exists(self.file):
return
edit = file_open(self.file)
edit.gotoline(self.cl.methods[self.name])
def METHOD_NAME(parent): #Wrapper for htest
try:
file = __file__
except NameError:
file = sys.argv[0]
        if sys.argv[1:]:
            file = sys.argv[1]
        else:
            file = sys.argv[0]
dir, file = os.path.split(file)
name = os.path.splitext(file)[0]
flist = PyShell.PyShellFileList(parent)
global file_open
file_open = flist.open
ClassBrowser(flist, name, [dir], _htest=True)
if __name__ == "__main__":
from idlelib.idle_test.htest import run
run(METHOD_NAME)
|
3,199 |
test20
|
# -*- coding: utf-8 -*-
# ***************************************************************************
# * Copyright (c) 2021 sliptonic <[email protected]> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
import FreeCAD
import Path
import Path.Base.Generator.rotation as generator
import PathTests.PathTestUtils as PathTestUtils
import numpy as np
Path.Log.setLevel(Path.Log.Level.INFO, Path.Log.thisModule())
Path.Log.trackModule(Path.Log.thisModule())
class TestPathRotationGenerator(PathTestUtils.PathTestBase):
def test00(self):
"""Test relAngle function"""
v = FreeCAD.Vector(0.5, 0.5, 0.5)
self.assertTrue(np.isclose(generator.relAngle(v, generator.refAxis.x), 45))
self.assertTrue(np.isclose(generator.relAngle(v, generator.refAxis.y), 45))
self.assertTrue(np.isclose(generator.relAngle(v, generator.refAxis.z), 45))
v = FreeCAD.Vector(-0.5, 0.5, 0.5)
self.assertTrue(np.isclose(generator.relAngle(v, generator.refAxis.x), 135))
self.assertTrue(np.isclose(generator.relAngle(v, generator.refAxis.y), -45))
self.assertTrue(np.isclose(generator.relAngle(v, generator.refAxis.z), 45))
v = FreeCAD.Vector(-0.5, -0.5, -0.5)
self.assertTrue(np.isclose(generator.relAngle(v, generator.refAxis.x), -135))
self.assertTrue(np.isclose(generator.relAngle(v, generator.refAxis.y), -135))
self.assertTrue(np.isclose(generator.relAngle(v, generator.refAxis.z), -135))
def test10(self):
"""Test Basic Rotation Generator Return"""
v1 = FreeCAD.Vector(0.0, 0.0, 1.0)
args = {
"normalVector": v1,
"aMin": -360,
"aMax": 360,
"cMin": -360,
"cMax": 360,
"compound": True,
}
result = generator.generate(**args)
self.assertTrue(type(result) is list)
self.assertTrue(len(result) == 1)
self.assertTrue(type(result[0]) is Path.Command)
command = result[0]
self.assertTrue(np.isclose(command.Parameters["A"], 0))
self.assertTrue(np.isclose(command.Parameters["C"], 0))
args["compound"] = False
result = generator.generate(**args)
self.assertTrue(len(result) == 2)
Path.Log.debug(result)
def METHOD_NAME(self):
"""Test non-zero rotation"""
v1 = FreeCAD.Vector(0.5, 0.5, 0.5)
args = {
"normalVector": v1,
"aMin": -360,
"aMax": 360,
"cMin": -360,
"cMax": 360,
"compound": True,
}
result = generator.generate(**args)
command = result[0]
Path.Log.debug(command.Parameters)
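        # Expected values (explanatory note, not in the original test): the
        # (1, 1, 1) direction is tilted arccos(1/sqrt(3)) ~ 54.736 degrees away
        # from +Z with a 45 degree azimuth, which the generator reports as A and C.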
self.assertTrue(np.isclose(command.Parameters["A"], 54.736))
self.assertTrue(np.isclose(command.Parameters["C"], 45))
Path.Log.track(result)
def test30(self):
"""Test A limits"""
v1 = FreeCAD.Vector(0.5, 0.5, 0.5)
args = {"normalVector": v1, "cMin": -360, "cMax": 360, "compound": True}
# Constrain a axis rotation negative
args["aMin"] = -90
args["aMax"] = 0
result = generator.generate(**args)
Path.Log.debug(result)
command = result[0]
self.assertTrue(np.isclose(command.Parameters["A"], -54.736))
self.assertTrue(np.isclose(command.Parameters["C"], -135))
# Constrain a axis rotation positive
args["aMin"] = 0
args["aMax"] = 90
result = generator.generate(**args)
Path.Log.debug(result)
command = result[0]
self.assertTrue(np.isclose(command.Parameters["A"], 54.736))
self.assertTrue(np.isclose(command.Parameters["C"], 45))
def test40(self):
"""Test C limits"""
v1 = FreeCAD.Vector(0.5, 0.5, 0.5)
args = {"normalVector": v1, "aMin": -360, "aMax": 360, "compound": True}
# Constrain a axis rotation negative
args["cMin"] = -180
args["cMax"] = 0
result = generator.generate(**args)
Path.Log.debug(result)
command = result[0]
self.assertTrue(np.isclose(command.Parameters["A"], -54.736))
self.assertTrue(np.isclose(command.Parameters["C"], -135))
# Constrain a axis rotation positive
args["cMin"] = 0
args["cMax"] = 180
result = generator.generate(**args)
Path.Log.debug(result)
command = result[0]
self.assertTrue(np.isclose(command.Parameters["A"], 54.736))
self.assertTrue(np.isclose(command.Parameters["C"], 45))
def test50(self):
"""Test handling of no valid solution"""
v1 = FreeCAD.Vector(0.5, 0.5, 0.5)
args = {
"normalVector": v1,
"aMin": 0,
"aMax": 10,
"cMin": 0,
"cMax": 10,
"compound": True,
}
self.assertRaises(ValueError, generator.generate, **args)
|