id (int64, 0-300k) | label (string, 1-74 chars) | text (string, 4k-8k chars) |
---|---|---|
3,700 |
close path
|
"""fontTools.pens.pointInsidePen -- Pen implementing "point inside" testing
for shapes.
"""
from fontTools.pens.basePen import BasePen
from fontTools.misc.bezierTools import solveQuadratic, solveCubic
__all__ = ["PointInsidePen"]
class PointInsidePen(BasePen):
"""This pen implements "point inside" testing: to test whether
a given point lies inside the shape (black) or outside (white).
Instances of this class can be recycled, as long as the
setTestPoint() method is used to set the new point to test.
Typical usage:
pen = PointInsidePen(glyphSet, (100, 200))
outline.draw(pen)
isInside = pen.getResult()
Both the even-odd algorithm and the non-zero-winding-rule
algorithm are implemented. The latter is the default, specify
True for the evenOdd argument of __init__ or setTestPoint
to use the even-odd algorithm.
"""
# This class implements the classical "shoot a ray from the test point
# to infinity and count how many times it intersects the outline" (as well
# as the non-zero variant, where the counter is incremented if the outline
# intersects the ray in one direction and decremented if it intersects in
# the other direction).
# I found an amazingly clear explanation of the subtleties involved in
# implementing this correctly for polygons here:
# http://graphics.cs.ucdavis.edu/~okreylos/TAship/Spring2000/PointInPolygon.html
# I extended the principles outlined on that page to curves.
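# As a rough, hypothetical illustration (not part of this pen), the plain
# even-odd test for a polygon boils down to counting how many edges a
# horizontal ray from the test point crosses; the pen below generalises the
# same idea to line segments and curves:
#
#     def point_in_polygon_even_odd(points, x, y):
#         inside = False
#         for (x1, y1), (x2, y2) in zip(points, points[1:] + points[:1]):
#             if (y1 > y) != (y2 > y):  # edge straddles the ray's y level
#                 crossing_x = x1 + (y - y1) / (y2 - y1) * (x2 - x1)
#                 if crossing_x >= x:  # crossing lies on the rightward ray
#                     inside = not inside
#         return inside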
def __init__(self, glyphSet, testPoint, evenOdd=False):
BasePen.__init__(self, glyphSet)
self.setTestPoint(testPoint, evenOdd)
def setTestPoint(self, testPoint, evenOdd=False):
"""Set the point to test. Call this _before_ the outline gets drawn."""
self.testPoint = testPoint
self.evenOdd = evenOdd
self.firstPoint = None
self.intersectionCount = 0
def getWinding(self):
if self.firstPoint is not None:
# always make sure the sub paths are closed; the algorithm only works
# for closed paths.
self.closePath()
return self.intersectionCount
def getResult(self):
"""After the shape has been drawn, getResult() returns True if the test
point lies within the (black) shape, and False if it doesn't.
"""
winding = self.getWinding()
if self.evenOdd:
result = winding % 2
else: # non-zero
result = self.intersectionCount != 0
return not not result
def _addIntersection(self, goingUp):
if self.evenOdd or goingUp:
self.intersectionCount += 1
else:
self.intersectionCount -= 1
def _moveTo(self, point):
if self.firstPoint is not None:
# always make sure the sub paths are closed; the algorithm only works
# for closed paths.
self.closePath()
self.firstPoint = point
def _lineTo(self, point):
x, y = self.testPoint
x1, y1 = self._getCurrentPoint()
x2, y2 = point
if x1 < x and x2 < x:
return
if y1 < y and y2 < y:
return
if y1 >= y and y2 >= y:
return
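# At this point the segment straddles the horizontal line through the test
# point; compute the crossing's x coordinate and count it only if it lies on
# the ray, i.e. at or to the right of the test point.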
dx = x2 - x1
dy = y2 - y1
t = (y - y1) / dy
ix = dx * t + x1
if ix < x:
return
self._addIntersection(y2 > y1)
def _curveToOne(self, bcp1, bcp2, point):
x, y = self.testPoint
x1, y1 = self._getCurrentPoint()
x2, y2 = bcp1
x3, y3 = bcp2
x4, y4 = point
if x1 < x and x2 < x and x3 < x and x4 < x:
return
if y1 < y and y2 < y and y3 < y and y4 < y:
return
if y1 >= y and y2 >= y and y3 >= y and y4 >= y:
return
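# Convert the Bezier y components to power-basis coefficients, so that
# B_y(t) = ay*t**3 + by*t**2 + cy*t + dy for t in [0, 1]; intersections with
# the horizontal ray are the real roots of B_y(t) - y = 0 on that interval.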
dy = y1
cy = (y2 - dy) * 3.0
by = (y3 - y2) * 3.0 - cy
ay = y4 - dy - cy - by
solutions = sorted(solveCubic(ay, by, cy, dy - y))
solutions = [t for t in solutions if -0.0 <= t <= 1.0]
if not solutions:
return
dx = x1
cx = (x2 - dx) * 3.0
bx = (x3 - x2) * 3.0 - cx
ax = x4 - dx - cx - bx
above = y1 >= y
lastT = None
for t in solutions:
if t == lastT:
continue
lastT = t
t2 = t * t
t3 = t2 * t
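# The sign of the derivative dB_y/dt at each root tells whether the curve
# crosses the ray going up or down; if the first derivative vanishes (a
# tangent point) fall back to the second derivative, and finally to the
# leading coefficient.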
direction = 3 * ay * t2 + 2 * by * t + cy
incomingGoingUp = outgoingGoingUp = direction > 0.0
if direction == 0.0:
direction = 6 * ay * t + 2 * by
outgoingGoingUp = direction > 0.0
incomingGoingUp = not outgoingGoingUp
if direction == 0.0:
direction = ay
incomingGoingUp = outgoingGoingUp = direction > 0.0
xt = ax * t3 + bx * t2 + cx * t + dx
if xt < x:
continue
if t in (0.0, -0.0):
if not outgoingGoingUp:
self._addIntersection(outgoingGoingUp)
elif t == 1.0:
if incomingGoingUp:
self._addIntersection(incomingGoingUp)
else:
if incomingGoingUp == outgoingGoingUp:
self._addIntersection(outgoingGoingUp)
# else:
# we're not really intersecting, merely touching
def _qCurveToOne_unfinished(self, bcp, point):
# XXX need to finish this, for now doing it through a cubic
# (BasePen implements _qCurveTo in terms of a cubic) will
# have to do.
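# Note: ZERO_MINUS_EPSILON and ONE_PLUS_EPSILON are not defined in this
# module; since this method does not override _qCurveToOne, BasePen's cubic
# fallback is used and this code path is never reached.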
x, y = self.testPoint
x1, y1 = self._getCurrentPoint()
x2, y2 = bcp
x3, y3 = point
c = y1
b = (y2 - c) * 2.0
a = y3 - c - b
solutions = sorted(solveQuadratic(a, b, c - y))
solutions = [
t for t in solutions if ZERO_MINUS_EPSILON <= t <= ONE_PLUS_EPSILON
]
if not solutions:
return
# XXX
def METHOD_NAME(self):
if self._getCurrentPoint() != self.firstPoint:
self.lineTo(self.firstPoint)
self.firstPoint = None
def _endPath(self):
"""Insideness is not defined for open contours."""
raise NotImplementedError
|
3,701 |
test create tracking request
|
import unittest
from unittest.mock import patch
from karrio.core.utils import DP
from karrio import Tracking
from karrio.core.models import TrackingRequest
from .fixture import gateway
class TestCarrierTracking(unittest.TestCase):
def setUp(self):
self.maxDiff = None
self.TrackingRequest = TrackingRequest(tracking_numbers=TRACKING_PAYLOAD)
def METHOD_NAME(self):
request = gateway.mapper.create_tracking_request(self.TrackingRequest)
self.assertEqual(request.serialize(), TrackingRequestJSON)
def test_get_tracking(self):
with patch("karrio.mappers.dicom.proxy.http") as mock:
mock.return_value = "{}"
Tracking.fetch(self.TrackingRequest).from_(gateway)
self.assertEqual(
mock.call_args[1]["url"],
f"{gateway.settings.server_url}/v1/tracking/1Z12345E6205277936",
)
def test_parse_tracking_response(self):
with patch("karrio.mappers.dicom.proxy.http") as mock:
mock.return_value = TrackingResponseJSON
parsed_response = (
Tracking.fetch(self.TrackingRequest).from_(gateway).parse()
)
self.assertEqual(
DP.to_dict(parsed_response), DP.to_dict(ParsedTrackingResponse)
)
def test_parse_error_response(self):
with patch("karrio.mappers.dicom.proxy.http") as mock:
mock.return_value = ErrorResponseJSON
parsed_response = (
Tracking.fetch(self.TrackingRequest).from_(gateway).parse()
)
self.assertEqual(
DP.to_dict(parsed_response), DP.to_dict(ParsedErrorResponse)
)
if __name__ == "__main__":
unittest.main()
TRACKING_PAYLOAD = ["1Z12345E6205277936"]
ParsedTrackingResponse = [
[
{
"carrier_id": "dicom",
"carrier_name": "dicom",
"events": [
{
"code": "Delivered",
"date": "2021-02-10",
"description": "Shipper Release NSR",
"location": "MTL",
"time": "04:27",
}
],
"tracking_number": "W1234567",
}
],
[],
]
ParsedErrorResponse = [
[],
[
{
"carrier_id": "dicom",
"carrier_name": "dicom",
"message": "Authorization has been denied for this request.",
}
],
]
TrackingRequestJSON = ["1Z12345E6205277936"]
TrackingResponseJSON = """{
"custRefNum": "string",
"activities": [
{
"activityDate": "2021-02-10T04:27:31Z",
"createDate": "2021-02-10T04:27:31Z",
"status": "Delivered",
"statusDetail": "Shipper Release NSR",
"code": "DL",
"codeDetail": "NI",
"group": "DL",
"additionalInformation": "Package did not need a signature.",
"terminal": "MTL",
"latitude": "45.46770054493765",
"longitude": "-73.71983885765076",
"height": "10.5",
"weight": "5.0",
"width": "5.0",
"length": "5.5",
"parcelId": "2"
}
],
"activityImages": [
{
"imageDate": "2021-02-10T04:27:31Z",
"url": "https://www.dicom.com/",
"clientName": "Georges",
"imageType": "POD"
}
],
"isAuthorized": true,
"id": "78852145",
"trackingNumber": "W1234567",
"category": "Parcel",
"paymentType": "Prepaid",
"billingAccount": "400040",
"note": "This is a note for the driver",
"status": "0",
"direction": "CA",
"sender": {
"id": "4658",
"addressLine1": "10500 mystreet",
"addressLine2": "Suite 10",
"streetNumber": "10500",
"streetType": "AVE",
"streetName": "mystreet",
"streetDirection": "N",
"suite": "10",
"city": "Montreal",
"provinceCode": "QC",
"postalCode": "H9P2T7",
"countryCode": "CA",
"customerName": "Test Company",
"customerNickName": "string",
"contact": {
"language": "en",
"email": "[email protected]",
"department": "IT",
"telephone": "4501234567",
"extension": "320",
"fullName": "FullName Contact"
}
},
"consignee": {
"id": "4658",
"addressLine1": "10500 mystreet",
"addressLine2": "Suite 10",
"streetNumber": "10500",
"streetType": "AVE",
"streetName": "mystreet",
"streetDirection": "N",
"suite": "10",
"city": "Montreal",
"provinceCode": "QC",
"postalCode": "H9P2T7",
"countryCode": "CA",
"customerName": "Test Company",
"customerNickName": "string",
"contact": {
"language": "en",
"email": "[email protected]",
"department": "IT",
"telephone": "4501234567",
"extension": "320",
"fullName": "FullName Contact"
}
},
"unitOfMeasurement": "K",
"parcels": [
{
"id": "1",
"parcelType": "Box",
"quantity": "3",
"weight": "5",
"length": "5",
"depth": "5",
"width": "5",
"note": "Special instruction...",
"status": 0,
"FCA_Class": "100.00",
"hazmat": {
"number": "100",
"phone": "4510214786"
},
"requestReturnLabel": true,
"returnWaybill": "Q1234568"
}
],
"surcharges": [
{
"id": "4658723",
"value": "Heating",
"type": "HEAT",
"name": "Heating",
"amount": 0
}
],
"createDate": "2021-02-10T04:27:31Z",
"updateDate": "2021-02-10T04:27:31Z",
"deliveryType": "GRD",
"references": [
{
"type": "INV",
"code": "123"
}
],
"returnAddress": {
"id": "4658",
"addressLine1": "10500 mystreet",
"addressLine2": "Suite 10",
"streetNumber": "10500",
"streetType": "AVE",
"streetName": "mystreet",
"streetDirection": "N",
"suite": "10",
"city": "Montreal",
"provinceCode": "QC",
"postalCode": "H9P2T7",
"countryCode": "CA",
"customerName": "Test Company",
"customerNickName": "string",
"contact": {
"language": "en",
"email": "[email protected]",
"department": "IT",
"telephone": "4501234567",
"extension": "320",
"fullName": "FullName Contact"
}
},
"appointment": {
"ID": "string",
"type": "Scheduled",
"date": "2021-02-10T04:27:31Z",
"time": "15:00",
"phone": "5142648798"
},
"promoCodes": [
{
"code": "PROMO-465871"
}
],
"internationalDetails": {
"currency": "USD",
"exchangeRate": 0,
"totalRetailValue": "500",
"dutyBilling": "N",
"descriptionOfGoods": "Floor Cleaner",
"importerOfRecord": {
"id": "4658",
"addressLine1": "10500 mystreet",
"addressLine2": "Suite 10",
"streetNumber": "10500",
"streetType": "AVE",
"streetName": "mystreet",
"streetDirection": "N",
"suite": "10",
"city": "Montreal",
"provinceCode": "QC",
"postalCode": "H9P2T7",
"countryCode": "CA",
"customerName": "Test Company",
"customerNickName": "string",
"contact": {
"language": "en",
"email": "[email protected]",
"department": "IT",
"telephone": "4501234567",
"extension": "320",
"fullName": "FullName Contact"
}
},
"broker": {
"id": "45612456",
"href": "",
"otherBroker": "Levingstone",
"CSA_BusinessNumber": "789"
},
"purpose": "COM",
"products": [
{
"id": "123",
"Quantity": 1
}
],
"borderStatus": "LVS",
"isDicomBroker": true
},
"pickupDate": "2021-02-10T04:27:31Z"
}
"""
ErrorResponseJSON = """{
"Message": "Authorization has been denied for this request."
}
"""
|
3,702 |
set initial data
|
from django import forms
from django.core import exceptions
from django.forms.models import inlineformset_factory
from django.utils.translation import gettext_lazy as _
from oscar.core.loading import get_classes, get_model
Product = get_model("catalogue", "Product")
ProductClass = get_model("catalogue", "ProductClass")
ProductAttribute = get_model("catalogue", "ProductAttribute")
StockRecord = get_model("partner", "StockRecord")
ProductCategory = get_model("catalogue", "ProductCategory")
ProductImage = get_model("catalogue", "ProductImage")
ProductRecommendation = get_model("catalogue", "ProductRecommendation")
AttributeOptionGroup = get_model("catalogue", "AttributeOptionGroup")
AttributeOption = get_model("catalogue", "AttributeOption")
(
StockRecordForm,
ProductCategoryForm,
ProductImageForm,
ProductRecommendationForm,
ProductAttributesForm,
AttributeOptionForm,
) = get_classes(
"dashboard.catalogue.forms",
(
"StockRecordForm",
"ProductCategoryForm",
"ProductImageForm",
"ProductRecommendationForm",
"ProductAttributesForm",
"AttributeOptionForm",
),
)
BaseStockRecordFormSet = inlineformset_factory(
Product, StockRecord, form=StockRecordForm, extra=1
)
class StockRecordFormSet(BaseStockRecordFormSet):
def __init__(self, product_class, user, *args, **kwargs):
self.user = user
self.require_user_stockrecord = not user.is_staff
self.product_class = product_class
if not user.is_staff and "instance" in kwargs and "queryset" not in kwargs:
kwargs.update(
{
"queryset": StockRecord.objects.filter(
product=kwargs["instance"], partner__in=user.partners.all()
)
}
)
super().__init__(*args, **kwargs)
self.METHOD_NAME()
def METHOD_NAME(self):
"""
If user has only one partner associated, set the first
stock record's partner to it. Can't pre-select for staff users as
they're allowed to save a product without a stock record.
This is intentionally done after calling __init__ as passing initial
data to __init__ creates a form for each list item. So depending on
whether we can pre-select the partner or not, we'd end up with 1 or 2
forms for an unbound form.
"""
if self.require_user_stockrecord:
try:
user_partner = self.user.partners.get()
except (exceptions.ObjectDoesNotExist, exceptions.MultipleObjectsReturned):
pass
else:
partner_field = self.forms[0].fields.get("partner", None)
if partner_field and partner_field.initial is None:
partner_field.initial = user_partner
def _construct_form(self, i, **kwargs):
kwargs["product_class"] = self.product_class
kwargs["user"] = self.user
return super()._construct_form(i, **kwargs)
def clean(self):
"""
If the user isn't a staff user, this validation ensures that at least
one stock record's partner is associated with a users partners.
"""
if any(self.errors):
return
if self.require_user_stockrecord:
stockrecord_partners = set(
[form.cleaned_data.get("partner", None) for form in self.forms]
)
user_partners = set(self.user.partners.all())
if not user_partners & stockrecord_partners:
raise exceptions.ValidationError(
_(
"At least one stock record must be set to a partner that"
" you're associated with."
)
)
BaseProductCategoryFormSet = inlineformset_factory(
Product, ProductCategory, form=ProductCategoryForm, extra=1, can_delete=True
)
class ProductCategoryFormSet(BaseProductCategoryFormSet):
# pylint: disable=unused-argument
def __init__(self, product_class, user, *args, **kwargs):
# This function just exists to drop the extra arguments
super().__init__(*args, **kwargs)
def clean(self):
if not self.instance.is_child and self.get_num_categories() == 0:
raise forms.ValidationError(
_("Stand-alone and parent products must have at least one category")
)
if self.instance.is_child and self.get_num_categories() > 0:
raise forms.ValidationError(_("A child product should not have categories"))
def get_num_categories(self):
num_categories = 0
for i in range(0, self.total_form_count()):
form = self.forms[i]
if (
hasattr(form, "cleaned_data")
and form.cleaned_data.get("category", None)
and not form.cleaned_data.get("DELETE", False)
):
num_categories += 1
return num_categories
BaseProductImageFormSet = inlineformset_factory(
Product, ProductImage, form=ProductImageForm, extra=2
)
class ProductImageFormSet(BaseProductImageFormSet):
# pylint: disable=unused-argument
def __init__(self, product_class, user, *args, **kwargs):
super().__init__(*args, **kwargs)
BaseProductRecommendationFormSet = inlineformset_factory(
Product,
ProductRecommendation,
form=ProductRecommendationForm,
extra=5,
fk_name="primary",
)
class ProductRecommendationFormSet(BaseProductRecommendationFormSet):
# pylint: disable=unused-argument
def __init__(self, product_class, user, *args, **kwargs):
super().__init__(*args, **kwargs)
ProductAttributesFormSet = inlineformset_factory(
ProductClass, ProductAttribute, form=ProductAttributesForm, extra=3
)
AttributeOptionFormSet = inlineformset_factory(
AttributeOptionGroup, AttributeOption, form=AttributeOptionForm, extra=3
)
|
3,703 |
test mark late runs marks multiple runs
|
import pendulum
import pytest
from prefect.server import models, schemas
from prefect.server.services.late_runs import MarkLateRuns
from prefect.settings import (
PREFECT_API_SERVICES_LATE_RUNS_AFTER_SECONDS,
temporary_settings,
)
@pytest.fixture
async def late_run(session, flow):
async with session.begin():
return await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
state=schemas.states.Scheduled(
scheduled_time=pendulum.now("UTC").subtract(minutes=1)
),
),
)
@pytest.fixture
async def late_run_2(session, flow):
async with session.begin():
return await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
state=schemas.states.Scheduled(
scheduled_time=pendulum.now("UTC").subtract(minutes=1)
),
),
)
@pytest.fixture
async def future_run(session, flow):
async with session.begin():
return await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
state=schemas.states.Scheduled(
scheduled_time=pendulum.now("UTC").add(minutes=1)
),
),
)
@pytest.fixture
async def now_run(session, flow):
async with session.begin():
return await models.flow_runs.create_flow_run(
session=session,
flow_run=schemas.core.FlowRun(
flow_id=flow.id,
state=schemas.states.Scheduled(scheduled_time=pendulum.now("UTC")),
),
)
async def test_marks_late_run(session, late_run):
assert late_run.state.name == "Scheduled"
st = late_run.state.state_details.scheduled_time
assert (
late_run.next_scheduled_start_time == st
), "Next scheduled time is set by orchestration rules correctly"
await MarkLateRuns(handle_signals=False).start(loops=1)
await session.refresh(late_run)
assert late_run.state.name == "Late"
st2 = late_run.state.state_details.scheduled_time
assert st == st2, "Scheduled time is unchanged"
async def test_marks_late_run_at_buffer(session, late_run):
assert late_run.state.name == "Scheduled"
st = late_run.state.state_details.scheduled_time
assert (
late_run.next_scheduled_start_time == st
), "Next scheduled time is set by orchestration rules correctly"
with temporary_settings(updates={PREFECT_API_SERVICES_LATE_RUNS_AFTER_SECONDS: 60}):
await MarkLateRuns(handle_signals=False).start(loops=1)
await session.refresh(late_run)
assert late_run.state.name == "Late"
st2 = late_run.state.state_details.scheduled_time
assert st == st2, "Scheduled time is unchanged"
async def test_does_not_mark_run_late_if_within_buffer(session, late_run):
assert late_run.state.name == "Scheduled"
st = late_run.state.state_details.scheduled_time
assert (
late_run.next_scheduled_start_time == st
), "Next scheduled time is set by orchestration rules correctly"
with temporary_settings(updates={PREFECT_API_SERVICES_LATE_RUNS_AFTER_SECONDS: 61}):
await MarkLateRuns(handle_signals=False).start(loops=1)
await session.refresh(late_run)
assert late_run.state.name == "Scheduled"
st2 = late_run.state.state_details.scheduled_time
assert st == st2, "Scheduled time is unchanged"
async def test_does_not_mark_run_late_if_in_future(session, future_run):
assert future_run.state.name == "Scheduled"
st = future_run.state.state_details.scheduled_time
assert (
future_run.next_scheduled_start_time == st
), "Next scheduled time is set by orchestration rules correctly"
await MarkLateRuns(handle_signals=False).start(loops=1)
await session.refresh(future_run)
assert future_run.state.name == "Scheduled"
st2 = future_run.state.state_details.scheduled_time
assert st == st2, "Scheduled time is unchanged"
async def test_does_not_mark_run_late_if_now(session, now_run):
# The 'now' time check during the run will be after the 'now' scheduled time on the
# run, but it should still be within the 'mark late after' buffer.
assert now_run.state.name == "Scheduled"
st = now_run.state.state_details.scheduled_time
assert (
now_run.next_scheduled_start_time == st
), "Next scheduled time is set by orchestration rules correctly"
await MarkLateRuns(handle_signals=False).start(loops=1)
await session.refresh(now_run)
assert now_run.state.name == "Scheduled"
st2 = now_run.state.state_details.scheduled_time
assert st == st2, "Scheduled time is unchanged"
async def test_mark_late_runs_doesnt_visit_runs_twice(session, late_run):
assert late_run.state.name == "Scheduled"
si = late_run.state.id
st = late_run.state.timestamp
await MarkLateRuns(handle_signals=False).start(loops=1)
await session.refresh(late_run)
si2 = late_run.state.id
st2 = late_run.state.timestamp
assert si != si2
assert st != st2
await MarkLateRuns(handle_signals=False).start(loops=1)
await session.refresh(late_run)
si3 = late_run.state.id
st3 = late_run.state.timestamp
# same timestamp; unchanged state
assert si2 == si3
assert st2 == st3
async def METHOD_NAME(
session, late_run, late_run_2
):
assert late_run.state.name == "Scheduled"
assert late_run_2.state.name == "Scheduled"
await MarkLateRuns(handle_signals=False).start(loops=1)
await session.refresh(late_run)
await session.refresh(late_run_2)
assert late_run.state_name == "Late"
assert late_run_2.state_name == "Late"
|
3,704 |
member
|
import urllib.parse
from mage_ai.api.errors import ApiError
from mage_ai.api.resources.GenericResource import GenericResource
from mage_ai.cache.block_action_object import BlockActionObjectCache
from mage_ai.data_preparation.models.custom_templates.constants import (
DIRECTORY_FOR_BLOCK_TEMPLATES,
DIRECTORY_FOR_PIPELINE_TEMPLATES,
)
from mage_ai.data_preparation.models.custom_templates.custom_block_template import (
CustomBlockTemplate,
)
from mage_ai.data_preparation.models.custom_templates.custom_pipeline_template import (
CustomPipelineTemplate,
)
from mage_ai.data_preparation.models.custom_templates.utils import (
flatten_files,
get_templates,
group_and_hydrate_files,
)
from mage_ai.data_preparation.models.pipeline import Pipeline
from mage_ai.data_preparation.templates.template import fetch_template_source
from mage_ai.shared.hash import ignore_keys
from mage_ai.shared.utils import clean_name
OBJECT_TYPE_KEY = 'object_type'
class CustomTemplateResource(GenericResource):
@classmethod
def collection(self, query, meta, user, **kwargs):
object_type = query.get(OBJECT_TYPE_KEY, [None])
if object_type:
object_type = object_type[0]
templates = []
file_dicts = []
if DIRECTORY_FOR_BLOCK_TEMPLATES == object_type:
file_dicts = get_templates(DIRECTORY_FOR_BLOCK_TEMPLATES)
template_class = CustomBlockTemplate
elif DIRECTORY_FOR_PIPELINE_TEMPLATES == object_type:
file_dicts = get_templates(DIRECTORY_FOR_PIPELINE_TEMPLATES)
template_class = CustomPipelineTemplate
if file_dicts:
file_dicts_flat = flatten_files(file_dicts)
templates = group_and_hydrate_files(file_dicts_flat, template_class)
return self.build_result_set(
templates,
user,
**kwargs,
)
@classmethod
async def create(self, payload, user, **kwargs):
custom_template = None
object_type = payload.get(OBJECT_TYPE_KEY)
template_uuid = payload.get('template_uuid')
if template_uuid:
template_uuid = clean_name(template_uuid)
payload['template_uuid'] = template_uuid
if DIRECTORY_FOR_BLOCK_TEMPLATES == object_type:
custom_template = CustomBlockTemplate.load(template_uuid=template_uuid)
if not custom_template:
custom_template = CustomBlockTemplate(**ignore_keys(payload, [
'uuid',
OBJECT_TYPE_KEY,
]))
if user:
custom_template.user = dict(
username=user.username,
)
custom_template.content = fetch_template_source(
custom_template.block_type,
payload.get('config', {}),
language=custom_template.language,
)
custom_template.save()
cache = await BlockActionObjectCache.initialize_cache()
cache.update_custom_block_template(custom_template)
elif DIRECTORY_FOR_PIPELINE_TEMPLATES == object_type:
custom_template = CustomPipelineTemplate.load(template_uuid=template_uuid)
if not custom_template:
pipeline = Pipeline.get(payload.get('pipeline_uuid'))
custom_template = CustomPipelineTemplate.create_from_pipeline(
pipeline,
template_uuid,
name=payload.get('name'),
description=payload.get('description'),
)
if user:
custom_template.user = dict(
username=user.username,
)
custom_template.save()
if custom_template:
return self(custom_template, user, **kwargs)
@classmethod
def METHOD_NAME(self, pk, user, **kwargs):
query = kwargs.get('query', {})
object_type = query.get(OBJECT_TYPE_KEY, [None])
if object_type:
object_type = object_type[0]
template_uuid = urllib.parse.unquote(pk)
try:
if DIRECTORY_FOR_BLOCK_TEMPLATES == object_type:
return self(CustomBlockTemplate.load(template_uuid=template_uuid), user, **kwargs)
elif DIRECTORY_FOR_PIPELINE_TEMPLATES == object_type:
return self(
CustomPipelineTemplate.load(template_uuid=template_uuid),
user,
**kwargs,
)
except Exception as err:
print(f'[WARNING] CustomTemplateResource.member: {err}')
raise ApiError(ApiError.RESOURCE_NOT_FOUND)
async def delete(self, **kwargs):
cache = await BlockActionObjectCache.initialize_cache()
cache.update_custom_block_template(self.model, remove=True)
self.model.delete()
async def update(self, payload, **kwargs):
template_uuid = payload.get('template_uuid')
if template_uuid:
template_uuid = clean_name(template_uuid)
payload['template_uuid'] = template_uuid
object_type = payload.get('object_type')
cache = None
if DIRECTORY_FOR_BLOCK_TEMPLATES == object_type:
cache = await BlockActionObjectCache.initialize_cache()
cache.update_custom_block_template(self.model, remove=True)
for key, value in ignore_keys(payload, [
'uuid',
OBJECT_TYPE_KEY,
]).items():
setattr(self.model, key, value)
self.model.save()
if DIRECTORY_FOR_BLOCK_TEMPLATES == object_type and cache:
cache.update_custom_block_template(self.model)
|
3,705 |
set last thread
|
from django.core.cache import cache
from django.db import models
from mptt.managers import TreeManager
from mptt.models import MPTTModel, TreeForeignKey
from . import PRIVATE_THREADS_ROOT_NAME, THREADS_ROOT_NAME
from ..acl.cache import clear_acl_cache
from ..acl.models import BaseRole
from ..conf import settings
from ..core.utils import slugify
from ..threads.threadtypes import trees_map
CACHE_NAME = "misago_categories_tree"
class CategoryManager(TreeManager):
def private_threads(self):
return self.get_special(PRIVATE_THREADS_ROOT_NAME)
def root_category(self):
return self.get_special(THREADS_ROOT_NAME)
def get_special(self, special_role):
cache_name = "%s_%s" % (CACHE_NAME, special_role)
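# "nada" acts as a cache-miss sentinel, so a legitimately cached falsy value
# is not mistaken for "not cached yet".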
special_category = cache.get(cache_name, "nada")
if special_category == "nada":
special_category = self.get(special_role=special_role)
cache.set(cache_name, special_category)
return special_category
def all_categories(self, include_root=False):
tree_id = trees_map.get_tree_id_for_root(THREADS_ROOT_NAME)
queryset = self.filter(tree_id=tree_id)
if not include_root:
queryset = queryset.filter(level__gt=0)
return queryset.order_by("lft")
def get_cached_categories_dict(self):
categories_dict = cache.get(CACHE_NAME, "nada")
if categories_dict == "nada":
categories_dict = self.get_categories_dict_from_db()
cache.set(CACHE_NAME, categories_dict)
return categories_dict
def get_categories_dict_from_db(self):
categories_dict = {}
for category in self.all_categories(include_root=True):
categories_dict[category.pk] = category
return categories_dict
def clear_cache(self):
cache.delete(CACHE_NAME)
class Category(MPTTModel):
parent = TreeForeignKey(
"self", null=True, blank=True, related_name="children", on_delete=models.CASCADE
)
special_role = models.CharField(max_length=255, null=True, blank=True)
name = models.CharField(max_length=255)
slug = models.CharField(max_length=255)
short_name = models.CharField(max_length=255, null=True, blank=True)
color = models.CharField(max_length=7, null=True, blank=True)
description = models.TextField(null=True, blank=True)
is_closed = models.BooleanField(default=False)
threads = models.PositiveIntegerField(default=0)
posts = models.PositiveIntegerField(default=0)
last_post_on = models.DateTimeField(null=True, blank=True)
last_thread = models.ForeignKey(
"misago_threads.Thread",
related_name="+",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
last_thread_title = models.CharField(max_length=255, null=True, blank=True)
last_thread_slug = models.CharField(max_length=255, null=True, blank=True)
last_poster = models.ForeignKey(
settings.AUTH_USER_MODEL,
related_name="+",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
last_poster_name = models.CharField(max_length=255, null=True, blank=True)
last_poster_slug = models.CharField(max_length=255, null=True, blank=True)
require_threads_approval = models.BooleanField(default=False)
require_replies_approval = models.BooleanField(default=False)
require_edits_approval = models.BooleanField(default=False)
prune_started_after = models.PositiveIntegerField(default=0)
prune_replied_after = models.PositiveIntegerField(default=0)
archive_pruned_in = models.ForeignKey(
"self",
related_name="pruned_archive",
null=True,
blank=True,
on_delete=models.SET_NULL,
)
css_class = models.CharField(max_length=255, null=True, blank=True)
objects = CategoryManager()
def __str__(self):
return str(self.thread_type.get_category_name(self))
@property
def thread_type(self):
return trees_map.get_type_for_tree_id(self.tree_id)
def delete(self, *args, **kwargs):
Category.objects.clear_cache()
clear_acl_cache()
return super().delete(*args, **kwargs)
def synchronize(self):
threads_queryset = self.thread_set.filter(is_hidden=False, is_unapproved=False)
self.threads = threads_queryset.count()
if self.threads:
replies_sum = threads_queryset.aggregate(models.Sum("replies"))
self.posts = self.threads + replies_sum["replies__sum"]
else:
self.posts = 0
if self.threads:
last_thread_qs = threads_queryset.filter(
is_hidden=False, is_unapproved=False
)
last_thread = last_thread_qs.order_by("-last_post_on")[:1][0]
self.METHOD_NAME(last_thread)
else:
self.empty_last_thread()
def delete_content(self):
from .signals import delete_category_content
delete_category_content.send(sender=self)
def move_content(self, new_category):
from .signals import move_category_content
move_category_content.send(sender=self, new_category=new_category)
def get_absolute_url(self):
return self.thread_type.get_category_absolute_url(self)
def get_last_thread_url(self):
return self.thread_type.get_category_last_thread_url(self)
def get_last_thread_new_url(self):
return self.thread_type.get_category_last_thread_new_url(self)
def get_last_post_url(self):
return self.thread_type.get_category_last_post_url(self)
def set_name(self, name):
self.name = name
self.slug = slugify(name)
def METHOD_NAME(self, thread):
self.last_post_on = thread.last_post_on
self.last_thread = thread
self.last_thread_title = thread.title
self.last_thread_slug = thread.slug
self.last_poster = thread.last_poster
self.last_poster_name = thread.last_poster_name
self.last_poster_slug = thread.last_poster_slug
def empty_last_thread(self):
self.last_post_on = None
self.last_thread = None
self.last_thread_title = None
self.last_thread_slug = None
self.last_poster = None
self.last_poster_name = None
self.last_poster_slug = None
def has_child(self, child):
return child.lft > self.lft and child.rght < self.rght
class CategoryRole(BaseRole):
pass
class RoleCategoryACL(models.Model):
role = models.ForeignKey(
"misago_acl.Role", related_name="categories_acls", on_delete=models.CASCADE
)
category = models.ForeignKey(
"Category", related_name="category_role_set", on_delete=models.CASCADE
)
category_role = models.ForeignKey(CategoryRole, on_delete=models.CASCADE)
|
3,706 |
get context data
|
# Copyright © Michal Čihař <[email protected]>
#
# SPDX-License-Identifier: GPL-3.0-or-later
import csv
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.utils.translation import activate, gettext, pgettext
from django.views.generic.list import ListView
from weblate.accounts.notifications import NOTIFICATIONS_ACTIONS
from weblate.lang.models import Language
from weblate.trans.forms import ChangesForm
from weblate.trans.models import Component, Project, Translation, Unit
from weblate.trans.models.change import Change
from weblate.utils.site import get_site_url
from weblate.utils.stats import ProjectLanguage
from weblate.utils.views import PathViewMixin
class ChangesView(PathViewMixin, ListView):
"""Browser for changes."""
paginate_by = 20
supported_path_types = (
None,
Project,
Component,
Translation,
Language,
ProjectLanguage,
Unit,
)
def METHOD_NAME(self, **kwargs):
"""Create context for rendering page."""
context = super().METHOD_NAME(**kwargs)
context["path_object"] = self.path_object
if isinstance(self.path_object, Unit):
context["title"] = (
pgettext(
"Changes of string in a translation", "Changes of string in %s"
)
% self.path_object
)
elif isinstance(self.path_object, Translation):
context["title"] = (
pgettext("Changes in translation", "Changes in %s") % self.path_object
)
elif isinstance(self.path_object, Component):
context["title"] = (
pgettext("Changes in component", "Changes in %s") % self.path_object
)
elif isinstance(self.path_object, Project):
context["title"] = (
pgettext("Changes in project", "Changes in %s") % self.path_object
)
elif isinstance(self.path_object, Language):
context["title"] = (
pgettext("Changes in language", "Changes in %s") % self.path_object
)
elif isinstance(self.path_object, ProjectLanguage):
context["title"] = (
pgettext("Changes in project", "Changes in %s") % self.path_object
)
elif self.path_object is None:
context["title"] = gettext("Changes")
else:
raise TypeError(f"Unsupported {self.path_object}")
if self.path_object is None:
context["changes_rss"] = reverse("rss")
else:
context["changes_rss"] = reverse(
"rss", kwargs={"path": self.path_object.get_url_path()}
)
if self.changes_form.is_valid():
context["query_string"] = self.changes_form.urlencode()
context["search_items"] = self.changes_form.items()
context["form"] = self.changes_form
return context
def setup(self, *args, **kwargs):
super().setup(*args, **kwargs)
self.changes_form = ChangesForm(data=self.request.GET)
def get_queryset(self):
"""Return list of changes to browse."""
filters = {}
if self.path_object is None:
params = {}
elif isinstance(self.path_object, Project):
params = {"project": self.path_object}
elif isinstance(self.path_object, Component):
params = {"component": self.path_object}
elif isinstance(self.path_object, Translation):
params = {"translation": self.path_object}
elif isinstance(self.path_object, Unit):
params = {"unit": self.path_object}
elif isinstance(self.path_object, Language):
params = {}
filters = {"language": self.path_object}
elif isinstance(self.path_object, ProjectLanguage):
params = {"project": self.path_object.project}
filters = {"language": self.path_object.language}
else:
raise TypeError(f"Unsupported {self.path_object}")
form = self.changes_form
if form.is_valid():
if action := form.cleaned_data.get("action"):
filters["action__in"] = action
if start_date := form.cleaned_data.get("start_date"):
filters["timestamp__date__gte"] = start_date
if end_date := form.cleaned_data.get("end_date"):
filters["timestamp__date__lte"] = end_date
if user := form.cleaned_data.get("user"):
filters["user"] = user
result = Change.objects.last_changes(self.request.user, **params)
if filters:
result = result.filter(**filters)
return result
def paginate_queryset(self, queryset, page_size):
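# An invalid filter form should yield an empty listing rather than silently
# falling back to unfiltered results.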
if not self.changes_form.is_valid():
queryset = queryset.none()
paginator, page, queryset, is_paginated = super().paginate_queryset(
queryset, page_size
)
page = Change.objects.preload_list(page)
return paginator, page, queryset, is_paginated
class ChangesCSVView(ChangesView):
"""CSV renderer for changes view."""
paginate_by = None
def get(self, request, *args, **kwargs):
object_list = self.get_queryset()[:2000]
if not request.user.has_perm("change.download", self.path_object):
raise PermissionDenied
# Always output in english
activate("en")
response = HttpResponse(content_type="text/csv; charset=utf-8")
response["Content-Disposition"] = "attachment; filename=changes.csv"
writer = csv.writer(response)
# Add header
writer.writerow(
("timestamp", "action", "user", "url", "target", "edit_distance")
)
for change in object_list:
writer.writerow(
(
change.timestamp.isoformat(),
change.get_action_display(),
change.user.username if change.user else "",
get_site_url(change.get_absolute_url()),
change.target,
change.get_distance(),
)
)
return response
@login_required
def show_change(request, pk):
change = get_object_or_404(Change, pk=pk)
acl_obj = change.translation or change.component or change.project
if not request.user.has_perm("unit.edit", acl_obj):
raise PermissionDenied
others = request.GET.getlist("other")
changes = None
if others:
changes = Change.objects.filter(pk__in=[*others, change.pk])
for change in changes:
acl_obj = change.translation or change.component or change.project
if not request.user.has_perm("unit.edit", acl_obj):
raise PermissionDenied
if change.action not in NOTIFICATIONS_ACTIONS:
content = ""
else:
notifications = NOTIFICATIONS_ACTIONS[change.action]
notification = notifications[0](None)
context = notification.get_context(change if not others else None)
context["request"] = request
context["changes"] = changes
context["subject"] = notification.render_template(
"_subject.txt", context, digest=bool(others)
)
content = notification.render_template(".html", context, digest=bool(others))
return HttpResponse(content_type="text/html; charset=utf-8", content=content)
|
3,707 |
test range end only
|
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2023 Scipp contributors (https://github.com/scipp)
# @file
# @author Matthew Andrew
import numpy as np
import pytest
import scipp as sc
class TestSliceByValue:
def setup_method(self):
var = sc.Variable(dims=['x'], values=np.arange(5, dtype=float) + 0.5)
values_a = sc.Variable(
dims=['x'], values=[1.0, 1.1, 1.2, 1.3, 1.4], unit=sc.units.m
)
values_b = sc.Variable(
dims=['x'], values=[1.0, 2.0, 3.0, 4.0, 5.0], unit=sc.units.m
)
self._d = sc.Dataset(data={'a': values_a, 'b': values_b}, coords={'x': var})
def test_slice_by_single_value(self):
def test(sliceable):
by_value = sliceable['x', 1.5 * sc.units.dimensionless]
by_index = sliceable['x', 1]
assert sc.identical(by_value, by_index)
test(self._d['a'])
test(self._d)
def test_assigning_to_slice_by_value_dataarray(self):
self._d['a']['x', 1.5 * sc.units.dimensionless] = 5.7 * sc.units.m
slice = self._d['a']['x', 1.5 * sc.units.dimensionless].values
assert slice == np.array(5.7)
def test_assigning_to_slice_by_value_dataset(self):
self._d['x', 0.5 * sc.units.dimensionless] = self._d[
'x', 1.5 * sc.units.dimensionless
]
assert self._d['x', 0]['a'].value == self._d['x', 1]['a'].value
def test_modifying_slice_in_place(self):
self._d['a']['x', 1.5 * sc.units.dimensionless] *= 2.5
slice = self._d['a']['x', 1.5 * sc.units.dimensionless].values
assert slice == np.array(2.75)
slice = self._d['x', 1.5 * sc.units.dimensionless]['a'].values
assert slice == np.array(2.75)
self._d['a']['x', 1.5 * sc.units.dimensionless] += 2.25 * sc.units.m
slice = self._d['a']['x', 1.5 * sc.units.dimensionless].values
assert slice == np.array(5.0)
slice = self._d['x', 1.5 * sc.units.dimensionless]['a'].values
assert slice == np.array(5.0)
self._d['a']['x', 1.5 * sc.units.dimensionless] -= 3.0 * sc.units.m
slice = self._d['a']['x', 1.5 * sc.units.dimensionless].values
assert slice == np.array(2.0)
slice = self._d['x', 1.5 * sc.units.dimensionless]['a'].values
assert slice == np.array(2.0)
self._d['a']['x', 1.5 * sc.units.dimensionless] /= 2.0
slice = self._d['a']['x', 1.5 * sc.units.dimensionless].values
assert slice == np.array(1.0)
slice = self._d['x', 1.5 * sc.units.dimensionless]['a'].values
assert slice == np.array(1.0)
def test_slice_with_range(self):
def test(sliceable):
by_value = sliceable[
'x', 1.5 * sc.units.dimensionless : 4.5 * sc.units.dimensionless
]
by_index = sliceable['x', 1:-1]
assert sc.identical(by_value, by_index)
test(self._d['a'])
test(self._d)
def test_assign_variable_to_range_dataarray_fails(self):
with pytest.raises(sc.DataArrayError): # readonly
self._d['a'][
'x', 1.5 * sc.units.dimensionless : 4.5 * sc.units.dimensionless
].data = sc.Variable(dims=['x'], values=[6.0, 6.0, 6.0], unit=sc.units.m)
def test_assign_variable_to_range_dataset_fails(self):
with pytest.raises(sc.DataArrayError): # readonly
self._d['x', 1.5 * sc.units.dimensionless : 4.5 * sc.units.dimensionless][
'a'
].data = sc.Variable(dims=['x'], values=[6.0, 6.0, 6.0], unit=sc.units.m)
def test_on_dataarray_modify_range_in_place_from_variable(self):
self._d['a'][
'x', 1.5 * sc.units.dimensionless : 4.5 * sc.units.dimensionless
].data += sc.Variable(dims=['x'], values=[2.0, 2.0, 2.0], unit=sc.units.m)
assert self._d['a'].data.values.tolist() == [1.0, 3.1, 3.2, 3.3, 1.4]
def test_on_dataset_modify_range_in_place_from_variable(self):
self._d['x', 1.5 * sc.units.dimensionless : 4.5 * sc.units.dimensionless][
'a'
].data += sc.Variable(dims=['x'], values=[2.0, 2.0, 2.0], unit=sc.units.m)
assert self._d['a'].data.values.tolist() == [1.0, 3.1, 3.2, 3.3, 1.4]
def test_on_dataarray_assign_dataarray_to_range(self):
self._d['a'][
'x', 1.5 * sc.units.dimensionless : 4.5 * sc.units.dimensionless
] = self._d['b']['x', 1:-1]
assert self._d['a'].data.values.tolist() == [1.0, 2.0, 3.0, 4.0, 1.4]
def test_on_dataset_assign_dataarray_to_range_fails(self):
with pytest.raises(sc.DatasetError): # readonly
self._d['x', 1.5 * sc.units.dimensionless : 4.5 * sc.units.dimensionless][
'a'
] = self._d['b']['x', 1:-1]
def test_on_dataarray_modify_range_in_place_from_dataarray(self):
self._d['a'][
'x', 1.5 * sc.units.dimensionless : 4.5 * sc.units.dimensionless
] += self._d['b']['x', 1:-1]
assert self._d['a'].data.values.tolist() == [1.0, 3.1, 4.2, 5.3, 1.4]
def test_on_dataset_modify_range_in_place_from_dataarray(self):
self._d['x', 1.5 * sc.units.dimensionless : 4.5 * sc.units.dimensionless][
'a'
] += self._d['b']['x', 1:-1]
assert self._d['a'].data.values.tolist() == [1.0, 3.1, 4.2, 5.3, 1.4]
def test_slice_by_incorrect_unit_throws(self):
with pytest.raises(sc.UnitError) as e_info:
_ = self._d['a']['x', 1.5 * sc.units.m]
assert '(m)' in str(e_info.value)
assert '(dimensionless)' in str(e_info.value)
def test_out_of_range_throws(self):
with pytest.raises(IndexError):
_ = self._d['a']['x', 5.0 * sc.units.dimensionless]
def test_assign_incompatible_variable_throws(self):
with pytest.raises(sc.DimensionError) as e_info:
self._d['a'][
'x', 1.5 * sc.units.dimensionless : 4.5 * sc.units.dimensionless
] = sc.Variable(dims=['x'], values=[6.0, 6.0], unit=sc.units.m)
assert str(e_info.value) == 'Expected (x: 3) to include (x: 2).'
def test_assign_incompatible_dataarray(self):
with pytest.raises(RuntimeError):
self._d['a'][
'x', 1.5 * sc.units.dimensionless : 4.5 * sc.units.dimensionless
] = self._d['b']['x', 0:-2]
def test_range_slice_with_step_throws(self):
with pytest.raises(RuntimeError) as e_info:
self._d['a']['x', 1.5 * sc.units.m : 4.5 * sc.units.m : 4]
assert str(e_info.value) == "Step cannot be specified for value based slicing."
def test_range_start_only(self):
by_value = self._d['a']['x', 1.5 * sc.units.dimensionless :]
by_index = self._d['a']['x', 1:]
assert sc.identical(by_value, by_index)
by_value = self._d['x', 1.5 * sc.units.dimensionless :]
by_index = self._d['x', 1:]
assert sc.identical(by_value, by_index)
def METHOD_NAME(self):
by_value = self._d['a']['x', : 2.5 * sc.units.dimensionless]
by_index = self._d['a']['x', :2]
assert sc.identical(by_value, by_index)
by_value = self._d['x', : 2.5 * sc.units.dimensionless]
by_index = self._d['x', :2]
assert sc.identical(by_value, by_index)
def test_raises_DimensionError_if_dim_not_given():
var = sc.arange('x', 4)
da = sc.DataArray(var, coords={'x': var})
with pytest.raises(sc.DimensionError):
da[sc.scalar(1) : sc.scalar(3)]
|
3,708 |
read elf
|
import subprocess
import sys
import re
function_intro_re = re.compile(r'^(?P<addr>[0-9a-fA-F]{8}) <(?P<name>[a-zA-Z0-9\._]+)>:$')
insn_re = re.compile(r'^\s+(?P<addr>[0-9a-fA-F]+):\s+(?P<insn>[0-9a-fA-F ]+)\s+\t(?P<op>.*)$')
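# Illustrative objdump -d lines these regexes are written against (address,
# encoding and symbol name invented for the example):
#   00008000 <cf_sha256_init>:
#       8000:	2300      	movs	r3, #0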
class Instruction:
def __init__(self, addr, insn, op):
self.addr = int(addr, 16)
self.insn = insn
args = op.split('\t', 1)
self.op = args[0].strip()
if len(args) == 2:
comment = args[1].strip().split(';', 1)
else:
comment = args
self.args = comment[0].strip()
if len(comment) == 2:
self.comment = comment[1].strip()
else:
self.comment = ''
def __repr__(self):
return '<insn %r>' % (self.__dict__)
def literal_branch_target(t):
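# objdump renders statically resolved branch targets as "addr <symbol>", so
# the presence of " <" distinguishes them from indirect branches via registers.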
return ' <' in t
class Function:
def __init__(self, addr, name):
self.name = name
self.addr = int(addr, 16)
self.insns = []
self.calls = []
def __repr__(self):
return '<%s %d instructions>' % (self.name, len(self.insns))
def add_insn(self, insn):
self.insns.append(Instruction(**insn))
def contains_addr(self, addr):
if self.insns:
return addr >= self.addr and addr <= self.insns[-1].addr
else:
return addr == self.addr
def dump(self):
print(self.name + ':')
for insn in self.insns:
print(' ', '%04x' % insn.addr + ':', insn.op, insn.args, '\t;', insn.comment)
def get_literal_word(self, addr):
for insn in self.insns:
if insn.addr == addr and insn.op == '.word':
w = int(insn.args, 16)
if w & 0x80000000:
w = -(w ^ 0xffffffff) + 1
return w
return None
def analyse(self, prog):
self.stack_guess = None
regs = {}
for insn in self.insns:
# stack adjustment with literal
if insn.op == 'sub' and insn.args.startswith('sp, ') and self.stack_guess is None:
sz = int(insn.args.split('#', 1)[1])
self.stack_guess = sz
# literal pool loads
if insn.op == 'ldr' and ', [pc, #' in insn.args:
reg, offset = insn.args.split(', [pc, #')
offset = int(offset.replace(']', ''))
word = self.get_literal_word(insn.addr + offset + 2)
if word is not None:
regs[reg] = word
if insn.op == 'add' and insn.args.startswith('sp, r') and self.stack_guess is None:
reg = insn.args.split(', ')[1]
if reg in regs:
self.stack_guess = regs[reg]
# static branches
if insn.op[0] == 'b' and literal_branch_target(insn.args):
target = int(insn.args.split(' <', 1)[0], 16)
targetf = prog.function_at_addr(target)
if targetf and targetf != self:
self.calls.append(targetf)
if self.stack_guess is None:
self.stack_guess = 0
def stack_usage(self, hints, warns, prog, depth = 0):
hinted_calls = []
if self.stack_guess:
print(' ' * depth, 'stack:', self.name, self.stack_guess, 'bytes')
our_hints = [h for h in hints if h and h[0] == self.name]
if our_hints:
hints = [h[1:] for h in our_hints]
hinted_calls = [prog.function_by_name(h[0]) for h in hints if h]
else:
if self.name in warns:
print(' WARN: no calls hints for fn-ptr caller', self.name)
if self.calls + hinted_calls:
call_usage = max([f.stack_usage(hints, warns, prog, depth + 1) for f in self.calls + hinted_calls])
else:
call_usage = 0
return self.stack_guess + call_usage
class Program:
def __init__(self):
self.functions = []
# sequence of tuples naming a call sequence known to occur
# this allows working out calls through pointers
self.call_hints = []
# function names to warn on if we don't have callees
self.call_warns = set()
def METHOD_NAME(self, elf):
current_fn = None
for x in subprocess.Popen(['arm-none-eabi-objdump', '-d', elf],
stdout=subprocess.PIPE, universal_newlines=True).stdout:
x = x.rstrip('\n')
m = function_intro_re.match(x)
if m:
fn = Function(**m.groupdict())
current_fn = fn
self.functions.append(fn)
m = insn_re.match(x)
if m:
assert current_fn
current_fn.add_insn(m.groupdict())
def analyse(self):
for f in self.functions:
f.analyse(self)
def function_by_name(self, name):
fns = [fn for fn in self.functions if fn.name == name]
if len(fns) == 0:
return None
elif len(fns) == 1:
return fns[0]
else:
print('warn: more than one function named', name)
return None
def function_at_addr(self, addr):
for f in self.functions:
if f.addr == addr:
return f
return None
def add_call_hint(self, *seq):
self.call_hints.append(seq)
def add_call_warn(self, fn):
self.call_warns.add(fn)
def measure_stack(self, name):
fn = self.function_by_name(name)
if fn is None:
return 0
return fn.stack_usage(self.call_hints, self.call_warns, self)
_, exe, fn = sys.argv
p = Program()
p.METHOD_NAME(exe)
p.analyse()
# calls which indirect through fn ptrs
p.add_call_warn('cf_blockwise_accumulate')
p.add_call_warn('cf_blockwise_accumulate_final')
# hints to resolve those
p.add_call_hint('cf_sha224_update', 'cf_blockwise_accumulate', 'cf_blockwise_accumulate_final', 'sha256_update_block')
p.add_call_hint('cf_sha256_update', 'cf_blockwise_accumulate', 'cf_blockwise_accumulate_final', 'sha256_update_block')
p.add_call_hint('cf_sha384_update', 'cf_blockwise_accumulate', 'cf_blockwise_accumulate_final', 'sha512_update_block')
p.add_call_hint('cf_sha512_update', 'cf_blockwise_accumulate', 'cf_blockwise_accumulate_final', 'sha512_update_block')
p.add_call_hint('cf_norx32_encrypt', 'input', 'cf_blockwise_accumulate', 'cf_blockwise_accumulate_final', 'input_block')
p.add_call_hint('cf_norx32_decrypt', 'input', 'cf_blockwise_accumulate', 'cf_blockwise_accumulate_final', 'input_block')
p.add_call_hint('cf_cbcmac_stream_update', 'cf_blockwise_accumulate', 'cf_blockwise_accumulate_final', 'cbcmac_process')
p.add_call_hint('cf_cmac_stream_update', 'cf_blockwise_accumulate', 'cf_blockwise_accumulate_final', 'cmac_process_final_pad')
p.add_call_hint('cf_cmac_stream_update', 'cf_blockwise_accumulate_final', 'cmac_process')
p.add_call_hint('cf_cmac_stream_update', 'cf_blockwise_accumulate_final', 'cmac_process_final_nopad')
print('stack', fn, '=', p.measure_stack(fn))
|
3,709 |
match end title
|
#!/usr/bin/env python3
# ---------------------- faust2sublimecompletions -----------------------
# Usage: `faust2sublimecompletions *.lib > faust.sublime-completions`
#
# Creates a ST4 completions file for each function in a library
# Assumes the same format as faust2md.
# Adapted from the faust2atomsnippets script, adding a documentation link and Usage information obtained by parsing the standard libraries further.
#
# The generated file has the following structure:
# {
# "scope": "source.faust",
# "completions": [
# {
# "annotation": "oscillators.lib",
# "contents": "os.osc",
# "details": "<code>osc(freq) : _</code> - <a href='https://faustlibraries.grame.fr/libs/oscillators/#ososc'>Docs</a>",
# "kind": "ambiguous",
# "trigger": "os.osc"
# },
# ...
# ]}
#
# The format of a title is :
# //############# Title Name #################
# // markdown text....
# // markdown text....
# //##########################################
#
# The format of a section is :
# //============== Section Name ==============
# // markdown text....
# // markdown text....
# //==========================================
#
# The format of a comment is :
# //-------------- foo(x,y) ------------------
# // markdown text....
# // markdown text....
# //------------------------------------------
# everything else is considered faust code.
# The translation is the following:
# ## foo(x,y)
# markdown text....
# markdown text....
# --------------------------------------------------------
import sys
import re
import getopt
import os
import json
# Outdent a comment line by n characters in
# order to remove the prefix "// "
def outdent(line, n):
if len(line) <= n:
return "\n"
else:
return line[n:]
# Match the 2-characters prefix of a library.
# We want to extract "no" from "...prefix is `no`..."
def matchPrefixName(line):
return re.search(r'^.*prefix is .(..).*', line)
# Match the first line of a title
# of type "//#### Title ####
# at least 3 * are needed
def matchBeginTitle(line):
return re.search(r'^\s*//#{3,}\s*([^#]+)#{3,}', line)
# Match the last line of a title
# of type "//#######"
# or a blank line
def METHOD_NAME(line):
return re.search(r'^\s*((//#{3,})|(\s*))$', line)
# Match the first line of a section
# of type "//==== Section ===="
# at least 3 = are needed
def matchBeginSection(line):
return re.search(r'^\s*//={3,}\s*([^=]+)={3,}', line)
# Match the last line of a section
# of type "//======="
# or a blank line
def matchEndSection(line):
return re.search(r'^\s*((//={3,})|(\s*))$', line)
# Match the first line of a comment
# of type "//--- foo(x,y) ----"
# at least 3 - are needed
def matchBeginComment(line):
return re.search(r'^\s*//-{3,}\s*`([^-`]+)`-{3,}', line)
def matchBeginUsage(line):
return ("####" in line) & ("Usage" in line)
# Match the last line of a comment
# of type "//-----------------"
# or a blank line
def matchEndComment(line):
return re.search(r'^\s*((//-{3,})|(\s*))$', line)
# Compute the indentation of a line,
# that is the position of the first word character
# after "// "
def indentation(line):
matchComment = re.search(r'(^\s*//\s*\w)', line)
if matchComment:
return len(matchComment.group(1))-1
else:
return 0
# Indicates if a line is a comment
def isComment(line):
matchComment = re.search(r'^\s*//', line)
if matchComment:
return 1
else:
return 0
#
# THE PROGRAM STARTS HERE
#
tabsize = 4 # tabsize used for expanding tabs
mode = 0 # 0: in code; 1: in md-comment
idt = 0 # indentation retained to outdent comment lines
libprefix = "xx" #
# Analyze command line arguments
try:
opts, args = getopt.getopt(sys.argv[1:], "st:cf")
if not args:
raise getopt.error("At least one file argument required")
except getopt.error as e:
print(e.msg)
print("usage: %s [-s][-t tabsize] file ..." % (sys.argv[0],))
sys.exit(1)
for optname, optvalue in opts:
if optname == '-t':
tabsize = int(optvalue)
# Process all the files and print the documentation on the standard output
inUsage = 0
trigger = ""
usage = ""
completions = []
for file in args:
with open(file) as f:
lines = f.readlines()
for num, text in enumerate(lines):
line = text.expandtabs(tabsize)
matchPrefix = matchPrefixName(line)
if matchPrefix:
libprefix = matchPrefix.group(1)
if isComment(line) == 0:
if mode == 1:
# we are closing a md-comment
mode = 0
else:
if mode == 0: # we are in code
matchComment = matchBeginComment(line)
matchSection = matchBeginSection(line)
matchTitle = matchBeginTitle(line)
if matchComment:
trigger = ""
usage = ""
foo = matchComment.group(1)
trigger = foo[1:4]+foo[5:]
if matchComment or matchSection or matchTitle:
mode = 1 # we just started a md-comment
idt = 0 # we have to measure the indentation
else:
# we are in a md-comment
matchUsage = matchBeginUsage(line)
if matchUsage:
inUsage = 1
if line.startswith("//") and "```" in line and inUsage:
usage = lines[num+1][2:]
if usage.startswith(' '):
usage = usage[1:-1]
inUsage = 0
if idt == 0:
# we have to measure the indentation
idt = indentation(line)
# check end of md-comment
matchComment = matchEndComment(line)
matchSection = matchEndSection(line)
matchTitle = METHOD_NAME(line)
if matchComment or matchSection or matchTitle:
if matchComment:
libfile = os.path.basename(file)
libname = libfile.split('.')[0]
link = "<a href='https://faustlibraries.grame.fr/libs/{0}/#{1}'>Docs</a>".format(libname.lower(), ''.join(trigger.split('.')).lower())
completion = {
"trigger": trigger,
"contents": trigger,
"annotation": libfile,
"kind": "ambiguous",
"details": "<code>" + usage + "</code> - " + link if usage != "" else link
}
completions.append(completion)
# end of md-comment switch back to mode O
mode = 0
final = {
"scope": "source.faust",
"completions": completions
}
print(json.dumps(final, sort_keys=True, indent=4))
|
3,710 |
mark process
|
import argparse
import datetime
from functools import wraps
import requests
from mindsdb_sql import get_lexer_parser
from mindsdb_sql.parser.ast import Identifier
from mindsdb.utilities.fs import create_process_mark, delete_process_mark
def args_parse():
parser = argparse.ArgumentParser(description='CL argument for mindsdb server')
parser.add_argument('--api', type=str, default=None)
parser.add_argument('--config', type=str, default=None)
parser.add_argument('--install-handlers', type=str, default=None)
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--no_studio', action='store_true')
parser.add_argument('-v', '--version', action='store_true')
parser.add_argument('--ray', action='store_true', default=None)
return parser.parse_args()
def cast_row_types(row, field_types):
'''
'''
keys = [x for x in row.keys() if x in field_types]
for key in keys:
t = field_types[key]
if t == 'Timestamp' and isinstance(row[key], (int, float)):
timestamp = datetime.datetime.utcfromtimestamp(row[key])
row[key] = timestamp.strftime('%Y-%m-%d %H:%M:%S')
elif t == 'Date' and isinstance(row[key], (int, float)):
timestamp = datetime.datetime.utcfromtimestamp(row[key])
row[key] = timestamp.strftime('%Y-%m-%d')
elif t == 'Int' and isinstance(row[key], (int, float, str)):
try:
print(f'cast {row[key]} to {int(row[key])}')
row[key] = int(row[key])
except Exception:
pass
def is_notebook():
try:
if 'IPKernelApp' in get_ipython().config:
return True
else:
return False
except NameError:
return False # Probably standard Python interpreter
def METHOD_NAME(name):
def mark_process_wrapper(func):
@wraps(func)
def wrapper(*args, **kwargs):
mark = create_process_mark(name)
try:
return func(*args, **kwargs)
finally:
delete_process_mark(name, mark)
return wrapper
return mark_process_wrapper
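# Hypothetical usage (added for illustration): the decorator writes a process
# mark before the wrapped call and always removes it afterwards.
#
#   @METHOD_NAME('my_worker')
#   def my_worker_task():
#       ...  # the mark exists only while this body is running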
def get_versions_where_predictors_become_obsolete():
""" Get list of MindsDB versions in which predictors should be retrained
Returns:
list of str or False
"""
versions_for_updating_predictors = []
try:
try:
res = requests.get(
'https://mindsdb-cloud-public-service-files.s3.us-east-2.amazonaws.com/version_for_updating_predictors.txt',
timeout=0.5
)
except (ConnectionError, requests.exceptions.ConnectionError) as e:
print(f'No connection: {e}')
raise
except Exception as e:
print(f'Something went wrong while getting version_for_updating_predictors.txt: {e}')
raise
if res.status_code != 200:
print(f"Can't get version_for_updating_predictors.txt: returned status code = {res.status_code}")
raise
try:
versions_for_updating_predictors = res.text.replace(' \t\r', '').split('\n')
except Exception as e:
print(f"Can't decode version_for_updating_predictors.txt: {e}")
raise
except Exception:
return False, versions_for_updating_predictors
versions_for_updating_predictors = [x for x in versions_for_updating_predictors if len(x) > 0]
return True, versions_for_updating_predictors
def init_lexer_parsers():
get_lexer_parser('mindsdb')
get_lexer_parser('mysql')
def resolve_model_identifier(name: Identifier) -> tuple:
""" split model name to parts
Examples:
>>> resolve_model_identifier(['a', 'b'])
('a', 'b', None)
>>> resolve_model_identifier(['a', '1'])
(None, 'a', 1)
>>> resolve_model_identifier(['a'])
(None, 'a', None)
>>> resolve_model_identifier(['a', 'b', 'c'])
(None, None, None) # not found
Args:
name (list): Identifier parts
Returns:
tuple: (database_name, model_name, model_version)
"""
name = name.parts
database_name = None
model_name = None
model_version = None
parts_count = len(name)
if parts_count == 1:
database_name = None
model_name = name[0]
model_version = None
elif parts_count == 2:
if name[-1].isdigit():
database_name = None
model_name = name[0]
model_version = int(name[-1])
else:
database_name = name[0]
model_name = name[1]
model_version = None
elif parts_count == 3:
database_name = name[0]
model_name = name[1]
if name[2].isdigit():
model_version = int(name[2])
else:
# not found
return None, None, None
return database_name, model_name, model_version
|
3,711 |
test invalid issue report params
|
# -*- coding: utf-8 -*-
import io
import os
import re
import tempfile
from unittest.mock import MagicMock, patch
from django import test
import tests.factories as f
from tcms.issuetracker.models import CredentialTypes
from tests import HelperAssertions
class TestIssueTrackerValidation(HelperAssertions, test.TestCase):
@classmethod
def setUpTestData(cls):
cls.tracker_product = f.IssueTrackerProductFactory(name="CoolIssueTracker")
def METHOD_NAME(self):
st = f.IssueTrackerFactory(tracker_product=self.tracker_product)
st.issue_report_params = "product=name"
self.assertValidationError(
"issue_report_params",
r"Line .+ is not a pair of key/value separated by ':'",
st.full_clean,
)
st = f.IssueTrackerFactory(tracker_product=self.tracker_product)
st.issue_report_params = "product: name\ncustom_field: a:b:c"
self.assertValidationError(
"issue_report_params", r"Line .+ contains multiple ':'", st.full_clean
)
def test_invalid_class_path(self):
tracker = f.IssueTrackerFactory(
tracker_product=self.tracker_product,
class_path="a.b.c",
issues_display_url_fmt="http://localhost/{issue_keys}",
)
self.assertValidationError("class_path", r"Cannot import a\.b", tracker.full_clean)
def test_member_name_does_not_exist_in_imported_module(self):
tracker = f.IssueTrackerFactory(
tracker_product=self.tracker_product,
class_path="tracker.klass",
issues_display_url_fmt="http://localhost/{issue_keys}",
)
with patch("importlib.import_module") as import_module:
# A MagicMock spec'd on object() so hasattr() cannot find the attribute klass
import_module.return_value = MagicMock(spec=object())
self.assertValidationError(
"class_path",
"Module tracker does not have class klass",
tracker.full_clean,
)
def test_invalid_regex_for_validating_issue_id(self):
tracker = f.IssueTrackerFactory(
tracker_product=self.tracker_product,
issues_display_url_fmt="http://localhost/{issue_keys}",
validate_regex="[0-9}+",
)
self.assertValidationError("validate_regex", "cannot be compiled", tracker.full_clean)
class TestGetIssueTrackerCredential(test.TestCase):
"""Test IssueTracker property credential"""
def setUp(self):
fd, self.user_pwd_secret_file = tempfile.mkstemp()
with io.open(fd, "w", encoding="utf-8") as f:
f.write("[issuetracker]\nusername = admin\npassword = admin\n")
fd, self.token_secret_file = tempfile.mkstemp()
with io.open(fd, "w", encoding="utf-8") as f:
f.write("[issuetracker]\ntoken = abcde\n")
def tearDown(self):
os.unlink(self.user_pwd_secret_file)
os.unlink(self.token_secret_file)
def test_get_noneed_credential(self):
issue_tracker = f.IssueTrackerFactory(credential_type=CredentialTypes.NoNeed.name)
self.assertEqual({}, issue_tracker.credential)
def test_get_user_pwd_credential_from_secret_file(self):
issue_tracker = f.IssueTrackerFactory(credential_type=CredentialTypes.UserPwd.name)
f.UserPwdCredentialFactory(
secret_file=self.user_pwd_secret_file, issue_tracker=issue_tracker
)
self.assertEqual({"username": "admin", "password": "admin"}, issue_tracker.credential)
def test_get_user_pwd_credential_from_database(self):
issue_tracker = f.IssueTrackerFactory(credential_type=CredentialTypes.UserPwd.name)
f.UserPwdCredentialFactory(username="abc", password="abc", issue_tracker=issue_tracker)
self.assertEqual({"username": "abc", "password": "abc"}, issue_tracker.credential)
def test_get_token_credential_from_secret_file(self):
issue_tracker = f.IssueTrackerFactory(credential_type=CredentialTypes.Token.name)
f.TokenCredentialFactory(secret_file=self.token_secret_file, issue_tracker=issue_tracker)
self.assertEqual({"token": "abcde"}, issue_tracker.credential)
def test_get_token_credential_from_database(self):
issue_tracker = f.IssueTrackerFactory(credential_type=CredentialTypes.Token.name)
f.TokenCredentialFactory(token="234wer", issue_tracker=issue_tracker)
self.assertEqual({"token": "234wer"}, issue_tracker.credential)
def assert_property_credential(self, issue_tracker):
try:
issue_tracker.credential
except ValueError as e:
if not re.search(r"credential is not set", str(e)):
self.fail(
"Expected ValueError is not raised. Instead, another"
" ValueError is raised with message: {}".format(str(e))
)
else:
self.fail("Expected ValueError is not raised.")
def test_user_pwd_credential_not_set(self):
issue_tracker = f.IssueTrackerFactory(credential_type=CredentialTypes.UserPwd.name)
self.assert_property_credential(issue_tracker)
def test_token_credential_not_set(self):
issue_tracker = f.IssueTrackerFactory(credential_type=CredentialTypes.Token.name)
self.assert_property_credential(issue_tracker)
|
3,712 |
test make aware already aware
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Tests for the :mod:`~aiida.common.timezone` module."""
from datetime import datetime, timedelta, timezone, tzinfo
from time import time
import pytest
from aiida.common.timezone import delta, localtime, make_aware, now, timezone_from_name
def is_aware(dt):
"""Return whether the datetime is aware.
See https://docs.python.org/3/library/datetime.html#determining-if-an-object-is-aware-or-naive
:param dt: The datetime object to check.
:returns: True if ``datetime`` is aware, False otherwise.
"""
return dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None
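# Minimal sketch (added for illustration) of what is_aware() reports:
#
#   assert not is_aware(datetime(1970, 1, 1))                   # naive
#   assert is_aware(datetime(1970, 1, 1, tzinfo=timezone.utc))  # aware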
def test_now():
"""Test the :func:`aiida.common.timezone.now` function.
Check that the time returned by AiiDA's ``now`` function is compatible with attaching a timezone to a "naive" time
stamp using ``make_aware``.
"""
dt = timedelta(minutes=1)
ref = now()
from_tz = make_aware(datetime.fromtimestamp(time()))
assert from_tz <= ref + dt
assert from_tz >= ref - dt
def test_make_aware():
"""Test the :func:`aiida.common.timezone.make_aware` function.
This should make a naive datetime object aware using the timezone of the operating system.
"""
system_tzinfo = datetime.now(timezone.utc).astimezone() # This is how to get the timezone of the OS.
naive = datetime(1970, 1, 1)
aware = make_aware(naive)
assert is_aware(aware)
assert aware.tzinfo.tzname(aware) == system_tzinfo.tzname()
assert aware.tzinfo.utcoffset(aware) == system_tzinfo.utcoffset()
def METHOD_NAME():
"""Test the :func:`aiida.common.timezone.make_aware` function for an already aware datetime.
This should simply return the datetime if ``timezone`` is not specified, otherwise the ``timezone`` will be set on
the datetime object.
"""
aware = datetime.now(timezone.utc).astimezone() # This creates an aware object.
assert is_aware(aware)
assert make_aware(aware) == aware
different_tz = make_aware(aware, timezone(timedelta(hours=12)))
assert different_tz == aware
assert different_tz.tzinfo.tzname(different_tz) != aware.tzinfo.tzname(aware)
assert different_tz.tzinfo.utcoffset(different_tz) != aware.tzinfo.utcoffset(aware)
def test_localtime_aware():
"""Test the :func:`aiida.common.timezone.test_localtime` function for an already aware datetime.
This should simply return the datetime if ``timezone`` is not specified, otherwise the ``timezone`` will be set on
the datetime object.
"""
aware = datetime.now(timezone.utc).astimezone() # This creates an aware object.
assert is_aware(aware)
assert localtime(aware) == aware
def test_localtime_naive():
"""Test the :func:`aiida.common.timezone.test_localtime` function for a naive datetime.
This should not raise but simply return the same datetime made aware.
"""
naive = datetime.now() # This creates a naive object.
assert not is_aware(naive)
local = localtime(naive)
assert local != naive
assert is_aware(local)
def test_make_aware_timezone():
"""Test the :func:`aiida.common.timezone.make_aware` function passing an explicit timezone."""
dt = timedelta(hours=2)
naive = datetime(1970, 1, 1)
aware = make_aware(naive, timezone(dt))
assert is_aware(aware)
assert aware.tzinfo.utcoffset(aware) == dt
def test_timezone_from_name():
"""Test the :func:`aiida.common.timezone.timezone_from_name` function."""
assert isinstance(timezone_from_name('Europe/Amsterdam'), tzinfo)
def test_timezone_from_name_unknown():
"""Test the :func:`aiida.common.timezone.timezone_from_name` function for unknown timezone."""
with pytest.raises(ValueError, match=r'unknown timezone: .*'):
timezone_from_name('Invalid/Unknown')
def test_delta():
"""Test the :func:`aiida.common.timezone.delta` function."""
datetime_01 = datetime(1980, 1, 1, 0, 0, 0)
datetime_02 = datetime(1980, 1, 1, 0, 0, 2)
# Should return an instance of ``timedelta``
assert isinstance(delta(datetime_02), timedelta)
# If no comparison datetime is provided, it should be compared to ``now`` as a default
assert delta(datetime_01).total_seconds() > 0
assert delta(datetime_01, datetime_02).total_seconds() == 2
assert delta(datetime_02, datetime_01).total_seconds() == -2
|
3,713 |
test hybrid units recording
|
import pytest
import shutil
from pathlib import Path
from spikeinterface.core import WaveformExtractor, extract_waveforms, load_extractor
from spikeinterface.core.testing import check_recordings_equal
from spikeinterface.comparison import (
create_hybrid_units_recording,
create_hybrid_spikes_recording,
generate_injected_sorting,
)
from spikeinterface.extractors import toy_example
from spikeinterface.preprocessing import bandpass_filter
if hasattr(pytest, "global_test_folder"):
cache_folder = pytest.global_test_folder / "comparison" / "hybrid"
else:
cache_folder = Path("cache_folder") / "comparison" / "hybrid"
def setup_module():
if cache_folder.is_dir():
shutil.rmtree(cache_folder)
cache_folder.mkdir(parents=True, exist_ok=True)
recording, sorting = toy_example(
duration=60, num_channels=4, num_units=5, num_segments=2, average_peak_amplitude=-1000
)
recording = bandpass_filter(recording, freq_min=300, freq_max=6000)
recording = recording.save(folder=cache_folder / "recording")
sorting = sorting.save(folder=cache_folder / "sorting")
wvf_extractor = extract_waveforms(
recording, sorting, folder=cache_folder / "wvf_extractor", ms_before=10.0, ms_after=10.0
)
def METHOD_NAME():
wvf_extractor = WaveformExtractor.load(cache_folder / "wvf_extractor")
recording = wvf_extractor.recording
templates = wvf_extractor.get_all_templates()
templates[:, 0, :] = 0
templates[:, -1, :] = 0
hybrid_units_recording = create_hybrid_units_recording(
recording, templates, nbefore=wvf_extractor.nbefore, injected_sorting_folder=cache_folder / "injected0"
)
assert hybrid_units_recording.get_traces(end_frame=600, segment_index=0).shape == (600, 4)
assert hybrid_units_recording.get_traces(start_frame=100, end_frame=600, segment_index=1).shape == (500, 4)
assert hybrid_units_recording.get_traces(start_frame=recording.get_num_frames(0) - 200, segment_index=0).shape == (
200,
4,
)
# Check dumpability
saved_loaded = load_extractor(hybrid_units_recording.to_dict())
check_recordings_equal(hybrid_units_recording, saved_loaded, return_scaled=False)
saved_1job = hybrid_units_recording.save(folder=cache_folder / "units_1job")
saved_2job = hybrid_units_recording.save(folder=cache_folder / "units_2job", n_jobs=2, chunk_duration="1s")
check_recordings_equal(hybrid_units_recording, saved_1job, return_scaled=False)
check_recordings_equal(hybrid_units_recording, saved_2job, return_scaled=False)
def test_hybrid_spikes_recording():
wvf_extractor = WaveformExtractor.load_from_folder(cache_folder / "wvf_extractor")
recording = wvf_extractor.recording
sorting = wvf_extractor.sorting
hybrid_spikes_recording = create_hybrid_spikes_recording(
wvf_extractor, injected_sorting_folder=cache_folder / "injected1"
)
hybrid_spikes_recording = create_hybrid_spikes_recording(
wvf_extractor, unit_ids=sorting.unit_ids[:3], injected_sorting_folder=cache_folder / "injected2"
)
assert hybrid_spikes_recording.get_traces(end_frame=600, segment_index=0).shape == (600, 4)
assert hybrid_spikes_recording.get_traces(start_frame=100, end_frame=600, segment_index=1).shape == (500, 4)
assert hybrid_spikes_recording.get_traces(start_frame=recording.get_num_frames(0) - 200, segment_index=0).shape == (
200,
4,
)
# Check dumpability
saved_loaded = load_extractor(hybrid_spikes_recording.to_dict())
check_recordings_equal(hybrid_spikes_recording, saved_loaded, return_scaled=False)
saved_1job = hybrid_spikes_recording.save(folder=cache_folder / "spikes_1job")
saved_2job = hybrid_spikes_recording.save(folder=cache_folder / "spikes_2job", n_jobs=2, chunk_duration="1s")
check_recordings_equal(hybrid_spikes_recording, saved_1job, return_scaled=False)
check_recordings_equal(hybrid_spikes_recording, saved_2job, return_scaled=False)
def test_generate_injected_sorting():
recording = load_extractor(cache_folder / "recording")
sorting = load_extractor(cache_folder / "sorting")
injected_sorting = generate_injected_sorting(
sorting, [recording.get_num_frames(seg_index) for seg_index in range(recording.get_num_segments())]
)
if __name__ == "__main__":
setup_module()
test_generate_injected_sorting()
METHOD_NAME()
test_hybrid_spikes_recording()
|
3,714 |
range callback
|
#!/usr/bin/env python3
import sys
import cv2
import image_geometry
import mil_ros_tools
import numpy as np
import rospy
from geometry_msgs.msg import Pose2D
from mil_msgs.msg import RangeStamped
from std_msgs.msg import Header
from subjugator_msgs.srv import VisionRequest2D, VisionRequest2DResponse
def contour_sort(l_arr):
"""Sort contours by area largest to smallest."""
length = len(l_arr)
if length <= 1:
return l_arr
else:
pivot = l_arr.pop(int(length / 2))
less, more = [], []
for x in l_arr:
if cv2.contourArea(x) >= cv2.contourArea(pivot):
less.append(x)
else:
more.append(x)
return [*contour_sort(less), pivot, *contour_sort(more)]
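# Illustrative note (added): contour_sort is a recursive quicksort keyed on
# cv2.contourArea, so the returned list is ordered largest to smallest, e.g.
#
#   areas = [cv2.contourArea(c) for c in contour_sort(list(contours))]
#   # areas is non-increasing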
def evaluate_bin(roi):
"""Check for orangeness."""
b1 = 163
g1 = 145
r1 = 223
b2 = 251
g2 = 240
r2 = 255
lower_value = np.array([b1, g1, r1], np.uint8)
upper_value = np.array([b2, g2, r2], np.uint8)
temp = np.array(0)
mask = cv2.inRange(roi, lower_value, upper_value)
bimg = cv2.bitwise_or(mask, temp)
orangeness = bimg.mean()
return orangeness
class BinFinder:
def __init__(self):
rospy.sleep(1.0)
self.bin_type = None
self.last_image = None
self.last_draw_image = None
self.last_image_time = None
self.camera_model = None
self.pose_service = rospy.Service(
"vision/bin/2D",
VisionRequest2D,
self.request_bin,
)
self.image_sub = mil_ros_tools.Image_Subscriber(
"/down/left/image_rect_color",
self.image_cb,
)
self.image_pub = mil_ros_tools.Image_Publisher("/vision/bin_2d/target_info")
self.range = None
self.range_sub = rospy.Subscriber(
"dvl/range",
RangeStamped,
self.METHOD_NAME,
)
# Occasional status publisher
self.timer = rospy.Timer(rospy.Duration(1.0), self.publish_target_info)
self.bins = {
"orange": "/color/bin/orange",
"norange": "/color/bin/norange",
}
def request_bin(self, srv):
self.bin_type = srv.target_name
if self.last_image is not None:
response = self.find_single_bin(np.copy(self.last_image), srv.target_name)
if response is False or response is None:
rospy.loginfo("did not find")
resp = VisionRequest2DResponse(
header=mil_ros_tools.make_header(frame="/down"),
found=False,
)
else:
# Fill in
center, radius = response
resp = VisionRequest2DResponse(
header=Header(stamp=self.last_image_time, frame_id="/down"),
pose=Pose2D(x=center[0], y=center[1], theta=radius),
max_x=self.last_image.shape[0],
max_y=self.last_image.shape[1],
camera_info=self.image_sub.camera_info,
found=True,
)
return resp
def publish_target_info(self, *args):
if self.last_image is None:
return
self.find_bins(np.copy(self.last_image), self.bin_type)
if self.last_draw_image is not None:
self.image_pub.publish(self.last_draw_image)
def image_cb(self, image):
"""Hang on to last image"""
self.last_image = image
self.last_image_time = self.image_sub.last_image_time
if self.camera_model is None:
if self.image_sub.camera_info is None:
return
self.camera_model = image_geometry.PinholeCameraModel()
self.camera_model.fromCameraInfo(self.image_sub.camera_info)
def find_single_bin(self, img, bin_type):
"""Find the bins and their orientations."""
assert (
bin_type in self.bins
), f"Bins_2d does not know bin color: {bin_type}"
if img is not None:
kernel = np.ones((2, 2), np.float32) / 4
img = cv2.filter2D(img, -1, kernel)
debug_image = np.copy(img)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, img = cv2.threshold(img, 254, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(
np.copy(img),
cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE,
)
contours = contour_sort(contours)
"""This finds the bins and looks for the one that is orange or is not
orange. Each bin is given an orangeness rating and either the most
or least orange bin is selected.
"""
if len(contours) > 0:
bins = 2
orangeness = 0 if self.bin_type == "orange" else 100000
if len(contours) < bins:
bins = len(contours)
for i in range(0, bins + 1):
x, y, w, h = cv2.boundingRect(contours[i])
roi = debug_image[y : y + h, x : x + w]
temp = evaluate_bin(roi)
if (orangeness > temp and self.bin_type == "norange") or (
orangeness < temp and self.bin_type == "orange"
):
orangeness = temp
M = cv2.moments(contours[i])
cx = int(M["m10"] / M["m00"])
cy = int(M["m01"] / M["m00"])
img_h, img_w, _ = np.shape(debug_image)
point = (cx, cy)
(_, _), (_, _), rad = cv2.fitEllipse(contours[i])
cv2.rectangle(debug_image, (x, y), (x + w, y + h), (127), 2)
ellipse = cv2.fitEllipse(contours[i])
cv2.ellipse(debug_image, ellipse, (170), 2)
if point is not None:
cv2.circle(debug_image, point, 5, (0, 0, 255), -1)
pixels = np.copy(point)
point = [cx - (img_w / 2), cy - (img_h / 2)]
tuple_center = (point[0], point[1], 0)
rad = ((rad) * np.pi) / 180.0
P = np.asarray(self.image_sub.camera_info.P).reshape(3, 4)
_P = np.linalg.pinv(P)
pixels = np.asarray([pixels[0], pixels[1], 1])
ray = _P.dot(pixels)
tuple_center = self.range * ray
tuple_center[2] = (
-tuple_center[2] + 0.45 + 1
) # height of the bin and some buffer
self.last_draw_image = debug_image
return tuple_center, rad
def METHOD_NAME(self, msg):
"""Handle range data grabbed from dvl"""
self.range = msg.range
def find_bins(self, img, srv):
return self.find_single_bin(img, self.bin_type)
def main(args):
BinFinder()
rospy.spin()
if __name__ == "__main__":
rospy.init_node("bin_vision")
main(sys.argv)
|
3,715 |
get ref data
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test winograd convolution using nnpack impl."""
import numpy as np
from pytest import skip
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import autotvm, te, topi
from tvm.autotvm.task.space import FallbackConfigEntity
from tvm.contrib import nnpack
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
def verify_conv2d_nchw(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
devices,
dilation=1,
add_bias=False,
add_relu=False,
):
"""Verify conv2d nchw workload."""
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation)
)
in_height = in_width = in_size
placholder_a = te.placeholder((batch, in_channel, in_height, in_width), name="A")
placeholder_w = te.placeholder((num_filter, in_channel, kernel, kernel), name="W")
bias = te.placeholder((num_filter, 1, 1), name="bias")
a_shape = get_const_tuple(placholder_a.shape)
w_shape = get_const_tuple(placeholder_w.shape)
bias_shape = get_const_tuple(bias.shape)
dtype = placholder_a.dtype
@memoize("topi.tests.test_topi_conv2d_nchw.verify_conv2d_nchw")
def METHOD_NAME():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
b_np = np.random.uniform(size=bias_shape).astype(dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(dtype)
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
a_np, w_np, b_np, c_np = METHOD_NAME()
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skipping %s becuase it is not enabled" % device)
print("Running on target: %s" % device)
with tvm.target.Target(device):
result_c = topi.nn.conv2d(
placholder_a,
placeholder_w,
stride,
padding,
dilation,
data_layout="NCHW",
out_dtype=dtype,
)
if add_bias:
result_c = topi.add(result_c, bias)
if add_relu:
result_c = topi.nn.relu(result_c)
schedule = topi.generic.schedule_conv2d_nchw([result_c])
buff_a = tvm.nd.array(a_np, dev)
buff_w = tvm.nd.array(w_np, dev)
buff_b = tvm.nd.array(b_np, dev)
buff_c = tvm.nd.array(np.zeros(get_const_tuple(result_c.shape), dtype=result_c.dtype), dev)
if add_bias:
func = tvm.build(
schedule,
[placholder_a, placeholder_w, bias, result_c],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation),
)
func(buff_a, buff_w, buff_b, buff_c)
else:
func = tvm.build(
schedule,
[placholder_a, placeholder_w, result_c],
device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation),
)
func(buff_a, buff_w, buff_c)
tvm.testing.assert_allclose(buff_c.numpy(), c_np, rtol=1e-4)
for device in devices:
check_device(device)
class WinogradFallback(autotvm.FallbackContext):
"""Winograd fallbacks."""
def _query_inside(self, target, workload):
key = (target, workload)
if key in self.memory:
return self.memory[key]
cfg = FallbackConfigEntity()
cfg.template_key = "winograd_nnpack_fp32"
self.memory[key] = cfg
return cfg
def test_conv2d_nchw():
"""Verify conv2d nchw winograd works."""
if not tvm.get_global_func(
"tvm.contrib.nnpack.convolution_inference_without_weight_transform", True
):
skip("extern function is not available")
if not nnpack.is_available():
skip("nnpack is not available")
devices = ["llvm -device=arm_cpu"]
autotvm.GLOBAL_SCOPE.silent = True
with WinogradFallback():
# resnet 18 workloads
verify_conv2d_nchw(1, 64, 56, 64, 3, 1, 1, devices=devices)
verify_conv2d_nchw(1, 128, 28, 128, 3, 1, 1, devices=devices)
verify_conv2d_nchw(1, 256, 14, 256, 3, 1, 1, devices=devices)
verify_conv2d_nchw(1, 512, 7, 512, 3, 1, 1, devices=devices)
# unet workloads
verify_conv2d_nchw(1, 3, 192, 12, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 4, 192, 12, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 12, 96, 24, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 24, 48, 48, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 48, 24, 96, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 96, 12, 180, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 180, 6, 220, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 220, 6, 180, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 180, 12, 96, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 96, 24, 48, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 48, 48, 24, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 24, 96, 12, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 12, 192, 1, 3, 1, 1, add_bias=True, devices=devices)
# relu, bias
verify_conv2d_nchw(1, 64, 56, 64, 3, 1, 1, add_bias=True, devices=devices)
verify_conv2d_nchw(1, 64, 56, 64, 3, 1, 1, add_relu=True, devices=devices)
verify_conv2d_nchw(1, 64, 56, 64, 3, 1, 1, add_relu=True, add_bias=True, devices=devices)
# weird workloads
verify_conv2d_nchw(1, 3, 3, 3, 3, 1, 1, devices=devices)
verify_conv2d_nchw(1, 13, 71, 59, 3, 1, 1, devices=devices)
autotvm.GLOBAL_SCOPE.silent = False
if __name__ == "__main__":
tvm.testing.main()
|
3,716 |
edit task
|
'Full terminal interface for TaskWarrior (task).'
import tasklib
from visidata import vd, launchExternalEditorValue, Sheet, ColumnItem, date, vlen, CellColorizer, Column, run
from visidata import *
vd.options.disp_date_fmt = '%Y-%m-%d %H:%M'
vd.option('color_task_changed', 'reverse yellow', 'color when vtask is changed')
def METHOD_NAME(task):
taskdesc = ''
# pack "key: value" metadata
for k in 'description project status'.split():
taskdesc += '%s: %s\n' % (k, task[k])
# pack annotations separated by ---
for note in task["annotations"]:
taskdesc += "\n---\n"
taskdesc += note.description
# add final sentinel to indicate how to separate if no notes
taskdesc += "\n---\n"
ret = launchExternalEditorValue(taskdesc)
# unpack annotations separated by ---
newnotes = ret.split("\n---\n")
task["annotations"] = newnotes[1:]
# unpack "key: value" metadata
for line in newnotes[0].splitlines():
k, v = line.split(': ', maxsplit=1)
task[k] = v
# task.save()
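# Sketch of the intermediate text handed to the external editor (example
# values are hypothetical, not taken from a real task):
#
#   description: buy milk
#   project: home
#   status: pending
#
#   ---
#   first annotation
#   ---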
class TodoSheet(Sheet):
rowtype = 'tasks' # rowdef: tasklib.Task
columns = [
ColumnItem('id', type=int, width=4),
ColumnItem('project'),
ColumnItem('description'),
ColumnItem('status'),
ColumnItem('urgency', type=float, fmtstr='{:.01f}'),
ColumnItem('start', type=date),
ColumnItem('due', type=date),
ColumnItem('wait', type=date, width=0),
ColumnItem('scheduled', type=date, width=0),
ColumnItem('until', type=date, width=0),
ColumnItem('entry', type=date, width=0),
ColumnItem('modified', type=date, width=0),
ColumnItem('completed', type=date, width=0),
# ColumnItem('depends'),
Column('tags', getter=lambda c,r: ' '.join(r["tags"]),
setter=lambda c,r,v: r.__setitem__('tags', v.split(' '))),
# ColumnItem('tags'),
ColumnItem('annotations', type=vlen),
# Column('age', width=3, getter=lambda c,r: date()-r.date_entered), # formatter=duration
]
nKeys = 1
colorizers = Sheet.colorizers + [
CellColorizer(8, 'color_task_changed', lambda s,c,r,v: r and c and isChanged(r, c.name)),
]
def newRow(self, **kwargs):
return tasklib.Task(self.tw, **kwargs)
def reload(self):
self.tw = tasklib.TaskWarrior(data_location=str(self.source), create=True)
self.rows = list(self.tw.tasks.pending())
self.orderBy(None, self.column('urgency'), reverse=True)
def isChanged(r, key):
return r._data.get(key, None) != r._original_data.get(key, None)
class TaskAnnotationsSheet(Sheet):
rowtype = 'notes' # rowdef: TaskAnnotation
columns = [
ColumnItem('entry', type=date),
ColumnItem('description'),
]
def reload(self):
self.rows = self.source['annotations']
TodoSheet.addCommand('^O', 'edit-notes', 'editTask(cursorRow)')
TodoSheet.addCommand('a', 'add-task', 't=newRow(description=input("new task: ")); rows.insert(cursorRowIndex+1, t); t.save(); cursorDown()')
TodoSheet.addCommand('d', 'complete-task', 'cursorRow.done(); cursorRow.refresh()')
TodoSheet.addCommand('gd', 'complete-tasks', 'for r in selectedRows: r.done() or r.refresh()')
TodoSheet.addCommand('zd', 'delete-task', 'cursorRow.delete(); cursorRow.refresh()')
TodoSheet.addCommand('gzd', 'delete-tasks', 'for r in selectedRows: r.delete() or r.refresh()')
TodoSheet.addCommand('z^R', 'refresh-tasks', 'cursorRow.refresh()')
TodoSheet.addCommand('z^S', 'save-task', 'cursorRow.save()')
TodoSheet.addCommand('^S', 'save-modified-tasks', 'list(r.save() for r in rows if r.modified)')
TodoSheet.addCommand(' ', 'start-task', 'cursorRow.stop() if cursorRow["start"] else cursorRow.start()')
TodoSheet.addCommand(ENTER, '', 'vd.push(TaskAnnotationsSheet("cursorRow.description", source=cursorRow))')
TaskAnnotationsSheet.addCommand('a', 'add-task-note', 'source.add_annotation(input("note: ")); reload()')
TaskAnnotationsSheet.addCommand('d', 'delete-task-note', 'source.remove_annotation(cursorRow); reload()')
def main_vtask():
run(TodoSheet('todos', source=Path('~/.task')))
vd.addGlobals(globals())
|
3,717 |
get output current
|
#
# Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES.
# Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#############################################################################
# Mellanox
#
# Module contains an implementation of SONiC PSU Base API and
# provides the PSUs status which are available in the platform
#
#############################################################################
try:
import os.path
import syslog
import subprocess
from sonic_psu.psu_base import PsuBase
except ImportError as e:
raise ImportError(str(e) + "- required module not found")
def log_err(msg):
syslog.openlog("psuutil")
syslog.syslog(syslog.LOG_ERR, msg)
syslog.closelog()
class PsuUtil(PsuBase):
"""Platform-specific PSUutil class"""
MAX_PSU_FAN = 1
MAX_NUM_PSU = 2
GET_HWSKU_CMD = ["sonic-cfggen", "-d", "-v", "DEVICE_METADATA.localhost.hwsku"]
# for spectrum1 switches with plugable PSUs, the output voltage file is psuX_volt
# for spectrum2 switches the output voltage file is psuX_volt_out2
sku_spectrum1_with_plugable_psu = ['ACS-MSN2410', 'ACS-MSN2700',
'Mellanox-SN2700', 'Mellanox-SN2700-D48C8', 'LS-SN2700', 'ACS-MSN2740']
def __init__(self):
PsuBase.__init__(self)
self.sku_name = self._get_sku_name()
self.psu_path = "/var/run/hw-management/"
self.psu_presence = "thermal/psu{}_status"
self.psu_oper_status = "thermal/psu{}_pwr_status"
self.psu_current = "power/psu{}_curr"
self.psu_power = "power/psu{}_power"
if self.sku_name in self.sku_spectrum1_with_plugable_psu:
self.psu_voltage = "power/psu{}_volt"
else:
self.psu_voltage = "power/psu{}_volt_out2"
self.fan_speed = "thermal/psu{}_fan1_speed_get"
def _get_sku_name(self):
p = subprocess.Popen(self.GET_HWSKU_CMD, universal_newlines=True, stdout=subprocess.PIPE)
out, err = p.communicate()
return out.rstrip('\n')
def get_num_psus(self):
"""
Retrieves the number of PSUs available on the device
:return: An integer, the number of PSUs available on the device
"""
return self.MAX_NUM_PSU
def _read_file(self, file_pattern, index):
"""
Reads the file of the PSU
:param file_pattern: The filename convention
:param index: An integer, 1-based index of the PSU of which to query status
:return: int
"""
return_value = 0
try:
with open(self.psu_path + file_pattern.format(index), 'r') as file_to_read:
return_value = int(file_to_read.read())
except IOError:
log_err("Read file {} failed".format(self.psu_path + file_pattern.format(index)))
return 0
return return_value
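# Illustrative note (added): file_pattern is formatted with the 1-based PSU
# index and resolved under self.psu_path, e.g.
#
#   self._read_file(self.psu_voltage, 1)
#   # reads /var/run/hw-management/power/psu1_volt (or psu1_volt_out2)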
def get_psu_status(self, index):
"""
Retrieves the operational status of power supply unit (PSU) defined
by 1-based index <index>
:param index: An integer, 1-based index of the PSU of which to query status
:return: Boolean, True if PSU is operating properly, False if PSU is faulty
"""
if index is None:
return False
if index > self.MAX_NUM_PSU:
raise RuntimeError("index ({}) shouldn't be greater than {}".format(index, self.MAX_NUM_PSU))
status = self._read_file(self.psu_oper_status, index)
return status == 1
def get_psu_presence(self, index):
"""
Retrieves the presence status of power supply unit (PSU) defined
by 1-based index <index>
:param index: An integer, 1-based index of the PSU of which to query status
:return: Boolean, True if PSU is plugged, False if not
"""
if index is None:
raise RuntimeError("index shouldn't be None")
if index > self.MAX_NUM_PSU:
raise RuntimeError("index ({}) shouldn't be greater than {}".format(index, self.MAX_NUM_PSU))
status = self._read_file(self.psu_presence, index)
return status == 1
def get_output_voltage(self, index):
"""
Retrieves the output voltage in millivolts of a power supply unit (PSU) defined
by 1-based index <index>
:param index: An integer, 1-based index of the PSU of which to query o/p voltage
:return: An integer, value of o/p voltage in mV if PSU is good, else zero
"""
if index is None:
raise RuntimeError("index shouldn't be None")
if not self.get_psu_presence(index) or not self.get_psu_status(index):
return 0
voltage = self._read_file(self.psu_voltage, index)
return voltage
def METHOD_NAME(self, index):
"""
Retrieves the output current in milliamperes of a power supply unit (PSU) defined
by 1-based index <index>
:param index: An integer, 1-based index of the PSU of which to query o/p current
:return: An integer, value of o/p current in mA if PSU is good, else zero
"""
if index is None:
raise RuntimeError("index shouldn't be None")
if not self.get_psu_presence(index) or not self.get_psu_status(index):
return 0
current = self._read_file(self.psu_current, index)
return current
def get_output_power(self, index):
"""
Retrieves the output power in microwatts of a power supply unit (PSU) defined
by 1-based index <index>
:param index: An integer, 1-based index of the PSU of which to query o/p power
:return: An integer, value of o/p power in microwatts if PSU is good, else zero
"""
if index is None:
raise RuntimeError("index shouldn't be None")
if not self.get_psu_presence(index) or not self.get_psu_status(index):
return 0
power = self._read_file(self.psu_power, index)
return power
def get_fan_speed(self, index, fan_index):
"""
Retrieves the speed of fan, in rpm, denoted by 1-based <fan_index> of a power
supply unit (PSU) defined by 1-based index <index>
:param index: An integer, 1-based index of the PSU of which to query fan speed
:param fan_index: An integer, 1-based index of the PSU-fan of which to query speed
:return: An integer, value of PSU-fan speed in rpm if PSU-fan is good, else zero
"""
if index is None:
raise RuntimeError("index shouldn't be None")
if fan_index > self.MAX_PSU_FAN:
raise RuntimeError("fan_index ({}) shouldn't be greater than {}".format(fan_index, self.MAX_PSU_FAN))
if not self.get_psu_presence(index) or not self.get_psu_status(index):
return 0
fan_speed = self._read_file(self.fan_speed, index)
return fan_speed
|
3,718 |
to dict
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.28
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1Node(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1NodeSpec',
'status': 'V1NodeStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
"""V1Node - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""Gets the api_version of this V1Node. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1Node. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1Node.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1Node. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1Node. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1Node. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1Node.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1Node. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1Node. # noqa: E501
:return: The metadata of this V1Node. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1Node.
:param metadata: The metadata of this V1Node. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1Node. # noqa: E501
:return: The spec of this V1Node. # noqa: E501
:rtype: V1NodeSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1Node.
:param spec: The spec of this V1Node. # noqa: E501
:type: V1NodeSpec
"""
self._spec = spec
@property
def status(self):
"""Gets the status of this V1Node. # noqa: E501
:return: The status of this V1Node. # noqa: E501
:rtype: V1NodeStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1Node.
:param status: The status of this V1Node. # noqa: E501
:type: V1NodeStatus
"""
self._status = status
def METHOD_NAME(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.METHOD_NAME() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.METHOD_NAME()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].METHOD_NAME())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.METHOD_NAME())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1Node):
return False
return self.METHOD_NAME() == other.METHOD_NAME()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1Node):
return True
return self.METHOD_NAME() != other.METHOD_NAME()
|
3,719 |
test radix tree lookup multi index
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# SPDX-License-Identifier: LGPL-2.1-or-later
from drgn import NULL, Object
from drgn.helpers.linux.radixtree import radix_tree_for_each, radix_tree_lookup
from tests.linux_kernel import LinuxKernelTestCase, skip_unless_have_test_kmod
@skip_unless_have_test_kmod
class TestRadixTree(LinuxKernelTestCase):
def test_radix_tree_lookup_empty(self):
root = self.prog["drgn_test_radix_tree_empty"].address_of_()
self.assertIdentical(radix_tree_lookup(root, 0), NULL(self.prog, "void *"))
self.assertIdentical(radix_tree_lookup(root, 100000), NULL(self.prog, "void *"))
def test_radix_tree_for_each_empty(self):
root = self.prog["drgn_test_radix_tree_empty"].address_of_()
self.assertIdentical(list(radix_tree_for_each(root)), [])
def test_radix_tree_lookup_one(self):
root = self.prog["drgn_test_radix_tree_one"].address_of_()
self.assertIdentical(radix_tree_lookup(root, 0), NULL(self.prog, "void *"))
self.assertIdentical(radix_tree_lookup(root, 665), NULL(self.prog, "void *"))
self.assertIdentical(
radix_tree_lookup(root, 666), Object(self.prog, "void *", 0xDEADB00)
)
self.assertIdentical(radix_tree_lookup(root, 667), NULL(self.prog, "void *"))
self.assertIdentical(radix_tree_lookup(root, 100000), NULL(self.prog, "void *"))
def test_radix_tree_for_each_one(self):
root = self.prog["drgn_test_radix_tree_one"].address_of_()
self.assertIdentical(
list(radix_tree_for_each(root)),
[(666, Object(self.prog, "void *", 0xDEADB00))],
)
def test_radix_tree_lookup_one_at_zero(self):
root = self.prog["drgn_test_radix_tree_one_at_zero"].address_of_()
self.assertIdentical(
radix_tree_lookup(root, 0), Object(self.prog, "void *", 0x1234)
)
self.assertIdentical(radix_tree_lookup(root, 1), NULL(self.prog, "void *"))
self.assertIdentical(radix_tree_lookup(root, 100000), NULL(self.prog, "void *"))
def test_radix_tree_for_each_one_at_zero(self):
root = self.prog["drgn_test_radix_tree_one_at_zero"].address_of_()
self.assertIdentical(
list(radix_tree_for_each(root)), [(0, Object(self.prog, "void *", 0x1234))]
)
def test_radix_tree_lookup_sparse(self):
root = self.prog["drgn_test_radix_tree_sparse"].address_of_()
self.assertIdentical(radix_tree_lookup(root, 0), NULL(self.prog, "void *"))
self.assertIdentical(
radix_tree_lookup(root, 1), Object(self.prog, "void *", 0x1234)
)
self.assertIdentical(radix_tree_lookup(root, 2), NULL(self.prog, "void *"))
self.assertIdentical(
radix_tree_lookup(root, 0x40000000), NULL(self.prog, "void *")
)
self.assertIdentical(
radix_tree_lookup(root, 0x80000000), NULL(self.prog, "void *")
)
self.assertIdentical(
radix_tree_lookup(root, 0x80800000), NULL(self.prog, "void *")
)
self.assertIdentical(
radix_tree_lookup(root, 0x80808000), NULL(self.prog, "void *")
)
self.assertIdentical(
radix_tree_lookup(root, 0x80808080), Object(self.prog, "void *", 0x5678)
)
self.assertIdentical(
radix_tree_lookup(root, 0xFFFFFFFE), NULL(self.prog, "void *")
)
self.assertIdentical(
radix_tree_lookup(root, 0xFFFFFFFF), Object(self.prog, "void *", 0x9ABC)
)
def test_radix_tree_for_each_sparse(self):
root = self.prog["drgn_test_radix_tree_sparse"].address_of_()
self.assertIdentical(
list(radix_tree_for_each(root)),
[
(1, Object(self.prog, "void *", 0x1234)),
(0x80808080, Object(self.prog, "void *", 0x5678)),
(0xFFFFFFFF, Object(self.prog, "void *", 0x9ABC)),
],
)
def METHOD_NAME(self):
try:
root = self.prog["drgn_test_radix_tree_multi_order"].address_of_()
except KeyError:
# Radix tree multi-order support only exists between Linux kernel
# commits e61452365372 ("radix_tree: add support for multi-order
# entries") (in v4.6) and 3a08cd52c37c ("radix tree: Remove
# multiorder support") (in v4.20), and only if
# CONFIG_RADIX_TREE_MULTIORDER=y.
self.skipTest("kernel does not have multi-order radix trees")
self.assertIdentical(
radix_tree_lookup(root, 0x80807FFF), NULL(self.prog, "void *")
)
for index in range(0x80808000, 0x80808200):
with self.subTest(index=index):
self.assertIdentical(
radix_tree_lookup(root, index), Object(self.prog, "void *", 0x1234)
)
self.assertIdentical(
radix_tree_lookup(root, 0x80808200), NULL(self.prog, "void *")
)
def test_radix_tree_for_each_multi_index(self):
try:
root = self.prog["drgn_test_radix_tree_multi_order"].address_of_()
except KeyError:
# See test_radix_tree_lookup_multi_index().
self.skipTest("kernel does not have multi-order radix trees")
self.assertIdentical(
list(radix_tree_for_each(root)),
[(0x80808000, Object(self.prog, "void *", 0x1234))],
)
|
3,720 |
geti2c
|
#!/usr/bin/env python3
#######################################################
#
# devicebase.py
# Python implementation of the Class devicebase
# Original author: [email protected]
#
#######################################################
from plat_hal.osutil import osutil
from plat_hal.baseutil import baseutil
import subprocess
class devicebase(object):
_name = None
__error_ret = -99999
@property
def name(self):
return self._name
@name.setter
def name(self, val):
self._name = val
def dumpValueByI2c(self, bus, loc):
str = ""
for i in range(256):
ret, val = self.get_i2c(bus, loc, i)
str += chr(val)
return str
def byteTostr(self, val):
strtmp = ''
for i in range(len(val)):
strtmp += chr(val[i])
return strtmp
def get_eeprom_info(self, conf):
if conf.get('way') == 'sysfs' or conf.get('way') == 'devfile':
ret, eeprom = self.get_value(conf)
if ret is False:
return None
else:
eeprom = self.dumpValueByI2c(conf.get('bus'), conf.get('addr'))
return eeprom
def exec_os_cmd(self, cmd):
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, stderr=subprocess.STDOUT)
stdout = proc.communicate()[0]
proc.wait()
return proc.returncode, stdout
def get_value(self, config):
'''
get value by config way
way i2c/sysfs/lpc
'''
way = config.get("way")
if way == 'sysfs':
return self.get_sysfs(config.get("loc"), config.get("flock_path"))
elif way == "i2c":
bus = config.get("bus")
addr = config.get("addr")
offset = config.get("offset")
return self.get_i2c(bus, addr, offset)
elif way == "io":
io_addr = config.get('io_addr')
read_len = config.get('read_len', 1)
return self.get_io(io_addr, read_len)
elif way == "i2cword":
bus = config.get("bus")
addr = config.get("addr")
offset = config.get("offset")
return self.get_i2cword(bus, addr, offset)
elif way == "devmem":
addr = config.get("addr")
digit = config.get("digit")
mask = config.get("mask", None)
return self.get_devmem(addr, digit, mask)
elif way == "sdk":
type = config.get("type")
if type == "bcm_temp":
return self.getbcmtemp()
elif type == "bcm_reg":
reg = config.get("reg")
return self.getbcmreg(reg)
else:
raise Exception("cannot found sdk type deal")
elif way == "devfile":
loc = config.get("loc")
offset = config.get("offset")
len = config.get("len")
return self.devfile_read(loc, offset, len)
elif way == "devfile_ascii":
loc = config.get("loc")
offset = config.get("offset")
len = config.get("len")
return self.devfile_read_ascii(loc, offset, len)
elif way == 'cmd':
cmd = config.get("cmd")
ret, log = self.exec_os_cmd(cmd)
if ret:
return False, ("cmd write exec %s failed, log: %s" % (cmd, log))
else:
return True, log
else:
raise Exception("cannot found way deal")
def devfile_read(self, loc, offset, len):
return osutil.readdevfile(loc, offset, len)
def devfile_read_ascii(self, loc, offset, len):
return osutil.readdevfile_ascii(loc, offset, len)
def get_sysfs(self, loc, flock_path=None):
return self.getsysfs(loc, flock_path)
def getsysfs(self, loc, flock_path=None):
ret, val = osutil.readsysfs(loc, flock_path)
return ret, val
def get_devmem(self, addr, digit, mask):
return osutil.getdevmem(addr, digit, mask)
def get_i2cword(self, bus, addr, offset):
return self.geti2cword(bus, addr, offset)
def geti2cword(self, bus, addr, offset):
ret, val = osutil.geti2cword(bus, addr, offset)
return ret, val
def get_io(self, reg_addr, read_len):
return self.getio(reg_addr, read_len)
def getio(self, reg_addr, read_len):
ret, val = osutil.io_rd(reg_addr, read_len)
return ret, val
def get_i2c(self, bus, addr, offset):
return self.METHOD_NAME(bus, addr, offset)
def METHOD_NAME(self, bus, addr, offset):
ret, val = osutil.rji2cget(bus, addr, offset)
return ret, val
def set_value(self, config, val):
'''
set value by config way
way i2c/sysfs/lpc
'''
way = config.get("way")
if way == 'sysfs':
return self.set_sysfs(config.get("loc"), "0x%02x" % val)
elif way == "i2c":
bus = config.get("bus")
addr = config.get("addr")
offset = config.get("offset")
return self.set_i2c(bus, addr, offset, val)
elif way == "i2cpec":
bus = config.get("bus")
addr = config.get("addr")
offset = config.get("offset")
return self.seti2c_byte_pec(bus, addr, offset, val)
elif way == 'i2cword':
bus = config.get("bus")
addr = config.get("addr")
offset = config.get("offset")
return self.set_i2cword(bus, addr, offset, val)
elif way == "i2cwordpec":
bus = config.get("bus")
addr = config.get("addr")
offset = config.get("offset")
return self.set_i2cwordpec(bus, addr, offset, val)
return False, "unsupport ways: %s" % way
def set_sysfs(self, loc, value):
return self.setsysfs(loc, value)
def setsysfs(self, loc, value):
return osutil.writesysfs(loc, value)
def set_i2cword(self, bus, addr, offset, byte):
return self.seti2cword(bus, addr, offset, byte)
def seti2cword(self, bus, addr, offset, byte):
return osutil.seti2cword(bus, addr, offset, byte)
def set_i2cwordpec(self, bus, addr, offset, val):
return osutil.seti2cwordpec(bus, addr, offset, val)
def seti2c_byte_pec(self, bus, addr, offset, val):
return osutil.seti2c_byte_pec(bus, addr, offset, val)
def set_i2c(self, bus, addr, offset, byte):
return self.seti2c(bus, addr, offset, byte)
def seti2c(self, bus, addr, offset, byte):
ret, val = osutil.rji2cset(bus, addr, offset, byte)
return ret, val
def getbcmtemp(self):
try:
sta, ret = osutil.getmactemp()
if sta == True:
mac_aver = float(ret.get("average", self.__error_ret))
mac_aver = mac_aver * 1000
else:
return False, ret
except AttributeError as e:
return False, str(e)
return True, mac_aver
def getbcmreg(self, reg):
ret, val = osutil.getsdkreg(reg)
return ret, val
def logger_debug(self, msg):
baseutil.logger_debug(msg)
def command(self, cmd):
ret, output = osutil.command(cmd)
return ret, output
|
3,721 |
init test case
|
import numpy as np
import sys
import time
from shapely.geometry import Polygon
import paddle
import unittest
from ext_op import rbox_iou
def rbox2poly_single(rrect, get_best_begin_point=False):
"""
rrect:[x_ctr,y_ctr,w,h,angle]
to
poly:[x0,y0,x1,y1,x2,y2,x3,y3]
"""
x_ctr, y_ctr, width, height, angle = rrect[:5]
tl_x, tl_y, br_x, br_y = -width / 2, -height / 2, width / 2, height / 2
# rect 2x4
rect = np.array([[tl_x, br_x, br_x, tl_x], [tl_y, tl_y, br_y, br_y]])
R = np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)]])
# poly
poly = R.dot(rect)
x0, x1, x2, x3 = poly[0, :4] + x_ctr
y0, y1, y2, y3 = poly[1, :4] + y_ctr
poly = np.array([x0, y0, x1, y1, x2, y2, x3, y3], dtype=np.float64)
return poly
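# Worked example (added for illustration): an axis-aligned 2x1 box centred at
# the origin with angle 0 maps to its corners in (tl, tr, br, bl) order:
#
#   rbox2poly_single([0, 0, 2, 1, 0])
#   # -> [-1.0, -0.5, 1.0, -0.5, 1.0, 0.5, -1.0, 0.5]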
def intersection(g, p):
"""
Intersection.
"""
g = g[:8].reshape((4, 2))
p = p[:8].reshape((4, 2))
a = g
b = p
use_filter = True
if use_filter:
# step1:
inter_x1 = np.maximum(np.min(a[:, 0]), np.min(b[:, 0]))
inter_x2 = np.minimum(np.max(a[:, 0]), np.max(b[:, 0]))
inter_y1 = np.maximum(np.min(a[:, 1]), np.min(b[:, 1]))
inter_y2 = np.minimum(np.max(a[:, 1]), np.max(b[:, 1]))
if inter_x1 >= inter_x2 or inter_y1 >= inter_y2:
return 0.
x1 = np.minimum(np.min(a[:, 0]), np.min(b[:, 0]))
x2 = np.maximum(np.max(a[:, 0]), np.max(b[:, 0]))
y1 = np.minimum(np.min(a[:, 1]), np.min(b[:, 1]))
y2 = np.maximum(np.max(a[:, 1]), np.max(b[:, 1]))
if x1 >= x2 or y1 >= y2 or (x2 - x1) < 2 or (y2 - y1) < 2:
return 0.
g = Polygon(g)
p = Polygon(p)
if not g.is_valid or not p.is_valid:
return 0
inter = Polygon(g).intersection(Polygon(p)).area
union = g.area + p.area - inter
if union == 0:
return 0
else:
return inter / union
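# Minimal sketch (added): intersection() returns the IoU of two quads given as
# flat [x0, y0, ..., x3, y3] arrays (the early filter rejects boxes narrower
# than 2 units, so pixel-scale boxes are assumed):
#
#   sq = np.array([0, 0, 10, 0, 10, 10, 0, 10], dtype=np.float64)
#   intersection(sq, sq)        # -> 1.0
#   intersection(sq, sq + 5.0)  # 25 / 175 ~= 0.143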
def rbox_overlaps(anchors, gt_bboxes, use_cv2=False):
"""
Args:
anchors: [NA, 5] x1,y1,x2,y2,angle
gt_bboxes: [M, 5] x1,y1,x2,y2,angle
Returns:
iou: [NA, M]
"""
assert anchors.shape[1] == 5
assert gt_bboxes.shape[1] == 5
gt_bboxes_ploy = [rbox2poly_single(e) for e in gt_bboxes]
anchors_ploy = [rbox2poly_single(e) for e in anchors]
num_gt, num_anchors = len(gt_bboxes_ploy), len(anchors_ploy)
iou = np.zeros((num_anchors, num_gt), dtype=np.float64)
start_time = time.time()
for i in range(num_anchors):
for j in range(num_gt):
try:
iou[i, j] = intersection(anchors_ploy[i], gt_bboxes_ploy[j])
except Exception as e:
print('cur anchors_ploy[i]', anchors_ploy[i],
'gt_bboxes_ploy[j]', gt_bboxes_ploy[j], e)
return iou
def gen_sample(n):
rbox = np.random.rand(n, 5)
rbox[:, 0:4] = rbox[:, 0:4] * 0.45 + 0.001
rbox[:, 4] = rbox[:, 4] - 0.5
return rbox
class RBoxIoUTest(unittest.TestCase):
def setUp(self):
self.METHOD_NAME()
self.rbox1 = gen_sample(self.n)
self.rbox2 = gen_sample(self.m)
def METHOD_NAME(self):
self.n = 13000
self.m = 7
def assertAllClose(self, x, y, msg, atol=5e-1, rtol=1e-2):
self.assertTrue(np.allclose(x, y, atol=atol, rtol=rtol), msg=msg)
def get_places(self):
places = [paddle.CPUPlace()]
if paddle.device.is_compiled_with_cuda():
places.append(paddle.CUDAPlace(0))
return places
def check_output(self, place):
paddle.disable_static()
pd_rbox1 = paddle.to_tensor(self.rbox1, place=place)
pd_rbox2 = paddle.to_tensor(self.rbox2, place=place)
actual_t = rbox_iou(pd_rbox1, pd_rbox2).numpy()
poly_rbox1 = self.rbox1
poly_rbox2 = self.rbox2
poly_rbox1[:, 0:4] = self.rbox1[:, 0:4] * 1024
poly_rbox2[:, 0:4] = self.rbox2[:, 0:4] * 1024
expect_t = rbox_overlaps(poly_rbox1, poly_rbox2, use_cv2=False)
self.assertAllClose(
actual_t,
expect_t,
msg="rbox_iou has diff at {} \nExpect {}\nBut got {}".format(
str(place), str(expect_t), str(actual_t)))
def test_output(self):
places = self.get_places()
for place in places:
self.check_output(place)
if __name__ == "__main__":
unittest.main()
|
3,722 |
inf
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddleseg.cvlibs import manager
from paddleseg.models import layers
from paddleseg.utils import utils
@manager.MODELS.add_component
class CCNet(nn.Layer):
"""
The CCNet implementation based on PaddlePaddle.
The original article refers to
Zilong Huang, et al. "CCNet: Criss-Cross Attention for Semantic Segmentation"
(https://arxiv.org/abs/1811.11721)
Args:
num_classes (int): The unique number of target classes.
backbone (paddle.nn.Layer): Backbone network, currently supports Resnet18_vd/Resnet34_vd/Resnet50_vd/Resnet101_vd.
backbone_indices (tuple, list, optional): Two values in the tuple indicate the indices of output of backbone. Default: (2, 3).
enable_auxiliary_loss (bool, optional): A bool value indicates whether adding auxiliary loss. Default: True.
dropout_prob (float, optional): The probability of dropout. Default: 0.0.
recurrence (int, optional): The number of recurrent operations. Default: 1.
align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even,
e.g. 1024x512; otherwise set it to True, e.g. 769x769. Default: False.
pretrained (str, optional): The path or url of pretrained model. Default: None.
"""
def __init__(self,
num_classes,
backbone,
backbone_indices=(2, 3),
enable_auxiliary_loss=True,
dropout_prob=0.0,
recurrence=1,
align_corners=False,
pretrained=None):
super().__init__()
self.enable_auxiliary_loss = enable_auxiliary_loss
self.recurrence = recurrence
self.align_corners = align_corners
self.backbone = backbone
self.backbone_indices = backbone_indices
backbone_channels = [
backbone.feat_channels[i] for i in backbone_indices
]
if enable_auxiliary_loss:
self.aux_head = layers.AuxLayer(
backbone_channels[0],
512,
num_classes,
dropout_prob=dropout_prob)
self.head = RCCAModule(
backbone_channels[1],
512,
num_classes,
dropout_prob=dropout_prob,
recurrence=recurrence)
self.pretrained = pretrained
def init_weight(self):
if self.pretrained is not None:
utils.load_entire_model(self, self.pretrained)
def forward(self, x):
feat_list = self.backbone(x)
logit_list = []
output = self.head(feat_list[self.backbone_indices[-1]])
logit_list.append(output)
if self.training and self.enable_auxiliary_loss:
aux_out = self.aux_head(feat_list[self.backbone_indices[-2]])
logit_list.append(aux_out)
return [
F.interpolate(
logit,
paddle.shape(x)[2:],
mode='bilinear',
align_corners=self.align_corners) for logit in logit_list
]
class RCCAModule(nn.Layer):
def __init__(self,
in_channels,
out_channels,
num_classes,
dropout_prob=0.1,
recurrence=1):
super().__init__()
inter_channels = in_channels // 4
self.recurrence = recurrence
self.conva = layers.ConvBNLeakyReLU(
in_channels, inter_channels, 3, padding=1, bias_attr=False)
self.cca = CrissCrossAttention(inter_channels)
self.convb = layers.ConvBNLeakyReLU(
inter_channels, inter_channels, 3, padding=1, bias_attr=False)
self.out = layers.AuxLayer(
in_channels + inter_channels,
out_channels,
num_classes,
dropout_prob=dropout_prob)
def forward(self, x):
feat = self.conva(x)
for i in range(self.recurrence):
feat = self.cca(feat)
feat = self.convb(feat)
output = self.out(paddle.concat([x, feat], axis=1))
return output
class CrissCrossAttention(nn.Layer):
def __init__(self, in_channels):
super().__init__()
self.q_conv = nn.Conv2D(in_channels, in_channels // 8, kernel_size=1)
self.k_conv = nn.Conv2D(in_channels, in_channels // 8, kernel_size=1)
self.v_conv = nn.Conv2D(in_channels, in_channels, kernel_size=1)
self.softmax = nn.Softmax(axis=3)
self.gamma = self.create_parameter(
shape=(1, ), default_initializer=nn.initializer.Constant(0))
self.inf_tensor = paddle.full(shape=(1, ), fill_value=float('inf'))
def forward(self, x):
b, c, h, w = paddle.shape(x)
proj_q = self.q_conv(x)
proj_q_h = proj_q.transpose([0, 3, 1, 2]).reshape(
[b * w, -1, h]).transpose([0, 2, 1])
proj_q_w = proj_q.transpose([0, 2, 1, 3]).reshape(
[b * h, -1, w]).transpose([0, 2, 1])
proj_k = self.k_conv(x)
proj_k_h = proj_k.transpose([0, 3, 1, 2]).reshape([b * w, -1, h])
proj_k_w = proj_k.transpose([0, 2, 1, 3]).reshape([b * h, -1, w])
proj_v = self.v_conv(x)
proj_v_h = proj_v.transpose([0, 3, 1, 2]).reshape([b * w, -1, h])
proj_v_w = proj_v.transpose([0, 2, 1, 3]).reshape([b * h, -1, w])
energy_h = (paddle.bmm(proj_q_h, proj_k_h) + self.METHOD_NAME(b, h, w)).reshape(
[b, w, h, h]).transpose([0, 2, 1, 3])
energy_w = paddle.bmm(proj_q_w, proj_k_w).reshape([b, h, w, w])
concate = self.softmax(paddle.concat([energy_h, energy_w], axis=3))
attn_h = concate[:, :, :, 0:h].transpose([0, 2, 1, 3]).reshape(
[b * w, h, h])
attn_w = concate[:, :, :, h:h + w].reshape([b * h, w, w])
out_h = paddle.bmm(proj_v_h, attn_h.transpose([0, 2, 1])).reshape(
[b, w, -1, h]).transpose([0, 2, 3, 1])
out_w = paddle.bmm(proj_v_w, attn_w.transpose([0, 2, 1])).reshape(
[b, h, -1, w]).transpose([0, 2, 1, 3])
return self.gamma * (out_h + out_w) + x
def METHOD_NAME(self, B, H, W):
return -paddle.tile(
paddle.diag(paddle.tile(self.inf_tensor, [H]), 0).unsqueeze(0),
[B * W, 1, 1])
|
3,723 |
test password
|
import pytest
from dvc_webdav import WebDAVFileSystem, WebDAVSFileSystem
from dvc.fs import get_cloud_fs
from tests.utils.asserts import issubset
url_fmt = "{scheme}://{user}@example.com/public.php/webdav"
url = "webdav://example.com/public.php/webdav"
user = "username"
password = "password"
token = "4MgjsNM5aSJjxIKM"
custom_auth_header = "Custom-Header"
def test_common():
fs = WebDAVFileSystem(
url=url,
cert_path="cert/path",
key_path="key/path",
ssl_verify="bundle.pem",
timeout=10,
prefix="/public.php/webdav",
user=None,
password=None,
ask_password=False,
token=None,
custom_auth_header=None,
)
assert issubset(
{
"headers": {},
"auth": None,
"base_url": url,
"cert": ("cert/path", "key/path"),
"verify": "bundle.pem",
"timeout": 10,
},
fs.fs_args,
)
assert fs.prefix == "/public.php/webdav"
def test_user():
fs = WebDAVFileSystem(url=url, user=user)
assert issubset({"auth": (user, None), "headers": {}}, fs.fs_args)
def METHOD_NAME():
config = {"url": url, "user": user, "password": password}
fs = WebDAVFileSystem(**config)
assert issubset(
{
"headers": {},
"auth": (user, password),
},
fs.fs_args,
)
def test_token():
config = {"token": token, "url": url}
fs = WebDAVFileSystem(**config)
assert issubset(
{"headers": {"Authorization": f"Bearer {token}"}, "auth": None},
fs.fs_args,
)
def test_ask_password(mocker):
ask_password_mocked = mocker.patch("dvc_webdav.ask_password", return_value="pass")
host = "host"
# it should not ask for password as password is set
config = {
"url": url,
"user": user,
"password": password,
"ask_password": True,
"host": host,
}
fs = WebDAVFileSystem(**config)
assert issubset({"auth": (user, password), "headers": {}}, fs.fs_args)
config.pop("password")
fs = WebDAVFileSystem(**config)
assert issubset({"auth": (user, "pass"), "headers": {}}, fs.fs_args)
ask_password_mocked.assert_called_once_with(host, user)
def test_custom_auth_header():
config = {
"url": url,
"custom_auth_header": custom_auth_header,
"password": password,
}
fs = WebDAVFileSystem(**config)
assert issubset(
{"headers": {custom_auth_header: password}, "auth": None},
fs.fs_args,
)
def test_ask_password_custom_auth_header(mocker):
ask_password_mocked = mocker.patch("dvc_webdav.ask_password", return_value="pass")
host = "host"
# it should not ask for password as password is set
config = {
"url": url,
"custom_auth_header": custom_auth_header,
"password": password,
"ask_password": True,
"host": host,
}
fs = WebDAVFileSystem(**config)
assert issubset(
{"headers": {custom_auth_header: password}, "auth": None}, fs.fs_args
)
config.pop("password")
fs = WebDAVFileSystem(**config)
assert issubset({"headers": {custom_auth_header: "pass"}, "auth": None}, fs.fs_args)
ask_password_mocked.assert_called_once_with(host, custom_auth_header)
def test_ssl_verify_custom_cert():
config = {
"url": url,
"ssl_verify": "/path/to/custom/cabundle.pem",
}
fs = WebDAVFileSystem(**config)
assert fs.fs_args["verify"] == "/path/to/custom/cabundle.pem"
@pytest.mark.parametrize(
"base_url, fs_cls",
[
(url_fmt.format(scheme="webdav", user=user), WebDAVFileSystem),
(url_fmt.format(scheme="webdavs", user=user), WebDAVSFileSystem),
],
)
def test_remote_with_jobs(dvc, base_url, fs_cls):
scheme = "http" + ("s" if fs_cls is WebDAVSFileSystem else "")
remote_config = {"url": base_url}
dvc.config["remote"]["dav"] = remote_config
cls, config, _ = get_cloud_fs(dvc.config, name="dav")
assert config["user"] == user
assert f"{scheme}://{user}@example.com" in config["host"]
assert cls is fs_cls
# config from remote takes priority
remote_config.update({"user": "admin"})
cls, config, _ = get_cloud_fs(dvc.config, name="dav")
assert config["user"] == "admin"
assert f"{scheme}://{user}@example.com" in config["host"]
assert cls is fs_cls
|
3,724 |
etag
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetIamPolicyResult',
'AwaitableGetIamPolicyResult',
'get_iam_policy',
'get_iam_policy_output',
]
@pulumi.output_type
class GetIamPolicyResult:
"""
A collection of values returned by getIamPolicy.
"""
def __init__(__self__, data_policy_id=None, METHOD_NAME=None, id=None, location=None, policy_data=None, project=None):
if data_policy_id and not isinstance(data_policy_id, str):
raise TypeError("Expected argument 'data_policy_id' to be a str")
pulumi.set(__self__, "data_policy_id", data_policy_id)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", METHOD_NAME)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if policy_data and not isinstance(policy_data, str):
raise TypeError("Expected argument 'policy_data' to be a str")
pulumi.set(__self__, "policy_data", policy_data)
if project and not isinstance(project, str):
raise TypeError("Expected argument 'project' to be a str")
pulumi.set(__self__, "project", project)
@property
@pulumi.getter(name="dataPolicyId")
def data_policy_id(self) -> str:
return pulumi.get(self, "data_policy_id")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
(Computed) The etag of the IAM policy.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
return pulumi.get(self, "location")
@property
@pulumi.getter(name="policyData")
def policy_data(self) -> str:
"""
(Required only by `bigquerydatapolicy.DataPolicyIamPolicy`) The policy data generated by
an `organizations_get_iam_policy` data source.
"""
return pulumi.get(self, "policy_data")
@property
@pulumi.getter
def project(self) -> str:
return pulumi.get(self, "project")
class AwaitableGetIamPolicyResult(GetIamPolicyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetIamPolicyResult(
data_policy_id=self.data_policy_id,
METHOD_NAME=self.METHOD_NAME,
id=self.id,
location=self.location,
policy_data=self.policy_data,
project=self.project)
def get_iam_policy(data_policy_id: Optional[str] = None,
location: Optional[str] = None,
project: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetIamPolicyResult:
"""
Retrieves the current IAM policy data for datapolicy
## example
```python
import pulumi
import pulumi_gcp as gcp
policy = gcp.bigquerydatapolicy.get_iam_policy(project=google_bigquery_datapolicy_data_policy["data_policy"]["project"],
location=google_bigquery_datapolicy_data_policy["data_policy"]["location"],
data_policy_id=google_bigquery_datapolicy_data_policy["data_policy"]["data_policy_id"])
```
:param str location: The name of the location of the data policy.
Used to find the parent resource to bind the IAM policy to
:param str project: The ID of the project in which the resource belongs.
If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used.
"""
__args__ = dict()
__args__['dataPolicyId'] = data_policy_id
__args__['location'] = location
__args__['project'] = project
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('gcp:bigquerydatapolicy/getIamPolicy:getIamPolicy', __args__, opts=opts, typ=GetIamPolicyResult).value
return AwaitableGetIamPolicyResult(
data_policy_id=pulumi.get(__ret__, 'data_policy_id'),
METHOD_NAME=pulumi.get(__ret__, 'etag'),
id=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
policy_data=pulumi.get(__ret__, 'policy_data'),
project=pulumi.get(__ret__, 'project'))
@_utilities.lift_output_func(get_iam_policy)
def get_iam_policy_output(data_policy_id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[Optional[str]]] = None,
project: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetIamPolicyResult]:
"""
Retrieves the current IAM policy data for datapolicy
## example
```python
import pulumi
import pulumi_gcp as gcp
policy = gcp.bigquerydatapolicy.get_iam_policy(project=google_bigquery_datapolicy_data_policy["data_policy"]["project"],
location=google_bigquery_datapolicy_data_policy["data_policy"]["location"],
data_policy_id=google_bigquery_datapolicy_data_policy["data_policy"]["data_policy_id"])
```
:param str location: The name of the location of the data policy.
Used to find the parent resource to bind the IAM policy to
:param str project: The ID of the project in which the resource belongs.
If it is not provided, the project will be parsed from the identifier of the parent resource. If no project is provided in the parent identifier and no project is specified, the provider project is used.
"""
...
|
3,725 |
extract data
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
"""Operations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.hybridcompute.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs
) -> AsyncIterable["models.OperationListResult"]:
"""Gets a list of hybrid compute operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.hybridcompute.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2022-03-10"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def METHOD_NAME(pipeline_response):
deserialized = self._deserialize('OperationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, METHOD_NAME
)
list.metadata = {'url': '/providers/Microsoft.HybridCompute/operations'} # type: ignore
|
3,726 |
test raw ndarray
|
"""Unit tests for the PickleBuffer object.
Pickling tests themselves are in pickletester.py.
"""
import gc
from pickle import PickleBuffer
import weakref
import unittest
from test import support
class B(bytes):
pass
class PickleBufferTest(unittest.TestCase):
def check_memoryview(self, pb, equiv):
with memoryview(pb) as m:
with memoryview(equiv) as expected:
self.assertEqual(m.nbytes, expected.nbytes)
self.assertEqual(m.readonly, expected.readonly)
self.assertEqual(m.itemsize, expected.itemsize)
self.assertEqual(m.shape, expected.shape)
self.assertEqual(m.strides, expected.strides)
self.assertEqual(m.c_contiguous, expected.c_contiguous)
self.assertEqual(m.f_contiguous, expected.f_contiguous)
self.assertEqual(m.format, expected.format)
self.assertEqual(m.tobytes(), expected.tobytes())
def test_constructor_failure(self):
with self.assertRaises(TypeError):
PickleBuffer()
with self.assertRaises(TypeError):
PickleBuffer("foo")
# Released memoryview fails taking a buffer
m = memoryview(b"foo")
m.release()
with self.assertRaises(ValueError):
PickleBuffer(m)
def test_basics(self):
pb = PickleBuffer(b"foo")
self.assertEqual(b"foo", bytes(pb))
with memoryview(pb) as m:
self.assertTrue(m.readonly)
pb = PickleBuffer(bytearray(b"foo"))
self.assertEqual(b"foo", bytes(pb))
with memoryview(pb) as m:
self.assertFalse(m.readonly)
m[0] = 48
self.assertEqual(b"0oo", bytes(pb))
def test_release(self):
pb = PickleBuffer(b"foo")
pb.release()
with self.assertRaises(ValueError) as raises:
memoryview(pb)
self.assertIn("operation forbidden on released PickleBuffer object",
str(raises.exception))
# Idempotency
pb.release()
def test_cycle(self):
b = B(b"foo")
pb = PickleBuffer(b)
b.cycle = pb
wpb = weakref.ref(pb)
del b, pb
gc.collect()
self.assertIsNone(wpb())
def test_ndarray_2d(self):
# C-contiguous
ndarray = support.import_module("_testbuffer").ndarray
arr = ndarray(list(range(12)), shape=(4, 3), format='<i')
self.assertTrue(arr.c_contiguous)
self.assertFalse(arr.f_contiguous)
pb = PickleBuffer(arr)
self.check_memoryview(pb, arr)
# Non-contiguous
arr = arr[::2]
self.assertFalse(arr.c_contiguous)
self.assertFalse(arr.f_contiguous)
pb = PickleBuffer(arr)
self.check_memoryview(pb, arr)
# F-contiguous
arr = ndarray(list(range(12)), shape=(3, 4), strides=(4, 12), format='<i')
self.assertTrue(arr.f_contiguous)
self.assertFalse(arr.c_contiguous)
pb = PickleBuffer(arr)
self.check_memoryview(pb, arr)
# Tests for PickleBuffer.raw()
def check_raw(self, obj, equiv):
pb = PickleBuffer(obj)
with pb.raw() as m:
self.assertIsInstance(m, memoryview)
self.check_memoryview(m, equiv)
def test_raw(self):
for obj in (b"foo", bytearray(b"foo")):
with self.subTest(obj=obj):
self.check_raw(obj, obj)
def METHOD_NAME(self):
# 1-D, contiguous
ndarray = support.import_module("_testbuffer").ndarray
arr = ndarray(list(range(3)), shape=(3,), format='<h')
equiv = b"\x00\x00\x01\x00\x02\x00"
self.check_raw(arr, equiv)
# 2-D, C-contiguous
arr = ndarray(list(range(6)), shape=(2, 3), format='<h')
equiv = b"\x00\x00\x01\x00\x02\x00\x03\x00\x04\x00\x05\x00"
self.check_raw(arr, equiv)
# 2-D, F-contiguous
arr = ndarray(list(range(6)), shape=(2, 3), strides=(2, 4),
format='<h')
# Note this is different from arr.tobytes()
equiv = b"\x00\x00\x01\x00\x02\x00\x03\x00\x04\x00\x05\x00"
self.check_raw(arr, equiv)
# 0-D
arr = ndarray(456, shape=(), format='<i')
equiv = b'\xc8\x01\x00\x00'
self.check_raw(arr, equiv)
def check_raw_non_contiguous(self, obj):
pb = PickleBuffer(obj)
with self.assertRaisesRegex(BufferError, "non-contiguous"):
pb.raw()
def test_raw_non_contiguous(self):
# 1-D
ndarray = support.import_module("_testbuffer").ndarray
arr = ndarray(list(range(6)), shape=(6,), format='<i')[::2]
self.check_raw_non_contiguous(arr)
# 2-D
arr = ndarray(list(range(12)), shape=(4, 3), format='<i')[::2]
self.check_raw_non_contiguous(arr)
def test_raw_released(self):
pb = PickleBuffer(b"foo")
pb.release()
with self.assertRaises(ValueError) as raises:
pb.raw()
if __name__ == "__main__":
unittest.main()
|
3,727 |
test rng snapshot
|
# Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests for the virtio-rng device"""
# pylint:disable=redefined-outer-name
import pytest
from framework.properties import global_props
from framework.utils import check_entropy
if global_props.instance == "c7g.metal" and global_props.host_linux_version == "4.14":
pytestmark = pytest.mark.skip(reason="c7g requires no SVE 5.10 kernel")
@pytest.fixture(params=[None])
def uvm_with_rng(uvm_nano, request):
"""Fixture of a microvm with virtio-rng configured"""
rate_limiter = request.param
uvm_nano.add_net_iface()
uvm_nano.api.entropy.put(rate_limiter=rate_limiter)
uvm_nano.start()
# Just stuff it in the microvm so we can look at it later
uvm_nano.rng_rate_limiter = rate_limiter
return uvm_nano
def test_rng_not_present(uvm_nano):
"""
Test a guest microVM *without* an entropy device and ensure that
we cannot get data from /dev/hwrng
"""
vm = uvm_nano
vm.add_net_iface()
vm.start()
# If the guest kernel has been built with the virtio-rng module
# the device should exist in the guest filesystem but we should
# not be able to get random numbers out of it.
cmd = "test -e /dev/hwrng"
ecode, _, _ = vm.ssh.run(cmd)
assert ecode == 0
cmd = "dd if=/dev/hwrng of=/dev/null bs=10 count=1"
ecode, _, _ = vm.ssh.run(cmd)
assert ecode == 1
def test_rng_present(uvm_with_rng):
"""
Test a guest microVM with an entropy device configured and ensure
that we can access `/dev/hwrng`
"""
vm = uvm_with_rng
check_entropy(vm.ssh)
def METHOD_NAME(uvm_with_rng, microvm_factory):
"""
Test that a virtio-rng device is functional after resuming from
a snapshot
"""
vm = uvm_with_rng
check_entropy(vm.ssh)
snapshot = vm.snapshot_full()
new_vm = microvm_factory.build()
new_vm.spawn()
new_vm.restore_from_snapshot(snapshot, resume=True)
check_entropy(new_vm.ssh)
def _get_percentage_difference(measured, base):
"""Return the percentage delta between the arguments."""
if measured == base:
return 0
try:
return (abs(measured - base) / base) * 100.0
except ZeroDivisionError:
# It means base and only base is 0.
return 100.0
def _throughput_units_multiplier(units):
"""
Parse the throughput units and return the multiplier that would
translate the corresponding value to Bytes/sec
"""
if units == "kB/s":
return 1000
if units == "MB/s":
return 1000 * 1000
if units == "GB/s":
return 1000 * 1000 * 1000
raise Exception("Unknown units")
def _process_dd_output(out):
"""
Parse the output of `dd` and return the achieved throughput in
KB/sec.
"""
# Example `dd` output:
#
# $ dd if=/dev/hwrng of=/dev/null bs=100 count=1
# 1+0 records in
# 1+0 records out
# 100 bytes (100 B) copied, 0.000749912 s, 133 kB/s
# So we split the lines of the output and keep the last line.
report = out.splitlines()[-1].split(" ")
# Last two items in the line are value and units
(value, units) = (report[-2], report[-1])
return float(value) * _throughput_units_multiplier(units) / 1000
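# Worked example (derived from the sample output in the comment above):
# value = "133", units = "kB/s", so the function returns
# 133 * 1000 / 1000 == 133.0, i.e. the throughput in KB/sec.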
def _get_throughput(ssh, random_bytes):
"""
Request `random_bytes` from `/dev/hwrng` and return the achieved
throughput in KB/sec
"""
# Issue a `dd` command to request 100 times `random_bytes` from the device.
# 100 here is used to get enough confidence on the achieved throughput.
cmd = "dd if=/dev/hwrng of=/dev/null bs={} count=100".format(random_bytes)
exit_code, _, stderr = ssh.run(cmd)
assert exit_code == 0, stderr
# dd gives its output on stderr
return _process_dd_output(stderr)
def _check_entropy_rate_limited(ssh, random_bytes, expected_kbps):
"""
Ask for `random_bytes` from `/dev/hwrng` in the guest and check
that achieved throughput is within a 10% of the expected throughput.
NOTE: 10% is an arbitrarily selected limit which should be safe enough,
so that we don't run into many intermittent CI failures.
"""
measured_kbps = _get_throughput(ssh, random_bytes)
assert (
_get_percentage_difference(measured_kbps, expected_kbps) <= 10
), "Expected {} KB/s, measured {} KB/s".format(expected_kbps, measured_kbps)
def _rate_limiter_id(rate_limiter):
"""
Helper function to return a name for the rate_limiter to be
used as an id for parametrized tests.
"""
size = rate_limiter["bandwidth"]["size"]
refill_time = rate_limiter["bandwidth"]["refill_time"]
return "{} KB/sec".format(float(size) / float(refill_time))
# parametrize the RNG rate limiter
@pytest.mark.parametrize(
"uvm_with_rng",
[
{"bandwidth": {"size": 1000, "refill_time": 100}},
{"bandwidth": {"size": 10000, "refill_time": 100}},
{"bandwidth": {"size": 100000, "refill_time": 100}},
],
indirect=True,
ids=_rate_limiter_id,
)
def test_rng_bw_rate_limiter(uvm_with_rng):
"""
Test that rate limiter without initial burst budget works
"""
vm = uvm_with_rng
# _start_vm_with_rng(vm, rate_limiter)
size = vm.rng_rate_limiter["bandwidth"]["size"]
refill_time = vm.rng_rate_limiter["bandwidth"]["refill_time"]
expected_kbps = size / refill_time
# Check the rate limiter using a request size equal to the size
# of the token bucket.
_check_entropy_rate_limited(vm.ssh, size, expected_kbps)
|
3,728 |
create
|
import braintree
from braintree.address import Address
from braintree.resource import Resource
from braintree.configuration import Configuration
class PaymentMethod(Resource):
@staticmethod
def METHOD_NAME(params=None):
if params is None:
params = {}
return Configuration.gateway().payment_method.METHOD_NAME(params)
@staticmethod
def find(payment_method_token):
return Configuration.gateway().payment_method.find(payment_method_token)
@staticmethod
def update(payment_method_token, params):
return Configuration.gateway().payment_method.update(payment_method_token, params)
@staticmethod
def delete(payment_method_token, options=None):
if options is None:
options = {}
return Configuration.gateway().payment_method.delete(payment_method_token, options)
@staticmethod
def create_signature():
return PaymentMethod.signature("create")
@staticmethod
def signature(type):
options = [
"fail_on_duplicate_payment_method",
"make_default",
"skip_advanced_fraud_checking",
"us_bank_account_verification_method",
"verification_account_type",
"verification_amount",
"verification_merchant_account_id",
"verify_card",
{
"adyen": [
"overwrite_brand",
"selected_brand"
]
},
{
"paypal": [
"payee_email",
"order_id",
"custom_field",
"description",
"amount",
{ "shipping": Address.create_signature() }
],
},
]
three_d_secure_pass_thru = [
"cavv",
"ds_transaction_id",
"eci_flag",
"three_d_secure_version",
"xid"
]
signature = [
"billing_address_id",
"cardholder_name",
"customer_id",
"cvv",
"device_data",
"expiration_date",
"expiration_month",
"expiration_year",
"number",
"payment_method_nonce",
"paypal_refresh_token",
"token",
"device_session_id", # NEXT_MAJOR_VERSION remove device_session_id
{
"billing_address": Address.create_signature()
},
{
"options": options
},
{
"three_d_secure_pass_thru": three_d_secure_pass_thru
}
]
return signature
@staticmethod
def update_signature():
three_d_secure_pass_thru = [
"cavv",
"ds_transaction_id",
"eci_flag",
"three_d_secure_version",
"xid"
]
signature = [
"billing_address_id",
"cardholder_name",
"cvv",
"device_data",
"expiration_date",
"expiration_month",
"expiration_year",
"number",
"payment_method_nonce",
"token",
"venmo_sdk_payment_method_code",
"device_session_id", "fraud_merchant_id", # NEXT_MAJOR_VERSION remove device_session_id and fraud_merchant_id
{
"options": [
"make_default",
"skip_advanced_fraud_checking",
"us_bank_account_verification_method",
"venmo_sdk_session",
"verification_account_type",
"verification_amount",
"verification_merchant_account_id",
"verify_card",
{
"adyen": [
"overwrite_brand",
"selected_brand"
]
}
]
},
{
"billing_address": Address.update_signature() + [{"options": ["update_existing"]}]
},
{
"three_d_secure_pass_thru": three_d_secure_pass_thru
}
]
return signature
@staticmethod
def delete_signature():
return ["revoke_all_grants"]
|
3,729 |
get ref data
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import topi
import numpy as np
from tvm.contrib.pickle_memoize import memoize
from scipy import signal
from tvm.topi.utils import get_const_tuple
from tvm.topi.nn.utils import get_pad_tuple
import tvm.topi.testing
from tvm.topi.cuda.depthwise_conv2d import schedule_depthwise_conv2d_backward_input_nhwc
import tvm.testing
def verify_depthwise_conv2d_back_input(
batch, in_channel, in_h, channel_multiplier, filter_h, stride_h, padding_h
):
in_w = in_h
filter_channel = in_channel
filter_w = filter_h
stride_w = stride_h
padding_w = padding_h
out_h = int((in_h + 2 * padding_h - filter_h) / stride_h + 1)
out_w = int((in_w + 2 * padding_w - filter_w) / stride_w + 1)
out_channel = in_channel * channel_multiplier
ishape = [batch, in_h, in_w, in_channel]
oshape = [batch, out_h, out_w, out_channel]
# placeholder
Out_grad = te.placeholder(oshape, name="Out_grad")
Filter = te.placeholder((filter_h, filter_w, filter_channel, channel_multiplier))
# declare
In_grad = topi.nn.depthwise_conv2d_backward_input_nhwc(
Filter,
Out_grad,
oshape,
ishape,
stride=[stride_h, stride_w],
padding=[padding_h, padding_w],
)
# schedule
schedule = schedule_depthwise_conv2d_backward_input_nhwc(In_grad)
def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
# build the kernel
f = tvm.build(schedule, [Filter, Out_grad, In_grad], device)
# prepare pod type for test data closure
dtype = Out_grad.dtype
out_grad_shape = get_const_tuple(Out_grad.shape)
filter_shape = get_const_tuple(Filter.shape)
# use memoize to pickle the test data for next time use
@memoize("topi.tests.test_topi_depthwise_conv2d_backward_input.nhwc")
def METHOD_NAME():
out_grad_np = np.random.uniform(size=out_grad_shape).astype(dtype)
filter_np = np.random.uniform(size=filter_shape).astype(dtype)
dilated_out_grad_np = tvm.topi.testing.dilate_python(
out_grad_np, [1, stride_h, stride_w, 1]
)
# padding params in forward propagation
fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple(
[padding_h, padding_w], (filter_h, filter_w)
)
# padding params in backward propagation
bpad_top = filter_h - 1 - fpad_top
bpad_bottom = (filter_h - 1 - fpad_bottom) + (stride_h - 1)
bpad_left = filter_w - 1 - fpad_left
bpad_right = (filter_w - 1 - fpad_right) + (stride_w - 1)
padded_out_grad = np.zeros(
(
batch,
dilated_out_grad_np.shape[1] + bpad_top + bpad_bottom,
dilated_out_grad_np.shape[2] + bpad_left + bpad_right,
out_channel,
)
)
padded_out_grad[
:,
bpad_top : dilated_out_grad_np.shape[1] + bpad_top,
bpad_left : dilated_out_grad_np.shape[2] + bpad_left,
:,
] = dilated_out_grad_np
in_grad_np = np.zeros((batch, in_h, in_w, in_channel))
for b in range(batch):
for c in range(in_channel):
for m in range(channel_multiplier):
in_grad_np[b, :, :, c] += signal.convolve2d(
padded_out_grad[b, :, :, c * channel_multiplier + m],
filter_np[:, :, c, m],
mode="valid",
)[0:in_h, 0:in_w]
return (out_grad_np, filter_np, in_grad_np)
(out_grad_np, filter_np, in_grad_np) = METHOD_NAME()
out_grad_tvm = tvm.nd.array(out_grad_np, dev)
filter_tvm = tvm.nd.array(filter_np, dev)
in_grad_tvm = tvm.nd.array(np.zeros(shape=ishape, dtype=dtype), dev)
# launch the kernel
timer = f.time_evaluator(f.entry_name, dev, number=1)
tcost = timer(filter_tvm, out_grad_tvm, in_grad_tvm).mean
tvm.testing.assert_allclose(in_grad_np, in_grad_tvm.numpy(), rtol=1e-5)
check_device("opencl")
check_device("cuda")
check_device("metal")
check_device("rocm")
check_device("vulkan")
check_device("nvptx")
@tvm.testing.requires_gpu
def test_topi_depthwise_conv2d_backward_input_nhwc():
verify_depthwise_conv2d_back_input(16, 256, 56, 1, 3, 1, 1)
verify_depthwise_conv2d_back_input(16, 256, 56, 2, 3, 1, 1)
verify_depthwise_conv2d_back_input(16, 256, 56, 1, 5, 1, 2)
verify_depthwise_conv2d_back_input(16, 256, 56, 2, 5, 1, 2)
verify_depthwise_conv2d_back_input(16, 256, 56, 1, 3, 2, 1)
verify_depthwise_conv2d_back_input(16, 256, 56, 2, 3, 2, 1)
verify_depthwise_conv2d_back_input(16, 256, 56, 1, 5, 2, 2)
verify_depthwise_conv2d_back_input(16, 256, 56, 2, 5, 2, 2)
verify_depthwise_conv2d_back_input(16, 256, 56, 1, 3, 1, 0)
verify_depthwise_conv2d_back_input(16, 256, 56, 2, 3, 1, 0)
verify_depthwise_conv2d_back_input(16, 256, 56, 1, 5, 1, 0)
verify_depthwise_conv2d_back_input(16, 256, 56, 2, 5, 1, 0)
verify_depthwise_conv2d_back_input(16, 256, 56, 1, 3, 2, 0)
verify_depthwise_conv2d_back_input(16, 256, 56, 2, 3, 2, 0)
verify_depthwise_conv2d_back_input(16, 256, 56, 1, 5, 2, 0)
verify_depthwise_conv2d_back_input(16, 256, 56, 2, 5, 2, 0)
if __name__ == "__main__":
test_topi_depthwise_conv2d_backward_input_nhwc()
|
3,730 |
asyncio loop runner
|
import asyncio
import inspect
from .case import TestCase
class IsolatedAsyncioTestCase(TestCase):
# Names intentionally have a long prefix
# to reduce a chance of clashing with user-defined attributes
# from inherited test case
#
# The class doesn't call loop.run_until_complete(self.setUp()) and family
# but uses a different approach:
# 1. create a long-running task that reads self.setUp()
# awaitable from queue along with a future
# 2. await the awaitable object passed in and set the result
# into the future object
# 3. Outer code puts the awaitable and the future object into the queue
# and waits for the future
# The trick is necessary because every run_until_complete() call
# creates a new task with embedded ContextVar context.
# To share contextvars between setUp(), test and tearDown() we need to execute
# them inside the same task.
# Note: the test case modifies event loop policy if the policy was not instantiated
# yet.
# asyncio.get_event_loop_policy() creates a default policy on demand but never
# returns None
# I believe this is not an issue in user level tests but python itself for testing
# should reset a policy in every test module
# by calling asyncio.set_event_loop_policy(None) in tearDownModule()
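# A minimal usage sketch of the pattern described above (illustrative only;
# open_connection() is a hypothetical async helper):
#
#   class MyTest(IsolatedAsyncioTestCase):
#       async def asyncSetUp(self):
#           self.conn = await open_connection()
#       async def test_ping(self):
#           self.assertTrue(await self.conn.ping())
#       async def asyncTearDown(self):
#           await self.conn.close()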
def __init__(self, methodName='runTest'):
super().__init__(methodName)
self._asyncioTestLoop = None
self._asyncioCallsQueue = None
async def asyncSetUp(self):
pass
async def asyncTearDown(self):
pass
def addAsyncCleanup(self, func, /, *args, **kwargs):
# A trivial trampoline to addCleanup()
# the function exists because it has a different semantics
# and signature:
# addCleanup() accepts regular functions
# but addAsyncCleanup() accepts coroutines
#
# We intentionally don't add inspect.iscoroutinefunction() check
# for func argument because there is no way
# to check for async function reliably:
# 1. It can be "async def func()" itself
# 2. Class can implement "async def __call__()" method
# 3. Regular "def func()" that returns awaitable object
self.addCleanup(*(func, *args), **kwargs)
def _callSetUp(self):
self.setUp()
self._callAsync(self.asyncSetUp)
def _callTestMethod(self, method):
self._callMaybeAsync(method)
def _callTearDown(self):
self._callAsync(self.asyncTearDown)
self.tearDown()
def _callCleanup(self, function, *args, **kwargs):
self._callMaybeAsync(function, *args, **kwargs)
def _callAsync(self, func, /, *args, **kwargs):
assert self._asyncioTestLoop is not None
ret = func(*args, **kwargs)
assert inspect.isawaitable(ret)
fut = self._asyncioTestLoop.create_future()
self._asyncioCallsQueue.put_nowait((fut, ret))
return self._asyncioTestLoop.run_until_complete(fut)
def _callMaybeAsync(self, func, /, *args, **kwargs):
assert self._asyncioTestLoop is not None
ret = func(*args, **kwargs)
if inspect.isawaitable(ret):
fut = self._asyncioTestLoop.create_future()
self._asyncioCallsQueue.put_nowait((fut, ret))
return self._asyncioTestLoop.run_until_complete(fut)
else:
return ret
async def METHOD_NAME(self, fut):
self._asyncioCallsQueue = queue = asyncio.Queue()
fut.set_result(None)
while True:
query = await queue.get()
queue.task_done()
if query is None:
return
fut, awaitable = query
try:
ret = await awaitable
if not fut.cancelled():
fut.set_result(ret)
except asyncio.CancelledError:
raise
except Exception as ex:
if not fut.cancelled():
fut.set_exception(ex)
def _setupAsyncioLoop(self):
assert self._asyncioTestLoop is None
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.set_debug(True)
self._asyncioTestLoop = loop
fut = loop.create_future()
self._asyncioCallsTask = loop.create_task(self.METHOD_NAME(fut))
loop.run_until_complete(fut)
def _tearDownAsyncioLoop(self):
assert self._asyncioTestLoop is not None
loop = self._asyncioTestLoop
self._asyncioTestLoop = None
self._asyncioCallsQueue.put_nowait(None)
loop.run_until_complete(self._asyncioCallsQueue.join())
try:
# cancel all tasks
to_cancel = asyncio.all_tasks(loop)
if not to_cancel:
return
for task in to_cancel:
task.cancel()
loop.run_until_complete(
asyncio.gather(*to_cancel, loop=loop, return_exceptions=True))
for task in to_cancel:
if task.cancelled():
continue
if task.exception() is not None:
loop.call_exception_handler({
'message': 'unhandled exception during test shutdown',
'exception': task.exception(),
'task': task,
})
# shutdown asyncgens
loop.run_until_complete(loop.shutdown_asyncgens())
finally:
asyncio.set_event_loop(None)
loop.close()
def run(self, result=None):
self._setupAsyncioLoop()
try:
return super().run(result)
finally:
self._tearDownAsyncioLoop()
|
3,731 |
test not upload all languages
|
from io import BytesIO
from django.test import SimpleTestCase
from unittest import mock
from couchexport.export import export_raw
from corehq.apps.app_manager.models import Application, LinkedApplication
from corehq.apps.app_manager.ui_translations import (
get_default_translations_for_download,
process_ui_translation_upload,
)
from corehq.apps.translations.utils import (
update_app_translations_from_trans_dict,
)
from corehq.util.test_utils import flag_enabled
INITIAL_TRANSLATIONS = {
'en': {
'translation.always.upload': 'Taylor',
'translation.sometimes.upload': 'Kanye',
},
'fra': {
'translation.always.upload': 'Swift',
'translation.sometimes.upload': 'West',
}
}
INITIAL_LINKED_APP_TRANSLATIONS = {
'en': {
'translation.always.upload': 'Taytay',
'translation.sometimes.upload': 'Kanye',
}
}
EXPECTED_TRANSLATIONS = {
'en': {
'translation.always.upload': 'Miley',
'translation.sometimes.upload': 'Kanye',
},
'fra': {
'translation.always.upload': 'Cyrus',
'translation.sometimes.upload': 'West',
}
}
class TestBulkUiTranslation(SimpleTestCase):
def setUp(self):
super(TestBulkUiTranslation, self).setUp()
self.app = Application.new_app("test-domain", "Test App")
self.app.langs = ["en", "fra"]
self.app.translations = INITIAL_TRANSLATIONS
self.linked_app = LinkedApplication.new_app('test-domain-2', 'Test Linked App')
self.linked_app.langs = ["en", "fra"]
self.linked_app.translations = INITIAL_TRANSLATIONS
self.linked_app.linked_app_translations = INITIAL_LINKED_APP_TRANSLATIONS
def _build_translation_download_file(self, headers, data=None):
if data is None:
data = []
translations = get_default_translations_for_download(self.app, 'latest')
for translation_key, translation_value in translations.items():
data.append((translation_key, translation_value))
data = (('translations', tuple(data)),)
temp = BytesIO()
export_raw(headers, data, temp)
temp.seek(0) # .read() is used somewhere so this needs to be at the beginning
return temp
def test_not_upload_all_properties(self):
headers = (('translations', ('property', 'en', 'fra')),)
data = (('translation.always.upload', 'Miley', 'Cyrus'),)
f = self._build_translation_download_file(headers, data)
translations, error_properties, warnings = process_ui_translation_upload(self.app, f)
update_app_translations_from_trans_dict(self.app, translations)
self.assertDictEqual(self.app.translations['en'], {'translation.always.upload': 'Miley'})
self.assertDictEqual(self.app.translations['fra'], {'translation.always.upload': 'Cyrus'})
@flag_enabled('PARTIAL_UI_TRANSLATIONS')
def test_not_upload_all_properties_with_parital_ui_translations(self):
headers = (('translations', ('property', 'en', 'fra')),)
data = (('translation.always.upload', 'Miley', 'Cyrus'),)
f = self._build_translation_download_file(headers, data)
translations, error_properties, warnings = process_ui_translation_upload(self.app, f)
update_app_translations_from_trans_dict(self.app, translations)
self.assertDictEqual(self.app.translations, EXPECTED_TRANSLATIONS)
def METHOD_NAME(self):
headers = (('translations', ('property', 'en')),)
data = (
('translation.always.upload', 'Miley'),
('translation.sometimes.upload', 'Kanye'),
)
f = self._build_translation_download_file(headers, data)
translations, error_properties, warnings = process_ui_translation_upload(self.app, f)
update_app_translations_from_trans_dict(self.app, translations)
self.assertDictEqual(self.app.translations['en'], EXPECTED_TRANSLATIONS['en'])
self.assertDictEqual(self.app.translations['fra'], INITIAL_TRANSLATIONS['fra'])
@flag_enabled('PARTIAL_UI_TRANSLATIONS')
def test_linked_app_not_upload_all_languages_with_partial_ui_translations(self):
headers = (('translations', ('property', 'en')),)
data = (
('translation.always.upload', 'Miley'),
('translation.sometimes.upload', 'Kanye'),
)
f = self._build_translation_download_file(headers, data)
translations, error_properties, warnings = process_ui_translation_upload(self.linked_app, f)
update_app_translations_from_trans_dict(self.linked_app, translations)
self.assertDictEqual(self.linked_app.translations['en'], EXPECTED_TRANSLATIONS['en'])
self.assertDictEqual(self.linked_app.translations['fra'], INITIAL_TRANSLATIONS['fra'])
self.assertDictEqual(self.linked_app.linked_app_translations['en'], EXPECTED_TRANSLATIONS['en'])
with mock.patch.object(LinkedApplication, 'save'):
self.linked_app.reapply_overrides()
self.assertDictEqual(self.linked_app.translations['en'], EXPECTED_TRANSLATIONS['en'])
def test_partial_property_and_language(self):
headers = (('translations', ('property', 'en')),)
data = (('translation.always.upload', 'Miley'),)
f = self._build_translation_download_file(headers, data)
translations, error_properties, warnings = process_ui_translation_upload(self.app, f)
update_app_translations_from_trans_dict(self.app, translations)
self.assertDictEqual(self.app.translations['en'], {'translation.always.upload': 'Miley'})
self.assertDictEqual(self.app.translations['fra'], INITIAL_TRANSLATIONS['fra'])
@flag_enabled('PARTIAL_UI_TRANSLATIONS')
def test_partial_property_and_language_with_partial_ui_translations(self):
headers = (('translations', ('property', 'en')),)
data = (('translation.always.upload', 'Miley'),)
f = self._build_translation_download_file(headers, data)
translations, error_properties, warnings = process_ui_translation_upload(self.app, f)
update_app_translations_from_trans_dict(self.app, translations)
self.assertDictEqual(self.app.translations['en'], EXPECTED_TRANSLATIONS['en'])
self.assertDictEqual(self.app.translations['fra'], INITIAL_TRANSLATIONS['fra'])
|
3,732 |
method
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"network vnet-gateway list-learned-routes",
)
class ListLearnedRoutes(AAZCommand):
"""This operation retrieves a list of routes the virtual network gateway has learned, including routes learned from BGP peers.
:example: Retrieve a list of learned routes.
az network vnet-gateway list-learned-routes -g MyResourceGroup -n MyVnetGateway
"""
_aaz_info = {
"version": "2022-01-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/virtualnetworkgateways/{}/getlearnedroutes", "2022-01-01"],
]
}
AZ_SUPPORT_NO_WAIT = True
def _handler(self, command_args):
super()._handler(command_args)
return self.build_lro_poller(self._execute_operations, self._output)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.name = AAZStrArg(
options=["-n", "--name"],
help="Name of the VNet gateway.",
required=True,
id_part="name",
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
yield self.VirtualNetworkGatewaysGetLearnedRoutes(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class VirtualNetworkGatewaysGetLearnedRoutes(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [202]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [200]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getLearnedRoutes",
**self.url_parameters
)
@property
def METHOD_NAME(self):
return "POST"
@property
def error_format(self):
return "ODataV4Format"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
**self.serialize_url_param(
"virtualNetworkGatewayName", self.ctx.args.name,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2022-01-01",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.value = AAZListType()
value = cls._schema_on_200.value
value.Element = AAZObjectType()
_element = cls._schema_on_200.value.Element
_element.as_path = AAZStrType(
serialized_name="asPath",
flags={"read_only": True},
)
_element.local_address = AAZStrType(
serialized_name="localAddress",
flags={"read_only": True},
)
_element.network = AAZStrType(
flags={"read_only": True},
)
_element.next_hop = AAZStrType(
serialized_name="nextHop",
flags={"read_only": True},
)
_element.origin = AAZStrType(
flags={"read_only": True},
)
_element.source_peer = AAZStrType(
serialized_name="sourcePeer",
flags={"read_only": True},
)
_element.weight = AAZIntType(
flags={"read_only": True},
)
return cls._schema_on_200
class _ListLearnedRoutesHelper:
"""Helper class for ListLearnedRoutes"""
__all__ = ["ListLearnedRoutes"]
|
3,733 |
get options
|
""" This helper looks in the /Operations section of the CS, considering its specific nature:
the /Operations section is designed in a way that each configuration can be specific to a Setup,
while maintaining a default.
So, for example, given the following /Operations section::
Operations/
Defaults/
someOption = someValue
aSecondOption = aSecondValue
specificVo/
someSection/
someOption = someValueInVO
The following calls would give different results based on the setup::
Operations().getValue('someSection/someOption')
- someValueInVO if we are in 'specificVo' vo
- someValue if we are in any other VO
It then becomes important for the Operations() object to know the VO name
for which we want the information, and this can be done in the following ways.
1. by specifying the VO name directly::
Operations(vo=anotherVOName).getValue('someSectionName/someOptionX')
2. by give a group name::
Operations(group=thisIsAGroupOfVO_X).getValue('someSectionName/someOptionX')
3. if neither a VO nor a group is provided, the VO will be guessed from the proxy,
but this works only if the object is instantiated by a proxy (and not, e.g., using a server certificate)
"""
import os
import _thread
from diraccfg import CFG
from DIRAC import S_OK, S_ERROR, gConfig
from DIRAC.ConfigurationSystem.Client.Helpers import Registry, CSGlobals
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
from DIRAC.Core.Security.ProxyInfo import getVOfromProxyGroup
from DIRAC.Core.Utilities import LockRing
from DIRAC.Core.Utilities.DErrno import ESECTION
class Operations:
"""Operations class
The /Operations CFG section is maintained in a cache by an Operations object
"""
__cache = {}
__cacheVersion = 0
__cacheLock = LockRing.LockRing().getLock()
def __init__(self, vo=False, group=False, setup=False):
"""c'tor
Setting some defaults
"""
self.__uVO = vo
self.__uGroup = group
self.__vo = False
self.__discoverSettings()
def __discoverSettings(self):
"""Discovers the vo and the setup"""
# Set the VO
globalVO = CSGlobals.getVO()
if globalVO:
self.__vo = globalVO
elif self.__uVO:
self.__vo = self.__uVO
elif self.__uGroup:
self.__vo = Registry.getVOForGroup(self.__uGroup)
if not self.__vo:
self.__vo = False
else:
result = getVOfromProxyGroup()
if result["OK"]:
self.__vo = result["Value"]
def __getCache(self):
Operations.__cacheLock.acquire()
try:
currentVersion = gConfigurationData.getVersion()
if currentVersion != Operations.__cacheVersion:
Operations.__cache = {}
Operations.__cacheVersion = currentVersion
cacheKey = (self.__vo,)
if cacheKey in Operations.__cache:
return Operations.__cache[cacheKey]
mergedCFG = CFG()
for path in self.__getSearchPaths():
pathCFG = gConfigurationData.mergedCFG[path]
if pathCFG:
mergedCFG = mergedCFG.mergeWith(pathCFG)
Operations.__cache[cacheKey] = mergedCFG
return Operations.__cache[cacheKey]
finally:
try:
Operations.__cacheLock.release()
except _thread.error:
pass
def __getSearchPaths(self):
paths = ["/Operations/Defaults"]
if not self.__vo:
globalVO = CSGlobals.getVO()
if not globalVO:
return paths
self.__vo = CSGlobals.getVO()
paths.append(f"/Operations/{self.__vo}/")
return paths
def getValue(self, optionPath, defaultValue=None):
return self.__getCache().getOption(optionPath, defaultValue)
def __getCFG(self, sectionPath):
cacheCFG = self.__getCache()
section = cacheCFG.getRecursive(sectionPath)
if not section:
return S_ERROR(ESECTION, f"{sectionPath} in Operations does not exist")
sectionCFG = section["value"]
if isinstance(sectionCFG, str):
return S_ERROR(f"{sectionPath} in Operations is not a section")
return S_OK(sectionCFG)
def getSections(self, sectionPath, listOrdered=False):
result = self.__getCFG(sectionPath)
if not result["OK"]:
return result
sectionCFG = result["Value"]
return S_OK(sectionCFG.listSections(listOrdered))
def METHOD_NAME(self, sectionPath, listOrdered=False):
result = self.__getCFG(sectionPath)
if not result["OK"]:
return result
sectionCFG = result["Value"]
return S_OK(sectionCFG.listOptions(listOrdered))
def getOptionsDict(self, sectionPath):
result = self.__getCFG(sectionPath)
if not result["OK"]:
return result
sectionCFG = result["Value"]
data = {}
for opName in sectionCFG.listOptions():
data[opName] = sectionCFG[opName]
return S_OK(data)
def getMonitoringBackends(self, monitoringType=None):
"""
Chooses the type of backend to use (Monitoring and/or Accounting) depending on the MonitoringType.
If a flag for the specified monitoringType is set, it will enable monitoring according to it,
otherwise it will use the `Default` value (Accounting set as default).
:param str monitoringType: monitoring type to specify
"""
if monitoringType and self.getValue(f"MonitoringBackends/{monitoringType}"):
return self.getValue(f"MonitoringBackends/{monitoringType}", [])
else:
return self.getValue("MonitoringBackends/Default", ["Accounting"])
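# Example (illustrative, hypothetical option): if MonitoringBackends/SomeType = Monitoring
# is set in /Operations, getMonitoringBackends("SomeType") returns ["Monitoring"];
# without any flag the default ["Accounting"] is returned.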
|
3,734 |
set up
|
# SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the OpenColorIO Project.
import copy
import unittest
import os
import sys
import PyOpenColorIO as OCIO
from UnitTestUtils import TEST_NAMES, TEST_DESCS
class LookTest(unittest.TestCase):
TEST_PROCESS_SPACES = ['raw', 'lnh', 'vd8', 'a.b.c.', '1-2-3-']
TEST_EXP_VALUES = [0.1, 0.2, 0.3, 0.4]
def METHOD_NAME(self):
self.look = OCIO.Look()
def tearDown(self):
self.look = None
def test_copy(self):
"""
Test the deepcopy() method.
"""
self.look.setName('test name')
self.look.setProcessSpace('test space')
self.look.setDescription('test description')
mat = OCIO.MatrixTransform()
self.look.setTransform(mat)
self.look.setInverseTransform(mat)
other = copy.deepcopy(self.look)
self.assertFalse(other is self.look)
self.assertEqual(other.getName(), self.look.getName())
self.assertEqual(other.getProcessSpace(), self.look.getProcessSpace())
self.assertEqual(other.getDescription(), self.look.getDescription())
self.assertTrue(other.getTransform().equals(self.look.getTransform()))
self.assertTrue(other.getInverseTransform().equals(self.look.getInverseTransform()))
def test_name(self):
"""
Test the setName() and getName() methods.
"""
# Default initialized name value is ""
self.assertEqual(self.look.getName(), '')
for name in TEST_NAMES:
self.look.setName(name)
self.assertEqual(name, self.look.getName())
# Wrong type tests.
for invalid in (None, 1):
with self.assertRaises(TypeError):
self.look.setName(invalid)
def test_process_space(self):
"""
Test the setProcessSpace() and getProcessSpace() methods.
"""
# Default initialized process space value is ""
self.assertEqual(self.look.getProcessSpace(), '')
for process_space in self.TEST_PROCESS_SPACES:
self.look.setProcessSpace(process_space)
self.assertEqual(process_space, self.look.getProcessSpace())
# Wrong type tests.
for invalid in (None, 1):
with self.assertRaises(TypeError):
self.look.setProcessSpace(invalid)
def test_description(self):
"""
Test the setDescription() and getDescription() methods.
"""
# Default initialized description value is ""
self.assertEqual(self.look.getDescription(), '')
for desc in TEST_DESCS:
self.look.setDescription(desc)
self.assertEqual(desc, self.look.getDescription())
# Wrong type tests.
for invalid in (None, 1):
with self.assertRaises(TypeError):
self.look.setDescription(invalid)
def test_transform(self):
"""
Test the setTransform() and getTransform() methods.
"""
# Default initialized transform value is None
self.assertIsNone(self.look.getTransform())
exp_tr = OCIO.ExponentTransform()
exp_tr.setValue(self.TEST_EXP_VALUES)
self.look.setTransform(exp_tr)
out_exp_tr = self.look.getTransform()
self.assertListEqual(out_exp_tr.getValue(), self.TEST_EXP_VALUES)
# Wrong type tests.
for invalid in (OCIO.ALLOCATION_UNIFORM, 1):
with self.assertRaises(TypeError):
self.look.setTransform(invalid)
def test_inverse_transform(self):
"""
Test the setInverseTransform() and getInverseTransform() methods.
"""
# Default initialized inverse transform value is None
self.assertIsNone(self.look.getInverseTransform())
exp_tr = OCIO.ExponentTransform()
inv_exp_values = [1.0 / v for v in self.TEST_EXP_VALUES]
exp_tr.setValue(inv_exp_values)
self.look.setInverseTransform(exp_tr)
inv_oet = self.look.getInverseTransform()
self.assertListEqual(inv_oet.getValue(), inv_exp_values)
# Wrong type tests.
for invalid in (OCIO.ALLOCATION_UNIFORM, 1):
with self.assertRaises(TypeError):
self.look.setInverseTransform(invalid)
def test_constructor_with_keyword(self):
"""
Test Look constructor with keywords and validate its values.
"""
# With keywords in their proper order.
exp_tr = OCIO.ExponentTransform()
inv_exp_tr = OCIO.ExponentTransform()
look = OCIO.Look(name='coollook',
processSpace='somespace',
transform=exp_tr,
inverseTransform=inv_exp_tr,
description='this is a test')
self.assertEqual(look.getName(), 'coollook')
self.assertEqual(look.getProcessSpace(), 'somespace')
self.assertIsInstance(look.getTransform(), type(exp_tr))
self.assertIsInstance(look.getInverseTransform(), type(inv_exp_tr))
self.assertEqual(look.getDescription(), 'this is a test')
# With keyword not in their proper order.
exp_tr2 = OCIO.ExponentTransform()
inv_exp_tr2 = OCIO.ExponentTransform()
look2 = OCIO.Look(inverseTransform=inv_exp_tr,
description='this is a test',
name='coollook',
processSpace='somespace',
transform=exp_tr)
self.assertEqual(look2.getName(), 'coollook')
self.assertEqual(look2.getProcessSpace(), 'somespace')
self.assertIsInstance(look2.getTransform(), type(exp_tr2))
self.assertIsInstance(look2.getInverseTransform(), type(inv_exp_tr2))
self.assertEqual(look2.getDescription(), 'this is a test')
def test_constructor_with_positional(self):
"""
Test Look constructor without keywords and validate its values.
"""
exp_tr = OCIO.ExponentTransform()
inv_exp_tr = OCIO.ExponentTransform()
look = OCIO.Look('coollook',
'somespace',
exp_tr,
inv_exp_tr,
'this is a test')
self.assertEqual(look.getName(), 'coollook')
self.assertEqual(look.getProcessSpace(), 'somespace')
self.assertIsInstance(look.getTransform(), type(exp_tr))
self.assertIsInstance(look.getInverseTransform(), type(inv_exp_tr))
self.assertEqual(look.getDescription(), 'this is a test')
def test_constructor_wrong_parameter_type(self):
"""
Test Look constructor with a wrong parameter type.
"""
for invalid in (None, 1):
with self.assertRaises(TypeError):
look = OCIO.Look(invalid)
|
3,735 |
test save load pretrained additional features
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class Blip2ProcessorTest(unittest.TestCase):
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
image_processor = BlipImageProcessor()
tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
processor = Blip2Processor(image_processor, tokenizer)
processor.save_pretrained(self.tmpdirname)
def get_tokenizer(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
def get_image_processor(self, **kwargs):
return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def prepare_image_inputs(self):
"""This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
or a list of PyTorch tensors if one specifies torchify=True.
"""
image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def METHOD_NAME(self):
processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
processor = Blip2Processor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, BlipImageProcessor)
def test_image_processor(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)
image_input = self.prepare_image_inputs()
input_feat_extract = image_processor(image_input, return_tensors="np")
input_processor = processor(images=image_input, return_tensors="np")
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
def test_tokenizer(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)
input_str = "lower newer"
encoded_processor = processor(text=input_str)
encoded_tok = tokenizer(input_str, return_token_type_ids=False)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def test_processor(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)
input_str = "lower newer"
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str, images=image_input)
self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
# test if it raises when no input is passed
with pytest.raises(ValueError):
processor()
def test_tokenizer_decode(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)
predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
decoded_processor = processor.batch_decode(predicted_ids)
decoded_tok = tokenizer.batch_decode(predicted_ids)
self.assertListEqual(decoded_tok, decoded_processor)
def test_model_input_names(self):
image_processor = self.get_image_processor()
tokenizer = self.get_tokenizer()
processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)
input_str = "lower newer"
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str, images=image_input)
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
|
3,736 |
test 01 cassandra stress
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright (c) 2022 ScyllaDB
import pytest
from sdcm.stress_thread import CassandraStressThread
from unit_tests.dummy_remote import LocalLoaderSetDummy
pytestmark = [
pytest.mark.usefixtures("events"),
pytest.mark.integration,
]
def METHOD_NAME(request, docker_scylla, params):
params['cs_debug'] = True
loader_set = LocalLoaderSetDummy()
cmd = (
"""cassandra-stress write cl=ONE duration=1m -schema 'replication(strategy=NetworkTopologyStrategy,replication_factor=1) """
"""compaction(strategy=SizeTieredCompactionStrategy)' -mode cql3 native """
"""-rate threads=10 -pop seq=1..10000000 -log interval=5"""
)
cs_thread = CassandraStressThread(
loader_set, cmd, node_list=[docker_scylla], timeout=120, params=params
)
def cleanup_thread():
cs_thread.kill()
request.addfinalizer(cleanup_thread)
cs_thread.run()
output = cs_thread.get_results()
assert "latency mean" in output[0]
assert float(output[0]["latency mean"]) > 0
assert "latency 99th percentile" in output[0]
assert float(output[0]["latency 99th percentile"]) > 0
def test_02_cassandra_stress_user_profile(request, docker_scylla, params):
loader_set = LocalLoaderSetDummy()
cmd = (
"cassandra-stress user profile=/tmp/cassandra-stress-custom.yaml ops'(insert=1,simple1=1)' "
"cl=ONE duration=1m -mode cql3 native -rate threads=1"
)
cs_thread = CassandraStressThread(
loader_set, cmd, node_list=[docker_scylla], timeout=120, params=params
)
def cleanup_thread():
cs_thread.kill()
request.addfinalizer(cleanup_thread)
cs_thread.run()
output = cs_thread.get_results()
assert "latency mean" in output[0]
assert float(output[0]["latency mean"]) > 0
assert "latency 99th percentile" in output[0]
assert float(output[0]["latency 99th percentile"]) > 0
@pytest.mark.docker_scylla_args(ssl=True)
def test_03_cassandra_stress_client_encrypt(request, docker_scylla, params):
loader_set = LocalLoaderSetDummy()
cmd = (
"""cassandra-stress write cl=ONE duration=1m -schema 'replication(strategy=NetworkTopologyStrategy,replication_factor=1) """
"""compaction(strategy=SizeTieredCompactionStrategy)' -mode cql3 native """
"""-rate threads=10 -pop seq=1..10000000 -log interval=5"""
)
cs_thread = CassandraStressThread(
loader_set,
cmd,
node_list=[docker_scylla],
timeout=120,
client_encrypt=True,
params=params,
)
def cleanup_thread():
cs_thread.kill()
request.addfinalizer(cleanup_thread)
cs_thread.run()
output = cs_thread.get_results()
assert "latency mean" in output[0]
assert float(output[0]["latency mean"]) > 0
assert "latency 99th percentile" in output[0]
assert float(output[0]["latency 99th percentile"]) > 0
def test_03_cassandra_stress_multi_region(request, docker_scylla, params):
loader_set = LocalLoaderSetDummy()
loader_set.test_config.set_multi_region(True)
request.addfinalizer(lambda: loader_set.test_config.set_multi_region(False))
cmd = (
"""cassandra-stress write cl=ONE duration=1m -schema 'replication(strategy=NetworkTopologyStrategy,replication_factor=1) """
"""compaction(strategy=SizeTieredCompactionStrategy)' -mode cql3 native """
"""-rate threads=10 -pop seq=1..10000000 -log interval=5"""
)
cs_thread = CassandraStressThread(
loader_set, cmd, node_list=[docker_scylla], timeout=120, params=params
)
def cleanup_thread():
cs_thread.kill()
request.addfinalizer(cleanup_thread)
cs_thread.run()
output = cs_thread.get_results()
assert "latency mean" in output[0]
assert float(output[0]["latency mean"]) > 0
assert "latency 99th percentile" in output[0]
assert float(output[0]["latency 99th percentile"]) > 0
|
3,737 |
calculate bucket index
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""For internal use only; no backwards-compatibility guarantees."""
# pytype: skip-file
globals()['INT64_MAX'] = 2**63 - 1
globals()['INT64_MIN'] = -2**63
POWER_TEN = [
10e-1,
10e0,
10e1,
10e2,
10e3,
10e4,
10e5,
10e6,
10e7,
10e8,
10e9,
10e10,
10e11,
10e12,
10e13,
10e14,
10e15,
10e16,
10e17,
10e18
]
def get_log10_round_to_floor(element):
power = 0
while element >= POWER_TEN[power]:
power += 1
return power - 1
class DataflowDistributionCounter(object):
"""Pure python DataflowDistributionCounter in case Cython not available.
Please avoid using python mode if possible, since it's super slow
Cythonized DatadflowDistributionCounter defined in
apache_beam.transforms.cy_dataflow_distribution_counter.
Currently using special bucketing strategy suitable for Dataflow
Attributes:
min: minimum value of all inputs.
max: maximum value of all inputs.
count: total count of all inputs.
sum: sum of all inputs.
buckets: histogram buckets of value counts for a
distribution (1, 2, 5 bucketing). Max bucket_index is 58 (sys.maxint as input).
is_cythonized: mark whether DataflowDistributionCounter cythonized.
"""
# Assume the max input is sys.maxint, then the possible max bucket size is 59
MAX_BUCKET_SIZE = 59
# 3 buckets for every power of ten -> 1, 2, 5
BUCKET_PER_TEN = 3
def __init__(self):
global INT64_MAX # pylint: disable=global-variable-not-assigned
self.min = INT64_MAX
self.max = 0
self.count = 0
self.sum = 0
self.buckets = [0] * self.MAX_BUCKET_SIZE
self.is_cythonized = False
def add_input(self, element):
if element < 0:
raise ValueError('Distribution counters support only non-negative value')
self.min = min(self.min, element)
self.max = max(self.max, element)
self.count += 1
self.sum += element
bucket_index = self.METHOD_NAME(element)
self.buckets[bucket_index] += 1
def add_input_n(self, element, n):
if element < 0:
raise ValueError('Distribution counters support only non-negative value')
self.min = min(self.min, element)
self.max = max(self.max, element)
self.count += n
self.sum += element * n
bucket_index = self.METHOD_NAME(element)
self.buckets[bucket_index] += n
def METHOD_NAME(self, element):
"""Calculate the bucket index for the given element."""
if element == 0:
return 0
log10_floor = get_log10_round_to_floor(element)
power_of_ten = POWER_TEN[log10_floor]
if element < power_of_ten * 2:
bucket_offset = 0
elif element < power_of_ten * 5:
bucket_offset = 1
else:
bucket_offset = 2
return 1 + log10_floor * self.BUCKET_PER_TEN + bucket_offset
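# Worked examples of the 1-2-5 bucketing implemented above (values traced through
# this code path, not taken from the original source): element 0 -> bucket 0,
# 1 -> 1, 3 -> 2, 7 -> 3, 50 -> 6; i.e. bucket boundaries are
# 0, [1, 2), [2, 5), [5, 10), [10, 20), ...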
def translate_to_histogram(self, histogram):
"""Translate buckets into Histogram.
Args:
histogram: apache_beam.runners.dataflow.internal.clients.dataflow.Histogram
Ideally, only call this function when reporting counter to
dataflow service.
"""
first_bucket_offset = 0
last_bucket_offset = 0
for index in range(0, self.MAX_BUCKET_SIZE):
if self.buckets[index] != 0:
first_bucket_offset = index
break
for index in range(self.MAX_BUCKET_SIZE - 1, -1, -1):
if self.buckets[index] != 0:
last_bucket_offset = index
break
histogram.firstBucketOffset = first_bucket_offset
histogram.bucketCounts = (
self.buckets[first_bucket_offset:last_bucket_offset + 1])
def extract_output(self):
global INT64_MIN # pylint: disable=global-variable-not-assigned
global INT64_MAX # pylint: disable=global-variable-not-assigned
if not INT64_MIN <= self.sum <= INT64_MAX:
self.sum %= 2**64
if self.sum >= INT64_MAX:
self.sum -= 2**64
mean = self.sum // self.count if self.count else float('nan')
return mean, self.sum, self.count, self.min, self.max
def merge(self, accumulators):
raise NotImplementedError()
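# Minimal usage sketch for the pure-Python counter above (input values are
# hypothetical; expected results follow from the methods defined in this class):
#
#     counter = DataflowDistributionCounter()
#     for value in (1, 3, 50):
#         counter.add_input(value)
#     counter.extract_output()   # -> (18, 54, 3, 1, 50), i.e. (mean, sum, count, min, max)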
|
3,738 |
flatten
|
"""Generic Rules for Diofant.
This file assumes knowledge of Basic and little else.
"""
import functools
from ..utilities.iterables import sift
from .basic import Atom, Basic
__all__ = ('arguments', 'operator', 'term', 'rm_id',
'glom', 'flatten', 'unpack', 'sort')
@functools.singledispatch
def arguments(o):
"""Extract arguments from an expression."""
return o.args
@arguments.register(int)
@arguments.register(Atom)
def arguments_atomic(o):
return ()
@functools.singledispatch
def operator(o):
"""Extract the head of an expression."""
return o.func
@operator.register(int)
@operator.register(Atom)
def operator_atomic(o):
return o
@functools.singledispatch
def term(op, args):
"""Build an expression from the head and arguments."""
return op(*args)
@term.register(int)
@term.register(Atom)
def term_atomic(op, args):
return op
# Functions that create rules
def rm_id(isid):
"""Create a rule to remove identities
isid - fn :: x -> Bool --- whether or not this element is an identity
>>> remove_zeros = rm_id(lambda x: x == 0)
>>> remove_zeros(Basic(1, 0, 2))
Basic(1, 2)
>>> remove_zeros(Basic(0, 0)) # If there are only identities then we keep one
Basic(0)
See Also
========
unpack
"""
def ident_remove(expr):
"""Remove identities."""
ids = list(map(isid, arguments(expr)))
if sum(ids) == 0: # No identities. Common case
return expr
if sum(ids) != len(ids): # there is at least one non-identity
return term(operator(expr),
[arg for arg, x in zip(arguments(expr), ids) if not x])
return term(operator(expr), [arguments(expr)[0]])
return ident_remove
def glom(key, count, combine):
"""Create a rule to conglomerate identical args.
>>> def key(x):
... return x.as_coeff_Mul()[1]
>>> def count(x):
... return x.as_coeff_Mul()[0]
>>> def combine(cnt, arg):
... return cnt * arg
>>> rl = glom(key, count, combine)
>>> rl(Add(x, -x, 3*x, 2, 3, evaluate=False))
3*x + 5
Wait, how are key, count and combine supposed to work?
>>> key(2*x)
x
>>> count(2*x)
2
>>> combine(2, x)
2*x
"""
def conglomerate(expr):
"""Conglomerate together identical args x + x -> 2x."""
groups = sift(arguments(expr), key)
counts = {k: sum(map(count, args)) for k, args in groups.items()}
newargs = [combine(cnt, mat) for mat, cnt in counts.items()]
if set(newargs) != set(arguments(expr)):
return term(operator(expr), newargs)
return expr
return conglomerate
def sort(key):
"""Create a rule to sort by a key function.
>>> sort_rl = sort(str)
>>> sort_rl(Basic(3, 1, 2))
Basic(1, 2, 3)
"""
def sort_rl(expr):
return term(operator(expr), sorted(arguments(expr), key=key))
return sort_rl
# Functions that are rules
def unpack(expr):
"""Rule to unpack singleton args.
>>> unpack(Basic(2))
2
"""
if len(arguments(expr)) == 1:
return arguments(expr)[0]
return expr
def METHOD_NAME(expr):
"""Flatten T(a, b, T(c, d), T2(e)) to T(a, b, c, d, T2(e))."""
cls = operator(expr)
args = []
for arg in arguments(expr):
if operator(arg) == cls:
args.extend(arguments(arg))
else:
args.append(arg)
return term(cls, args)
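# For illustration, the flattening rule above collapses nested applications of the
# same head, e.g. Basic(1, 2, Basic(3, 4)) -> Basic(1, 2, 3, 4), while leaving
# sub-expressions with a different head untouched.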
def identity(x):
return x
def switch(key, ruledict):
"""Select a rule based on the result of key called on the function."""
def switch_rl(expr):
rl = ruledict.get(key(expr), identity)
return rl(expr)
return switch_rl
def typed(ruletypes):
"""Apply rules based on the expression type.
Examples
========
>>> rm_zeros = rm_id(lambda x: x == 0)
>>> rm_ones = rm_id(lambda x: x == 1)
>>> remove_idents = typed({Add: rm_zeros, Mul: rm_ones})
"""
return switch(type, ruletypes)
def treeapply(tree, join, leaf=identity):
"""Apply functions onto recursive containers (tree).
join - a dictionary mapping container types to functions
e.g. ``{list: minimize, tuple: chain}``
Keys are containers/iterables. Values are functions [a] -> a.
Examples
========
>>> tree = [(3, 2), (4, 1)]
>>> treeapply(tree, {list: max, tuple: min})
2
>>> def mul(*args):
... total = 1
... for arg in args:
... total *= arg
... return total
>>> treeapply(tree, {list: mul, tuple: lambda *args: sum(args)})
25
"""
for typ in join:
if isinstance(tree, typ):
return join[typ](*map(functools.partial(treeapply, join=join, leaf=leaf),
tree))
return leaf(tree)
def minimize(*rules, objective=identity):
"""Select result of rules that minimizes objective.
Examples
========
>>> from diofant.core.strategies import minimize
>>> rl = minimize(lambda x: x + 1, lambda x: x - 1)
>>> rl(4)
3
"""
def minrule(expr):
return min((rule(expr) for rule in rules), key=objective)
return minrule
def chain(*rules):
"""Compose a sequence of rules so that they apply to the expr sequentially."""
def chain_rl(expr):
for rule in rules:
expr = rule(expr)
return expr
return chain_rl
def greedy(tree, objective=identity, **kwargs):
"""Execute a strategic tree. Select alternatives greedily,
Examples
========
>>> tree = [lambda x: x + 1,
... (lambda x: x - 1, lambda x: 2*x)] # either inc or dec-then-double
>>> fn = greedy(tree)
>>> fn(4) # lowest value comes from the inc
5
>>> fn(1) # lowest value comes from dec then double
0
This function selects between options in a tuple. The result is chosen that
minimizes the objective function.
>>> fn = greedy(tree, objective=lambda x: -x) # maximize
>>> fn(4) # highest value comes from the dec then double
6
>>> fn(1) # highest value comes from the inc
2
"""
optimize = functools.partial(minimize, objective=objective)
return treeapply(tree, {list: optimize, tuple: chain}, **kwargs)
def do_one(rules):
"""Try each of the rules until one works. Then stop."""
def do_one_rl(expr):
for rl in rules:
result = rl(expr)
if result != expr:
return result
return expr
return do_one_rl
def condition(cond, rule):
"""Only apply rule if condition is true."""
def conditioned_rl(expr):
if cond(expr):
return rule(expr)
return expr
return conditioned_rl
def exhaust(rule):
"""Apply a rule repeatedly until it has no effect."""
def exhaustive_rl(expr):
new, old = rule(expr), expr
while new != old:
new, old = rule(new), new
return new
return exhaustive_rl
basic_fns = {'op': type,
'new': Basic.__new__,
'leaf': lambda x: not isinstance(x, Basic) or x.is_Atom,
'children': lambda x: x.args}
def sall(rule, fns=basic_fns):
"""Strategic all - apply rule to args."""
op, new, children, leaf = map(fns.get, ('op', 'new', 'children', 'leaf'))
def all_rl(expr):
if leaf(expr):
return expr
args = map(rule, children(expr))
return new(op(expr), *args)
return all_rl
def bottom_up(rule, fns=basic_fns):
"""Apply a rule down a tree running it on the bottom nodes first."""
def rec(expr):
return sall(bottom_up(rule, fns), fns)(expr)
return chain(rec, rule)
def null_safe(rule):
"""Return original expr if rule returns None."""
def null_safe_rl(expr):
result = rule(expr)
if result is None:
return expr
return result
return null_safe_rl
|
3,739 |
naws
|
"""
Parser for the Telnet protocol. (Not a complete implementation of the telnet
specification, but sufficient for a command line interface.)
Inspired by `Twisted.conch.telnet`.
"""
from __future__ import unicode_literals
import struct
from six import int2byte, binary_type, iterbytes
from .log import logger
__all__ = (
'TelnetProtocolParser',
)
# Telnet constants.
NOP = int2byte(0)
SGA = int2byte(3)
IAC = int2byte(255)
DO = int2byte(253)
DONT = int2byte(254)
LINEMODE = int2byte(34)
SB = int2byte(250)
WILL = int2byte(251)
WONT = int2byte(252)
MODE = int2byte(1)
SE = int2byte(240)
ECHO = int2byte(1)
NAWS = int2byte(31)
LINEMODE = int2byte(34)
SUPPRESS_GO_AHEAD = int2byte(3)
DM = int2byte(242)
BRK = int2byte(243)
IP = int2byte(244)
AO = int2byte(245)
AYT = int2byte(246)
EC = int2byte(247)
EL = int2byte(248)
GA = int2byte(249)
class TelnetProtocolParser(object):
"""
Parser for the Telnet protocol.
Usage::
def data_received(data):
print(data)
def size_received(rows, columns):
print(rows, columns)
p = TelnetProtocolParser(data_received, size_received)
p.feed(binary_data)
"""
def __init__(self, data_received_callback, size_received_callback):
self.data_received_callback = data_received_callback
self.size_received_callback = size_received_callback
self._parser = self._parse_coroutine()
self._parser.send(None)
def received_data(self, data):
self.data_received_callback(data)
def do_received(self, data):
""" Received telnet DO command. """
logger.info('DO %r', data)
def dont_received(self, data):
""" Received telnet DONT command. """
logger.info('DONT %r', data)
def will_received(self, data):
""" Received telnet WILL command. """
logger.info('WILL %r', data)
def wont_received(self, data):
""" Received telnet WONT command. """
logger.info('WONT %r', data)
def command_received(self, command, data):
if command == DO:
self.do_received(data)
elif command == DONT:
self.dont_received(data)
elif command == WILL:
self.will_received(data)
elif command == WONT:
self.wont_received(data)
else:
logger.info('command received %r %r', command, data)
def METHOD_NAME(self, data):
"""
Received NAWS. (Window dimensions.)
"""
if len(data) == 4:
# NOTE: the first parameter of struct.unpack should be
# a 'str' object. Both on Py2/py3. This crashes on OSX
# otherwise.
columns, rows = struct.unpack(str('!HH'), data)
self.size_received_callback(rows, columns)
else:
logger.warning('Wrong number of NAWS bytes')
def negotiate(self, data):
"""
Got negotiate data.
"""
command, payload = data[0:1], data[1:]
assert isinstance(command, bytes)
if command == NAWS:
self.METHOD_NAME(payload)
else:
logger.info('Negotiate (%r got bytes)', len(data))
def _parse_coroutine(self):
"""
Parser state machine.
Every 'yield' expression returns the next byte.
"""
while True:
d = yield
if d == int2byte(0):
pass # NOP
# Go to state escaped.
elif d == IAC:
d2 = yield
if d2 == IAC:
self.received_data(d2)
# Handle simple commands.
elif d2 in (NOP, DM, BRK, IP, AO, AYT, EC, EL, GA):
self.command_received(d2, None)
# Handle IAC-[DO/DONT/WILL/WONT] commands.
elif d2 in (DO, DONT, WILL, WONT):
d3 = yield
self.command_received(d2, d3)
# Subnegotiation
elif d2 == SB:
# Consume everything until next IAC-SE
data = []
while True:
d3 = yield
if d3 == IAC:
d4 = yield
if d4 == SE:
break
else:
data.append(d4)
else:
data.append(d3)
self.negotiate(b''.join(data))
else:
self.received_data(d)
def feed(self, data):
"""
Feed data to the parser.
"""
assert isinstance(data, binary_type)
for b in iterbytes(data):
self._parser.send(int2byte(b))
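# Minimal usage sketch (the callbacks and window size are hypothetical): feeding an
# IAC SB NAWS ... IAC SE subnegotiation reports the window size, while ordinary bytes
# go to the data callback.
#
#     parser = TelnetProtocolParser(
#         data_received_callback=lambda data: print('data', data),
#         size_received_callback=lambda rows, cols: print('size', rows, cols))
#     parser.feed(IAC + SB + NAWS + struct.pack(str('!HH'), 80, 24) + IAC + SE)
#     # NAWS payload is (columns, rows), so this prints "size 24 80"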
|
3,740 |
get dims descriptor
|
# Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
from dataclasses import dataclass
from typing import Optional, Tuple, TypeVar
import numpy as np
from nncf.common.graph import NNCFGraph
from nncf.common.graph import NNCFNode
from nncf.common.graph.layer_attributes import ConvolutionLayerAttributes
from nncf.common.graph.transformations.commands import TargetPoint
from nncf.common.graph.transformations.commands import TargetType
from nncf.common.tensor_statistics.collectors import TensorStatisticCollectorBase
from nncf.common.utils.registry import Registry
TModel = TypeVar("TModel")
ALGO_BACKENDS = Registry("algo_backends")
@dataclass
class LayoutDescriptor:
"""
Container to store convolutional and linear layers layout information.
"""
conv_weight_out_channels_dim: int
conv_weight_in_channels_dim: int
bias_channels_dim: int
class ChannelAlignmentAlgoBackend:
@staticmethod
def target_point(target_type: TargetType, target_node_name: str, port_id: int) -> TargetPoint:
"""
Returns backend-specific target point.
:param target_type: Type of the location that should be modified.
:param target_node_name: Name of the located node.
:param port_id: id of the port for the statistics distribution.
:return: Backend-specific TargetPoint.
"""
@staticmethod
@abstractmethod
def get_bias_value(node: NNCFNode, model: TModel, nncf_graph: NNCFGraph) -> np.ndarray:
"""
Returns bias value in the NumPy format of provided node.
:param node: Node of NNCFGraph with bias value.
:param model: Backend-specific model for the initializer finding.
:param nncf_graph: NNCFGraph instance with the node.
:return: Bias value in the NumPy format.
"""
@staticmethod
@abstractmethod
def get_weight_value(node: NNCFNode, model: TModel, port_id: int) -> np.ndarray:
"""
Returns the weight value in the NumPy format of the provided node.
:param node: Node of NNCFGraph with a weight.
:param model: Backend-specific model for the initializer finding.
:param port_id: Port id of the weight constant input.
:return: Weight value in the NumPy format.
"""
@staticmethod
@abstractmethod
def get_activation_port_ids_for_node(node: NNCFNode) -> Tuple[int, int]:
"""
Returns Input Port ID and Output Port ID corresponding to activation input and output edges for
the node.
Supports only nodes that could have bias value.
:param node: Node of NNCFGraph with bias value.
"""
@staticmethod
@abstractmethod
def get_weights_port_ids_for_node(node: NNCFNode) -> Tuple[int, int]:
"""
Returns the input port ID and output port ID corresponding to the node's weight input port
and the weight constant's output port.
:param node: Node of NNCFGraph.
"""
@staticmethod
@abstractmethod
def get_statistic_collector(
reduction_shape, q: float, num_samples: int, inplace: bool
) -> TensorStatisticCollectorBase:
"""
Get backend-specific tensor collector that collects medians of minimal and maximal quantiles.
:param reduction_shape: Target reduction shape for the reduction.
:param q: Minimal quantile for the tensor collector.
:param num_samples: Num samples to collect by the tensor collector.
:param inplace: Should statistic be calculated inplace or out of place.
:return: Backend-specific tensor collector that collects medians of minimal and maximal quantiles.
"""
@staticmethod
@abstractmethod
def is_node_with_bias(node: NNCFNode, nncf_graph: NNCFGraph) -> bool:
"""
Checks if the node has a bias or not.
:param node: The node to check.
:param nncf_graph: The NNCF graph.
:return: `True` if `node` corresponds to the operation with bias
(bias is added to the output tensor of that operation), `False` otherwise.
"""
@staticmethod
@abstractmethod
def METHOD_NAME(node: NNCFNode) -> LayoutDescriptor:
"""
Return weights layout descriptor of the given node if it is possible and None otherwise.
Only convolutional and linear nodes are supported.
:param node: NNCFNode to get layout descriptor from.
:return: Weights layout descriptor of the given node if it is possible and None otherwise.
"""
@staticmethod
@abstractmethod
def get_conv_layer_attributes(node: NNCFNode) -> Optional[ConvolutionLayerAttributes]:
"""
Returns convolutional layer attributes of given node if they are present and None otherwise.
:param node: NNCFNode to take convolutional layer attributes from.
:return: Convolutional layer attributes of given node if they are present and None otherwise
"""
|
3,741 |
approve import
|
import os,sys, subprocess
from tinyrpc.transports import ServerTransport
from tinyrpc.protocols.jsonrpc import JSONRPCProtocol
from tinyrpc.dispatch import public,RPCDispatcher
from tinyrpc.server import RPCServer
""" This is a POC example of how to write a custom UI for Clef. The UI starts the
clef process with the '--stdio-ui' option, and communicates with clef using standard input / output.
The standard input/output is a relatively secure way to communicate, as it does not require opening any ports
or IPC files. Needless to say, it does not protect against memory inspection mechanisms where an attacker
can access process memory."""
try:
import urllib.parse as urlparse
except ImportError:
import urllib as urlparse
class StdIOTransport(ServerTransport):
""" Uses std input/output for RPC """
def receive_message(self):
return None, urlparse.unquote(sys.stdin.readline())
def send_reply(self, context, reply):
print(reply)
class PipeTransport(ServerTransport):
""" Uses std a pipe for RPC """
def __init__(self,input, output):
self.input = input
self.output = output
def receive_message(self):
data = self.input.readline()
print(">> {}".format( data))
return None, urlparse.unquote(data)
def send_reply(self, context, reply):
print("<< {}".format( reply))
self.output.write(reply)
self.output.write("\n")
class StdIOHandler():
def __init__(self):
pass
@public
def ApproveTx(self,req):
"""
Example request:
{
"jsonrpc": "2.0",
"method": "ApproveTx",
"params": [{
"transaction": {
"to": "0xae967917c465db8578ca9024c205720b1a3651A9",
"gas": "0x333",
"gasPrice": "0x123",
"value": "0x10",
"data": "0xd7a5865800000000000000000000000000000000000000000000000000000000000000ff",
"nonce": "0x0"
},
"from": "0xAe967917c465db8578ca9024c205720b1a3651A9",
"call_info": "Warning! Could not validate ABI-data against calldata\nSupplied ABI spec does not contain method signature in data: 0xd7a58658",
"meta": {
"remote": "127.0.0.1:34572",
"local": "localhost:8550",
"scheme": "HTTP/1.1"
}
}],
"id": 1
}
:param transaction: transaction info
:param call_info: info about the call, e.g. if the ABI data could not be validated
:param meta: metadata about the request, e.g. where the call comes from
:return:
"""
transaction = req.get('transaction')
_from = req.get('from')
call_info = req.get('call_info')
meta = req.get('meta')
return {
"approved" : False,
#"transaction" : transaction,
# "from" : _from,
# "password" : None,
}
@public
def ApproveSignData(self, req):
""" Example request
"""
return {"approved": False, "password" : None}
@public
def ApproveExport(self, req):
""" Example request
"""
return {"approved" : False}
@public
def METHOD_NAME(self, req):
""" Example request
"""
return { "approved" : False, "old_password": "", "new_password": ""}
@public
def ApproveListing(self, req):
""" Example request
"""
return {'accounts': []}
@public
def ApproveNewAccount(self, req):
"""
Example request
:return:
"""
return {"approved": False,
#"password": ""
}
@public
def ShowError(self,message = {}):
"""
Example request:
{"jsonrpc":"2.0","method":"ShowInfo","params":{"message":"Testing 'ShowError'"},"id":1}
:param message: to show
:return: nothing
"""
if 'text' in message.keys():
sys.stderr.write("Error: {}\n".format( message['text']))
return
@public
def ShowInfo(self,message = {}):
"""
Example request
{"jsonrpc":"2.0","method":"ShowInfo","params":{"message":"Testing 'ShowInfo'"},"id":0}
:param message: to display
:return:nothing
"""
if 'text' in message.keys():
sys.stdout.write("Error: {}\n".format( message['text']))
return
def main(args):
cmd = ["clef", "--stdio-ui"]
if len(args) > 0 and args[0] == "test":
cmd.extend(["--stdio-ui-test"])
print("cmd: {}".format(" ".join(cmd)))
dispatcher = RPCDispatcher()
dispatcher.register_instance(StdIOHandler(), '')
# line buffered
p = subprocess.Popen(cmd, bufsize=1, universal_newlines=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
rpc_server = RPCServer(
PipeTransport(p.stdout, p.stdin),
JSONRPCProtocol(),
dispatcher
)
rpc_server.serve_forever()
if __name__ == '__main__':
main(sys.argv[1:])
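# Usage sketch: run this script with a clef binary available on PATH. Passing "test"
# as the first argument appends --stdio-ui-test to the clef command line (see main()
# above), which is intended to make clef exercise the handlers with test requests.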
|
3,742 |
pre launch
|
"""
QEMU qtest library
qtest offers the QEMUQtestProtocol and QEMUQTestMachine classes, which
offer a connection to QEMU's qtest protocol socket, and a qtest-enabled
subclass of QEMUMachine, respectively.
"""
# Copyright (C) 2015 Red Hat Inc.
#
# Authors:
# Fam Zheng <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
#
# Based on qmp.py.
#
import os
import socket
from typing import (
List,
Optional,
Sequence,
TextIO,
)
from .machine import QEMUMachine
from .qmp import SocketAddrT
class QEMUQtestProtocol:
"""
QEMUQtestProtocol implements a connection to a qtest socket.
:param address: QEMU address, can be either a unix socket path (string)
or a tuple in the form ( address, port ) for a TCP
connection
:param server: server mode, listens on the socket (bool)
:raise socket.error: on socket connection errors
.. note::
No connection is established by __init__(); this is done
by the connect() or accept() methods.
"""
def __init__(self, address: SocketAddrT,
server: bool = False):
self._address = address
self._sock = self._get_sock()
self._sockfile: Optional[TextIO] = None
if server:
self._sock.bind(self._address)
self._sock.listen(1)
def _get_sock(self) -> socket.socket:
if isinstance(self._address, tuple):
family = socket.AF_INET
else:
family = socket.AF_UNIX
return socket.socket(family, socket.SOCK_STREAM)
def connect(self) -> None:
"""
Connect to the qtest socket.
@raise socket.error on socket connection errors
"""
self._sock.connect(self._address)
self._sockfile = self._sock.makefile(mode='r')
def accept(self) -> None:
"""
Await connection from QEMU.
@raise socket.error on socket connection errors
"""
self._sock, _ = self._sock.accept()
self._sockfile = self._sock.makefile(mode='r')
def cmd(self, qtest_cmd: str) -> str:
"""
Send a qtest command on the wire.
@param qtest_cmd: qtest command text to be sent
"""
assert self._sockfile is not None
self._sock.sendall((qtest_cmd + "\n").encode('utf-8'))
resp = self._sockfile.readline()
return resp
def close(self) -> None:
"""
Close this socket.
"""
self._sock.close()
if self._sockfile:
self._sockfile.close()
self._sockfile = None
def settimeout(self, timeout: Optional[float]) -> None:
"""Set a timeout, in seconds."""
self._sock.settimeout(timeout)
class QEMUQtestMachine(QEMUMachine):
"""
A QEMU VM, with a qtest socket available.
"""
def __init__(self,
binary: str,
args: Sequence[str] = (),
name: Optional[str] = None,
test_dir: str = "/var/tmp",
socket_scm_helper: Optional[str] = None,
sock_dir: Optional[str] = None):
if name is None:
name = "qemu-%d" % os.getpid()
if sock_dir is None:
sock_dir = test_dir
super().__init__(binary, args, name=name, test_dir=test_dir,
socket_scm_helper=socket_scm_helper,
sock_dir=sock_dir)
self._qtest: Optional[QEMUQtestProtocol] = None
self._qtest_path = os.path.join(sock_dir, name + "-qtest.sock")
@property
def _base_args(self) -> List[str]:
args = super()._base_args
args.extend([
'-qtest', f"unix:path={self._qtest_path}",
'-accel', 'qtest'
])
return args
def METHOD_NAME(self) -> None:
super().METHOD_NAME()
self._qtest = QEMUQtestProtocol(self._qtest_path, server=True)
def _post_launch(self) -> None:
assert self._qtest is not None
super()._post_launch()
self._qtest.accept()
def _post_shutdown(self) -> None:
super()._post_shutdown()
self._remove_if_exists(self._qtest_path)
def qtest(self, cmd: str) -> str:
"""
Send a qtest command to the guest.
:param cmd: qtest command to send
:return: qtest server response
"""
if self._qtest is None:
raise RuntimeError("qtest socket not available")
return self._qtest.cmd(cmd)
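# Minimal usage sketch (binary path, extra args and qtest command are hypothetical):
#
#     vm = QEMUQtestMachine('/usr/bin/qemu-system-x86_64', args=['-display', 'none'])
#     vm.launch()
#     try:
#         print(vm.qtest('readb 0x0'))   # send a raw qtest command over the qtest socket
#     finally:
#         vm.shutdown()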
|
3,743 |
get api version
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=unused-import
from azure.cli.core.profiles._shared import AZURE_API_PROFILES, ResourceType, CustomResourceType, PROFILE_TYPE, \
SDKProfile, AD_HOC_API_VERSIONS
def METHOD_NAME(cli_ctx, resource_type, as_sdk_profile=False):
""" Get the current API version for a given resource_type.
:param resource_type: The resource type.
:type resource_type: ResourceType.
:param bool as_sdk_profile: Return SDKProfile instance.
:returns: The API version
Can return a tuple<operation_group, str> if the resource_type supports SDKProfile.
:rtype: str or tuple[str]
"""
from azure.cli.core.profiles._shared import METHOD_NAME as _sdk_get_api_version
return _sdk_get_api_version(cli_ctx.cloud.profile, resource_type, as_sdk_profile)
def supported_api_version(cli_ctx, resource_type, min_api=None, max_api=None, operation_group=None):
""" Method to check if the current API version for a given resource_type is supported.
If resource_type is set to None, the current profile version will be used as the basis of
the comparison.
:param resource_type: The resource type.
:type resource_type: ResourceType.
:param min_api: The minimum API that is supported (inclusive). Omit for no minimum constraint.
"type min_api: str
:param max_api: The maximum API that is supported (inclusive). Omit for no maximum constraint.
:type max_api: str
:returns: True if the current API version of resource_type satisfies the min/max constraints. False otherwise.
Can return a tuple<operation_group, bool> if the resource_type supports SDKProfile.
:rtype: bool or tuple[bool]
"""
from azure.cli.core.profiles._shared import supported_api_version as _sdk_supported_api_version
return _sdk_supported_api_version(cli_ctx.cloud.profile,
resource_type=resource_type,
min_api=min_api,
max_api=max_api,
operation_group=operation_group)
def supported_resource_type(cli_ctx, resource_type):
from azure.cli.core.profiles._shared import supported_resource_type as _supported_resource_type
return _supported_resource_type(cli_ctx.cloud.profile,
resource_type=resource_type)
def get_sdk(cli_ctx, resource_type, *attr_args, **kwargs):
""" Get any SDK object that's versioned using the current API version for resource_type.
Supported keyword arguments:
checked - A boolean specifying if this method should suppress/check import exceptions
or not. By default, None is returned.
mod - A string specifying the submodule that all attr_args should be prefixed with.
operation_group - A string specifying the operation group name we want models.
Example usage:
Get a single SDK model.
TableService = get_sdk(resource_type, 'table#TableService')
File, Directory = get_sdk(resource_type,
'file.models#File',
'file.models#Directory')
Same as above but get multiple models where File and Directory are both part of
'file.models' and we don't want to specify each full path.
File, Directory = get_sdk(resource_type,
'File',
'Directory',
mod='file.models')
VirtualMachine = get_sdk(resource_type,
'VirtualMachine',
mod='models',
operation_group='virtual_machines')
:param resource_type: The resource type.
:type resource_type: ResourceType.
:param attr_args: Positional arguments for paths to objects to get.
:type attr_args: str
:param kwargs: Keyword arguments.
:type kwargs: str
:returns: object -- e.g. an SDK module, model, enum, attribute. The number of objects returned
depends on len(attr_args).
"""
from azure.cli.core.profiles._shared import get_versioned_sdk as _sdk_get_versioned_sdk
return _sdk_get_versioned_sdk(cli_ctx.cloud.profile, resource_type, *attr_args, **kwargs)
# API Profiles currently supported in the CLI.
API_PROFILES = {
'latest': AZURE_API_PROFILES['latest'],
'2017-03-09-profile': AZURE_API_PROFILES['2017-03-09-profile'],
'2018-03-01-hybrid': AZURE_API_PROFILES['2018-03-01-hybrid'],
'2019-03-01-hybrid': AZURE_API_PROFILES['2019-03-01-hybrid'],
'2020-09-01-hybrid': AZURE_API_PROFILES['2020-09-01-hybrid']
}
def register_resource_type(profile_name, resource_type, api_version):
err_msg = "Failed to add resource type to profile '{p}': "
if not isinstance(resource_type, CustomResourceType):
raise TypeError((err_msg + "resource_type should be of type {c}, got {r}.").format(p=profile_name,
c=CustomResourceType,
r=type(resource_type)))
try:
API_PROFILES[profile_name].update({resource_type: api_version})
except KeyError:
raise ValueError((err_msg + "Profile '{p}' not found.").format(p=profile_name))
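# For illustration (the resource type and versions are hypothetical): an extension can
# register its own resource type against an existing profile, e.g.
#
#     my_type = CustomResourceType('azure.mgmt.mything', 'MyThingManagementClient')
#     register_resource_type('latest', my_type, '2021-01-01')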
|
3,744 |
config options
|
from conan import ConanFile
from conan.tools.build import check_min_cppstd
from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, rmdir, save
import os
import textwrap
required_conan_version = ">=1.52.0"
class LibE57FormatConan(ConanFile):
name = "libe57format"
license = "BSL-1.0"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/asmaloney/libE57Format"
description = "Library for reading & writing the E57 file format"
topics = ("e57", "io", "point-cloud")
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
}
def export_sources(self):
export_conandata_patches(self)
def METHOD_NAME(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
try:
del self.options.fPIC
except Exception:
pass
def layout(self):
cmake_layout(self, src_folder="src")
def requirements(self):
self.requires("xerces-c/3.2.3")
def validate(self):
if self.info.settings.compiler.get_safe("cppstd"):
check_min_cppstd(self, "11")
def source(self):
get(self, **self.conan_data["sources"][self.version],
destination=self.source_folder, strip_root=True)
def generate(self):
tc = CMakeToolchain(self)
tc.variables["USING_STATIC_XERCES"] = not self.dependencies["xerces-c"].options.shared
tc.generate()
deps = CMakeDeps(self)
deps.generate()
def build(self):
apply_conandata_patches(self)
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
copy(self, "LICENSE.md", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
cmake = CMake(self)
cmake.install()
rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
# TODO: to remove in conan v2 once cmake_find_package* generators removed
self._create_cmake_module_alias_targets(
os.path.join(self.package_folder, self._module_file_rel_path),
{"E57Format": "E57Format::E57Format"}
)
def _create_cmake_module_alias_targets(self, module_file, targets):
content = ""
for alias, aliased in targets.items():
content += textwrap.dedent(f"""\
if(TARGET {aliased} AND NOT TARGET {alias})
add_library({alias} INTERFACE IMPORTED)
set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased})
endif()
""")
save(self, module_file, content)
@property
def _module_file_rel_path(self):
return os.path.join("lib", "cmake", f"conan-official-{self.name}-targets.cmake")
def package_info(self):
self.cpp_info.set_property("cmake_file_name", "e57format")
self.cpp_info.set_property("cmake_target_name", "E57Format")
suffix = "-d" if self.settings.build_type == "Debug" else ""
self.cpp_info.libs = [f"E57Format{suffix}"]
if self.settings.os in ["Linux", "FreeBSD"] and not self.options.shared:
self.cpp_info.system_libs.extend(["m", "pthread"])
# TODO: to remove in conan v2 once cmake_find_package* generators removed
self.cpp_info.filenames["cmake_find_package"] = "e57format"
self.cpp_info.filenames["cmake_find_package_multi"] = "e57format"
self.cpp_info.names["cmake_find_package"] = "E57Format"
self.cpp_info.names["cmake_find_package_multi"] = "E57Format"
self.cpp_info.build_modules["cmake_find_package"] = [self._module_file_rel_path]
self.cpp_info.build_modules["cmake_find_package_multi"] = [self._module_file_rel_path]
|
3,745 |
get messages by service
|
#!/usr/bin/env python
import argparse
import datetime
import logging
import smtplib
import sys
from collections import defaultdict
from email.message import EmailMessage
from socket import getfqdn
import pysensu_yelp
import requests
from paasta_tools import mesos_tools
from paasta_tools.monitoring_tools import send_event
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import list_services
logger = logging.getLogger(__name__)
email_from_address = f"paasta@{getfqdn()}"
JUPYTER_PREFIX = "jupyterhub_"
def parse_args():
parser = argparse.ArgumentParser(
description="Reports long-running Spark frameworks."
)
parser.add_argument(
"--min-hours",
type=float,
help="Report frameworks that have been registered for more than this duration",
default=0,
)
parser.add_argument(
"--no-notify",
action="store_true",
help="Skip notifying the teams that own each framework",
)
parser.add_argument(
"--email-domain", default=None, help="Email domain for notifying users"
)
return parser.parse_args()
def get_time_running(framework):
registered_time = datetime.datetime.fromtimestamp(framework["registered_time"])
return datetime.datetime.now() - registered_time
def get_spark_properties(framework):
webui_url = framework.get("webui_url")
if not webui_url:
return None
env_endpoint = f"{webui_url}/api/v1/applications/{framework.id}/environment"
try:
response = requests.get(env_endpoint, timeout=5)
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as e:
logger.warning(f"Unable to connect to {env_endpoint}: {e!r}")
return None
if response.status_code != 200:
logger.warning(f"Bad response from {env_endpoint}: {response.status_code}")
return None
try:
return response.json()["sparkProperties"]
except (ValueError, KeyError):
logger.warning(
f"Unable to get sparkProperties for {framework.id}: got response {response.text}"
)
return None
def guess_service(properties):
if not properties:
return None
for key, value in properties:
if key == "spark.executorEnv.PAASTA_SERVICE":
service = value
break
else:
return None
if service.startswith(JUPYTER_PREFIX):
return service[len(JUPYTER_PREFIX) :]
else:
return service
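# For illustration (hypothetical service name): a property list containing
# ("spark.executorEnv.PAASTA_SERVICE", "jupyterhub_my_service") yields "my_service",
# while a non-Jupyter value such as "my_service" is returned unchanged.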
def get_matching_framework_info(min_hours):
frameworks = mesos_tools.get_all_frameworks(active_only=True)
matching_info = []
min_timedelta = datetime.timedelta(hours=min_hours)
for framework in frameworks:
if not framework.active:
continue
if framework.get("principal") != "spark":
continue
time_running = get_time_running(framework)
if time_running >= min_timedelta:
info = {
"id": framework.id,
"name": framework.name,
"webui_url": framework.get("webui_url"),
"service": guess_service(get_spark_properties(framework)),
"user": framework.user,
"time_running": str(time_running),
}
matching_info.append(info)
return matching_info
def format_framework(info):
result = [f'{info["name"]} (running for {info["time_running"]})']
result.append(f' user: {info["user"]}')
result.append(f' job UI: {info["webui_url"]}')
return "\n".join(result)
def format_message_for_service(service, frameworks):
output = f"Found the following long-running Spark frameworks associated with service {service}.\n"
output += (
f"Please check why they are still running and terminate if appropriate.\n\n"
)
output += "\n".join(format_framework(f) for f in frameworks)
return output
def METHOD_NAME(frameworks):
frameworks_by_service = defaultdict(list)
for framework in frameworks:
service = framework["service"]
frameworks_by_service[service].append(framework)
return {
service: format_message_for_service(service, frameworks)
for service, frameworks in frameworks_by_service.items()
}
def update_check_status(service, output, status):
overrides = {
"page": False,
"alert_after": 0,
"tip": "Ask the user to check the job UI and terminate the job if appropriate.",
"runbook": "http://y/spark-debug",
"ticket": True,
}
send_event(
service=service,
check_name=f"long_running_spark_jobs.{service}",
overrides=overrides,
status=status,
output=output,
soa_dir=DEFAULT_SOA_DIR,
)
def email_user(framework_info, email_domain):
guessed_user = None
if framework_info["user"] != "root":
guessed_user = framework_info["user"]
elif framework_info["name"].startswith(JUPYTER_PREFIX):
try:
# the job format is now `<AppName>_<UserName>_<UIPort>_<StartTime>`
guessed_user = framework_info["name"].split("_")[-3]
except IndexError:
pass
if guessed_user:
print(
f'Guessed {framework_info["name"]} belongs to {guessed_user}, sending email'
)
else:
print(f"Could not guess user from {framework_info}, skipping user email")
return
msg = EmailMessage()
msg["From"] = email_from_address
msg["To"] = f"{guessed_user}@{email_domain}"
msg["Subject"] = f'Long-running Spark framework {framework_info["name"]}'
content = "Please check why it is still running and terminate if appropriate.\n"
content += format_framework(framework_info)
msg.set_content(content)
with smtplib.SMTP("localhost") as s:
s.send_message(msg)
def report_spark_jobs(min_hours, no_notify, email_domain=None):
frameworks = get_matching_framework_info(min_hours=min_hours)
messages_by_service = METHOD_NAME(frameworks)
valid_services = set(list_services())
messages_for_unknown_services = []
for service, message in messages_by_service.items():
if service in valid_services:
print(f"{message}\n")
else:
messages_for_unknown_services.append(message)
if messages_for_unknown_services:
print("\nINVALID SERVICES")
print("----------------")
print(
"The following frameworks are associated with services that are not configured in PaaSTA.\n"
)
print("\n\n".join(messages_for_unknown_services))
if not no_notify:
for service in valid_services:
if service in messages_by_service:
update_check_status(service, messages_by_service[service], pysensu_yelp.Status.WARNING)
else:
update_check_status(
service, "No long running spark jobs", pysensu_yelp.Status.OK
)
if email_domain:
for framework in frameworks:
email_user(framework, email_domain)
return 0 if len(frameworks) == 0 else 1
def main():
args = parse_args()
logging.basicConfig()
return report_spark_jobs(args.min_hours, args.no_notify, args.email_domain)
if __name__ == "__main__":
sys.exit(main())
|
3,746 |
check
|
from django.conf import settings
from django.core.cache import cache
from django.utils.translation import gettext_lazy as _
from assets.const import DatabaseTypes
from assets.models import Database
from common.decorators import Singleton
from common.exceptions import JMSException
from common.utils import get_logger, get_object_or_none
from orgs.utils import tmp_to_root_org
logger = get_logger(__file__)
@Singleton
class DBPortManager(object):
""" 管理端口-数据库ID的映射, Magnus 要使用 """
CACHE_KEY = 'PORT_DB_MAPPER'
def __init__(self):
oracle_ports = self.oracle_port_range
try:
port_start, port_end = oracle_ports.split('-')
port_start, port_end = int(port_start), int(port_end)
except Exception as e:
logger.error('MAGNUS_ORACLE_PORTS config error: {}'.format(e))
port_start, port_end = 30000, 30100
self.port_start, self.port_end = port_start, port_end
# List of ports that can be used
self.all_avail_ports = list(range(self.port_start, self.port_end + 1))
@property
def oracle_port_range(self):
oracle_ports = settings.MAGNUS_ORACLE_PORTS
if not oracle_ports and settings.MAGNUS_PORTS:
oracle_ports = settings.MAGNUS_PORTS
return oracle_ports
@staticmethod
def fetch_dbs():
with tmp_to_root_org():
dbs = Database.objects.filter(platform__type=DatabaseTypes.ORACLE).order_by('id')
return dbs
def METHOD_NAME(self):
dbs = self.fetch_dbs()
mapper = self.get_mapper()
db_ids = [str(db.id) for db in dbs]
db_ids_to_add = list(set(db_ids) - set(mapper.values()))
mapper = self.bulk_add(db_ids_to_add, mapper)
db_ids_to_pop = set(mapper.values()) - set(db_ids)
mapper = self.bulk_pop(db_ids_to_pop, mapper)
self.set_mapper(mapper)
if settings.DEBUG:
logger.debug("Oracle listen ports: {}".format(len(mapper.keys())))
def init(self):
dbs = self.fetch_dbs()
db_ids = dbs.values_list('id', flat=True)
db_ids = [str(i) for i in db_ids]
mapper = dict(zip(self.all_avail_ports, list(db_ids)))
self.set_mapper(mapper)
def bulk_add(self, db_ids, mapper):
for db_id in db_ids:
avail_port = self.get_next_avail_port(mapper)
mapper[avail_port] = str(db_id)
return mapper
def bulk_pop(self, db_ids, mapper):
new_mapper = {port: str(db_id) for port, db_id in mapper.items() if db_id not in db_ids}
return new_mapper
def get_port_by_db(self, db, raise_exception=True):
mapper = self.get_mapper()
for port, db_id in mapper.items():
if db_id == str(db.id):
return port
if raise_exception:
error = _(
'No available port is matched. '
'The number of databases may have exceeded the number of ports '
'open to the database agent service, '
'Contact the administrator to open more ports.'
)
raise JMSException(error)
def get_db_by_port(self, port):
try:
port = int(port)
except Exception as e:
raise JMSException('Port type error: {}'.format(e))
mapper = self.get_mapper()
db_id = mapper.get(port, None)
if not db_id:
raise JMSException('Database not in port-db mapper, port: {}'.format(port))
with tmp_to_root_org():
db = get_object_or_none(Database, id=db_id)
if not db:
raise JMSException('Database not exists, db id: {}'.format(db_id))
return db
def get_next_avail_port(self, mapper=None):
if mapper is None:
mapper = self.get_mapper()
already_use_ports = [int(i) for i in mapper.keys()]
avail_ports = sorted(list(set(self.all_avail_ports) - set(already_use_ports)))
if len(avail_ports) <= 0:
msg = _('No ports can be used, check and modify the limit on the number '
'of ports that Magnus listens on in the configuration file.')
tips = _('All available port count: {}, Already use port count: {}').format(
len(self.all_avail_ports), len(already_use_ports)
)
error = msg + tips
raise JMSException(error)
port = avail_ports[0]
logger.debug('Get next available port: {}'.format(port))
return port
def get_already_use_ports(self):
mapper = self.get_mapper()
return sorted([int(i) for i in mapper.keys()])
def get_mapper(self):
mapper = cache.get(self.CACHE_KEY, {})
if not mapper:
            # Redis may have been flushed, so re-initialize the mapper
self.init()
return cache.get(self.CACHE_KEY, {})
def set_mapper(self, value):
"""
value: {
port: db_id
}
"""
cache.set(self.CACHE_KEY, value, timeout=None)
db_port_manager = DBPortManager()
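# --- Illustrative usage (editor's sketch, not part of the original module) ---
# A minimal, hypothetical example of how the singleton mapper might be used to
# resolve the listen port assigned to an Oracle database and invert the lookup.
# The `database` argument and its `.id` attribute are assumptions for illustration.
def _example_port_lookup(database):
    db_port_manager.METHOD_NAME()  # sync the port -> db-id mapper with the current databases
    port = db_port_manager.get_port_by_db(database, raise_exception=False)
    if port is not None:
        # The reverse lookup should resolve back to the same database.
        assert str(db_port_manager.get_db_by_port(port).id) == str(database.id)
    return port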
|
3,747 |
test voxelization output shape no z
|
# Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.layers.object_detection_3d.centernet_label_encoder import (
CenterNetLabelEncoder,
)
from keras_cv.tests.test_case import TestCase
class CenterNetLabelEncoderTest(TestCase):
def METHOD_NAME(self):
layer = CenterNetLabelEncoder(
voxel_size=[0.1, 0.1, 1000],
max_radius=[8.0, 8.0, 0.0],
spatial_size=[-20, 20, -20, 20, -20, 20],
num_classes=2,
top_k_heatmap=[10, 20],
)
box_3d = tf.random.uniform(
shape=[2, 100, 7], minval=-5, maxval=5, dtype=tf.float32
)
box_classes = tf.random.uniform(
shape=[2, 100], minval=0, maxval=2, dtype=tf.int32
)
box_mask = tf.constant(True, shape=[2, 100])
inputs = {
"3d_boxes": {
"boxes": box_3d,
"classes": box_classes,
"mask": box_mask,
}
}
output = layer(inputs)
# # (20 - (-20)) / 0.1 = 400
self.assertEqual(output["class_1"]["heatmap"].shape, [2, 400, 400])
self.assertEqual(output["class_2"]["heatmap"].shape, [2, 400, 400])
self.assertEqual(output["class_1"]["boxes"].shape, [2, 400, 400, 7])
self.assertEqual(output["class_2"]["boxes"].shape, [2, 400, 400, 7])
# last dimension only has x, y
self.assertEqual(output["class_1"]["top_k_index"].shape, [2, 10, 2])
self.assertEqual(output["class_2"]["top_k_index"].shape, [2, 20, 2])
def test_voxelization_output_shape_with_z(self):
layer = CenterNetLabelEncoder(
voxel_size=[0.1, 0.1, 10],
max_radius=[8.0, 8.0, 0.0],
spatial_size=[-20, 20, -20, 20, -20, 20],
num_classes=2,
top_k_heatmap=[10, 20],
)
box_3d = tf.random.uniform(
shape=[2, 100, 7], minval=-5, maxval=5, dtype=tf.float32
)
box_classes = tf.random.uniform(
shape=[2, 100], minval=0, maxval=2, dtype=tf.int32
)
box_mask = tf.constant(True, shape=[2, 100])
inputs = {
"3d_boxes": {
"boxes": box_3d,
"classes": box_classes,
"mask": box_mask,
}
}
output = layer(inputs)
# # (20 - (-20)) / 0.1 = 400
self.assertEqual(output["class_1"]["heatmap"].shape, [2, 400, 400, 4])
self.assertEqual(output["class_2"]["heatmap"].shape, [2, 400, 400, 4])
self.assertEqual(output["class_1"]["boxes"].shape, [2, 400, 400, 4, 7])
self.assertEqual(output["class_2"]["boxes"].shape, [2, 400, 400, 4, 7])
# last dimension has x, y, z
self.assertEqual(output["class_1"]["top_k_index"].shape, [2, 10, 3])
self.assertEqual(output["class_2"]["top_k_index"].shape, [2, 20, 3])
def test_voxelization_output_shape_missing_topk(self):
layer = CenterNetLabelEncoder(
voxel_size=[0.1, 0.1, 1000],
max_radius=[8.0, 8.0, 0.0],
spatial_size=[-20, 20, -20, 20, -20, 20],
num_classes=2,
top_k_heatmap=[10, 0],
)
box_3d = tf.random.uniform(
shape=[2, 100, 7], minval=-5, maxval=5, dtype=tf.float32
)
box_classes = tf.random.uniform(
shape=[2, 100], minval=0, maxval=2, dtype=tf.int32
)
box_mask = tf.constant(True, shape=[2, 100])
inputs = {
"3d_boxes": {
"boxes": box_3d,
"classes": box_classes,
"mask": box_mask,
}
}
output = layer(inputs)
# # (20 - (-20)) / 0.1 = 400
self.assertEqual(output["class_1"]["heatmap"].shape, [2, 400, 400])
self.assertEqual(output["class_2"]["heatmap"].shape, [2, 400, 400])
self.assertEqual(output["class_1"]["boxes"].shape, [2, 400, 400, 7])
self.assertEqual(output["class_2"]["boxes"].shape, [2, 400, 400, 7])
# last dimension only has x, y
self.assertEqual(output["class_1"]["top_k_index"].shape, [2, 10, 2])
self.assertEqual(output["class_2"]["top_k_index"], None)
|
3,748 |
sigpending
|
import sys
from _typeshed import structseq
from collections.abc import Callable, Iterable
from enum import IntEnum
from types import FrameType
from typing import Any
from typing_extensions import Final, Never, TypeAlias, final
NSIG: int
class Signals(IntEnum):
SIGABRT: int
SIGEMT: int
SIGFPE: int
SIGILL: int
SIGINFO: int
SIGINT: int
SIGSEGV: int
SIGTERM: int
if sys.platform == "win32":
SIGBREAK: int
CTRL_C_EVENT: int
CTRL_BREAK_EVENT: int
else:
SIGALRM: int
SIGBUS: int
SIGCHLD: int
SIGCONT: int
SIGHUP: int
SIGIO: int
SIGIOT: int
SIGKILL: int
SIGPIPE: int
SIGPROF: int
SIGQUIT: int
SIGSTOP: int
SIGSYS: int
SIGTRAP: int
SIGTSTP: int
SIGTTIN: int
SIGTTOU: int
SIGURG: int
SIGUSR1: int
SIGUSR2: int
SIGVTALRM: int
SIGWINCH: int
SIGXCPU: int
SIGXFSZ: int
if sys.platform != "darwin":
SIGCLD: int
SIGPOLL: int
SIGPWR: int
SIGRTMAX: int
SIGRTMIN: int
if sys.version_info >= (3, 11):
SIGSTKFLT: int
class Handlers(IntEnum):
SIG_DFL: int
SIG_IGN: int
SIG_DFL: Handlers
SIG_IGN: Handlers
_SIGNUM: TypeAlias = int | Signals
_HANDLER: TypeAlias = Callable[[int, FrameType | None], Any] | int | Handlers | None
def default_int_handler(__signalnum: int, __frame: FrameType | None) -> Never: ...
if sys.version_info >= (3, 10): # arguments changed in 3.10.2
def getsignal(signalnum: _SIGNUM) -> _HANDLER: ...
def signal(signalnum: _SIGNUM, handler: _HANDLER) -> _HANDLER: ...
else:
def getsignal(__signalnum: _SIGNUM) -> _HANDLER: ...
def signal(__signalnum: _SIGNUM, __handler: _HANDLER) -> _HANDLER: ...
SIGABRT: Signals
SIGEMT: Signals
SIGFPE: Signals
SIGILL: Signals
SIGINFO: Signals
SIGINT: Signals
SIGSEGV: Signals
SIGTERM: Signals
if sys.platform == "win32":
SIGBREAK: Signals
CTRL_C_EVENT: Signals
CTRL_BREAK_EVENT: Signals
else:
SIGALRM: Signals
SIGBUS: Signals
SIGCHLD: Signals
SIGCONT: Signals
SIGHUP: Signals
SIGIO: Signals
SIGIOT: Signals
SIGKILL: Signals
SIGPIPE: Signals
SIGPROF: Signals
SIGQUIT: Signals
SIGSTOP: Signals
SIGSYS: Signals
SIGTRAP: Signals
SIGTSTP: Signals
SIGTTIN: Signals
SIGTTOU: Signals
SIGURG: Signals
SIGUSR1: Signals
SIGUSR2: Signals
SIGVTALRM: Signals
SIGWINCH: Signals
SIGXCPU: Signals
SIGXFSZ: Signals
class ItimerError(OSError): ...
ITIMER_PROF: int
ITIMER_REAL: int
ITIMER_VIRTUAL: int
class Sigmasks(IntEnum):
SIG_BLOCK: int
SIG_UNBLOCK: int
SIG_SETMASK: int
SIG_BLOCK = Sigmasks.SIG_BLOCK
SIG_UNBLOCK = Sigmasks.SIG_UNBLOCK
SIG_SETMASK = Sigmasks.SIG_SETMASK
def alarm(__seconds: int) -> int: ...
def getitimer(__which: int) -> tuple[float, float]: ...
def pause() -> None: ...
def pthread_kill(__thread_id: int, __signalnum: int) -> None: ...
if sys.version_info >= (3, 10): # arguments changed in 3.10.2
def pthread_sigmask(how: int, mask: Iterable[int]) -> set[_SIGNUM]: ...
else:
def pthread_sigmask(__how: int, __mask: Iterable[int]) -> set[_SIGNUM]: ...
def setitimer(__which: int, __seconds: float, __interval: float = 0.0) -> tuple[float, float]: ...
def siginterrupt(__signalnum: int, __flag: bool) -> None: ...
def METHOD_NAME() -> Any: ...
if sys.version_info >= (3, 10): # argument changed in 3.10.2
def sigwait(sigset: Iterable[int]) -> _SIGNUM: ...
else:
def sigwait(__sigset: Iterable[int]) -> _SIGNUM: ...
if sys.platform != "darwin":
SIGCLD: Signals
SIGPOLL: Signals
SIGPWR: Signals
SIGRTMAX: Signals
SIGRTMIN: Signals
if sys.version_info >= (3, 11):
SIGSTKFLT: Signals
@final
class struct_siginfo(structseq[int], tuple[int, int, int, int, int, int, int]):
if sys.version_info >= (3, 10):
__match_args__: Final = ("si_signo", "si_code", "si_errno", "si_pid", "si_uid", "si_status", "si_band")
@property
def si_signo(self) -> int: ...
@property
def si_code(self) -> int: ...
@property
def si_errno(self) -> int: ...
@property
def si_pid(self) -> int: ...
@property
def si_uid(self) -> int: ...
@property
def si_status(self) -> int: ...
@property
def si_band(self) -> int: ...
def sigtimedwait(sigset: Iterable[int], timeout: float) -> struct_siginfo | None: ...
def sigwaitinfo(sigset: Iterable[int]) -> struct_siginfo: ...
if sys.version_info >= (3, 8):
def strsignal(__signalnum: _SIGNUM) -> str | None: ...
def valid_signals() -> set[Signals]: ...
def raise_signal(__signalnum: _SIGNUM) -> None: ...
def set_wakeup_fd(fd: int, *, warn_on_full_buffer: bool = ...) -> int: ...
if sys.version_info >= (3, 9):
if sys.platform == "linux":
def pidfd_send_signal(__pidfd: int, __sig: int, __siginfo: None = None, __flags: int = ...) -> None: ...
|
3,749 |
forward
|
from typing import Optional
from torch import Tensor
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.conv.gcn_conv import gcn_norm
from torch_geometric.nn.dense.linear import Linear
from torch_geometric.typing import Adj, OptTensor, SparseTensor
from torch_geometric.utils import spmm
class SGConv(MessagePassing):
r"""The simple graph convolutional operator from the `"Simplifying Graph
Convolutional Networks" <https://arxiv.org/abs/1902.07153>`_ paper
.. math::
\mathbf{X}^{\prime} = {\left(\mathbf{\hat{D}}^{-1/2} \mathbf{\hat{A}}
\mathbf{\hat{D}}^{-1/2} \right)}^K \mathbf{X} \mathbf{\Theta},
where :math:`\mathbf{\hat{A}} = \mathbf{A} + \mathbf{I}` denotes the
adjacency matrix with inserted self-loops and
:math:`\hat{D}_{ii} = \sum_{j=0} \hat{A}_{ij}` its diagonal degree matrix.
The adjacency matrix can include other values than :obj:`1` representing
edge weights via the optional :obj:`edge_weight` tensor.
Args:
in_channels (int): Size of each input sample, or :obj:`-1` to derive
the size from the first input(s) to the forward method.
out_channels (int): Size of each output sample.
K (int, optional): Number of hops :math:`K`. (default: :obj:`1`)
cached (bool, optional): If set to :obj:`True`, the layer will cache
the computation of :math:`{\left(\mathbf{\hat{D}}^{-1/2}
\mathbf{\hat{A}} \mathbf{\hat{D}}^{-1/2} \right)}^K \mathbf{X}` on
first execution, and will use the cached version for further
executions.
This parameter should only be set to :obj:`True` in transductive
learning scenarios. (default: :obj:`False`)
add_self_loops (bool, optional): If set to :obj:`False`, will not add
self-loops to the input graph. (default: :obj:`True`)
bias (bool, optional): If set to :obj:`False`, the layer will not learn
an additive bias. (default: :obj:`True`)
**kwargs (optional): Additional arguments of
:class:`torch_geometric.nn.conv.MessagePassing`.
Shapes:
- **input:**
node features :math:`(|\mathcal{V}|, F_{in})`,
edge indices :math:`(2, |\mathcal{E}|)`,
edge weights :math:`(|\mathcal{E}|)` *(optional)*
- **output:**
node features :math:`(|\mathcal{V}|, F_{out})`
"""
_cached_x: Optional[Tensor]
def __init__(self, in_channels: int, out_channels: int, K: int = 1,
cached: bool = False, add_self_loops: bool = True,
bias: bool = True, **kwargs):
kwargs.setdefault('aggr', 'add')
super().__init__(**kwargs)
self.in_channels = in_channels
self.out_channels = out_channels
self.K = K
self.cached = cached
self.add_self_loops = add_self_loops
self._cached_x = None
self.lin = Linear(in_channels, out_channels, bias=bias)
self.reset_parameters()
def reset_parameters(self):
super().reset_parameters()
self.lin.reset_parameters()
self._cached_x = None
def METHOD_NAME(self, x: Tensor, edge_index: Adj,
edge_weight: OptTensor = None) -> Tensor:
cache = self._cached_x
if cache is None:
if isinstance(edge_index, Tensor):
edge_index, edge_weight = gcn_norm( # yapf: disable
edge_index, edge_weight, x.size(self.node_dim), False,
self.add_self_loops, self.flow, dtype=x.dtype)
elif isinstance(edge_index, SparseTensor):
edge_index = gcn_norm( # yapf: disable
edge_index, edge_weight, x.size(self.node_dim), False,
self.add_self_loops, self.flow, dtype=x.dtype)
for k in range(self.K):
# propagate_type: (x: Tensor, edge_weight: OptTensor)
x = self.propagate(edge_index, x=x, edge_weight=edge_weight,
size=None)
if self.cached:
self._cached_x = x
else:
x = cache.detach()
return self.lin(x)
def message(self, x_j: Tensor, edge_weight: Tensor) -> Tensor:
return edge_weight.view(-1, 1) * x_j
def message_and_aggregate(self, adj_t: SparseTensor, x: Tensor) -> Tensor:
return spmm(adj_t, x, reduce=self.aggr)
def __repr__(self) -> str:
return (f'{self.__class__.__name__}({self.in_channels}, '
f'{self.out_channels}, K={self.K})')
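# --- Illustrative usage (editor's sketch, not part of the original module) ---
# A toy example applying the operator to a 4-node cycle graph. The tensor sizes
# and the example graph are assumptions made purely for illustration; in the
# unmasked module METHOD_NAME is the layer's forward pass.
if __name__ == '__main__':
    import torch
    x = torch.randn(4, 16)                     # 4 nodes with 16 input features
    edge_index = torch.tensor([[0, 1, 2, 3],   # source nodes
                               [1, 2, 3, 0]])  # target nodes
    conv = SGConv(in_channels=16, out_channels=8, K=2, cached=True)
    out = conv.METHOD_NAME(x, edge_index)      # first call also fills the cache
    assert out.shape == (4, 8)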
|
3,750 |
delete ecs cluster
|
import time
import pytest
import boto3
from botocore.exceptions import ClientError
from test import test_utils
import test.test_utils.ecs as ecs_utils
@pytest.fixture(scope="function")
def ecs_client(region):
return boto3.client("ecs", region_name=region)
@pytest.fixture(scope="function")
def ecs_cluster_name(request):
return request.param
@pytest.mark.timeout(300)
@pytest.fixture(scope="function")
def ecs_cluster(request, ecs_client, ecs_cluster_name, region):
"""
Fixture to handle spin up and tear down of ecs cluster
:param request:
:param ecs_client:
:param ecs_cluster_name:
:param region:
:return:
"""
cluster_name = ecs_cluster_name
cluster_arn = ecs_utils.create_ecs_cluster(cluster_name, region=region)
# Finalizer to delete the ecs cluster
def METHOD_NAME():
ecs_utils.METHOD_NAME(cluster_arn, region=region)
request.addfinalizer(METHOD_NAME)
# Wait for cluster status to be active
if ecs_utils.check_ecs_cluster_status(cluster_arn, "ACTIVE"):
return cluster_arn
raise ecs_utils.ECSClusterCreationException(f"Failed to create ECS cluster - {cluster_name}")
@pytest.fixture(scope="function")
def training_script(request):
"""
    Path where the container expects the training script to be located,
i.e. /test/bin/testTensorFlow
"""
return request.param
@pytest.fixture(scope="function")
def training_cmd(request, ecs_cluster_name, training_script):
artifact_folder = f"{ecs_cluster_name}-folder"
s3_test_artifact_location = test_utils.upload_tests_to_s3(artifact_folder)
def delete_s3_artifact_copy():
test_utils.delete_uploaded_tests_from_s3(s3_test_artifact_location)
request.addfinalizer(delete_s3_artifact_copy)
return ecs_utils.build_ecs_training_command(s3_test_artifact_location, training_script)
@pytest.fixture(scope="session")
def ecs_ami(request):
return request.param
@pytest.fixture(scope="session")
def ecs_instance_type(request):
return request.param
@pytest.fixture(scope="session")
def use_large_storage(request):
if hasattr(request, "param"):
return request.param
else:
return False
@pytest.fixture(scope="session")
def ecs_num_neurons(request, ecs_instance_type):
# Set the num neurons based on instance_type
if ecs_instance_type in ["trn1.2xlarge", "inf2.xlarge"]:
return 1
elif ecs_instance_type == "trn1.32xlarge":
return 16
return None
@pytest.mark.timeout(300)
@pytest.fixture(scope="function")
def ecs_container_instance(
request,
ecs_cluster,
ec2_client,
ecs_client,
ecs_instance_type,
ecs_ami,
region,
ei_accelerator_type,
use_large_storage,
):
"""
Fixture to handle spin up and tear down of ECS container instance
:param request: pytest request object
:param ecs_cluster: ecs cluster fixture
:param ec2_client: boto3 ec2 client
:param ecs_client: boto3 ecs client
:param ecs_instance_type: eventually to be used
:param ecs_ami: eventually to be used
:return:
"""
# Get these from params on the test
instance_type = ecs_instance_type
image_id = ecs_ami
cluster_name = ecs_utils.get_ecs_cluster_name(ecs_cluster)
user_data = f"#!/bin/bash\necho ECS_CLUSTER={ecs_cluster} >> /etc/ecs/ecs.config"
params = {
"KeyName": "pytest.pem",
"ImageId": image_id,
"InstanceType": instance_type,
"UserData": user_data,
"IamInstanceProfile": {"Name": "ecsInstanceRole"},
"TagSpecifications": [
{
"ResourceType": "instance",
"Tags": [{"Key": "Name", "Value": f"CI-CD ecs worker {cluster_name}"}],
},
],
"MaxCount": 1,
"MinCount": 1,
}
if use_large_storage:
params["BlockDeviceMappings"] = [
{"DeviceName": "/dev/xvda", "Ebs": {"VolumeSize": 90, "VolumeType": "gp2"}}
]
if ei_accelerator_type:
params["ElasticInferenceAccelerators"] = [{"Type": ei_accelerator_type, "Count": 1}]
availability_zones = {
"us-west-2": ["us-west-2a", "us-west-2b", "us-west-2c"],
"us-east-1": ["us-east-1a", "us-east-1b", "us-east-1c"],
}
for a_zone in availability_zones[region]:
params["Placement"] = {"AvailabilityZone": a_zone}
try:
instances = ec2_client.run_instances(**params)
if instances:
break
except ClientError as e:
print(f"Failed to launch in {a_zone} with Error: {e}")
continue
else:
instances = ec2_client.run_instances(**params)
instance_id = instances.get("Instances")[0].get("InstanceId")
# Define finalizer to terminate instance after this fixture completes
def terminate_ec2_instance():
ec2_client.terminate_instances(InstanceIds=[instance_id])
terminate_waiter = ec2_client.get_waiter("instance_terminated")
terminate_waiter.wait(InstanceIds=[instance_id])
request.addfinalizer(terminate_ec2_instance)
waiter = ec2_client.get_waiter("instance_running")
waiter.wait(InstanceIds=[instance_id])
is_attached = False
# Check to see if instance is attached
while not is_attached:
# Add sleep to avoid throttling limit
time.sleep(12)
response = ecs_client.describe_clusters(clusters=[ecs_cluster])
if response.get("clusters", [{}])[0].get("registeredContainerInstancesCount"):
is_attached = True
return instance_id, ecs_cluster
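# --- Illustrative usage (editor's sketch, not part of the original conftest) ---
# A hypothetical test that consumes the cluster fixture via indirect
# parametrization; the cluster name is a placeholder, and the `region` and
# `ec2_client` fixtures are assumed to be provided elsewhere in the suite.
@pytest.mark.parametrize("ecs_cluster_name", ["dlc-example-cluster"], indirect=True)
def test_example_cluster_created(ecs_cluster):
    # The fixture returns the cluster ARN once the cluster reports ACTIVE.
    assert ecs_cluster.startswith("arn:aws:ecs:")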
|
3,751 |
query mathoid
|
import hashlib
import logging
import re
import requests
from django.conf import settings
from django.core.cache import caches
from django.utils.html import format_html
from mistune import escape
from judge.utils.file_cache import HashFileCache
from judge.utils.unicode import utf8bytes, utf8text
logger = logging.getLogger('judge.mathoid')
reescape = re.compile(r'(?<!\\)(?:\\{2})*[$]')
REPLACES = [
('\u2264', r'\le'),
('\u2265', r'\ge'),
('\u2026', '...'),
('\u2212', '-'),
('≤', r'\le'),
('≥', r'\ge'),
    ('&lt;', '<'),
    ('&gt;', '>'),
    ('&amp;', '&'),
('−', '-'),
('≤', r'\le'),
('≥', r'\ge'),
('…', '...'),
(r'\lt', '<'),
(r'\gt', '>'),
]
def format_math(math):
for a, b in REPLACES:
math = math.replace(a, b)
return math
class MathoidMathParser(object):
types = ('svg', 'mml', 'tex', 'jax')
def __init__(self, type):
self.type = type
self.mathoid_url = settings.MATHOID_URL
self.cache = HashFileCache(settings.MATHOID_CACHE_ROOT,
settings.MATHOID_CACHE_URL,
settings.MATHOID_GZIP)
mml_cache = settings.MATHOID_MML_CACHE
self.mml_cache = mml_cache and caches[mml_cache]
self.css_cache = caches[settings.MATHOID_CSS_CACHE]
self.mml_cache_ttl = settings.MATHOID_MML_CACHE_TTL
def METHOD_NAME(self, formula, hash):
self.cache.create(hash)
try:
response = requests.post(self.mathoid_url, data={
'q': reescape.sub(lambda m: '\\' + m.group(0), formula).encode('utf-8'),
'type': 'tex' if formula.startswith(r'\displaystyle') else 'inline-tex',
})
response.raise_for_status()
data = response.json()
except requests.ConnectionError:
logger.exception('Failed to connect to mathoid for: %s', formula)
return
except requests.HTTPError as e:
logger.error('Mathoid failed to render: %s\n%s', formula, e.response.text)
return
except Exception:
logger.exception('Failed to connect to mathoid for: %s', formula)
return
if not data['success']:
logger.error('Mathoid failure for: %s\n%s', formula, data)
return
if any(i not in data for i in ('mml', 'svg', 'mathoidStyle')):
logger.error('Mathoid did not return required information (mml, svg, mathoidStyle needed):\n%s', data)
return
css = data['mathoidStyle']
mml = data['mml']
result = {
'css': css,
'mml': mml,
'svg': self.cache.cache_data(hash, 'svg', data['svg'].encode('utf-8')),
}
self.cache.cache_data(hash, 'mml', mml.encode('utf-8'), url=False, gzip=False)
self.cache.cache_data(hash, 'css', css.encode('utf-8'), url=False, gzip=False)
return result
def query_cache(self, hash):
result = {'svg': self.cache.get_url(hash, 'svg')}
key = 'mathoid:css:' + hash
css = result['css'] = self.css_cache.get(key)
if css is None:
css = result['css'] = self.cache.read_data(hash, 'css').decode('utf-8')
self.css_cache.set(key, css, self.mml_cache_ttl)
mml = None
if self.mml_cache:
mml = result['mml'] = self.mml_cache.get('mathoid:mml:' + hash)
if mml is None:
mml = result['mml'] = self.cache.read_data(hash, 'mml').decode('utf-8')
if self.mml_cache:
self.mml_cache.set('mathoid:mml:' + hash, mml, self.mml_cache_ttl)
return result
def get_result(self, formula):
if self.type == 'tex':
return
hash = hashlib.sha1(utf8bytes(formula)).hexdigest()
formula = utf8text(formula)
if self.cache.has_file(hash, 'css'):
result = self.query_cache(hash)
else:
result = self.METHOD_NAME(formula, hash)
if not result:
return None
result['tex'] = formula
result['display'] = formula.startswith(r'\displaystyle')
return {
'mml': self.output_mml,
'jax': self.output_jax,
'svg': self.output_svg,
'raw': lambda x: x,
}[self.type](result)
def output_mml(self, result):
return result['mml']
def output_jax(self, result):
return format_html('<span class="{3}">'
'<img class="tex-image" src="{0}" style="{1}" alt="{2}">'
'<span class="tex-text" style="display:none">{4}{2}{4}</span>'
'</span>',
result['svg'], result['css'], result['tex'],
['inline-math', 'display-math'][result['display']], ['~', '$$'][result['display']])
def output_svg(self, result):
return format_html('<img class="{3}" src="{0}" style="{1}" alt="{2}">',
result['svg'], result['css'], result['tex'],
['inline-math', 'display-math'][result['display']])
def display_math(self, math):
math = format_math(math)
return self.get_result(r'\displaystyle ' + math) or r'\[%s\]' % escape(math)
def inline_math(self, math):
math = format_math(math)
return self.get_result(math) or r'\(%s\)' % escape(math)
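# --- Illustrative usage (editor's sketch, not part of the original module) ---
# A small example rendering one inline and one display formula as SVG; 'svg' is
# one of the types declared on the class, and the formulas are arbitrary.
def _example_render():
    parser = MathoidMathParser('svg')
    inline = parser.inline_math(r'a^2 + b^2 = c^2')
    display = parser.display_math(r'\sum_{i=1}^{n} i = \frac{n(n+1)}{2}')
    return inline, display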
|
3,752 |
convert to datetime
|
from __future__ import annotations
import datetime as dt
import logging
import pathlib
import traceback
from functools import update_wrapper, wraps
from typing import Any, Callable, List, Optional
import click
from dateutil.parser import parse
import metricflow.cli.custom_click_types as click_custom
from metricflow.cli.cli_context import CLIContext
logger = logging.getLogger(__name__)
# Click Options
def query_options(function: Callable) -> Callable:
"""Common options for a query."""
function = click.option(
"--order",
type=click_custom.SequenceParamType(),
help='Metrics or group bys to order by ("-" prefix for DESC). For example: --order -ds or --order ds,-revenue',
required=False,
)(function)
function = click.option(
"--limit",
type=str,
help="Limit the number of rows out using an int or leave blank for no limit. For example: --limit 100",
callback=lambda ctx, param, value: validate_limit(value),
)(function)
function = click.option(
"--where",
type=str,
default=None,
help='SQL-like where statement provided as a string. For example: --where "revenue > 100"',
)(function)
function = start_end_time_options(function)
function = click.option(
"--group-by",
type=click_custom.SequenceParamType(),
default="",
help="Dimensions and/or entities to group by: syntax is --group-by ds or for multiple group bys --group-by ds,org",
)(function)
function = click.option(
"--metrics",
type=click_custom.SequenceParamType(min_length=1),
default="",
help="Metrics to query for: syntax is --metrics bookings or for multiple metrics --metrics bookings,messages",
)(function)
return function
def start_end_time_options(function: Callable) -> Callable:
"""Options for start_time and end_time."""
function = click.option(
"--start-time",
type=str,
default=None,
help="Optional iso8601 timestamp to constraint the start time of the data (inclusive)",
callback=lambda ctx, param, value: METHOD_NAME(value),
)(function)
function = click.option(
"--end-time",
type=str,
default=None,
help="Optional iso8601 timestamp to constraint the end time of the data (inclusive)",
callback=lambda ctx, param, value: METHOD_NAME(value),
)(function)
return function
# Parsers/Validators
def METHOD_NAME(datetime_str: Optional[str]) -> Optional[dt.datetime]:
"""Callback to convert string to datetime given as an iso8601 timestamp."""
if datetime_str is None:
return None
try:
return parse(datetime_str)
except Exception:
raise click.BadParameter("must be valid iso8601 timestamp")
def parse_comma_separated_inputs(value: Optional[str]) -> Optional[List[str]]: # noqa: D
# If comma exist, explode this into a list and return
if value is None:
return None
if "," in value:
return [i.strip() for i in value.split(",")]
# Return a list of the single value
return [value]
def validate_limit(limit: Optional[str]) -> Optional[int]:
"""Validates and transform limit input."""
if limit and not limit.isnumeric():
raise click.BadParameter("limit must be an int. For no limit, do not pass this argument")
return int(limit) if limit else None
# Misc
def exception_handler(func: Callable[..., Any]) -> Callable[..., Any]: # type: ignore[misc]
"""Decorator to handle exceptions."""
@wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Any: # type: ignore[misc]
try:
func(*args, **kwargs)
except Exception as e:
# This will log to the file handlers registered in the root.
logging.exception("Got an exception in the exception handler.")
# Checks if CLIContext has verbose flag set
if isinstance(args[0], CLIContext):
cli_context: CLIContext = args[0]
click.echo(f"\nERROR: {str(e)}\nLog file: {cli_context.log_file_path}")
else:
if not isinstance(args[0], CLIContext):
logger.error(
f"Missing {CLIContext.__name__} as the first argument to the function "
f"{getattr(func, '__name__', repr(func))}"
)
click.echo(f"\nERROR: {str(e)}")
if args and hasattr(args[0], "verbose") and args[0].verbose is True:
click.echo(traceback.format_exc())
exit(1)
return wrapper
def dbt_project_file_exists() -> bool:
"""Check that the cwd is a dbt project root. Currently done by checking for existence of dbt_project.yml."""
return pathlib.Path("dbt_project.yml").exists()
def error_if_not_in_dbt_project(func: Callable) -> Callable:
"""Decorator to output an error message and exit if caller is not in a root directory of a dbt project."""
@click.pass_context
def new_func(ctx: click.core.Context, *args: Any, **kwargs: Any) -> Any: # type: ignore[misc]
if not dbt_project_file_exists():
click.echo(
"❌ Unable to locate 'dbt_project.yml' in the current directory\n"
"In order to run the MetricFlow CLI, you must be running in the root directory of a working dbt project.\n"
"Please check out `https://docs.getdbt.com/reference/commands/init` if you want to get started on building a dbt project."
)
exit(1)
return ctx.invoke(func, *args, **kwargs)
return update_wrapper(new_func, func)
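# --- Illustrative usage (editor's sketch, not part of the original module) ---
# A hypothetical click command wired up with the shared query options; the
# command name and the echoed output are assumptions for illustration only.
@click.command(name="example-query")
@query_options
def example_query(metrics, group_by, start_time, end_time, where, limit, order) -> None:
    """Echo back the parsed query parameters."""
    click.echo(f"metrics={metrics} group_by={group_by} start={start_time} end={end_time} "
               f"where={where} limit={limit} order={order}")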
|
3,753 |
test number of parameters meets expectation
|
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for registered platform parameters."""
from __future__ import annotations
from core.domain import platform_feature_services as feature_services
from core.domain import platform_parameter_list as params
from core.tests import test_utils
class ExistingPlatformParameterValidityTests(test_utils.GenericTestBase):
"""Tests to validate platform parameters registered in
core/domain/platform_parameter_list.py.
"""
EXPECTED_PARAM_NAMES = ['always_ask_learners_for_answer_details',
'android_beta_landing_page',
'blog_pages',
'checkpoint_celebration',
'contributor_dashboard_accomplishments',
'diagnostic_test',
'dummy_feature_flag_for_e2e_tests',
'dummy_parameter',
'end_chapter_celebration',
'high_bounce_rate_task_minimum_exploration_starts',
(
'high_bounce_rate_task_state_bounce_'
'rate_creation_threshold'
),
(
'high_bounce_rate_task_state_bounce_rate_'
'obsoletion_threshold'
),
'is_improvements_tab_enabled',
'learner_groups_are_enabled',
'promo_bar_enabled',
'promo_bar_message',
'serial_chapter_launch_curriculum_admin_view',
'serial_chapter_launch_learner_view',
'show_feedback_updates_in_profile_pic_dropdown',
'show_redesigned_learner_dashboard',
'show_translation_size',
'cd_admin_dashboard_new_ui',]
def test_all_defined_parameters_are_valid(self) -> None:
all_names = params.Registry.get_all_platform_parameter_names()
for name in all_names:
param = params.Registry.get_platform_parameter(name)
param.validate()
def METHOD_NAME(self) -> None:
"""Test that the Registry and EXPECTED_PARAM_NAMES have the same number
of platform parameters.
If this test fails, it means either:
- There are parameters defined in
core/domain/platform_parameter_list.py but not added to
EXPECTED_PARAM_NAMES above.
- There are parameters accidentally deleted from
core/domain/platform_parameter_list.py.
If you are defining new platform parameters, make sure to add it to the
EXPECTED_PARAM_NAMES list as well.
"""
self.assertEqual(
len(params.Registry.get_all_platform_parameter_names()),
len(self.EXPECTED_PARAM_NAMES))
def test_all_expected_parameters_are_present_in_registry(self) -> None:
"""Test that all parameters in EXPECTED_PARAM_NAMES are present in
Registry.
If this test fails, it means some parameters in EXPECTED_PARAM_NAMES
are missing in the registry. It's most likely caused by accidentally
deleting some parameters in core/domain/platform_parameter_list.py.
To fix this, please make sure no parameter is deleted. If you really
need to delete a parameter (this should not happen in most cases),
make sure it's also deleted from EXPECTED_PARAM_NAMES.
"""
existing_names = params.Registry.get_all_platform_parameter_names()
missing_names = set(self.EXPECTED_PARAM_NAMES) - set(existing_names)
self.assertFalse(
missing_names,
msg='Platform parameters missing in registry: %s.' % (
list(missing_names))
)
def test_no_unexpected_parameter_in_registry(self) -> None:
"""Test that all parameters registered in Registry are expected.
If this test fails, it means some parameters in
core/domain/platform_parameter_list.py are not found in
EXPECTED_PARAM_NAMES.
If you are creating new platform parameters, make sure to add it to
the EXPECTED_PARAM_NAMES list as well.
"""
existing_names = params.Registry.get_all_platform_parameter_names()
unexpected_names = set(existing_names) - set(self.EXPECTED_PARAM_NAMES)
self.assertFalse(
unexpected_names,
msg='Unexpected platform parameters: %s.' % list(unexpected_names)
)
def test_all_feature_flags_are_of_bool_type(self) -> None:
feature_flags = feature_services.get_all_feature_flag_dicts()
self.assertGreater(len(feature_flags), 0)
for feature in feature_flags:
self.assertEqual(
feature['data_type'],
'bool',
'We expect all the feature-flags to be of type boolean '
'but "%s" feature-flag is of type "%s".' % (
feature['name'], feature['data_type'])
)
def test_all_feature_flags_have_default_value_as_false(self) -> None:
feature_flags = feature_services.get_all_feature_flag_dicts()
self.assertGreater(len(feature_flags), 0)
for feature in feature_flags:
self.assertEqual(
feature['default_value'],
False,
'We expect all the feature-flags default_value to be False '
'but "%s" feature-flag has "%s".' % (
feature['name'], feature['default_value'])
)
|
3,754 |
get empty instance
|
import logging
from dataclasses import InitVar, is_dataclass
from inspect import getmodule, signature
from typing import Dict, List, Optional, Union, get_type_hints
from hikaru import HikaruBase, HikaruDocumentBase
from kubernetes.client.models.events_v1_event import EventsV1Event
from kubernetes.client.models.v1_container_image import V1ContainerImage
from ruamel.yaml import YAML
try:
from typing import get_args, get_origin
except ImportError: # pragma: no cover
def get_args(tp):
return tp.__args__ if hasattr(tp, "__args__") else ()
def get_origin(tp):
return tp.__origin__ if hasattr(tp, "__origin__") else None
NoneType = type(None)
def create_monkey_patches():
    # The two patched Hikaru methods are very CPU-expensive. We patch them and use cached
    # attributes on the Hikaru class so that the expensive procedure is performed only once.
logging.info("Creating hikaru monkey patches")
HikaruBase.METHOD_NAME = METHOD_NAME
HikaruBase._get_hints = _get_hints
    # The YAML method below searches the file system for plugins every time a parser is created.
    # We create many parsers, so this is very inefficient, and the set of plugins does not
    # change during program execution. We added caching so the plugin search happens only once.
logging.info("Creating yaml monkey patch")
YAML.official_plug_ins = official_plug_ins
# The patched method is due to a bug in containerd that allows for containerImages to have no names
# which causes the kubernetes python api to throw an exception
logging.info("Creating kubernetes ContainerImage monkey patch")
EventsV1Event.event_time = EventsV1Event.event_time.setter(event_time)
def event_time(self, event_time):
self._event_time = event_time
def official_plug_ins(self):
return []
# hikaru meta.py monkey patch function
@classmethod
def METHOD_NAME(cls):
"""
Returns a properly initialized instance with Nones and empty collections
    :return: an instance of 'cls' with all scalar attrs set to None and
all collection attrs set to an appropriate empty collection
"""
kw_args = {}
# The 3 lines below are added, to use cached arguments to create the empty class instance
cached_args = getattr(cls, "cached_args", None)
if cached_args:
return cls(**cached_args)
sig = signature(cls.__init__)
init_var_hints = {k for k, v in get_type_hints(cls).items() if isinstance(v, InitVar) or v is InitVar}
hints = cls._get_hints()
for p in sig.parameters.values():
if p.name in ("self", "client") or p.name in init_var_hints:
continue
# skip these either of these next two since they are supplied by default,
# but only if they have default values
if p.name in ("apiVersion", "kind"):
if issubclass(cls, HikaruDocumentBase):
continue
f = hints[p.name]
initial_type = f
origin = get_origin(initial_type)
is_required = True
if origin is Union:
type_args = get_args(f)
initial_type = type_args[0]
is_required = False
if (
(type(initial_type) == type and issubclass(initial_type, (int, str, bool, float)))
or (is_dataclass(initial_type) and issubclass(initial_type, HikaruBase))
or initial_type is object
):
# this is a type that might default to None
# kw_args[p.name] = None
if is_required:
if is_dataclass(initial_type) and issubclass(initial_type, HikaruBase):
kw_args[p.name] = initial_type.METHOD_NAME()
else:
kw_args[p.name] = ""
else:
kw_args[p.name] = None
else:
origin = get_origin(initial_type)
if origin in (list, List):
# ok, just stuffing an empty list in here can be a problem,
# as we don't know if this is going to then be put through
# get clean dict; if it's required, a clean dict will remove
# the list. So we need to put something inside this list so it
# doesn't get blown away. But ONLY if it's required
if is_required:
list_of_type = get_args(initial_type)[0]
if issubclass(list_of_type, HikaruBase):
kw_args[p.name] = [list_of_type.METHOD_NAME()]
else:
kw_args[p.name] = [None]
else:
kw_args[p.name] = []
elif origin in (dict, Dict):
kw_args[p.name] = {}
else:
raise NotImplementedError(
f"Internal error! Unknown type"
f" {initial_type}"
f" for parameter {p.name} in"
f" {cls.__name__}. Please file a"
f" bug report."
) # pragma: no cover
new_inst = cls(**kw_args)
# Caching the empty instance creation args, to use next time we want to create an empty instance
cls.cached_args = kw_args
return new_inst
@classmethod
def _get_hints(cls) -> dict:
# The 3 lines below are added, to use cached hints
cached_hints = getattr(cls, "cached_hints", None)
if cached_hints:
return cached_hints
mro = cls.mro()
mro.reverse()
hints = {}
globs = vars(getmodule(cls))
for c in mro:
if is_dataclass(c):
hints.update(get_type_hints(c, globs))
# patching ContainerImage hint to allow the names to be None due to containerd bug
if cls.__name__ == "Event":
hints["eventTime"] = Optional[str]
# Caching the class hints for later use
cls.cached_hints = hints
return hints
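# --- Illustrative usage (editor's sketch, not part of the original module) ---
# After the patches are installed, repeated empty-instance creation on the same
# Hikaru class reuses the cached constructor arguments. The example dataclass
# below is hypothetical; any HikaruBase subclass should behave the same way.
from dataclasses import dataclass
@dataclass
class _ExampleThing(HikaruBase):
    name: Optional[str] = None
    count: Optional[int] = None
def _example_cached_empty_instance():
    create_monkey_patches()
    first = _ExampleThing.METHOD_NAME()   # slow path: computes and caches the kwargs
    second = _ExampleThing.METHOD_NAME()  # fast path: reuses _ExampleThing.cached_args
    return first, second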
|
3,755 |
test lookup5
|
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import unittest
import dns.name
import dns.namedict
class NameTestCase(unittest.TestCase):
def setUp(self):
self.ndict = dns.namedict.NameDict()
n1 = dns.name.from_text("foo.bar.")
n2 = dns.name.from_text("bar.")
self.ndict[n1] = 1
self.ndict[n2] = 2
self.rndict = dns.namedict.NameDict()
n1 = dns.name.from_text("foo.bar", None)
n2 = dns.name.from_text("bar", None)
self.rndict[n1] = 1
self.rndict[n2] = 2
def testDepth(self):
self.assertEqual(self.ndict.max_depth, 3)
def testLookup1(self):
k = dns.name.from_text("foo.bar.")
self.assertEqual(self.ndict[k], 1)
def testLookup2(self):
k = dns.name.from_text("foo.bar.")
self.assertEqual(self.ndict.get_deepest_match(k)[1], 1)
def testLookup3(self):
k = dns.name.from_text("a.b.c.foo.bar.")
self.assertEqual(self.ndict.get_deepest_match(k)[1], 1)
def testLookup4(self):
k = dns.name.from_text("a.b.c.bar.")
self.assertEqual(self.ndict.get_deepest_match(k)[1], 2)
def METHOD_NAME(self):
def bad():
n = dns.name.from_text("a.b.c.")
self.ndict.get_deepest_match(n)
self.assertRaises(KeyError, bad)
def testLookup6(self):
def bad():
self.ndict.get_deepest_match(dns.name.empty)
self.assertRaises(KeyError, bad)
def testLookup7(self):
self.ndict[dns.name.empty] = 100
n = dns.name.from_text("a.b.c.")
v = self.ndict.get_deepest_match(n)[1]
self.assertEqual(v, 100)
def testLookup8(self):
def bad():
self.ndict["foo"] = 100
self.assertRaises(ValueError, bad)
def testRelDepth(self):
self.assertEqual(self.rndict.max_depth, 2)
def testRelLookup1(self):
k = dns.name.from_text("foo.bar", None)
self.assertEqual(self.rndict[k], 1)
def testRelLookup2(self):
k = dns.name.from_text("foo.bar", None)
self.assertEqual(self.rndict.get_deepest_match(k)[1], 1)
def testRelLookup3(self):
k = dns.name.from_text("a.b.c.foo.bar", None)
self.assertEqual(self.rndict.get_deepest_match(k)[1], 1)
def testRelLookup4(self):
k = dns.name.from_text("a.b.c.bar", None)
self.assertEqual(self.rndict.get_deepest_match(k)[1], 2)
def testRelLookup7(self):
self.rndict[dns.name.empty] = 100
n = dns.name.from_text("a.b.c", None)
v = self.rndict.get_deepest_match(n)[1]
self.assertEqual(v, 100)
def test_max_depth_increases(self):
n = dns.name.from_text("a.foo.bar.")
self.assertEqual(self.ndict.max_depth, 3)
self.ndict[n] = 1
self.assertEqual(self.ndict.max_depth, 4)
def test_delete_no_max_depth_change(self):
self.assertEqual(self.ndict.max_depth, 3)
n = dns.name.from_text("bar.")
del self.ndict[n]
self.assertEqual(self.ndict.max_depth, 3)
self.assertEqual(self.ndict.get(n), None)
def test_delete_max_depth_changes(self):
self.assertEqual(self.ndict.max_depth, 3)
n = dns.name.from_text("foo.bar.")
del self.ndict[n]
self.assertEqual(self.ndict.max_depth, 2)
self.assertEqual(self.ndict.get(n), None)
def test_delete_multiple_max_depth_changes(self):
self.assertEqual(self.ndict.max_depth, 3)
nr = dns.name.from_text("roo.")
self.ndict[nr] = 1
nf = dns.name.from_text("foo.bar.")
nb = dns.name.from_text("bar.bar.")
self.ndict[nb] = 1
self.assertEqual(self.ndict.max_depth, 3)
self.assertEqual(self.ndict.max_depth_items, 2)
del self.ndict[nb]
self.assertEqual(self.ndict.max_depth, 3)
self.assertEqual(self.ndict.max_depth_items, 1)
del self.ndict[nf]
self.assertEqual(self.ndict.max_depth, 2)
self.assertEqual(self.ndict.max_depth_items, 2)
self.assertEqual(self.ndict.get(nf), None)
self.assertEqual(self.ndict.get(nb), None)
def test_iter(self):
nf = dns.name.from_text("foo.bar.")
nb = dns.name.from_text("bar.")
keys = set([x for x in self.ndict])
self.assertEqual(len(keys), 2)
self.assertTrue(nf in keys)
self.assertTrue(nb in keys)
def test_len(self):
self.assertEqual(len(self.ndict), 2)
def test_haskey(self):
nf = dns.name.from_text("foo.bar.")
nb = dns.name.from_text("bar.")
nx = dns.name.from_text("x.")
self.assertTrue(self.ndict.has_key(nf))
self.assertTrue(self.ndict.has_key(nb))
self.assertFalse(self.ndict.has_key(nx))
if __name__ == "__main__":
unittest.main()
|
3,756 |
update
|
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import vtk
from .ChiggerObject import ChiggerObject
class ChiggerResultBase(ChiggerObject):
"""
Base class for objects to be displayed with a single vtkRenderer object.
Any object or set of objects that require a single vtkRenderer object should inherit from this
and all settings for the vtkRender object should be placed in this class.
If you are creating a new type of "result" object (i.e., something with a vtkRenderer) you will
likely want to derive from one of the child classes of ChiggerResultBase, such as ChiggerResult.
Inputs:
see ChiggerObject
"""
@staticmethod
def getOptions():
opt = ChiggerObject.getOptions()
opt.add('layer', 1, "The VTK layer within the render window.", vtype=int)
opt.add('viewport', [0, 0, 1, 1], "A list given the viewport coordinates [x_min, y_min, "
"x_max, y_max], in relative position to the entire "
"window (0 to 1).", vtype=list)
opt.add('background', [0, 0, 0], "The background color, only applied when the 'layer' "
"option is zero. A background result is automatically "
"added when chigger.RenderWindow is utilized.")
opt.add('background2', None, "The second background color, when supplied this creates a "
"gradient background, only applied when the 'layer' option is "
"zero. A background result is automatically added when "
"chigger.RenderWindow is utilized.", vtype=list)
opt.add('gradient_background', False, "Enable/disable the use of a gradient background.")
opt.add('camera', "The VTK camera to utilize for viewing the results.", vtype=vtk.vtkCamera)
opt.add('light', None, "Add a headlight with the supplied intensity.", vtype=float)
return opt
def __init__(self, renderer=None, **kwargs):
super(ChiggerResultBase, self).__init__(**kwargs)
self._vtkrenderer = renderer if renderer != None else vtk.vtkRenderer()
self._vtklight = vtk.vtkLight()
self._vtklight.SetLightTypeToHeadlight()
def getVTKRenderer(self):
"""
Return the vtkRenderer object. (public)
        Generally, this should not be used. This method is mainly for the RenderWindow object to
populate the list of renderers that it will be displaying.
"""
return self._vtkrenderer
def METHOD_NAME(self, **kwargs):
"""
Update the vtkRenderer settings. (override)
Inputs:
see ChiggerObject
"""
super(ChiggerResultBase, self).METHOD_NAME(**kwargs)
# Render layer
if self.isOptionValid('layer'):
self._vtkrenderer.SetLayer(self.getOption('layer'))
# Viewport
if self.isOptionValid('viewport'):
self._vtkrenderer.SetViewport(self.getOption('viewport'))
# Background (only gets applied if layer=0)
self._vtkrenderer.SetBackground(self.getOption('background'))
if self.isOptionValid('background2'):
self._vtkrenderer.SetBackground2(self.getOption('background2'))
if self.isOptionValid('gradient_background'):
self._vtkrenderer.SetGradientBackground(self.getOption('gradient_background'))
# Camera
if self.isOptionValid('camera'):
self._vtkrenderer.SetActiveCamera(self.getOption('camera'))
# Headlight
if self.isOptionValid('light'):
self._vtklight.SetIntensity(1.5)
self._vtkrenderer.AddLight(self._vtklight)
|
3,757 |
render
|
from __future__ import annotations
import warnings
from typing import Any
import gymnasium.spaces
import numpy as np
from pettingzoo.utils.env import ActionType, AECEnv, AgentID, ObsType
class BaseWrapper(AECEnv[AgentID, ObsType, ActionType]):
"""Creates a wrapper around `env` parameter.
All AECEnv wrappers should inherit from this base class
"""
def __init__(self, env: AECEnv[AgentID, ObsType, ActionType]):
super().__init__()
self.env = env
try:
self.possible_agents = self.env.possible_agents
except AttributeError:
pass
self.metadata = self.env.metadata
# we don't want these defined as we don't want them used before they are gotten
# self.agent_selection = self.env.agent_selection
# self.rewards = self.env.rewards
# self.dones = self.env.dones
# we don't want to care one way or the other whether environments have an infos or not before reset
try:
self.infos = self.env.infos
except AttributeError:
pass
# Not every environment has the .state_space attribute implemented
try:
self.state_space = (
self.env.state_space # pyright: ignore[reportGeneralTypeIssues]
)
except AttributeError:
pass
def __getattr__(self, name: str) -> Any:
"""Returns an attribute with ``name``, unless ``name`` starts with an underscore."""
if name.startswith("_"):
raise AttributeError(f"accessing private attribute '{name}' is prohibited")
return getattr(self.env, name)
@property
def observation_spaces(self) -> dict[AgentID, gymnasium.spaces.Space]:
warnings.warn(
"The `observation_spaces` dictionary is deprecated. Use the `observation_space` function instead."
)
try:
return {
agent: self.observation_space(agent) for agent in self.possible_agents
}
except AttributeError as e:
raise AttributeError(
"The base environment does not have an `observation_spaces` dict attribute. Use the environment's `observation_space` method instead"
) from e
@property
def action_spaces(self) -> dict[AgentID, gymnasium.spaces.Space]:
warnings.warn(
"The `action_spaces` dictionary is deprecated. Use the `action_space` function instead."
)
try:
return {agent: self.action_space(agent) for agent in self.possible_agents}
except AttributeError as e:
raise AttributeError(
"The base environment does not have an action_spaces dict attribute. Use the environment's `action_space` method instead"
) from e
def observation_space(self, agent: AgentID) -> gymnasium.spaces.Space:
return self.env.observation_space(agent)
def action_space(self, agent: AgentID) -> gymnasium.spaces.Space:
return self.env.action_space(agent)
@property
def unwrapped(self) -> AECEnv:
return self.env.unwrapped
def close(self) -> None:
self.env.close()
def METHOD_NAME(self) -> None | np.ndarray | str | list:
return self.env.METHOD_NAME()
def reset(self, seed: int | None = None, options: dict | None = None):
self.env.reset(seed=seed, options=options)
self.agent_selection = self.env.agent_selection
self.rewards = self.env.rewards
self.terminations = self.env.terminations
self.truncations = self.env.truncations
self.infos = self.env.infos
self.agents = self.env.agents
self._cumulative_rewards = self.env._cumulative_rewards
def observe(self, agent: AgentID) -> ObsType | None:
return self.env.observe(agent)
def state(self) -> np.ndarray:
return self.env.state()
def step(self, action: ActionType) -> None:
self.env.step(action)
self.agent_selection = self.env.agent_selection
self.rewards = self.env.rewards
self.terminations = self.env.terminations
self.truncations = self.env.truncations
self.infos = self.env.infos
self.agents = self.env.agents
self._cumulative_rewards = self.env._cumulative_rewards
def __str__(self) -> str:
"""Returns a name which looks like: "max_observation<space_invaders_v1>"."""
return f"{type(self).__name__}<{str(self.env)}>"
|
3,758 |
set up
|
# -----------------------------------------------------------------------------
# Getting Things GNOME! - a personal organizer for the GNOME desktop
# Copyright (c) 2008-2015 - Lionel Dricot & Bertrand Rousseau
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
from unittest import TestCase
import configparser
from mock import patch, mock_open, Mock
from GTG.core.config import open_config_file, SectionConfig
class TestOpenConfigFile(TestCase):
def METHOD_NAME(self):
self.mock_parser = patch(
'GTG.core.config.configparser.ConfigParser.read').start()
self.mock_os = patch('GTG.core.config.os').start()
self.mock_path = patch('GTG.core.config.os.path').start()
self.mock_open = patch(
'GTG.core.config.open', mock_open(), create=True).start()
def tearDown(self):
patch.stopall()
def test_reads_configuration(self):
open_config_file('gtg.conf')
self.mock_parser.assert_called_once_with('gtg.conf')
@patch('GTG.core.config.log')
def test_falls_back_when_there_is_config_error(self, mock_log):
self.mock_parser.side_effect = configparser.Error()
open_config_file('gtg.conf')
self.mock_parser.assert_called_once_with('gtg.conf')
self.assertTrue(mock_log.warning.called)
def test_creates_config_folder_when_missing(self):
self.mock_path.exists.return_value = False
self.mock_path.dirname.return_value = 'config'
open_config_file('config/gtg.conf')
self.mock_os.makedirs.assert_called_once_with('config')
def test_creates_config_file_when_missing(self):
self.mock_path.exists.side_effect = lambda name: name != 'gtg.conf'
open_config_file('gtg.conf')
self.mock_open.assert_called_with('gtg.conf', 'w')
def test_raises_error_when_config_is_not_accessible(self):
self.mock_os.access.return_value = False
with self.assertRaises(Exception):
open_config_file('gtg.conf')
class TestSectionConfig(TestCase):
def make_section_config(self, config_dict):
""" Creates a section from a dictionary """
config = configparser.ConfigParser()
config.read_dict({'section': config_dict})
return config['section']
@patch('GTG.core.config.log')
def test_warns_when_no_default_value_is_provided(self, mock_log):
config = self.make_section_config({'option': '1'})
section = SectionConfig('Name', config, {}, Mock())
value = section.get('option')
self.assertEqual('1', value)
@patch('GTG.core.config.log')
def test_warns_when_value_is_wrong_type(self, mock_log):
config = self.make_section_config({'option': 'text'})
section = SectionConfig('Name', config, {'option': 42}, Mock())
value = section.get('option')
self.assertTrue(mock_log.warning.called)
# It should fall back to default value as 'text' is not an int
self.assertEqual(42, value)
def test_returns_int_when_expected_int(self):
config = self.make_section_config({'option': '42'})
section = SectionConfig('Name', config, {'option': 42}, Mock())
value = section.get('option')
self.assertEqual(int, type(value))
self.assertEqual(42, value)
def test_returns_bool_when_expected_bool(self):
config = self.make_section_config({'option': 'False'})
section = SectionConfig('Name', config, {'option': False}, Mock())
value = section.get('option')
self.assertEqual(bool, type(value))
self.assertEqual(False, value)
def test_returns_string_when_expected_string(self):
config = self.make_section_config({'option': 'Hello'})
section = SectionConfig('Name', config, {'option': 'World'}, Mock())
value = section.get('option')
self.assertEqual(str, type(value))
self.assertEqual('Hello', value)
def test_returns_empty_list_for_non_existing_value(self):
config = self.make_section_config({})
section = SectionConfig('Name', config, {'option': []}, Mock())
value = section.get('option')
self.assertEqual([], value)
def test_returns_empty_list_for_empty_value(self):
config = self.make_section_config({'option': ''})
section = SectionConfig('Name', config, {'option': []}, Mock())
value = section.get('option')
self.assertEqual([], value)
def test_returns_list_from_previous_configuration(self):
# Config from GTG 0.2.4
config = self.make_section_config({
'opened_tasks': '8@1, 6@1, 4@1'})
section = SectionConfig('Name', config, {'opened_tasks': []}, Mock())
value = section.get('opened_tasks')
self.assertEqual(['8@1', '6@1', '4@1'], value)
def test_returns_empty_list_from_previous_empty_configuration(self):
# Config from GTG 0.2.4
config = self.make_section_config({
'opened_tasks': ','})
section = SectionConfig('Name', config, {'opened_tasks': []}, Mock())
value = section.get('opened_tasks')
self.assertEqual([], value)
def test_returns_list_of_tuples(self):
# Splitting only by ',' caused bugs
# - https://bugs.launchpad.net/gtg/+bug/1218093
# - https://bugs.launchpad.net/gtg/+bug/1216807
config = self.make_section_config({
'collapsed_tasks': "('0@1', '6@1'),('0@1', '8@1', '3@1', '5@1')"})
section = SectionConfig(
'Name', config, {'collapsed_tasks': []}, Mock())
value = section.get('collapsed_tasks')
self.assertEqual(
["('0@1', '6@1')", "('0@1', '8@1', '3@1', '5@1')"],
value)
@patch('GTG.core.config.log')
def test_raises_an_error_when_no_value_and_no_default_value(
self, mock_log):
config = self.make_section_config({})
section = SectionConfig('Name', config, {}, Mock())
with self.assertRaises(ValueError):
section.get('option')
def test_can_set_value(self):
config = self.make_section_config({})
save_mock = Mock()
section = SectionConfig('Name', config, {}, save_mock)
section.set('option', 42)
self.assertEqual('42', config['option'])
# Automatically saved value
save_mock.assert_any_call()
def test_can_set_list(self):
config = self.make_section_config({})
save_mock = Mock()
section = SectionConfig('Name', config, {}, save_mock)
section.set('list', [1, True, 'Hello'])
self.assertEqual('1,True,Hello', config['list'])
# Automatically saved value
save_mock.assert_any_call()
def test_can_set_tuple(self):
config = self.make_section_config({})
save_mock = Mock()
section = SectionConfig('Name', config, {}, save_mock)
section.set('list', (1, 2))
self.assertEqual('1,2', config['list'])
# Automatically saved value
save_mock.assert_any_call()
|
3,759 |
test named only with types
|
import unittest
from enum import Enum
from robot.running.arguments.argumentspec import ArgumentSpec, ArgInfo
from robot.utils.asserts import assert_equal
class TestStringRepr(unittest.TestCase):
def test_empty(self):
self._verify('')
def test_normal(self):
self._verify('a, b', ['a', 'b'])
def test_non_ascii_names(self):
self._verify('nön, äscii', ['nön', 'äscii'])
def test_default(self):
self._verify('a, b=c', ['a', 'b'], defaults={'b': 'c'})
self._verify('nön=äscii', ['nön'], defaults={'nön': 'äscii'})
self._verify('i=42', ['i'], defaults={'i': 42})
def test_default_as_bytes(self):
self._verify('b=ytes', ['b'], defaults={'b': b'ytes'})
self._verify('ä=\\xe4', ['ä'], defaults={'ä': b'\xe4'})
def test_type_as_class(self):
self._verify('a: int, b: bool', ['a', 'b'], types={'a': int, 'b': bool})
def test_type_as_string(self):
self._verify('a: Integer, b: Boolean', ['a', 'b'],
types={'a': 'Integer', 'b': 'Boolean'})
def test_type_and_default(self):
self._verify('arg: int = 1', ['arg'], types=[int], defaults={'arg': 1})
def test_positional_only(self):
self._verify('a, /', positional_only=['a'])
self._verify('a, /, b', positional_only=['a'], positional_or_named=['b'])
def test_positional_only_with_default(self):
self._verify('a, b=2, /', positional_only=['a', 'b'], defaults={'b': 2})
def test_positional_only_with_type(self):
self._verify('a: int, b, /', positional_only=['a', 'b'], types=[int])
self._verify('a: int, b: float, /, c: bool, d',
positional_only=['a', 'b'],
positional_or_named=['c', 'd'],
types=[int, float, bool])
def test_positional_only_with_type_and_default(self):
self._verify('a: int = 1, b=2, /',
positional_only=['a', 'b'],
types={'a': int},
defaults={'a': 1, 'b': 2})
def test_varargs(self):
self._verify('*varargs',
var_positional='varargs')
self._verify('a, *b',
positional_or_named=['a'],
var_positional='b')
def test_varargs_with_type(self):
self._verify('*varargs: float',
var_positional='varargs',
types={'varargs': float})
self._verify('a: int, *b: list[int]',
positional_or_named=['a'],
var_positional='b',
types=[int, 'list[int]'])
def test_named_only_without_varargs(self):
self._verify('*, kwo',
named_only=['kwo'])
def test_named_only_with_varargs(self):
self._verify('*varargs, k1, k2',
var_positional='varargs',
named_only=['k1', 'k2'])
def test_named_only_with_default(self):
self._verify('*, k=1, w, o=3',
named_only=['k', 'w', 'o'],
defaults={'k': 1, 'o': 3})
def METHOD_NAME(self):
self._verify('*, k: int, w: float, o',
named_only=['k', 'w', 'o'],
types=[int, float])
self._verify('x: int, *y: float, z: bool',
positional_or_named=['x'],
var_positional='y',
named_only=['z'],
types=[int, float, bool])
def test_named_only_with_types_and_defaults(self):
self._verify('x: int = 1, *, y: float, z: bool = 3',
positional_or_named=['x'],
named_only=['y', 'z'],
types=[int, float, bool],
defaults={'x': 1, 'z': 3})
def test_kwargs(self):
self._verify('**kws',
var_named='kws')
self._verify('a, b=c, *d, e=f, g, **h',
positional_or_named=['a', 'b'],
var_positional='d',
named_only=['e', 'g'],
var_named='h',
defaults={'b': 'c', 'e': 'f'})
def test_kwargs_with_types(self):
self._verify('**kws: dict[str, int]',
var_named='kws',
types={'kws': 'dict[str, int]'})
self._verify('a: int, /, b: float, *c: list[int], d: bool, **e: dict[int, str]',
positional_only=['a'],
positional_or_named=['b'],
var_positional='c',
named_only=['d'],
var_named='e',
types=[int, float, 'list[int]', bool, 'dict[int, str]'])
def test_enum_with_few_members(self):
class Small(Enum):
ONLY_FEW_MEMBERS = 1
SO_THEY_CAN = 2
BE_PRETTY_LONG = 3
self._verify('e: Small',
['e'], types=[Small])
def test_enum_with_many_short_members(self):
class ManyShort(Enum):
ONE = 1
TWO = 2
THREE = 3
FOUR = 4
FIVE = 5
SIX = 6
self._verify('e: ManyShort',
['e'], types=[ManyShort])
def test_enum_with_many_long_members(self):
class Big(Enum):
MANY_MEMBERS = 1
THAT_ARE_LONGISH = 2
MEANS_THEY_ALL_DO_NOT_FIT = 3
AND_SOME_ARE_OMITTED = 4
FROM_THE_END = 5
self._verify('e: Big',
['e'], types=[Big])
def _verify(self, expected, positional_or_named=None, **config):
spec = ArgumentSpec(positional_or_named=positional_or_named, **config)
assert_equal(str(spec), expected)
assert_equal(bool(spec), bool(expected))
class TestArgInfo(unittest.TestCase):
def test_required_without_default(self):
for kind in (ArgInfo.POSITIONAL_ONLY,
ArgInfo.POSITIONAL_OR_NAMED,
ArgInfo.NAMED_ONLY):
assert_equal(ArgInfo(kind).required, True)
assert_equal(ArgInfo(kind, default=None).required, False)
def test_never_required(self):
for kind in (ArgInfo.VAR_POSITIONAL,
ArgInfo.VAR_NAMED,
ArgInfo.POSITIONAL_ONLY_MARKER,
ArgInfo.NAMED_ONLY_MARKER):
assert_equal(ArgInfo(kind).required, False)
if __name__ == '__main__':
unittest.main()
|
3,760 |
test request invalid key not base64
|
import contextlib
import unittest
from websockets.datastructures import Headers
from websockets.exceptions import (
InvalidHandshake,
InvalidHeader,
InvalidHeaderValue,
InvalidUpgrade,
)
from websockets.legacy.handshake import *
from websockets.utils import accept_key
class HandshakeTests(unittest.TestCase):
def test_round_trip(self):
request_headers = Headers()
request_key = build_request(request_headers)
response_key = check_request(request_headers)
self.assertEqual(request_key, response_key)
response_headers = Headers()
build_response(response_headers, response_key)
check_response(response_headers, request_key)
@contextlib.contextmanager
def assertValidRequestHeaders(self):
"""
Provide request headers for modification.
Assert that the transformation kept them valid.
"""
headers = Headers()
build_request(headers)
yield headers
check_request(headers)
@contextlib.contextmanager
def assertInvalidRequestHeaders(self, exc_type):
"""
Provide request headers for modification.
Assert that the transformation made them invalid.
"""
headers = Headers()
build_request(headers)
yield headers
assert issubclass(exc_type, InvalidHandshake)
with self.assertRaises(exc_type):
check_request(headers)
def test_request_invalid_connection(self):
with self.assertInvalidRequestHeaders(InvalidUpgrade) as headers:
del headers["Connection"]
headers["Connection"] = "Downgrade"
def test_request_missing_connection(self):
with self.assertInvalidRequestHeaders(InvalidUpgrade) as headers:
del headers["Connection"]
def test_request_additional_connection(self):
with self.assertValidRequestHeaders() as headers:
headers["Connection"] = "close"
def test_request_invalid_upgrade(self):
with self.assertInvalidRequestHeaders(InvalidUpgrade) as headers:
del headers["Upgrade"]
headers["Upgrade"] = "socketweb"
def test_request_missing_upgrade(self):
with self.assertInvalidRequestHeaders(InvalidUpgrade) as headers:
del headers["Upgrade"]
def test_request_additional_upgrade(self):
with self.assertInvalidRequestHeaders(InvalidUpgrade) as headers:
headers["Upgrade"] = "socketweb"
def METHOD_NAME(self):
with self.assertInvalidRequestHeaders(InvalidHeaderValue) as headers:
del headers["Sec-WebSocket-Key"]
headers["Sec-WebSocket-Key"] = "!@#$%^&*()"
def test_request_invalid_key_not_well_padded(self):
with self.assertInvalidRequestHeaders(InvalidHeaderValue) as headers:
del headers["Sec-WebSocket-Key"]
headers["Sec-WebSocket-Key"] = "CSIRmL8dWYxeAdr/XpEHRw"
def test_request_invalid_key_not_16_bytes_long(self):
with self.assertInvalidRequestHeaders(InvalidHeaderValue) as headers:
del headers["Sec-WebSocket-Key"]
headers["Sec-WebSocket-Key"] = "ZLpprpvK4PE="
def test_request_missing_key(self):
with self.assertInvalidRequestHeaders(InvalidHeader) as headers:
del headers["Sec-WebSocket-Key"]
def test_request_additional_key(self):
with self.assertInvalidRequestHeaders(InvalidHeader) as headers:
# This duplicates the Sec-WebSocket-Key header.
headers["Sec-WebSocket-Key"] = headers["Sec-WebSocket-Key"]
def test_request_invalid_version(self):
with self.assertInvalidRequestHeaders(InvalidHeaderValue) as headers:
del headers["Sec-WebSocket-Version"]
headers["Sec-WebSocket-Version"] = "42"
def test_request_missing_version(self):
with self.assertInvalidRequestHeaders(InvalidHeader) as headers:
del headers["Sec-WebSocket-Version"]
def test_request_additional_version(self):
with self.assertInvalidRequestHeaders(InvalidHeader) as headers:
# This duplicates the Sec-WebSocket-Version header.
headers["Sec-WebSocket-Version"] = headers["Sec-WebSocket-Version"]
@contextlib.contextmanager
def assertValidResponseHeaders(self, key="CSIRmL8dWYxeAdr/XpEHRw=="):
"""
Provide response headers for modification.
Assert that the transformation kept them valid.
"""
headers = Headers()
build_response(headers, key)
yield headers
check_response(headers, key)
@contextlib.contextmanager
def assertInvalidResponseHeaders(self, exc_type, key="CSIRmL8dWYxeAdr/XpEHRw=="):
"""
Provide response headers for modification.
Assert that the transformation made them invalid.
"""
headers = Headers()
build_response(headers, key)
yield headers
assert issubclass(exc_type, InvalidHandshake)
with self.assertRaises(exc_type):
check_response(headers, key)
def test_response_invalid_connection(self):
with self.assertInvalidResponseHeaders(InvalidUpgrade) as headers:
del headers["Connection"]
headers["Connection"] = "Downgrade"
def test_response_missing_connection(self):
with self.assertInvalidResponseHeaders(InvalidUpgrade) as headers:
del headers["Connection"]
def test_response_additional_connection(self):
with self.assertValidResponseHeaders() as headers:
headers["Connection"] = "close"
def test_response_invalid_upgrade(self):
with self.assertInvalidResponseHeaders(InvalidUpgrade) as headers:
del headers["Upgrade"]
headers["Upgrade"] = "socketweb"
def test_response_missing_upgrade(self):
with self.assertInvalidResponseHeaders(InvalidUpgrade) as headers:
del headers["Upgrade"]
def test_response_additional_upgrade(self):
with self.assertInvalidResponseHeaders(InvalidUpgrade) as headers:
headers["Upgrade"] = "socketweb"
def test_response_invalid_accept(self):
with self.assertInvalidResponseHeaders(InvalidHeaderValue) as headers:
del headers["Sec-WebSocket-Accept"]
other_key = "1Eq4UDEFQYg3YspNgqxv5g=="
headers["Sec-WebSocket-Accept"] = accept_key(other_key)
def test_response_missing_accept(self):
with self.assertInvalidResponseHeaders(InvalidHeader) as headers:
del headers["Sec-WebSocket-Accept"]
def test_response_additional_accept(self):
with self.assertInvalidResponseHeaders(InvalidHeader) as headers:
# This duplicates the Sec-WebSocket-Accept header.
headers["Sec-WebSocket-Accept"] = headers["Sec-WebSocket-Accept"]
|
3,761 |
get path
|
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Standard routines for handling extensions.
Extensions currently include external methods.
"""
import os
from functools import total_ordering
import Products
from zExceptions import NotFound
@total_ordering
class FuncCode:
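    # Comparable summary of a function's argument count and names; im gives the number of leading arguments to skip (e.g. 'self' for bound methods).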
def __init__(self, f, im=0):
self.co_varnames = f.__code__.co_varnames[im:]
self.co_argcount = f.__code__.co_argcount - im
def __eq__(self, other):
if not isinstance(other, FuncCode):
return False
return (self.co_argcount, self.co_varnames) == \
(other.co_argcount, other.co_varnames)
def __lt__(self, other):
if not isinstance(other, FuncCode):
return False
return (self.co_argcount, self.co_varnames) < \
(other.co_argcount, other.co_varnames)
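# Search helper: try name (with each suffix in turn) under os.path.join(home, prefix) and return the first existing path, else None.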
def METHOD_NAME(home, prefix, name, suffixes):
dir = os.path.join(home, prefix)
if dir == prefix:
raise ValueError('The prefix, %s, should be a relative path' % prefix)
fn = os.path.join(dir, name)
if fn == name:
# Paranoia
raise ValueError(
'The file name, %s, should be a simple file name' % name)
for suffix in suffixes:
if suffix:
fqn = f"{fn}.{suffix}"
else:
fqn = fn
if os.path.exists(fqn):
return fqn
def getPath(prefix, name, checkProduct=1, suffixes=('',), cfg=None):
"""Find a file in one of several relative locations
Arguments:
prefix -- The location, relative to some home, to look for the
file
name -- The name of the file. This must not be a path.
checkProduct -- a flag indicating whether product directories
        should be used as additional home areas to be searched. This
defaults to a true value.
If this is true and the name contains a dot, then the
text before the dot is treated as a product name and
        the product package directory is used as another home.
      suffixes -- a sequence of file suffixes to check.
By default, the name is used without a suffix.
cfg -- ease testing (not part of the API)
    The search considers multiple homes: the instance home,
    the directory containing the directory containing the software
    home, and possibly product areas.
"""
dir, ignored = os.path.split(name)
if dir:
raise ValueError(
'The file name, %s, should be a simple file name' % name)
if checkProduct:
dot = name.find('.')
if dot > 0:
product = name[:dot]
extname = name[dot + 1:]
for product_dir in Products.__path__:
found = METHOD_NAME(product_dir, os.path.join(product, prefix),
extname, suffixes)
if found is not None:
return found
if cfg is None:
import App.config
cfg = App.config.getConfiguration()
if prefix == "Extensions" and getattr(cfg, 'extensions', None) is not None:
found = METHOD_NAME(cfg.extensions, '', name, suffixes)
if found is not None:
return found
locations = [cfg.instancehome]
for home in locations:
found = METHOD_NAME(home, prefix, name, suffixes)
if found is not None:
return found
try:
dot = name.rfind('.')
if dot > 0:
realName = name[dot + 1:]
toplevel = name[:dot]
rdot = toplevel.rfind('.')
if rdot > -1:
module = __import__(
toplevel, globals(), {}, toplevel[rdot + 1:])
else:
module = __import__(toplevel)
prefix = os.path.join(module.__path__[0], prefix, realName)
for suffix in suffixes:
if suffix:
fn = f"{prefix}.{suffix}"
else:
fn = prefix
if os.path.exists(fn):
return fn
except Exception:
pass
_modules = {} # cache
def getObject(module, name, reload=0):
# The use of _modules here is not thread safe, however, there is
# no real harm in a race condition here. If two threads
# update the cache, then one will have simply worked a little
# harder than need be. So, in this case, we won't incur
# the expense of a lock.
old = _modules.get(module)
if old is not None and name in old and not reload:
return old[name]
base, ext = os.path.splitext(module)
    if ext == '.py':
        # splitext() keeps the '.' with the extension; strip a trailing '.py'
prefix = base
else:
prefix = module
path = getPath('Extensions', prefix, suffixes=('', 'py'))
if path is None:
raise NotFound(
"The specified module, '%s', couldn't be found." % module)
__traceback_info__ = path, module
try:
with open(path) as f:
execsrc = f.read()
except Exception:
raise NotFound("The specified module, '%s', "
"couldn't be opened." % module)
execcode = compile(execsrc, path, 'exec')
module_dict = {}
exec(execcode, module_dict)
if old is not None:
# XXX Accretive??
old.update(module_dict)
else:
_modules[module] = module_dict
try:
return module_dict[name]
except KeyError:
raise NotFound("The specified object, '%s', was not found "
"in module, '%s'." % (name, module))
|
3,762 |
get mfr id
|
#
# Sample pddf_psuutil file
#
# All the supported PSU SysFS attributes are
#- psu_present
#- psu_model_name
#- psu_power_good
#- psu_mfr_id
#- psu_serial_num
#- psu_fan_dir
#- psu_v_out
#- psu_i_out
#- psu_p_out
#- psu_fan1_speed_rpm
#
import os.path
import sys
sys.path.append('/usr/share/sonic/platform/plugins')
import pddfparse
import json
try:
from sonic_psu.psu_base import PsuBase
except ImportError as e:
raise ImportError(str(e) + "- required module not found")
class PsuUtil(PsuBase):
"""PDDF generic PSU util class"""
def __init__(self):
PsuBase.__init__(self)
global pddf_obj
global plugin_data
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)) + '/../pddf/pd-plugin.json')) as pd:
plugin_data = json.load(pd)
pddf_obj = pddfparse.PddfParse()
self.platform = pddf_obj.get_platform()
def get_num_psus(self):
return int(self.platform['num_psus'])
def get_psu_status(self, index):
if index is None:
return False
device = "PSU" + "%d" % index
output = pddf_obj.get_attr_name_output(device, "psu_power_good")
if not output:
return False
mode = output['mode']
val = output['status']
val = val.rstrip()
vmap = plugin_data['PSU']['psu_power_good'][mode]['valmap']
if val in vmap:
return vmap[val]
else:
return False
def get_psu_presence(self, index):
if index is None:
return False
status = 0
device = "PSU" + "%d" % index
output = pddf_obj.get_attr_name_output(device, "psu_present")
if not output:
return False
mode = output['mode']
status = output['status']
vmap = plugin_data['PSU']['psu_present'][mode]['valmap']
if status.rstrip('\n') in vmap:
return vmap[status.rstrip('\n')]
else:
return False
def get_powergood_status(self, idx):
if idx is None:
return False
if idx < 1 or idx > self.platform['num_psus']:
print("Invalid index %d\n" % idx)
return False
device = "PSU"+"%d" % (idx)
output = pddf_obj.get_attr_name_output(device, "psu_power_good")
if not output:
return False
mode = output['mode']
status = output['status']
vmap = plugin_data['PSU']['psu_power_good'][mode]['valmap']
if status.rstrip('\n') in vmap:
return vmap[status.rstrip('\n')]
else:
return False
def get_model(self, idx):
if idx is None:
return None
if idx < 1 or idx > self.platform['num_psus']:
print("Invalid index %d\n" % idx)
return None
device = "PSU"+"%d" % (idx)
output = pddf_obj.get_attr_name_output(device, "psu_model_name")
if not output:
return None
model = output['status']
# strip_non_ascii
stripped = (c for c in model if 0 < ord(c) < 127)
model = ''.join(stripped)
return model.rstrip('\n')
def METHOD_NAME(self, idx):
if idx is None:
return None
if idx < 1 or idx > self.platform['num_psus']:
print("Invalid index %d\n" % idx)
return None
device = "PSU"+"%d" % (idx)
output = pddf_obj.get_attr_name_output(device, "psu_mfr_id")
if not output:
return None
mfr = output['status']
return mfr.rstrip('\n')
def get_serial(self, idx):
if idx is None:
return None
if idx < 1 or idx > self.platform['num_psus']:
print("Invalid index %d\n" % idx)
return None
device = "PSU"+"%d" % (idx)
output = pddf_obj.get_attr_name_output(device, "psu_serial_num")
if not output:
return None
serial = output['status']
return serial.rstrip('\n')
def get_direction(self, idx):
if idx is None:
return None
if idx < 1 or idx > self.platform['num_psus']:
print("Invalid index %d\n" % idx)
return None
device = "PSU"+"%d" % (idx)
output = pddf_obj.get_attr_name_output(device, "psu_fan_dir")
if not output:
return None
mode = output['mode']
direction = output['status'].rstrip('\n')
vmap = plugin_data['PSU']['psu_fan_dir'][mode]['valmap']
if direction in vmap:
airflow_dir_real = vmap[direction]
else:
airflow_dir_real = direction
return airflow_dir_real
def get_output_voltage(self, idx):
if idx is None:
return 0.0
if idx < 1 or idx > self.platform['num_psus']:
print("Invalid index %d\n" % idx)
return 0.0
device = "PSU"+"%d" % (idx)
output = pddf_obj.get_attr_name_output(device, "psu_v_out")
if not output:
return 0.0
v_out = output['status']
# value returned by the psu driver is in mV
return float(v_out)/1000
def get_output_current(self, idx):
if idx is None:
return 0.0
if idx < 1 or idx > self.platform['num_psus']:
print("Invalid index %d\n" % idx)
return 0.0
device = "PSU"+"%d" % (idx)
output = pddf_obj.get_attr_name_output(device, "psu_i_out")
if not output:
return 0.0
i_out = output['status']
# current in mA
return float(i_out)/1000
def get_output_power(self, idx):
if idx is None:
return 0.0
if idx < 1 or idx > self.platform['num_psus']:
print("Invalid index %d\n" % idx)
return 0.0
device = "PSU"+"%d" % (idx)
output = pddf_obj.get_attr_name_output(device, "psu_p_out")
if not output:
return 0.0
p_out = output['status']
# power is returned in micro watts
return float(p_out)/1000000
def get_fan_rpm(self, idx, fan_idx):
if idx is None or fan_idx is None:
return 0
if idx < 1 or idx > self.platform['num_psus']:
print("Invalid index %d\n" % idx)
return 0
device = "PSU"+"%d" % (idx)
num_fans = pddf_obj.get_num_psu_fans(device)
if fan_idx < 1 or fan_idx > num_fans:
print("Invalid PSU-fan index %d\n" % fan_idx)
return 0
output = pddf_obj.get_attr_name_output(device, "psu_fan"+str(fan_idx)+"_speed_rpm")
if not output:
return 0
#mode = output['mode']
output['status'] = output['status'].rstrip()
if output['status'].isalpha():
return 0
else:
speed = int(output['status'])
return speed
def dump_sysfs(self):
return pddf_obj.cli_dump_dsysfs('psu')
|
3,763 |
assert redirect success
|
import json
import re
from unittest.mock import patch
from django.core.exceptions import ImproperlyConfigured
from django.http import Http404
from django.test import RequestFactory, TestCase, override_settings
from django.urls import reverse
from core.govdelivery import MockGovDelivery
from core.views import (
CacheTaggedTemplateView,
ExternalURLNoticeView,
TranslatedTemplateView,
govdelivery_subscribe,
)
class GovDeliverySubscribeTest(TestCase):
def setUp(self):
self.factory = RequestFactory()
def post(self, post, ajax=False):
kwargs = {"HTTP_X_REQUESTED_WITH": "XMLHttpRequest"} if ajax else {}
request = self.factory.post(reverse("govdelivery"), post, **kwargs)
return govdelivery_subscribe(request)
def assertRedirect(self, response, redirect):
self.assertEqual(
(response["Location"], response.status_code),
(reverse(redirect), 302),
)
def METHOD_NAME(self, response):
self.assertRedirect(response, "govdelivery:success")
def assertRedirectUserError(self, response):
self.assertRedirect(response, "govdelivery:user_error")
def assertRedirectServerError(self, response):
self.assertRedirect(response, "govdelivery:server_error")
def assertJSON(self, response, result):
self.assertEqual(
response.content.decode("utf-8"), json.dumps({"result": result})
)
def assertJSONSuccess(self, response):
return self.assertJSON(response, "pass")
def assertJSONError(self, response):
return self.assertJSON(response, "fail")
def check_post(self, post, response_check, ajax=False):
response_check(self.post(post, ajax=ajax))
def check_subscribe(
self, response_check, ajax=False, include_answers=False
):
post = {
"code": "FAKE_CODE",
"email": "[email protected]",
}
answers = [
("batman", "robin"),
("hello", "goodbye"),
]
if include_answers:
post.update({"questionid_" + q: a for q, a in answers})
self.check_post(post, response_check, ajax=ajax)
self.assertEqual(
MockGovDelivery.calls[0],
(
"set_subscriber_topics",
(post["email"], [post["code"]]),
{"send_notifications": True},
),
)
if include_answers:
for i, (q, a) in enumerate(answers):
self.assertEqual(
MockGovDelivery.calls[i + 1],
(
"set_subscriber_answers_to_question",
(post["email"], q, a),
{},
),
)
def test_missing_email_address(self):
post = {"code": "FAKE_CODE"}
self.check_post(post, self.assertRedirectUserError)
def test_missing_gd_code(self):
post = {"email": "[email protected]"}
self.check_post(post, self.assertRedirectUserError)
def test_missing_email_address_ajax(self):
post = {"code": "FAKE_CODE"}
self.check_post(post, self.assertJSONError, ajax=True)
def test_missing_gd_code_ajax(self):
post = {"email": "[email protected]"}
self.check_post(post, self.assertJSONError, ajax=True)
def test_successful_subscribe(self):
self.check_subscribe(self.METHOD_NAME)
def test_successful_subscribe_ajax(self):
self.check_subscribe(self.assertJSONSuccess, ajax=True)
@override_settings(
GOVDELIVERY_API="core.govdelivery.ExceptionMockGovDelivery"
)
def test_exception(self):
self.check_subscribe(self.assertRedirectServerError)
@override_settings(
GOVDELIVERY_API="core.govdelivery.ExceptionMockGovDelivery"
)
def test_exception_ajax(self):
self.check_subscribe(self.assertJSONError, ajax=True)
@override_settings(
GOVDELIVERY_API="core.govdelivery.ServerErrorMockGovDelivery"
)
def test_server_error(self):
self.check_subscribe(self.assertRedirectServerError)
@override_settings(
GOVDELIVERY_API="core.govdelivery.ServerErrorMockGovDelivery"
)
def test_server_error_ajax(self):
self.check_subscribe(self.assertJSONError, ajax=True)
def test_setting_subscriber_answers_to_questions(self):
self.check_subscribe(self.METHOD_NAME, include_answers=True)
class TestExternalURLNoticeView(TestCase):
def setUp(self):
self.factory = RequestFactory()
patched_whitelist = patch(
"core.forms.EXTERNAL_URL_ALLOWLIST",
(re.compile(r"^https:\/\/foo\.com$"),),
)
patched_whitelist.start()
self.addCleanup(patched_whitelist.stop)
def test_valid_get_returns_redirect(self):
view = ExternalURLNoticeView.as_view()
request = self.factory.get("/?ext_url=https://foo.com")
response = view(request)
self.assertEqual(response.status_code, 200)
def test_invalid_get_returns_404(self):
view = ExternalURLNoticeView.as_view()
request = self.factory.get("/?ext_url=https://bar.com")
with self.assertRaises(Http404):
view(request)
class TranslatedTemplateViewTestCase(TestCase):
def test_language_activation(self):
request = RequestFactory().get("/")
view = TranslatedTemplateView.as_view(template_name="test.html")
response = view(request)
self.assertEqual(response.context_data["current_language"], "en")
view = TranslatedTemplateView.as_view(
template_name="test.html", language="es"
)
response = view(request)
self.assertEqual(response.context_data["current_language"], "es")
class CacheTaggedTemplateViewTestCase(TestCase):
def test_cache_tag(self):
request = RequestFactory().get("/")
view = CacheTaggedTemplateView.as_view(
template_name="test.html", cache_tag="test"
)
response = view(request)
self.assertEqual(response["Edge-Cache-Tag"], "test")
def test_no_cache_tag(self):
request = RequestFactory().get("/")
view = CacheTaggedTemplateView.as_view(
template_name="test.html",
cache_tag=None,
)
with self.assertRaises(ImproperlyConfigured):
view(request)
|
3,764 |
delete
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from future import standard_library
standard_library.install_aliases()
from builtins import str
import json
import logging
import sys
import urllib.request, urllib.parse, urllib.error
from django.urls import reverse
from django.views.decorators.csrf import ensure_csrf_cookie
from desktop.lib.django_util import JsonResponse, render
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.rest.http_client import RestException
from desktop.lib.paths import SAFE_CHARACTERS_URI_COMPONENTS
from desktop.models import Document
from oozie.views.dashboard import show_oozie_error, check_job_access_permission,\
check_job_edition_permission
from pig import api
from pig.management.commands import pig_setup
from pig.models import get_workflow_output, hdfs_link, PigScript,\
create_or_update_script, get_scripts
if sys.version_info[0] > 2:
from django.utils.translation import gettext as _
else:
from django.utils.translation import ugettext as _
LOG = logging.getLogger()
@ensure_csrf_cookie
def app(request):
autocomplete_base_url = ''
try:
autocomplete_base_url = reverse('beeswax:api_autocomplete_databases', kwargs={}) + '/'
except:
LOG.exception('failed to find autocomplete base url')
return render('app.mako', request, {
'autocomplete_base_url': autocomplete_base_url
})
def scripts(request):
return JsonResponse(get_scripts(request.user, is_design=True), safe=False)
@show_oozie_error
def dashboard(request):
pig_api = api.get(request.fs, request.jt, request.user)
jobs = pig_api.get_jobs()
hue_jobs = Document.objects.available(PigScript, request.user, with_history=True)
massaged_jobs = pig_api.massaged_jobs_for_json(request, jobs, hue_jobs)
return JsonResponse(massaged_jobs, safe=False)
def save(request):
if request.method != 'POST':
raise PopupException(_('POST request required.'))
attrs = {
'id': request.POST.get('id'),
'name': request.POST.get('name'),
'script': request.POST.get('script'),
'user': request.user,
'parameters': json.loads(request.POST.get('parameters')),
'resources': json.loads(request.POST.get('resources')),
'hadoopProperties': json.loads(request.POST.get('hadoopProperties')),
}
pig_script = create_or_update_script(**attrs)
pig_script.is_design = True
pig_script.save()
response = {
'id': pig_script.id,
'docId': pig_script.doc.get().id
}
return JsonResponse(response, content_type="text/plain")
@show_oozie_error
def stop(request):
if request.method != 'POST':
raise PopupException(_('POST request required.'))
pig_script = PigScript.objects.get(id=request.POST.get('id'))
job_id = pig_script.dict['job_id']
job = check_job_access_permission(request, job_id)
check_job_edition_permission(job, request.user)
try:
api.get(request.fs, request.jt, request.user).stop(job_id)
except RestException as e:
    raise PopupException(_("Error stopping Pig script: %s") % e.message)
return watch(request, job_id)
@show_oozie_error
def run(request):
if request.method != 'POST':
raise PopupException(_('POST request required.'))
attrs = {
'id': request.POST.get('id'),
'name': request.POST.get('name'),
'script': request.POST.get('script'),
'user': request.user,
'parameters': json.loads(request.POST.get('parameters')),
'resources': json.loads(request.POST.get('resources')),
'hadoopProperties': json.loads(request.POST.get('hadoopProperties')),
'is_design': False
}
pig_script = create_or_update_script(**attrs)
params = request.POST.get('submissionVariables')
oozie_id = api.get(request.fs, request.jt, request.user).submit(pig_script, params)
pig_script.update_from_dict({'job_id': oozie_id})
pig_script.save()
response = {
'id': pig_script.id,
'watchUrl': reverse('pig:watch', kwargs={'job_id': oozie_id}) + '?format=python'
}
return JsonResponse(response, content_type="text/plain")
def copy(request):
if request.method != 'POST':
raise PopupException(_('POST request required.'))
pig_script = PigScript.objects.get(id=request.POST.get('id'))
doc = pig_script.doc.get()
try:
doc.can_read_or_exception(request.user)
except Exception as e:
raise PopupException(e)
existing_script_data = pig_script.dict
owner = request.user
name = existing_script_data["name"] + _(' (Copy)')
script = existing_script_data["script"]
parameters = existing_script_data["parameters"]
resources = existing_script_data["resources"]
hadoopProperties = existing_script_data["hadoopProperties"]
script_copy = PigScript.objects.create(owner=owner)
script_copy.update_from_dict({
'name': name,
'script': script,
'parameters': parameters,
'resources': resources,
'hadoopProperties': hadoopProperties
})
script_copy.save()
copy_doc = doc.copy(content_object=script_copy, name=name, owner=owner)
response = {
'id': script_copy.id,
'docId': copy_doc.id,
'name': name,
'script': script,
'parameters': parameters,
'resources': resources,
'hadoopProperties': hadoopProperties
}
return JsonResponse(response, content_type="text/plain")
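# Delete the Pig scripts whose ids are given as a comma-separated 'ids' POST parameter, along with their Hue documents.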
def METHOD_NAME(request):
if request.method != 'POST':
raise PopupException(_('POST request required.'))
ids = request.POST.get('ids').split(",")
for script_id in ids:
try:
pig_script = PigScript.objects.get(id=script_id)
pig_script.can_edit_or_exception(request.user)
pig_script.doc.all().METHOD_NAME()
pig_script.METHOD_NAME()
except:
LOG.exception('failed to delete pig script')
response = {
'ids': ids,
}
return JsonResponse(response, content_type="text/plain")
@show_oozie_error
def watch(request, job_id):
oozie_workflow = check_job_access_permission(request, job_id)
logs, workflow_actions, is_really_done = api.get(request.fs, request.jt, request.user).get_log(request, oozie_workflow)
output = get_workflow_output(oozie_workflow, request.fs)
workflow = {
'job_id': oozie_workflow.id,
'status': oozie_workflow.status,
'progress': oozie_workflow.get_progress(),
'isRunning': oozie_workflow.is_running(),
'killUrl': reverse('oozie:manage_oozie_jobs', kwargs={'job_id': oozie_workflow.id, 'action': 'kill'}),
'rerunUrl': reverse('oozie:rerun_oozie_job', kwargs={'job_id': oozie_workflow.id, 'app_path': urllib.parse.quote(oozie_workflow.appPath.encode('utf-8'), safe=SAFE_CHARACTERS_URI_COMPONENTS)}),
'actions': workflow_actions
}
response = {
'workflow': workflow,
'logs': logs,
'isReallyDone': is_really_done,
'output': hdfs_link(output)
}
return JsonResponse(response, content_type="text/plain")
def install_examples(request):
result = {'status': -1, 'message': ''}
if request.method != 'POST':
result['message'] = _('A POST request is required.')
else:
try:
pig_setup.Command().handle()
result['status'] = 0
except Exception as e:
LOG.exception(e)
result['message'] = str(e)
return JsonResponse(result)
|
3,765 |
find nearest
|
from pathlib import Path
import csv
import json
import numpy as np
import matplotlib.pyplot as plt
from one.api import ONE
import ibllib.plots as iblplt
from ibllib.time import convert_pgts, uncycle_pgts
plt.ion()
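# Return the index of the element of 'array' that is closest to 'value'.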
def METHOD_NAME(array, value):
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx
def add_stim_off_times(trials):
on = 'stimOn_times'
off = 'stimOff_times'
trials[off] = np.zeros(shape=trials[on].shape)
correct_trials = trials['feedbackType'] == 1
u = trials['feedback_times'][correct_trials] + 1.0
trials[off][correct_trials] = u
error_trials = trials['feedbackType'] == -1
v = trials['feedback_times'][error_trials] + 2.0
trials[off][error_trials] = v
def plot_pupil_diameter_single_trial(
trial_numbers,
trial_number,
diameter,
times,
trials):
a = list(trial_numbers)
first = a.index(trial_number)
last = len(a) - 1 - a[::-1].index(trial_number)
plt.plot(times[first:last], diameter[first:last])
def restrict_timestamplist(q):
li = []
for i in q:
if i > times[first] and i < times[last]:
li.append(i)
return li
iblplt.vertical_lines(restrict_timestamplist(
trials['stimOn_times']), ymin=10, ymax=20,
color='m', linewidth=0.5, label='stimOn_times')
iblplt.vertical_lines(restrict_timestamplist(
trials['feedback_times']), ymin=10, ymax=20,
color='b', linewidth=0.5, label='feedback_times')
iblplt.vertical_lines(restrict_timestamplist(
trials['stimOff_times']), ymin=10, ymax=20,
color='g', linewidth=0.5, label='stimOff_times')
plt.xlabel('Time (s)')
plt.ylabel('pupil diameter [px]')
plt.title('Trial number %s' % trial_number)
plt.legend()
plt.tight_layout()
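# Estimate the per-frame pupil diameter from the four DLC-tracked pupil edge points, masking low-likelihood detections.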
def get_pupil_diameter(alf_path):
json1_file = open(alf_path / '_ibl_leftCamera.dlc.metadata.json')
json1_str = json1_file.read()
json1_data = json.loads(json1_str)['columns']
# check order
assert json1_data[0] == 'pupil_top_r_x', 'Order is off!'
assert json1_data[11] == 'pupil_left_r_likelihood', 'Order is off!'
dlc = np.load(alf_path / '_ibl_leftCamera.dlc.npy')
K = {}
K['pupil_top_r'] = dlc[:, :3]
K['pupil_right_r'] = dlc[:, 3:6]
K['pupil_bottom_r'] = dlc[:, 6:9]
K['pupil_left_r'] = dlc[:, 9:12]
    # Set values to nan if likelihood is too low
XYs = {}
for part in K:
x = np.ma.masked_where(K[part][:, 2] < 0.9, K[part][:, 0])
x = x.filled(np.nan)
y = np.ma.masked_where(K[part][:, 2] < 0.9, K[part][:, 1])
y = y.filled(np.nan)
XYs[part] = [x, y]
# get both diameters (d1 = top - bottom, d2 = left - right)
d1 = ((XYs['pupil_top_r'][0] - XYs['pupil_bottom_r'][0])**2 +
(XYs['pupil_top_r'][1] - XYs['pupil_bottom_r'][1])**2)**0.5
d2 = ((XYs['pupil_left_r'][0] - XYs['pupil_right_r'][0])**2 +
(XYs['pupil_left_r'][1] - XYs['pupil_right_r'][1])**2)**0.5
d = np.mean([d1, d2], axis=0)
return d
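# Read the raw left-camera timestamps from the .ssv file and convert them to uncycled seconds.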
def get_timestamps_from_ssv_file(alf_path):
loc = alf_path.parent.joinpath('raw_video_data/'
'_iblrig_leftCamera.timestamps.ssv')
with open(loc, 'r') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=' ')
ssv_times = np.array([line for line in csv_reader])
return uncycle_pgts([convert_pgts(int(time)) for time in ssv_times[:, 0]])
def plot_mean_std_around_event(event, diameter, times, eid):
"""
event in {'stimOn_times', 'feedback_times', 'stimOff_times'}
"""
event_times = trials[event]
window_size = 70
segments = []
# skip first and last trials to get same window length
for t in event_times[5:-5]:
idx = METHOD_NAME(times, t)
segments.append(diameter[idx - window_size: idx + window_size])
M = np.nanmean(np.array(segments), axis=0)
E = np.nanstd(np.array(segments), axis=0)
fig, ax = plt.subplots()
ax.fill_between(
range(
len(M)),
M - E,
M + E,
alpha=0.5,
edgecolor='#CC4F1B',
facecolor='#FF9848')
plt.plot(range(len(M)), M, color='k', linewidth=3)
plt.axvline(x=window_size, color='r', linewidth=1, label=event)
plt.legend()
plt.ylabel('pupil diameter [px]')
plt.xlabel('frames')
plt.title(eid)
plt.tight_layout()
if __name__ == "__main__":
one = ONE()
# one.list(None, 'dataset-types') # to check dataset types, 'camera.times'?
# one.search(dataset=['camera.dlc.pqt', 'camera.times.npy'])
eid = '61393bca-f1ff-4e7d-b2d8-da7475219866'
camera_label = 'left'
trials = one.load_object(eid, 'trials', collection='alf')
add_stim_off_times(trials)
times = one.load_dataset(eid, f'_ibl_{camera_label}Camera.times.npy', collection='alf')
dlc_path = one.load_dataset(eid, f'_ibl_{camera_label}Camera.dlc.pqt',
collection='alf', download_only=True)
    diameter = get_pupil_diameter(one.eid2path(eid) / 'alf')
# get trial number for each time bin
trial_numbers = np.digitize(times, trials['goCue_times'])
print('Range of trials: ', [trial_numbers[0], trial_numbers[-1]])
# get a raster plot for a particular trial
# plot_pupil_diameter_single_trial(trial_numbers,
# 15, diameter, times, trials)
plot_mean_std_around_event('stimOn_times', diameter, times, eid)
plot_mean_std_around_event('feedback_times', diameter, times, eid)
# what's that stim-off times, are they reliable?
# plot_mean_std_around_event('stimOff_times', diameter, times, eid)
plt.show()
|
3,766 |
do add parser
|
# Copyright (c) 2023, Antmicro <www.antmicro.com>
#
# Based on J-Link runner
# Copyright (c) 2017 Linaro Limited.
# SPDX-License-Identifier: Apache-2.0
"""
Runner that implements flashing with SiLabs Simplicity Commander binary tool.
See SiLabs UG162: "Simplicity Commander Reference Guide" for more info.
"""
import os
import shlex
from runners.core import ZephyrBinaryRunner, RunnerCaps, FileType
DEFAULT_APP = 'commander'
class SiLabsCommanderBinaryRunner(ZephyrBinaryRunner):
def __init__(self, cfg, device, dev_id, commander, dt_flash, erase, speed, tool_opt):
super().__init__(cfg)
self.file = cfg.file
self.file_type = cfg.file_type
self.hex_name = cfg.hex_file
self.bin_name = cfg.bin_file
self.elf_name = cfg.elf_file
self.device = device
self.dev_id = dev_id
self.commander = commander
self.dt_flash = dt_flash
self.erase = erase
self.speed = speed
self.tool_opt = []
for opts in [shlex.split(opt) for opt in tool_opt]:
self.tool_opt += opts
@classmethod
def name(cls):
return 'silabs_commander'
@classmethod
def capabilities(cls):
return RunnerCaps(commands={'flash'},
dev_id=True, flash_addr=True, erase=True,
tool_opt=True, file=True)
@classmethod
def dev_id_help(cls) -> str:
return '''Device identifier. Use it to select the J-Link Serial Number
of the device connected over USB.'''
@classmethod
def tool_opt_help(cls) -> str:
return "Additional options for Simplicity Commander, e.g. '--noreset'"
@classmethod
def METHOD_NAME(cls, parser):
# Required:
parser.add_argument('--device', required=True,
help='device part number')
# Optional:
parser.add_argument('--commander', default=DEFAULT_APP,
help='path to Simplicity Commander executable')
parser.add_argument('--speed', default=None,
help='JTAG/SWD speed to use')
@classmethod
def do_create(cls, cfg, args):
return SiLabsCommanderBinaryRunner(
cfg, args.device,
dev_id=args.dev_id,
commander=args.commander,
dt_flash=args.dt_flash,
erase=args.erase,
speed=args.speed,
tool_opt=args.tool_opt)
def do_run(self, command, **kwargs):
self.require(self.commander)
opts = ['--device', self.device]
if self.erase:
opts.append('--masserase')
if self.dev_id:
opts.extend(['--serialno', self.dev_id])
if self.speed is not None:
opts.extend(['--speed', self.speed])
# Get the build artifact to flash
if self.dt_flash:
flash_addr = self.flash_address_from_build_conf(self.build_conf)
else:
flash_addr = 0
if self.file is not None:
# use file provided by the user
if not os.path.isfile(self.file):
raise ValueError(f'Cannot flash; file ({self.file}) not found')
flash_file = self.file
if self.file_type == FileType.HEX:
flash_args = [flash_file]
elif self.file_type == FileType.BIN:
flash_args = ['--binary', '--address', f'0x{flash_addr:x}', flash_file]
else:
raise ValueError('Cannot flash; this runner only supports hex and bin files')
else:
# use hex or bin file provided by the buildsystem, preferring .hex over .bin
if self.hex_name is not None and os.path.isfile(self.hex_name):
flash_file = self.hex_name
flash_args = [flash_file]
elif self.bin_name is not None and os.path.isfile(self.bin_name):
flash_file = self.bin_name
flash_args = ['--binary', '--address', f'0x{flash_addr:x}', flash_file]
else:
raise ValueError(f'Cannot flash; no hex ({self.hex_name}) or bin ({self.bin_name}) files found.')
args = [self.commander, 'flash'] + opts + self.tool_opt + flash_args
self.logger.info('Flashing file: {}'.format(flash_file))
self.check_call(args)
|
3,767 |
set up
|
from DateTime import DateTime
from plone.app.testing import TEST_USER_PASSWORD
from plone.app.z3cform.interfaces import IPloneFormLayer
from Products.CMFCore.permissions import SetOwnProperties
from Products.CMFCore.utils import getToolByName
from Products.CMFPlone.testing import PRODUCTS_CMFPLONE_FUNCTIONAL_TESTING
from zope.component import getMultiAdapter
from zope.interface import alsoProvides
import re
import time
import unittest
FORM_ID = "login"
class TestLoginForm(unittest.TestCase):
layer = PRODUCTS_CMFPLONE_FUNCTIONAL_TESTING
def METHOD_NAME(self):
self.app = self.layer["app"]
self.portal = self.layer["portal"]
self.request = self.layer["request"]
self.mt = getToolByName(self.portal, "portal_membership")
# suitable for testing z3c.form views
alsoProvides(self.request, IPloneFormLayer)
def test_login_view(self):
view = getMultiAdapter((self.portal, self.request), name="login")
self.assertTrue(view())
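    # Make the request a POST and attach a valid plone.protect CSRF token so form submissions pass validation.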
def _setup_authenticator_request(self):
self.request.set("REQUEST_METHOD", "POST")
authenticator = getMultiAdapter(
(self.portal, self.request), name="authenticator"
)
html = authenticator.authenticator()
token = re.search('value="(.*)"', html).groups()[0]
self.request.set("_authenticator", token)
def test_form_update(self):
self._setup_authenticator_request()
self.request["__ac_name"] = "test"
self.request["__ac_password"] = TEST_USER_PASSWORD
self.request["form.widgets.came_from"] = [""]
form = self.portal.restrictedTraverse(FORM_ID)
form.update()
data, errors = form.extractData()
self.assertEqual(len(errors), 0)
def test_failsafe_login_form(self):
view = getMultiAdapter((self.portal, self.request), name="failsafe_login")
html = view()
self.assertNotIn("main-container", html)
def test_failsafe_login_form_update(self):
self._setup_authenticator_request()
self.request["__ac_name"] = "test"
self.request["__ac_password"] = TEST_USER_PASSWORD
self.request["form.widgets.came_from"] = [""]
form = self.portal.restrictedTraverse("failsafe_login")
form.update()
data, errors = form.extractData()
self.assertEqual(len(errors), 0)
def test_login_external(self):
registry = self.layer["portal"].portal_registry
registry["plone.external_login_url"] = "http://testurl/extlogin"
form = self.portal.restrictedTraverse("login")
form()
self.assertEqual(
registry["plone.external_login_url"],
form.request.response.getHeader("Location"),
)
def test_login_external_with_all_params(self):
registry = self.layer["portal"].portal_registry
registry["plone.external_login_url"] = "http://testurl/extlogin?level=debug"
self.request["came_from"] = "foo"
self.request["next"] = "bar"
form = self.portal.restrictedTraverse("login")
form()
self.assertIn(
"came_from=foo",
form.request.response.getHeader("Location"),
)
self.assertIn(
"next=bar",
form.request.response.getHeader("Location"),
)
# Keep the original query string
self.assertIn(
"level=debug",
form.request.response.getHeader("Location"),
)
def test_login_external_without_next_param(self):
registry = self.layer["portal"].portal_registry
registry["plone.external_login_url"] = "http://testurl/extlogin"
self.request["came_from"] = "foo"
form = self.portal.restrictedTraverse("login")
form()
self.assertIn(
"came_from=foo",
form.request.response.getHeader("Location"),
)
def test_failsafe_login_external(self):
registry = self.layer["portal"].portal_registry
registry["plone.external_login_url"] = "http://testurl/extlogin"
form = self.portal.restrictedTraverse("failsafe_login")
html = form()
self.assertIsNotNone(html)
self.assertEqual(None, form.request.response.getHeader("Location"))
self.assertNotIn("main-container", html)
def test_login_creates_memberarea(self):
membership = self.layer["portal"].portal_membership
form = self.portal.restrictedTraverse("@@login")
if membership.memberareaCreationFlag == "True":
self.assertEqual(membership.getHomeFolder(), None)
form._post_login()
self.assertNotEqual(membership.getHomeFolder(), None)
def test_post_login_sets_login_time(self):
now = DateTime()
member = self.layer["portal"].portal_membership.getAuthenticatedMember()
self.assertTrue(DateTime(member.getProperty("login_time")) < now)
form = self.portal.restrictedTraverse("@@login")
form._post_login()
membership = self.layer["portal"].portal_membership
member = membership.getAuthenticatedMember()
self.assertTrue(DateTime(member.getProperty("login_time")) >= now)
def test_post_login_sets_last_login_time(self):
now = DateTime()
membership = self.layer["portal"].portal_membership
member = membership.getAuthenticatedMember()
self.assertTrue(DateTime(member.getProperty("last_login_time")) < now)
form = self.portal.restrictedTraverse("@@login")
form._post_login()
member = membership.getAuthenticatedMember()
self.assertTrue(DateTime(member.getProperty("last_login_time")) >= now)
def test_post_login_sets_LastLoginTime_if_member_lacks_set_own_properties_permission(
self,
): # noqa: E501
# If members lack the "Set own properties" permission, they should
# still be able to log in, and their login times should be set.
now = DateTime()
self.portal.manage_permission(SetOwnProperties, ["Manager"], acquire=0)
form = self.portal.restrictedTraverse("@@login")
form._post_login()
membership = self.layer["portal"].portal_membership
member = membership.getAuthenticatedMember()
self.assertTrue(DateTime(member.getProperty("last_login_time")) >= now)
def test_initial_login_time_does_change(self):
membership = self.layer["portal"].portal_membership
member = membership.getAuthenticatedMember()
form = self.portal.restrictedTraverse("@@login")
form._post_login()
member = membership.getAuthenticatedMember()
login_time = DateTime(member.getProperty("login_time"))
# Log in again later
time.sleep(0.2)
form._post_login()
# login_time did change
member = membership.getAuthenticatedMember()
self.assertTrue(
DateTime(member.getProperty("login_time")) > login_time,
)
def test_initial_login_time_with_string(self):
membership = self.layer["portal"].portal_membership
member = membership.getAuthenticatedMember()
# Realize the login_time is not string but DateTime
self.assertIsInstance(member.getProperty("login_time"), DateTime)
self.assertEqual(member.getProperty("login_time").Date(), "2000/01/01")
# Update login_time into string
today = DateTime().Date()
member.setProperties(login_time=today)
self.assertIsInstance(member.getProperty("login_time"), str)
self.assertEqual(member.getProperty("login_time"), today)
# Logging in set login_time with DateTime
form = self.portal.restrictedTraverse("@@login")
form._post_login()
member = membership.getAuthenticatedMember()
self.assertIsInstance(member.getProperty("login_time"), DateTime)
self.assertTrue(member.getProperty("login_time") > DateTime(today))
|
3,768 |
vi function 2 d
|
# Copyright 2022 INRIA
import numpy as np
import siconos.numerics as sn
def vi_function_1D(n, x, F):
F[0] = 1.0 + x[0]
pass
def vi_nabla_function_1D(n, x, nabla_F):
nabla_F[0] = 1.0
pass
def METHOD_NAME(n, z, F):
M = np.array([[2.0, 1.0], [1.0, 2.0]])
q = np.array([-5.0, -6.0])
F[:] = np.dot(M, z) + q
pass
def vi_nabla_function_2D(n, z, nabla_F):
M = np.array([[2.0, 1.0], [1.0, 2.0]])
nabla_F[:] = M
pass
def vi_function_3D(n, z, F):
M = np.array(((0.0, -1.0, 2.0), (2.0, 0.0, -2.0), (-1.0, 1.0, 0.0)))
q = np.array((-3.0, 6.0, -1))
F[:] = np.dot(M, z) + q
pass
def vi_nabla_function_3D(n, z, nabla_F):
M = np.array(((0.0, -1.0, 2.0), (2.0, 0.0, -2.0), (-1.0, 1.0, 0.0)))
nabla_F[:] = M
pass
# solution
xsol_1D = np.array([-1.0])
Fsol_1D = np.array([0.0])
xsol_2D = np.array([1.0, 1.0])
Fsol_2D = np.array([-2.0, -3.0])
xsol_3D = np.array((-1.0, -1.0, 1.0))
Fsol_3D = np.array((0.0, 2.0, -1.0))
# problem
# vi=N.MCP(1,1,vi_function,vi_Nablafunction)
xtol = 1e-8
def test_new():
return sn.VI(1)
def test_vi_1D():
vi = sn.VI(1, vi_function_1D)
vi.set_compute_nabla_F(vi_nabla_function_1D)
x = np.array([0.0])
F = np.array([0.0])
SO = sn.SolverOptions(sn.SICONOS_VI_BOX_QI)
lb = np.array((-1.0,))
ub = np.array((1.0,))
vi.set_box_constraints(lb, ub)
info = sn.variationalInequality_box_newton_QiLSA(vi, x, F, SO)
print(info)
print("x = ", x)
print("F = ", F)
assert np.linalg.norm(x - xsol_1D) <= xtol
assert not info
def test_vi_2D():
vi = sn.VI(2, METHOD_NAME)
vi.set_compute_nabla_F(vi_nabla_function_2D)
x = np.array((0.0, 0.0))
F = np.array((0.0, 0.0))
SO = sn.SolverOptions(sn.SICONOS_VI_BOX_QI)
lb = np.array((-1.0, -1.0))
ub = np.array((1.0, 1.0))
vi.set_box_constraints(lb, ub)
info = sn.variationalInequality_box_newton_QiLSA(vi, x, F, SO)
print(info)
print(
"number of iteration {:} ; precision {:}".format(
SO.iparam[sn.SICONOS_IPARAM_ITER_DONE], SO.dparam[sn.SICONOS_DPARAM_RESIDU]
)
)
print("x = ", x)
print("F = ", F)
assert np.linalg.norm(x - xsol_2D) <= xtol
assert not info
def test_vi_3D():
vi = sn.VI(3, vi_function_3D)
x = np.zeros((3,))
F = np.zeros((3,))
SO = sn.SolverOptions(sn.SICONOS_VI_BOX_QI)
vi.set_compute_nabla_F(vi_nabla_function_3D)
lb = np.array((-1.0, -1.0, -1.0))
ub = np.array((1.0, 1.0, 1.0))
vi.set_box_constraints(lb, ub)
info = sn.variationalInequality_box_newton_QiLSA(vi, x, F, SO)
print(info)
print(
"number of iteration {:} ; precision {:}".format(
SO.iparam[sn.SICONOS_IPARAM_ITER_DONE], SO.dparam[sn.SICONOS_DPARAM_RESIDU]
)
)
print("x = ", x)
print("F = ", F)
assert np.linalg.norm(x - xsol_3D) <= xtol
assert not info
assert np.abs(SO.dparam[sn.SICONOS_DPARAM_RESIDU]) < 1e-10
def test_vi_C_interface():
try:
from cffi import FFI
cffi_is_present = True
except ImportError:
cffi_is_present = False
return
if cffi_is_present:
h = 1e-5
T = 1.0
t = 0.0
theta = 1.0
gamma = 1.0
g = 9.81
kappa = 0.4
xk = np.array((1.0, 10.0))
ffi = FFI()
ffi.cdef("void set_cstruct(uintptr_t p_env, void* p_struct);")
ffi.cdef(
"""typedef struct
{
int id;
double* xk;
double h;
double theta;
double gamma;
double g;
double kappa;
unsigned int f_eval;
unsigned int nabla_eval;
} data;
"""
)
data_struct = ffi.new("data*")
data_struct.id = -1 # to avoid freeing the data in the destructor
data_struct.xk = ffi.cast("double *", xk.ctypes.data)
data_struct.h = h
data_struct.theta = theta
data_struct.gamma = gamma
data_struct.g = g
data_struct.kappa = kappa
vi = sn.VI(2)
import siconos
D = ffi.dlopen(siconos.__path__[0] + "/_pynumerics.so")
D.set_cstruct(vi.get_env_as_long(), ffi.cast("void*", data_struct))
vi.set_compute_F_and_nabla_F_as_C_functions(
"ZhuravlevIvanov.so", "compute_F", "compute_nabla_F"
)
lambda_ = np.zeros((2,))
xkp1 = np.zeros((2,))
SO = sn.SolverOptions(sn.SICONOS_VI_BOX_QI)
lb = np.array((-1.0, -1.0))
ub = np.array((1.0, 1.0))
vi.set_box_constraints(lb, ub)
N = int(T / h + 10)
print(N)
SO.dparam[sn.SICONOS_DPARAM_TOL] = 1e-24
SO.iparam[sn.SICONOS_IPARAM_MAX_ITER] = 100
SO.iparam[sn.SICONOS_VI_IPARAM_ACTIVATE_UPDATE] = 1
SO.iparam[sn.SICONOS_VI_IPARAM_DECREASE_RHO] = 0
SO.iparam[4] = 5 # ???
signs = np.empty((N, 2))
sol = np.empty((N, 2))
sol[0, :] = xk
k = 0
# sn.numerics_set_verbose(3)
while t <= T:
k += 1
info = sn.variationalInequality_box_newton_QiLSA(vi, lambda_, xkp1, SO)
if info > 0:
print(lambda_)
# vi_function(2, signs[k-1, :], xkp1)
lambda_[0] = -np.sign(xkp1[0])
lambda_[1] = -np.sign(xkp1[1])
if np.abs(xk[0]) < 1e-10:
lambda_[0] = 0.01
if np.abs(xk[1]) < 1e-10:
lambda_[1] = 0.01
print("ok lambda")
print(lambda_)
info = sn.variationalInequality_box_newton_QiLSA(vi, lambda_, xkp1, SO)
print(
"iter {:} ; solver iter = {:} ; prec = {:}".format(
k,
SO.iparam[sn.SICONOS_IPARAM_ITER_DONE],
SO.dparam[sn.SICONOS_DPARAM_RESIDU],
)
)
if info > 0:
print("VI solver failed ! info = {:}".format(info))
print(xk)
print(lambda_)
print(xkp1)
                raise RuntimeError('VI solver failed; aborting test')
sol[k, 0:2] = xkp1
np.copyto(xk, xkp1, casting="no")
signs[k, 0:2] = lambda_
t = k * h
# z[:] = 0.0
|
3,769 |
test rotation
|
import math
import unittest
import volmdlr
from volmdlr import curves
class TestEllipse3D(unittest.TestCase):
ellipse = curves.Ellipse3D(4, 3, volmdlr.Frame3D(volmdlr.O3D, volmdlr.X3D,
volmdlr.Z3D.cross(volmdlr.X3D), volmdlr.Z3D))
vector1 = volmdlr.Vector3D(1, 1, 1)
vector1 = vector1.unit_vector()
vector2 = vector1.deterministic_unit_normal_vector()
vector3 = vector1.cross(vector2)
frame = volmdlr.Frame3D(volmdlr.O3D, vector1, vector2, vector3)
ellipse3d = curves.Ellipse3D(2, 1, frame)
def test_point_belongs(self):
point_on_ellipse = volmdlr.Point3D(2.8284271247461903, 2.1213203435596424, 0)
point_on_ellipse2 = volmdlr.Point3D(4, 0, 0)
point_not_on_ellipse = volmdlr.Point3D(3, 3, 0)
self.assertTrue(self.ellipse.point_belongs(point_on_ellipse))
self.assertTrue(self.ellipse.point_belongs(point_on_ellipse2))
self.assertFalse(self.ellipse.point_belongs(point_not_on_ellipse))
def test_length(self):
self.assertAlmostEqual(self.ellipse.length(), 22.1034921607)
def test_discretization_points(self):
discretization_points = self.ellipse.discretization_points(number_points=4)
expected_points = [volmdlr.Point3D(4.0, 0.0, 0.0),
volmdlr.Point3D(0, -3, 0),
volmdlr.Point3D(-4, 0, 0),
volmdlr.Point3D(0, 3, 0)]
for expected_point, point in zip(expected_points, discretization_points):
self.assertTrue(expected_point.is_close(point))
def test_to_2d(self):
vector_2 = self.ellipse.normal.cross(self.ellipse.major_dir)
ellipse_2d = self.ellipse.to_2d(self.ellipse.center, self.ellipse.major_dir, vector_2)
self.assertEqual(volmdlr.O2D, ellipse_2d.center)
self.assertEqual(volmdlr.X2D, ellipse_2d.major_dir)
def test_abscissa(self):
point_on_ellipse = volmdlr.Point3D(2.8284271247461903, 2.1213203435596424, 0)
point_on_ellipse2 = volmdlr.Point3D(4, 0, 0)
point_not_on_ellipse = volmdlr.Point3D(3, 3, 0)
self.assertAlmostEqual(self.ellipse.abscissa(point_on_ellipse), 2.513786016470093)
self.assertAlmostEqual(self.ellipse.abscissa(point_on_ellipse2), 0.0)
with self.assertRaises(ValueError):
self.ellipse.abscissa(point_not_on_ellipse)
point3d = volmdlr.Point3D(-0.236335934849, -1.104105421298, -1.104105421298)
self.assertAlmostEqual(self.ellipse3d.abscissa(point3d), 3.8753709794017066)
    def test_point_at_abscissa(self):
point_at_abscissa = self.ellipse3d.point_at_abscissa(self.ellipse3d.length() * 0.4)
self.assertTrue(point_at_abscissa.is_close(volmdlr.Point3D(-0.236335934849, -1.104105421298, -1.104105421298)))
def test_trim(self):
trim = self.ellipse3d.trim(volmdlr.Point3D(-0.12975651199692162, 0.9309036597828997, 0.9309036597828997),
volmdlr.Point3D(0.12975651199692095, -0.9309036597829001, -0.9309036597829001))
self.assertAlmostEqual(trim.length(), 4.844224110273849)
self.assertTrue(trim.point_belongs(volmdlr.Point3D(1.386280848895, 0.495371104295, 0.495371104295)))
def METHOD_NAME(self):
rotated_ellipse3d = self.ellipse3d.rotation(volmdlr.O3D, self.ellipse3d.frame.v, math.pi / 2)
rotated_ellipse3d_points = rotated_ellipse3d.discretization_points(number_points=6)
expected_points = [volmdlr.Point3D(-3.825416359218455e-16, -1.414213562373095, 1.4142135623730954),
volmdlr.Point3D(-0.7071067811865475, -0.35355339059327356, 1.0606601717798214),
volmdlr.Point3D(-0.7071067811865472, 1.0606601717798212, -0.3535533905932735),
volmdlr.Point3D(2.825496434870558e-16, 1.414213562373095, -1.4142135623730954),
volmdlr.Point3D(0.7071067811865474, 0.3535533905932742, -1.060660171779822),
volmdlr.Point3D(0.7071067811865475, -1.0606601717798207, 0.3535533905932728)]
for point, expected_point in zip(rotated_ellipse3d_points, expected_points):
self.assertTrue(point.is_close(expected_point))
def test_traslation(self):
translated_ellipse3d = self.ellipse3d.translation(self.ellipse3d.frame.w)
translated_ellipse3d_points = translated_ellipse3d.discretization_points(number_points=6)
expected_points = [volmdlr.Point3D(1.1547005383792517, 1.8618073195657994, 0.4475937571927041),
volmdlr.Point3D(-0.12975651199692162, 1.6380104409694474, 0.22379687859635217),
volmdlr.Point3D(-1.2844570503761734, 0.48330990259019585, -0.9309036597828992),
volmdlr.Point3D(-1.1547005383792517, -0.44759375719270406, -1.8618073195657994),
volmdlr.Point3D(0.12975651199692095, -0.2237968785963525, -1.6380104409694478),
volmdlr.Point3D(1.2844570503761727, 0.9309036597828986, -0.48330990259019657)]
for point, expected_point in zip(translated_ellipse3d_points, expected_points):
self.assertTrue(point.is_close(expected_point))
def test_frame_mapping(self):
frame_mapped_ellipse3d = self.ellipse3d.frame_mapping(self.ellipse3d.frame, 'new')
frame_mapped_ellipse3d_points = frame_mapped_ellipse3d.discretization_points(number_points=6)
expected_points = [volmdlr.Point3D(1.9999999999999996, 0.0, 0.0),
volmdlr.Point3D(1.0, -0.8660254037844384, 0.0),
volmdlr.Point3D(-0.9999999999999993, -0.8660254037844386, 0.0),
volmdlr.Point3D(-1.9999999999999996, -1.224646799147353e-16, 0.0),
volmdlr.Point3D(-1.0000000000000007, 0.8660254037844382, 0.0),
volmdlr.Point3D(0.9999999999999983, 0.8660254037844388, 0.0)]
for point, expected_point in zip(frame_mapped_ellipse3d_points, expected_points):
self.assertTrue(point.is_close(expected_point))
if __name__ == '__main__':
unittest.main()
|
3,770 |
cert pair
|
# (C) Datadog, Inc. 2020
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import json
from datetime import datetime, timedelta
from ...utils.date import UTC, parse_rfc3339
from .. import AgentCheck
try:
from datadog_agent import get_config
except ImportError:
def get_config(key):
return ""
class KubeletBase(AgentCheck):
def __init__(self, name, init_config, instances):
super(KubeletBase, self).__init__(name, init_config, instances)
def perform_kubelet_query(self, url, verbose=True, stream=False):
"""
Perform and return a GET request against kubelet. Support auth and TLS validation.
"""
# If tls_verify is False, then suppress tls warning
if self.kubelet_credentials.verify() is False:
self.http.ignore_tls_warning = True
return self.http.get(
url,
verify=self.kubelet_credentials.verify(),
cert=self.kubelet_credentials.METHOD_NAME(),
headers=self.kubelet_credentials.headers(url),
params={'verbose': verbose},
stream=stream,
)
def retrieve_pod_list(self):
try:
cutoff_date = self.compute_pod_expiration_datetime()
with self.perform_kubelet_query(self.pod_list_url, stream=True) as r:
if cutoff_date:
f = ExpiredPodFilter(cutoff_date)
pod_list = json.load(r.raw, object_hook=f.json_hook)
pod_list['expired_count'] = f.expired_count
if pod_list.get('items') is not None:
# Filter out None items from the list
pod_list['items'] = [p for p in pod_list['items'] if p is not None]
else:
pod_list = json.load(r.raw)
if pod_list.get('items') is None:
# Sanitize input: if no pods are running, 'items' is a NoneObject
pod_list['items'] = []
return pod_list
except Exception as e:
self.log.warning("failed to retrieve pod list from the kubelet at %s : %s", self.pod_list_url, e)
return {}
@staticmethod
def compute_pod_expiration_datetime():
"""
Looks up the agent's kubernetes_pod_expiration_duration option and returns either:
- None if expiration is disabled (set to 0)
- A (timezone aware) datetime object to compare against
"""
try:
seconds = int(get_config("kubernetes_pod_expiration_duration"))
if seconds == 0: # Expiration disabled
return None
return datetime.utcnow().replace(tzinfo=UTC) - timedelta(seconds=seconds)
except (ValueError, TypeError):
return None
class ExpiredPodFilter(object):
"""
Allows filtering old pods out of the pod list by providing a decoding hook
"""
def __init__(self, cutoff_date):
self.expired_count = 0
self.cutoff_date = cutoff_date
def json_hook(self, obj):
# Not a pod (hook is called for all objects)
if 'metadata' not in obj or 'status' not in obj:
return obj
# Quick exit for running/pending containers
pod_phase = obj.get('status', {}).get('phase')
if pod_phase in ["Running", "Pending"]:
return obj
# Filter out expired terminated pods, based on container finishedAt time
expired = True
for ctr in obj['status'].get('containerStatuses', []):
if "terminated" not in ctr.get("state", {}):
expired = False
break
finishedTime = ctr["state"]["terminated"].get("finishedAt")
if not finishedTime:
expired = False
break
if parse_rfc3339(finishedTime) > self.cutoff_date:
expired = False
break
if not expired:
return obj
# We are ignoring this pod
self.expired_count += 1
return None
class KubeletCredentials(object):
"""
Holds the configured credentials to connect to the Kubelet.
"""
def __init__(self, kubelet_conn_info):
"""
Parses the kubelet_conn_info dict and computes credentials
:param kubelet_conn_info: dict from kubeutil.get_connection_info()
"""
self._token = None
self._ssl_verify = None
self._ssl_cert = None
self._ssl_private_key = None
if kubelet_conn_info.get('verify_tls') == 'false':
self._ssl_verify = False
else:
self._ssl_verify = kubelet_conn_info.get('ca_cert')
cert = kubelet_conn_info.get('client_crt')
key = kubelet_conn_info.get('client_key')
if cert and key:
self._ssl_cert = cert
self._ssl_private_key = key
return # Don't import the token if we have valid certs
if 'token' in kubelet_conn_info:
self._token = kubelet_conn_info['token']
def METHOD_NAME(self):
"""
Returns the client certificates
:return: tuple (crt,key) or None
"""
if self._ssl_cert and self._ssl_private_key:
return (self._ssl_cert, self._ssl_private_key)
else:
return None
def headers(self, url):
"""
Returns the https headers with credentials, if token is used and url is https
:param url: url to be queried, including scheme
:return: dict or None
"""
if self._token and url.lower().startswith('https'):
return {'Authorization': 'Bearer {}'.format(self._token)}
else:
return None
def verify(self):
"""
Returns the SSL verification parameters
:return: CA cert path, None or False (SSL verification explicitly disabled)
"""
return self._ssl_verify
def configure_scraper(self, scraper_config):
"""
Configures a PrometheusScraper object with query credentials
:param scraper_config: scraper configuration dict; its 'prometheus_url'
entry is the endpoint that will be scraped
"""
endpoint = scraper_config['prometheus_url']
scraper_config.update(
{
'ssl_ca_cert': self._ssl_verify,
'ssl_cert': self._ssl_cert,
'ssl_private_key': self._ssl_private_key,
'extra_headers': self.headers(endpoint) or {},
}
)
def urljoin(*args):
"""
Joins given arguments into an url. Trailing but not leading slashes are
stripped for each argument.
:return: string
"""
return '/'.join(arg.strip('/') for arg in args)
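# --- Illustrative sketch (not part of the check): hypothetical paths show what
# the credential helpers above return when client certificates are configured.
def _example_cert_pair_usage():
    creds = KubeletCredentials(
        {
            "verify_tls": "true",
            "ca_cert": "/path/to/ca.crt",
            "client_crt": "/path/to/client.crt",
            "client_key": "/path/to/client.key",
        }
    )
    # verify() yields the CA bundle path, the cert-pair accessor yields (crt, key),
    # and headers() stays None because certificates take precedence over a token.
    assert creds.verify() == "/path/to/ca.crt"
    assert creds.METHOD_NAME() == ("/path/to/client.crt", "/path/to/client.key")
    assert creds.headers("https://10.250.0.1:10250/pods") is None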
|
3,771 |
interpret name
|
import collections
from _typeshed import Incomplete
from types import TracebackType
from typing import Any
from typing_extensions import Literal
def encode_text(s: str) -> bytes: ...
PDFDocEncoding: dict[int, str]
def decode_text(b: bytes) -> str: ...
class PdfFormatError(RuntimeError): ...
def check_format_condition(condition, error_message) -> None: ...
class IndirectReference:
def __bytes__(self) -> bytes: ...
def __eq__(self, other): ...
def __ne__(self, other): ...
def __hash__(self) -> int: ...
class IndirectObjectDef(IndirectReference): ...
class XrefTable:
existing_entries: Incomplete
new_entries: Incomplete
deleted_entries: Incomplete
reading_finished: bool
def __init__(self) -> None: ...
def __setitem__(self, key, value) -> None: ...
def __getitem__(self, key): ...
def __delitem__(self, key) -> None: ...
def __contains__(self, key): ...
def __len__(self) -> int: ...
def keys(self): ...
def write(self, f): ...
class PdfName:
name: Incomplete
def __init__(self, name) -> None: ...
def name_as_str(self): ...
def __eq__(self, other): ...
def __hash__(self) -> int: ...
@classmethod
def from_pdf_stream(cls, data): ...
allowed_chars: Incomplete
def __bytes__(self) -> bytes: ...
class PdfArray(list[Any]):
def __bytes__(self) -> bytes: ...
class PdfDict(collections.UserDict[bytes, Any]):
def __setattr__(self, key: str, value) -> None: ...
def __getattr__(self, key: str): ...
def __bytes__(self) -> bytes: ...
class PdfBinary:
data: Incomplete
def __init__(self, data) -> None: ...
def __bytes__(self) -> bytes: ...
class PdfStream:
dictionary: Incomplete
buf: Incomplete
def __init__(self, dictionary, buf) -> None: ...
def decode(self): ...
def pdf_repr(x: Incomplete) -> bytes: ...
class PdfParser:
filename: Incomplete
buf: Incomplete
f: Incomplete
start_offset: Incomplete
should_close_buf: bool
should_close_file: bool
cached_objects: Incomplete
file_size_total: int
root: Incomplete
root_ref: Incomplete
info: Incomplete
info_ref: Incomplete
page_tree_root: Incomplete
pages: Incomplete
orig_pages: Incomplete
pages_ref: Incomplete
last_xref_section_offset: Incomplete
trailer_dict: Incomplete
xref_table: Incomplete
def __init__(
self,
filename: Incomplete | None = None,
f: Incomplete | None = None,
buf: Incomplete | None = None,
start_offset: int = 0,
mode: str = "rb",
) -> None: ...
def __enter__(self): ...
def __exit__(
self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None
) -> Literal[False]: ...
def start_writing(self) -> None: ...
def close_buf(self) -> None: ...
def close(self) -> None: ...
def seek_end(self) -> None: ...
def write_header(self) -> None: ...
def write_comment(self, s) -> None: ...
def write_catalog(self): ...
def rewrite_pages(self) -> None: ...
def write_xref_and_trailer(self, new_root_ref: Incomplete | None = None) -> None: ...
def write_page(self, ref, *objs, **dict_obj): ...
def write_obj(self, ref, *objs, **dict_obj): ...
def del_root(self) -> None: ...
@staticmethod
def get_buf_from_file(f): ...
file_size_this: Incomplete
def read_pdf_info(self) -> None: ...
def next_object_id(self, offset: Incomplete | None = None): ...
delimiter: bytes
delimiter_or_ws: bytes
whitespace: bytes
whitespace_or_hex: bytes
whitespace_optional: Incomplete
whitespace_mandatory: Incomplete
whitespace_optional_no_nl: bytes
newline_only: bytes
newline: Incomplete
re_trailer_end: Incomplete
re_trailer_prev: Incomplete
def read_trailer(self) -> None: ...
def read_prev_trailer(self, xref_section_offset) -> None: ...
re_whitespace_optional: Incomplete
re_name: Incomplete
re_dict_start: Incomplete
re_dict_end: Incomplete
@classmethod
def interpret_trailer(cls, trailer_data): ...
re_hashes_in_name: Incomplete
@classmethod
def METHOD_NAME(cls, raw, as_text: bool = False): ...
re_null: Incomplete
re_true: Incomplete
re_false: Incomplete
re_int: Incomplete
re_real: Incomplete
re_array_start: Incomplete
re_array_end: Incomplete
re_string_hex: Incomplete
re_string_lit: Incomplete
re_indirect_reference: Incomplete
re_indirect_def_start: Incomplete
re_indirect_def_end: Incomplete
re_comment: Incomplete
re_stream_start: Incomplete
re_stream_end: Incomplete
@classmethod
def get_value(cls, data, offset, expect_indirect: Incomplete | None = None, max_nesting: int = -1): ...
re_lit_str_token: Incomplete
escaped_chars: Incomplete
@classmethod
def get_literal_string(cls, data, offset): ...
re_xref_section_start: Incomplete
re_xref_subsection_start: Incomplete
re_xref_entry: Incomplete
def read_xref_table(self, xref_section_offset): ...
def read_indirect(self, ref, max_nesting: int = -1): ...
def linearize_page_tree(self, node: Incomplete | None = None): ...
|
3,772 |
all reduce params
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
A modified version of the legacy DistributedDataParallel module that uses c10d
communication primitives. This version is simpler than the latest PyTorch
version and is useful for debugging. Notably it does not overlap gradient
communication with the backward pass, which makes it slower but more robust
than the PyTorch version.
This version also supports the *no_sync* context manager, which allows faster
training with `--update-freq`.
"""
from collections import OrderedDict
from contextlib import contextmanager
import torch
from torch import nn
from fairseq.distributed import utils
class LegacyDistributedDataParallel(nn.Module):
"""Implements distributed data parallelism at the module level.
A simplified version of :class:`torch.nn.parallel.DistributedDataParallel`.
This version uses a c10d process group for communication and does not
broadcast buffers.
Args:
module (~torch.nn.Module): module to be parallelized
process_group: the c10d process group to be used for distributed data
parallel all-reduction.
buffer_size (int, optional): number of elements to buffer before
performing all-reduce (default: 256M).
"""
def __init__(self, module, process_group, buffer_size=2 ** 28):
super().__init__()
self.module = module
self.process_group = process_group
self.world_size = utils.get_world_size(self.process_group)
# Never use a bigger buffer than the number of model params
self.buffer_size = min(buffer_size, sum(p.numel() for p in module.parameters()))
self.buffer = None
# We can also forcibly accumulate grads locally and only do the
# all-reduce at some later time
self.accumulate_grads = False
# make per-device lists of parameters
paramlists = OrderedDict()
for param in self.module.parameters():
device = param.device
if paramlists.get(device) is None:
paramlists[device] = []
paramlists[device] += [param]
self.per_device_params = list(paramlists.values())
@contextmanager
def no_sync(self):
"""A context manager to disable gradient synchronization."""
old_accumulate_grads = self.accumulate_grads
self.accumulate_grads = True
yield
self.accumulate_grads = old_accumulate_grads
def forward(self, *inputs, **kwargs):
return self.module(*inputs, **kwargs)
def all_reduce_grads(self):
"""
This function must be called explicitly after backward to reduce
gradients. There is no automatic hook like c10d.
"""
def METHOD_NAME(params):
buffer = self.buffer
nonzero_buffer = False
if len(params) > 1:
offset = 0
for p in params:
sz = p.numel()
if p.grad is not None:
buffer[offset : offset + sz].copy_(p.grad.data.view(-1))
nonzero_buffer = True
else:
buffer[offset : offset + sz].zero_()
offset += sz
else:
# we only have a single grad to all-reduce
p = params[0]
if p.grad is not None:
buffer = p.grad.data
nonzero_buffer = True
elif p.numel() <= self.buffer.numel():
buffer = buffer[: p.numel()]
buffer.zero_()
else:
buffer = torch.zeros_like(p)
if nonzero_buffer:
buffer.div_(self.world_size)
utils.all_reduce(buffer, self.process_group)
# copy all-reduced grads back into their original place
offset = 0
for p in params:
sz = p.numel()
if p.grad is not None:
p.grad.data.copy_(buffer[offset : offset + sz].view_as(p))
else:
p.grad = buffer[offset : offset + sz].view_as(p).clone()
offset += sz
def reduction_fn():
# This function only needs to be called once
if self.accumulate_grads:
return
if self.buffer is None:
self.buffer = next(self.module.parameters()).new(self.buffer_size)
for params in self.per_device_params:
# All-reduce the gradients in buckets
offset = 0
buffered_params = []
for param in params:
if not param.requires_grad:
continue
if param.grad is None:
param.grad = torch.zeros_like(param)
if hasattr(param, "expert"):
# Skip gradient sync for unshared parameters
continue
if param.grad.requires_grad:
raise RuntimeError(
"DistributedDataParallel only works "
"with gradients that don't require "
"grad"
)
sz = param.numel()
if sz > self.buffer.numel():
# all-reduce big params directly
METHOD_NAME([param])
else:
if offset + sz > self.buffer.numel():
METHOD_NAME(buffered_params)
offset = 0
buffered_params.clear()
buffered_params.append(param)
offset += sz
if len(buffered_params) > 0:
METHOD_NAME(buffered_params)
reduction_fn()
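# --- Illustrative usage sketch (not from fairseq): how `no_sync` and
# `all_reduce_grads` combine with `--update-freq`-style gradient accumulation.
# `ddp`, `optimizer`, `batches` and `update_freq` below are hypothetical.
def _example_training_loop(ddp, optimizer, batches, update_freq=2):
    from contextlib import nullcontext

    for i, batch in enumerate(batches):
        sync_now = (i + 1) % update_freq == 0
        # Skip gradient synchronization on all but the last accumulation step.
        with nullcontext() if sync_now else ddp.no_sync():
            loss = ddp(batch).sum()
            loss.backward()
        if sync_now:
            # There is no automatic c10d hook, so the all-reduce must be explicit.
            ddp.all_reduce_grads()
            optimizer.step()
            optimizer.zero_grad()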
|
3,773 |
test browser open
|
import json
from io import StringIO
from logging import StreamHandler
from typing import Any
import pytest
from traitlets.config import Config
from jupyter_server.serverapp import ServerApp
from .mockextensions.app import MockExtensionApp
@pytest.fixture
def jp_server_config(jp_template_dir):
config = {
"ServerApp": {
"jpserver_extensions": {"tests.extension.mockextensions": True},
},
"MockExtensionApp": {
"template_paths": [str(jp_template_dir)],
"log_level": "DEBUG",
},
}
return config
@pytest.fixture
def mock_extension(extension_manager):
name = "tests.extension.mockextensions"
pkg = extension_manager.extensions[name]
point = pkg.extension_points["mockextension"]
app = point.app
return app
def test_initialize(jp_serverapp, jp_template_dir, mock_extension):
# Check that settings and handlers were added to the mock extension.
assert isinstance(mock_extension.serverapp, ServerApp)
assert len(mock_extension.handlers) > 0
assert mock_extension.loaded
assert mock_extension.template_paths == [str(jp_template_dir)]
@pytest.mark.parametrize(
"trait_name, trait_value, jp_argv",
(
[
"mock_trait",
"test mock trait",
["--MockExtensionApp.mock_trait=test mock trait"],
],
),
)
def test_instance_creation_with_argv(
trait_name,
trait_value,
jp_argv,
mock_extension,
):
assert getattr(mock_extension, trait_name) == trait_value
def test_extensionapp_load_config_file(
config_file,
jp_serverapp,
mock_extension,
):
# Assert default config_file_paths is the same in the app and extension.
assert mock_extension.config_file_paths == jp_serverapp.config_file_paths
assert mock_extension.config_dir == jp_serverapp.config_dir
assert mock_extension.config_file_name == "jupyter_mockextension_config"
# Assert that the trait is updated by config file
assert mock_extension.mock_trait == "config from file"
def test_extensionapp_no_parent():
# make sure we can load config files, even when serverapp is not passed
# relevant for e.g. shortcuts to config-loading
app = MockExtensionApp()
assert isinstance(app.config_file_paths, list)
assert app.serverapp is not None
OPEN_BROWSER_COMBINATIONS: Any = (
(True, {}),
(True, {"ServerApp": {"open_browser": True}}),
(False, {"ServerApp": {"open_browser": False}}),
(True, {"MockExtensionApp": {"open_browser": True}}),
(False, {"MockExtensionApp": {"open_browser": False}}),
(
True,
{
"ServerApp": {"open_browser": True},
"MockExtensionApp": {"open_browser": True},
},
),
(
False,
{
"ServerApp": {"open_browser": True},
"MockExtensionApp": {"open_browser": False},
},
),
(
True,
{
"ServerApp": {"open_browser": False},
"MockExtensionApp": {"open_browser": True},
},
),
(
False,
{
"ServerApp": {"open_browser": False},
"MockExtensionApp": {"open_browser": False},
},
),
)
@pytest.mark.parametrize("expected_value, config", OPEN_BROWSER_COMBINATIONS)
async def METHOD_NAME(monkeypatch, jp_environ, config, expected_value):
serverapp = MockExtensionApp.initialize_server(config=Config(config))
assert serverapp.open_browser == expected_value
async def test_load_parallel_extensions(monkeypatch, jp_environ):
serverapp = MockExtensionApp.initialize_server()
exts = serverapp.extension_manager.extensions
assert "tests.extension.mockextensions.mock1" in exts
assert "tests.extension.mockextensions" in exts
exts = serverapp.jpserver_extensions
assert exts["tests.extension.mockextensions.mock1"]
assert exts["tests.extension.mockextensions"]
async def test_stop_extension(jp_serverapp, caplog):
"""Test the stop_extension method.
This should be fired by ServerApp.cleanup_extensions.
"""
calls = 0
# load extensions (make sure we only have the one extension loaded
# as well as
jp_serverapp.extension_manager.load_all_extensions()
extension_name = "tests.extension.mockextensions"
apps = set(jp_serverapp.extension_manager.extension_apps)
assert apps == {"jupyter_server_terminals", extension_name}
# add a stop_extension method for the extension app
async def _stop(*args):
nonlocal calls
calls += 1
for apps in jp_serverapp.extension_manager.extension_apps.values():
for app in apps:
if app:
app.stop_extension = _stop
# call cleanup_extensions, check the logging is correct
caplog.clear()
await jp_serverapp.cleanup_extensions()
assert {msg for *_, msg in caplog.record_tuples} == {
"Shutting down 2 extensions",
"jupyter_server_terminals | extension app 'jupyter_server_terminals' stopping",
f"{extension_name} | extension app 'mockextension' stopping",
"jupyter_server_terminals | extension app 'jupyter_server_terminals' stopped",
f"{extension_name} | extension app 'mockextension' stopped",
}
# check the shutdown method was called twice
assert calls == 2
async def test_events(jp_serverapp, jp_fetch):
stream = StringIO()
handler = StreamHandler(stream)
jp_serverapp.event_logger.register_handler(handler)
await jp_fetch("mock")
handler.flush()
output = json.loads(stream.getvalue())
# Clear the sink.
stream.truncate(0)
stream.seek(0)
assert output["msg"] == "Hello, world!"
|
3,774 |
send request
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, TYPE_CHECKING
from azure.core.rest import HttpRequest, HttpResponse
from azure.mgmt.core import ARMPipelineClient
from . import models as _models
from .._serialization import Deserializer, Serializer
from ._configuration import SecurityCenterConfiguration
from .operations import APICollectionOffboardingOperations, APICollectionOnboardingOperations, APICollectionOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class SecurityCenter: # pylint: disable=client-accepts-api-version-keyword
"""API spec for Microsoft.Security (Azure Security Center) resource provider.
:ivar api_collection: APICollectionOperations operations
:vartype api_collection:
azure.mgmt.security.v2022_11_20_preview.operations.APICollectionOperations
:ivar api_collection_onboarding: APICollectionOnboardingOperations operations
:vartype api_collection_onboarding:
azure.mgmt.security.v2022_11_20_preview.operations.APICollectionOnboardingOperations
:ivar api_collection_offboarding: APICollectionOffboardingOperations operations
:vartype api_collection_offboarding:
azure.mgmt.security.v2022_11_20_preview.operations.APICollectionOffboardingOperations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription. Required.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2022-11-20-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = SecurityCenterConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
self._client: ARMPipelineClient = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.api_collection = APICollectionOperations(self._client, self._config, self._serialize, self._deserialize)
self.api_collection_onboarding = APICollectionOnboardingOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.api_collection_offboarding = APICollectionOffboardingOperations(
self._client, self._config, self._serialize, self._deserialize
)
def METHOD_NAME(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
def close(self) -> None:
self._client.close()
def __enter__(self) -> "SecurityCenter":
self._client.__enter__()
return self
def __exit__(self, *exc_details: Any) -> None:
self._client.__exit__(*exc_details)
|
3,775 |
create
|
from contextlib import suppress
import awxkit.exceptions as exc
from awxkit.api.pages import base, WorkflowJobTemplate, UnifiedJobTemplate, JobTemplate
from awxkit.api.mixins import HasCreate, DSAdapter
from awxkit.api.resources import resources
from awxkit.utils import update_payload, PseudoNamespace, random_title
from . import page
class WorkflowJobTemplateNode(HasCreate, base.Base):
dependencies = [WorkflowJobTemplate, UnifiedJobTemplate]
NATURAL_KEY = ('workflow_job_template', 'identifier')
def payload(self, workflow_job_template, unified_job_template, **kwargs):
if not unified_job_template:
# May pass "None" to explicitly create an approval node
payload = PseudoNamespace(workflow_job_template=workflow_job_template.id)
else:
payload = PseudoNamespace(workflow_job_template=workflow_job_template.id, unified_job_template=unified_job_template.id)
optional_fields = (
'diff_mode',
'extra_data',
'limit',
'scm_branch',
'job_tags',
'job_type',
'skip_tags',
'verbosity',
'extra_data',
'identifier',
'all_parents_must_converge',
# prompt fields for JTs
'job_slice_count',
'forks',
'timeout',
'execution_environment',
)
update_payload(payload, optional_fields, kwargs)
if 'inventory' in kwargs:
payload['inventory'] = kwargs['inventory'].id
return payload
def create_payload(self, workflow_job_template=WorkflowJobTemplate, unified_job_template=JobTemplate, **kwargs):
if not unified_job_template:
self.create_and_update_dependencies(workflow_job_template)
payload = self.payload(workflow_job_template=self.ds.workflow_job_template, unified_job_template=None, **kwargs)
else:
self.create_and_update_dependencies(workflow_job_template, unified_job_template)
payload = self.payload(workflow_job_template=self.ds.workflow_job_template, unified_job_template=self.ds.unified_job_template, **kwargs)
payload.ds = DSAdapter(self.__class__.__name__, self._dependency_store)
return payload
def METHOD_NAME(self, workflow_job_template=WorkflowJobTemplate, unified_job_template=JobTemplate, **kwargs):
payload = self.create_payload(workflow_job_template=workflow_job_template, unified_job_template=unified_job_template, **kwargs)
return self.update_identity(WorkflowJobTemplateNodes(self.connection).post(payload))
def _add_node(self, endpoint, unified_job_template, **kwargs):
node = endpoint.post(dict(unified_job_template=unified_job_template.id, **kwargs))
node.create_and_update_dependencies(self.ds.workflow_job_template, unified_job_template)
return node
def add_always_node(self, unified_job_template, **kwargs):
return self._add_node(self.related.always_nodes, unified_job_template, **kwargs)
def add_failure_node(self, unified_job_template, **kwargs):
return self._add_node(self.related.failure_nodes, unified_job_template, **kwargs)
def add_success_node(self, unified_job_template, **kwargs):
return self._add_node(self.related.success_nodes, unified_job_template, **kwargs)
def add_credential(self, credential):
with suppress(exc.NoContent):
self.related.credentials.post(dict(id=credential.id, associate=True))
def remove_credential(self, credential):
with suppress(exc.NoContent):
self.related.credentials.post(dict(id=credential.id, disassociate=True))
def remove_all_credentials(self):
for cred in self.related.credentials.get().results:
with suppress(exc.NoContent):
self.related.credentials.post(dict(id=cred.id, disassociate=True))
def make_approval_node(self, **kwargs):
if 'name' not in kwargs:
kwargs['name'] = 'approval node {}'.format(random_title())
self.related.create_approval_template.post(kwargs)
return self.get()
def get_job_node(self, workflow_job):
candidates = workflow_job.get_related('workflow_nodes', identifier=self.identifier)
return candidates.results.pop()
def add_label(self, label):
with suppress(exc.NoContent):
self.related.labels.post(dict(id=label.id))
def add_instance_group(self, instance_group):
with suppress(exc.NoContent):
self.related.instance_groups.post(dict(id=instance_group.id))
page.register_page(
[resources.workflow_job_template_node, (resources.workflow_job_template_nodes, 'post'), (resources.workflow_job_template_workflow_nodes, 'post')],
WorkflowJobTemplateNode,
)
class WorkflowJobTemplateNodes(page.PageList, WorkflowJobTemplateNode):
pass
page.register_page(
[
resources.workflow_job_template_nodes,
resources.workflow_job_template_workflow_nodes,
resources.workflow_job_template_node_always_nodes,
resources.workflow_job_template_node_failure_nodes,
resources.workflow_job_template_node_success_nodes,
],
WorkflowJobTemplateNodes,
)
|
3,776 |
company id
|
import csv
from typing import Dict, Optional
from datetime import datetime
from io import TextIOWrapper
from zipfile import ZipFile
from normality import collapse_spaces
from followthemoney.util import join_text
from zavod import Context
TYPES = {"C": "HE", "P": "S", "O": "AE", "N": "BN", "B": "B"}
def parse_date(text: Optional[str]) -> Optional[str]:
if text is None or not len(text.strip()):
return None
return datetime.strptime(text, "%d/%m/%Y").date().isoformat()
def METHOD_NAME(org_type: str, reg_nr: str) -> Optional[str]:
org_type_oc = TYPES.get(org_type)
if org_type_oc is None:
return None
return f"oc-companies-cy-{org_type_oc}{reg_nr}".lower()
def iter_rows(zip: ZipFile, name: str):
with zip.open(name, "r") as fh:
wrapper = TextIOWrapper(fh, encoding="utf-8-sig")
for row in csv.DictReader(wrapper):
yield row
def parse_organisations(context: Context, rows, addresses: Dict[str, str]):
for row in rows:
org_type = row.pop("ORGANISATION_TYPE_CODE")
reg_nr = row.pop("REGISTRATION_NO")
if org_type in ("", "Εμπορική Επωνυμία"):
continue
entity = context.make("Company")
entity.id = METHOD_NAME(org_type, reg_nr)
if entity.id is None:
context.log.error("Could not make ID", org_type=org_type, reg_nr=reg_nr)
continue
entity.add("name", row.pop("ORGANISATION_NAME"))
entity.add("status", row.pop("ORGANISATION_STATUS"))
if org_type == "O":
entity.add("country", "cy")
else:
entity.add("jurisdiction", "cy")
org_type_oc = TYPES[org_type]
oc_id = f"{org_type_oc}{reg_nr}"
oc_url = f"https://opencorporates.com/companies/cy/{oc_id}"
entity.add("opencorporatesUrl", oc_url)
entity.add("registrationNumber", oc_id)
entity.add("registrationNumber", f"{org_type}{reg_nr}")
org_type_text = row.pop("ORGANISATION_TYPE")
org_subtype = row.pop("ORGANISATION_SUB_TYPE")
if len(org_subtype.strip()):
org_type_text = f"{org_type_text} - {org_subtype}"
entity.add("legalForm", org_type_text)
reg_date = parse_date(row.pop("REGISTRATION_DATE"))
entity.add("incorporationDate", reg_date)
status_date = parse_date(row.pop("ORGANISATION_STATUS_DATE"))
entity.add("modifiedAt", status_date)
addr_id = row.pop("ADDRESS_SEQ_NO")
entity.add("address", addresses.get(addr_id))
context.emit(entity)
# print(entity.to_dict())
context.audit_data(row, ignore=["NAME_STATUS_CODE", "NAME_STATUS"])
def parse_officials(context: Context, rows):
org_types = list(TYPES.keys())
for row in rows:
org_type = row.pop("ORGANISATION_TYPE_CODE")
if org_type not in org_types:
continue
reg_nr = row.pop("REGISTRATION_NO")
name = row.pop("PERSON_OR_ORGANISATION_NAME")
position = row.pop("OFFICIAL_POSITION")
entity = context.make("LegalEntity")
entity.id = context.make_id(org_type, reg_nr, name)
entity.add("name", name)
context.emit(entity)
link = context.make("Directorship")
link.id = context.make_id("Directorship", org_type, reg_nr, name, position)
org_id = METHOD_NAME(org_type, reg_nr)
if org_id is None:
context.log.error("Could not make ID", org_type=org_type, reg_nr=reg_nr)
continue
link.add("organization", org_id)
link.add("director", entity.id)
link.add("role", position)
context.emit(link)
def load_addresses(rows) -> Dict[str, str]:
addresses: Dict[str, str] = {}
for row in rows:
seq_no = row.pop("ADDRESS_SEQ_NO")
if seq_no is None:
continue
street = row.pop("STREET")
building = row.pop("BUILDING")
territory = row.pop("TERRITORY")
address = join_text(building, street, territory, sep=", ")
if address is not None:
address = collapse_spaces(address.replace("_", ""))
if address is not None:
addresses[seq_no] = address
return addresses
def crawl(context: Context):
data_path = context.fetch_resource("data.zip", context.data_url)
with ZipFile(data_path, "r") as zip:
addresses: Dict[str, str] = {}
for name in zip.namelist():
if name.startswith("registered_office_"):
addresses = load_addresses(iter_rows(zip, name))
for name in zip.namelist():
context.log.info("Reading: %s in %s" % (name, data_path))
if name.startswith("organisations_"):
rows = iter_rows(zip, name)
parse_organisations(context, rows, addresses)
if name.startswith("organisation_officials_"):
rows = iter_rows(zip, name)
parse_officials(context, rows)
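# --- Tiny sketch (illustrative only): the ID helper above maps registry type
# codes onto OpenCorporates-style identifiers; the numbers are made up.
def _example_company_id():
    assert METHOD_NAME("C", "123456") == "oc-companies-cy-he123456"
    assert METHOD_NAME("X", "123456") is None  # unknown type codes yield no ID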
|
3,777 |
on timestamp timer
|
from qtpy import QtWidgets, QtCore
from openpype.tools.utils.delegates import pretty_date
class BaseInfoDialog(QtWidgets.QDialog):
width = 600
height = 400
def __init__(self, message, title, info_obj, parent=None):
super(BaseInfoDialog, self).__init__(parent)
self._result = 0
self._info_obj = info_obj
self.setWindowTitle(title)
message_label = QtWidgets.QLabel(message, self)
message_label.setWordWrap(True)
separator_widget_1 = QtWidgets.QFrame(self)
separator_widget_2 = QtWidgets.QFrame(self)
for separator_widget in (
separator_widget_1,
separator_widget_2
):
separator_widget.setObjectName("Separator")
separator_widget.setMinimumHeight(1)
separator_widget.setMaximumHeight(1)
other_information = QtWidgets.QWidget(self)
other_information_layout = QtWidgets.QFormLayout(other_information)
other_information_layout.setContentsMargins(0, 0, 0, 0)
for label, value in (
("Username", info_obj.username),
("Host name", info_obj.hostname),
("Host IP", info_obj.hostip),
("System name", info_obj.system_name),
("Local ID", info_obj.local_id),
):
other_information_layout.addRow(
label,
QtWidgets.QLabel(value or "N/A", other_information)
)
timestamp_label = QtWidgets.QLabel(
pretty_date(info_obj.timestamp_obj), other_information
)
other_information_layout.addRow("Time", timestamp_label)
footer_widget = QtWidgets.QWidget(self)
buttons_widget = QtWidgets.QWidget(footer_widget)
buttons_layout = QtWidgets.QHBoxLayout(buttons_widget)
buttons_layout.setContentsMargins(0, 0, 0, 0)
buttons = self.get_buttons(buttons_widget)
for button in buttons:
buttons_layout.addWidget(button, 1)
footer_layout = QtWidgets.QHBoxLayout(footer_widget)
footer_layout.setContentsMargins(0, 0, 0, 0)
footer_layout.addStretch(1)
footer_layout.addWidget(buttons_widget, 0)
layout = QtWidgets.QVBoxLayout(self)
layout.addWidget(message_label, 0)
layout.addWidget(separator_widget_1, 0)
layout.addStretch(1)
layout.addWidget(other_information, 0, QtCore.Qt.AlignHCenter)
layout.addStretch(1)
layout.addWidget(separator_widget_2, 0)
layout.addWidget(footer_widget, 0)
timestamp_timer = QtCore.QTimer()
timestamp_timer.setInterval(1000)
timestamp_timer.timeout.connect(self.METHOD_NAME)
self._timestamp_label = timestamp_label
self._timestamp_timer = timestamp_timer
def showEvent(self, event):
super(BaseInfoDialog, self).showEvent(event)
self._timestamp_timer.start()
self.resize(self.width, self.height)
def closeEvent(self, event):
self._timestamp_timer.stop()
super(BaseInfoDialog, self).closeEvent(event)
def METHOD_NAME(self):
self._timestamp_label.setText(
pretty_date(self._info_obj.timestamp_obj)
)
def result(self):
return self._result
def get_buttons(self, parent):
return []
class SettingsUIOpenedElsewhere(BaseInfoDialog):
def __init__(self, info_obj, parent=None):
title = "Someone else has opened Settings UI"
message = (
"Someone else has opened Settings UI which could cause data loss."
" Please contact the person on the other side."
"<br/><br/>You can continue in <b>view-only mode</b>."
" All changes in view mode will be lost."
"<br/><br/>You can <b>take control</b> which will cause that"
" all changes of settings on the other side will be lost.<br/>"
)
super(SettingsUIOpenedElsewhere, self).__init__(
message, title, info_obj, parent
)
def _on_take_control(self):
self._result = 1
self.close()
def _on_view_mode(self):
self._result = 0
self.close()
def get_buttons(self, parent):
take_control_btn = QtWidgets.QPushButton(
"Take control", parent
)
view_mode_btn = QtWidgets.QPushButton(
"View only", parent
)
take_control_btn.clicked.connect(self._on_take_control)
view_mode_btn.clicked.connect(self._on_view_mode)
return [
take_control_btn,
view_mode_btn
]
class SettingsLastSavedChanged(BaseInfoDialog):
width = 500
height = 300
def __init__(self, info_obj, parent=None):
title = "Settings has changed"
message = (
"Settings has changed while you had opened this settings session."
"<br/><br/>It is <b>recommended to refresh settings</b>"
" and re-apply changes in the new session."
)
super(SettingsLastSavedChanged, self).__init__(
message, title, info_obj, parent
)
def _on_save(self):
self._result = 1
self.close()
def _on_close(self):
self._result = 0
self.close()
def get_buttons(self, parent):
close_btn = QtWidgets.QPushButton(
"Close", parent
)
save_btn = QtWidgets.QPushButton(
"Save anyway", parent
)
close_btn.clicked.connect(self._on_close)
save_btn.clicked.connect(self._on_save)
return [
close_btn,
save_btn
]
class SettingsControlTaken(BaseInfoDialog):
width = 500
height = 300
def __init__(self, info_obj, parent=None):
title = "Settings control taken"
message = (
"Someone took control over your settings."
"<br/><br/>It is not possible to save changes of currently"
" opened session. Copy changes you want to keep and hit refresh."
)
super(SettingsControlTaken, self).__init__(
message, title, info_obj, parent
)
def _on_confirm(self):
self.close()
def get_buttons(self, parent):
confirm_btn = QtWidgets.QPushButton("Understand", parent)
confirm_btn.clicked.connect(self._on_confirm)
return [confirm_btn]
|
3,778 |
set up
|
from django.urls import reverse
from rest_framework import status
from lego.apps.quotes.models import Quote
from lego.apps.users.models import AbakusGroup, User
from lego.utils.test_utils import BaseAPITestCase
def _get_list_url():
return reverse("api:v1:quote-list")
def _get_list_approved_url():
return _get_list_url() + "?approved=True"
def _get_list_unapproved_url():
return _get_list_url() + "?approved=False"
def _get_detail_url(pk):
return reverse("api:v1:quote-detail", kwargs={"pk": pk})
def _get_approve_url(pk):
return reverse("api:v1:quote-approve", kwargs={"pk": pk})
def _get_unapprove_url(pk):
return reverse("api:v1:quote-unapprove", kwargs={"pk": pk})
class QuoteViewSetTestCase(BaseAPITestCase):
fixtures = ["test_users.yaml", "test_abakus_groups.yaml", "test_quotes.yaml"]
def METHOD_NAME(self):
self.authenticated_user = User.objects.get(username="test1")
self.group = AbakusGroup.objects_with_text.get(name="QuoteAdminTest")
self.group.add_user(self.authenticated_user)
self.unauthenticated_user = User.objects.get(username="test2")
self.quote_data = {"text": "TestText", "source": "TestSource"}
def test_create_authenticated(self):
"""Users with permissions should be able to create quotes"""
self.client.force_authenticate(self.authenticated_user)
response = self.client.post(_get_list_url(), self.quote_data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_unauthenticated(self):
"""Users with no permissions should not be able to create quotes"""
self.client.force_authenticate(self.unauthenticated_user)
response = self.client.post(_get_list_url(), self.quote_data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_list_authenticated(self):
"""Users with permissions should be able to list quotes"""
self.client.force_authenticate(self.authenticated_user)
response = self.client.get(_get_list_approved_url())
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue(response.json())
def test_list_unauthenticated(self):
"""Users with no permissions should not be able to list quotes"""
self.client.force_authenticate(user=self.unauthenticated_user)
response = self.client.get(_get_list_approved_url())
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_detail_authenticated(self):
"""Users with permissions should be able to see detailed quotes"""
self.client.force_authenticate(self.authenticated_user)
response = self.client.get(_get_detail_url(1))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue(response.json())
def test_detail_unauthenticated(self):
"""Users with no permissions should not be able see detailed quotes"""
self.client.force_authenticate(user=self.unauthenticated_user)
response = self.client.get(_get_detail_url(1))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_approve_authenticated(self):
"""Users with permissions should be able to approve quotes"""
self.client.force_authenticate(self.authenticated_user)
response = self.client.put(_get_approve_url(3))
self.assertEqual(response.status_code, status.HTTP_200_OK)
quote = Quote.objects.get(id=3)
self.assertTrue(quote.approved)
def test_approve_permission(self):
"""Users should not have permission to approve their own quotes"""
self.client.force_authenticate(self.authenticated_user)
self.client.post(_get_list_url(), self.quote_data)
response = self.client.put(_get_approve_url(4))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
quote = Quote.objects.get(id=4)
self.assertFalse(quote.approved)
def test_approve_unauthenticated(self):
"""Users with no permissions should not be able to approve quotes"""
self.client.force_authenticate(self.unauthenticated_user)
response = self.client.put(_get_approve_url(3))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_list_unapproved_authenticated(self):
"""Users with permissions should be able to see unapproved quotes"""
self.client.force_authenticate(self.authenticated_user)
response = self.client.get(_get_list_unapproved_url())
self.assertEqual(response.status_code, status.HTTP_200_OK)
first_quote = response.json()["results"][0]
self.assertFalse(first_quote["approved"])
def test_list_unapproved_unauthenticated(self):
"""Users with no permissions should not be able to see unapproved quotes"""
self.client.force_authenticate(self.unauthenticated_user)
response = self.client.get(_get_list_unapproved_url())
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
def test_list_approved_unauthorized(self):
"""Users with regular permissions should be able to see approved quotes"""
self.group.permissions.remove("/sudo/admin/quotes/edit/")
self.group.save()
self.client.force_authenticate(self.authenticated_user)
response = self.client.get(_get_list_approved_url())
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertTrue(len(response.json()["results"]) > 0)
def test_list_unapproved_unauthorized(self):
"""Users with regular permissions should not be able to see unapproved quotes"""
self.group.permissions.remove("/sudo/admin/quotes/edit/")
self.group.save()
self.client.force_authenticate(self.authenticated_user)
response = self.client.get(_get_list_unapproved_url())
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertEqual(len(response.json()["results"]), 0)
|
3,779 |
from center cartesian
|
"""ProtocolEngine-based Well core implementations."""
from typing import Optional
from opentrons_shared_data.labware.constants import WELL_NAME_PATTERN
from opentrons.protocol_engine import WellLocation, WellOrigin, WellOffset
from opentrons.protocol_engine.clients import SyncClient as EngineClient
from opentrons.protocols.api_support.util import APIVersionError
from opentrons.types import Point
from . import point_calculations
from ..well import AbstractWellCore
from ..._liquid import Liquid
class WellCore(AbstractWellCore):
"""Well API core using a ProtocolEngine.
Args:
name: The well's name in the labware, e.g. `A1`.
labware_id: The ProtocolEngine ID of the well's parent labware.
engine_client: Synchronous ProtocolEngine client.
"""
def __init__(self, name: str, labware_id: str, engine_client: EngineClient) -> None:
self._labware_id = labware_id
self._engine_client = engine_client
self._definition = engine_client.state.labware.get_well_definition(
labware_id=labware_id, well_name=name
)
name_match = WELL_NAME_PATTERN.match(name)
self._name = name
self._row_name = name_match.group(1) if name_match is not None else ""
self._column_name = name_match.group(2) if name_match is not None else ""
@property
def labware_id(self) -> str:
"""Get the ID of the well's parent labware."""
return self._labware_id
@property
def diameter(self) -> Optional[float]:
"""Get the well's diameter, if circular."""
return self._definition.diameter
@property
def length(self) -> Optional[float]:
"""Get the well's length, if rectangular."""
return self._definition.xDimension
@property
def width(self) -> Optional[float]:
"""Get the well's width, if rectangular."""
return self._definition.yDimension
@property
def depth(self) -> float:
"""Get the well's depth."""
return self._definition.depth
def has_tip(self) -> bool:
"""Whether the well contains a tip."""
return self._engine_client.state.tips.has_clean_tip(
self._labware_id, self._name
)
def set_has_tip(self, value: bool) -> None:
"""Set the well as containing or not containing a tip."""
raise APIVersionError(
"Manually setting the tip state of a well in a tip rack has been deprecated."
)
def get_display_name(self) -> str:
"""Get the well's full display name."""
parent = self._engine_client.state.labware.get_display_name(self._labware_id)
return f"{self._name} of {parent}"
def get_name(self) -> str:
"""Get the name of the well (e.g. "A1")."""
return self._name
def get_column_name(self) -> str:
"""Get the column portion of the well name (e.g. "1")."""
return self._column_name
def get_row_name(self) -> str:
"""Get the row portion of the well name (e.g. "A")."""
return self._row_name
def get_max_volume(self) -> float:
"""Get the well's maximum liquid volume."""
return self._definition.totalLiquidVolume
def get_top(self, z_offset: float) -> Point:
"""Get the coordinate of the well's top, with a z-offset."""
return self._engine_client.state.geometry.get_well_position(
well_name=self._name,
labware_id=self._labware_id,
well_location=WellLocation(
origin=WellOrigin.TOP, offset=WellOffset(x=0, y=0, z=z_offset)
),
)
def get_bottom(self, z_offset: float) -> Point:
"""Get the coordinate of the well's bottom, with a z-offset."""
return self._engine_client.state.geometry.get_well_position(
well_name=self._name,
labware_id=self._labware_id,
well_location=WellLocation(
origin=WellOrigin.BOTTOM, offset=WellOffset(x=0, y=0, z=z_offset)
),
)
def get_center(self) -> Point:
"""Get the coordinate of the well's center."""
return self._engine_client.state.geometry.get_well_position(
well_name=self._name,
labware_id=self._labware_id,
well_location=WellLocation(origin=WellOrigin.CENTER),
)
def load_liquid(
self,
liquid: Liquid,
volume: float,
) -> None:
"""Load liquid into a well."""
self._engine_client.load_liquid(
labware_id=self._labware_id,
liquid_id=liquid._id,
volume_by_well={self._name: volume},
)
def METHOD_NAME(self, x: float, y: float, z: float) -> Point:
"""Gets point in deck coordinates based on percentage of the radius of each axis."""
well_size = self._engine_client.state.labware.get_well_size(
labware_id=self.labware_id, well_name=self._name
)
return point_calculations.get_relative_offset(
point=self.get_center(),
size=well_size,
x_ratio=x,
y_ratio=y,
z_ratio=z,
)
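# --- Illustrative note (an assumption about the ratio semantics, with a
# hypothetical `well_core` instance): the x/y/z arguments are fractions of each
# half-dimension, so (0, 0, 0) is the well centre and (1, 0, 0) lies on the
# well's edge along x, e.g.:
#
#     centre = well_core.METHOD_NAME(0.0, 0.0, 0.0)   # same as get_center()
#     edge_x = well_core.METHOD_NAME(1.0, 0.0, 0.0)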
|
3,780 |
add file
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Provides a factory class for generating dynamic messages.
The easiest way to use this class: if you have access to the FileDescriptor
protos containing the messages you want to create, you can just do the following:
message_classes = message_factory.GetMessages(iterable_of_file_descriptors)
my_proto_instance = message_classes['some.proto.package.MessageName']()
"""
__author__ = '[email protected] (Matt Toia)'
from google.protobuf.internal import api_implementation
from google.protobuf import descriptor_pool
from google.protobuf import message
if api_implementation.Type() == 'cpp':
from google.protobuf.pyext import cpp_message as message_impl
else:
from google.protobuf.internal import python_message as message_impl
# The type of all Message classes.
_GENERATED_PROTOCOL_MESSAGE_TYPE = message_impl.GeneratedProtocolMessageType
class MessageFactory(object):
"""Factory for creating Proto2 messages from descriptors in a pool."""
def __init__(self, pool=None):
"""Initializes a new factory."""
self.pool = pool or descriptor_pool.DescriptorPool()
# local cache of all classes built from protobuf descriptors
self._classes = {}
def GetPrototype(self, descriptor):
"""Obtains a proto2 message class based on the passed in descriptor.
Passing a descriptor with a fully qualified name matching a previous
invocation will cause the same class to be returned.
Args:
descriptor: The descriptor to build from.
Returns:
A class describing the passed in descriptor.
"""
if descriptor not in self._classes:
result_class = self.CreatePrototype(descriptor)
# The assignment to _classes is redundant for the base implementation, but
# might avoid confusion in cases where CreatePrototype gets overridden and
# does not call the base implementation.
self._classes[descriptor] = result_class
return result_class
return self._classes[descriptor]
def CreatePrototype(self, descriptor):
"""Builds a proto2 message class based on the passed in descriptor.
Don't call this function directly; it always creates a new class. Call
GetPrototype() instead. This method is meant to be overridden in subclasses
to perform additional operations on the newly constructed class.
Args:
descriptor: The descriptor to build from.
Returns:
A class describing the passed in descriptor.
"""
descriptor_name = descriptor.name
result_class = _GENERATED_PROTOCOL_MESSAGE_TYPE(
descriptor_name,
(message.Message,),
{
'DESCRIPTOR': descriptor,
# If module not set, it wrongly points to message_factory module.
'__module__': None,
})
result_class._FACTORY = self # pylint: disable=protected-access
# Assign in _classes before doing recursive calls to avoid infinite
# recursion.
self._classes[descriptor] = result_class
for field in descriptor.fields:
if field.message_type:
self.GetPrototype(field.message_type)
for extension in result_class.DESCRIPTOR.extensions:
if extension.containing_type not in self._classes:
self.GetPrototype(extension.containing_type)
extended_class = self._classes[extension.containing_type]
extended_class.RegisterExtension(extension)
return result_class
def GetMessages(self, files):
"""Gets all the messages from a specified file.
This will find and resolve dependencies, failing if the descriptor
pool cannot satisfy them.
Args:
files: The file names to extract messages from.
Returns:
A dictionary mapping proto names to the message classes. This will include
any dependent messages as well as any messages defined in the same file as
a specified message.
"""
result = {}
for file_name in files:
file_desc = self.pool.FindFileByName(file_name)
for desc in file_desc.message_types_by_name.values():
result[desc.full_name] = self.GetPrototype(desc)
# While the extension FieldDescriptors are created by the descriptor pool,
# the python classes created in the factory need them to be registered
# explicitly, which is done below.
#
# The call to RegisterExtension will specifically check if the
# extension was already registered on the object and either
# ignore the registration if the original was the same, or raise
# an error if they were different.
for extension in file_desc.extensions_by_name.values():
if extension.containing_type not in self._classes:
self.GetPrototype(extension.containing_type)
extended_class = self._classes[extension.containing_type]
extended_class.RegisterExtension(extension)
return result
_FACTORY = MessageFactory()
def GetMessages(file_protos):
"""Builds a dictionary of all the messages available in a set of files.
Args:
file_protos: Iterable of FileDescriptorProto to build messages out of.
Returns:
A dictionary mapping proto names to the message classes. This will include
any dependent messages as well as any messages defined in the same file as
a specified message.
"""
# The cpp implementation of the protocol buffer library requires adding the
# messages in topological order of the dependency graph.
file_by_name = {file_proto.name: file_proto for file_proto in file_protos}
def METHOD_NAME(file_proto):
for dependency in file_proto.dependency:
if dependency in file_by_name:
# Remove from elements to be visited, in order to cut cycles.
METHOD_NAME(file_by_name.pop(dependency))
_FACTORY.pool.Add(file_proto)
while file_by_name:
METHOD_NAME(file_by_name.popitem()[1])
return _FACTORY.GetMessages([file_proto.name for file_proto in file_protos])
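# --- Illustrative sketch (not part of the library): building a message class at
# runtime from a hand-rolled FileDescriptorProto; the package, file and field
# names below are hypothetical.
def _example_dynamic_message():
    from google.protobuf import descriptor_pb2

    file_proto = descriptor_pb2.FileDescriptorProto()
    file_proto.name = "example/dynamic.proto"
    file_proto.package = "example"
    msg = file_proto.message_type.add()
    msg.name = "Pair"
    field = msg.field.add()
    field.name = "key"
    field.number = 1
    field.type = descriptor_pb2.FieldDescriptorProto.TYPE_STRING
    field.label = descriptor_pb2.FieldDescriptorProto.LABEL_OPTIONAL

    classes = GetMessages([file_proto])
    return classes["example.Pair"](key="hello")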
|
3,781 |
test remote missing dependency on dir pull
|
import logging
import os
import pytest
from funcy import first
from voluptuous import MultipleInvalid, Schema
from dvc.fs import RemoteMissingDepsError
from dvc.ignore import _no_match
from dvc.output import CHECKSUM_SCHEMA, Output
from dvc.stage import Stage
from dvc.utils.fs import remove
def test_save_missing(dvc, mocker):
stage = Stage(dvc)
out = Output(stage, "path", cache=False)
mocker.patch.object(out.fs, "exists", return_value=False)
with pytest.raises(out.DoesNotExistError):
out.save()
@pytest.mark.parametrize(
"value,expected",
[
("", None),
(None, None),
(11111, "11111"),
("11111", "11111"),
("aAaBa", "aaaba"),
(
"3cc286c534a71504476da009ed174423",
"3cc286c534a71504476da009ed174423",
), # md5
(
"d41d8cd98f00b204e9800998ecf8427e-38",
"d41d8cd98f00b204e9800998ecf8427e-38",
), # etag
(
"000002000000000000000000c16859d1d071c6b1ffc9c8557d4909f1",
"000002000000000000000000c16859d1d071c6b1ffc9c8557d4909f1",
), # hdfs checksum
# Not much we can do about hex and oct values without writing our own
# parser. So listing these test cases just to acknowledge this.
# See https://github.com/iterative/dvc/issues/3331.
(0x3451, "13393"),
(0o1244, "676"),
],
)
def test_checksum_schema(value, expected):
assert Schema(CHECKSUM_SCHEMA)(value) == expected
@pytest.mark.parametrize("value", ["1", "11", {}, {"a": "b"}, [], [1, 2]])
def test_checksum_schema_fail(value):
with pytest.raises(MultipleInvalid):
assert Schema(CHECKSUM_SCHEMA)(value)
@pytest.mark.parametrize(
"exists, expected_message",
[
(
False,
(
"Output 'path'(stage: 'stage.dvc') is missing version info. "
"Cache for it will not be collected. "
"Use `dvc repro` to get your pipeline up to date."
),
),
(
True,
(
"Output 'path'(stage: 'stage.dvc') is missing version info. "
"Cache for it will not be collected. "
"Use `dvc repro` to get your pipeline up to date.\n"
"You can also use `dvc commit stage.dvc` to associate "
"existing 'path' with stage: 'stage.dvc'."
),
),
],
)
def test_get_used_objs(exists, expected_message, mocker, caplog):
stage = mocker.MagicMock()
mocker.patch.object(stage, "__str__", return_value="stage: 'stage.dvc'")
mocker.patch.object(stage, "addressing", "stage.dvc")
mocker.patch.object(stage, "wdir", os.getcwd())
mocker.patch.object(stage.repo, "root_dir", os.getcwd())
mocker.patch.object(stage.repo.dvcignore, "is_ignored", return_value=False)
mocker.patch.object(
stage.repo.dvcignore, "check_ignore", return_value=_no_match("path")
)
stage.repo.fs.version_aware = False
stage.repo.fs.PARAM_CHECKSUM = "md5"
output = Output(stage, "path")
mocker.patch.object(output, "use_cache", True)
mocker.patch.object(stage, "is_repo_import", False)
mocker.patch.object(
Output, "exists", new_callable=mocker.PropertyMock
).return_value = exists
with caplog.at_level(logging.WARNING, logger="dvc"):
assert {} == output.get_used_objs()
assert first(caplog.messages) == expected_message
def METHOD_NAME(tmp_dir, scm, dvc, mocker):
tmp_dir.dvc_gen({"dir": {"subfile": "file2 content"}}, commit="add dir")
with dvc.config.edit() as conf:
conf["remote"]["s3"] = {"url": "s3://bucket/name"}
conf["core"] = {"remote": "s3"}
remove("dir")
remove(dvc.cache.local.path)
mocker.patch(
"dvc.data_cloud.DataCloud.get_remote",
side_effect=RemoteMissingDepsError(dvc.fs, "azure", "azure://", []),
)
with pytest.raises(RemoteMissingDepsError):
dvc.pull()
def test_hash_info_cloud_versioning_dir(mocker):
stage = mocker.MagicMock()
stage.repo.fs.version_aware = False
stage.repo.fs.PARAM_CHECKSUM = "etag"
files = [
{
"size": 3,
"version_id": "WYRG4BglP7pD.gEoJP6a4AqOhl.FRA.h",
"etag": "acbd18db4cc2f85cedef654fccc4a4d8",
"md5": "acbd18db4cc2f85cedef654fccc4a4d8",
"relpath": "bar",
},
{
"size": 3,
"version_id": "0vL53tFVY5vVAoJ4HG2jCS1mEcohDPE0",
"etag": "acbd18db4cc2f85cedef654fccc4a4d8",
"md5": "acbd18db4cc2f85cedef654fccc4a4d8",
"relpath": "foo",
},
]
out = Output(stage, "path", files=files)
# `hash_info` and `meta` are constructed from `files`
assert out.hash_info.name == "md5"
assert out.hash_info.value == "77e8000f532886eef8ee1feba82e9bad.dir"
assert out.meta.isdir
assert out.meta.nfiles == 2
assert out.meta.size == 6
def test_dumpd_cloud_versioning_dir(mocker):
stage = mocker.MagicMock()
stage.repo.fs.version_aware = False
stage.repo.fs.PARAM_CHECKSUM = "md5"
files = [
{
"size": 3,
"version_id": "WYRG4BglP7pD.gEoJP6a4AqOhl.FRA.h",
"etag": "acbd18db4cc2f85cedef654fccc4a4d8",
"md5": "acbd18db4cc2f85cedef654fccc4a4d8",
"relpath": "bar",
},
{
"size": 3,
"version_id": "0vL53tFVY5vVAoJ4HG2jCS1mEcohDPE0",
"etag": "acbd18db4cc2f85cedef654fccc4a4d8",
"md5": "acbd18db4cc2f85cedef654fccc4a4d8",
"relpath": "foo",
},
]
out = Output(stage, "path", files=files)
dumpd = out.dumpd()
assert dumpd == {"path": "path", "hash": "md5", "files": files}
def test_version_aware_is_set_based_on_files(mocker):
import dvc.fs as dvc_fs
get_fs_config = mocker.spy(dvc_fs, "get_fs_config")
stage = mocker.MagicMock()
stage.repo.fs.version_aware = False
stage.repo.fs.PARAM_CHECKSUM = "etag"
files = [
{
"size": 3,
"version_id": "WYRG4BglP7pD.gEoJP6a4AqOhl.FRA.h",
"etag": "acbd18db4cc2f85cedef654fccc4a4d8",
"md5": "acbd18db4cc2f85cedef654fccc4a4d8",
"relpath": "bar",
}
]
Output(stage, "path", files=files)
# version_aware is passed as `True` if `files` is present.
# This will be intentionally ignored in filesystems that don't handle it
# in `_prepare_credentials`.
assert get_fs_config.call_args_list[0][1] == {
"url": "path",
"version_aware": True,
}
|
3,782 |
get stitch density
|
# Authors: see git history
#
# Copyright (c) 2010 Authors
# Licensed under the GNU GPL version 3.0 or later. See the file LICENSE for details.
import numpy as np
from scipy.spatial import KDTree
import inkex
from ..i18n import _
from ..stitch_plan import stitch_groups_to_stitch_plan
from ..svg import PIXELS_PER_MM
from ..svg.tags import INKSCAPE_GROUPMODE, INKSCAPE_LABEL, SVG_GROUP_TAG
from ..svg.units import get_viewbox_transform
from ..utils import cache
from .base import InkstitchExtension
class DensityMap(InkstitchExtension):
def __init__(self, *args, **kwargs):
InkstitchExtension.__init__(self, *args, **kwargs)
self.arg_parser.add_argument("-v", "--layer-visibility", type=int, default=0, dest="layer_visibility")
self.arg_parser.add_argument("-l", "--num-neighbors-red", type=int, default=6, dest="num_neighbors_red")
self.arg_parser.add_argument("-r", "--density-radius-red", type=float, default=0.5, dest="radius_red")
self.arg_parser.add_argument("-m", "--num-neighbors-yellow", type=int, default=3, dest="num_neighbors_yellow")
self.arg_parser.add_argument("-s", "--density-radius-yellow", type=float, default=0.5, dest="radius_yellow")
def effect(self):
# delete old stitch plan
svg = self.document.getroot()
reset_density_plan(svg)
# create new stitch plan
if not self.get_elements():
return
self.metadata = self.get_inkstitch_metadata()
collapse_len = self.metadata['collapse_len_mm']
patches = self.elements_to_stitch_groups(self.elements)
stitch_plan = stitch_groups_to_stitch_plan(patches, collapse_len=collapse_len)
layer = svg.find(".//*[@id='__inkstitch_density_plan__']")
color_groups = create_color_groups(layer)
density_options = [{'max_neighbors': self.options.num_neighbors_red, 'radius': self.options.radius_red},
{'max_neighbors': self.options.num_neighbors_yellow, 'radius': self.options.radius_yellow}]
color_block_to_density_markers(svg, color_groups, stitch_plan, density_options)
# update layer visibility: 0 = unchanged, 1 = hidden, 2 = lower opacity
groups = self.document.getroot().findall(SVG_GROUP_TAG)
if self.options.layer_visibility == 1:
self.hide_all_layers()
layer.style['display'] = "inline"
elif self.options.layer_visibility == 2:
for g in groups:
style = g.specified_style()
# check groupmode and exclude density layer
# exclude objects which are not displayed at all or already have opacity < 0.4
if (g.get(INKSCAPE_GROUPMODE) == "layer" and not g == layer and
float(style.get('opacity', 1)) > 0.4 and not style.get('display', 'inline') == 'none'):
g.style['opacity'] = 0.4
def reset_density_plan(svg):
layer = svg.find(".//*[@id='__inkstitch_density_plan__']")
if layer is None:
layer = inkex.Group(attrib={
'id': '__inkstitch_density_plan__',
INKSCAPE_LABEL: _('Density Plan'),
INKSCAPE_GROUPMODE: 'layer'
})
svg.append(layer)
else:
# delete old density plan
del layer[:]
# make sure the layer is visible
layer.set('style', 'display:inline')
def create_color_groups(layer):
color_groups = []
colors = [_("Red"), _("Yellow"), _("Green")]
for color in colors:
color_group = inkex.Group(attrib={
'id': '__%s_density_layer__' % color.lower(),
INKSCAPE_LABEL: _('%s density') % color,
})
layer.append(color_group)
color_groups.append(color_group)
return color_groups
def color_block_to_density_markers(svg, groups, stitch_plan, density_options):
num_neighbors = []
for option in density_options:
radius = option['radius'] * PIXELS_PER_MM
num_neighbors.append(METHOD_NAME(stitch_plan, radius))
red_group, yellow_group, green_group = groups
for red_neighbors, yellow_neighbors, coord in zip(num_neighbors[0][0], num_neighbors[1][0], num_neighbors[0][1]):
color = "green" # green
group = green_group
if density_options[0]['max_neighbors'] <= red_neighbors:
color = "red"
group = red_group
elif density_options[1]['max_neighbors'] <= yellow_neighbors:
color = "yellow"
group = yellow_group
density_marker = inkex.Circle(attrib={
'id': svg.get_unique_id("density_marker"),
'style': "fill: %s; stroke: #7e7e7e; stroke-width: 0.02%%;" % color,
'cx': "%s" % coord[0],
'cy': "%s" % coord[1],
'r': str(0.5),
'transform': get_correction_transform(svg)
})
group.append(density_marker)
def METHOD_NAME(stitch_plan, radius):
stitches = []
for color_block in stitch_plan:
for stitch in color_block:
stitches.append((stitch.x, stitch.y))
# get density per stitch
tree = KDTree(np.array(stitches))
neighbors = tree.query_ball_tree(tree, radius)
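# pair each stitch's neighbor count with its coordinates so markers can be placed later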
density = [len(i) for i in neighbors], stitches
return density
@cache
def get_correction_transform(svg):
transform = get_viewbox_transform(svg)
# we need to correct for the viewbox
transform = -inkex.transforms.Transform(transform)
return str(transform)
|
3,783 |
test embed kernel func
|
"""test embed_kernel"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import json
import os
import sys
import threading
import time
from contextlib import contextmanager
from subprocess import PIPE, Popen
import pytest
from flaky import flaky
from jupyter_client.blocking.client import BlockingKernelClient
from jupyter_core import paths
from ipykernel.embed import IPKernelApp, embed_kernel # type:ignore[attr-defined]
SETUP_TIMEOUT = 60
TIMEOUT = 15
if os.name == "nt":
pytest.skip("skipping tests on windows", allow_module_level=True)
@contextmanager
def setup_kernel(cmd):
"""start an embedded kernel in a subprocess, and wait for it to be ready
Returns
-------
kernel_manager: connected KernelManager instance
"""
def connection_file_ready(connection_file):
"""Check if connection_file is a readable json file."""
if not os.path.exists(connection_file):
return False
try:
with open(connection_file) as f:
json.load(f)
return True
except ValueError:
return False
kernel = Popen([sys.executable, "-c", cmd], stdout=PIPE, stderr=PIPE, encoding="utf-8")
try:
connection_file = os.path.join(
paths.jupyter_runtime_dir(),
"kernel-%i.json" % kernel.pid,
)
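# the embedded kernel is expected to write its connection file as
# kernel-<pid>.json inside the Jupyter runtime directory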
# wait for the connection file to exist, timeout after SETUP_TIMEOUT seconds
tic = time.time()
while (
not connection_file_ready(connection_file)
and kernel.poll() is None
and time.time() < tic + SETUP_TIMEOUT
):
time.sleep(0.1)
# Wait 100ms for the writing to finish
time.sleep(0.1)
if kernel.poll() is not None:
o, e = kernel.communicate()
raise OSError("Kernel failed to start:\n%s" % e)
if not os.path.exists(connection_file):
if kernel.poll() is None:
kernel.terminate()
raise OSError("Connection file %r never arrived" % connection_file)
client = BlockingKernelClient(connection_file=connection_file)
client.load_connection_file()
client.start_channels()
client.wait_for_ready()
try:
yield client
finally:
client.stop_channels()
finally:
kernel.terminate()
kernel.wait()
# Make sure all the fds get closed.
for attr in ["stdout", "stderr", "stdin"]:
fid = getattr(kernel, attr)
if fid:
fid.close()
@flaky(max_runs=3)
def test_embed_kernel_basic():
"""IPython.embed_kernel() is basically functional"""
cmd = "\n".join(
[
"from IPython import embed_kernel",
"def go():",
" a=5",
' b="hi there"',
" embed_kernel()",
"go()",
"",
]
)
with setup_kernel(cmd) as client:
# oinfo a (int)
client.inspect("a")
msg = client.get_shell_msg(timeout=TIMEOUT)
content = msg["content"]
assert content["found"]
client.execute("c=a*2")
msg = client.get_shell_msg(timeout=TIMEOUT)
content = msg["content"]
assert content["status"] == "ok"
# oinfo c (should be 10)
client.inspect("c")
msg = client.get_shell_msg(timeout=TIMEOUT)
content = msg["content"]
assert content["found"]
text = content["data"]["text/plain"]
assert "10" in text
@flaky(max_runs=3)
def test_embed_kernel_namespace():
"""IPython.embed_kernel() inherits calling namespace"""
cmd = "\n".join(
[
"from IPython import embed_kernel",
"def go():",
" a=5",
' b="hi there"',
" embed_kernel()",
"go()",
"",
]
)
with setup_kernel(cmd) as client:
# oinfo a (int)
client.inspect("a")
msg = client.get_shell_msg(timeout=TIMEOUT)
content = msg["content"]
assert content["found"]
text = content["data"]["text/plain"]
assert "5" in text
# oinfo b (str)
client.inspect("b")
msg = client.get_shell_msg(timeout=TIMEOUT)
content = msg["content"]
assert content["found"]
text = content["data"]["text/plain"]
assert "hi there" in text
# oinfo c (undefined)
client.inspect("c")
msg = client.get_shell_msg(timeout=TIMEOUT)
content = msg["content"]
assert not content["found"]
@flaky(max_runs=3)
def test_embed_kernel_reentrant():
"""IPython.embed_kernel() can be called multiple times"""
cmd = "\n".join(
[
"from IPython import embed_kernel",
"count = 0",
"def go():",
" global count",
" embed_kernel()",
" count = count + 1",
"",
"while True: go()",
"",
]
)
with setup_kernel(cmd) as client:
for i in range(5):
client.inspect("count")
msg = client.get_shell_msg(timeout=TIMEOUT)
content = msg["content"]
assert content["found"]
text = content["data"]["text/plain"]
assert str(i) in text
# exit from embed_kernel
client.execute("get_ipython().exit_now = True")
msg = client.get_shell_msg(timeout=TIMEOUT)
time.sleep(0.2)
def METHOD_NAME():
from types import ModuleType
module = ModuleType("test")
def trigger_stop():
time.sleep(1)
app = IPKernelApp.instance()
app.io_loop.add_callback(app.io_loop.stop)
IPKernelApp.clear_instance()
thread = threading.Thread(target=trigger_stop)
thread.start()
embed_kernel(module, outstream_class=None)
|
3,784 |
equals
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from dataclasses import dataclass
from plc4py.api.messages.PlcMessage import PlcMessage
from plc4py.protocols.modbus.readwrite.DriverType import DriverType
from plc4py.protocols.modbus.readwrite.ModbusADU import ModbusADU
from plc4py.protocols.modbus.readwrite.ModbusADU import ModbusADUBuilder
from plc4py.protocols.modbus.readwrite.ModbusPDU import ModbusPDU
from plc4py.spi.generation.ReadBuffer import ReadBuffer
from plc4py.spi.generation.WriteBuffer import WriteBuffer
import math
@dataclass
class ModbusTcpADU(PlcMessage, ModbusADU):
transaction_identifier: int
unit_identifier: int
pdu: ModbusPDU
# Arguments.
response: bool
PROTOCOLIDENTIFIER: int = 0x0000
# Accessors for discriminator values.
driver_type: DriverType = DriverType.MODBUS_TCP
def __post_init__(self):
super().__init__(self.response)
def serialize_modbus_adu_child(self, write_buffer: WriteBuffer):
write_buffer.push_context("ModbusTcpADU")
# Simple Field (transactionIdentifier)
write_buffer.write_unsigned_short(
self.transaction_identifier, logical_name="transactionIdentifier"
)
# Const Field (protocolIdentifier)
write_buffer.write_unsigned_short(
self.protocol_identifier.value, logical_name="protocolIdentifier"
)
# Implicit Field (length) (used for parsing, but its value is not stored as it is implicitly given by the object's content)
length: int = self.pdu.length_in_bytes() + int(1)
write_buffer.write_unsigned_short(length, logical_name="length")
# Simple Field (unitIdentifier)
write_buffer.write_unsigned_byte(
self.unit_identifier, logical_name="unitIdentifier"
)
# Simple Field (pdu)
write_buffer.write_serializable(self.pdu, logical_name="pdu")
write_buffer.pop_context("ModbusTcpADU")
def length_in_bytes(self) -> int:
return int(math.ceil(float(self.get_length_in_bits() / 8.0)))
def get_length_in_bits(self) -> int:
length_in_bits: int = super().get_length_in_bits()
_value: ModbusTcpADU = self
# Simple field (transactionIdentifier)
length_in_bits += 16
# Const Field (protocolIdentifier)
length_in_bits += 16
# Implicit Field (length)
length_in_bits += 16
# Simple field (unitIdentifier)
length_in_bits += 8
# Simple field (pdu)
length_in_bits += self.pdu.get_length_in_bits()
return length_in_bits
@staticmethod
def static_parse_builder(
read_buffer: ReadBuffer, driver_type: DriverType, response: bool
):
read_buffer.push_context("ModbusTcpADU")
transaction_identifier = read_simple_field(
"transactionIdentifier",
read_unsigned_int,
WithOption.WithByteOrder(get_bi_g__endian()),
)
protocol_identifier: int = read_const_field(
"protocolIdentifier",
read_unsigned_int,
ModbusTcpADU.PROTOCOLIDENTIFIER,
WithOption.WithByteOrder(get_bi_g__endian()),
)
length: int = read_implicit_field(
"length", read_unsigned_int, WithOption.WithByteOrder(get_bi_g__endian())
)
unit_identifier = read_simple_field(
"unitIdentifier",
read_unsigned_short,
WithOption.WithByteOrder(get_bi_g__endian()),
)
pdu = read_simple_field(
"pdu",
DataReaderComplexDefault(
ModbusPDU.static_parse(read_buffer, bool(response)), read_buffer
),
WithOption.WithByteOrder(get_bi_g__endian()),
)
read_buffer.pop_context("ModbusTcpADU")
# Create the instance
return ModbusTcpADUBuilder(
transaction_identifier, unit_identifier, pdu, response
)
def METHOD_NAME(self, o: object) -> bool:
if self == o:
return True
if not isinstance(o, ModbusTcpADU):
return False
that: ModbusTcpADU = o
return (
(self.transaction_identifier == that.transaction_identifier)
and (self.unit_identifier == that.unit_identifier)
and (self.pdu == that.pdu)
and super().METHOD_NAME(that)
and True
)
def hash_code(self) -> int:
return hash(self)
def __str__(self) -> str:
write_buffer_box_based: WriteBufferBoxBased = WriteBufferBoxBased(True, True)
try:
write_buffer_box_based.writeSerializable(self)
except SerializationException as e:
raise RuntimeException(e)
return "\n" + str(write_buffer_box_based.get_box()) + "\n"
@dataclass
class ModbusTcpADUBuilder(ModbusADUBuilder):
transactionIdentifier: int
unitIdentifier: int
pdu: ModbusPDU
response: bool
def __post_init__(self):
pass
def build(self, response: bool) -> ModbusTcpADU:
modbus_tcp_adu: ModbusTcpADU = ModbusTcpADU(
self.transaction_identifier, self.unit_identifier, self.pdu, response
)
return modbus_tcp_adu
|
3,785 |
get extension
|
from typing import Any, Callable, Iterator, Optional, Protocol, Tuple, Union
from thinc.types import Floats1d, FloatsXd
from ..lexeme import Lexeme
from ..vocab import Vocab
from .doc import Doc
from .morphanalysis import MorphAnalysis
from .span import Span
from .underscore import Underscore
class TokenMethod(Protocol):
def __call__(self: Token, *args: Any, **kwargs: Any) -> Any: ... # type: ignore[misc]
class Token:
i: int
doc: Doc
vocab: Vocab
@classmethod
def set_extension(
cls,
name: str,
default: Optional[Any] = ...,
getter: Optional[Callable[[Token], Any]] = ...,
setter: Optional[Callable[[Token, Any], None]] = ...,
method: Optional[TokenMethod] = ...,
force: bool = ...,
) -> None: ...
@classmethod
def METHOD_NAME(
cls, name: str
) -> Tuple[
Optional[Any],
Optional[TokenMethod],
Optional[Callable[[Token], Any]],
Optional[Callable[[Token, Any], None]],
]: ...
@classmethod
def has_extension(cls, name: str) -> bool: ...
@classmethod
def remove_extension(
cls, name: str
) -> Tuple[
Optional[Any],
Optional[TokenMethod],
Optional[Callable[[Token], Any]],
Optional[Callable[[Token, Any], None]],
]: ...
def __init__(self, vocab: Vocab, doc: Doc, offset: int) -> None: ...
def __hash__(self) -> int: ...
def __len__(self) -> int: ...
def __unicode__(self) -> str: ...
def __bytes__(self) -> bytes: ...
def __str__(self) -> str: ...
def __repr__(self) -> str: ...
def __richcmp__(self, other: Token, op: int) -> bool: ...
@property
def _(self) -> Underscore: ...
def nbor(self, i: int = ...) -> Token: ...
def similarity(self, other: Union[Doc, Span, Token, Lexeme]) -> float: ...
def has_morph(self) -> bool: ...
morph: MorphAnalysis
@property
def lex(self) -> Lexeme: ...
@property
def lex_id(self) -> int: ...
@property
def rank(self) -> int: ...
@property
def text(self) -> str: ...
@property
def text_with_ws(self) -> str: ...
@property
def prob(self) -> float: ...
@property
def sentiment(self) -> float: ...
@property
def lang(self) -> int: ...
@property
def idx(self) -> int: ...
@property
def cluster(self) -> int: ...
@property
def orth(self) -> int: ...
@property
def lower(self) -> int: ...
@property
def norm(self) -> int: ...
@property
def shape(self) -> int: ...
@property
def prefix(self) -> int: ...
@property
def suffix(self) -> int: ...
lemma: int
pos: int
tag: int
dep: int
@property
def has_vector(self) -> bool: ...
@property
def vector(self) -> Floats1d: ...
@property
def vector_norm(self) -> float: ...
@property
def tensor(self) -> Optional[FloatsXd]: ...
@property
def n_lefts(self) -> int: ...
@property
def n_rights(self) -> int: ...
@property
def sent(self) -> Span: ...
sent_start: bool
is_sent_start: Optional[bool]
is_sent_end: Optional[bool]
@property
def lefts(self) -> Iterator[Token]: ...
@property
def rights(self) -> Iterator[Token]: ...
@property
def children(self) -> Iterator[Token]: ...
@property
def subtree(self) -> Iterator[Token]: ...
@property
def left_edge(self) -> Token: ...
@property
def right_edge(self) -> Token: ...
@property
def ancestors(self) -> Iterator[Token]: ...
def is_ancestor(self, descendant: Token) -> bool: ...
def has_head(self) -> bool: ...
head: Token
@property
def conjuncts(self) -> Tuple[Token]: ...
ent_type: int
ent_type_: str
@property
def ent_iob(self) -> int: ...
@classmethod
def iob_strings(cls) -> Tuple[str]: ...
@property
def ent_iob_(self) -> str: ...
ent_id: int
ent_id_: str
ent_kb_id: int
ent_kb_id_: str
@property
def whitespace_(self) -> str: ...
@property
def orth_(self) -> str: ...
@property
def lower_(self) -> str: ...
norm_: str
@property
def shape_(self) -> str: ...
@property
def prefix_(self) -> str: ...
@property
def suffix_(self) -> str: ...
@property
def lang_(self) -> str: ...
lemma_: str
pos_: str
tag_: str
def has_dep(self) -> bool: ...
dep_: str
@property
def is_oov(self) -> bool: ...
@property
def is_stop(self) -> bool: ...
@property
def is_alpha(self) -> bool: ...
@property
def is_ascii(self) -> bool: ...
@property
def is_digit(self) -> bool: ...
@property
def is_lower(self) -> bool: ...
@property
def is_upper(self) -> bool: ...
@property
def is_title(self) -> bool: ...
@property
def is_punct(self) -> bool: ...
@property
def is_space(self) -> bool: ...
@property
def is_bracket(self) -> bool: ...
@property
def is_quote(self) -> bool: ...
@property
def is_left_punct(self) -> bool: ...
@property
def is_right_punct(self) -> bool: ...
@property
def is_currency(self) -> bool: ...
@property
def like_url(self) -> bool: ...
@property
def like_num(self) -> bool: ...
@property
def like_email(self) -> bool: ...
|
3,786 |
make function list
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# drawElements Quality Program utilities
# --------------------------------------
#
# Copyright 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
import os
import string
from egl.common import *
from opengl.src_util import getGLRegistry
from itertools import chain
import khr_util.registry
from khr_util.format import indentLines
def toCamelCase (extName):
return "".join([x.title() for x in extName.split("_")])
def makeStringList (name, strings):
yield ""
yield "static const char* s_%s[] =" % name
yield "{"
for entry in strings:
yield "\t\"%s\"," % (entry)
yield "};"
def METHOD_NAME (name, iface):
return makeStringList(name, [command.name for command in iface.commands])
def makeExtensionList (extensions):
for name, iface in extensions:
for line in METHOD_NAME(name, iface):
yield line
yield ""
yield "static const struct"
yield "{"
yield "\tconst char*\t\t\tname;"
yield "\tconst int\t\t\tnumFunctions;"
yield "\tconst char* const*\tfunctions;"
yield "} s_extensions[] ="
yield "{"
entries = []
for name, iface in extensions:
entries.append("\t{ \"%s\",\tDE_LENGTH_OF_ARRAY(s_%s),\ts_%s\t}," % (name, name, name))
for line in indentLines(entries):
yield line
yield "};"
def getExtensionList (registry, api):
exts = []
for extension in registry.extensions:
if not khr_util.registry.extensionSupports(extension, api):
continue
spec = khr_util.registry.InterfaceSpec()
spec.addExtension(extension, api)
iface = khr_util.registry.createInterface(registry, spec, api)
if len(iface.commands) == 0:
continue
exts.append((khr_util.registry.getExtensionName(extension),
iface))
return exts
def uniqueExtensions (extensions):
res = []
seen = set()
for name, iface in extensions:
if not name in seen:
res.append((name, iface))
seen.add(name)
return res
def getInterfaceExactVersion (registry, api, version):
spec = khr_util.registry.InterfaceSpec()
def check (v): return v == version
for feature in registry.getFeatures(api, check):
spec.addFeature(feature, api)
return khr_util.registry.createInterface(registry, spec, api)
def gen ():
eglRegistry = getEGLRegistry()
eglCoreIface = getInterface(eglRegistry, 'egl', '1.4')
egl15Iface = getInterfaceExactVersion(eglRegistry, 'egl', '1.5')
eglExtensions = getExtensionList(eglRegistry, 'egl')
glRegistry = getGLRegistry()
gles1Extensions = getExtensionList(glRegistry, 'gles1')
gles2Extensions = getExtensionList(glRegistry, 'gles2')
gles10CoreIface = getInterface(glRegistry, 'gles1', '1.0')
gles20CoreIface = getInterface(glRegistry, 'gles2', '2.0')
gles30CoreIface = getInterfaceExactVersion(glRegistry, 'gles2', '3.0')
# gles31CoreIface = getInterfaceExactVersion(glRegistry, 'gles2', '3.1')
allExtensions = eglExtensions + uniqueExtensions(gles1Extensions + gles2Extensions)
writeInlFile(os.path.normpath(os.path.join(SCRIPTS_DIR, "..", "..", "modules", "egl", "teglGetProcAddressTests.inl")),
chain(METHOD_NAME ("EGL14", eglCoreIface),
METHOD_NAME ("EGL15", egl15Iface),
METHOD_NAME ("GLES10", gles10CoreIface),
METHOD_NAME ("GLES20", gles20CoreIface),
METHOD_NAME ("GLES30", gles30CoreIface),
# makeFunctionList ("GLES31", gles31CoreIface),
makeExtensionList (allExtensions)))
|
3,787 |
hash143
|
from micropython import const
from typing import TYPE_CHECKING
from trezor.crypto.hashlib import blake2b
from trezor.utils import HashWriter
from trezor.wire import DataError
from ..writers import TX_HASH_SIZE, write_bytes_reversed, write_uint32, write_uint64
from .bitcoinlike import Bitcoinlike
if TYPE_CHECKING:
from typing import Sequence
from trezor.messages import PrevTx, SignTx, TxInput, TxOutput
from apps.common.coininfo import CoinInfo
from apps.common.keychain import Keychain
from ..common import SigHashType
from ..writers import Writer
from . import approvers
from .sig_hasher import SigHasher
from .tx_info import OriginalTxInfo, TxInfo
_OVERWINTERED = const(0x8000_0000)
class Zip243SigHasher:
def __init__(self) -> None:
self.h_prevouts = HashWriter(blake2b(outlen=32, personal=b"ZcashPrevoutHash"))
self.h_sequence = HashWriter(blake2b(outlen=32, personal=b"ZcashSequencHash"))
self.h_outputs = HashWriter(blake2b(outlen=32, personal=b"ZcashOutputsHash"))
def add_input(self, txi: TxInput, script_pubkey: bytes) -> None:
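# fold this input's outpoint and sequence number into the running prevouts/sequence hashes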
write_bytes_reversed(self.h_prevouts, txi.prev_hash, TX_HASH_SIZE)
write_uint32(self.h_prevouts, txi.prev_index)
write_uint32(self.h_sequence, txi.sequence)
def add_output(self, txo: TxOutput, script_pubkey: bytes) -> None:
from ..writers import write_tx_output
write_tx_output(self.h_outputs, txo, script_pubkey)
def METHOD_NAME(
self,
txi: TxInput,
public_keys: Sequence[bytes | memoryview],
threshold: int,
tx: SignTx | PrevTx,
coin: CoinInfo,
hash_type: int,
) -> bytes:
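# ZIP-243 transparent-input signature digest: a BLAKE2b-256 hash personalized
# with "ZcashSigHash" plus the consensus branch id, over the fields below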
import ustruct as struct
from ..scripts import write_bip143_script_code_prefixed
from ..writers import get_tx_hash, write_bytes_fixed
h_preimage = HashWriter(
blake2b(
outlen=32,
personal=b"ZcashSigHash" + struct.pack("<I", tx.branch_id),
)
)
assert tx.version_group_id is not None
assert tx.expiry is not None
zero_hash = b"\x00" * TX_HASH_SIZE
# 1. nVersion | fOverwintered
write_uint32(h_preimage, tx.version | _OVERWINTERED)
# 2. nVersionGroupId
write_uint32(h_preimage, tx.version_group_id)
# 3. hashPrevouts
write_bytes_fixed(h_preimage, get_tx_hash(self.h_prevouts), TX_HASH_SIZE)
# 4. hashSequence
write_bytes_fixed(h_preimage, get_tx_hash(self.h_sequence), TX_HASH_SIZE)
# 5. hashOutputs
write_bytes_fixed(h_preimage, get_tx_hash(self.h_outputs), TX_HASH_SIZE)
# 6. hashJoinSplits
write_bytes_fixed(h_preimage, zero_hash, TX_HASH_SIZE)
# 7. hashShieldedSpends
write_bytes_fixed(h_preimage, zero_hash, TX_HASH_SIZE)
# 8. hashShieldedOutputs
write_bytes_fixed(h_preimage, zero_hash, TX_HASH_SIZE)
# 9. nLockTime
write_uint32(h_preimage, tx.lock_time)
# 10. expiryHeight
write_uint32(h_preimage, tx.expiry)
# 11. valueBalance
write_uint64(h_preimage, 0)
# 12. nHashType
write_uint32(h_preimage, hash_type)
# 13a. outpoint
write_bytes_reversed(h_preimage, txi.prev_hash, TX_HASH_SIZE)
write_uint32(h_preimage, txi.prev_index)
# 13b. scriptCode
write_bip143_script_code_prefixed(h_preimage, txi, public_keys, threshold, coin)
# 13c. value
write_uint64(h_preimage, txi.amount)
# 13d. nSequence
write_uint32(h_preimage, txi.sequence)
return get_tx_hash(h_preimage)
def hash341(
self,
i: int,
tx: SignTx | PrevTx,
sighash_type: SigHashType,
) -> bytes:
raise NotImplementedError
def hash_zip244(
self,
txi: TxInput | None,
script_pubkey: bytes | None,
) -> bytes:
raise NotImplementedError
class ZcashV4(Bitcoinlike):
def __init__(
self,
tx: SignTx,
keychain: Keychain,
coin: CoinInfo,
approver: approvers.Approver | None,
) -> None:
from trezor.utils import ensure
ensure(coin.overwintered)
super().__init__(tx, keychain, coin, approver)
if tx.version != 4:
raise DataError("Unsupported transaction version.")
def create_sig_hasher(self, tx: SignTx | PrevTx) -> SigHasher:
return Zip243SigHasher()
async def step7_finish(self) -> None:
from apps.common.writers import write_compact_size
from . import helpers
serialized_tx = self.serialized_tx # local_cache_attribute
if self.serialize:
self.write_tx_footer(serialized_tx, self.tx_info.tx)
write_uint64(serialized_tx, 0) # valueBalance
write_compact_size(serialized_tx, 0) # nShieldedSpend
write_compact_size(serialized_tx, 0) # nShieldedOutput
write_compact_size(serialized_tx, 0) # nJoinSplit
await helpers.request_tx_finish(self.tx_req)
async def sign_nonsegwit_input(self, i_sign: int) -> None:
await self.sign_nonsegwit_bip143_input(i_sign)
async def get_tx_digest(
self,
i: int,
txi: TxInput,
tx_info: TxInfo | OriginalTxInfo,
public_keys: Sequence[bytes | memoryview],
threshold: int,
script_pubkey: bytes,
tx_hash: bytes | None = None,
) -> bytes:
return tx_info.sig_hasher.METHOD_NAME(
txi,
public_keys,
threshold,
tx_info.tx,
self.coin,
self.get_sighash_type(txi),
)
def write_tx_header(
self, w: Writer, tx: SignTx | PrevTx, witness_marker: bool
) -> None:
if tx.version < 3:
# pre-overwinter
write_uint32(w, tx.version)
else:
if tx.version_group_id is None:
raise DataError("Version group ID is missing")
# nVersion | fOverwintered
write_uint32(w, tx.version | _OVERWINTERED)
write_uint32(w, tx.version_group_id) # nVersionGroupId
def write_tx_footer(self, w: Writer, tx: SignTx | PrevTx) -> None:
assert tx.expiry is not None # checked in sanitize_*
write_uint32(w, tx.lock_time)
if tx.version >= 3:
write_uint32(w, tx.expiry) # expiryHeight
|
3,788 |
write code files
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import os
import shutil
import frappe
import frappe.model
from frappe.modules import get_module_path, scrub, scrub_dt_dn
def export_doc(doc):
write_document_file(doc)
def export_to_files(record_list=None, record_module=None, verbose=0, create_init=None):
"""
Export record_list to files. record_list is a list of lists ([doctype, docname, folder name], ...).
"""
if frappe.flags.in_import:
return
if record_list:
for record in record_list:
folder_name = record[2] if len(record) == 3 else None
write_document_file(
frappe.get_doc(record[0], record[1]),
record_module,
create_init=create_init,
folder_name=folder_name,
)
def write_document_file(doc, record_module=None, create_init=True, folder_name=None):
doc_export = doc.as_dict(no_nulls=True)
doc.run_method("before_export", doc_export)
doc_export = strip_default_fields(doc, doc_export)
module = record_module or get_module_name(doc)
# create folder
if folder_name:
folder = create_folder(module, folder_name, doc.name, create_init)
else:
folder = create_folder(module, doc.doctype, doc.name, create_init)
fname = scrub(doc.name)
METHOD_NAME(folder, fname, doc, doc_export)
# write the data file
path = os.path.join(folder, f"{fname}.json")
with open(path, "w+") as txtfile:
txtfile.write(frappe.as_json(doc_export))
print(f"Wrote document file for {doc.doctype} {doc.name} at {path}")
def strip_default_fields(doc, doc_export):
# strip out default fields from children
if doc.doctype == "DocType" and doc.migration_hash:
del doc_export["migration_hash"]
for df in doc.meta.get_table_fields():
for d in doc_export.get(df.fieldname):
for fieldname in frappe.model.default_fields + frappe.model.child_table_fields:
if fieldname in d:
del d[fieldname]
return doc_export
def METHOD_NAME(folder, fname, doc, doc_export):
"""Export code files and strip from values"""
if hasattr(doc, "get_code_fields"):
for key, extn in doc.get_code_fields().items():
if doc.get(key):
with open(os.path.join(folder, fname + "." + extn), "w+") as txtfile:
txtfile.write(doc.get(key))
# remove from exporting
del doc_export[key]
def get_module_name(doc):
if doc.doctype == "Module Def":
module = doc.name
elif doc.doctype == "Workflow":
module = frappe.db.get_value("DocType", doc.document_type, "module")
elif hasattr(doc, "module"):
module = doc.module
else:
module = frappe.db.get_value("DocType", doc.doctype, "module")
return module
def delete_folder(module, dt, dn):
if frappe.db.get_value("Module Def", module, "custom"):
module_path = get_custom_module_path(module)
else:
module_path = get_module_path(module)
dt, dn = scrub_dt_dn(dt, dn)
# delete folder
folder = os.path.join(module_path, dt, dn)
if os.path.exists(folder):
shutil.rmtree(folder)
def create_folder(module, dt, dn, create_init):
if frappe.db.get_value("Module Def", module, "custom"):
module_path = get_custom_module_path(module)
else:
module_path = get_module_path(module)
dt, dn = scrub_dt_dn(dt, dn)
# create folder
folder = os.path.join(module_path, dt, dn)
frappe.create_folder(folder)
# create init_py_files
if create_init:
create_init_py(module_path, dt, dn)
return folder
def get_custom_module_path(module):
package = frappe.db.get_value("Module Def", module, "package")
if not package:
frappe.throw(f"Package must be set for custom Module <b>{module}</b>")
path = os.path.join(get_package_path(package), scrub(module))
if not os.path.exists(path):
os.makedirs(path)
return path
def get_package_path(package):
path = os.path.join(
frappe.get_site_path("packages"), frappe.db.get_value("Package", package, "package_name")
)
if not os.path.exists(path):
os.makedirs(path)
return path
def create_init_py(module_path, dt, dn):
def create_if_not_exists(path):
initpy = os.path.join(path, "__init__.py")
if not os.path.exists(initpy):
open(initpy, "w").close()
create_if_not_exists(os.path.join(module_path))
create_if_not_exists(os.path.join(module_path, dt))
create_if_not_exists(os.path.join(module_path, dt, dn))
|
3,789 |
test permutation3 q
|
import numpy as np
import pytest
from scipy.sparse import csr_matrix
from qulacs import QuantumCircuit, QuantumState
from qulacs.gate import (
CNOT,
TOFFOLI,
DenseMatrix,
DiagonalMatrix,
H,
PauliRotation,
ReversibleBoolean,
SparseMatrix,
T,
X,
)
nqubits_list = range(4, 26)
def bench_gate(benchmark, nqubits, g):
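# time how long it takes to apply gate g to a freshly allocated nqubits-qubit state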
st = QuantumState(nqubits)
benchmark(g.update_quantum_state, st)
@pytest.mark.parametrize("nqubits", nqubits_list)
def test_X(benchmark, nqubits):
benchmark.group = "X"
bench_gate(benchmark, nqubits, X(3))
@pytest.mark.parametrize("nqubits", nqubits_list)
def test_CNOT(benchmark, nqubits):
benchmark.group = "CNOT"
bench_gate(benchmark, nqubits, CNOT(2, 3))
@pytest.mark.parametrize("nqubits", nqubits_list)
def test_TOFFOLI(benchmark, nqubits):
benchmark.group = "TOFFOLI"
bench_gate(benchmark, nqubits, TOFFOLI(1, 2, 3))
@pytest.mark.parametrize("nqubits", nqubits_list)
def test_CCCNOT(benchmark, nqubits):
benchmark.group = "CCCNOT"
g = DenseMatrix([3], [[0, 1], [1, 0]])
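# turn the single-qubit X on qubit 3 into a triply-controlled NOT by adding
# control qubits 0, 1 and 2 (each triggering on value 1)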
g.add_control_qubit(2, 1)
g.add_control_qubit(1, 1)
g.add_control_qubit(0, 1)
bench_gate(benchmark, nqubits, g)
@pytest.mark.parametrize("nqubits", nqubits_list)
def test_Dense1Q(benchmark, nqubits):
benchmark.group = "DenseMatrix1Q"
bench_gate(benchmark, nqubits, DenseMatrix([3], np.eye(2)))
@pytest.mark.parametrize("nqubits", nqubits_list)
def test_Dense2Q(benchmark, nqubits):
benchmark.group = "DenseMatrix2Q"
bench_gate(benchmark, nqubits, DenseMatrix([2, 3], np.eye(4)))
@pytest.mark.parametrize("nqubits", nqubits_list)
def test_Dense3Q(benchmark, nqubits):
benchmark.group = "DenseMatrix3Q"
bench_gate(benchmark, nqubits, DenseMatrix([1, 2, 3], np.eye(8)))
@pytest.mark.parametrize("nqubits", nqubits_list)
def test_Dense4Q(benchmark, nqubits):
benchmark.group = "DenseMatrix4Q"
bench_gate(benchmark, nqubits, DenseMatrix([0, 1, 2, 3], np.eye(16)))
@pytest.mark.parametrize("nqubits", nqubits_list)
def test_Diagonal1QSp(benchmark, nqubits):
benchmark.group = "DiagonalMatrix1QSp"
bench_gate(benchmark, nqubits, DiagonalMatrix([0], np.ones(2)))
@pytest.mark.parametrize("nqubits", nqubits_list)
def test_Diagonal1Q(benchmark, nqubits):
benchmark.group = "DiagonalMatrix1Q"
bench_gate(benchmark, nqubits, DiagonalMatrix([3], np.ones(2)))
@pytest.mark.parametrize("nqubits", nqubits_list)
def test_Diagonal2Q(benchmark, nqubits):
benchmark.group = "DiagonalMatrix2Q"
bench_gate(benchmark, nqubits, DiagonalMatrix([2, 3], np.ones(4)))
@pytest.mark.parametrize("nqubits", nqubits_list)
def test_Diagonal3Q(benchmark, nqubits):
benchmark.group = "DiagonalMatrix3Q"
bench_gate(benchmark, nqubits, DiagonalMatrix([1, 2, 3], np.ones(8)))
@pytest.mark.parametrize("nqubits", nqubits_list)
def test_Diagonal4Q(benchmark, nqubits):
benchmark.group = "DiagonalMatrix4Q"
bench_gate(benchmark, nqubits, DiagonalMatrix([0, 1, 2, 3], np.ones(16)))
@pytest.mark.parametrize("nqubits", nqubits_list)
def test_Sparse1Q(benchmark, nqubits):
benchmark.group = "SparseMatrix1Q"
sparse_matrix = csr_matrix(([1], ([0], [0])), shape=(2, 2), dtype=complex)
bench_gate(benchmark, nqubits, SparseMatrix([3], sparse_matrix))
@pytest.mark.parametrize("nqubits", nqubits_list)
def test_Sparse2Q(benchmark, nqubits):
benchmark.group = "SparseMatrix2Q"
sparse_matrix = csr_matrix(([1], ([0], [0])), shape=(4, 4), dtype=complex)
bench_gate(benchmark, nqubits, SparseMatrix([2, 3], sparse_matrix))
@pytest.mark.parametrize("nqubits", nqubits_list)
def test_Sparse3Q(benchmark, nqubits):
benchmark.group = "SparseMatrix3Q"
sparse_matrix = csr_matrix(([1], ([0], [0])), shape=(8, 8), dtype=complex)
bench_gate(benchmark, nqubits, SparseMatrix([1, 2, 3], sparse_matrix))
@pytest.mark.parametrize("nqubits", nqubits_list)
def test_Sparse4Q(benchmark, nqubits):
benchmark.group = "SparseMatrix4Q"
sparse_matrix = csr_matrix(([1], ([0], [0])), shape=(16, 16), dtype=complex)
bench_gate(benchmark, nqubits, SparseMatrix([0, 1, 2, 3], sparse_matrix))
@pytest.mark.parametrize("nqubits", nqubits_list)
def test_Permutation1Q(benchmark, nqubits):
benchmark.group = "Permutation1Q"
def rev(index, dim):
return (index + 1) % dim
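# rev cyclically shifts each computational-basis index by one, i.e. a permutation gate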
bench_gate(benchmark, nqubits, ReversibleBoolean([3], rev))
@pytest.mark.parametrize("nqubits", nqubits_list)
def test_Permutation2Q(benchmark, nqubits):
benchmark.group = "Permutation2Q"
def rev(index, dim):
return (index + 1) % dim
bench_gate(benchmark, nqubits, ReversibleBoolean([2, 3], rev))
@pytest.mark.parametrize("nqubits", nqubits_list)
def METHOD_NAME(benchmark, nqubits):
benchmark.group = "Permutation3Q"
def rev(index, dim):
return (index + 1) % dim
bench_gate(benchmark, nqubits, ReversibleBoolean([1, 2, 3], rev))
@pytest.mark.parametrize("nqubits", nqubits_list)
def test_Permutation4Q(benchmark, nqubits):
benchmark.group = "Permutation4Q"
def rev(index, dim):
return (index + 1) % dim
bench_gate(benchmark, nqubits, ReversibleBoolean([0, 1, 2, 3], rev))
@pytest.mark.parametrize("nqubits", nqubits_list)
def test_PauliRotation1Q(benchmark, nqubits):
benchmark.group = "PauliRotation1Q"
bench_gate(benchmark, nqubits, PauliRotation([3], [1], 0.1))
@pytest.mark.parametrize("nqubits", nqubits_list)
def test_PauliRotation2Q(benchmark, nqubits):
benchmark.group = "PauliRotation2Q"
bench_gate(benchmark, nqubits, PauliRotation([2, 3], [1, 1], 0.1))
@pytest.mark.parametrize("nqubits", nqubits_list)
def test_PauliRotation3Q(benchmark, nqubits):
benchmark.group = "PauliRotation3Q"
bench_gate(benchmark, nqubits, PauliRotation([1, 2, 3], [1, 1, 1], 0.1))
@pytest.mark.parametrize("nqubits", nqubits_list)
def test_PauliRotation4Q(benchmark, nqubits):
benchmark.group = "PauliRotation4Q"
bench_gate(benchmark, nqubits, PauliRotation([0, 1, 2, 3], [1, 1, 1, 1], 0.1))
|
3,790 |
extract works
|
#!/usr/bin/env python
import argparse
import datetime
import itertools
import sys
import time
import requests
BASE_URL = "https://openlibrary.org/recentchanges/"
# BASE_URL = "http://0.0.0.0:8080/recentchanges/"
def parse_options(args):
parser = argparse.ArgumentParser(
description="""Find works that have been changed in the given time period.
If the `from` or `to` options are specified, prints works modified in that time period and quits.
Without these options, goes into loop mode which will keep polling openlibrary and print works modified on stdout. """
)
parser.add_argument(
'-f',
'--from',
dest='frm',
type=str,
help='From date (yyyy/mm/dd)',
default=False,
)
parser.add_argument(
'-t', '--to', dest='to', type=str, help='To date (yyyy/mm/dd)', default=False
)
parser.add_argument(
'-s',
'--start-time-file',
dest='start_file',
type=str,
help='File to store last time polling was done in loop mode',
default="find_modified_works.date",
)
parser.add_argument(
'-d',
'--delay',
dest='delay',
type=int,
default=3,
help='Number of seconds to wait between polling openlibrary in loop mode',
)
parser.add_argument(
'-m',
'--max-chunk-size',
dest='max_chunk_size',
default=100,
type=int,
help='maximum number of works returned in each loop of the loop mode',
)
return parser.parse_args(args)
def METHOD_NAME(data):
for i in data:
for change in i['changes']:
if change['key'].startswith("/works/"):
yield change['key']
def get_modified_works(frm, to):
one_day = datetime.timedelta(days=1)
ret = []
logging.debug("Querying between %s and %s", frm, to)
while frm < to:
url = frm.strftime(BASE_URL + "%Y/%m/%d.json")
logging.debug("Fetching changes from %s", url)
ret.append(METHOD_NAME(requests.get(url).json()))
frm += one_day
return itertools.chain(*ret)
def poll_for_changes(start_time_file, max_chunk_size, delay):
try:
with open(start_time_file) as f:
date = datetime.datetime.strptime(f.read(), "%Y/%m/%d")
logging.debug("Obtained last end time from file '%s'" % start_time_file)
except OSError:
date = datetime.datetime.now()
logging.info("No state file. Starting from now.")
current_day = date.day
logging.debug("Starting at %s with current day %d", date, current_day)
logging.debug("Will emit at most %d works", max_chunk_size)
seen = set()
rest = []
while True:
url = date.strftime(BASE_URL + "%Y/%m/%d.json")
logging.debug("-- Fetching changes from %s", url)
changes = list(requests.get(url).json())
unseen_changes = [x for x in changes if x['id'] not in seen]
logging.debug("%d changes fetched", len(changes))
logging.debug(" of which %d are unseen", len(unseen_changes))
# Fetch works for all changesets we've not seen yet. Add them
# to the ones left over from the previous iteration.
works = list(METHOD_NAME(unseen_changes))
logging.debug(" in which %d have works modified.", len(works))
logging.debug(
" There are %d left over works from the last iteration.", len(rest)
)
works += rest
logging.debug(" Totally %d works to be emitted" % len(works))
# Record all the ones we've already emitted for this day
for i in (x['id'] for x in unseen_changes):
seen.add(i)
logging.debug("Number of Changes seen so far %d", len(list(seen)))
# If the current day is over.
if current_day != datetime.datetime.now().day:
seen = set() # Clear things seen so far
date = datetime.datetime.now() # Update date
current_day = date.day
logging.debug("Flipping the clock to %s and clearing seen changes", date)
# If there are too many works, emit only max_chunk_size
# works. Keep the rest for the next iteration
if len(works) > max_chunk_size:
logging.debug(
" Number of works to be emitted (%d) is more than %s",
len(works),
max_chunk_size,
)
to_be_emitted, rest = works[:max_chunk_size], works[max_chunk_size:]
logging.debug(" Retaining %s", len(rest))
else:
to_be_emitted, rest = works, []
if to_be_emitted:
logging.debug("Emitting %d works", len(to_be_emitted))
for i in to_be_emitted:
print(i)
logging.debug("Sleeping for %d seconds", delay)
time.sleep(delay)
with open(start_time_file, "w") as f:
logging.debug("Writing %s to state file", date.strftime("%Y/%m/%d"))
f.write(date.strftime("%Y/%m/%d"))
def main():
args = parse_options(sys.argv[1:])
loop = not args.frm and not args.to
if args.frm:
frm = datetime.datetime.strptime(args.frm, "%Y/%m/%d")
else:
frm = datetime.datetime.now() - datetime.timedelta(days=1)
if args.to:
to = datetime.datetime.strptime(args.to, "%Y/%m/%d")
else:
to = datetime.datetime.now()
if loop:
poll_for_changes(args.start_file, args.max_chunk_size, args.delay)
else:
for i in get_modified_works(frm, to):
print(i)
if __name__ == "__main__":
import logging
logging.basicConfig(
level=logging.DEBUG, format="%(levelname)-7s (%(asctime)s) : %(message)s"
) # , filename="/home/noufal/works.log")
sys.exit(main())
|
3,791 |
installaehandler
|
"""MiniAEFrame - A minimal AppleEvent Application framework.
There are two classes:
AEServer -- a mixin class offering nice AE handling.
MiniApplication -- a very minimal alternative to FrameWork.py,
only suitable for the simplest of AppleEvent servers.
"""
from warnings import warnpy3k
warnpy3k("In 3.x, the MiniAEFrame module is removed.", stacklevel=2)
import traceback
import MacOS
from Carbon import AE
from Carbon.AppleEvents import *
from Carbon import Evt
from Carbon.Events import *
from Carbon import Menu
from Carbon import Win
from Carbon.Windows import *
from Carbon import Qd
import aetools
import EasyDialogs
kHighLevelEvent = 23 # Not defined anywhere for Python yet?
class MiniApplication:
"""A minimal FrameWork.Application-like class"""
def __init__(self):
self.quitting = 0
# Initialize menu
self.appleid = 1
self.quitid = 2
Menu.ClearMenuBar()
self.applemenu = applemenu = Menu.NewMenu(self.appleid, "\024")
applemenu.AppendMenu("%s;(-" % self.getaboutmenutext())
if MacOS.runtimemodel == 'ppc':
applemenu.AppendResMenu('DRVR')
applemenu.InsertMenu(0)
self.quitmenu = Menu.NewMenu(self.quitid, "File")
self.quitmenu.AppendMenu("Quit")
self.quitmenu.SetItemCmd(1, ord("Q"))
self.quitmenu.InsertMenu(0)
Menu.DrawMenuBar()
def __del__(self):
self.close()
def close(self):
pass
def mainloop(self, mask = everyEvent, timeout = 60*60):
while not self.quitting:
self.dooneevent(mask, timeout)
def _quit(self):
self.quitting = 1
def dooneevent(self, mask = everyEvent, timeout = 60*60):
got, event = Evt.WaitNextEvent(mask, timeout)
if got:
self.lowlevelhandler(event)
def lowlevelhandler(self, event):
what, message, when, where, modifiers = event
h, v = where
if what == kHighLevelEvent:
msg = "High Level Event: %r %r" % (code(message), code(h | (v<<16)))
try:
AE.AEProcessAppleEvent(event)
except AE.Error, err:
print 'AE error: ', err
print 'in', msg
traceback.print_exc()
return
elif what == keyDown:
c = chr(message & charCodeMask)
if modifiers & cmdKey:
if c == '.':
raise KeyboardInterrupt, "Command-period"
if c == 'q':
if hasattr(MacOS, 'OutputSeen'):
MacOS.OutputSeen()
self.quitting = 1
return
elif what == mouseDown:
partcode, window = Win.FindWindow(where)
if partcode == inMenuBar:
result = Menu.MenuSelect(where)
id = (result>>16) & 0xffff # Hi word
item = result & 0xffff # Lo word
if id == self.appleid:
if item == 1:
EasyDialogs.Message(self.getabouttext())
elif item > 1 and hasattr(Menu, 'OpenDeskAcc'):
name = self.applemenu.GetMenuItemText(item)
Menu.OpenDeskAcc(name)
elif id == self.quitid and item == 1:
if hasattr(MacOS, 'OutputSeen'):
MacOS.OutputSeen()
self.quitting = 1
Menu.HiliteMenu(0)
return
# Anything not handled is passed to Python/SIOUX
if hasattr(MacOS, 'HandleEvent'):
MacOS.HandleEvent(event)
else:
print "Unhandled event:", event
def getabouttext(self):
return self.__class__.__name__
def getaboutmenutext(self):
return "About %s\311" % self.__class__.__name__
class AEServer:
def __init__(self):
self.ae_handlers = {}
def METHOD_NAME(self, classe, type, callback):
AE.AEInstallEventHandler(classe, type, self.callback_wrapper)
self.ae_handlers[(classe, type)] = callback
def close(self):
for classe, type in self.ae_handlers.keys():
AE.AERemoveEventHandler(classe, type)
def callback_wrapper(self, _request, _reply):
_parameters, _attributes = aetools.unpackevent(_request)
_class = _attributes['evcl'].type
_type = _attributes['evid'].type
if (_class, _type) in self.ae_handlers:
_function = self.ae_handlers[(_class, _type)]
elif (_class, '****') in self.ae_handlers:
_function = self.ae_handlers[(_class, '****')]
elif ('****', '****') in self.ae_handlers:
_function = self.ae_handlers[('****', '****')]
else:
raise 'Cannot happen: AE callback without handler', (_class, _type)
# XXXX Do key-to-name mapping here
_parameters['_attributes'] = _attributes
_parameters['_class'] = _class
_parameters['_type'] = _type
if '----' in _parameters:
_object = _parameters['----']
del _parameters['----']
# The try/except that used to be here can mask programmer errors.
# Let the program crash, the programmer can always add a **args
# to the formal parameter list.
rv = _function(_object, **_parameters)
else:
#Same try/except comment as above
rv = _function(**_parameters)
if rv is None:
aetools.packevent(_reply, {})
else:
aetools.packevent(_reply, {'----':rv})
def code(x):
"Convert a long int to the 4-character code it really is"
s = ''
for i in range(4):
x, c = divmod(x, 256)
s = chr(c) + s
return s
class _Test(AEServer, MiniApplication):
"""Mini test application, handles required events"""
def __init__(self):
MiniApplication.__init__(self)
AEServer.__init__(self)
self.METHOD_NAME('aevt', 'oapp', self.open_app)
self.METHOD_NAME('aevt', 'quit', self.quit)
self.METHOD_NAME('****', '****', self.other)
self.mainloop()
def quit(self, **args):
self._quit()
def open_app(self, **args):
pass
def other(self, _object=None, _class=None, _type=None, **args):
print 'AppleEvent', (_class, _type), 'for', _object, 'Other args:', args
if __name__ == '__main__':
_Test()
|
3,792 |
get tag label
|
# Copyright 2021 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tag utils Module
"""
import functools
import traceback
from typing import Iterable, List, Optional
from metadata.generated.schema.api.classification.createClassification import (
CreateClassificationRequest,
)
from metadata.generated.schema.api.classification.createTag import CreateTagRequest
from metadata.generated.schema.entity.classification.tag import Tag
from metadata.generated.schema.type.basic import FullyQualifiedEntityName
from metadata.generated.schema.type.tagLabel import (
LabelType,
State,
TagLabel,
TagSource,
)
from metadata.ingestion.api.models import Either, StackTraceError
from metadata.ingestion.models.ometa_classification import OMetaTagAndClassification
from metadata.ingestion.ometa.ometa_api import OpenMetadata
from metadata.utils import fqn
from metadata.utils.logger import ingestion_logger
logger = ingestion_logger()
def get_ometa_tag_and_classification(
tags: List[str],
classification_name: str,
tag_description: Optional[str],
classification_description: Optional[str],
include_tags: bool = True,
tag_fqn: Optional[FullyQualifiedEntityName] = None,
) -> Iterable[Either[OMetaTagAndClassification]]:
"""
Returns the OMetaTagAndClassification object
"""
if include_tags:
for tag in tags:
try:
classification = OMetaTagAndClassification(
fqn=tag_fqn,
classification_request=CreateClassificationRequest(
name=classification_name,
description=classification_description,
),
tag_request=CreateTagRequest(
classification=classification_name,
name=tag,
description=tag_description,
),
)
yield Either(right=classification)
logger.debug(
f"Classification {classification_name}, Tag {tag} Ingested"
)
except Exception as err:
yield Either(
left=StackTraceError(
name=tag,
error=f"Error yielding tag [{tag}]: [{err}]",
stack_trace=traceback.format_exc(),
)
)
@functools.lru_cache(maxsize=512)
def METHOD_NAME(
metadata: OpenMetadata, tag_name: str, classification_name: str
) -> Optional[TagLabel]:
"""
Returns the tag label if the tag is created
"""
try:
# Build the tag FQN
tag_fqn = fqn.build(
metadata,
Tag,
classification_name=classification_name,
tag_name=tag_name,
)
# Check if the tag exists
tag = metadata.get_by_name(entity=Tag, fqn=tag_fqn)
if tag:
return TagLabel(
tagFQN=tag_fqn,
labelType=LabelType.Automated.value,
state=State.Suggested.value,
source=TagSource.Classification.value,
)
except Exception as err:
logger.debug(traceback.format_exc())
logger.error(f"Error processing tag label: {err}")
return None
def get_tag_labels(
metadata: OpenMetadata,
tags: List[str],
classification_name: str,
include_tags: bool = True,
) -> Optional[List[TagLabel]]:
"""
Method to create tag labels from the collected tags
"""
tag_labels_list = []
if tags and include_tags:
for tag in tags:
try:
tag_label = METHOD_NAME(
metadata, tag_name=tag, classification_name=classification_name
)
if tag_label:
tag_labels_list.append(tag_label)
except Exception as err:
logger.debug(traceback.format_exc())
logger.error(f"Error processing tag labels: {err}")
return tag_labels_list or None
|
3,793 |
setup method
|
# Copyright 2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from legate.core import LEGATE_MAX_DIM
from utils.generators import broadcasts_to_along_axis, mk_seq_array
import cunumeric as num
N = 10
@pytest.mark.parametrize("ndim", range(1, LEGATE_MAX_DIM + 1))
def test_ndim(ndim):
shape = (N,) * ndim
np_arr = mk_seq_array(np, shape)
num_arr = mk_seq_array(num, shape)
shape_idx = (1,) * ndim
np_indices = mk_seq_array(np, shape_idx) % N
num_indices = mk_seq_array(num, shape_idx) % N
for axis in range(-1, ndim):
res_np = np.take_along_axis(np_arr, np_indices, axis=axis)
res_num = num.take_along_axis(num_arr, num_indices, axis=axis)
assert np.array_equal(res_num, res_np)
np_indices = mk_seq_array(np, (3,))
num_indices = mk_seq_array(num, (3,))
res_np = np.take_along_axis(np_arr, np_indices, None)
res_num = num.take_along_axis(num_arr, num_indices, None)
assert np.array_equal(res_num, res_np)
@pytest.mark.parametrize(
"axis", range(-1, 3), ids=lambda axis: f"(axis={axis})"
)
def test_full(axis):
shape = (3, 4, 5)
np_arr = mk_seq_array(np, shape)
num_arr = mk_seq_array(num, shape)
size = shape[axis]
axis_values = (0, size - 1, size * 2)
for shape_idx in broadcasts_to_along_axis(shape, axis, axis_values):
np_indices = mk_seq_array(np, shape_idx) % shape[axis]
num_indices = mk_seq_array(num, shape_idx) % shape[axis]
res_np = np.take_along_axis(np_arr, np_indices, axis=axis)
res_num = num.take_along_axis(num_arr, num_indices, axis=axis)
assert np.array_equal(res_num, res_np)
def test_empty_indice():
np_arr = mk_seq_array(np, (10,))
num_arr = mk_seq_array(num, (10,))
np_indices = np.array([], dtype=int)
num_indices = num.array([], dtype=int)
res_np = np.take_along_axis(np_arr, np_indices, axis=0)
res_num = num.take_along_axis(num_arr, num_indices, axis=0)
assert np.array_equal(res_num, res_np)
class TestTakeAlongAxisErrors:
def METHOD_NAME(self):
self.a = num.ones((3, 3))
self.ai = num.ones((3, 3), dtype=int)
@pytest.mark.parametrize("dtype", (bool, float), ids=str)
def test_indices_bad_type(self, dtype):
ai = num.ones((3, 3), dtype=dtype)
msg = "`indices` must be an integer array"
with pytest.raises(TypeError, match=msg):
num.take_along_axis(self.a, ai, axis=0)
@pytest.mark.xfail
@pytest.mark.parametrize(
"shape", ((3, 2), (3, 0)), ids=lambda shape: f"(shape={shape})"
)
def test_indices_bad_shape(self, shape):
# In Numpy, it raises IndexError.
# In cuNumeric, it raises ValueError.
ai = num.ones(shape, dtype=int)
msg = "shape mismatch: indexing arrays could not be broadcast"
with pytest.raises(IndexError, match=msg):
num.take_along_axis(self.a, ai, axis=0)
@pytest.mark.parametrize(
"shape", ((1,), (3, 3, 1)), ids=lambda shape: f"(shape={shape})"
)
def test_indices_bad_dims(self, shape):
ai = num.ones(shape, dtype=int)
msg = "`indices` and `a` must have the same number of dimensions"
with pytest.raises(ValueError, match=msg):
num.take_along_axis(self.a, ai, axis=0)
@pytest.mark.parametrize(
"value", (-4, 3), ids=lambda value: f"(value={value})"
)
def test_indices_out_of_bound(self, value):
ai = num.full((3, 3), value, dtype=int)
msg = "out of bounds"
with pytest.raises(IndexError, match=msg):
num.take_along_axis(self.a, ai, axis=0)
@pytest.mark.parametrize(
"axis", (2, -3), ids=lambda axis: f"(axis={axis})"
)
def test_axis_out_of_bound(self, axis):
msg = "out of bounds"
# In Numpy, it raises AxisError
with pytest.raises(ValueError, match=msg):
num.take_along_axis(self.a, self.ai, axis=axis)
def test_axis_float(self):
axis = 0.0
msg = "integer argument expected"
with pytest.raises(TypeError, match=msg):
num.take_along_axis(self.a, self.ai, axis=axis)
def test_axis_none_indice_not_1d(self):
axis = None
msg = "indices must be 1D if axis=None"
with pytest.raises(ValueError, match=msg):
num.take_along_axis(self.a, self.ai, axis=axis)
def test_a_none(self):
ai = num.array([1, 1, 1])
msg = "object has no attribute 'ndim'"
with pytest.raises(AttributeError, match=msg):
num.take_along_axis(None, ai, axis=0)
def test_indice_none(self):
msg = "'NoneType' object has no attribute 'dtype'"
with pytest.raises(AttributeError, match=msg):
num.take_along_axis(self.a, None, axis=0)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(sys.argv))
|
3,794 |
transform
|
#
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
import hashlib
import unittest
import tarfile
import io
import pytest
from aistore.sdk.const import PROVIDER_AIS
from aistore.sdk.errors import InvalidBckProvider
from tests.integration import REMOTE_SET, TEST_TIMEOUT, OBJECT_COUNT
from tests.integration.sdk.remote_enabled_test import RemoteEnabledTest
from tests.utils import random_string
# pylint: disable=unused-variable,too-many-instance-attributes
class TestObjectGroupOps(RemoteEnabledTest):
def setUp(self) -> None:
super().setUp()
self.obj_names = self._create_objects(suffix="-suffix")
def test_delete(self):
object_group = self.bucket.objects(obj_names=self.obj_names[1:])
job_id = object_group.delete()
self.client.job(job_id).wait(timeout=TEST_TIMEOUT)
existing_objects = self.bucket.list_objects(prefix=self.obj_prefix).entries
self.assertEqual(1, len(existing_objects))
self.assertEqual(self.obj_names[0], existing_objects[0].name)
@unittest.skipIf(
not REMOTE_SET,
"Remote bucket is not set",
)
def test_evict(self):
object_group = self.bucket.objects(obj_names=self.obj_names[1:])
job_id = object_group.evict()
self.client.job(job_id).wait(timeout=TEST_TIMEOUT)
self._verify_cached_objects(OBJECT_COUNT, [0])
def test_evict_objects_local(self):
local_bucket = self.client.bucket(random_string(), provider=PROVIDER_AIS)
with self.assertRaises(InvalidBckProvider):
local_bucket.objects(obj_names=[]).evict()
@unittest.skipIf(
not REMOTE_SET,
"Remote bucket is not set",
)
def test_prefetch_list(self):
obj_group = self.bucket.objects(obj_names=self.obj_names[1:])
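        # Evict everything first so the prefetch below starts from a fully
        # uncached bucket and its effect on cache status can be verified.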
self._evict_all_objects()
# Fetch back a specific object group and verify cache status
job_id = obj_group.prefetch()
self.client.job(job_id).wait(timeout=TEST_TIMEOUT * 2)
self._verify_cached_objects(OBJECT_COUNT, range(1, OBJECT_COUNT))
def test_prefetch_objects_local(self):
local_bucket = self.client.bucket(random_string(), provider=PROVIDER_AIS)
with self.assertRaises(InvalidBckProvider):
local_bucket.objects(obj_names=[]).prefetch()
def test_copy_objects(self):
to_bck_name = "destination-bucket"
to_bck = self._create_bucket(to_bck_name)
self.assertEqual(0, len(to_bck.list_all_objects(prefix=self.obj_prefix)))
self.assertEqual(
OBJECT_COUNT, len(self.bucket.list_all_objects(prefix=self.obj_prefix))
)
new_prefix = "prefix-"
copy_job = self.bucket.objects(obj_names=self.obj_names[1:5]).copy(
to_bck, prepend=new_prefix
)
self.client.job(job_id=copy_job).wait_for_idle(timeout=TEST_TIMEOUT)
self.assertEqual(
4, len(to_bck.list_all_objects(prefix=new_prefix + self.obj_prefix))
)
def test_archive_objects_without_copy(self):
arch_name = self.obj_prefix + "-archive-without-copy.tar"
self._archive_exec_assert(arch_name, self.bucket, self.bucket)
def test_archive_objects_with_copy(self):
arch_name = self.obj_prefix + "-archive-with-copy.tar"
dest_bck = self._create_bucket(random_string())
self._archive_exec_assert(arch_name, self.bucket, dest_bck, to_bck=dest_bck)
def _archive_exec_assert(self, arch_name, src_bck, res_bck, **kwargs):
# Add to object list to clean up on test finish
if res_bck.provider != PROVIDER_AIS:
self.cloud_objects.append(arch_name)
archived_names = self.obj_names[1:5]
expected_contents = {}
for name in archived_names:
expected_contents[name] = src_bck.object(obj_name=name).get().read_all()
arch_job = src_bck.objects(obj_names=archived_names).archive(
archive_name=arch_name, **kwargs
)
self.client.job(job_id=arch_job).wait_for_idle(timeout=TEST_TIMEOUT)
# Read the tar archive and assert the object names and contents match
res_bytes = res_bck.object(arch_name).get().read_all()
with tarfile.open(fileobj=io.BytesIO(res_bytes), mode="r") as tar:
member_names = []
for member in tar.getmembers():
inner_file = tar.extractfile(member)
self.assertEqual(expected_contents[member.name], inner_file.read())
inner_file.close()
member_names.append(member.name)
self.assertEqual(set(archived_names), set(member_names))
@pytest.mark.etl
def test_transform_objects(self):
# Define an etl with code that hashes the contents of each object
etl_name = "etl-" + random_string(5)
def METHOD_NAME(input_bytes):
md5 = hashlib.md5()
md5.update(input_bytes)
return md5.hexdigest().encode()
md5_etl = self.client.etl(etl_name)
md5_etl.init_code(METHOD_NAME=METHOD_NAME)
to_bck_name = "destination-bucket"
to_bck = self._create_bucket(to_bck_name)
new_prefix = "prefix-"
self.assertEqual(0, len(to_bck.list_all_objects(prefix=self.obj_prefix)))
self.assertEqual(
OBJECT_COUNT, len(self.bucket.list_all_objects(prefix=self.obj_prefix))
)
transform_job = self.bucket.objects(obj_names=self.obj_names).METHOD_NAME(
to_bck, etl_name=md5_etl.name, prepend=new_prefix
)
self.client.job(job_id=transform_job).wait_for_idle(timeout=TEST_TIMEOUT)
# Get the md5 transform of each source object and verify the destination bucket contains those results
from_obj_hashes = [
METHOD_NAME(self.bucket.object(name).get().read_all())
for name in self.obj_names
]
to_obj_values = [
to_bck.object(new_prefix + name).get().read_all() for name in self.obj_names
]
self.assertEqual(to_obj_values, from_obj_hashes)
def _evict_all_objects(self):
job_id = self.bucket.objects(obj_names=self.obj_names).evict()
self.client.job(job_id).wait(timeout=TEST_TIMEOUT)
self._check_all_objects_cached(OBJECT_COUNT, expected_cached=False)
def _verify_cached_objects(self, expected_object_count, cached_range):
"""
List each of the objects and verify the correct count and that all objects matching
the cached range are cached and all others are not
Args:
expected_object_count: expected number of objects to list
cached_range: object indices that should be cached, all others should not
"""
objects = self.bucket.list_objects(
props="name,cached", prefix=self.obj_prefix
).entries
self.assertEqual(expected_object_count, len(objects))
cached_names = {self.obj_prefix + str(x) + "-suffix" for x in cached_range}
cached_objs = []
evicted_objs = []
for obj in objects:
if obj.name in cached_names:
cached_objs.append(obj)
else:
evicted_objs.append(obj)
self._validate_objects_cached(cached_objs, True)
self._validate_objects_cached(evicted_objs, False)
|
3,795 |
columns
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from bugbot import utils
from bugbot.bzcleaner import BzCleaner
class UnderestimatedSeverity(BzCleaner):
def __init__(self):
super(UnderestimatedSeverity, self).__init__()
self.nweeks = self.get_config("weeks_lookup")
self.ndups = self.get_config("number_dups")
self.votes = self.get_config("number_votes")
self.cc = self.get_config("number_cc")
self.see_also = self.get_config("number_see_also")
self.extra_ni = {}
def description(self):
return "Bugs with underestimated severity for the last {} weeks".format(
self.nweeks
)
def has_needinfo(self):
return True
def get_mail_to_auto_ni(self, bug):
for field in ["assigned_to", "triage_owner"]:
person = bug.get(field, "")
if person and not utils.is_no_assignee(person):
return {"mail": person, "nickname": bug[f"{field}_detail"]["nick"]}
return None
def get_extra_for_needinfo_template(self):
return self.extra_ni
def METHOD_NAME(self):
return [
"id",
"summary",
"creation",
"last_change",
"severity",
"dups_count",
"votes",
"cc_count",
"see_also_count",
]
def get_extra_for_template(self):
return {
"dups_threshold": self.ndups,
"votes_threshold": self.votes,
"cc_threshold": self.cc,
"see_also_threshold": self.see_also,
}
def handle_bug(self, bug, data):
bugid = str(bug["id"])
cc_count = len(bug["cc"])
dups_count = len(bug["duplicates"])
votes_count = bug["votes"]
see_also_count = len(bug["see_also"])
data[bugid] = {
"creation": utils.get_human_lag(bug["creation_time"]),
"last_change": utils.get_human_lag(bug["last_change_time"]),
"severity": bug["severity"],
"dups_count": dups_count,
"votes": votes_count,
"cc_count": cc_count,
"see_also_count": see_also_count,
}
factors = []
if dups_count >= self.ndups:
factors.append(f"{dups_count} duplicates")
if votes_count >= self.votes:
factors.append(f"{votes_count} votes")
if cc_count >= self.cc:
factors.append(f"{cc_count} CCs")
if see_also_count >= self.see_also:
factors.append(f"{see_also_count} See Also bugs")
self.extra_ni[bugid] = {
"severity": bug["severity"],
"factors": utils.english_list(factors),
}
return bug
def get_bz_params(self, date):
fields = [
"assigned_to",
"triage_owner",
"creation_time",
"last_change_time",
"severity",
"votes",
"cc",
"duplicates",
"see_also",
]
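        # Bugzilla query: open S3/S4 defects (excluding meta/intermittent),
        # touched within the lookup window, matching at least one of the
        # dups/votes/CC/see-also thresholds, and without an existing
        # "could you consider increasing the bug severity?" comment.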
params = {
"include_fields": fields,
"resolution": "---",
"bug_type": "defect",
"bug_severity": ["S3", "S4"],
"f1": "keywords",
"o1": "nowords",
"v1": ["meta", "intermittent"],
"f2": "days_elapsed",
"o2": "lessthan",
"v2": self.nweeks * 7,
"j3": "OR",
"f3": "OP",
"f4": "dupe_count",
"o4": "greaterthaneq",
"v4": self.ndups,
"f5": "votes",
"o5": "greaterthaneq",
"v5": self.votes,
"f6": "cc_count",
"o6": "greaterthaneq",
"v6": self.cc,
"f7": "see_also_count",
"o7": "greaterthaneq",
"v7": self.see_also,
"f8": "CP",
"n15": 1,
"f15": "longdesc",
"o15": "casesubstring",
"v15": "could you consider increasing the bug severity?",
}
return params
if __name__ == "__main__":
UnderestimatedSeverity().run()
|
3,796 |
pd post process
|
from typing import Optional
import logging
import networkx
from ..utils.graph import compute_dominance_frontier, PostDominators, TemporaryNode
from . import Analysis
_l = logging.getLogger(name=__name__)
class CDG(Analysis):
"""
Implements a control dependence graph.
"""
def __init__(self, cfg, start=None, no_construct=False):
"""
Constructor.
:param cfg: The control flow graph upon which this control dependence graph will build
:param start: The starting point to begin constructing the control dependence graph
:param no_construct: Skip the construction step. Only used in unit-testing.
"""
self._start = start if start is not None else self.project.entry
self._cfg = cfg
self._ancestor = None
self._semi = None
self._post_dom: Optional[networkx.DiGraph] = None
self._graph: Optional[networkx.DiGraph] = None
self._normalized_cfg = None
if not no_construct:
if self._cfg is None:
# This leads to import cycles otherwise
# pylint: disable=import-outside-toplevel
from angr.analyses.cfg.cfg_emulated import CFGEmulated
self._cfg = self.project.analyses[CFGEmulated].prep()()
# FIXME: We should not use get_any_irsb in such a real setting...
self._entry = self._cfg.model.get_any_node(self._start)
self._construct()
#
# Properties
#
@property
def graph(self):
return self._graph
#
# Public methods
#
def get_post_dominators(self):
"""
Return the post-dom tree
"""
return self._post_dom
def get_dependants(self, run):
"""
Return a list of nodes that are control dependent on the given node in the control dependence graph
"""
if run in self._graph.nodes():
return list(self._graph.successors(run))
else:
return []
def get_guardians(self, run):
"""
Return a list of nodes on whom the specific node is control dependent in the control dependence graph
"""
if run in self._graph.nodes():
return list(self._graph.predecessors(run))
else:
return []
#
# Private methods
#
def _construct(self):
"""
Construct a control dependence graph.
This implementation is based on figure 6 of paper An Efficient Method of Computing Static Single Assignment
Form by Ron Cytron, etc.
"""
if not self._cfg._model.ident.startswith("CFGEmulated"):
raise ValueError("CDG is only supported by CFGEmulated.")
self._acyclic_cfg = self._cfg.copy()
# TODO: Cycle-removing is not needed - confirm it later
# The CFG we use should be acyclic!
# self._acyclic_cfg.remove_cycles()
# Pre-process the acyclic CFG
self._pre_process_cfg()
# Construct post-dominator tree
self._pd_construct()
self._graph: networkx.DiGraph = networkx.DiGraph()
# Construct the reversed dominance frontier mapping
rdf = compute_dominance_frontier(self._normalized_cfg, self._post_dom)
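        # A node y is control dependent on every node x in its reversed
        # dominance frontier, so add an edge from the controlling node x to y.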
for y in self._cfg.graph.nodes():
if y not in rdf:
continue
for x in rdf[y]:
self._graph.add_edge(x, y)
# self._post_process()
def _pre_process_cfg(self):
"""
Pre-process the acyclic CFG.
- Change all FakeRet edges to normal edges when necessary (e.g. the normal/expected return edge does not exist)
"""
for _, dst, data in self._acyclic_cfg.graph.edges(data=True):
if "jumpkind" in data and data["jumpkind"] == "Ijk_FakeRet":
all_edges_to_dst = self._acyclic_cfg.graph.in_edges([dst], data=True)
if not any((s, d) for s, d, da in all_edges_to_dst if da["jumpkind"] != "Ijk_FakeRet"):
# All in edges are FakeRets
# Change them to a normal edge
for _, _, data_ in all_edges_to_dst:
data_["jumpkind"] = "Ijk_Boring"
def _post_process(self):
"""
        There are cases where a loop has two overlapping loop headers because
        of the way VEX deals with consecutive instructions. Since we broke the
        connection between the second loop header and its successor, we
        restore it here in our CDG.
"""
# TODO: Verify its correctness
loop_back_edges = self._cfg.get_loop_back_edges()
for b1, b2 in loop_back_edges:
self._graph.add_edge(b1, b2)
#
# Post-dominator tree related
#
def _pd_construct(self):
pdoms = PostDominators(self._acyclic_cfg, self._entry, successors_func=self._pd_graph_successors)
self._post_dom = pdoms.post_dom
self.METHOD_NAME(self._acyclic_cfg)
# Create the normalized_cfg without the annoying ContainerNodes
self._normalized_cfg = networkx.DiGraph()
for src, dst in pdoms.prepared_graph.edges():
self._normalized_cfg.add_edge(src.obj, dst.obj)
@staticmethod
def _pd_graph_successors(graph, node):
if type(node) is TemporaryNode:
# This is for testing
successors = graph.graph.successors(node)
else:
# Real CFGNode!
successors = graph.model.get_successors(node)
return successors
def METHOD_NAME(self, cfg):
"""
Take care of those loop headers/tails where we manually broke their
connection to the next BBL
"""
loop_back_edges = self._cfg.get_loop_back_edges()
for b1, b2 in loop_back_edges:
# The edge between b1 and b2 is manually broken
# The post dominator of b1 should be b2 (or not?)
successors = list(self._pd_graph_successors(cfg, b1))
if len(successors) == 0:
if b2 in self._post_dom:
self._post_dom.add_edge(b1, b2)
else:
_l.debug("%s is not in post dominator dict.", b2)
from angr.analyses import AnalysesHub
AnalysesHub.register_default("CDG", CDG)
|
3,797 |
update
|
""" Create aircraft trails on the radar display."""
from math import *
import numpy as np
import bluesky as bs
from bluesky import settings
from bluesky.core import TrafficArrays
class Trails(TrafficArrays):
"""
Traffic trails class definition : Data for trails
Methods:
Trails() : constructor
Members: see create
Created by : Jacco M. Hoekstra
"""
def __init__(self,dttrail=10.):
super().__init__()
        self.active = False  # Whether or not to show trails
self.dt = dttrail # Resolution of trail pieces in time
self.pygame = (bs.gui == 'pygame') # Trails are different for pygame
self.tcol0 = 60. # After how many seconds old colour
# This list contains some standard colors
self.colorList = {'BLUE': np.array([0, 0, 255]),
'CYAN': np.array([0,255,255]),
'RED' : np.array([255, 0, 0]),
'YELLOW': np.array([255, 255, 0])}
        # Set default color to cyan
self.defcolor = self.colorList['CYAN']
# Foreground data on line pieces
self.lat0 = np.array([])
self.lon0 = np.array([])
self.lat1 = np.array([])
self.lon1 = np.array([])
self.time = np.array([])
self.col = []
self.fcol = np.array([])
# background copy of data
self.bglat0 = np.array([])
self.bglon0 = np.array([])
self.bglat1 = np.array([])
self.bglon1 = np.array([])
self.bgtime = np.array([])
self.bgcol = []
with self.settrafarrays():
self.accolor = []
self.lastlat = np.array([])
self.lastlon = np.array([])
self.lasttim = np.array([])
self.clearnew()
return
def create(self,n=1):
super().create(n)
self.accolor[-1] = self.defcolor
self.lastlat[-1] = bs.traf.lat[-1]
self.lastlon[-1] = bs.traf.lon[-1]
def METHOD_NAME(self):
self.acid = bs.traf.id
if not self.active:
self.lastlat = bs.traf.lat
self.lastlon = bs.traf.lon
self.lasttim[:] = bs.sim.simt
return
"""Add linepieces for trails based on traffic data"""
# Use temporary list/array for fast append
lstlat0 = []
lstlon0 = []
lstlat1 = []
lstlon1 = []
lsttime = []
# Check for update
delta = bs.sim.simt - self.lasttim
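        # Only aircraft whose last trail point is older than dt get a new segment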
idxs = np.where(delta > self.dt)[0]
# Add all a/c which need the update
# if len(idxs)>0:
# print "len(idxs)=",len(idxs)
for i in idxs:
# Add to lists
lstlat0.append(self.lastlat[i])
lstlon0.append(self.lastlon[i])
lstlat1.append(bs.traf.lat[i])
lstlon1.append(bs.traf.lon[i])
lsttime.append(bs.sim.simt)
if isinstance(self.col, np.ndarray):
# print type(trailcol[i])
# print trailcol[i]
# print "col type: ",type(self.col)
self.col = self.col.tolist()
self.col.append(self.accolor[i])
# Update aircraft record
self.lastlat[i] = bs.traf.lat[i]
self.lastlon[i] = bs.traf.lon[i]
self.lasttim[i] = bs.sim.simt
        # When an a/c is no longer part of a trail segment, its data is no
        # longer aircraft data => move it to the GUI buffer (send or draw)
if self.pygame:
# Pygame: send to drawing buffer
self.lat0 = np.concatenate((self.lat0, np.array(lstlat0)))
self.lon0 = np.concatenate((self.lon0, np.array(lstlon0)))
self.lat1 = np.concatenate((self.lat1, np.array(lstlat1)))
self.lon1 = np.concatenate((self.lon1, np.array(lstlon1)))
self.time = np.concatenate((self.time, np.array(lsttime)))
else:
# QtGL: add to send buffer
self.newlat0.extend(lstlat0)
self.newlon0.extend(lstlon0)
self.newlat1.extend(lstlat1)
self.newlon1.extend(lstlon1)
# Update colours
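        # fcol fades linearly from 1 (new segment) to 0 once a segment is
        # older than tcol0 seconds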
self.fcol = (1. - np.minimum(self.tcol0, np.abs(bs.sim.simt - self.time)) / self.tcol0)
return
def buffer(self):
"""Buffer trails: Move current stack to background """
self.bglat0 = np.append(self.bglat0, self.lat0)
self.bglon0 = np.append(self.bglon0, self.lon0)
self.bglat1 = np.append(self.bglat1, self.lat1)
self.bglon1 = np.append(self.bglon1, self.lon1)
self.bgtime = np.append(self.bgtime, self.time)
# No color saved: Background: always 'old color' self.col0
if isinstance(self.bgcol, np.ndarray):
self.bgcol = self.bgcol.tolist()
if isinstance(self.col, np.ndarray):
self.col = self.col.tolist()
self.bgcol = self.bgcol + self.col
self.bgacid = self.bgacid + self.acid
self.clearfg() # Clear foreground trails
return
def clearnew(self):
# Clear new lines pipeline used for QtGL
self.newlat0 = []
self.newlon0 = []
self.newlat1 = []
self.newlon1 = []
def clearfg(self): # Foreground
"""Clear trails foreground"""
self.lat0 = np.array([])
self.lon0 = np.array([])
self.lat1 = np.array([])
self.lon1 = np.array([])
self.time = np.array([])
self.col = np.array([])
return
def clearbg(self): # Background
"""Clear trails background"""
self.bglat0 = np.array([])
self.bglon0 = np.array([])
self.bglat1 = np.array([])
self.bglon1 = np.array([])
self.bgtime = np.array([])
self.bgacid = []
return
def clear(self):
"""Clear all data, Foreground and background"""
self.lastlon = np.array([])
self.lastlat = np.array([])
self.clearfg()
self.clearbg()
self.clearnew()
return
def setTrails(self, *args):
""" Set trails on/off, or change trail color of aircraft """
if len(args)==0:
msg = "TRAIL ON/OFF, [dt] / TRAIL acid color\n"
if self.active:
msg = msg + "TRAILS ARE ON"
else:
msg = msg + "TRAILS ARE OFF"
return True,msg
# Switch on/off
elif type(args[0]) == bool:
# Set trails on/off
self.active = args[0]
if len(args) > 1:
self.dt = args[1]
if not self.active:
self.clear()
# Change color per acid (pygame only)
else:
# Change trail color
if len(args) < 2 or args[1] not in ["BLUE", "RED", "YELLOW"]:
return False, "Set aircraft trail color with: TRAIL acid BLUE/RED/YELLOW"
self.changeTrailColor(args[1], args[0])
return True
def changeTrailColor(self, color, idx):
"""Change color of aircraft trail"""
self.accolor[idx] = self.colorList[color]
return
def reset(self):
        # This ensures that the traffic arrays (whose size is dynamic)
        # are all reset as well: lat, lon, spd, etc., but also objects such as adsb
super().reset()
self.clear()
self.active = False
|
3,798 |
name
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetCassandraResourceCassandraTableResult',
'AwaitableGetCassandraResourceCassandraTableResult',
'get_cassandra_resource_cassandra_table',
'get_cassandra_resource_cassandra_table_output',
]
@pulumi.output_type
class GetCassandraResourceCassandraTableResult:
"""
An Azure Cosmos DB Cassandra table.
"""
def __init__(__self__, id=None, location=None, METHOD_NAME=None, options=None, resource=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", METHOD_NAME)
if options and not isinstance(options, dict):
raise TypeError("Expected argument 'options' to be a dict")
pulumi.set(__self__, "options", options)
if resource and not isinstance(resource, dict):
raise TypeError("Expected argument 'resource' to be a dict")
pulumi.set(__self__, "resource", resource)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
The unique resource identifier of the ARM resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
The location of the resource group to which the resource belongs.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
The name of the ARM resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def options(self) -> Optional['outputs.CassandraTableGetPropertiesResponseOptions']:
return pulumi.get(self, "options")
@property
@pulumi.getter
def resource(self) -> Optional['outputs.CassandraTableGetPropertiesResponseResource']:
return pulumi.get(self, "resource")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of Azure resource.
"""
return pulumi.get(self, "type")
class AwaitableGetCassandraResourceCassandraTableResult(GetCassandraResourceCassandraTableResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetCassandraResourceCassandraTableResult(
id=self.id,
location=self.location,
METHOD_NAME=self.METHOD_NAME,
options=self.options,
resource=self.resource,
tags=self.tags,
type=self.type)
def get_cassandra_resource_cassandra_table(account_name: Optional[str] = None,
keyspace_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
table_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCassandraResourceCassandraTableResult:
"""
Gets the Cassandra table under an existing Azure Cosmos DB database account.
:param str account_name: Cosmos DB database account name.
:param str keyspace_name: Cosmos DB keyspace name.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str table_name: Cosmos DB table name.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['keyspaceName'] = keyspace_name
__args__['resourceGroupName'] = resource_group_name
__args__['tableName'] = table_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:documentdb/v20230415:getCassandraResourceCassandraTable', __args__, opts=opts, typ=GetCassandraResourceCassandraTableResult).value
return AwaitableGetCassandraResourceCassandraTableResult(
id=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
METHOD_NAME=pulumi.get(__ret__, 'name'),
options=pulumi.get(__ret__, 'options'),
resource=pulumi.get(__ret__, 'resource'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_cassandra_resource_cassandra_table)
def get_cassandra_resource_cassandra_table_output(account_name: Optional[pulumi.Input[str]] = None,
keyspace_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
table_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetCassandraResourceCassandraTableResult]:
"""
Gets the Cassandra table under an existing Azure Cosmos DB database account.
:param str account_name: Cosmos DB database account name.
:param str keyspace_name: Cosmos DB keyspace name.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str table_name: Cosmos DB table name.
"""
...
|
3,799 |
is leader process
|
# -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ utility/helper.py ]
# Synopsis [ helper functions ]
# Author [ Andy T. Liu (Andi611) ]
# Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ]
"""*********************************************************************************************"""
###############
# IMPORTATION #
###############
import os
import sys
import math
import torch
import shutil
import builtins
import numpy as np
from time import time
from typing import List
from pathlib import Path
from datetime import datetime
from collections import defaultdict
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed import is_initialized, get_rank, get_world_size
def METHOD_NAME():
return not is_initialized() or get_rank() == 0
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def count_used_parameters(model):
# The model should be at least backward once
return sum(p.numel() for p in model.parameters() if p.grad is not None)
def get_time_tag():
return datetime.fromtimestamp(time()).strftime('%Y-%m-%d-%H-%M-%S')
def backup(src_path, tgt_dir):
stem = Path(src_path).stem
suffix = Path(src_path).suffix
shutil.copyfile(src_path, os.path.join(tgt_dir, f'{stem}_{get_time_tag()}{suffix}'))
def get_model_state(model):
if isinstance(model, DDP):
return model.module.state_dict()
return model.state_dict()
def show(*args, **kwargs):
if METHOD_NAME():
print(*args, **kwargs)
def hack_isinstance():
# Pytorch do not support passing a defaultdict into DDP module
# https://github.com/pytorch/pytorch/blob/v1.7.1/torch/nn/parallel/scatter_gather.py#L19
# This hack can be removed after torch 1.8.0, where when each DDP process use single GPU
# (which is the best practice) DDP will not pass args, kwargs into scatter function
# https://github.com/pytorch/pytorch/blob/v1.7.1/torch/nn/parallel/distributed.py#L617
# https://github.com/pytorch/pytorch/blob/v1.8.0-rc1/torch/nn/parallel/distributed.py#L700
_isinstance = builtins.isinstance
def isinstance(obj, cls):
if _isinstance(obj, defaultdict):
return _isinstance(obj, cls) and issubclass(cls, defaultdict)
return _isinstance(obj, cls)
builtins.isinstance = isinstance
def override(string, args, config):
"""
    Example usage:
-o "config.optimizer.lr=1.0e-3,,config.optimizer.name='AdamW',,config.runner.eval_dataloaders=['dev', 'test']"
"""
options = string.split(',,')
for option in options:
option = option.strip()
key, value_str = option.split('=')
key, value_str = key.strip(), value_str.strip()
first_field, *remaining = key.split('.')
try:
value = eval(value_str)
except:
value = value_str
print(f'[Override] - {key} = {value}', file=sys.stderr)
if first_field == 'args':
assert len(remaining) == 1
setattr(args, remaining[0], value)
elif first_field == 'config':
target_config = config
for i, field_name in enumerate(remaining):
if i == len(remaining) - 1:
target_config[field_name] = value
else:
target_config.setdefault(field_name, {})
target_config = target_config[field_name]
def zero_mean_unit_var_norm(input_values: List[np.ndarray]) -> List[np.ndarray]:
"""
Every array in the list is normalized to have zero mean and unit variance
Taken from huggingface to ensure the same behavior across s3prl and huggingface
Reference: https://github.com/huggingface/transformers/blob/a26f4d620874b32d898a5b712006a4c856d07de1/src/transformers/models/wav2vec2/feature_extraction_wav2vec2.py#L81-L86
"""
return [(x - np.mean(x)) / np.sqrt(np.var(x) + 1e-5) for x in input_values]
#####################
# PARSE PRUNE HEADS #
#####################
def parse_prune_heads(config):
if 'prune_headids' in config['transformer'] and config['transformer']['prune_headids'] != 'None':
heads_int = []
spans = config['transformer']['prune_headids'].split(',')
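        # e.g. "0,3-6" expands to head ids [0, 3, 4, 5] (ranges are end-exclusive)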
for span in spans:
endpoints = span.split('-')
if len(endpoints) == 1:
heads_int.append(int(endpoints[0]))
elif len(endpoints) == 2:
heads_int += torch.arange(int(endpoints[0]), int(endpoints[1])).tolist()
else:
raise ValueError
print(f'[PRUNING] - heads {heads_int} will be pruned')
config['transformer']['prune_headids'] = heads_int
else:
config['transformer']['prune_headids'] = None
##########################
# GET TRANSFORMER TESTER #
##########################
def get_transformer_tester(from_path='result/result_transformer/libri_sd1337_fmllrBase960-F-N-K-RA/model-1000000.ckpt', display_settings=False):
''' Wrapper that loads the transformer model from checkpoint path '''
# load config and paras
all_states = torch.load(from_path, map_location='cpu')
config = all_states['Settings']['Config']
paras = all_states['Settings']['Paras']
# handling older checkpoints
if not hasattr(paras, 'multi_gpu'):
setattr(paras, 'multi_gpu', False)
if 'prune_headids' not in config['transformer']:
config['transformer']['prune_headids'] = None
# display checkpoint settings
if display_settings:
for cluster in config:
print(cluster + ':')
for item in config[cluster]:
print('\t' + str(item) + ': ', config[cluster][item])
print('paras:')
v_paras = vars(paras)
for item in v_paras:
print('\t' + str(item) + ': ', v_paras[item])
# load model with Tester
from transformer.solver import Tester
tester = Tester(config, paras)
tester.set_model(inference=True, with_head=False, from_path=from_path)
return tester
|