id (int64, 0-300k) | label (string, lengths 1-74, nullable) | text (string, lengths 4k-8k) |
---|---|---|
4,100 | id |
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetDataExportResult',
'AwaitableGetDataExportResult',
'get_data_export',
'get_data_export_output',
]
@pulumi.output_type
class GetDataExportResult:
"""
The top level data export resource container.
"""
def __init__(__self__, created_date=None, data_export_id=None, enable=None, event_hub_name=None, METHOD_NAME=None, last_modified_date=None, name=None, resource_id=None, table_names=None, type=None):
if created_date and not isinstance(created_date, str):
raise TypeError("Expected argument 'created_date' to be a str")
pulumi.set(__self__, "created_date", created_date)
if data_export_id and not isinstance(data_export_id, str):
raise TypeError("Expected argument 'data_export_id' to be a str")
pulumi.set(__self__, "data_export_id", data_export_id)
if enable and not isinstance(enable, bool):
raise TypeError("Expected argument 'enable' to be a bool")
pulumi.set(__self__, "enable", enable)
if event_hub_name and not isinstance(event_hub_name, str):
raise TypeError("Expected argument 'event_hub_name' to be a str")
pulumi.set(__self__, "event_hub_name", event_hub_name)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", METHOD_NAME)
if last_modified_date and not isinstance(last_modified_date, str):
raise TypeError("Expected argument 'last_modified_date' to be a str")
pulumi.set(__self__, "last_modified_date", last_modified_date)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if resource_id and not isinstance(resource_id, str):
raise TypeError("Expected argument 'resource_id' to be a str")
pulumi.set(__self__, "resource_id", resource_id)
if table_names and not isinstance(table_names, list):
raise TypeError("Expected argument 'table_names' to be a list")
pulumi.set(__self__, "table_names", table_names)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="createdDate")
def created_date(self) -> Optional[str]:
"""
The latest data export rule modification time.
"""
return pulumi.get(self, "created_date")
@property
@pulumi.getter(name="dataExportId")
def data_export_id(self) -> Optional[str]:
"""
The data export rule ID.
"""
return pulumi.get(self, "data_export_id")
@property
@pulumi.getter
def enable(self) -> Optional[bool]:
"""
Active when enabled.
"""
return pulumi.get(self, "enable")
@property
@pulumi.getter(name="eventHubName")
def event_hub_name(self) -> Optional[str]:
"""
Optional. Allows defining an Event Hub name. Not applicable when the destination is a Storage Account.
"""
return pulumi.get(self, "event_hub_name")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lastModifiedDate")
def last_modified_date(self) -> Optional[str]:
"""
Date and time when the export was last modified.
"""
return pulumi.get(self, "last_modified_date")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> str:
"""
The destination resource ID. This can be copied from the Properties entry of the destination resource in Azure.
"""
return pulumi.get(self, "resource_id")
@property
@pulumi.getter(name="tableNames")
def table_names(self) -> Sequence[str]:
"""
An array of tables to export, for example: [“Heartbeat, SecurityEvent”].
"""
return pulumi.get(self, "table_names")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetDataExportResult(GetDataExportResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDataExportResult(
created_date=self.created_date,
data_export_id=self.data_export_id,
enable=self.enable,
event_hub_name=self.event_hub_name,
METHOD_NAME=self.METHOD_NAME,
last_modified_date=self.last_modified_date,
name=self.name,
resource_id=self.resource_id,
table_names=self.table_names,
type=self.type)
def get_data_export(data_export_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDataExportResult:
"""
Gets a data export instance.
Azure REST API version: 2020-08-01.
:param str data_export_name: The data export rule name.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_name: The name of the workspace.
"""
__args__ = dict()
__args__['dataExportName'] = data_export_name
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceName'] = workspace_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:operationalinsights:getDataExport', __args__, opts=opts, typ=GetDataExportResult).value
return AwaitableGetDataExportResult(
created_date=pulumi.get(__ret__, 'created_date'),
data_export_id=pulumi.get(__ret__, 'data_export_id'),
enable=pulumi.get(__ret__, 'enable'),
event_hub_name=pulumi.get(__ret__, 'event_hub_name'),
METHOD_NAME=pulumi.get(__ret__, 'id'),
last_modified_date=pulumi.get(__ret__, 'last_modified_date'),
name=pulumi.get(__ret__, 'name'),
resource_id=pulumi.get(__ret__, 'resource_id'),
table_names=pulumi.get(__ret__, 'table_names'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_data_export)
def get_data_export_output(data_export_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDataExportResult]:
"""
Gets a data export instance.
Azure REST API version: 2020-08-01.
:param str data_export_name: The data export rule name.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_name: The name of the workspace.
"""
...
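# Illustrative usage (not part of the generated SDK): a minimal sketch of
# invoking this data source from a Pulumi program. All resource names below
# are hypothetical placeholders.
#
#     export = get_data_export(data_export_name="export1",
#                              resource_group_name="rg1",
#                              workspace_name="workspace1")
#     pulumi.export("destinationResourceId", export.resource_id)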
|
4,101 | delete |
import functools
import json
import os
from datetime import datetime, timedelta, timezone
import subprocess
import time
from typing import Any, Dict, List, Tuple
from time import sleep
from joblib import Parallel, delayed
import requests_wrapper as requests
import logging
from datahub.cli import cli_utils
from datahub.cli.cli_utils import get_system_auth
from datahub.ingestion.graph.client import DataHubGraph, DatahubClientConfig
from datahub.ingestion.run.pipeline import Pipeline
from tests.consistency_utils import wait_for_writes_to_sync
TIME: int = 1581407189000
logger = logging.getLogger(__name__)
def get_frontend_session():
session = requests.Session()
headers = {
"Content-Type": "application/json",
}
system_auth = get_system_auth()
if system_auth is not None:
session.headers.update({"Authorization": system_auth})
else:
username, password = get_admin_credentials()
data = '{"username":"' + username + '", "password":"' + password + '"}'
response = session.post(
f"{get_frontend_url()}/logIn", headers=headers, data=data
)
response.raise_for_status()
return session
def get_admin_username() -> str:
return get_admin_credentials()[0]
def get_admin_credentials():
return (
os.getenv("ADMIN_USERNAME", "datahub"),
os.getenv("ADMIN_PASSWORD", "datahub"),
)
def get_root_urn():
return "urn:li:corpuser:datahub"
def get_gms_url():
return os.getenv("DATAHUB_GMS_URL") or "http://localhost:8080"
def get_frontend_url():
return os.getenv("DATAHUB_FRONTEND_URL") or "http://localhost:9002"
def get_kafka_broker_url():
return os.getenv("DATAHUB_KAFKA_URL") or "localhost:9092"
def get_kafka_schema_registry():
# internal registry "http://localhost:8080/schema-registry/api/"
return os.getenv("DATAHUB_KAFKA_SCHEMA_REGISTRY_URL") or "http://localhost:8081"
def get_mysql_url():
return os.getenv("DATAHUB_MYSQL_URL") or "localhost:3306"
def get_mysql_username():
return os.getenv("DATAHUB_MYSQL_USERNAME") or "datahub"
def get_mysql_password():
return os.getenv("DATAHUB_MYSQL_PASSWORD") or "datahub"
def get_sleep_info() -> Tuple[int, int]:
return (
int(os.getenv("DATAHUB_TEST_SLEEP_BETWEEN", 20)),
int(os.getenv("DATAHUB_TEST_SLEEP_TIMES", 3)),
)
def is_k8s_enabled():
return os.getenv("K8S_CLUSTER_ENABLED", "false").lower() in ["true", "yes"]
def wait_for_healthcheck_util():
assert not check_endpoint(f"{get_frontend_url()}/admin")
assert not check_endpoint(f"{get_gms_url()}/health")
def check_endpoint(url):
try:
get = requests.get(url)
if get.status_code == 200:
return
else:
return f"{url}: is Not reachable, status_code: {get.status_code}"
except requests.exceptions.RequestException as e:
raise SystemExit(f"{url}: is Not reachable \nErr: {e}")
def ingest_file_via_rest(filename: str) -> Pipeline:
pipeline = Pipeline.create(
{
"source": {
"type": "file",
"config": {"filename": filename},
},
"sink": {
"type": "datahub-rest",
"config": {"server": get_gms_url()},
},
}
)
pipeline.run()
pipeline.raise_from_status()
wait_for_writes_to_sync()
return pipeline
@functools.lru_cache(maxsize=1)
def get_datahub_graph() -> DataHubGraph:
return DataHubGraph(DatahubClientConfig(server=get_gms_url()))
def delete_urn(urn: str) -> None:
get_datahub_graph().hard_delete_entity(urn)
def delete_urns(urns: List[str]) -> None:
for urn in urns:
delete_urn(urn)
def delete_urns_from_file(filename: str, shared_data: bool = False) -> None:
if not cli_utils.get_boolean_env_variable("CLEANUP_DATA", True):
print("Not cleaning data to save time")
return
session = requests.Session()
session.headers.update(
{
"X-RestLi-Protocol-Version": "2.0.0",
"Content-Type": "application/json",
}
)
def METHOD_NAME(entry):
is_mcp = "entityUrn" in entry
urn = None
# Kill Snapshot
if is_mcp:
urn = entry["entityUrn"]
else:
snapshot_union = entry["proposedSnapshot"]
snapshot = list(snapshot_union.values())[0]
urn = snapshot["urn"]
delete_urn(urn)
with open(filename) as f:
d = json.load(f)
Parallel(n_jobs=10)(delayed(METHOD_NAME)(entry) for entry in d)
wait_for_writes_to_sync()
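# Illustrative smoke-test flow (the file path is a hypothetical placeholder),
# showing how the ingest and cleanup helpers above are typically combined:
#
#     pipeline = ingest_file_via_rest("tests/data/sample_mces.json")
#     # ...run assertions against get_frontend_session() / get_gms_url()...
#     delete_urns_from_file("tests/data/sample_mces.json")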
# Fixed now value
NOW: datetime = datetime.now()
def get_timestampmillis_at_start_of_day(relative_day_num: int) -> int:
"""
Returns the time in milliseconds from epoch at the start of the day
corresponding to `now + relative_day_num`
"""
time: datetime = NOW + timedelta(days=float(relative_day_num))
time = datetime(
year=time.year,
month=time.month,
day=time.day,
hour=0,
minute=0,
second=0,
microsecond=0,
)
return int(time.timestamp() * 1000)
def get_strftime_from_timestamp_millis(ts_millis: int) -> str:
return datetime.fromtimestamp(ts_millis / 1000, tz=timezone.utc).isoformat()
def create_datahub_step_state_aspect(
username: str, onboarding_id: str
) -> Dict[str, Any]:
entity_urn = f"urn:li:dataHubStepState:urn:li:corpuser:{username}-{onboarding_id}"
print(f"Creating dataHubStepState aspect for {entity_urn}")
return {
"auditHeader": None,
"entityType": "dataHubStepState",
"entityUrn": entity_urn,
"changeType": "UPSERT",
"aspectName": "dataHubStepStateProperties",
"aspect": {
"value": f'{{"properties":{{}},"lastModified":{{"actor":"urn:li:corpuser:{username}","time":{TIME}}}}}',
"contentType": "application/json",
},
"systemMetadata": None,
}
def create_datahub_step_state_aspects(
username: str, onboarding_ids: str, onboarding_filename
) -> None:
"""
For a specific user, creates dataHubStepState aspects for each onboarding id in the list
"""
aspects_dict: List[Dict[str, Any]] = [
create_datahub_step_state_aspect(username, onboarding_id)
for onboarding_id in onboarding_ids
]
with open(onboarding_filename, "w") as f:
json.dump(aspects_dict, f, indent=2)
|
4,102 | plugin reconfigure |
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: https://foglamp-foglamp-documentation.readthedocs-hosted.com
# FOGLAMP_END
""" Plugin module which adds a square block of specific monochrome shade on images """
import os
import logging
import datetime
import filter_ingest
import traceback
import copy
import json
import numpy as np
from fledge.common import logger
# local logger
_LOGGER = logger.setup(__name__, level=logging.DEBUG)
_DEFAULT_CONFIG = {
'plugin': { # mandatory filter
'description': 'Filter that overlays a square block on image',
'type': 'string',
'default': 'imageblock',
'readonly': 'true'
},
'enable': { # recommended filter
'description': 'Enable imageblock filter plugin',
'type': 'boolean',
'default': 'false',
'displayName': 'Enabled',
'order': "1"
},
'block_color': {
'description': 'Block color (0-255)',
'type': 'integer',
'default': '255',
'displayName': 'Block color',
'order': '2'
}
}
def plugin_info():
""" Returns information about the plugin.
Args:
Returns:
dict: plugin information
Raises:
"""
_LOGGER.info("imageblock - plugin_info called")
return {
'name': 'imageblock',
'version': '1.9.2',
'mode': 'none',
'type': 'filter',
'interface': '1.0',
'config': _DEFAULT_CONFIG
}
def plugin_init(config, ingest_ref, callback):
""" Initialise the plugin.
Args:
config: JSON configuration document for the plugin configuration category
Returns:
data: JSON object to be used in future calls to the plugin
Raises:
"""
_LOGGER.info("imageblock - plugin_init called")
try:
_config = copy.deepcopy(config)
_config['ingest_ref'] = ingest_ref
_config['callback'] = callback
except:
_LOGGER.info("could not create configuration")
raise
return _config
def plugin_ingest(handle, data):
""" plugin_ingest -- log data we receive """
if handle['enable']['value'] == 'false':
_LOGGER.debug("imageblock - plugin_ingest: enable=FALSE, not processing data, forwarding received data")
filter_ingest.filter_ingest_callback(handle['callback'], handle['ingest_ref'], data)
return
_LOGGER.debug("imageblock - plugin_ingest: INPUT: type(data)={}, data={}".format(type(data), data))
color = int(handle['block_color']['value'])
try:
if type(data) == dict:
data = [data]
for entry in data:
_LOGGER.debug("np.pi={}, type(entry) = {}".format(np.pi, type(entry)))
for k in entry['readings'].keys():
v = entry['readings'][k]
_LOGGER.debug("k={}, type(v)={}, v.shape={}, v={}".format(k, type(v), v.shape, v))
import random
center = random.randint(v.shape[0]//4,v.shape[0]//4*3+1)
sz = random.randint(10,v.shape[0]//4-10)
_LOGGER.debug("imageblock - plugin_ingest: center={}, sz={}, color={}".format(center, sz, color))
v[center-sz:center+sz,center-sz:center+sz] = color
entry['readings'][k] = v
_LOGGER.debug("After adding a small block, pixel values: OUTPUT: data={}".format(data))
filter_ingest.filter_ingest_callback(handle['callback'], handle['ingest_ref'], data)
except Exception as ex:
_LOGGER.error("imageblock writer exception {}".format(traceback.format_exc()))
raise
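# Illustrative shape of the payload consumed by plugin_ingest() above (an
# assumption for clarity; asset and datapoint names are hypothetical): each
# entry carries 2-D numpy arrays under "readings", and a square of
# `block_color` pixels is painted into each array before forwarding.
#
#     data = [{
#         "asset": "camera1",
#         "readings": {"frame": np.zeros((64, 64), dtype=np.uint8)},
#     }]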
def METHOD_NAME(handle, new_config):
""" Reconfigures the plugin
Args:
handle: handle returned by the plugin initialisation call
new_config: JSON object representing the new configuration category for the category
Returns:
new_handle: new handle to be used in the future calls
"""
_LOGGER.info("imageblock - Old config for plugin {} \n new config {}".format(handle, new_config))
plugin_shutdown(handle)
# plugin_init
new_handle = plugin_init(new_config, handle['ingest_ref'], handle['callback'])
return new_handle
def plugin_shutdown(handle):
""" Shut down the plugin.
Args:
handle: JSON configuration document for the plugin configuration category
Returns:
data: JSON object to be used in future calls to the plugin
Raises:
"""
_LOGGER.info("imageblock Shutdown")
|
4,103 | go to last page |
import discord
from redbot.core.bank import get_currency_name
from redbot.vendored.discord.ext import menus
class BadgeMenu(menus.MenuPages, inherit_buttons=False):
def __init__(
self,
source: menus.PageSource,
timeout: int = 30,
can_buy=False,
):
super().__init__(
source,
timeout=timeout,
clear_reactions_after=True,
delete_message_after=True,
)
self.can_buy = can_buy
async def start(self, ctx, *, channel=None, wait=False):
if self.can_buy:
self.can_buy = await ctx.cog.buy_badge.can_run(ctx, check_all_parents=True)
await super().start(ctx, channel=channel, wait=wait)
def should_add_reactions(self):
return True
def _no_pages(self):
return not self._source.is_paginating()
def _skip_double_triangle_buttons(self):
return (not self._source.is_paginating()) or super()._skip_double_triangle_buttons()
async def finalize(self, timed_out):
"""|coro|
A coroutine that is called when the menu loop has completed
its run. This is useful if some asynchronous clean-up is
required after the fact.
Parameters
--------------
timed_out: :class:`bool`
Whether the menu completed due to timing out.
"""
if timed_out and self.delete_message_after:
self.delete_message_after = False
def cant_buy_check(self):
return not self.can_buy
@menus.button("\N{BANKNOTE WITH DOLLAR SIGN}", position=menus.First(0), skip_if=cant_buy_check)
async def buy_badge(self, payload):
page = await self.source.get_page(self.current_page)
await self.ctx.invoke(
self.ctx.cog.buy_badge,
is_global=True if page["server_id"] == "global" else False,
name=page["badge_name"],
)
@menus.button(
"\N{BLACK LEFT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR}\ufe0f",
position=menus.First(0),
skip_if=_skip_double_triangle_buttons,
)
async def go_to_first_page(self, payload):
"""go to the first page"""
await self.show_page(0)
@menus.button(
"\N{BLACK LEFT-POINTING TRIANGLE}\ufe0f", position=menus.First(1), skip_if=_no_pages
)
async def go_to_previous_page(self, payload):
"""go to the previous page"""
if self.current_page == 0:
await self.show_page(self._source.get_max_pages() - 1)
else:
await self.show_checked_page(self.current_page - 1)
@menus.button(
"\N{BLACK RIGHT-POINTING TRIANGLE}\ufe0f", position=menus.Last(0), skip_if=_no_pages
)
async def go_to_next_page(self, payload):
"""go to the next page"""
if self.current_page == self._source.get_max_pages() - 1:
await self.show_page(0)
else:
await self.show_checked_page(self.current_page + 1)
@menus.button(
"\N{BLACK RIGHT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR}\ufe0f",
position=menus.Last(1),
skip_if=_skip_double_triangle_buttons,
)
async def METHOD_NAME(self, payload):
"""go to the last page"""
# The call here is safe because it's guarded by skip_if
await self.show_page(self._source.get_max_pages() - 1)
@menus.button("\N{CROSS MARK}", position=menus.First(2))
async def stop_pages(self, payload: discord.RawReactionActionEvent) -> None:
self.stop()
class AvailableBadgePager(menus.ListPageSource):
def __init__(self, entries, server_name, server_id, icon):
super().__init__(entries, per_page=1)
self.server_name = server_name
self.icon = icon
self.server_id = server_id
async def format_page(self, menu: BadgeMenu, page):
em = discord.Embed(
title=page["badge_name"],
description=page["description"],
color=int(page["border_color"][1:], base=16),
)
if page["price"] > 0:
em.add_field(
name="Price", value=f"{page['price']}{await get_currency_name(menu.ctx.guild)}"
)
elif page["price"] == 0:
em.add_field(name="Price", value="Free")
em.set_author(name=self.server_name, icon_url=self.icon)
em.set_thumbnail(url=page["bg_img"])
em.set_footer(text=f"Badge {menu.current_page+1}/{self.get_max_pages()}")
return em
class OwnBadgePager(menus.ListPageSource):
def __init__(self, entries, user: discord.Member):
super().__init__(entries, per_page=1)
self.user = user
async def format_page(self, menu: BadgeMenu, page):
em = discord.Embed(
title=page["badge_name"],
description=page["description"],
color=int(page["border_color"][1:], base=16),
)
em.set_author(name=self.user.display_name, icon_url=self.user.avatar_url)
em.set_thumbnail(url=page["bg_img"])
em.set_footer(
text=f"Server: {page['server_name']} • Badge {menu.current_page+1}/{self.get_max_pages()}"
)
return em
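# Illustrative wiring (a sketch, not taken from the cog itself): a command
# would typically build a page source and start the menu, e.g.
#
#     source = AvailableBadgePager(entries, server_name, server_id, icon)
#     await BadgeMenu(source, can_buy=True).start(ctx)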
|
4,104 | test send confirmation email |
# coding: utf-8
from flask import render_template, url_for
from itsdangerous import URLSafeTimedSerializer
from mock import patch
from webapp.notifications import send_confirmation_email, send_reset_password_email
from webapp.utils import get_timed_serializer
from .base import BaseTestCase
class NotificationsTestCase(BaseTestCase):
def test_send_confirmation_email_with_empty_recipient_email(self):
"""
Given:
- recipient_email = None
When:
- we send the email confirmation notification
We verify:
- that an exception is raised because recipient_email is an invalid email address
"""
recipient_email = None
with self.assertRaises(ValueError):
send_confirmation_email(recipient_email)
def test_send_reset_password_email_with_empty_recipient_email(self):
"""
Given:
- recipient_email = None
When:
- we send the password reset notification
We verify:
- that an exception is raised because recipient_email is an invalid email address
"""
recipient_email = None
with self.assertRaises(ValueError):
send_reset_password_email(recipient_email)
def test_send_confirmation_email_with_invalid_recipient_email(self):
"""
Given:
- an invalid recipient_email
When:
- we send the email confirmation notification
We verify:
- that an exception is raised because recipient_email is an invalid email address
"""
recipient_email = "foo@bar"
with self.assertRaises(ValueError):
send_confirmation_email(recipient_email)
def test__send_reset_password_email_with_invalid_recipient_email(self):
"""
Given:
- an invalid recipient_email
When:
- we send the password reset notification
We verify:
- that an exception is raised because recipient_email is an invalid email address
"""
recipient_email = "foo@bar"
with self.assertRaises(ValueError):
send_reset_password_email(recipient_email)
def test_invalid_token_confirmation_email(self):
"""
When:
- current_app.config["SECRET_KEY"] has no value
We verify:
- that an exception occurs when a token is created with
get_timed_serializer while sending the email confirmation notification.
"""
recipient_email = "[email protected]"
with patch("webapp.notifications.utils") as mock:
mock.get_timed_serializer.return_value = URLSafeTimedSerializer(None)
expected = "Token inválido"
result = send_confirmation_email(recipient_email)
self.assertIn(expected, str(result))
def test_invalid_token_reset_password(self):
"""
When:
- current_app.config["SECRET_KEY"] has no value
We verify:
- that an exception occurs when a token is created with
get_timed_serializer while sending the password reset notification.
"""
recipient_email = "[email protected]"
with patch("webapp.notifications.utils") as mock:
mock.get_timed_serializer.return_value = URLSafeTimedSerializer(None)
expected = "Token inválido"
result = send_reset_password_email(recipient_email)
self.assertIn(expected, str(result))
def METHOD_NAME(self):
"""
Given:
- a valid email address: recipient_email = '[email protected]'
When:
- we send the email confirmation notification.
We verify:
- that ``app.utils.send_email`` is invoked with the parameters:
- recipient = '[email protected]'
- subject = "Confirmação de email"
- html = render_template('email/activate.html', confirm_url=confirm_url)
- that the return value of send_confirmation_email is: (True, '')
"""
recipient_email = "[email protected]"
ts = get_timed_serializer()
token = ts.dumps(recipient_email, salt="email-confirm-key")
confirm_url = url_for("admin.confirm_email", token=token, _external=True)
result_expected = (True, "")
with patch(
"webapp.utils.utils.send_email", return_value=result_expected
) as mock:
result = send_confirmation_email(recipient_email)
mock.assert_called_with(
recipient_email,
"Confirmação de email",
render_template("email/activate.html", confirm_url=confirm_url),
)
self.assertEqual(result_expected, result)
def test_send_reset_password_email(self):
"""
Given:
- a valid email address: recipient_email = '[email protected]'
When:
- we send the password reset notification.
We verify:
- that ``app.utils.send_email`` is invoked with the parameters:
- recipient = '[email protected]'
- subject = "Instruções para recuperar sua senha"
- html = render_template('email/recover.html', recover_url=recover_url)
- that the return value of send_reset_password_email is: (True, '')
"""
recipient_email = "[email protected]"
ts = get_timed_serializer()
token = ts.dumps(recipient_email, salt="recover-key")
recover_url = url_for("admin.reset_with_token", token=token, _external=True)
result_expected = (True, "")
with patch(
"webapp.utils.utils.send_email", return_value=result_expected
) as mock:
result = send_reset_password_email(recipient_email)
mock.assert_called_with(
recipient_email,
"Instruções para recuperar sua senha",
render_template("email/recover.html", recover_url=recover_url),
)
self.assertEqual(result_expected, result)
|
4,105 | test missing column |
import json
import os
import pytest
from dbt.tests.util import run_dbt
from dbt.tests.adapter.persist_docs.fixtures import (
_DOCS__MY_FUN_DOCS,
_MODELS__MISSING_COLUMN,
_MODELS__MODEL_USING_QUOTE_UTIL,
_MODELS__NO_DOCS_MODEL,
_MODELS__TABLE,
_MODELS__VIEW,
_PROPERTIES__QUOTE_MODEL,
_PROPERITES__SCHEMA_MISSING_COL,
_PROPERTIES__SCHEMA_YML,
_SEEDS__SEED,
)
class BasePersistDocsBase:
@pytest.fixture(scope="class", autouse=True)
def setUp(self, project):
run_dbt(["seed"])
run_dbt()
@pytest.fixture(scope="class")
def seeds(self):
return {"seed.csv": _SEEDS__SEED}
@pytest.fixture(scope="class")
def models(self):
return {
"no_docs_model.sql": _MODELS__NO_DOCS_MODEL,
"table_model.sql": _MODELS__TABLE,
"view_model.sql": _MODELS__VIEW,
}
@pytest.fixture(scope="class")
def properties(self):
return {
"my_fun_docs.md": _DOCS__MY_FUN_DOCS,
"schema.yml": _PROPERTIES__SCHEMA_YML,
}
def _assert_common_comments(self, *comments):
for comment in comments:
assert '"with double quotes"' in comment
assert """'''abc123'''""" in comment
assert "\n" in comment
assert "Some $lbl$ labeled $lbl$ and $$ unlabeled $$ dollar-quoting" in comment
assert "/* comment */" in comment
if os.name == "nt":
assert "--\r\n" in comment or "--\n" in comment
else:
assert "--\n" in comment
def _assert_has_table_comments(self, table_node):
table_comment = table_node["metadata"]["comment"]
assert table_comment.startswith("Table model description")
table_id_comment = table_node["columns"]["id"]["comment"]
assert table_id_comment.startswith("id Column description")
table_name_comment = table_node["columns"]["name"]["comment"]
assert table_name_comment.startswith("Some stuff here and then a call to")
self._assert_common_comments(table_comment, table_id_comment, table_name_comment)
def _assert_has_view_comments(
self, view_node, has_node_comments=True, has_column_comments=True
):
view_comment = view_node["metadata"]["comment"]
if has_node_comments:
assert view_comment.startswith("View model description")
self._assert_common_comments(view_comment)
else:
assert view_comment is None
view_id_comment = view_node["columns"]["id"]["comment"]
if has_column_comments:
assert view_id_comment.startswith("id Column description")
self._assert_common_comments(view_id_comment)
else:
assert view_id_comment is None
view_name_comment = view_node["columns"]["name"]["comment"]
assert view_name_comment is None
class BasePersistDocs(BasePersistDocsBase):
@pytest.fixture(scope="class")
def project_config_update(self):
return {
"models": {
"test": {
"+persist_docs": {
"relation": True,
"columns": True,
},
}
}
}
def test_has_comments_pglike(self, project):
run_dbt(["docs", "generate"])
with open("target/catalog.json") as fp:
catalog_data = json.load(fp)
assert "nodes" in catalog_data
assert len(catalog_data["nodes"]) == 4
table_node = catalog_data["nodes"]["model.test.table_model"]
self._assert_has_table_comments(table_node)
view_node = catalog_data["nodes"]["model.test.view_model"]
self._assert_has_view_comments(view_node)
no_docs_node = catalog_data["nodes"]["model.test.no_docs_model"]
self._assert_has_view_comments(no_docs_node, False, False)
class BasePersistDocsColumnMissing(BasePersistDocsBase):
@pytest.fixture(scope="class")
def project_config_update(self):
return {
"models": {
"test": {
"+persist_docs": {
"columns": True,
},
}
}
}
@pytest.fixture(scope="class")
def models(self):
return {"missing_column.sql": _MODELS__MISSING_COLUMN}
@pytest.fixture(scope="class")
def properties(self):
return {"schema.yml": _PROPERITES__SCHEMA_MISSING_COL}
def METHOD_NAME(self, project):
run_dbt(["docs", "generate"])
with open("target/catalog.json") as fp:
catalog_data = json.load(fp)
assert "nodes" in catalog_data
table_node = catalog_data["nodes"]["model.test.missing_column"]
table_id_comment = table_node["columns"]["id"]["comment"]
assert table_id_comment.startswith("test id column description")
class BasePersistDocsCommentOnQuotedColumn:
"""Covers edge case where column with comment must be quoted.
We set this using the `quote:` tag in the property file."""
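# An illustrative shape for such a property file (the real fixture is imported
# above as _PROPERTIES__QUOTE_MODEL; this is only an assumed sketch):
#
#     models:
#       - name: quote_model
#         columns:
#           - name: 2id
#             quote: true
#             description: XXX is a column whose name must be quoted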
@pytest.fixture(scope="class")
def models(self):
return {"quote_model.sql": _MODELS__MODEL_USING_QUOTE_UTIL}
@pytest.fixture(scope="class")
def properties(self):
return {"properties.yml": _PROPERTIES__QUOTE_MODEL}
@pytest.fixture(scope="class")
def project_config_update(self):
return {
"models": {
"test": {
"materialized": "table",
"+persist_docs": {
"relation": True,
"columns": True,
},
}
}
}
@pytest.fixture(scope="class")
def run_has_comments(self, project):
def fixt():
run_dbt()
run_dbt(["docs", "generate"])
with open("target/catalog.json") as fp:
catalog_data = json.load(fp)
assert "nodes" in catalog_data
assert len(catalog_data["nodes"]) == 1
column_node = catalog_data["nodes"]["model.test.quote_model"]
column_comment = column_node["columns"]["2id"]["comment"]
assert column_comment.startswith("XXX")
return fixt
def test_quoted_column_comments(self, run_has_comments):
run_has_comments()
class TestPersistDocs(BasePersistDocs):
pass
class TestPersistDocsColumnMissing(BasePersistDocsColumnMissing):
pass
class TestPersistDocsCommentOnQuotedColumn(BasePersistDocsCommentOnQuotedColumn):
pass
|
4,106 | test angle sexagesimal args |
"""Tests of whether units behave."""
from assay import assert_raises
from numpy import array, nan
from skyfield.units import (
Angle, Distance, Velocity, UnpackingError, WrongUnitError,
)
try:
from astropy import units as u
except ImportError:
u = None
def needs_astropy(test):
"""Skip `test` if AstroPy is not available."""
return None if (u is None) else test
def test_degree_rounding():
tenth = 0.1 / 60.0 / 60.0 # of an arcsecond
assert str(Angle(degrees=tenth * -600.75)) == '-00deg 01\' 00.1"'
assert str(Angle(degrees=tenth * -600.25)) == '-00deg 01\' 00.0"'
assert str(Angle(degrees=tenth * -599.75)) == '-00deg 01\' 00.0"'
assert str(Angle(degrees=tenth * -599.25)) == '-00deg 00\' 59.9"'
assert str(Angle(degrees=tenth * -1.75)) == '-00deg 00\' 00.2"'
assert str(Angle(degrees=tenth * -1.25)) == '-00deg 00\' 00.1"'
assert str(Angle(degrees=tenth * -0.75)) == '-00deg 00\' 00.1"'
assert str(Angle(degrees=tenth * -0.25)) == '00deg 00\' 00.0"'
assert str(Angle(degrees=0.0)) == '00deg 00\' 00.0"'
assert str(Angle(degrees=tenth * 0.25)) == '00deg 00\' 00.0"'
assert str(Angle(degrees=tenth * 0.75)) == '00deg 00\' 00.1"'
assert str(Angle(degrees=tenth * 1.25)) == '00deg 00\' 00.1"'
assert str(Angle(degrees=tenth * 1.75)) == '00deg 00\' 00.2"'
assert str(Angle(degrees=tenth * 599.25)) == '00deg 00\' 59.9"'
assert str(Angle(degrees=tenth * 599.75)) == '00deg 01\' 00.0"'
assert str(Angle(degrees=tenth * 600.25)) == '00deg 01\' 00.0"'
assert str(Angle(degrees=tenth * 600.75)) == '00deg 01\' 00.1"'
def test_angle_scalar_strs():
assert str(Angle(degrees=array(91))) == '''91deg 00' 00.0"'''
assert str(Angle(degrees=array(91), signed=True)) == '''+91deg 00' 00.0"'''
assert str(Angle(hours=array(12))) == '''12h 00m 00.00s'''
def test_angle_array_strs():
h = Angle(hours=array([0.5, nan, -13]))
d = Angle(degrees=h._degrees)
assert str(h) == '3 values from 00h 30m 00.00s to -13h 00m 00.00s'
assert str(d) == '''3 values from 07deg 30' 00.0" to -195deg 00' 00.0"'''
with assert_raises(WrongUnitError):
h.dstr()
d.hstr()
assert h.hstr() == d.hstr(warn=False) == [
'00h 30m 00.00s',
'nan',
'-13h 00m 00.00s',
]
assert d.dstr() == h.dstr(warn=False) == [
'07deg 30\' 00.0"',
'nan',
'-195deg 00\' 00.0"',
]
empty = Angle(radians=[])
assert str(empty) == 'Angle []'
assert empty.hstr(warn=False) == []
assert empty.dstr() == []
assert h.hstr(format='{0} {1} {2} {3} {4} {5}', places=6) == [
' 0 30 0 0 6', 'nan', '- 13 0 0 0 6']
assert d.dstr(format='{0} {1} {2} {3} {4} {5}', places=6) == [
' 7 30 0 0 6', 'nan', '- 195 0 0 0 6']
def METHOD_NAME():
assert str(Angle(degrees=(90,))) == '''90deg 00' 00.0"'''
assert str(Angle(hours=(12,))) == '''12h 00m 00.00s'''
assert str(Angle(degrees=(90, 15))) == '''90deg 15' 00.0"'''
assert str(Angle(hours=(12, 30))) == '''12h 30m 00.00s'''
assert str(Angle(degrees=(90, 15, 30))) == '''90deg 15' 30.0"'''
assert str(Angle(hours=(12, 30, 15))) == '''12h 30m 15.00s'''
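# Sexagesimal tuples are read as (units, minutes, seconds), so for example
# Angle(degrees=(90, 15, 30)) is 90 + 15/60 + 30/3600 = 90.25833... degrees.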
def test_arcminutes_and_arcseconds_and_mas():
angle = Angle(degrees=1.0)
assert angle.arcminutes() == 60
assert angle.arcseconds() == 60 * 60
assert angle.mas() == 60 * 60 * 1000
def test_distance_input_units():
for d in (
Distance(1.0),
Distance(au=1.0), # deprecated
Distance(m=149597870700),
Distance(km=149597870.700),
Distance.au(1.0), # modern
Distance.m(149597870700),
Distance.km(149597870.700),
):
assert abs(d.au - 1.0) <= 0
def test_velocity_input_units():
v1 = Velocity(au_per_d=2.0)
v2 = Velocity(km_per_s=3462.9137)
assert abs(v1.au_per_d - v2.au_per_d) < 1e-7
v1 = Velocity.au_per_d(2.0)
v2 = Velocity.km_per_s(3462.9137)
assert abs(v1.au_per_d - v2.au_per_d) < 1e-7
def test_stringifying_vector_distance():
a = array([1.23, 4.56])
s = str(Distance(au=a))
if '[1' in s:
# Python 3.5, says Travis CI. No idea.
assert s == '[1.23 4.56] au'
else:
# Every other version of Python.
assert s == '[ 1.23 4.56] au'
def test_helpful_exceptions():
distance = Distance(1.234)
expect = '''\
to use this Distance, ask for its value in a particular unit:
distance.au
distance.km
distance.m'''
with assert_raises(UnpackingError) as a:
x, y, z = distance
assert str(a.exception) == expect
with assert_raises(UnpackingError) as a:
distance[0]
assert str(a.exception) == expect
velocity = Velocity(1.234)
expect = '''\
to use this Velocity, ask for its value in a particular unit:
velocity.au_per_d
velocity.km_per_s
velocity.m_per_s'''
with assert_raises(UnpackingError) as a:
x, y, z = velocity
assert str(a.exception) == expect
with assert_raises(UnpackingError) as a:
velocity[0]
assert str(a.exception) == expect
angle = Angle(radians=1.234)
expect = '''\
to use this Angle, ask for its value in a particular unit:
angle.degrees
angle.hours
angle.radians'''
with assert_raises(UnpackingError) as a:
x, y, z = angle
assert str(a.exception) == expect
with assert_raises(UnpackingError) as a:
angle[0]
assert str(a.exception) == expect
def test_constructors_accept_plain_lists():
Distance(au=[1,2,3])
Distance(km=[1,2,3])
Distance(m=[1,2,3])
Velocity(au_per_d=[1,2,3])
Velocity(km_per_s=[1,2,3])
def test_converting_from_km_to_m():
distance = Distance(km=1.234)
assert abs(distance.m - 1234.0) < 1e-15
def test_converting_from_m_to_km():
distance = Distance(m=1234.0)
assert abs(distance.km - 1.234) < 1e-15
def test_deprecated_method_from_au():
distance = Distance.from_au(1.25)
assert distance.au == 1.25
@needs_astropy
def test_converting_distance_with_astropy():
distance = Distance(au=1.234)
value1 = distance.km
value2 = distance.to(u.km)
epsilon = 0.02 # definitions of AU seem to disagree slightly
assert abs(value1 - value2.value) < epsilon
@needs_astropy
def test_converting_velocity_with_astropy():
velocity = Velocity(au_per_d=1.234)
value1 = velocity.km_per_s
value2 = velocity.to(u.km / u.s)
epsilon = 1e-6
assert abs(value1 - value2.value) < epsilon
|
4,107 | test brackets |
from common import *
from trezor.utils import ensure
from apps.common.paths import *
# NOTE: moved into tests not to occupy flash space
# in firmware binary, when it is not used in production
class NeverMatchingSchema:
@staticmethod
def match(path: "Bip32Path") -> bool:
return False
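# Note: H_(n) is the hardened-index helper (n | 0x80000000), so for example
# H_(44) == 44 + 2**31 == 2147483692.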
class TestPaths(unittest.TestCase):
def test_is_hardened(self):
self.assertTrue(is_hardened(H_(44)))
self.assertTrue(is_hardened(H_(0)))
self.assertTrue(is_hardened(H_(99999)))
self.assertFalse(is_hardened(44))
self.assertFalse(is_hardened(0))
self.assertFalse(is_hardened(99999))
def test_path_is_hardened(self):
self.assertTrue(path_is_hardened([H_(44), H_(1), H_(0)]))
self.assertTrue(path_is_hardened([H_(0)]))
self.assertFalse(path_is_hardened([44, H_(44), H_(0)]))
self.assertFalse(path_is_hardened([0]))
self.assertFalse(path_is_hardened([H_(44), H_(1), H_(0), H_(0), 0]))
class TestPathSchemas(unittest.TestCase):
def assertMatch(self, schema, path):
self.assertTrue(
schema.match(path),
f"Expected schema {repr(schema)} to match path {address_n_to_str(path)}",
)
def assertMismatch(self, schema, path):
self.assertFalse(
schema.match(path),
f"Expected schema {repr(schema)} to not match path {address_n_to_str(path)}",
)
def assertEqualSchema(self, schema_a, schema_b):
def is_equal(a, b):
if isinstance(a, Interval) and isinstance(b, Interval):
return a.min == b.min and a.max == b.max
return set(a) == set(b)
ensure(
all(is_equal(a, b) for a, b in zip(schema_a.schema, schema_b.schema))
and is_equal(schema_a.trailing_components, schema_b.trailing_components),
f"Schemas differ:\nA = {repr(schema_a)}\nB = {repr(schema_b)}"
)
def test_always_never_matching(self):
paths = [
[],
[0],
[H_(0)],
[44],
[H_(44)],
[H_(44), H_(0), H_(0), 0, 0],
[H_(44), H_(0), H_(0), H_(0), H_(0)],
[H_(44), H_(0), H_(0), H_(0), H_(0)] * 10,
]
for path in paths:
self.assertMatch(AlwaysMatchingSchema, path)
self.assertMismatch(NeverMatchingSchema, path)
def test_pattern_fixed(self):
pattern = "m/44'/0'/0'/0/0"
schema = PathSchema.parse(pattern, 0)
self.assertMatch(schema, [H_(44), H_(0), H_(0), 0, 0])
paths = [
[],
[0],
[H_(0)],
[44],
[H_(44)],
[44, 0, 0, 0, 0],
[H_(44), H_(0), H_(0), H_(0), H_(0)],
[H_(44), H_(0), H_(0), H_(0), H_(0)] * 10,
]
for path in paths:
self.assertMismatch(schema, path)
def test_ranges_sets(self):
pattern_ranges = "m/44'/[100-109]'/[0-20]"
pattern_sets = "m/44'/[100,105,109]'/[0,10,20]"
schema_ranges = PathSchema.parse(pattern_ranges, 0)
schema_sets = PathSchema.parse(pattern_sets, 0)
paths_good = [
[H_(44), H_(100), 0],
[H_(44), H_(100), 10],
[H_(44), H_(100), 20],
[H_(44), H_(105), 0],
[H_(44), H_(105), 10],
[H_(44), H_(105), 20],
[H_(44), H_(109), 0],
[H_(44), H_(109), 10],
[H_(44), H_(109), 20],
]
for path in paths_good:
self.assertMatch(schema_ranges, path)
self.assertMatch(schema_sets, path)
paths_bad = [
[H_(44), H_(100)],
[H_(44), H_(100), 0, 0],
[H_(44), 100, 0],
[H_(44), 100, H_(0)],
[H_(44), H_(99), 0],
[H_(44), H_(110), 0],
[H_(44), H_(100), 21],
]
for path in paths_bad:
self.assertMismatch(schema_ranges, path)
self.assertMismatch(schema_sets, path)
self.assertMatch(schema_ranges, [H_(44), H_(104), 19])
self.assertMismatch(schema_sets, [H_(44), H_(104), 19])
def METHOD_NAME(self):
pattern_a = "m/[0]'/[0-5]'/[0,1,2]'/[0]/[0-5]/[0,1,2]"
pattern_b = "m/0'/0-5'/0,1,2'/0/0-5/0,1,2"
schema_a = PathSchema.parse(pattern_a, 0)
schema_b = PathSchema.parse(pattern_b, 0)
self.assertEqualSchema(schema_a, schema_b)
def test_wildcard(self):
pattern = "m/44'/0'/*"
schema = PathSchema.parse(pattern, 0)
paths_good = [
[H_(44), H_(0)],
[H_(44), H_(0), 0],
[H_(44), H_(0), 0, 1, 2, 3, 4, 5, 6, 7, 8],
]
for path in paths_good:
self.assertMatch(schema, path)
paths_bad = [
[H_(44)],
[H_(44), H_(0), H_(0)],
[H_(44), H_(0), 0, 1, 2, 3, 4, 5, 6, 7, H_(8)],
]
for path in paths_bad:
self.assertMismatch(schema, path)
def test_substitutes(self):
pattern_sub = "m/44'/coin_type'/account'/change/address_index"
pattern_plain = "m/44'/19'/0-100'/0,1/0-1000000"
schema_sub = PathSchema.parse(pattern_sub, slip44_id=19)
# use wrong slip44 id to ensure it doesn't affect anything
schema_plain = PathSchema.parse(pattern_plain, slip44_id=0)
self.assertEqualSchema(schema_sub, schema_plain)
def test_copy(self):
schema_normal = PathSchema.parse("m/44'/0'/0'/0/0", slip44_id=0)
self.assertEqualSchema(schema_normal, schema_normal.copy())
schema_wildcard = PathSchema.parse("m/44'/0'/0'/0/**", slip44_id=0)
self.assertEqualSchema(schema_wildcard, schema_wildcard.copy())
def test_parse(self):
schema_parsed = PathSchema.parse("m/44'/0-5'/0,1,2'/0/**", slip44_id=0)
schema_manual = PathSchema(
[
(H_(44),),
Interval(H_(0), H_(5)),
set((H_(0), H_(1), H_(2))),
(0,),
],
trailing_components=Interval(0, 0xFFFF_FFFF),
)
self.assertEqualSchema(schema_manual, schema_parsed)
def test_restrict(self):
PATTERN_BIP44 = "m/44'/coin_type'/account'/change/address_index"
# Restrict coin type to Bitcoin.
schema = PathSchema.parse(PATTERN_BIP44, (0, 145))
self.assertTrue(schema.restrict([H_(44), H_(0)]))
expected = PathSchema.parse(PATTERN_BIP44, 0)
self.assertEqualSchema(schema, expected)
# Restrict coin type to Bitcoin Cash and account 2.
schema = PathSchema.parse(PATTERN_BIP44, (0, 145))
self.assertTrue(schema.restrict([H_(44), H_(145), H_(2)]))
expected = PathSchema.parse("m/44'/145'/2'/change/address_index", 0)
self.assertEqualSchema(schema, expected)
# Restrict wildcards.
schema = PathSchema.parse("m/10018'/**", 0)
self.assertTrue(schema.restrict([H_(10018), H_(3), 7]))
expected = PathSchema.parse("m/10018'/3'/7/**", 0)
self.assertEqualSchema(schema, expected)
# Restrict to a never-matching schema.
schema = PathSchema.parse(PATTERN_BIP44, (0, 145))
self.assertFalse(schema.restrict([H_(44), H_(0), 0]))
if __name__ == "__main__":
unittest.main()
|
4,108 | c main |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Convert plain qtest traces to C or Bash reproducers
Use this to help build bug-reports or create in-tree reproducers for bugs.
Note: This will not format C code for you. Pipe the output through
clang-format -style="{BasedOnStyle: llvm, IndentWidth: 4, ColumnLimit: 90}"
or similar
"""
import sys
import os
import argparse
import textwrap
from datetime import date
__author__ = "Alexander Bulekov <[email protected]>"
__copyright__ = "Copyright (C) 2021, Red Hat, Inc."
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Alexander Bulekov"
__email__ = "[email protected]"
def c_header(owner):
return """/*
* Autogenerated Fuzzer Test Case
*
* Copyright (c) {date} {owner}
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "libqtest.h"
""".format(date=date.today().year, owner=owner)
def c_comment(s):
""" Return a multi-line C comment. Assume the text is already wrapped """
return "/*\n * " + "\n * ".join(s.splitlines()) + "\n*/"
def print_c_function(s):
print("/* ")
for l in s.splitlines():
print(" * {}".format(l))
def bash_reproducer(path, args, trace):
result = '\\\n'.join(textwrap.wrap("cat << EOF | {} {}".format(path, args),
72, break_on_hyphens=False,
drop_whitespace=False))
for l in trace.splitlines():
result += "\n" + '\\\n'.join(textwrap.wrap(l,72,drop_whitespace=False))
result += "\nEOF"
return result
def c_reproducer(name, args, trace):
result = []
result.append("""static void {}(void)\n{{""".format(name))
# libqtest will add its own qtest args, so get rid of them
args = args.replace("-accel qtest","")
args = args.replace(",accel=qtest","")
args = args.replace("-machine accel=qtest","")
args = args.replace("-qtest stdio","")
result.append("""QTestState *s = qtest_init("{}");""".format(args))
for l in trace.splitlines():
param = l.split()
cmd = param[0]
if cmd == "write":
buf = param[3][2:] #Get the 0x... buffer and trim the "0x"
assert len(buf)%2 == 0
bufbytes = [buf[i:i+2] for i in range(0, len(buf), 2)]
bufstring = '\\x'+'\\x'.join(bufbytes)
addr = param[1]
size = param[2]
result.append("""qtest_bufwrite(s, {}, "{}", {});""".format(
addr, bufstring, size))
elif cmd.startswith("in") or cmd.startswith("read"):
result.append("qtest_{}(s, {});".format(
cmd, param[1]))
elif cmd.startswith("out") or cmd.startswith("write"):
result.append("qtest_{}(s, {}, {});".format(
cmd, param[1], param[2]))
elif cmd == "clock_step":
if len(param) ==1:
result.append("qtest_clock_step_next(s);")
else:
result.append("qtest_clock_step(s, {});".format(param[1]))
result.append("qtest_quit(s);\n}")
return "\n".join(result)
def METHOD_NAME(name, arch):
return """int main(int argc, char **argv)
{{
const char *arch = qtest_get_arch();
g_test_init(&argc, &argv, NULL);
if (strcmp(arch, "{arch}") == 0) {{
qtest_add_func("fuzz/{name}",{name});
}}
return g_test_run();
}}""".format(name=name, arch=arch)
def main():
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument("-bash", help="Only output a copy-pastable bash command",
action="store_true")
group.add_argument("-c", help="Only output a c function",
action="store_true")
parser.add_argument('-owner', help="If generating complete C source code, \
this specifies the Copyright owner",
nargs='?', default="<name of author>")
parser.add_argument("-no_comment", help="Don't include a bash reproducer \
as a comment in the C reproducers",
action="store_true")
parser.add_argument('-name', help="The name of the c function",
nargs='?', default="test_fuzz")
parser.add_argument('input_trace', help="input QTest command sequence \
(stdin by default)",
nargs='?', type=argparse.FileType('r'),
default=sys.stdin)
args = parser.parse_args()
qemu_path = os.getenv("QEMU_PATH")
qemu_args = os.getenv("QEMU_ARGS")
if not qemu_args or not qemu_path:
print("Please set QEMU_PATH and QEMU_ARGS environment variables")
sys.exit(1)
bash_args = qemu_args
if " -qtest stdio" not in qemu_args:
bash_args += " -qtest stdio"
arch = qemu_path.split("-")[-1]
trace = args.input_trace.read().strip()
if args.bash :
print(bash_reproducer(qemu_path, bash_args, trace))
else:
output = ""
if not args.c:
output += c_header(args.owner) + "\n"
if not args.no_comment:
output += c_comment(bash_reproducer(qemu_path, bash_args, trace))
output += c_reproducer(args.name, qemu_args, trace)
if not args.c:
output += METHOD_NAME(args.name, arch)
print(output)
if __name__ == '__main__':
main()
|
4,109 | get to choices |
# Copyright The IETF Trust 2013-2020, All Rights Reserved
# -*- coding: utf-8 -*-
from django import forms
from ietf.group.models import Group, Role
from ietf.utils.html import unescape
from ietf.ietfauth.utils import has_role
from ietf.message.models import Message, AnnouncementFrom
from ietf.utils.fields import MultiEmailField
# ---------------------------------------------
# Globals
# ---------------------------------------------
TO_LIST = ('IETF Announcement List <[email protected]>',
'I-D Announcement List <[email protected]>',
'RFP Announcement List <[email protected]>',
'The IESG <[email protected]>',
'Working Group Chairs <[email protected]>',
'BOF Chairs <[email protected]>',
'Other...')
# ---------------------------------------------
# Helper Functions
# ---------------------------------------------
def get_from_choices(user):
'''
This function returns a choices tuple containing
all the Announced From choices. Including
leadership chairs and other entities.
'''
addresses = []
if has_role(user,'Secretariat'):
addresses = AnnouncementFrom.objects.values_list('address', flat=True).order_by('address').distinct()
else:
for role in user.person.role_set.all():
addresses.extend(AnnouncementFrom.objects.filter(name=role.name, group=role.group).values_list('address', flat=True).order_by('address'))
nomcom_choices = get_nomcom_choices(user)
if nomcom_choices:
addresses = list(addresses) + nomcom_choices
return list(zip(addresses, addresses))
def get_nomcom_choices(user):
'''
Returns the list of nomcom email addresses for given user
'''
nomcoms = Role.objects.filter(name="chair",
group__acronym__startswith="nomcom",
group__state="active",
group__type="nomcom",
person=user.person)
addresses = []
for nomcom in nomcoms:
year = nomcom.group.acronym[-4:]
addresses.append('NomCom Chair %s <nomcom-chair-%[email protected]>' % (year,year))
return addresses
def METHOD_NAME():
return list(zip(TO_LIST,TO_LIST))
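# The choices produced above are (value, label) pairs taken directly from
# TO_LIST, e.g. ('Other...', 'Other...'), which feed the Select widget for the
# "To" field in AnnounceForm below.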
# ---------------------------------------------
# Forms
# ---------------------------------------------
class AnnounceForm(forms.ModelForm):
nomcom = forms.ModelChoiceField(queryset=Group.objects.filter(acronym__startswith='nomcom',type='nomcom',state='active'),required=False)
to_custom = MultiEmailField(required=False)
class Meta:
model = Message
fields = ('nomcom', 'to','to_custom','frm','cc','bcc','reply_to','subject','body')
def __init__(self, *args, **kwargs):
if 'hidden' in kwargs:
self.hidden = kwargs.pop('hidden')
else:
self.hidden = False
user = kwargs.pop('user')
person = user.person
super(AnnounceForm, self).__init__(*args, **kwargs)
self.fields['to'].widget = forms.Select(choices=METHOD_NAME())
self.fields['to'].help_text = 'Select name OR select Other... and enter email below'
self.fields['cc'].help_text = 'Use comma separated lists for emails (Cc, Bcc, Reply To)'
self.fields['frm'].widget = forms.Select(choices=get_from_choices(user))
self.fields['frm'].label = 'From'
self.fields['reply_to'].required = True
self.fields['nomcom'].label = 'NomCom message:'
nomcom_roles = person.role_set.filter(group__in=self.fields['nomcom'].queryset,name='chair')
secr_roles = person.role_set.filter(group__acronym='secretariat',name='secr')
if nomcom_roles:
self.initial['nomcom'] = nomcom_roles[0].group.pk
if not nomcom_roles and not secr_roles:
self.fields['nomcom'].widget = forms.HiddenInput()
if self.hidden:
for key in list(self.fields.keys()):
self.fields[key].widget = forms.HiddenInput()
def clean(self):
super(AnnounceForm, self).clean()
data = self.cleaned_data
if self.errors:
return self.cleaned_data
if data['to'] == 'Other...' and not data['to_custom']:
raise forms.ValidationError('You must enter a "To" email address')
for k in ['to', 'frm', 'cc',]:
data[k] = unescape(data[k])
return data
def save(self, *args, **kwargs):
user = kwargs.pop('user')
message = super(AnnounceForm, self).save(commit=False)
message.by = user.person
if self.cleaned_data['to'] == 'Other...':
message.to = self.cleaned_data['to_custom']
if kwargs['commit']:
message.save()
# handle nomcom message
nomcom = self.cleaned_data.get('nomcom',False)
if nomcom:
message.related_groups.add(nomcom)
return message
|
4,110 | rst body |
# Copyright (C) British Crown (Met Office) & Contributors.
# This file is part of Rose, a framework for meteorological suites.
#
# Rose is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rose is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rose. If not, see <http://www.gnu.org/licenses/>.
from argparse import ArgumentParser
import re
from subprocess import DEVNULL, PIPE, Popen
import sys
from textwrap import dedent, indent
from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.statemachine import ViewList
import sphinx
from sphinx.util.nodes import nested_parse_with_titles
# RE_CMD = re.compile(r'^={5,}$\n^(.*)$\n^={5,}$')
RE_CMD = re.compile(r'={5,}\n')
RE_USAGE = re.compile(r'Usage: (.*)')
RE_SECTION = re.compile(r'^(([A-Z][A-Z: ]+)|Options:)$')
RST_HEADINGS = ['=', '-', '^', '"']
RE_LITERAL = re.compile(r'(?<!\`)\`(?!\`)([^\`]+)(?<!\`)\`(?!\`)')
def format_literals(text, references_to_match=None, ref_template='%s'):
r"""Replace single back-quotes with double ones.
Optionally replace certain strings with references to other sections in
this document.
Args:
text (str): The text to format literals in.
references_to_match (list): List of strings which will be replaced
by rst document references using the ref_template.
ref_template (str): Template string for the document reference.
Examples:
>>> format_literals('a`x`a b`x` `x`c `x`')
'a\\ ``x``\\ a b\\ ``x`` ``x``\\ c ``x``\\ '
>>> format_literals('here-`bar`', ['bar'], 'foo%sbaz')
'here-:ref:`foobarbaz`\\ '
Returns: str
"""
if not references_to_match:
references_to_match = []
ref_template = ':ref:`{0}`'.format(ref_template)
for match in reversed([x for x in RE_LITERAL.finditer(text)]):
repl = ''
start, end = match.span()
if start > 0:
pre_char = text[start - 1]
else:
pre_char = '\n'
if pre_char != ' ' and pre_char != '\n':
repl += '\\ '
body = match.group()[1:-1]
if body in references_to_match:
repl = ref_template % body.replace(' ', '-')
else:
repl += '``%s``' % body
if end < len(text) - 2:
post_char = text[end]
else:
post_char = ''
if post_char not in [' ', '\n']:
repl += '\\ '
text = text[:start] + repl + text[end:]
return text
def list_strip(lst):
"""
Examples:
>>> list_strip(['', '', 'foo', '', ''])
['foo']
>>> list_strip(['foo', '', 'bar'])
['foo', '', 'bar']
"""
for item in list(lst):
if not item:
lst.pop(0)
else:
break
for item in reversed(lst):
if not item:
lst.pop(-1)
else:
break
return lst
def split(text):
parts = RE_CMD.split(text)
return {
cmd.strip(): parse(content.strip())
for cmd, content in zip(parts[1::2], parts[2::2])
}
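# split() assumes the layout produced by the `<ns> doc` output: each command is
# introduced by a line of "=" characters, its name, then its help text (an
# assumed sketch; the command name below is a placeholder), e.g.
#
#     ==========
#     rose app-run
#     ==========
#     Usage: rose app-run [OPTIONS] ...
#     DESCRIPTION
#     ...
#
# so parts[1::2] are the command names and parts[2::2] their help bodies.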
def parse(text):
if not text:
return
lines = text.splitlines()
ret = {}
try:
ret['USAGE'] = RE_USAGE.search(lines[0]).group()
except AttributeError:
pass
except IndexError:
breakpoint()
section = 'DESCRIPTION'
buffer = []
for line in lines[1:]:
if RE_SECTION.search(line):
ret[section] = '\n'.join(list_strip(buffer))
buffer = []
section = RE_SECTION.search(line).groups()[0].replace(':', '')
else:
buffer.append(line)
else:
ret[section] = '\n'.join(list_strip(buffer))
if 'USAGE' not in ret:
try:
ret['USAGE'] = ret.pop('SYNOPSIS')
except KeyError:
ret['USAGE'] = ''
if 'NAME' in ret:
ret.pop('NAME')
return ret
def rst_heading(text, level=0):
return f'\n{text}\n{RST_HEADINGS[level] * len(text)}\n'
def rst_code_block(code, lang=None):
return (
'\n'
f'.. code-block:: {lang or ""}'
'\n\n'
+ indent(dedent(code), ' ' * 3)
+ '\n'
)
return dedent(f'''
.. code-block:: {lang}
{indent(dedent(code), ' ' * 0)}\n
''')
def rst_anchor(text):
return f'\n.. _{text}:\n'
def METHOD_NAME(text):
return f'\n{format_literals(dedent(text))}\n'
def write(ns, cmds, _write):
for cmd, content in cmds.items():
if 'USAGE' not in content:
breakpoint()
_write(
rst_anchor(f'command-{cmd.replace(" ", "-")}')
)
_write(
rst_heading(cmd, 1)
)
_write(
rst_code_block(content['USAGE'], 'bash')
)
_write(
METHOD_NAME(content['DESCRIPTION'])
)
for key, text in content.items():
key = key.upper()
if key in {'USAGE', 'DESCRIPTION'}:
continue
_write(
rst_heading(key.capitalize(), 2)
)
if key in {'OPTIONS'}:
_write(
rst_code_block(text)
)
elif key in {'EXAMPLE', 'EXAMPLES'}:
_write(
rst_code_block(text, 'bash')
)
else:
_write(
METHOD_NAME(text)
)
def load_from_file(filename):
with open(filename, 'r') as doc_file:
return doc_file.read().strip()
def load_from_cli(ns):
return Popen(
[f'{ns}', 'doc'],
stdin=DEVNULL,
stdout=PIPE,
text=True
).communicate()[0].strip()
def get_parser():
parser = ArgumentParser()
parser.add_argument(
'ns',
default=None,
help='Rose namespace i.e. rose, rosie, rosa'
)
parser.add_argument(
'doc_file',
nargs='?',
default=None,
help='read `rose doc` output from a file (for testing).'
)
return parser
def test():
parser = get_parser()
args = parser.parse_args()
if args.doc_file:
text = load_from_file(args.doc_file)
else:
text = load_from_cli(args.ns)
main(args.ns, text, sys.stdout.write)
def make(ns):
text = load_from_cli(ns)
lines = []
def _write(text):
nonlocal lines
lines.extend(text.splitlines())
main(ns, text, _write)
return lines
def main(ns, text, _write):
cmds = split(text)
write(ns, cmds, _write)
class AutoCLIDoc(Directive):
"""A custom ReStructured Text directive for auto-documenting CLIs.
Directive Args:
cli_help_format (str): The type of command line help to generate help
for (only option "rose").
command (str): The command to document.
"""
option_spec = {}
required_arguments = 2
def run(self):
# The rose command to document (i.e. rose / rosie)
_, ns = self.arguments[0:2]
lines = make(ns)
# Parse these lines into a docutills node.
node = nodes.section()
node.document = self.state.document
nested_parse_with_titles(self.state, ViewList(lines), node)
# Return the children of this node (the generated nodes).
return node.children
def setup(app):
"""Sphinx setup function."""
app.add_directive('auto-cli-doc', AutoCLIDoc)
return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
if __name__ == '__main__':
test()
|
4,111 |
lint file
|
from __future__ import annotations
import abc
import glob
from collections.abc import Iterable
from pathlib import Path
from typing import Any, Protocol
import click
import yaml
from commodore.config import Config
from commodore.helpers import yaml_load_all
from .lint_dependency_specification import lint_components, lint_packages
from .lint_deprecated_parameters import lint_deprecated_parameters
class LintFunc(Protocol):
def __call__(self, file: Path, filecontents: dict[str, Any]) -> int:
...
class Linter:
@abc.abstractmethod
def __call__(
self, config: Config, path: Path, ignore_patterns: tuple[str, ...] = ()
) -> int:
...
class ComponentSpecLinter(Linter):
def __call__(
self, config: Config, path: Path, ignore_patterns: tuple[str, ...] = ()
) -> int:
return run_linter(config, path, ignore_patterns, lint_components)
class DeprecatedParameterLinter(Linter):
def __call__(
self, config: Config, path: Path, ignore_patterns: tuple[str, ...] = ()
) -> int:
return run_linter(config, path, ignore_patterns, lint_deprecated_parameters)
class PackageSpecLinter(Linter):
def __call__(
self, config: Config, path: Path, ignore_patterns: tuple[str, ...] = ()
) -> int:
return run_linter(config, path, ignore_patterns, lint_packages)
LINTERS = {
"components": ComponentSpecLinter(),
"deprecated-parameters": DeprecatedParameterLinter(),
"packages": PackageSpecLinter(),
}
def METHOD_NAME(cfg: Config, file: Path, lintfunc: LintFunc) -> int:
errcount = 0
try:
filecontents = yaml_load_all(file)
if len(filecontents) == 0:
if cfg.debug:
click.echo(f"> Skipping empty file {file}")
elif len(filecontents) > 1:
if cfg.debug:
click.echo(
f"> Skipping file {file}: Linting multi-document YAML streams is not supported",
)
elif not isinstance(filecontents[0], dict):
if cfg.debug:
click.echo(
f"> Skipping file {file}: Expected top-level dictionary in YAML document"
)
else:
errcount = lintfunc(file, filecontents[0])
except (yaml.YAMLError, UnicodeDecodeError) as e:
if cfg.debug:
click.echo(f"> Skipping file {file}: Unable to load as YAML: {e}")
return errcount
def _lint_directory(
cfg: Config, path: Path, ignore_paths: set[Path], lintfunc: LintFunc
) -> int:
if not path.is_dir():
raise ValueError("Unexpected path argument: expected to be a directory")
errcount = 0
for dentry in path.iterdir():
if dentry.stem.startswith("."):
if cfg.debug:
click.echo(f"> Skipping hidden directory entry {dentry}")
continue
if dentry.absolute() in ignore_paths:
if cfg.debug:
click.echo(f"> Skipping ignored directory entry {dentry}")
continue
if dentry.is_dir():
errcount += _lint_directory(cfg, dentry, ignore_paths, lintfunc)
else:
errcount += METHOD_NAME(cfg, dentry, lintfunc)
return errcount
def _read_ignore_patterns_from_file(path: Path) -> set[str]:
ignore_patterns = set()
if path.is_file():
with open(path, "r", encoding="utf-8") as ifile:
for p in ifile.readlines():
ignore_patterns.add(p.strip())
return ignore_patterns
def _render_ignore_patterns(
base_dir: Path, ignore_patterns: tuple[str, ...]
) -> set[Path]:
"""Use `glob.glob()` to render the paths to ignore from `ignore_patterns`.
All patterns are rooted at `base_dir`.
If a pattern doesn't start with /, it is treated as matching under any prefix
in `base_dir`."""
# Read additional ignore patterns from `<base_dir>/.commodoreignore`, if the file
# exists
_ignore_patterns = set(ignore_patterns)
# Extend ignore patterns set with ignore patterns from `.commodoreignore`
_ignore_patterns |= _read_ignore_patterns_from_file(base_dir / ".commodoreignore")
ignore_paths: set[str] = set()
for pat in _ignore_patterns:
if pat.startswith("/"):
pat = f"{base_dir.absolute()}{pat}"
else:
pat = f"{base_dir.absolute()}/**/{pat}"
ignore_paths |= set(glob.glob(pat, recursive=True))
return {Path(p) for p in ignore_paths}
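# Illustrative sketch only (hypothetical paths): with base_dir=/repo and
# ignore_patterns=("/class/defaults.yml", "tests"), the globs rendered above are
#
#     /repo/class/defaults.yml   (anchored: pattern starts with "/")
#     /repo/**/tests             (unanchored: matched under any prefix)
#
# and the returned set holds the Paths those globs match on disk, plus anything
# listed in /repo/.commodoreignore.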
def run_linter(
cfg: Config, path: Path, ignore_patterns: tuple[str, ...], lintfunc: LintFunc
) -> int:
"""Run lint function `lintfunc` in `path`.
If `path` is a directory, run the function in all `.ya?ml` files in the directory
(recursively).
If `path` is a file, run the lint function in that file, if it's a YAML file.
Returns a value that can be used as exit code to indicate whether there were linting
errors.
"""
base_dir = path.absolute()
if not base_dir.is_dir():
base_dir = path.parent
ignore_paths = _render_ignore_patterns(base_dir, ignore_patterns)
if path.absolute() in ignore_paths:
if cfg.debug:
click.echo(f" > Skipping ignored path {path}")
return 0
if path.is_dir():
return _lint_directory(cfg, path, ignore_paths, lintfunc)
return METHOD_NAME(cfg, path, lintfunc)
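# Illustrative usage only (hypothetical path, assumes a configured Config
# instance): running a registered linter over a directory tree returns the
# number of lint errors, which callers can use directly as an exit code.
#
#     errcount = LINTERS["components"](config, Path("inventory/classes"))
#     # errcount == 0 means no component-specification errors were found.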
def check_removed_reclass_variables(
config: Config, location: str, paths: Iterable[Path]
):
lint = DeprecatedParameterLinter()
errcount = 0
for path in paths:
errcount += lint(config, path)
# Raise error if any linting errors occurred
if errcount > 0:
raise click.ClickException(
f"Found {errcount} usages of removed reclass variables "
+ f"in the {location}. See individual lint errors for details."
)
|
4,112 |
test serialisation with zero alpha
|
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import imath
import IECore
import Gaffer
import GafferTest
import GafferImage
import GafferImageTest
class ConstantTest( GafferImageTest.ImageTestCase ) :
def testChannelData( self ) :
constant = GafferImage.Constant()
constant["format"].setValue( GafferImage.Format( imath.Box2i( imath.V2i( 0 ), imath.V2i( 511 ) ), 1 ) )
constant["color"].setValue( imath.Color4f( 0, 0.25, 0.5, 1 ) )
for i, channel in enumerate( [ "R", "G", "B", "A" ] ) :
channelData = constant["out"].channelData( channel, imath.V2i( 0 ) )
self.assertEqual( len( channelData ), constant["out"].tileSize() * constant["out"].tileSize() )
expectedValue = constant["color"][i].getValue()
for value in channelData :
self.assertEqual( value, expectedValue )
def testChannelDataHash( self ) :
# The hash for each individual channel should only
# be affected by that particular channel of the colour plug.
constant = GafferImage.Constant()
constant["format"].setValue( GafferImage.Format( imath.Box2i( imath.V2i( 0 ), imath.V2i( 511 ) ), 1 ) )
constant["color"].setValue( imath.Color4f( 0 ) )
channels = [ "R", "G", "B", "A" ]
for i, channel in enumerate( channels ) :
h1 = [ constant["out"].channelDataHash( c, imath.V2i( 0 ) ) for c in channels ]
constant["color"][i].setValue( constant["color"][i].getValue() + .1 )
h2 = [ constant["out"].channelDataHash( c, imath.V2i( 0 ) ) for c in channels ]
for j in range( 0, len( channels ) ) :
if j == i :
self.assertNotEqual( h1[j], h2[j] )
else :
self.assertEqual( h1[j], h2[j] )
def testFormatHash( self ) :
# Check that the data hash doesn't change when the format does.
c = GafferImage.Constant()
c["format"].setValue( GafferImage.Format( 2048, 1156, 1. ) )
h1 = c["out"].channelData( "R", imath.V2i( 0 ) ).hash()
c["format"].setValue( GafferImage.Format( 1920, 1080, 1. ) )
h2 = c["out"].channelData( "R", imath.V2i( 0 ) ).hash()
self.assertEqual( h1, h2 )
def testTileHashes( self ) :
# Test that two tiles within the image have the same hash.
c = GafferImage.Constant()
c["format"].setValue( GafferImage.Format( 2048, 1156, 1. ) )
c["color"][0].setValue( .5 )
self.assertEqual(
c["out"].channelDataHash( "R", imath.V2i( 0 ) ),
c["out"].channelDataHash( "R", imath.V2i( GafferImage.ImagePlug().tileSize() ) ),
)
def testTileIdentity( self ) :
c = GafferImage.Constant()
c["format"].setValue( GafferImage.Format( 2048, 1156, 1. ) )
# The channelData() binding returns a copy by default, so we wouldn't
# expect two tiles to be referencing the same object.
self.assertFalse(
c["out"].channelData( "R", imath.V2i( 0 ) ).isSame(
c["out"].channelData( "R", imath.V2i( GafferImage.ImagePlug.tileSize() ) )
)
)
# But behind the scenes we do want them to be the same, so
# check that that is the case.
self.assertTrue(
c["out"].channelData( "R", imath.V2i( 0 ), _copy = False ).isSame(
c["out"].channelData( "R", imath.V2i( GafferImage.ImagePlug.tileSize() ), _copy = False )
)
)
def testEnableBehaviour( self ) :
c = GafferImage.Constant()
self.assertTrue( c.enabledPlug().isSame( c["enabled"] ) )
self.assertEqual( c.correspondingInput( c["out"] ), None )
self.assertEqual( c.correspondingInput( c["color"] ), None )
self.assertEqual( c.correspondingInput( c["format"] ), None )
def testChannelNamesHash( self ) :
c = GafferImage.Constant()
h1 = c["out"]["channelNames"].hash()
c["color"].setValue( imath.Color4f( 1, 0.5, 0.25, 1 ) )
h2 = c["out"]["channelNames"].hash()
self.assertEqual( h1, h2 )
def METHOD_NAME( self ) :
s = Gaffer.ScriptNode()
s["c"] = GafferImage.Constant()
s["c"]["color"].setValue( imath.Color4f( 0, 1, 0, 0 ) )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( s2["c"]["color"].getValue(), imath.Color4f( 0, 1, 0, 0 ) )
def testFormatDependencies( self ) :
c = GafferImage.Constant()
self.assertEqual(
c.affects( c["format"]["displayWindow"]["min"]["x"] ),
[ c["out"]["format"], c["out"]["dataWindow"] ],
)
# For the sake of simplicity when dealing with falling back to a default format from the context,
# we make all child plugs of the format affect everything that depends at all on the format
self.assertEqual(
c.affects( c["format"]["pixelAspect"] ),
[ c["out"]["format"], c["out"]["dataWindow"] ],
)
def testLayer( self ) :
c1 = GafferImage.Constant()
c2 = GafferImage.Constant()
c1["color"].setValue( imath.Color4f( 1, 0.5, 0.25, 1 ) )
c2["color"].setValue( imath.Color4f( 1, 0.5, 0.25, 1 ) )
c2["layer"].setValue( "diffuse" )
self.assertEqual(
c1["out"]["channelNames"].getValue(),
IECore.StringVectorData( [ "R", "G", "B", "A" ] )
)
self.assertEqual(
c2["out"]["channelNames"].getValue(),
IECore.StringVectorData( [ "diffuse.R", "diffuse.G", "diffuse.B", "diffuse.A" ] )
)
for channelName in ( "R", "G", "B", "A" ) :
self.assertEqual(
c1["out"].channelDataHash( channelName, imath.V2i( 0 ) ),
c2["out"].channelDataHash( "diffuse." + channelName, imath.V2i( 0 ) )
)
self.assertEqual(
c1["out"].channelData( channelName, imath.V2i( 0 ) ),
c2["out"].channelData( "diffuse." + channelName, imath.V2i( 0 ) )
)
def testLayerAffectsChannelNames( self ) :
c = GafferImage.Constant()
cs = GafferTest.CapturingSlot( c.plugDirtiedSignal() )
c["layer"].setValue( "diffuse" )
self.assertTrue( c["out"]["channelNames"] in set( [ x[0] for x in cs ] ) )
if __name__ == "__main__":
unittest.main()
|
4,113 |
rouge
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility for evaluating various tasks, e.g., translation & summarization."""
import codecs
import os
import re
import subprocess
import tensorflow as tf
from ..scripts import bleu
from ..scripts import rouge
__all__ = ["evaluate"]
def evaluate(ref_file, trans_file, metric, subword_option=None):
"""Pick a metric and evaluate depending on task."""
# BLEU scores for translation task
if metric.lower() == "bleu":
evaluation_score = _bleu(ref_file, trans_file,
subword_option=subword_option)
# ROUGE scores for summarization tasks
elif metric.lower() == "rouge":
evaluation_score = METHOD_NAME(ref_file, trans_file,
subword_option=subword_option)
elif metric.lower() == "accuracy":
evaluation_score = _accuracy(ref_file, trans_file)
elif metric.lower() == "word_accuracy":
evaluation_score = _word_accuracy(ref_file, trans_file)
else:
raise ValueError("Unknown metric %s" % metric)
return evaluation_score
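# Illustrative usage only (hypothetical file names): all metrics are returned
# on a 0-100 scale.
#
#     score = evaluate("data/ref.txt", "out/translations.txt", "bleu",
#                      subword_option="bpe")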
def _clean(sentence, subword_option):
"""Clean and handle BPE or SPM outputs."""
sentence = sentence.strip()
# BPE
if subword_option == "bpe":
sentence = re.sub("@@ ", "", sentence)
# SPM
elif subword_option == "spm":
sentence = u"".join(sentence.split()).replace(u"\u2581", u" ").lstrip()
return sentence
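# Illustrative examples only (not from the original module):
#
#     _clean("the new@@ est model", "bpe")                 -> "the newest model"
#     _clean(u"\u2581the \u2581newest \u2581model", "spm")  -> "the newest model"
#
# With subword_option=None the sentence is only stripped of surrounding whitespace.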
# Follow //transconsole/localization/machine_translation/metrics/bleu_calc.py
def _bleu(ref_file, trans_file, subword_option=None):
"""Compute BLEU scores and handling BPE."""
max_order = 4
smooth = False
ref_files = [ref_file]
reference_text = []
for reference_filename in ref_files:
with codecs.getreader("utf-8")(
tf.gfile.GFile(reference_filename, "rb")) as fh:
reference_text.append(fh.readlines())
per_segment_references = []
for references in zip(*reference_text):
reference_list = []
for reference in references:
reference = _clean(reference, subword_option)
reference_list.append(reference.split(" "))
per_segment_references.append(reference_list)
translations = []
with codecs.getreader("utf-8")(tf.gfile.GFile(trans_file, "rb")) as fh:
for line in fh:
line = _clean(line, subword_option=None)
translations.append(line.split(" "))
# bleu_score, precisions, bp, ratio, translation_length, reference_length
bleu_score, _, _, _, _, _ = bleu.compute_bleu(
per_segment_references, translations, max_order, smooth)
return 100 * bleu_score
def METHOD_NAME(ref_file, summarization_file, subword_option=None):
"""Compute ROUGE scores and handling BPE."""
references = []
with codecs.getreader("utf-8")(tf.gfile.GFile(ref_file, "rb")) as fh:
for line in fh:
references.append(_clean(line, subword_option))
hypotheses = []
with codecs.getreader("utf-8")(
tf.gfile.GFile(summarization_file, "rb")) as fh:
for line in fh:
hypotheses.append(_clean(line, subword_option=None))
rouge_score_map = rouge.rouge(hypotheses, references)
return 100 * rouge_score_map["rouge_l/f_score"]
def _accuracy(label_file, pred_file):
"""Compute accuracy, each line contains a label."""
with codecs.getreader("utf-8")(tf.gfile.GFile(label_file, "rb")) as label_fh:
with codecs.getreader("utf-8")(tf.gfile.GFile(pred_file, "rb")) as pred_fh:
count = 0.0
match = 0.0
for label in label_fh:
label = label.strip()
pred = pred_fh.readline().strip()
if label == pred:
match += 1
count += 1
return 100 * match / count
def _word_accuracy(label_file, pred_file):
"""Compute accuracy on per word basis."""
with codecs.getreader("utf-8")(tf.gfile.GFile(label_file, "r")) as label_fh:
with codecs.getreader("utf-8")(tf.gfile.GFile(pred_file, "r")) as pred_fh:
total_acc, total_count = 0., 0.
for sentence in label_fh:
labels = sentence.strip().split(" ")
preds = pred_fh.readline().strip().split(" ")
match = 0.0
for pos in range(min(len(labels), len(preds))):
label = labels[pos]
pred = preds[pos]
if label == pred:
match += 1
total_acc += 100 * match / max(len(labels), len(preds))
total_count += 1
return total_acc / total_count
def _moses_bleu(multi_bleu_script, tgt_test, trans_file, subword_option=None):
"""Compute BLEU scores using Moses multi-bleu.perl script."""
# TODO(thangluong): perform rewrite using python
# BPE
if subword_option == "bpe":
debpe_tgt_test = tgt_test + ".debpe"
if not os.path.exists(debpe_tgt_test):
# TODO(thangluong): not use shell=True, can be a security hazard
subprocess.call("cp %s %s" % (tgt_test, debpe_tgt_test), shell=True)
subprocess.call("sed s/@@ //g %s" % (debpe_tgt_test),
shell=True)
tgt_test = debpe_tgt_test
elif subword_option == "spm":
despm_tgt_test = tgt_test + ".despm"
if not os.path.exists(despm_tgt_test):
subprocess.call("cp %s %s" % (tgt_test, despm_tgt_test))
subprocess.call("sed s/ //g %s" % (despm_tgt_test))
subprocess.call(u"sed s/^\u2581/g %s" % (despm_tgt_test))
subprocess.call(u"sed s/\u2581/ /g %s" % (despm_tgt_test))
tgt_test = despm_tgt_test
cmd = "%s %s < %s" % (multi_bleu_script, tgt_test, trans_file)
# subprocess
# TODO(thangluong): not use shell=True, can be a security hazard
bleu_output = subprocess.check_output(cmd, shell=True)
# extract BLEU score
m = re.search("BLEU = (.+?),", bleu_output)
bleu_score = float(m.group(1))
return bleu_score
|
4,114 |
add args
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq import utils
from fairseq.models import (
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.models.fconv import FConvDecoder
from fairseq.utils import safe_hasattr
@register_model("fconv_lm")
class FConvLanguageModel(FairseqLanguageModel):
def __init__(self, decoder):
super().__init__(decoder)
@staticmethod
def METHOD_NAME(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension",
)
parser.add_argument(
"--decoder-layers",
type=str,
metavar="EXPR",
help="decoder layers [(dim, kernel_size), ...]",
)
parser.add_argument(
"--decoder-out-embed-dim",
type=int,
metavar="N",
help="decoder output embedding dimension",
)
parser.add_argument(
"--adaptive-softmax-cutoff",
metavar="EXPR",
help="comma separated list of adaptive softmax cutoff points. "
"Must be used with adaptive_loss criterion",
)
parser.add_argument(
"--adaptive-softmax-dropout",
type=float,
metavar="D",
help="sets adaptive softmax dropout for the tail projections",
)
parser.add_argument(
"--decoder-attention",
type=str,
metavar="EXPR",
help="decoder attention [True, ...]",
)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_lm_architecture(args)
if safe_hasattr(args, "max_target_positions") and not safe_hasattr(
args, "tokens_per_sample"
):
args.tokens_per_sample = args.max_target_positions
decoder = FConvDecoder(
dictionary=task.target_dictionary,
embed_dim=args.decoder_embed_dim,
convolutions=eval(args.decoder_layers),
out_embed_dim=args.decoder_embed_dim,
attention=eval(args.decoder_attention),
dropout=args.dropout,
max_positions=args.tokens_per_sample,
share_embed=False,
positional_embeddings=False,
adaptive_softmax_cutoff=(
utils.eval_str_list(args.adaptive_softmax_cutoff, type=int)
if args.criterion == "adaptive_loss"
else None
),
adaptive_softmax_dropout=args.adaptive_softmax_dropout,
)
return FConvLanguageModel(decoder)
@register_model_architecture("fconv_lm", "fconv_lm")
def base_lm_architecture(args):
args.dropout = getattr(args, "dropout", 0.1)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 128)
args.decoder_layers = getattr(args, "decoder_layers", "[(1268, 4)] * 13")
args.decoder_attention = getattr(args, "decoder_attention", "False")
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
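# Illustrative note (not from the original module): decoder_layers is a Python
# expression string that build_model() above passes through eval(), so e.g.
# "[(1268, 4)] * 13" becomes a list of 13 (width, kernel_size) tuples describing
# the convolutional decoder stack (the GBW architecture below uses 3-tuples with
# an extra per-layer element).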
@register_model_architecture("fconv_lm", "fconv_lm_dauphin_wikitext103")
def fconv_lm_dauphin_wikitext103(args):
layers = "[(850, 6)] * 3"
layers += " + [(850, 1)] * 1"
layers += " + [(850, 5)] * 4"
layers += " + [(850, 1)] * 1"
layers += " + [(850, 4)] * 3"
layers += " + [(1024, 4)] * 1"
layers += " + [(2048, 4)] * 1"
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 280)
args.decoder_layers = getattr(args, "decoder_layers", layers)
args.decoder_attention = getattr(args, "decoder_attention", "False")
args.adaptive_softmax_cutoff = getattr(
args, "adaptive_softmax_cutoff", "10000,20000,200000"
)
base_lm_architecture(args)
@register_model_architecture("fconv_lm", "fconv_lm_dauphin_gbw")
def fconv_lm_dauphin_gbw(args):
layers = "[(512, 5)]"
layers += " + [(128, 1, 0), (128, 5, 0), (512, 1, 3)] * 3"
layers += " + [(512, 1, 0), (512, 5, 0), (1024, 1, 3)] * 3"
layers += " + [(1024, 1, 0), (1024, 5, 0), (2048, 1, 3)] * 6"
layers += " + [(1024, 1, 0), (1024, 5, 0), (4096, 1, 3)]"
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 128)
args.decoder_layers = getattr(args, "decoder_layers", layers)
args.decoder_attention = getattr(args, "decoder_attention", "False")
args.adaptive_softmax_cutoff = getattr(
args, "adaptive_softmax_cutoff", "10000,50000,200000"
)
base_lm_architecture(args)
|
4,115 |
prepare
|
#!/usr/bin/env python3
# -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
import argparse
import re
import failure_lib as lib
import os
import sys
def get_first_line_of_file(fname):
with open(fname, encoding="utf-8", errors="ignore") as f:
return f.readline()
class AnalyzerCommandPathModifier:
def __init__(self, opts):
self.opts = opts
def __call__(self, path):
# Find a clang executable that can be "clang" or "clang-<version>".
# The version here is only a simple number (no dot inside), since
# clang binaries are named with the whole release number only.
if re.search(r'clang(-(\d)+)?$', path):
return self.opts.clang
if self.opts.ctu_dir is not None and re.search(r'\.plist$', path):
# Put a plist (seemingly analyzer output) file into the
# report_debug directory, that is 2 levels above ctu_dir
# ("report_debug/ctu-dir/<target>").
return os.path.join(
os.path.dirname(
os.path.dirname(
self.opts.ctu_dir.rstrip(os.path.sep))),
os.path.basename(path))
if self.opts.clang_plugin_name is not None and\
re.search(self.opts.clang_plugin_name, path):
if self.opts.clang_plugin_path is None:
print("clang_plugin_name is in a path, "
"but clang_plugin_path is not given in the options")
sys.exit(-1)
return self.opts.clang_plugin_path
if re.search('ctu-dir', path):
if self.opts.ctu_dir is None:
print('ctu-dir is in a path, but not in the options!')
sys.exit(-1)
return self.opts.ctu_dir
return os.path.join(
self.opts.sources_root,
os.path.normpath(
path.lstrip(
os.path.sep)))
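# Illustrative mapping only (hypothetical option values): with
# opts.clang='/usr/bin/clang-14', opts.sources_root='./sources-root' and
# opts.ctu_dir='report_debug/ctu-dir/x86_64', the modifier above rewrites
#
#     /usr/lib64/ccache/clang-10   -> /usr/bin/clang-14
#     /some/old/ctu-dir            -> report_debug/ctu-dir/x86_64
#     /home/user/project/main.cpp  -> ./sources-root/home/user/project/main.cpp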
class PathOptions:
def __init__(
self,
sources_root,
clang,
clang_plugin_name,
clang_plugin_path,
ctu_dir):
self.sources_root = sources_root
self.clang = clang
self.clang_plugin_name = clang_plugin_name
self.clang_plugin_path = clang_plugin_path
self.ctu_dir = ctu_dir
def METHOD_NAME(analyzer_command_file, pathOptions):
res = lib.change_paths(get_first_line_of_file(analyzer_command_file),
AnalyzerCommandPathModifier(pathOptions))
if '-nobuiltininc' not in res:
return res
# Find Clang include path
clang_include_path = lib.get_resource_dir(pathOptions.clang) + '/include'
if clang_include_path is None:
clang_lib_path = os.path.dirname(pathOptions.clang) + '/../lib'
clang_include_path = ''
for path, _, files in os.walk(clang_lib_path):
if 'stddef.h' in files:
clang_include_path = path
break
if clang_include_path is None:
return res
return res.replace('-nobuiltininc',
'-nobuiltininc -isystem ' + clang_include_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Prepare analyzer-command '
'to execute in local environment.')
parser.add_argument(
'analyzer_command_file',
help="The stored analyzer command.")
parser.add_argument(
'--sources_root',
default='./sources-root',
help="Path of the source root.")
parser.add_argument(
'--ctu_dir',
default=None,
help="Path of the used ctu-dir.")
parser.add_argument(
'--clang',
required=True,
help="Path to the clang binary.")
parser.add_argument(
'--clang_plugin_name', default=None,
help="Name of the used clang plugin.")
parser.add_argument(
'--clang_plugin_path', default=None,
help="Path to the used clang plugin.")
args = parser.parse_args()
print(
METHOD_NAME(
args.analyzer_command_file,
PathOptions(
args.sources_root,
args.clang,
args.clang_plugin_name,
args.clang_plugin_path,
args.ctu_dir)))
|
4,116 |
start vertex
|
from compas.geometry import BrepEdge
from compas.geometry import Line
from compas.geometry import Circle
from compas.geometry import Ellipse
from compas.geometry import Frame
from compas.geometry import Arc
from compas_rhino.geometry import RhinoNurbsCurve
from compas_rhino.conversions import curve_to_compas_line
from compas_rhino.conversions import plane_to_compas_frame
from compas_rhino.conversions import circle_to_compas
from compas_rhino.conversions import ellipse_to_compas
from compas_rhino.conversions import ellipse_to_rhino
from compas_rhino.conversions import circle_to_rhino
from compas_rhino.conversions import frame_to_rhino_plane
from compas_rhino.conversions import line_to_rhino
from compas_rhino.conversions import arc_to_compas
from compas_rhino.conversions import arc_to_rhino
from Rhino.Geometry import ArcCurve
from Rhino.Geometry import NurbsCurve
from Rhino.Geometry import LineCurve
from Rhino.Geometry import Interval
from .vertex import RhinoBrepVertex
class RhinoBrepEdge(BrepEdge):
"""A wrapper for Rhino.Geometry.BrepEdge.
The expected native type here is a Rhino.Geometry.BrepTrim.
A BrepTrim holds a reference to its associated BrepEdge as well as its start and end vertices
in the correct topological order (!).
Attributes
----------
curve : :class:`Rhino.Geometry.Curve3D`
The underlying geometry of this edge.
start_vertex : :class:`~compas_rhino.geometry.RhinoBrepVertex`, read-only
The start vertex of this edge (taken from BrepTrim).
end_vertex : :class:`~compas_rhino.geometry.RhinoBrepVertex`, read-only
The end vertex of this edge (taken from BrepTrim).
vertices : list[:class:`~compas_rhino.geometry.RhinoBrepVertex`], read-only
The list of vertices which comprise this edge (start and end).
is_circle : bool, read-only
True if the geometry of this edge is a circle, False otherwise.
is_line : bool, read-only
True if the geometry of this edge is a line, False otherwise.
"""
def __init__(self, rhino_edge=None, builder=None):
super(RhinoBrepEdge, self).__init__()
self._builder = builder
self._edge = None
self._curve = None
self._curve_type = None
self._start_vertex = None
self._end_vertex = None
if rhino_edge:
self._set_edge(rhino_edge)
def _set_edge(self, rhino_edge):
self._edge = rhino_edge
self._curve = RhinoNurbsCurve.from_rhino(rhino_edge.EdgeCurve.ToNurbsCurve())
self._start_vertex = RhinoBrepVertex(rhino_edge.StartVertex)
self._end_vertex = RhinoBrepVertex(rhino_edge.EndVertex)
# ==============================================================================
# Data
# ==============================================================================
@property
def data(self):
curve_type, curve, plane, domain = self._get_curve_geometry()
return {
"curve_type": curve_type,
"curve": curve.data,
"frame": plane_to_compas_frame(plane).data,
"start_vertex": self._edge.StartVertex.VertexIndex,
"end_vertex": self._edge.EndVertex.VertexIndex,
"domain": domain,
}
@data.setter
def data(self, value):
edge_curve = self._create_curve_from_data(value["curve_type"], value["curve"], value["frame"], value["domain"])
edge = self._builder.add_edge(edge_curve, value["start_vertex"], value["end_vertex"])
self._set_edge(edge)
@classmethod
def from_data(cls, data, builder):
"""Construct an object of this type from the provided data.
Parameters
----------
data : dict
The data dictionary.
builder : :class:`~compas_rhino.geometry.BrepBuilder`
The object reconstructing the current Brep.
Returns
-------
:class:`~compas.data.Data`
An instance of this object type if the data contained in the dict has the correct schema.
"""
obj = cls(builder=builder)
obj.data = data
return obj
# ==============================================================================
# Properties
# ==============================================================================
@property
def curve(self):
return self._curve
@property
def METHOD_NAME(self):
return self._start_vertex
@property
def end_vertex(self):
return self._end_vertex
@property
def vertices(self):
return [self.METHOD_NAME, self.end_vertex]
@property
def is_circle(self):
return self._edge.EdgeCurve.IsCircle()
@property
def is_line(self):
return self._edge.EdgeCurve.IsLinear()
@property
def is_ellipse(self):
return self._edge.EdgeCurve.IsEllipse()
def _get_curve_geometry(self):
curve = self._edge.EdgeCurve
domain = [self._edge.Domain[0], self._edge.Domain[1]]
_, frame = curve.FrameAt(0)
if isinstance(curve, LineCurve):
return "line", curve_to_compas_line(curve), frame, domain
if isinstance(curve, NurbsCurve):
return "nurbs", RhinoNurbsCurve.from_rhino(curve), frame, domain
if isinstance(curve, ArcCurve):
if not curve.IsClosed:
return "arc", arc_to_compas(curve.Arc), curve.Arc.Plane, domain
is_circle, circle = curve.TryGetCircle()
if is_circle:
return "circle", circle_to_compas(circle), circle.Plane, domain
is_ellipse, ellipse = curve.TryGetEllipse()
if is_ellipse:
return "ellipse", ellipse_to_compas(ellipse), ellipse.Plane, domain
return "nurbs", curve.ToNurbsCurve(), frame, domain
raise ValueError("Unknown curve type: {}".format(curve.__class__.__name__))
@staticmethod
def _create_curve_from_data(curve_type, curve_data, frame_data, domain):
frame = Frame.from_data(frame_data)
if curve_type == "line":
line = Line.from_data(curve_data)
curve = LineCurve(line_to_rhino(line))
elif curve_type == "circle":
circle = circle_to_rhino(Circle.from_data(curve_data))
circle.Plane = frame_to_rhino_plane(frame)
curve = ArcCurve(circle)
elif curve_type == "ellipse":
ellipse = ellipse_to_rhino(Ellipse.from_data(curve_data))
ellipse.Plane = frame_to_rhino_plane(frame)
curve = NurbsCurve.CreateFromEllipse(ellipse)
elif curve_type == "arc":
arc = arc_to_rhino(Arc.from_data(curve_data))
curve = ArcCurve(arc)
elif curve_type == "nurbs":
curve = RhinoNurbsCurve.from_data(curve_data).rhino_curve
else:
raise ValueError("Unknown curve type: {}".format(curve_type))
curve.Domain = Interval(*domain)
return curve
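# Illustrative shape of the serialized edge data produced by the `data` property
# above (hypothetical values; `curve` and `frame` hold the corresponding COMPAS
# data dicts):
#
#     {"curve_type": "line", "curve": {...}, "frame": {...},
#      "start_vertex": 0, "end_vertex": 1, "domain": [0.0, 1.0]}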
|
4,117 |
get send msg progress
|
import base64
import hmac
import time
from common.sdk.im.mixin import BaseRequest
from common.sdk.im.utils import digest, as_request
from common.utils import get_logger
from users.utils import construct_user_email
logger = get_logger(__file__)
def sign(secret, data):
digest = hmac.HMAC(
key=secret.encode('utf8'),
msg=data.encode('utf8'),
digestmod=hmac._hashlib.sha256
).digest()
signature = base64.standard_b64encode(digest).decode('utf8')
# signature = urllib.parse.quote(signature, safe='')
# signature = signature.replace('+', '%20').replace('*', '%2A').replace('~', '%7E').replace('/', '%2F')
return signature
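# Illustrative usage only (hypothetical values): DingTalk requests are signed
# with the millisecond timestamp as the HMAC message,
#
#     timestamp = str(int(time.time() * 1000))       # e.g. '1700000000000'
#     signature = sign('my-app-secret', timestamp)   # base64-encoded HMAC-SHA256
#
# which is what _add_sign() below attaches as the 'timestamp' and 'signature'
# query parameters (together with 'accessKey').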
class ErrorCode:
INVALID_TOKEN = 88
class URL:
QR_CONNECT = 'https://oapi.dingtalk.com/connect/qrconnect'
OAUTH_CONNECT = 'https://oapi.dingtalk.com/connect/oauth2/sns_authorize'
GET_USER_INFO_BY_CODE = 'https://oapi.dingtalk.com/sns/getuserinfo_bycode'
GET_TOKEN = 'https://oapi.dingtalk.com/gettoken'
SEND_MESSAGE_BY_TEMPLATE = 'https://oapi.dingtalk.com/topapi/message/corpconversation/sendbytemplate'
SEND_MESSAGE = 'https://oapi.dingtalk.com/topapi/message/corpconversation/asyncsend_v2'
GET_SEND_MSG_PROGRESS = 'https://oapi.dingtalk.com/topapi/message/corpconversation/getsendprogress'
GET_USERID_BY_UNIONID = 'https://oapi.dingtalk.com/topapi/user/getbyunionid'
GET_USER_INFO_BY_USER_ID = 'https://oapi.dingtalk.com/topapi/v2/user/get'
class DingTalkRequests(BaseRequest):
invalid_token_errcodes = (ErrorCode.INVALID_TOKEN,)
msg_key = 'errmsg'
def __init__(self, appid, appsecret, agentid, timeout=None):
self._appid = appid or ''
self._appsecret = appsecret or ''
self._agentid = agentid or ''
super().__init__(timeout=timeout)
def get_access_token_cache_key(self):
return digest(self._appid, self._appsecret)
def request_access_token(self):
# https://developers.dingtalk.com/document/app/obtain-orgapp-token?spm=ding_open_doc.document.0.0.3a256573JEWqIL#topic-1936350
params = {'appkey': self._appid, 'appsecret': self._appsecret}
data = self.raw_request('get', url=URL.GET_TOKEN, params=params)
access_token = data['access_token']
expires_in = data['expires_in']
return access_token, expires_in
def add_token(self, kwargs: dict):
params = kwargs.get('params')
if params is None:
params = {}
kwargs['params'] = params
params['access_token'] = self.access_token
def get(self, url, params=None,
with_token=False, with_sign=False,
check_errcode_is_0=True,
**kwargs):
pass
get = as_request(get)
def post(self, url, json=None, params=None,
with_token=False, with_sign=False,
check_errcode_is_0=True,
**kwargs) -> dict:
pass
post = as_request(post)
def _add_sign(self, kwargs: dict):
params = kwargs.get('params')
if params is None:
params = {}
kwargs['params'] = params
timestamp = str(int(time.time() * 1000))
signature = sign(self._appsecret, timestamp)
params['timestamp'] = timestamp
params['signature'] = signature
params['accessKey'] = self._appid
def request(self, method, url,
with_token=False, with_sign=False,
check_errcode_is_0=True,
**kwargs):
if with_sign:
self._add_sign(kwargs)
data = super().request(
method, url, with_token=with_token,
check_errcode_is_0=check_errcode_is_0, **kwargs
)
return data
class DingTalk:
def __init__(self, appid, appsecret, agentid, timeout=None):
self._appid = appid or ''
self._appsecret = appsecret or ''
self._agentid = agentid or ''
self._request = DingTalkRequests(
appid=appid, appsecret=appsecret, agentid=agentid,
timeout=timeout
)
def get_userinfo_bycode(self, code):
# https://developers.dingtalk.com/document/app/obtain-the-user-information-based-on-the-sns-temporary-authorization?spm=ding_open_doc.document.0.0.3a256573y8Y7yg#topic-1995619
body = {
"tmp_auth_code": code
}
data = self._request.post(URL.GET_USER_INFO_BY_CODE, json=body, with_sign=True)
return data['user_info']
def get_user_id_by_code(self, code):
user_info = self.get_userinfo_bycode(code)
unionid = user_info['unionid']
userid = self.get_userid_by_unionid(unionid)
return userid, None
def get_userid_by_unionid(self, unionid):
body = {
'unionid': unionid
}
data = self._request.post(URL.GET_USERID_BY_UNIONID, json=body, with_token=True)
userid = data['result']['userid']
return userid
def send_by_template(self, template_id, user_ids, dept_ids, data):
body = {
'agent_id': self._agentid,
'template_id': template_id,
'userid_list': ','.join(user_ids),
'dept_id_list': ','.join(dept_ids),
'data': data
}
data = self._request.post(URL.SEND_MESSAGE_BY_TEMPLATE, json=body, with_token=True)
return data
def send_markdown(self, user_ids, title, msg):
body = {
'agent_id': self._agentid,
'userid_list': ','.join(user_ids),
'to_all_user': False,
'msg': {
'msgtype': 'markdown',
'markdown': {
'title': title,
'text': msg
}
}
}
logger.info(f'Dingtalk send markdown to user {user_ids}: {msg}')
data = self._request.post(URL.SEND_MESSAGE, json=body, with_token=True)
return data
def send_text(self, user_ids, msg):
body = {
'agent_id': self._agentid,
'userid_list': ','.join(user_ids),
'to_all_user': False,
'msg': {
'msgtype': 'text',
'text': {
'content': msg
}
}
}
logger.info(f'Dingtalk send msg to user {user_ids}: {msg}')
data = self._request.post(URL.SEND_MESSAGE, json=body, with_token=True)
return data
def METHOD_NAME(self, task_id):
body = {
'agent_id': self._agentid,
'task_id': task_id
}
data = self._request.post(URL.GET_SEND_MSG_PROGRESS, json=body, with_token=True)
return data
def get_user_detail(self, user_id, **kwargs):
# https://open.dingtalk.com/document/orgapp/query-user-details
body = {'userid': user_id}
data = self._request.post(
URL.GET_USER_INFO_BY_USER_ID, json=body, with_token=True
)
data = data['result']
username = user_id
name = data.get('name', username)
email = data.get('email') or data.get('org_email')
email = construct_user_email(username, email)
return {
'username': username, 'name': name, 'email': email
}
|
4,118 |
test swap name
|
import json
from django.contrib.messages import get_messages
from django.test import Client, TestCase
from django.urls import reverse
from unittest import mock
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.locations.exceptions import LocationConsistencyError
from corehq.apps.locations.models import LocationType
from corehq.apps.locations.views import LocationTypesView
from corehq.apps.users.models import WebUser
OTHER_DETAILS = {
'expand_from': None,
'expand_to': None,
'expand_from_root': False,
'include_without_expanding': None,
'include_only': [],
'parent_type': '',
'administrative': '',
'shares_cases': False,
'view_descendants': False,
}
class LocationTypesViewTest(TestCase):
@classmethod
def setUpClass(cls):
super(LocationTypesViewTest, cls).setUpClass()
cls.domain = "test-domain"
cls.project = create_domain(cls.domain)
cls.couch_user = WebUser.create(cls.domain, "test", "foobar", None, None)
cls.couch_user.add_domain_membership(cls.domain, is_admin=True)
cls.couch_user.set_role(cls.domain, "admin")
cls.couch_user.save()
cls.loc_type1 = LocationType(domain=cls.domain, name='type1', code='code1')
cls.loc_type1.save()
cls.loc_type2 = LocationType(domain=cls.domain, name='type2', code='code2')
cls.loc_type2.save()
def setUp(self):
self.url = reverse(LocationTypesView.urlname, args=[self.domain])
self.client = Client()
self.client.login(username='test', password='foobar')
@classmethod
def tearDownClass(cls):
cls.couch_user.delete(cls.domain, deleted_by=None)
cls.project.delete()
super(LocationTypesViewTest, cls).tearDownClass()
@mock.patch('django_prbac.decorators.has_privilege', return_value=True)
def send_request(self, data, _):
return self.client.post(self.url, {'json': json.dumps(data)})
def test_missing_property(self):
with self.assertRaises(LocationConsistencyError):
self.send_request({'loc_types': [{}]})
def METHOD_NAME(self):
loc_type1 = OTHER_DETAILS.copy()
loc_type2 = OTHER_DETAILS.copy()
loc_type1.update({'name': self.loc_type2.name, 'pk': self.loc_type1.pk})
loc_type2.update({'name': self.loc_type1.name, 'pk': self.loc_type2.pk})
data = {'loc_types': [loc_type1, loc_type2]}
response = self.send_request(data)
messages = list(get_messages(response.wsgi_request))
self.assertEqual(
str(messages[0].message),
'Looks like you are assigning a location name/code to a different location in the same request. '
'Please do this in two separate updates by using a temporary name to free up the name/code to be '
're-assigned.'
)
def test_swap_code(self):
loc_type1 = OTHER_DETAILS.copy()
loc_type2 = OTHER_DETAILS.copy()
loc_type1.update({'name': self.loc_type1.name, 'pk': self.loc_type1.pk, 'code': self.loc_type2.code})
loc_type2.update({'name': self.loc_type2.name, 'pk': self.loc_type2.pk, 'code': self.loc_type1.code})
data = {'loc_types': [loc_type1, loc_type2]}
response = self.send_request(data)
messages = list(get_messages(response.wsgi_request))
self.assertEqual(
str(messages[0].message),
'Looks like you are assigning a location name/code to a different location in the same request. '
'Please do this in two separate updates by using a temporary name to free up the name/code to be '
're-assigned.'
)
def test_valid_update(self):
loc_type1 = OTHER_DETAILS.copy()
loc_type2 = OTHER_DETAILS.copy()
loc_type1.update({'name': "new name", 'pk': self.loc_type1.pk, 'code': self.loc_type1.code})
loc_type2.update({'name': "new name 2", 'pk': self.loc_type2.pk, 'code': self.loc_type2.code})
data = {'loc_types': [loc_type1, loc_type2]}
response = self.send_request(data)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, self.url)
|
4,119 |
widgets
|
'''
conv_ellipse.py
Copyright (C) 2021, 2022 Phillip A Carter
Copyright (C) 2021, 2022 Gregory D Carl
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
'''
from PyQt5.QtCore import Qt, QCoreApplication
from PyQt5.QtWidgets import QLabel, QMessageBox
from importlib import reload
from plasmac import ellipse as ELLIPSE
_translate = QCoreApplication.translate
def preview(P, W, Conv):
if P.dialogError:
return
if not W.xsEntry.text():
W.xsEntry.setText('{:0.3f}'.format(P.xOrigin))
if not W.ysEntry.text():
W.ysEntry.setText('{:0.3f}'.format(P.yOrigin))
origin = W.centLeft.text() == 'CENTER'
error = ELLIPSE.preview(Conv, P.fTmp, P.fNgc, P.fNgcBkp, \
int(W.conv_material.currentText().split(':')[0]), \
W.conv_material.currentText().split(':')[1].strip(), \
P.preAmble, P.postAmble, \
W.liEntry.text(), W.loEntry.text(), \
origin, W.xsEntry.text(), W.ysEntry.text(), \
W.kerf_width.value(), P.intExt, \
W.wEntry.text(), W.hEntry.text(), W.aEntry.text(), P.unitsPerMm)
if error:
P.dialogError = True
P.dialog_show_ok(QMessageBox.Warning, _translate('Conversational', 'Ellipse Error'), error)
else:
W.conv_preview.load(P.fNgc)
W.conv_preview.set_current_view()
W.add.setEnabled(True)
W.undo.setEnabled(True)
Conv.conv_preview_button(P, W, True)
def entry_changed(P, W, Conv, widget):
Conv.conv_entry_changed(P, W, widget)
def auto_preview(P, W, Conv, button=False):
if button == 'intext':
if not W.intExt.isChecked():
return
Conv.conv_auto_preview_button(P, W, button)
elif button == 'center':
if not W.centLeft.isChecked():
return
Conv.conv_auto_preview_button(P, W, button)
if W.main_tab_widget.currentIndex() == 1 and W.wEntry.text() and W.hEntry.text():
preview(P, W, Conv)
def METHOD_NAME(P, W, Conv):
if P.developmentPin.get():
reload(ELLIPSE)
W.hLabel.setText(_translate('Conversational', 'HEIGHT'))
W.hEntry.setObjectName('')
W.lDesc.setText(_translate('Conversational', 'CREATING ELLIPSE'))
W.iLabel.setPixmap(P.conv_ellipse_l)
#alignment and size
rightAlign = ['ctLabel', 'spLabel', 'xsLabel', 'xsEntry', 'ysLabel', \
'ysEntry', 'liLabel', 'liEntry', 'loLabel', 'loEntry', \
'wLabel', 'wEntry', 'hLabel', 'hEntry', 'aLabel', 'aEntry']
centerAlign = ['lDesc']
rButton = ['intExt', 'centLeft']
pButton = ['preview', 'add', 'undo']
for widget in rightAlign:
W[widget].setAlignment(Qt.AlignRight | Qt.AlignVCenter)
W[widget].setFixedWidth(80)
W[widget].setFixedHeight(24)
for widget in centerAlign:
W[widget].setAlignment(Qt.AlignCenter | Qt.AlignBottom)
W[widget].setFixedWidth(240)
W[widget].setFixedHeight(24)
for widget in rButton:
W[widget].setFixedWidth(80)
W[widget].setFixedHeight(24)
for widget in pButton:
W[widget].setFixedWidth(80)
W[widget].setFixedHeight(24)
#connections
W.conv_material.currentTextChanged.connect(lambda:auto_preview(P, W, Conv))
W.intExt.toggled.connect(lambda:auto_preview(P, W, Conv, 'intext'))
W.centLeft.toggled.connect(lambda:auto_preview(P, W, Conv, 'center'))
W.preview.pressed.connect(lambda:preview(P, W, Conv))
W.add.pressed.connect(lambda:Conv.conv_add_shape_to_file(P, W))
W.undo.pressed.connect(lambda:Conv.conv_undo_shape(P, W))
entries = ['xsEntry', 'ysEntry', 'liEntry', 'loEntry', 'wEntry', 'hEntry', 'aEntry']
for entry in entries:
W[entry].textChanged.connect(lambda:entry_changed(P, W, Conv, W.sender()))
W[entry].returnPressed.connect(lambda:preview(P, W, Conv))
#add to layout
if P.landscape:
W.entries.addWidget(W.ctLabel, 0, 0)
W.entries.addWidget(W.intExt, 0, 1)
W.entries.addWidget(W.spLabel, 1, 0)
W.entries.addWidget(W.centLeft, 1, 1)
W.entries.addWidget(W.xsLabel, 2, 0)
W.entries.addWidget(W.xsEntry, 2, 1)
W.entries.addWidget(W.ysLabel, 3, 0)
W.entries.addWidget(W.ysEntry, 3, 1)
W.entries.addWidget(W.liLabel, 4, 0)
W.entries.addWidget(W.liEntry, 4, 1)
W.entries.addWidget(W.loLabel, 5, 0)
W.entries.addWidget(W.loEntry, 5, 1)
W.entries.addWidget(W.wLabel, 6, 0)
W.entries.addWidget(W.wEntry, 6, 1)
W.entries.addWidget(W.hLabel, 7, 0)
W.entries.addWidget(W.hEntry, 7, 1)
W.entries.addWidget(W.aLabel, 8, 0)
W.entries.addWidget(W.aEntry, 8, 1)
for r in [9,10,11]:
W['s{}'.format(r)] = QLabel('')
W['s{}'.format(r)].setFixedHeight(24)
W.entries.addWidget(W['s{}'.format(r)], r, 0)
W.entries.addWidget(W.preview, 12, 0)
W.entries.addWidget(W.add, 12, 2)
W.entries.addWidget(W.undo, 12, 4)
W.entries.addWidget(W.lDesc, 13 , 1, 1, 3)
W.entries.addWidget(W.iLabel, 0 , 2, 7, 3)
else:
W.entries.addWidget(W.conv_material, 0, 0, 1, 5)
W.entries.addWidget(W.ctLabel, 1, 0)
W.entries.addWidget(W.intExt, 1, 1)
W.entries.addWidget(W.spLabel, 2, 0)
W.entries.addWidget(W.centLeft, 2, 1)
W.entries.addWidget(W.xsLabel, 3, 0)
W.entries.addWidget(W.xsEntry, 3, 1)
W.entries.addWidget(W.ysLabel, 3, 2)
W.entries.addWidget(W.ysEntry, 3, 3)
W.entries.addWidget(W.liLabel, 4, 0)
W.entries.addWidget(W.liEntry, 4, 1)
W.entries.addWidget(W.loLabel, 4, 2)
W.entries.addWidget(W.loEntry, 4, 3)
W.entries.addWidget(W.wLabel, 5, 0)
W.entries.addWidget(W.wEntry, 5, 1)
W.entries.addWidget(W.hLabel, 5, 2)
W.entries.addWidget(W.hEntry, 5, 3)
W.entries.addWidget(W.aLabel, 6, 0)
W.entries.addWidget(W.aEntry, 6, 1)
for r in [7,8]:
W['s{}'.format(r)] = QLabel('')
W['s{}'.format(r)].setFixedHeight(24)
W.entries.addWidget(W['s{}'.format(r)], r, 0)
W.entries.addWidget(W.preview, 9, 0)
W.entries.addWidget(W.add, 9, 2)
W.entries.addWidget(W.undo, 9, 4)
W.entries.addWidget(W.lDesc, 10 , 1, 1, 3)
W.entries.addWidget(W.iLabel, 0 , 5, 7, 3)
W.wEntry.setFocus()
P.convSettingsChanged = False
|
4,120 |
alanize fix
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Main views are inherited from Beeswax.
import base64
import logging
import json
import struct
import sys
from django.views.decorators.http import require_POST
from beeswax.api import error_handler
from beeswax.models import Session
from beeswax.server import dbms as beeswax_dbms
from beeswax.views import authorized_get_query_history
from desktop.lib.django_util import JsonResponse
from desktop.lib.thrift_util import unpack_guid
from desktop.models import Document2
from jobbrowser.apis.query_api import _get_api
from impala import dbms
from impala.server import get_api as get_impalad_api, _get_impala_server_url
from libanalyze import analyze as analyzer, rules
from notebook.models import make_notebook
if sys.version_info[0] > 2:
from django.utils.translation import gettext as _
else:
from django.utils.translation import ugettext as _
LOG = logging.getLogger()
ANALYZER = rules.TopDownAnalysis() # We need to parse some files so save as global
@require_POST
@error_handler
def invalidate(request):
cluster = json.loads(request.POST.get('cluster', '{}'))
database = request.POST.get('database', None)
table = request.POST.get('table', None)
flush_all = request.POST.get('flush_all', 'false').lower() == 'true'
query_server = dbms.get_query_server_config(connector=None) # TODO: connector support
db = beeswax_dbms.get(request.user, query_server=query_server)
response = {'status': 0, 'message': ''}
db.invalidate(database=database, table=table, flush_all=flush_all)
response['message'] = _('Successfully invalidated metadata')
return JsonResponse(response)
@require_POST
@error_handler
def refresh_table(request, database, table):
query_server = dbms.get_query_server_config()
db = beeswax_dbms.get(request.user, query_server=query_server)
response = {'status': 0, 'message': ''}
db.refresh_table(database, table)
response['message'] = _('Successfully refreshed metadata for `%s`.`%s`') % (database, table)
return JsonResponse(response)
@require_POST
@error_handler
def get_exec_summary(request, query_history_id):
query_server = dbms.get_query_server_config()
db = beeswax_dbms.get(request.user, query_server=query_server)
response = {'status': -1}
query_history = authorized_get_query_history(request, query_history_id, must_exist=True)
if query_history is None:
response['message'] = _('get_exec_summary requires a valid query_history_id')
else:
session = Session.objects.get_session(request.user, query_server['server_name'])
operation_handle = query_history.get_handle().get_rpc_handle()
session_handle = session.get_handle()
summary = db.get_exec_summary(operation_handle, session_handle)
response['status'] = 0
response['summary'] = summary
return JsonResponse(response)
@require_POST
@error_handler
def get_runtime_profile(request, query_history_id):
query_server = dbms.get_query_server_config()
db = beeswax_dbms.get(request.user, query_server=query_server)
response = {'status': -1}
query_history = authorized_get_query_history(request, query_history_id, must_exist=True)
if query_history is None:
response['message'] = _('get_runtime_profile requires a valid query_history_id')
else:
session = Session.objects.get_session(request.user, query_server['server_name'])
operation_handle = query_history.get_handle().get_rpc_handle()
session_handle = session.get_handle()
profile = db.get_runtime_profile(operation_handle, session_handle)
response['status'] = 0
response['profile'] = profile
return JsonResponse(response)
@require_POST
@error_handler
def alanize(request):
response = {'status': -1}
cluster = json.loads(request.POST.get('cluster', '{}'))
query_id = json.loads(request.POST.get('query_id'))
api = _get_api(request.user, cluster=cluster)
if query_id:
LOG.debug("Attempting to get Impala query profile for query ID: %s" % (query_id))
doc = Document2.objects.get(id=query_id)
snippets = doc.data_dict.get('snippets', [])
secret = snippets[0]['result']['handle']['secret']
impala_query_id = unpack_guid(base64.decodestring(secret))
query_profile = api.get_query_profile_encoded(impala_query_id)
profile = analyzer.analyze(analyzer.parse_data(query_profile))
ANALYZER.pre_process(profile)
result = ANALYZER.run(profile)
heatmap = {}
summary = analyzer.summary(profile)
heatmapMetrics = ['AverageThreadTokens', 'BloomFilterBytes', 'PeakMemoryUsage', 'PerHostPeakMemUsage', 'PrepareTime', 'RowsProduced', 'TotalCpuTime', 'TotalNetworkReceiveTime', 'TotalNetworkSendTime', 'TotalStorageWaitTime', 'TotalTime']
for key in heatmapMetrics:
metrics = analyzer.heatmap_by_host(profile, key)
if metrics['data']:
heatmap[key] = metrics
response['data'] = { 'query': { 'healthChecks' : result[0]['result'], 'summary': summary, 'heatmap': heatmap, 'heatmapMetrics': sorted(list(heatmap.keys())) } }
response['status'] = 0
return JsonResponse(response)
def alanize_metrics(request):
response = {'status': -1}
cluster = json.loads(request.POST.get('cluster', '{}'))
query_id = json.loads(request.POST.get('query_id'))
api = _get_api(request.user, cluster=cluster)
if query_id:
LOG.debug("Attempting to get Impala query profile for query ID: %s" % (query_id))
query_profile = api.get_query_profile_encoded(query_id)
profile = analyzer.analyze(analyzer.parse_data(query_profile))
ANALYZER.pre_process(profile)
metrics = analyzer.metrics(profile)
response['data'] = metrics
response['status'] = 0
return JsonResponse(response)
@require_POST
@error_handler
def METHOD_NAME(request):
response = {'status': -1}
cluster = json.loads(request.POST.get('cluster', '{}'))
fix = json.loads(request.POST.get('fix'))
start_time = json.loads(request.POST.get('start_time', '-1'))
if fix['id'] == 0:
notebook = make_notebook(
name=_('compute stats %(data)s') % fix,
editor_type='impala',
statement='compute stats %(data)s' % fix,
status='ready',
last_executed=start_time,
is_task=True,
compute=cluster
)
response['details'] = { 'task': notebook.execute(request, batch=True) }
response['status'] = 0
return JsonResponse(response)
|
4,121 |
test subscription manager list docs
|
import doctest
from insights.parsers import subscription_manager_list
from insights.tests import context_wrap
subscription_manager_list_consumed_in_docs = '''
+-------------------------------------------+
Consumed Subscriptions
+-------------------------------------------+
Subscription Name: Red Hat Enterprise Linux Server, Premium (1-2 sockets) (Up to 1 guest)
Provides: Oracle Java (for RHEL Server)
Red Hat Software Collections Beta (for RHEL Server)
Red Hat Enterprise Linux Server
Red Hat Beta
SKU: RH0155783S
Contract: 12345678
Account: 1000001
Serial: 0102030405060708090
Pool ID: 8a85f981477e5284014783abaf5d4dcd
Active: True
Quantity Used: 1
Service Level: PREMIUM
Service Type: L1-L3
Status Details: Subscription is current
Subscription Type: Standard
Starts: 11/14/14
Ends: 07/06/15
System Type: Physical
'''
subscription_manager_list_installed_in_docs = '''
+-------------------------------------------+
Installed Product Status
+-------------------------------------------+
Product Name: Red Hat Software Collections (for RHEL Server)
Product ID: 201
Version: 2
Arch: x86_64
Status: Subscribed
Status Details:
Starts: 04/27/15
Ends: 04/27/16
Product Name: Red Hat Enterprise Linux Server
Product ID: 69
Version: 7.1
Arch: x86_64
Status: Subscribed
Status Details:
Starts: 04/27/15
Ends: 04/27/16
'''
subscription_manager_list_test_data = '''
+-------------------------------------------+
Consumed Subscriptions
+-------------------------------------------+
Subscription Name: Red Hat Enterprise Linux Server, Premium (1-2 sockets) (Up to 1 guest)
Subscription Type: Standard
Starts: 17/2
'''
subscription_manager_list_no_installed_products = '''
No installed products to list
'''
subscription_manager_list_errors = """
Traceback (most recent call last):
File "/usr/sbin/subscription-manager", line 9, in <module>
load_entry_point('subscription-manager==1.21.10', 'console_scripts', 'subscription-manager')()
File "/usr/lib/python2.7/site-packages/pkg_resources.py", line 378, in load_entry_point
return get_distribution(dist).load_entry_point(group, name)
File "/usr/lib/python2.7/site-packages/pkg_resources.py", line 2566, in load_entry_point
return ep.load()
File "/usr/lib/python2.7/site-packages/pkg_resources.py", line 2260, in load
entry = __import__(self.module_name, globals(),globals(), ['__name__'])
File "/usr/lib64/python2.7/site-packages/subscription_manager/scripts/subscription_manager.py", line 29, in <module>
if six.PY2:
AttributeError: 'module' object has no attribute 'PY2'
"""
def test_subscription_manager_list_exceptions():
sml = subscription_manager_list.SubscriptionManagerListConsumed(
context_wrap(subscription_manager_list_test_data)
)
assert len(sml.records) == 1
rec0 = sml.records[0]
assert 'Subscription Name' in rec0
assert 'Subscription Type' in rec0
assert 'Starts' in rec0
assert rec0['Starts'] == '17/2'
assert 'Starts timestamp' not in rec0
sml = subscription_manager_list.SubscriptionManagerListInstalled(
context_wrap(subscription_manager_list_no_installed_products)
)
assert sml.records == []
def METHOD_NAME():
env = {
'installed': subscription_manager_list.SubscriptionManagerListInstalled(context_wrap(subscription_manager_list_installed_in_docs)),
'consumed': subscription_manager_list.SubscriptionManagerListConsumed(context_wrap(subscription_manager_list_consumed_in_docs)),
}
failed, total = doctest.testmod(subscription_manager_list, globs=env)
assert failed == 0
def test_exception():
sml = subscription_manager_list.SubscriptionManagerListConsumed(
context_wrap(subscription_manager_list_errors)
)
assert not sml.records
assert "AttributeError: 'module' object has no attribute 'PY2'" in sml.error
|
4,122 |
test qa mocks
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""Test desitarget.QA.
"""
import unittest
import os
import sys
import shutil
import tempfile
import warnings
import numpy as np
import healpy as hp
from pkg_resources import resource_filename
from glob import glob
from desitarget.QA import make_qa_page, _load_systematics
from desitarget.QA import _parse_tcnames, _in_desi_footprint
from desiutil.log import get_logger
log = get_logger()
_macos = sys.platform == 'darwin'
class TestQA(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.datadir = resource_filename('desitarget.test', 't/')
cls.targfile = os.path.join(cls.datadir, 'targets.fits')
cls.mocktargfile = os.path.join(cls.datadir, 'targets-mocks.fits')
cls.cmxfile = os.path.join(cls.datadir, 'cmx-targets.fits')
cls.pixmapfile = os.path.join(cls.datadir, 'pixweight.fits')
cls.origdir = os.getcwd()
cls.testdir = tempfile.mkdtemp()
log.info("working in {}...".format(cls.testdir))
os.chdir(cls.testdir)
@classmethod
def tearDownClass(cls):
# - Remove all test input and output files.
os.chdir(cls.origdir)
if os.path.exists(cls.testdir):
shutil.rmtree(cls.testdir)
def setUp(self):
# Treat some specific warnings as errors so that we can find and fix them.
# warnings.filterwarnings('error', '.*Mean of empty slice.*')
# warnings.filterwarnings('error', '.*Using or importing the ABCs.*')
warnings.filterwarnings('error', '.*invalid value encountered.*')
# SJB Always make sure we start in the test directory
os.chdir(self.testdir)
def tearDown(self):
# ADM Remove the output files.
# SJB only in testdir, just in case something else did a chdir
os.chdir(self.testdir)
for filelist in [sorted(glob("*png")), sorted(glob("*html")), sorted(glob("*dat"))]:
for filename in filelist:
if os.path.exists(filename):
os.remove(filename)
@unittest.skipIf(_macos, "Skipping parallel test that fails on macOS.")
def test_qa_main(self):
"""Test plots/pages made for some main survey target types.
"""
# ADM note that these might not all be in the test files
# ADM but this also tests passing via tcnames.
tcnames = ["ALL", "BGS_FAINT"]
# ADM the large max_bin_area helps speed the tests.
make_qa_page(self.targfile, qadir=self.testdir, max_bin_area=99.,
imaging_map_file=self.pixmapfile, tcnames=tcnames)
pngs, htmls = len(glob("*png")), len(glob("*html"))
dats, alls = len(glob("*dat")), len(glob("./*"))
sysplots = len(_load_systematics())
# ADM one webpage is made per tc, plus the index.html.
self.assertEqual(htmls, len(tcnames)+1)
# ADM 4 N(m) plots are made per tc.
self.assertEqual(dats, 4*len(tcnames))
# ADM 11 plots are made per tc, plus 2 lots of systematics plots.
self.assertEqual(pngs, 11*len(tcnames)+2*sysplots)
# ADM there are only .html, .dat and .png files.
self.assertEqual(pngs+htmls+dats, alls)
@unittest.skipIf(_macos, "Skipping parallel test that fails on macOS.")
def test_qa_cmx(self):
"""Test plots/pages are made for some commissioning targets.
"""
# ADM the large max_bin_area helps speed the tests.
make_qa_page(self.cmxfile, qadir=self.testdir, max_bin_area=99.,
systematics=False)
pngs, htmls = len(glob("*png")), len(glob("*html"))
dats, alls = len(glob("*dat")), len(glob("./*"))
# ADM there are only .html, .dat and .png files.
self.assertEqual(pngs+htmls+dats, alls)
def METHOD_NAME(self):
"""Test mock QA plots/pages
"""
make_qa_page(self.mocktargfile, qadir=self.testdir,
makeplots=True, numproc=1, mocks=True,
max_bin_area=53.7148, systematics=False)
pngs, htmls = len(glob("*png")), len(glob("*html"))
dats, alls = len(glob("*dat")), len(glob("./*"))
# pngs, htmls, and dats exist, and nothing else
self.assertGreater(pngs, 0)
self.assertGreater(htmls, 0)
self.assertGreater(dats, 0)
self.assertEqual(pngs+htmls+dats, alls)
def test_parse_tc_names(self):
"""Test target class strings are parsed into lists.
"""
# ADM the defaults list of target classes without "ALL".
no_all = _parse_tcnames(add_all=False)
# ADM passing the string instead of defaulting.
no_all2 = _parse_tcnames(tcstring=",".join(no_all), add_all=False)
# ADM the default list of target classes with "ALL".
with_all = _parse_tcnames()
# ADM you shouldn't be able to pass gobbledygook.
failed = False
try:
fooblat = _parse_tcnames(tcstring="blat,foo")
except ValueError:
failed = True
self.assertTrue(no_all == no_all2)
self.assertTrue(set(with_all)-set(no_all) == {'ALL'})
self.assertTrue(failed)
def test_in_footprint(self):
"""Test target class strings are parsed into lists.
"""
# ADM a location that's definitely in DESI (38.5,7.5).
targs = np.zeros(1, dtype=[('RA', '>f8'), ('DEC', '>f8')])
targs["RA"], targs["DEC"] = 38.5, 7.5
tin = _in_desi_footprint(targs)
# ADM shift to a location definitely out of DESI (38.5,-60).
targs["DEC"] = -60.
tout = _in_desi_footprint(targs)
self.assertEqual(len(tin[0]), 1)
self.assertEqual(len(tout[0]), 0)
if __name__ == '__main__':
unittest.main()
def test_suite():
"""Allows testing of only this module with the command:
python setup.py test -m desitarget.test.test_qa
"""
return unittest.defaultTestLoader.loadTestsFromName(__name__)
|
4,123 |
print sol
|
# Copyright 2020 by B. Knueven, D. Mildebrath, C. Muir, J-P Watson, and D.L. Woodruff
# This software is distributed under the 3-clause BSD License.
import numpy as np
import pyomo.environ as pyo
import mpisppy.utils.sputils as sputils
from mpisppy.extensions.extension import Extension
class NetworkDesignTracker(Extension):
def __init__(self, ph):
self.ph = ph
self.cylinder_rank = ph.cylinder_rank
def pre_iter0(self):
pass
def post_iter0(self):
print('Just completed iteration 0')
def miditer(self):
''' Currently hard-coded for no parallelism
'''
print('About to do the solve in iteration', self.ph._PHIter)
sols = self.get_sol()
METHOD_NAME(sols, print_all=False)
if (self.ph._PHIter < 5):
return
models = self.ph.local_scenarios
arb_model = get_arb_elt(models)
edges = arb_model.edges
num_scenarios = len(sols)
bundling = not hasattr(arb_model, '_solver_plugin')
if (bundling):
arb_bundle_model = get_arb_elt(self.ph.local_subproblems)
persistent = sputils.is_persistent(arb_bundle_model._solver_plugin)
else:
persistent = sputils.is_persistent(arb_model._solver_plugin)
for edge in edges:
if (arb_model.x[edge].fixed): # Fixed in one model = fixed in all
continue
sol_values = {name: sols[name][edge]['value'] for name in sols}
total = sum(sol_values.values())
if (total < 0.1) or (total > num_scenarios - 0.1):
''' All scenarios agree '''
pass
if (total > (num_scenarios//2) + 1.9):
print(f'Fixing edge {edge} to 1')
for (sname, model) in models.items():
model.x[edge].fix(1.0)
if (persistent):
if (bundling):
solver = self.get_solver(sname)
else:
solver = model._solver_plugin
solver.update_var(model.x[edge])
fixed_edges = [edge for edge in edges if arb_model.x[edge].fixed]
print(f'Fixed {len(fixed_edges)} total edges')
def get_solver(self, sname):
''' Move this to PHBase if it is successful
Need to add some checks for bundling first
'''
rank = self.cylinder_rank
names = self.ph.names_in_bundles[rank]
bunnum = None
for (num, scens) in names.items():
if (sname in scens):
bunnum = num
break
if (bunnum is None):
raise RuntimeError(f'Could not find {sname}')
bname = f'rank{rank}bundle{bunnum}'
return self.ph.local_subproblems[bname]._solver_plugin
def enditer(self):
''' Variable fixing must be done in miditer, so that a solve takes
place after the variables are fixed (otherwise, we could end up with
and infeasible solution).
'''
pass
def post_everything(self):
pass
def get_sol(self):
ph = self.ph
arb_model = get_arb_elt(ph.local_scenarios)
edges = arb_model.edges
res = {
name: {
edge: {
'value': pyo.value(model.x[edge]),
'fixed': model.x[edge].fixed,
} for edge in edges
} for (name, model) in ph.local_scenarios.items()
}
return res
# My helper functions here
def get_arb_elt(dictionary):
''' Return an arbitrary element of the dictionary '''
if not (len(dictionary)):
return None
return next(iter(dictionary.values()))
def METHOD_NAME(sol_dict, print_all=True):
edges = list(get_arb_elt(sol_dict).keys())
for edge in edges:
if (not print_all):
tot = sum(sol_dict[sname][edge]['value'] for sname in sol_dict)
if (tot >= 0.9): # At least one scenario wants to take the edge
row = ''.join(['x' if sol_dict[sname][edge]['value'] > 0.1 else '.'
for sname in sol_dict])
row = f'{edge[0]:2d}-->{edge[1]:2d} ' + row
fixed = [sol_dict[sname][edge]['fixed'] for sname in sol_dict]
assert(all(fixed) or (not any(fixed)))
if (fixed[0]):
row += ' <-- fixed'
print(row)
else:
row = ''.join(['x' if sol_dict[sname][edge]['value'] > 0.1 else '.'
for sname in sol_dict])
row = f'{edge[0]:2d}-->{edge[1]:2d} ' + row
fixed = [sol_dict[sname][edge]['fixed'] for sname in sol_dict]
assert(all(fixed) or (not any(fixed)))
if (fixed[0]):
row += ' <-- fixed'
print(row)
|
4,124 |
get energies
|
#Copyright (c) 2008 Erik Tollerud ([email protected])
import numpy as np
class Pca:
"""
A basic class for Principal Component Analysis (PCA).
p is the number of dimensions, while N is the number of data points
"""
_colors=('r','g','b','c','y','m','k') #defaults
def __calc(self):
A = self.A
M=A-np.mean(A,axis=0)
N=M/np.std(M,axis=0)
self.M = M
self.N = N
self._eig = None
def __init__(self,data,names=None):
"""
p X N matrix input
"""
A = np.array(data).T
n,p = A.shape
self.n,self.p = n,p
if p > n:
from warnings import warn
warn('p > n - intentional?', RuntimeWarning)
self.A = A
self._origA=A.copy()
self.__calc()
self._colors= np.tile(self._colors,int((p-1)/len(self._colors))+1)[:p]
if names is not None and len(names) != p:
raise ValueError('names must match data dimension')
self.names = None if names is None else tuple([str(x) for x in names])
def getCovarianceMatrix(self):
"""
returns the covariance matrix for the dataset
"""
return np.cov(self.N.T)
def getEigensystem(self):
"""
returns a tuple of (eigenvalues,eigenvectors) for the data set.
"""
if self._eig is None:
res = np.linalg.eig(self.getCovarianceMatrix())
sorti=np.argsort(res[0])[::-1]
res=(res[0][sorti],res[1][:,sorti])
self._eig=res
return self._eig
def getEigenvalues(self):
return self.getEigensystem()[0]
def getEigenvectors(self):
return self.getEigensystem()[1]
def METHOD_NAME(self):
"""
"energies" are just normalized eigenvectors
"""
v=self.getEigenvalues()
return v/np.sum(v)
def plot2d(self,ix=0,iy=1,clf=True):
"""
Generates a 2-dimensional plot of the data set and principal components
using matplotlib.
ix specifies which p-dimension to put on the x-axis of the plot
and iy specifies which to put on the y-axis (0-indexed)
"""
import matplotlib.pyplot as plt
x,y=self.N[:,ix],self.N[:,iy]
if clf:
plt.clf()
plt.scatter(x,y)
vals,evs=self.getEigensystem()
#evx,evy=evs[:,ix],evs[:,iy]
xl,xu=plt.xlim()
yl,yu=plt.ylim()
dx,dy=(xu-xl),(yu-yl)
for val,vec,c in zip(vals,evs.T,self._colors):
plt.arrow(0,0,val*vec[ix],val*vec[iy],head_width=0.05*(dx*dy/4)**0.5,fc=c,ec=c)
#plt.arrow(0,0,vals[ix]*evs[ix,ix],vals[ix]*evs[iy,ix],head_width=0.05*(dx*dy/4)**0.5,fc='g',ec='g')
#plt.arrow(0,0,vals[iy]*evs[ix,iy],vals[iy]*evs[iy,iy],head_width=0.05*(dx*dy/4)**0.5,fc='r',ec='r')
if self.names is not None:
plt.xlabel('$'+self.names[ix]+'/\\sigma$')
plt.ylabel('$'+self.names[iy]+'/\\sigma$')
def plot3d(self,ix=0,iy=1,iz=2,clf=True):
"""
Generates a 3-dimensional plot of the data set and principal components
using mayavi.
ix, iy, and iz specify which of the input p-dimensions to place on each of
the x,y,z axes, respectively (0-indexed).
"""
import enthought.mayavi.mlab as M
if clf:
M.clf()
z3=np.zeros(3)
v=(self.getEigenvectors()*self.getEigenvalues())
M.quiver3d(z3,z3,z3,v[ix],v[iy],v[iz],scale_factor=5)
M.points3d(self.N[:,ix],self.N[:,iy],self.N[:,iz],scale_factor=0.3)
if self.names:
M.axes(xlabel=self.names[ix]+'/sigma',ylabel=self.names[iy]+'/sigma',zlabel=self.names[iz]+'/sigma')
else:
M.axes()
def sigclip(self,sigs):
"""
clips out all data points that are more than a certain number
of standard deviations from the mean.
sigs can be either a single value or a length-p sequence that
specifies the number of standard deviations along each of the
p dimensions.
"""
if np.isscalar(sigs):
sigs=sigs*np.ones(self.N.shape[1])
sigs = sigs*np.std(self.N,axis=1)
n = self.N.shape[0]
m = np.all(np.abs(self.N) < sigs,axis=1)
self.A=self.A[m]
self.__calc()
return n-sum(m)
def reset(self):
self.A = self._origA.copy()
self.__calc()
def project(self,vals=None,enthresh=None,nPCs=None,cumen=None):
"""
projects the normalized values onto the components
enthresh, nPCs, and cumen determine how many PCs to use
if vals is None, the normalized data vectors are the values to project.
Otherwise, it should be convertible to a p x N array
returns n,p(>threshold) dimension array
"""
nonnones = sum([e is not None for e in (enthresh, nPCs, cumen)])
if nonnones == 0:
m = slice(None)
elif nonnones > 1:
raise ValueError("cannot specify more than one threshold")
else:
if enthresh is not None:
m = self.energies() > enthresh
elif nPCs is not None:
m = slice(None,nPCs)
elif cumen is not None:
m = np.cumsum(self.energies()) < cumen
else:
raise RuntimeError('Should be unreachable')
if vals is None:
vals = self.N.T
else:
vals = np.array(vals,copy=False)
if self.N.T.shape[0] != vals.shape[0]:
raise ValueError("shape for vals does not match")
proj = np.matrix(self.getEigenvectors()).T*vals
return proj[m].T
def deproject(self,A,normed=True):
"""
input is an n X q array, where q <= p
output is p X n
"""
A=np.atleast_2d(A)
n,q = A.shape
p = self.A.shape[1]
if q > p :
raise ValueError("q > p")
evinv=np.linalg.inv(np.matrix(self.getEigenvectors()).T)
zs = np.zeros((n,p))
zs[:,:q]=A
proj = evinv*zs.T
if normed:
return np.array(proj.T).T
else:
mns=np.mean(self.A,axis=0)
sds=np.std(self.M,axis=0)
return (np.array(proj.T)*sds+mns).T
def subtractPC(self,pc,vals=None):
"""
pc can be a scalar or any sequence of pc indices
if vals is None, the source data is self.A, else whatever is in vals
(which must be p x m)
"""
if vals is None:
vals = self.A
else:
vals = vals.T
if vals.shape[1]!= self.A.shape[1]:
raise ValueError("vals do not have the correct number of components")
pcs=self.project()
zpcs=np.zeros_like(pcs)
zpcs[:,pc]=pcs[:,pc]
upc=self.deproject(zpcs,False)
A = vals.T-upc
B = A.T*np.std(self.M,axis=0)
return B+np.mean(self.A,axis=0)
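# Hypothetical usage sketch (not part of the original module): fit the Pca class
# above on a small correlated data set and project onto the two leading principal
# components; numpy is assumed to be available as np (imported at the top).
rng = np.random.RandomState(0)
base = rng.randn(100)
data = np.vstack([base, 2 * base + 0.1 * rng.randn(100), rng.randn(100)])  # p=3, N=100
pca = Pca(data, names=('a', 'b', 'c'))
evals = pca.getEigenvalues()  # eigenvalues sorted in descending order
proj = pca.project(nPCs=2)  # keep the two leading components
assert proj.shape == (100, 2)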
|
4,125 |
test has permission as non owner
|
""" Tests for API permissions classes. """
import ddt
import pytest
from django.contrib.auth.models import AnonymousUser
from django.http import Http404
from django.test import RequestFactory, TestCase
from opaque_keys.edx.keys import CourseKey
from rest_framework.generics import GenericAPIView
from common.djangoapps.student.roles import CourseInstructorRole, CourseStaffRole
from common.djangoapps.student.tests.factories import UserFactory
from openedx.core.lib.api.permissions import IsCourseStaffInstructor, IsMasterCourseStaffInstructor, IsStaffOrOwner
class TestObject:
""" Fake class for object permission tests. """
def __init__(self, user=None, course_id=None):
self.user = user
self.course_id = course_id
class TestCcxObject(TestObject):
""" Fake class for object permission for CCX Courses """
def __init__(self, user=None, course_id=None):
super().__init__(user, course_id)
self.coach = user
class IsCourseStaffInstructorTests(TestCase):
""" Test for IsCourseStaffInstructor permission class. """
def setUp(self):
super().setUp()
self.permission = IsCourseStaffInstructor()
self.coach = UserFactory()
self.user = UserFactory()
self.request = RequestFactory().get('/')
self.request.user = self.user
self.course_key = CourseKey.from_string('edx/test123/run')
self.obj = TestCcxObject(user=self.coach, course_id=self.course_key)
def test_course_staff_has_access(self):
CourseStaffRole(course_key=self.course_key).add_users(self.user)
assert self.permission.has_object_permission(self.request, None, self.obj)
def test_course_instructor_has_access(self):
CourseInstructorRole(course_key=self.course_key).add_users(self.user)
assert self.permission.has_object_permission(self.request, None, self.obj)
def test_course_coach_has_access(self):
self.request.user = self.coach
assert self.permission.has_object_permission(self.request, None, self.obj)
def test_any_user_has_no_access(self):
assert not self.permission.has_object_permission(self.request, None, self.obj)
def test_anonymous_has_no_access(self):
self.request.user = AnonymousUser()
assert not self.permission.has_object_permission(self.request, None, self.obj)
class IsMasterCourseStaffInstructorTests(TestCase):
""" Test for IsMasterCourseStaffInstructorTests permission class. """
def setUp(self):
super().setUp()
self.permission = IsMasterCourseStaffInstructor()
master_course_id = 'edx/test123/run'
self.user = UserFactory()
self.get_request = RequestFactory().get(f'/?master_course_id={master_course_id}')
self.get_request.user = self.user
self.post_request = RequestFactory().post('/', data={'master_course_id': master_course_id})
self.post_request.user = self.user
self.course_key = CourseKey.from_string(master_course_id)
def test_course_staff_has_access(self):
CourseStaffRole(course_key=self.course_key).add_users(self.user)
assert self.permission.has_permission(self.get_request, None)
assert self.permission.has_permission(self.post_request, None)
def test_course_instructor_has_access(self):
CourseInstructorRole(course_key=self.course_key).add_users(self.user)
assert self.permission.has_permission(self.get_request, None)
assert self.permission.has_permission(self.post_request, None)
def test_any_user_has_partial_access(self):
assert not self.permission.has_permission(self.get_request, None)
assert not self.permission.has_permission(self.post_request, None)
def test_anonymous_has_no_access(self):
user = AnonymousUser()
self.get_request.user = user
self.post_request.user = user
assert not self.permission.has_permission(self.get_request, None)
assert not self.permission.has_permission(self.post_request, None)
def test_wrong_course_id_raises(self):
get_request = RequestFactory().get('/?master_course_id=this_is_invalid')
with pytest.raises(Http404):
self.permission.has_permission(get_request, None)
post_request = RequestFactory().post('/', data={'master_course_id': 'this_is_invalid'})
with pytest.raises(Http404):
self.permission.has_permission(post_request, None)
@ddt.ddt
class IsStaffOrOwnerTests(TestCase):
""" Tests for IsStaffOrOwner permission class. """
def setUp(self):
super().setUp()
self.permission = IsStaffOrOwner()
self.request = RequestFactory().get('/')
self.obj = TestObject()
def assert_user_has_object_permission(self, user, permitted):
"""
Asserts whether or not the user has permission to access an object.
Arguments
user (User)
permitted (boolean)
"""
self.request.user = user
assert self.permission.has_object_permission(self.request, None, self.obj) == permitted
def test_staff_user(self):
""" Staff users should be permitted. """
user = UserFactory(is_staff=True)
self.assert_user_has_object_permission(user, True)
def test_owner(self):
""" Owners should be permitted. """
user = UserFactory()
self.obj.user = user
self.assert_user_has_object_permission(user, True)
def test_non_staff_test_non_owner_or_staff_user(self):
""" Non-staff and non-owner users should not be permitted. """
user = UserFactory()
self.assert_user_has_object_permission(user, False)
def test_has_permission_as_staff(self):
""" Staff users always have permission. """
self.request.user = UserFactory(is_staff=True)
assert self.permission.has_permission(self.request, None)
def test_has_permission_as_owner_with_get(self):
""" Owners always have permission to make GET actions. """
user = UserFactory()
request = RequestFactory().get(f'/?username={user.username}')
request.user = user
assert self.permission.has_permission(request, None)
def test_has_permission_with_view_kwargs_as_owner_with_get(self):
""" Owners always have permission to make GET actions. """
user = UserFactory()
self.request.user = user
view = GenericAPIView()
view.kwargs = {'username': user.username}
assert self.permission.has_permission(self.request, view)
@ddt.data('patch', 'post', 'put')
def test_has_permission_as_owner_with_edit(self, action):
""" Owners always have permission to edit. """
user = UserFactory()
data = {'username': user.username}
request = getattr(RequestFactory(), action)('/', data, format='json')
request.user = user
request.data = data # Note (CCB): This is a hack that should be fixed. (ECOM-3171)
assert self.permission.has_permission(request, None)
def METHOD_NAME(self):
""" Non-owners should not have permission. """
user = UserFactory()
request = RequestFactory().get(f'/?username={user.username}')
request.user = UserFactory()
assert not self.permission.has_permission(request, None)
def test_has_permission_with_view_kwargs_as_non_owner(self):
""" Non-owners should not have permission. """
user = UserFactory()
self.request.user = user
view = GenericAPIView()
view.kwargs = {'username': UserFactory().username}
assert not self.permission.has_permission(self.request, view)
|
4,126 |
sample n
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Uniform distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["distributions.Uniform"])
class Uniform(distribution.Distribution):
"""Uniform distribution with `low` and `high` parameters.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; a, b) = I[a <= x < b] / Z
Z = b - a
```
where
- `low = a`,
- `high = b`,
- `Z` is the normalizing constant, and
- `I[predicate]` is the [indicator function](
https://en.wikipedia.org/wiki/Indicator_function) for `predicate`.
The parameters `low` and `high` must be shaped in a way that supports
broadcasting (e.g., `high - low` is a valid operation).
#### Examples
```python
# Without broadcasting:
u1 = Uniform(low=3.0, high=4.0) # a single uniform distribution [3, 4]
u2 = Uniform(low=[1.0, 2.0],
high=[3.0, 4.0]) # 2 distributions [1, 3], [2, 4]
u3 = Uniform(low=[[1.0, 2.0],
[3.0, 4.0]],
high=[[1.5, 2.5],
[3.5, 4.5]]) # 4 distributions
```
```python
# With broadcasting:
u1 = Uniform(low=3.0, high=[5.0, 6.0, 7.0]) # 3 distributions
```
"""
@deprecation.deprecated(
"2019-01-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.distributions`.",
warn_once=True)
def __init__(self,
low=0.,
high=1.,
validate_args=False,
allow_nan_stats=True,
name="Uniform"):
"""Initialize a batch of Uniform distributions.
Args:
low: Floating point tensor, lower boundary of the output interval. Must
have `low < high`.
high: Floating point tensor, upper boundary of the output interval. Must
have `low < high`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
InvalidArgumentError: if `low >= high` and `validate_args=False`.
"""
parameters = dict(locals())
with ops.name_scope(name, values=[low, high]) as name:
with ops.control_dependencies([
check_ops.assert_less(
low, high, message="uniform not defined when low >= high.")
] if validate_args else []):
self._low = array_ops.identity(low, name="low")
self._high = array_ops.identity(high, name="high")
check_ops.assert_same_float_dtype([self._low, self._high])
super(Uniform, self).__init__(
dtype=self._low.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._low,
self._high],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("low", "high"),
([ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)] * 2)))
@property
def low(self):
"""Lower boundary of the output interval."""
return self._low
@property
def high(self):
"""Upper boundary of the output interval."""
return self._high
def range(self, name="range"):
"""`high - low`."""
with self._name_scope(name):
return self.high - self.low
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.low),
array_ops.shape(self.high))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.low.get_shape(),
self.high.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.TensorShape([])
def METHOD_NAME(self, n, seed=None):
shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
samples = random_ops.random_uniform(shape=shape,
dtype=self.dtype,
seed=seed)
return self.low + self.range() * samples
def _prob(self, x):
broadcasted_x = x * array_ops.ones(
self.batch_shape_tensor(), dtype=x.dtype)
return array_ops.where_v2(
math_ops.is_nan(broadcasted_x), broadcasted_x,
array_ops.where_v2(
math_ops.logical_or(broadcasted_x < self.low,
broadcasted_x >= self.high),
array_ops.zeros_like(broadcasted_x),
array_ops.ones_like(broadcasted_x) / self.range()))
def _cdf(self, x):
broadcast_shape = array_ops.broadcast_dynamic_shape(
array_ops.shape(x), self.batch_shape_tensor())
zeros = array_ops.zeros(broadcast_shape, dtype=self.dtype)
ones = array_ops.ones(broadcast_shape, dtype=self.dtype)
broadcasted_x = x * ones
result_if_not_big = array_ops.where_v2(
x < self.low, zeros, (broadcasted_x - self.low) / self.range())
return array_ops.where_v2(x >= self.high, ones, result_if_not_big)
def _entropy(self):
return math_ops.log(self.range())
def _mean(self):
return (self.low + self.high) / 2.
def _variance(self):
return math_ops.square(self.range()) / 12.
def _stddev(self):
return self.range() / math.sqrt(12.)
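# Hypothetical sanity check (not part of the original TensorFlow module): it
# verifies the closed-form moments used above -- mean (a + b) / 2 and variance
# (b - a)**2 / 12 -- by plain NumPy sampling; numpy is an assumed extra dependency.
import numpy as _np
_a, _b = 3.0, 4.0
_samples = _np.random.uniform(_a, _b, size=100000)
assert abs(_samples.mean() - (_a + _b) / 2.0) < 1e-2
assert abs(_samples.var() - (_b - _a) ** 2 / 12.0) < 1e-2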
|
4,127 |
test parse device information success
|
from typing import Dict
import pytest
from opentrons.drivers import utils
from opentrons.drivers.types import (
Temperature,
PlateTemperature,
RPM,
HeaterShakerLabwareLatchStatus,
)
@pytest.mark.parametrize(
argnames=["input_str", "expected_result"],
argvalues=[
[
"version:123-2 serial:serial_v model:m",
{"version": "123-2", "serial": "serial_v", "model": "m"},
],
[
"serial:serial_v model:m version:123-2 ",
{"version": "123-2", "serial": "serial_v", "model": "m"},
],
[
" serial:serial_v model:m version:123-2 ",
{"version": "123-2", "serial": "serial_v", "model": "m"},
],
[
"something:extra serial:serial_v model:m version:123-2",
{"version": "123-2", "serial": "serial_v", "model": "m"},
],
],
)
def METHOD_NAME(
input_str: str, expected_result: Dict[str, str]
) -> None:
"""Test parse device information."""
assert utils.parse_device_information(input_str) == expected_result
@pytest.mark.parametrize(
argnames=["input_str"],
argvalues=[
["version:123 serial:serial_v"],
["version:123 serialg:serial_v model:123"],
[""],
],
)
def test_parse_device_information_failure(input_str: str) -> None:
"""Test parse device information."""
with pytest.raises(utils.ParseError):
utils.parse_device_information(input_str)
@pytest.mark.parametrize(
argnames=["input_str", "expected_result"],
argvalues=[
[
"FW:123-2 SerialNo:serial_v HW:m",
{"version": "123-2", "serial": "serial_v", "model": "m"},
],
[
"SerialNo:serial_v HW:m FW:123-2 ",
{"version": "123-2", "serial": "serial_v", "model": "m"},
],
[
" SerialNo:serial_v HW:m FW:123-2 ",
{"version": "123-2", "serial": "serial_v", "model": "m"},
],
[
"something:extra SerialNo:serial_v HW:m FW:123-2",
{"version": "123-2", "serial": "serial_v", "model": "m"},
],
],
)
def test_parse_hs_device_information_success(
input_str: str, expected_result: Dict[str, str]
) -> None:
"""Test parse device information."""
assert utils.parse_hs_device_information(input_str) == expected_result
@pytest.mark.parametrize(
argnames=["input_str"],
argvalues=[
["FW:123 SerialNo:serial_v"],
["FW:123 SerialNog:serial_v HW:123"],
[""],
],
)
def test_parse_hs_device_information_failure(input_str: str) -> None:
"""Test parse device information."""
with pytest.raises(utils.ParseError):
utils.parse_hs_device_information(input_str)
@pytest.mark.parametrize(
argnames=["input_str", "expected_result"],
argvalues=[
["T:none C:123.4", Temperature(target=None, current=123.4)],
["T:123.566 C:123.446", Temperature(target=123.57, current=123.45)],
["T:-123.566 C:-123.446", Temperature(target=-123.57, current=-123.45)],
["a:3 T:2. C:3 H:0 G:1", Temperature(target=2, current=3)],
["T:0 C:124", Temperature(target=0, current=124)],
["T:none C:124", Temperature(target=None, current=124)],
],
)
def test_parse_temperature_response_success(
input_str: str, expected_result: Temperature
) -> None:
"""It should parse temperature response."""
assert utils.parse_temperature_response(input_str, 2) == expected_result
@pytest.mark.parametrize(
argnames=["input_str"],
argvalues=[
["T:not_a_float C:123"],
["T:none C:none"],
["C:not_a_float T:123"],
[""],
],
)
def test_parse_temperature_response_failure(input_str: str) -> None:
"""It should fail to parse temperature response."""
with pytest.raises(utils.ParseError):
utils.parse_temperature_response(input_str, 2)
@pytest.mark.parametrize(
argnames=["input_str", "expected_result"],
argvalues=[
["T:0 C:500", RPM(target=None, current=500)],
["T:25 C:24", RPM(target=25, current=24)],
["a:3 T:2. C:3 H:0 G:1", RPM(target=2, current=3)],
],
)
def test_parse_rpm_response_success(input_str: str, expected_result: RPM) -> None:
"""It should parse temperature response."""
assert utils.parse_rpm_response(input_str) == expected_result
@pytest.mark.parametrize(
argnames=["input_str"],
argvalues=[
["T:not_an_int C:123"],
["T:none C:none"],
["C:not_an_int T:123"],
[""],
],
)
def test_parse_rpm_response_failure(input_str: str) -> None:
"""It should fail to parse temperature response."""
with pytest.raises(utils.ParseError):
utils.parse_rpm_response(input_str)
@pytest.mark.parametrize(
argnames=["input_str", "expected_result"],
argvalues=[
["STATUS:IDLE_OPEN", HeaterShakerLabwareLatchStatus.IDLE_OPEN],
["STATUS:IDLE_CLOSED", HeaterShakerLabwareLatchStatus.IDLE_CLOSED],
["a:safa STATUS:IDLE_UNKNOWN", HeaterShakerLabwareLatchStatus.IDLE_UNKNOWN],
],
)
def test_parse_labware_latch_status_response_success(
input_str: str, expected_result: HeaterShakerLabwareLatchStatus
):
assert utils.parse_labware_latch_status_response(input_str) == expected_result
@pytest.mark.parametrize(
argnames=["input_str"],
argvalues=[
[""],
["SSSSSS:IDLE_OPEN"],
["STATUS:Lord Humongous Sees You"],
["IDLE_OPEN"],
["STATUS"],
[":"],
],
)
def test_parse_labware_latch_status_response_failure(input_str):
with pytest.raises(utils.ParseError):
utils.parse_labware_latch_status_response(input_str)
@pytest.mark.parametrize(
argnames=["input_str", "expected_result"],
argvalues=[
["T:none C:0 H:none", PlateTemperature(target=None, current=0, hold=None)],
[
"T:321.4 C:45 H:123.222",
PlateTemperature(target=321.4, current=45, hold=123.22),
],
[
"T:-44.2442 C:-22.233 H:0",
PlateTemperature(target=-44.24, current=-22.23, hold=0),
],
["a:3 T:2. C:3 H:0 G:1", PlateTemperature(target=2, current=3, hold=0)],
],
)
def test_parse_plate_temperature_response_success(
input_str: str, expected_result: PlateTemperature
) -> None:
"""It should parse plate temperature response"""
assert utils.parse_plate_temperature_response(input_str, 2) == expected_result
@pytest.mark.parametrize(
argnames=["input_str"],
argvalues=[
["T:not_a_float C:123 H:321"],
["C:not_a_float T:123 H:321"],
["H:not_a_float C:123 T:321"],
[""],
],
)
def test_parse_plate_temperature_response_failure(input_str: str) -> None:
"""It should fail to parse plate temperature response"""
with pytest.raises(utils.ParseError):
utils.parse_plate_temperature_response(input_str, 2)
@pytest.mark.parametrize(
argnames=["input_str", "min_length", "expected"],
argvalues=[
["", 0, ""],
["", 1, "0"],
["abcd", 12, "616263640000"],
["abcd", 5, "61626364"],
],
)
def test_string_to_hex(input_str: str, min_length: int, expected: str) -> None:
"""It should convert string to hex with padding to reach min_length."""
assert expected == utils.string_to_hex(val=input_str, min_length=min_length)
|
4,128 |
on track end
|
import logging
from datetime import timedelta
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union
from ..adapters.api_objects import Song
from .base import PlayerDeviceEvent, PlayerEvent
from .chromecast import ChromecastPlayer # noqa: F401
from .mpv import MPVPlayer # noqa: F401
class PlayerManager:
# Available Players. Order matters for UI display.
available_player_types: List[Type] = [MPVPlayer, ChromecastPlayer]
@staticmethod
def get_configuration_options() -> Dict[str, Dict[str, Union[Type, Tuple[str, ...]]]]:
"""
:returns: Dictionary of the name of the player -> option configs (see
:class:`sublime_music.players.base.Player.get_configuration_options` for
details).
"""
return {
p.name: p.get_configuration_options() for p in PlayerManager.available_player_types
}
# Initialization and Shutdown
def __init__(
self,
on_timepos_change: Callable[[Optional[float]], None],
on_track_end: Callable[[], None],
on_player_event: Callable[[PlayerEvent], None],
player_device_change_callback: Callable[[PlayerDeviceEvent], None],
config: Dict[str, Dict[str, Union[Type, Tuple[str, ...]]]],
):
self.current_song: Optional[Song] = None
self.next_song_uri: Optional[str] = None
self.on_timepos_change = on_timepos_change
self.on_track_end = on_track_end
self.config = config
self.players: Dict[Type, Any] = {}
self.device_id_type_map: Dict[str, Type] = {}
self._current_device_id: Optional[str] = None
self._track_ending: bool = False
def player_event_wrapper(pe: PlayerEvent):
if pe.device_id == self._current_device_id:
on_player_event(pe)
self.on_player_event = player_event_wrapper
def callback_wrapper(pde: PlayerDeviceEvent):
self.device_id_type_map[pde.id] = pde.player_type
player_device_change_callback(pde)
self.player_device_change_callback = callback_wrapper
self.players = {
player_type: player_type(
self.on_timepos_change,
self.METHOD_NAME,
self.on_player_event,
self.player_device_change_callback,
self.config.get(player_type.name),
)
for player_type in PlayerManager.available_player_types
}
def change_settings(
self,
config: Dict[str, Dict[str, Union[Type, Tuple[str, ...]]]],
):
self.config = config
for player_type, player in self.players.items():
player.change_settings(config.get(player_type.name))
def refresh_players(self):
for player in self.players.values():
player.refresh_players()
def shutdown(self):
for p in self.players.values():
p.shutdown()
def _get_current_player_type(self) -> Any:
device_id = self._current_device_id
if device_id:
return self.device_id_type_map.get(device_id)
def _get_current_player(self) -> Any:
if current_player_type := self._get_current_player_type():
return self.players.get(current_player_type)
def METHOD_NAME(self):
self._track_ending = True
self.on_track_end()
@property
def supported_schemes(self) -> Set[str]:
if cp := self._get_current_player():
return cp.supported_schemes
return set()
@property
def can_start_playing_with_no_latency(self) -> bool:
if self._current_device_id:
return self._get_current_player_type().can_start_playing_with_no_latency
else:
return False
@property
def current_device_id(self) -> Optional[str]:
return self._current_device_id
def set_current_device_id(self, device_id: str):
logging.info(f"Setting current device id to '{device_id}'")
if cp := self._get_current_player():
cp.pause()
cp.song_loaded = False
self._current_device_id = device_id
if cp := self._get_current_player():
cp.set_current_device_id(device_id)
cp.song_loaded = False
def reset(self):
if current_player := self._get_current_player():
current_player.reset()
@property
def song_loaded(self) -> bool:
if current_player := self._get_current_player():
return current_player.song_loaded
return False
@property
def playing(self) -> bool:
if current_player := self._get_current_player():
return current_player.playing
return False
def get_volume(self) -> float:
if current_player := self._get_current_player():
return current_player.get_volume()
return 100
def set_volume(self, volume: float):
if current_player := self._get_current_player():
current_player.set_volume(volume)
def get_is_muted(self) -> bool:
if current_player := self._get_current_player():
return current_player.get_is_muted()
return False
def set_muted(self, muted: bool):
if current_player := self._get_current_player():
current_player.set_muted(muted)
def play_media(self, uri: str, progress: timedelta, song: Song):
current_player = self._get_current_player()
if not current_player:
return
if (
current_player.gapless_playback
and self.next_song_uri
and uri == self.next_song_uri
and progress == timedelta(0)
and self._track_ending
):
# In this case the player already knows about the next
# song and will automatically play it when the current
# song is complete.
self.current_song = song
self.next_song_uri = None
self._track_ending = False
current_player.song_loaded = True
return
# If we are changing the current song then the next song
# should also be invalidated.
if self.current_song != song:
self.current_song = song
self.next_song_uri = None
self._track_ending = False
current_player.play_media(uri, progress, song)
def pause(self):
if current_player := self._get_current_player():
current_player.pause()
def toggle_play(self):
if current_player := self._get_current_player():
if self.playing:
current_player.pause()
else:
current_player.play()
def seek(self, position: timedelta):
if current_player := self._get_current_player():
current_player.seek(position)
def next_media_cached(self, uri: str, song: Song):
if current_player := self._get_current_player():
if current_player.gapless_playback:
self.next_song_uri = uri
current_player.next_media_cached(uri, song)
|
4,129 |
irreps in
|
import torch
from e3nn import o3
from e3nn.nn import Extract, Activation
from e3nn.util.jit import compile_mode
@compile_mode("script")
class _Sortcut(torch.nn.Module):
def __init__(self, *irreps_outs) -> None:
super().__init__()
self.irreps_outs = tuple(o3.Irreps(irreps).simplify() for irreps in irreps_outs)
METHOD_NAME = sum(self.irreps_outs, o3.Irreps([]))
i = 0
instructions = []
for irreps_out in self.irreps_outs:
instructions += [tuple(range(i, i + len(irreps_out)))]
i += len(irreps_out)
assert len(METHOD_NAME) == i, (len(METHOD_NAME), i)
METHOD_NAME, p, _ = METHOD_NAME.sort()
instructions = [tuple(p[i] for i in x) for x in instructions]
self.cut = Extract(METHOD_NAME, self.irreps_outs, instructions)
self.METHOD_NAME = METHOD_NAME.simplify()
def forward(self, x):
return self.cut(x)
@compile_mode("script")
class Gate(torch.nn.Module):
r"""Gate activation function.
The gate activation is a direct sum of two sets of irreps. The first set
of irreps is ``irreps_scalars`` passed through activation functions
``act_scalars``. The second set of irreps is ``irreps_gated`` multiplied
by the scalars ``irreps_gates`` passed through activation functions
``act_gates``. Mathematically, this can be written as:
.. math::
\left(\bigoplus_i \phi_i(x_i) \right) \oplus \left(\bigoplus_j \phi_j(g_j) y_j \right)
where :math:`x_i` and :math:`\phi_i` are from ``irreps_scalars`` and
``act_scalars``, and :math:`g_j`, :math:`\phi_j`, and :math:`y_j` are
from ``irreps_gates``, ``act_gates``, and ``irreps_gated``.
The parameters passed in should adhere to the following conditions:
1. ``len(irreps_scalars) == len(act_scalars)``.
2. ``len(irreps_gates) == len(act_gates)``.
3. ``irreps_gates.num_irreps == irreps_gated.num_irreps``.
Parameters
----------
irreps_scalars : `e3nn.o3.Irreps`
Representation of the scalars that will be passed through the
activation functions ``act_scalars``.
act_scalars : list of function or None
Activation functions acting on the scalars.
irreps_gates : `e3nn.o3.Irreps`
Representation of the scalars that will be passed through the
activation functions ``act_gates`` and multiplied by the
``irreps_gated``.
act_gates : list of function or None
Activation functions acting on the gates. The number of functions in
the list should match the number of irrep groups in ``irreps_gates``.
irreps_gated : `e3nn.o3.Irreps`
Representation of the gated tensors.
``irreps_gates.num_irreps == irreps_gated.num_irreps``
Examples
--------
>>> g = Gate("16x0o", [torch.tanh], "32x0o", [torch.tanh], "16x1e+16x1o")
>>> g.irreps_out
16x0o+16x1o+16x1e
"""
def __init__(self, irreps_scalars, act_scalars, irreps_gates, act_gates, irreps_gated) -> None:
super().__init__()
irreps_scalars = o3.Irreps(irreps_scalars)
irreps_gates = o3.Irreps(irreps_gates)
irreps_gated = o3.Irreps(irreps_gated)
if len(irreps_gates) > 0 and irreps_gates.lmax > 0:
raise ValueError(f"Gate scalars must be scalars, instead got irreps_gates = {irreps_gates}")
if len(irreps_scalars) > 0 and irreps_scalars.lmax > 0:
raise ValueError(f"Scalars must be scalars, instead got irreps_scalars = {irreps_scalars}")
if irreps_gates.num_irreps != irreps_gated.num_irreps:
raise ValueError(
f"There are {irreps_gated.num_irreps} irreps in irreps_gated, but a different number "
f"({irreps_gates.num_irreps}) of gate scalars in irreps_gates"
)
self.sc = _Sortcut(irreps_scalars, irreps_gates, irreps_gated)
self.irreps_scalars, self.irreps_gates, self.irreps_gated = self.sc.irreps_outs
self._irreps_in = self.sc.METHOD_NAME
self.act_scalars = Activation(irreps_scalars, act_scalars)
irreps_scalars = self.act_scalars.irreps_out
self.act_gates = Activation(irreps_gates, act_gates)
irreps_gates = self.act_gates.irreps_out
self.mul = o3.ElementwiseTensorProduct(irreps_gated, irreps_gates)
irreps_gated = self.mul.irreps_out
self._irreps_out = irreps_scalars + irreps_gated
def __repr__(self) -> str:
return f"{self.__class__.__name__} ({self.METHOD_NAME} -> {self.irreps_out})"
def forward(self, features):
"""Evaluate the gated activation function.
Parameters
----------
features : `torch.Tensor`
tensor of shape ``(..., irreps_in.dim)``
Returns
-------
`torch.Tensor`
tensor of shape ``(..., irreps_out.dim)``
"""
# - PROFILER - with torch.autograd.profiler.record_function('Gate'):
scalars, gates, gated = self.sc(features)
scalars = self.act_scalars(scalars)
if gates.shape[-1]:
gates = self.act_gates(gates)
gated = self.mul(gated, gates)
features = torch.cat([scalars, gated], dim=-1)
else:
features = scalars
return features
@property
def METHOD_NAME(self):
"""Input representations."""
return self._irreps_in
@property
def irreps_out(self):
"""Output representations."""
return self._irreps_out
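# Hypothetical usage sketch (not part of the original e3nn source), expanding on
# the doctest in the class docstring: with 16 odd scalars, 32 gate scalars and
# 16x1e+16x1o gated features, the input width is 16 + 32 + 3*32 = 144 and the
# gated output width is g.irreps_out.dim == 112.
g = Gate("16x0o", [torch.tanh], "32x0o", [torch.tanh], "16x1e+16x1o")
x = torch.randn(10, 144)
y = g(x)
assert g.irreps_out.dim == 112
assert y.shape == (10, 112)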
|
4,130 |
detector fails properly
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
from json import loads
from unittest import mock
from preggy import expect
from redis import Redis, Sentinel
from tornado.testing import gen_test
from tests.base import DetectorTestCase
from thumbor.config import Config
from thumbor.detectors.queued_detector import QueuedDetector
TEST_REDIS_HOST = "0.0.0.0"
TEST_REDIS_PORT = 6668
TEST_REDIS_PASSWORD = "hey_you"
TEST_REDIS_MODE = "single_node"
TEST_REDIS_SENTINEL_MODE = "sentinel"
TEST_REDIS_QUEUE_SENTINEL_INSTANCES = "localhost:26379"
TEST_REDIS_SENTINEL_MASTER_INSTANCE = "masterinstance"
TEST_REDIS_SENTINEL_SOCKET_TIMEOUT = 10.0
class SharedQueuedDetectorTestCase(DetectorTestCase):
def setUp(self):
super().setUp()
self.redis = None
async def detector_send_to_queues(self, ctx):
detector = QueuedDetector(ctx, 1, [])
expect(detector).not_to_be_null()
data = await detector.detect()
expect(data).to_be_empty()
expect(ctx.request.detection_error).to_be_false()
result = self.redis.get("resque:unique:queue:Detect:/image/test.jpg")
expect(result).to_equal("1")
expected_payload = {
"queue": "Detect",
"args": ["all", "/image/test.jpg", "/image/test.jpg"],
"class": "remotecv.pyres_tasks.DetectTask",
"key": "/image/test.jpg",
}
result = self.redis.lpop("resque:queue:Detect")
expect(loads(result.decode("utf-8"))).to_be_like(expected_payload)
async def METHOD_NAME(self, ctx):
detector = QueuedDetector(ctx, 1, [])
expect(detector).not_to_be_null()
data = await detector.detect()
expect(data).to_be_empty()
expect(ctx.request.detection_error).to_be_true()
expect(detector.queue).to_be_null()
async def detector_can_detect_twice(self, ctx):
detector = QueuedDetector(ctx, 1, [])
expect(detector).not_to_be_null()
data = await detector.detect()
expect(data).to_be_empty()
expect(ctx.request.detection_error).to_be_false()
expect(detector.queue).not_to_be_null()
data = detector.detect()
expect(detector.queue).not_to_be_null()
class QueuedDetectorTestCase(SharedQueuedDetectorTestCase):
def get_config(self):
return Config(
REDIS_QUEUE_SERVER_PORT=TEST_REDIS_PORT,
REDIS_QUEUE_SERVER_PASSWORD=TEST_REDIS_PASSWORD,
)
def setUp(self):
super().setUp()
self.redis = Redis(
host=TEST_REDIS_HOST,
port=TEST_REDIS_PORT,
db=0,
password=TEST_REDIS_PASSWORD,
)
self.redis.delete("resque:unique:queue:Detect:/image/test.jpg")
self.redis.delete("resque:queue:Detect")
QueuedDetector.queue = None
@gen_test
async def test_detector_sends_to_queue(self):
ctx = mock.Mock(
config=mock.Mock(
REDIS_QUEUE_MODE=TEST_REDIS_MODE,
REDIS_QUEUE_SERVER_HOST=TEST_REDIS_HOST,
REDIS_QUEUE_SERVER_PORT=TEST_REDIS_PORT,
REDIS_QUEUE_SERVER_DB=0,
REDIS_QUEUE_SERVER_PASSWORD=TEST_REDIS_PASSWORD,
),
request=mock.Mock(
image_url="/image/test.jpg",
detection_error=False,
),
)
await self.detector_send_to_queues(ctx)
@gen_test
async def test_detector_fails_properly(self):
ctx = mock.Mock(
config=mock.Mock(
REDIS_QUEUE_MODE=TEST_REDIS_MODE,
REDIS_QUEUE_SERVER_HOST=TEST_REDIS_HOST,
REDIS_QUEUE_SERVER_PORT=6669,
REDIS_QUEUE_SERVER_DB=0,
REDIS_QUEUE_SERVER_PASSWORD=TEST_REDIS_PASSWORD,
),
request=mock.Mock(
image_url="/image/test.jpg",
detection_error=False,
),
)
await self.METHOD_NAME(ctx)
@gen_test
async def test_detector_can_detect_twice(self):
ctx = mock.Mock(
config=mock.Mock(
REDIS_QUEUE_MODE=TEST_REDIS_MODE,
REDIS_QUEUE_SERVER_HOST=TEST_REDIS_HOST,
REDIS_QUEUE_SERVER_PORT=TEST_REDIS_PORT,
REDIS_QUEUE_SERVER_DB=0,
REDIS_QUEUE_SERVER_PASSWORD=TEST_REDIS_PASSWORD,
),
request=mock.Mock(
image_url="/image/test.jpg",
detection_error=False,
),
)
await self.detector_can_detect_twice(ctx)
class QueuedSentinelDetectorTestCase(SharedQueuedDetectorTestCase):
def get_config(self):
return Config(
REDIS_QUEUE_MODE=TEST_REDIS_SENTINEL_MODE,
REDIS_QUEUE_SENTINEL_PASSWORD=TEST_REDIS_PASSWORD,
REDIS_QUEUE_SENTINEL_SOCKET_TIMEOUT=TEST_REDIS_SENTINEL_SOCKET_TIMEOUT,
REDIS_QUEUE_SENTINEL_MASTER_INSTANCE=TEST_REDIS_SENTINEL_MASTER_INSTANCE,
REDIS_QUEUE_SENTINEL_MASTER_PASSWORD=TEST_REDIS_PASSWORD,
REDIS_QUEUE_SENTINEL_MASTER_DB=0,
)
def setUp(self):
super().setUp()
self.sentinel = Sentinel(
[("localhost", 26379)],
sentinel_kwargs={"password": TEST_REDIS_PASSWORD},
)
self.redis = self.sentinel.master_for(
TEST_REDIS_SENTINEL_MASTER_INSTANCE,
password=TEST_REDIS_PASSWORD,
db=0,
)
self.redis.delete("resque:unique:queue:Detect:/image/test.jpg")
self.redis.delete("resque:queue:Detect")
QueuedDetector.queue = None
self.request = mock.Mock(
image_url="/image/test.jpg",
detection_error=False,
)
self.ctx = mock.Mock(
config=self.config,
request=self.request,
)
@gen_test
async def test_detector_sends_to_queue(self):
await self.detector_send_to_queues(self.ctx)
@gen_test
async def test_detector_fails_properly(self):
self.ctx.config.REDIS_QUEUE_SENTINEL_INSTANCES = "localhost:23680"
await self.METHOD_NAME(self.ctx)
@gen_test
async def test_detector_can_detect_twice(self):
await self.detector_can_detect_twice(self.ctx)
|
4,131 |
reference
|
from typing import List
import re
class Technicaldetails:
def __init__(self, node):
self.node = node
@property
def request(self) -> str:
if self.node in ('', None):
return ''
return self.node.findtext('Request', '')
@property
def response(self) -> str:
if self.node in ('', None):
return ''
return self.node.findtext('Response', '')
class Cve:
def __init__(self, node):
self.node = node
@property
def text(self) -> str:
if self.node in ('', None):
return ''
return self.node.text
class CVEList:
def __init__(self, node):
self.node = node
@property
def cve(self) -> Cve:
if self.node is None:
return ''
return Cve(self.node.find('CVE'))
class Cwe:
def __init__(self, node):
self.node = node
@property
def id_attr(self) -> str:
return self.node.attrib.get('id', '')
@property
def text(self) -> str:
return self.node.text
class Cwelist:
def __init__(self, node):
self.node = node
@property
def cwe(self) -> Cwe:
return Cwe(self.node.find('CWE'))
class Cvss:
def __init__(self, node):
self.node = node
@property
def descriptor(self) -> str:
return self.node.findtext('Descriptor', '')
@property
def score(self) -> str:
if self.node is None:
return ''
return self.node.findtext('Score')
@property
def av(self) -> str:
return self.node.findtext('AV', '')
@property
def ac(self) -> str:
return self.node.findtext('AC', '')
@property
def au(self) -> str:
return self.node.findtext('Au', '')
@property
def c(self) -> str:
return self.node.findtext('C', '')
@property
def i(self) -> str:
return self.node.findtext('I', '')
@property
def a(self) -> str:
return self.node.findtext('A', '')
@property
def e(self):
return self.node.find('E')
@property
def rl(self):
return self.node.find('RL')
@property
def rc(self):
return self.node.find('RC')
class Cvss3:
def __init__(self, node):
self.node = node
@property
def descriptor(self) -> str:
return self.node.findtext('Descriptor', '')
@property
def score(self) -> str:
return self.node.findtext('Score')
@property
def tempscore(self):
return self.node.find('TempScore')
@property
def envscore(self):
return self.node.find('EnvScore')
@property
def av(self) -> str:
return self.node.findtext('AV', '')
@property
def ac(self) -> str:
return self.node.findtext('AC', '')
@property
def pr(self) -> str:
return self.node.findtext('PR', '')
@property
def ui(self) -> str:
return self.node.findtext('UI', '')
@property
def s(self) -> str:
return self.node.findtext('S', '')
@property
def c(self) -> str:
return self.node.findtext('C', '')
@property
def i(self) -> str:
return self.node.findtext('I', '')
@property
def a(self) -> str:
return self.node.findtext('A', '')
@property
def e(self):
return self.node.find('E')
@property
def rl(self):
return self.node.find('RL')
@property
def rc(self):
return self.node.find('RC')
class Reference:
def __init__(self, node):
self.node = node
@property
def database(self) -> str:
return self.node.findtext('Database', '')
@property
def url(self) -> str:
return self.node.findtext('URL', '')
class References:
def __init__(self, node):
self.node = node
@property
def METHOD_NAME(self) -> List[Reference]:
return [Reference(i) for i in self.node.findall('Reference', [])]
class Reportitem:
def __init__(self, node):
self.node = node
@property
def id_attr(self) -> str:
return self.node.findtext('id', '')
@property
def color_attr(self) -> str:
return self.node.findtext('color', '')
@property
def name(self) -> str:
return self.node.findtext('Name', '')
@property
def modulename(self) -> str:
return self.node.findtext('ModuleName', '')
@property
def details(self) -> str:
return self.node.findtext('Details', '')
@property
def affects(self) -> str:
return self.node.findtext('Affects', '')
@property
def parameter(self) -> str:
return self.node.findtext('Parameter')
@property
def aop_sourcefile(self):
return self.node.find('AOP_SourceFile')
@property
def aop_sourceline(self):
return self.node.find('AOP_SourceLine')
@property
def aop_additional(self):
return self.node.find('AOP_Additional')
@property
def isfalsepositive(self):
return self.node.find('IsFalsePositive')
@property
def severity(self) -> str:
return self.node.findtext('Severity', '')
@property
def type(self) -> str:
return self.node.findtext('Type', '')
@property
def impact(self) -> str:
return self.node.findtext('Impact', '')
@property
def description(self) -> str:
return self.node.findtext('Description', '')
@property
def recommendation(self) -> str:
return self.node.findtext('Recommendation', '')
@property
def technicaldetails(self) -> Technicaldetails:
return Technicaldetails(self.node.find('TechnicalDetails'))
@property
def cwelist(self) -> Cwelist:
return Cwelist(self.node.find('CWEList'))
@property
def cvelist(self):
return CVEList(self.node.find('CVEList'))
@property
def cvss(self) -> Cvss:
cvss = self.node.find('CVSS')
if not cvss:
cvss = self.node.find('cvss')
return Cvss(cvss)
@property
def cvss3(self) -> Cvss3:
cvss = self.node.find('CVSS3')
if not cvss:
cvss = self.node.find('cvss3')
return Cvss3(cvss)
@property
def references(self) -> References:
return References(self.node.find('References'))
class Reportitems:
def __init__(self, node):
self.node = node
@property
def reportitem(self) -> List[Reportitem]:
return [Reportitem(i) for i in self.node.findall('ReportItem', [])]
class Crawler:
def __init__(self, node):
self.node = node
@property
def start_url_attr(self) -> str:
return self.node.get('StartUrl', '')
class Scan:
def __init__(self, node):
self.node = node
@property
def reportitems(self) -> Reportitems:
return Reportitems(self.node.find('ReportItems'))
@property
def start_url(self) -> str:
return self.node.findtext("StartURL", "")
@property
def crawler(self) -> Crawler:
return Crawler(self.node.find('Crawler'))
@property
def os(self) -> str:
if not self.node.findtext("Os", "unknown"):
return "unknown"
return self.node.findtext("Os", "unknown")
@property
def banner(self) -> str:
if not self.node.findtext('Banner'):
return None
return self.node.findtext("Banner")
@property
def start_url_new(self) -> str:
return self.node.findtext("", "")
class Acunetix:
def __init__(self, node):
self.node = node
@property
def exportedon_attr(self) -> str:
return self.node.get('ExportedOn')
@property
def scan(self) -> List[Scan]:
return [Scan(i) for i in self.node.findall('Scan', [])]
|
4,132 |
test effective url
|
# -*- coding: utf-8 -*-
"""Integration tests with httplib2"""
import sys
from six.moves.urllib_parse import urlencode
import pytest
import pytest_httpbin.certs
import vcr
from assertions import assert_cassette_has_one_response
httplib2 = pytest.importorskip("httplib2")
def http():
"""
Returns an httplib2 HTTP instance
with the certificate replaced by the httpbin one.
"""
kwargs = {"ca_certs": pytest_httpbin.certs.where()}
if sys.version_info[:2] in [(2, 7), (3, 7)]:
kwargs["disable_ssl_certificate_validation"] = True
return httplib2.Http(**kwargs)
def test_response_code(tmpdir, httpbin_both):
"""Ensure we can read a response code from a fetch"""
url = httpbin_both.url
with vcr.use_cassette(str(tmpdir.join("atts.yaml"))):
resp, _ = http().request(url)
code = resp.status
with vcr.use_cassette(str(tmpdir.join("atts.yaml"))):
resp, _ = http().request(url)
assert code == resp.status
def test_random_body(httpbin_both, tmpdir):
"""Ensure we can read the content, and that it's served from cache"""
url = httpbin_both.url + "/bytes/1024"
with vcr.use_cassette(str(tmpdir.join("body.yaml"))):
_, content = http().request(url)
body = content
with vcr.use_cassette(str(tmpdir.join("body.yaml"))):
_, content = http().request(url)
assert body == content
def test_response_headers(tmpdir, httpbin_both):
"""Ensure we can get information from the response"""
url = httpbin_both.url
with vcr.use_cassette(str(tmpdir.join("headers.yaml"))):
resp, _ = http().request(url)
headers = resp.items()
with vcr.use_cassette(str(tmpdir.join("headers.yaml"))):
resp, _ = http().request(url)
assert set(headers) == set(resp.items())
def METHOD_NAME(tmpdir, httpbin_both):
"""Ensure that the effective_url is captured"""
url = httpbin_both.url + "/redirect-to?url=/html"
with vcr.use_cassette(str(tmpdir.join("headers.yaml"))):
resp, _ = http().request(url)
effective_url = resp["content-location"]
assert effective_url == httpbin_both + "/html"
with vcr.use_cassette(str(tmpdir.join("headers.yaml"))):
resp, _ = http().request(url)
assert effective_url == resp["content-location"]
def test_multiple_requests(tmpdir, httpbin_both):
"""Ensure that we can cache multiple requests"""
urls = [httpbin_both.url, httpbin_both.url, httpbin_both.url + "/get", httpbin_both.url + "/bytes/1024"]
with vcr.use_cassette(str(tmpdir.join("multiple.yaml"))) as cass:
[http().request(url) for url in urls]
assert len(cass) == len(urls)
def test_get_data(tmpdir, httpbin_both):
"""Ensure that it works with query data"""
data = urlencode({"some": 1, "data": "here"})
url = httpbin_both.url + "/get?" + data
with vcr.use_cassette(str(tmpdir.join("get_data.yaml"))):
_, res1 = http().request(url)
with vcr.use_cassette(str(tmpdir.join("get_data.yaml"))):
_, res2 = http().request(url)
assert res1 == res2
def test_post_data(tmpdir, httpbin_both):
"""Ensure that it works when posting data"""
data = urlencode({"some": 1, "data": "here"})
url = httpbin_both.url + "/post"
with vcr.use_cassette(str(tmpdir.join("post_data.yaml"))):
_, res1 = http().request(url, "POST", data)
with vcr.use_cassette(str(tmpdir.join("post_data.yaml"))) as cass:
_, res2 = http().request(url, "POST", data)
assert res1 == res2
assert_cassette_has_one_response(cass)
def test_post_unicode_data(tmpdir, httpbin_both):
"""Ensure that it works when posting unicode data"""
data = urlencode({"snowman": u"☃".encode("utf-8")})
url = httpbin_both.url + "/post"
with vcr.use_cassette(str(tmpdir.join("post_data.yaml"))):
_, res1 = http().request(url, "POST", data)
with vcr.use_cassette(str(tmpdir.join("post_data.yaml"))) as cass:
_, res2 = http().request(url, "POST", data)
assert res1 == res2
assert_cassette_has_one_response(cass)
def test_cross_scheme(tmpdir, httpbin, httpbin_secure):
"""Ensure that requests between schemes are treated separately"""
# First fetch a url under https, and then again under https and then
# ensure that we haven't served anything out of cache, and we have two
# requests / response pairs in the cassette
with vcr.use_cassette(str(tmpdir.join("cross_scheme.yaml"))) as cass:
http().request(httpbin_secure.url)
http().request(httpbin.url)
assert len(cass) == 2
assert cass.play_count == 0
def test_decorator(tmpdir, httpbin_both):
"""Test the decorator version of VCR.py"""
url = httpbin_both.url
@vcr.use_cassette(str(tmpdir.join("atts.yaml")))
def inner1():
resp, _ = http().request(url)
return resp["status"]
@vcr.use_cassette(str(tmpdir.join("atts.yaml")))
def inner2():
resp, _ = http().request(url)
return resp["status"]
assert inner1() == inner2()
|
4,133 |
test daemon
|
import logging
import time
from dagster import DagsterEvent, DagsterEventType, EventLogEntry
from dagster._core.instance import DagsterInstance
from dagster._core.test_utils import create_run_for_test
from dagster._daemon.auto_run_reexecution.event_log_consumer import (
EventLogConsumerDaemon,
get_new_cursor,
)
TEST_EVENT_LOG_FETCH_LIMIT = 10
class TestEventLogConsumerDaemon(EventLogConsumerDaemon):
"""Override the actual handlers so that we can just test which run records they receive."""
def __init__(self):
super(TestEventLogConsumerDaemon, self).__init__(
event_log_fetch_limit=TEST_EVENT_LOG_FETCH_LIMIT
)
self.run_records = []
@property
def handle_updated_runs_fns(self):
def stash_run_records(_ctx, run_records):
self.run_records = run_records
yield
return [stash_run_records]
def _create_success_event(instance, run):
dagster_event = DagsterEvent(
event_type_value=DagsterEventType.RUN_SUCCESS.value,
job_name="foo",
message="yay success",
)
event_record = EventLogEntry(
user_message="",
level=logging.INFO,
job_name="foo",
run_id=run.run_id,
error_info=None,
timestamp=time.time(),
dagster_event=dagster_event,
)
instance.handle_new_event(event_record)
def METHOD_NAME(instance: DagsterInstance, empty_workspace_context):
daemon = TestEventLogConsumerDaemon()
list(daemon.run_iteration(empty_workspace_context))
assert daemon.run_records == []
run = create_run_for_test(instance, "test_job")
instance.report_run_failed(run)
list(daemon.run_iteration(empty_workspace_context))
assert [record.dagster_run.run_id for record in daemon.run_records] == [run.run_id]
# not called again for same event
daemon.run_records = [] # reset this since it will keep the value from the last call
list(daemon.run_iteration(empty_workspace_context))
assert daemon.run_records == []
def test_events_exceed_limit(instance: DagsterInstance, empty_workspace_context):
daemon = TestEventLogConsumerDaemon()
list(daemon.run_iteration(empty_workspace_context))
for _ in range(TEST_EVENT_LOG_FETCH_LIMIT + 1):
run = create_run_for_test(instance, "test_job")
instance.report_run_failed(run)
list(daemon.run_iteration(empty_workspace_context))
assert len(daemon.run_records) == TEST_EVENT_LOG_FETCH_LIMIT
list(daemon.run_iteration(empty_workspace_context))
assert len(daemon.run_records) == 1
def test_success_and_failure_events(instance: DagsterInstance, empty_workspace_context):
daemon = TestEventLogConsumerDaemon()
list(daemon.run_iteration(empty_workspace_context))
for _ in range(TEST_EVENT_LOG_FETCH_LIMIT + 1):
run = create_run_for_test(instance, "foo")
instance.report_run_failed(run)
run = create_run_for_test(instance, "foo")
_create_success_event(instance, run)
list(daemon.run_iteration(empty_workspace_context))
assert len(daemon.run_records) == TEST_EVENT_LOG_FETCH_LIMIT * 2
list(daemon.run_iteration(empty_workspace_context))
assert len(daemon.run_records) == 2
FAILURE_KEY = "EVENT_LOG_CONSUMER_CURSOR-PIPELINE_FAILURE"
SUCCESS_KEY = "EVENT_LOG_CONSUMER_CURSOR-PIPELINE_SUCCESS"
def test_cursors(instance: DagsterInstance, empty_workspace_context):
assert instance.run_storage.get_cursor_values({FAILURE_KEY, SUCCESS_KEY}) == {}
daemon = TestEventLogConsumerDaemon()
list(daemon.run_iteration(empty_workspace_context))
assert instance.run_storage.get_cursor_values({FAILURE_KEY, SUCCESS_KEY}) == {
FAILURE_KEY: str(0),
SUCCESS_KEY: str(0),
}
run1 = create_run_for_test(instance, "foo")
run2 = create_run_for_test(instance, "foo")
instance.report_run_failed(run1)
instance.report_run_failed(run2)
list(daemon.run_iteration(empty_workspace_context))
assert len(daemon.run_records) == 2
cursors = instance.run_storage.get_cursor_values({FAILURE_KEY, SUCCESS_KEY})
list(daemon.run_iteration(empty_workspace_context))
assert instance.run_storage.get_cursor_values({FAILURE_KEY, SUCCESS_KEY}) == cursors
for _ in range(5):
instance.report_engine_event("foo", run1)
instance.report_engine_event("foo", run2)
list(daemon.run_iteration(empty_workspace_context))
assert instance.run_storage.get_cursor_values({FAILURE_KEY, SUCCESS_KEY}) == {
FAILURE_KEY: str(int(cursors[FAILURE_KEY]) + 10),
SUCCESS_KEY: str(int(cursors[SUCCESS_KEY]) + 10),
}
run3 = create_run_for_test(instance, "foo")
run4 = create_run_for_test(instance, "foo")
instance.report_run_failed(run3)
instance.report_run_failed(run4)
list(daemon.run_iteration(empty_workspace_context))
assert len(daemon.run_records) == 2
def test_cursor_init(instance: DagsterInstance, empty_workspace_context):
instance.run_storage.wipe()
daemon = TestEventLogConsumerDaemon()
run1 = create_run_for_test(instance, "foo")
run2 = create_run_for_test(instance, "foo")
instance.report_run_failed(run1)
instance.report_run_failed(run2)
list(daemon.run_iteration(empty_workspace_context))
assert len(daemon.run_records) == 0, "Cursors init to latest event"
run3 = create_run_for_test(instance, "foo")
instance.report_run_failed(run3)
list(daemon.run_iteration(empty_workspace_context))
assert len(daemon.run_records) == 1
def test_get_new_cursor():
# hit fetch limit, uses max new_event_ids
assert get_new_cursor(0, 20, 8, [3, 4, 5, 6, 7, 8, 9, 10]) == 10
# hit fetch limit, uses max new_event_ids with overall_max_event_id low
assert get_new_cursor(0, 7, 8, [3, 4, 5, 6, 7, 8, 9, 10]) == 10
# didn't hit fetch limit, uses max new_event_ids with overall_max_event_id low
assert get_new_cursor(0, 7, 8, [3, 4, 5, 6, 7, 8, 9]) == 9
# didn't hit fetch limit, jumps to overall_max_event_id
assert get_new_cursor(0, 20, 4, [1, 2, 3]) == 20
# empty event log
assert get_new_cursor(0, None, 4, []) == 0
# empty overall_max_event_id
assert get_new_cursor(0, None, 5, [2, 3, 4]) == 4
# no new_event_ids
assert get_new_cursor(0, 10, 4, []) == 10
|
4,134 |
set up class
|
from typing import Tuple
import unittest
import panda.tests.safety.common as common
from panda.tests.libpanda import libpanda_py
from panda.tests.safety.common import make_msg
class Buttons:
NONE = 0
RESUME = 1
SET = 2
CANCEL = 4
PREV_BUTTON_SAMPLES = 8
ENABLE_BUTTONS = (Buttons.RESUME, Buttons.SET, Buttons.CANCEL)
class HyundaiButtonBase:
# pylint: disable=no-member,abstract-method
BUTTONS_TX_BUS = 0 # tx on this bus, rx on 0
SCC_BUS = 0 # rx on this bus
def test_button_sends(self):
"""
Only RES and CANCEL buttons are allowed
- RES allowed while controls allowed
- CANCEL allowed while cruise is enabled
"""
self.safety.set_controls_allowed(0)
self.assertFalse(self._tx(self._button_msg(Buttons.RESUME, bus=self.BUTTONS_TX_BUS)))
self.assertFalse(self._tx(self._button_msg(Buttons.SET, bus=self.BUTTONS_TX_BUS)))
self.safety.set_controls_allowed(1)
self.assertTrue(self._tx(self._button_msg(Buttons.RESUME, bus=self.BUTTONS_TX_BUS)))
self.assertFalse(self._tx(self._button_msg(Buttons.SET, bus=self.BUTTONS_TX_BUS)))
for enabled in (True, False):
self._rx(self._pcm_status_msg(enabled))
self.assertEqual(enabled, self._tx(self._button_msg(Buttons.CANCEL, bus=self.BUTTONS_TX_BUS)))
def test_enable_control_allowed_from_cruise(self):
"""
Hyundai non-longitudinal only enables on PCM rising edge and recent button press. Tests PCM enabling with:
- disallowed: No buttons
- disallowed: Buttons that don't enable cruise
- allowed: Buttons that do enable cruise
- allowed: Main button with all above combinations
"""
for main_button in (0, 1):
for btn in range(8):
for _ in range(PREV_BUTTON_SAMPLES): # reset
self._rx(self._button_msg(Buttons.NONE))
self._rx(self._pcm_status_msg(False))
self.assertFalse(self.safety.get_controls_allowed())
self._rx(self._button_msg(btn, main_button=main_button))
self._rx(self._pcm_status_msg(True))
controls_allowed = btn in ENABLE_BUTTONS or main_button
self.assertEqual(controls_allowed, self.safety.get_controls_allowed())
def test_sampling_cruise_buttons(self):
"""
Test that we allow controls on recent button press, but not as button leaves sliding window
"""
self._rx(self._button_msg(Buttons.SET))
for i in range(2 * PREV_BUTTON_SAMPLES):
self._rx(self._pcm_status_msg(False))
self.assertFalse(self.safety.get_controls_allowed())
self._rx(self._pcm_status_msg(True))
controls_allowed = i < PREV_BUTTON_SAMPLES
self.assertEqual(controls_allowed, self.safety.get_controls_allowed())
self._rx(self._button_msg(Buttons.NONE))
class HyundaiLongitudinalBase(common.LongitudinalAccelSafetyTest):
# pylint: disable=no-member,abstract-method
DISABLED_ECU_UDS_MSG: Tuple[int, int]
DISABLED_ECU_ACTUATION_MSG: Tuple[int, int]
@classmethod
def METHOD_NAME(cls):
if cls.__name__ == "HyundaiLongitudinalBase":
cls.safety = None
raise unittest.SkipTest
# override these tests from PandaSafetyTest, hyundai longitudinal uses button enable
def test_disable_control_allowed_from_cruise(self):
pass
def test_enable_control_allowed_from_cruise(self):
pass
def test_sampling_cruise_buttons(self):
pass
def test_cruise_engaged_prev(self):
pass
def test_button_sends(self):
pass
def _pcm_status_msg(self, enable):
        raise NotImplementedError
def _accel_msg(self, accel, aeb_req=False, aeb_decel=0):
raise NotImplementedError
def test_set_resume_buttons(self):
"""
SET and RESUME enter controls allowed on their falling edge.
"""
for btn_prev in range(8):
for btn_cur in range(8):
self._rx(self._button_msg(Buttons.NONE))
self.safety.set_controls_allowed(0)
for _ in range(10):
self._rx(self._button_msg(btn_prev))
self.assertFalse(self.safety.get_controls_allowed())
# should enter controls allowed on falling edge and not transitioning to cancel
should_enable = btn_cur != btn_prev and \
btn_cur != Buttons.CANCEL and \
btn_prev in (Buttons.RESUME, Buttons.SET)
self._rx(self._button_msg(btn_cur))
self.assertEqual(should_enable, self.safety.get_controls_allowed())
def test_cancel_button(self):
self.safety.set_controls_allowed(1)
self._rx(self._button_msg(Buttons.CANCEL))
self.assertFalse(self.safety.get_controls_allowed())
def test_tester_present_allowed(self):
"""
Ensure tester present diagnostic message is allowed to keep ECU knocked out
for longitudinal control.
"""
addr, bus = self.DISABLED_ECU_UDS_MSG
tester_present = libpanda_py.make_CANPacket(addr, bus, b"\x02\x3E\x80\x00\x00\x00\x00\x00")
self.assertTrue(self._tx(tester_present))
not_tester_present = libpanda_py.make_CANPacket(addr, bus, b"\x03\xAA\xAA\x00\x00\x00\x00\x00")
self.assertFalse(self._tx(not_tester_present))
def test_disabled_ecu_alive(self):
"""
If the ECU knockout failed, make sure the relay malfunction is shown
"""
addr, bus = self.DISABLED_ECU_ACTUATION_MSG
self.assertFalse(self.safety.get_relay_malfunction())
self._rx(make_msg(bus, addr, 8))
self.assertTrue(self.safety.get_relay_malfunction())
|
4,135 |
print applied undef
|
from sympy.core import Symbol
from sympy.core import S
from sympy.printing.precedence import precedence
from psydac.pyccel.codegen.printing.pycode import PythonCodePrinter as PyccelPythonCodePrinter
from sympde.topology.derivatives import _partial_derivatives
from sympde.topology import SymbolicExpr
#==============================================================================
class PythonCodePrinter(PyccelPythonCodePrinter):
def __init__(self, settings=None):
self._enable_dependencies = settings.pop('enable_dependencies', True)
PyccelPythonCodePrinter.__init__(self, settings=settings)
# .........................................................
# PSYDAC objects
# .........................................................
def _print_SplBasic(self, expr):
code = ''
if self._enable_dependencies and expr.dependencies:
imports = []
for dep in expr.dependencies:
imports +=dep.imports
code = '\n'.join(self._print(i) for i in imports)
for dep in expr.dependencies:
code = '{code}\n{dep}'.format(code=code,
dep=self._print(dep))
return '{code}\n{func}'.format(code=code, func=self._print(expr.func))
def _print_Kernel(self, expr):
code = ''
if self._enable_dependencies and expr.dependencies:
imports = []
for dep in expr.dependencies:
imports +=dep.imports
code = '\n'.join(self._print(i) for i in imports)
for dep in expr.dependencies:
code = '{code}\n{dep}'.format(code=code,
dep=self._print(dep))
funcs = [func for fs in expr.func for func in fs if func is not None ]
funcs = '\n'.join(self._print(func) for func in funcs)
return '{code}\n{funcs}'.format(code=code, funcs=funcs)
def _print_Interface(self, expr):
code = '\n'.join(self._print(i) for i in expr.imports)
return code +'\n' + self._print(expr.func)
def _print_GltInterface(self, expr):
code = '\n'.join(self._print(i) for i in expr.imports)
return code +'\n' + self._print(expr.func)
def _print_MinusInterfaceOperator(self, expr):
return self._print(expr.args[0])
def _print_PlusInterfaceOperator(self, expr):
return self._print(expr.args[0])
def _print_FloorDiv(self, expr):
return "(({})//({}))".format(self._print(expr.arg1), self._print(expr.arg2))
# .........................................................
# SYMPY objects
# .........................................................
def METHOD_NAME(self, expr):
args = ','.join(self._print(i) for i in expr.args)
fname = self._print(expr.func.__name__)
return '{fname}({args})'.format(fname=fname, args=args)
def _print_PythonTuple(self, expr):
args = ', '.join(self._print(i) for i in expr.args)
return '('+args+')'
def _hprint_Pow(self, expr, rational=False, sqrt='math.sqrt'):
"""Printing helper function for ``Pow``
Notes
=====
This only preprocesses the ``sqrt`` as math formatter
Examples
========
>>> from sympy.functions import sqrt
>>> from sympy.printing.pycode import PythonCodePrinter
>>> from sympy.abc import x
Python code printer automatically looks up ``math.sqrt``.
>>> printer = PythonCodePrinter({'standard':'python3'})
>>> printer._hprint_Pow(sqrt(x), rational=True)
'x**(1/2)'
>>> printer._hprint_Pow(sqrt(x), rational=False)
'math.sqrt(x)'
>>> printer._hprint_Pow(1/sqrt(x), rational=True)
'x**(-1/2)'
>>> printer._hprint_Pow(1/sqrt(x), rational=False)
'1/math.sqrt(x)'
Using sqrt from numpy or mpmath
>>> printer._hprint_Pow(sqrt(x), sqrt='numpy.sqrt')
'numpy.sqrt(x)'
>>> printer._hprint_Pow(sqrt(x), sqrt='mpmath.sqrt')
'mpmath.sqrt(x)'
See Also
========
sympy.printing.str.StrPrinter._print_Pow
"""
PREC = precedence(expr)
if expr.exp == S.Half and not rational:
func = self._module_format(sqrt)
arg = self._print(expr.base)
return '{func}({arg})'.format(func=func, arg=arg)
if expr.is_commutative:
if -expr.exp is S.Half and not rational:
func = self._module_format(sqrt)
num = self._print(S.One)
arg = self._print(expr.base)
return "{num}/{func}({arg})".format(
num=num, func=func, arg=arg)
base_str = self.parenthesize(expr.base, PREC, strict=False)
exp_str = self.parenthesize(expr.exp, PREC, strict=False)
return "{}**{}".format(base_str, exp_str)
def _print_Pow(self, expr, rational=False):
return self._hprint_Pow(expr, rational=rational, sqrt='sqrt')
def _print_Idx(self, expr):
return self._print(str(expr))
#==============================================================================
def pycode(expr, **settings):
""" Converts an expr to a string of Python code
Parameters
==========
expr : Expr
A SymPy expression.
fully_qualified_modules : bool
Whether or not to write out full module names of functions
(``math.sin`` vs. ``sin``). default: ``True``.
enable_dependencies: bool
Whether or not to print dependencies too (EvalField, Kernel, etc)
Examples
========
>>> from sympy import tan, Symbol
>>> from sympy.printing.pycode import pycode
>>> pycode(tan(Symbol('x')) + 1)
'math.tan(x) + 1'
"""
return PythonCodePrinter(settings).doprint(expr)
|
4,136 |
package
|
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.apple import is_apple_os, fix_apple_shared_install_name
from conan.tools.build import cross_building
from conan.tools.env import VirtualBuildEnv, VirtualRunEnv
from conan.tools.files import get, copy, rmdir, rm, export_conandata_patches, apply_conandata_patches
from conan.tools.gnu import Autotools, AutotoolsToolchain, AutotoolsDeps, PkgConfigDeps
from conan.tools.layout import basic_layout
from conan.tools.scm import Version
import os
required_conan_version = ">=1.53.0"
class ZbarConan(ConanFile):
name = "zbar"
license = "LGPL-2.1-only"
url = "https://github.com/conan-io/conan-center-index"
homepage = "http://zbar.sourceforge.net/"
topics = ("barcode", "scanner", "decoder", "reader", "bar")
description = "ZBar is an open source software suite for reading bar codes\
from various sources, such as video streams, image files and raw intensity sensors"
package_type = "library"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"with_video": [True, False],
"with_imagemagick": [True, False],
"with_gtk": [True, False],
"with_qt": [True, False],
"with_python_bindings": [True, False],
"with_x": [True, False],
"with_xshm": [True, False],
"with_xv": [True, False],
"with_jpeg": [True, False],
"enable_pthread": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"with_video": False,
"with_imagemagick": False,
"with_gtk": False,
"with_qt": False,
"with_python_bindings": False,
"with_x": False,
"with_xshm": False,
"with_xv": False,
"with_jpeg": False,
"enable_pthread": True,
}
def export_sources(self):
export_conandata_patches(self)
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
self.options.rm_safe("fPIC")
def layout(self):
basic_layout(self, src_folder="src")
def requirements(self):
self.requires("libiconv/1.17")
if self.options.with_jpeg:
self.requires("libjpeg/9e")
if self.options.with_imagemagick:
self.requires("imagemagick/7.0.11-14")
if self.options.with_gtk:
self.requires("gtk/4.7.0")
if self.options.with_qt:
self.requires("qt/5.15.9")
def validate(self):
if self.settings.os == "Windows":
raise ConanInvalidConfiguration("Zbar can't be built on Windows")
if is_apple_os(self) and not self.options.shared:
raise ConanInvalidConfiguration("Zbar can't be built static on macOS")
if self.options.with_xv: #TODO add when available
self.output.warning("There is no Xvideo package available on Conan (yet). This recipe will use the one present on the system (if available).")
if Version(self.version) >= "0.22" and cross_building(self):
raise ConanInvalidConfiguration(f"{self.ref} can't be built on cross building environment currently because autopoint(part of gettext) doesn't execute correctly.")
def build_requirements(self):
self.tool_requires("gnu-config/cci.20210814")
if not self.conf.get("tools.gnu:pkg_config", check_type=str):
self.tool_requires("pkgconf/1.9.3")
if Version(self.version) >= "0.22":
self.tool_requires("gettext/0.21")
self.tool_requires("libtool/2.4.7")
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
env = VirtualBuildEnv(self)
env.generate()
if not cross_building(self):
env = VirtualRunEnv(self)
env.generate(scope="build")
yes_no = lambda v: "yes" if v else "no"
tc = AutotoolsToolchain(self)
tc.configure_args.extend([
f"--enable-video={yes_no(self.options.with_video)}",
f"--with-imagemagick={yes_no(self.options.with_imagemagick)}",
f"--with-gtk={yes_no(self.options.with_gtk)}",
f"--with-qt={yes_no(self.options.with_qt)}",
f"--with-python={yes_no(self.options.with_python_bindings)}",
f"--with-x={yes_no(self.options.with_x)}",
f"--with-xshm={yes_no(self.options.with_xshm)}",
f"--with-xv={yes_no(self.options.with_xv)}",
f"--with-jpeg={yes_no(self.options.with_jpeg)}",
f"--enable-pthread={yes_no(self.options.enable_pthread)}",
])
env = tc.environment()
if self.settings.os == "Macos" and self.settings.arch == "armv8":
# ./libtool: eval: line 961: syntax error near unexpected token `|'
env.define("NM", "nm")
tc.generate(env)
AutotoolsDeps(self).generate()
PkgConfigDeps(self).generate()
def build(self):
apply_conandata_patches(self)
for gnu_config in [
self.conf.get("user.gnu-config:config_guess", check_type=str),
self.conf.get("user.gnu-config:config_sub", check_type=str),
]:
if gnu_config:
copy(self, os.path.basename(gnu_config),
src=os.path.dirname(gnu_config),
dst=os.path.join(self.source_folder, "config"))
autotools = Autotools(self)
if Version(self.version) >= "0.22":
autotools.autoreconf()
autotools.configure()
autotools.make()
def METHOD_NAME(self):
copy(self, "LICENSE*", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
autotools = Autotools(self)
autotools.install()
rmdir(self, os.path.join(self.package_folder, "share"))
rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
rm(self, "*.la", os.path.join(self.package_folder, "lib"))
fix_apple_shared_install_name(self)
def package_info(self):
self.cpp_info.libs = ["zbar"]
self.cpp_info.set_property("pkg_config_name", "zbar")
if self.settings.os in ("FreeBSD", "Linux") and self.options.enable_pthread:
self.cpp_info.system_libs = ["pthread"]
|
4,137 |
sample
|
import random
from typing import Any, Dict, List, Optional, cast
from lale.helpers import nest_HPparams
from lale.lib.lale import NoOp
from lale.operators import (
BasePipeline,
IndividualOp,
Operator,
OperatorChoice,
PlannedOperator,
clone_op,
make_choice,
make_pipeline,
make_pipeline_graph,
)
class NonTerminal(Operator):
"""Abstract operator for non-terminal grammar rules."""
def get_params(self, deep: bool = True) -> Dict[str, Any]:
out = {}
out["name"] = self._name
return out
def _with_params(self, try_mutate: bool, **impl_params) -> Operator:
"""
This method updates the parameters of the operator. NonTerminals do not support
in-place mutation
"""
known_keys = set(["name"])
if impl_params:
new_keys = set(impl_params.keys())
if not new_keys.issubset(known_keys):
unknowns = {k: v for k, v in impl_params.items() if k not in known_keys}
raise ValueError(
f"NonTerminal._with_params called with unknown parameters: {unknowns}"
)
assert "name" in impl_params
return NonTerminal(impl_params["name"])
else:
return self
def __init__(self, name):
self._name = name
def _has_same_impl(self, other: Operator):
pass
def is_supervised(self):
return False
def validate_schema(self, X, y=None):
raise NotImplementedError() # TODO
def transform_schema(self, s_X):
raise NotImplementedError() # TODO
def input_schema_fit(self):
raise NotImplementedError() # TODO
def is_classifier(self) -> bool:
return False # TODO
class Grammar(Operator):
"""Base class for Lale grammars."""
_variables: Dict[str, Operator]
def get_params(self, deep: bool = True) -> Dict[str, Any]:
out = {}
out["variables"] = self._variables
if deep:
deep_stuff: Dict[str, Any] = {}
for k, v in self._variables.items():
deep_stuff.update(nest_HPparams(k, v.get_params(deep=deep)))
out.update(deep_stuff)
return out
def _with_params(self, try_mutate: bool, **impl_params) -> Operator:
"""
This method updates the parameters of the operator.
If try_mutate is set, it will attempt to update the operator in place
this may not always be possible
"""
# TODO implement support
# from this point of view, Grammar is just a higher order operator
raise NotImplementedError("setting Grammar parameters is not yet supported")
def __init__(self, variables: Optional[Dict[str, Operator]] = None):
if variables is None:
variables = {}
self._variables = variables
def __getattr__(self, name):
if name.startswith("_"):
return self.__dict__[name]
if name not in self._variables:
self._variables[name] = NonTerminal(name)
return clone_op(self._variables[name])
def __setattr__(self, name, value):
if name.startswith("_"):
self.__dict__[name] = value
else:
self._variables[name] = value
def _has_same_impl(self, other: Operator):
pass
def is_supervised(self):
return False
def validate_schema(self, X, y=None):
raise NotImplementedError() # TODO
def transform_schema(self, s_X):
raise NotImplementedError() # TODO
def input_schema_fit(self):
raise NotImplementedError() # TODO
def is_classifier(self) -> bool:
raise NotImplementedError() # TODO
def _unfold(self, op: Operator, n: int) -> Optional[Operator]:
"""Unroll all possible operators from the grammar `g` starting from non-terminal `op` after `n` derivations.
Parameters
----------
op : Operator
starting rule (e.g., `g.start`)
n : int
number of derivations
Returns
-------
Optional[Operator]
"""
if isinstance(op, BasePipeline):
steps = op.steps_list()
new_maybe_steps: List[Optional[Operator]] = [
self._unfold(sop, n) for sop in op.steps_list()
]
if None not in new_maybe_steps:
new_steps: List[Operator] = cast(List[Operator], new_maybe_steps)
step_map = {steps[i]: new_steps[i] for i in range(len(steps))}
new_edges = [(step_map[s], step_map[d]) for s, d in op.edges()]
return make_pipeline_graph(new_steps, new_edges, True)
else:
return None
if isinstance(op, OperatorChoice):
steps = [s for s in (self._unfold(sop, n) for sop in op.steps_list()) if s]
return make_choice(*steps) if steps else None
if isinstance(op, NonTerminal):
return self._unfold(self._variables[op.name()], n - 1) if n > 0 else None
if isinstance(op, IndividualOp):
return op
assert False, f"Unknown operator {op}"
def unfold(self, n: int) -> PlannedOperator:
"""
        Explore this grammar starting from `self.start` and generate all possible choices after `n` derivations.
Parameters
----------
n : int
number of derivations
Returns
-------
PlannedOperator
"""
assert hasattr(self, "start"), "Rule start must be defined"
op = self._unfold(self.start, n)
return make_pipeline(op) if op else NoOp
def METHOD_NAME(self, op: Operator, n: int) -> Optional[Operator]:
"""
        Sample the grammar starting from `op`, that is, choose one element at random for each possible choice.
Parameters
----------
op : Operator
starting rule (e.g., `g.start`)
n : int
number of derivations
Returns
-------
Optional[Operator]
"""
if isinstance(op, BasePipeline):
steps = op.steps_list()
new_maybe_steps: List[Optional[Operator]] = [
self.METHOD_NAME(sop, n) for sop in op.steps_list()
]
if None not in new_maybe_steps:
new_steps: List[Operator] = cast(List[Operator], new_maybe_steps)
step_map = {steps[i]: new_steps[i] for i in range(len(steps))}
new_edges = [(step_map[s], step_map[d]) for s, d in op.edges()]
return make_pipeline_graph(new_steps, new_edges, True)
else:
return None
if isinstance(op, OperatorChoice):
# This choice does not need to be cryptographically secure or hard to predict
return self.METHOD_NAME(random.choice(op.steps_list()), n) # nosec
if isinstance(op, NonTerminal):
return self.METHOD_NAME(getattr(self, op.name()), n - 1) if n > 0 else None
if isinstance(op, IndividualOp):
return op
assert False, f"Unknown operator {op}"
def sample(self, n: int) -> PlannedOperator:
"""
        Sample this grammar starting from `self.start`, that is, choose one element at random for each possible choice.
Parameters
----------
n : int
number of derivations
Returns
-------
PlannedOperator
"""
assert hasattr(self, "start"), "Rule start must be defined"
op = self.METHOD_NAME(self.start, n)
return make_pipeline(op) if op else NoOp
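# Minimal usage sketch for unfold()/sample(); the operator names used below
# (PCA, LogisticRegression, KNeighborsClassifier) are illustrative placeholders,
# not imports made by this module:
#
#   g = Grammar()
#   g.start = g.est
#   g.est = PCA >> g.clf | g.clf
#   g.clf = LogisticRegression | KNeighborsClassifier
#   planned = g.unfold(3)   # every pipeline reachable within 3 derivations
#   one = g.sample(3)       # a single randomly chosen pipeline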
|
4,138 |
read
|
#!/usr/bin/env python3
#
# Cross Platform and Multi Architecture Advanced Binary Emulation Framework
#
import ctypes
from qiling.hw.peripheral import QlPeripheral
class CortexMNvic(QlPeripheral):
class Type(ctypes.Structure):
_fields_ = [
('ISER' , ctypes.c_uint32 * 8),
('RESERVED0', ctypes.c_uint32 * 24),
('ICER' , ctypes.c_uint32 * 8),
('RESERVED1', ctypes.c_uint32 * 24),
('ISPR' , ctypes.c_uint32 * 8),
('RESERVED2', ctypes.c_uint32 * 24),
('ICPR' , ctypes.c_uint32 * 8),
('RESERVED3', ctypes.c_uint32 * 24),
('IABR' , ctypes.c_uint32 * 8),
('RESERVED4', ctypes.c_uint32 * 56),
('IPR' , ctypes.c_uint8 * 240),
('RESERVED5', ctypes.c_uint32 * 644),
('STIR' , ctypes.c_uint32 * 8),
]
def __init__(self, ql, label):
super().__init__(ql, label)
# reference:
# https://www.youtube.com/watch?v=uFBNf7F3l60
# https://developer.arm.com/documentation/ddi0439/b/Nested-Vectored-Interrupt-Controller
self.instance = self.struct()
        ## The maximum number of interrupt requests
self.IRQN_MAX = self.struct.ISER.size * 8
        ## Bit-position mask within a single 32-bit ISER word
self.MASK = self.IRQN_MAX // len(self.instance.ISER) - 1
self.OFFSET = self.MASK.bit_length()
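        # Worked example of the indexing scheme above (with 8 x 32-bit ISER words:
        # IRQN_MAX == 256, MASK == 31, OFFSET == 5): IRQn 37 maps to word
        # 37 >> 5 == 1 and bit 37 & 31 == 5, so enable(37) sets bit 5 of ISER[1].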
## special write behavior
self.triggers = [
(self.struct.ISER, self.enable),
(self.struct.ICER, self.disable),
(self.struct.ISPR, self.set_pending),
(self.struct.ICPR, self.clear_pending),
]
self.intrs = []
self.interrupt_handler = self.ql.arch.interrupt_handler
def enable(self, IRQn):
if IRQn >= 0:
self.instance.ISER[IRQn >> self.OFFSET] |= 1 << (IRQn & self.MASK)
self.instance.ICER[IRQn >> self.OFFSET] |= 1 << (IRQn & self.MASK)
else:
self.ql.hw.scb.enable(IRQn)
def disable(self, IRQn):
if IRQn >= 0:
            self.instance.ISER[IRQn >> self.OFFSET] &= 0xffffffff ^ (1 << (IRQn & self.MASK))  # clear only this IRQ's bit
            self.instance.ICER[IRQn >> self.OFFSET] &= 0xffffffff ^ (1 << (IRQn & self.MASK))
else:
self.ql.hw.scb.disable(IRQn)
def get_enable(self, IRQn):
if IRQn >= 0:
return (self.instance.ISER[IRQn >> self.OFFSET] >> (IRQn & self.MASK)) & 1
else:
return self.ql.hw.scb.get_enable(IRQn)
def set_pending(self, IRQn):
if IRQn >= 0:
self.instance.ISPR[IRQn >> self.OFFSET] |= 1 << (IRQn & self.MASK)
self.instance.ICPR[IRQn >> self.OFFSET] |= 1 << (IRQn & self.MASK)
else:
self.ql.hw.scb.set_pending(IRQn)
if self.get_enable(IRQn):
self.intrs.append(IRQn)
def clear_pending(self, IRQn):
if IRQn >= 0:
            self.instance.ISPR[IRQn >> self.OFFSET] &= 0xffffffff ^ (1 << (IRQn & self.MASK))  # clear only this IRQ's bit
            self.instance.ICPR[IRQn >> self.OFFSET] &= 0xffffffff ^ (1 << (IRQn & self.MASK))
else:
self.ql.hw.scb.clear_pending(IRQn)
def get_pending(self, IRQn):
if IRQn >= 0:
            return (self.instance.ISPR[IRQn >> self.OFFSET] >> (IRQn & self.MASK)) & 1  # pending state lives in ISPR/ICPR
else:
return self.ql.hw.scb.get_pending(IRQn)
def get_priority(self, IRQn):
if IRQn >= 0:
return self.instance.IPR[IRQn]
else:
return self.ql.hw.scb.get_priority(IRQn)
def step(self):
if not self.intrs:
return
self.intrs.sort(key=lambda x: self.get_priority(x))
while self.intrs:
IRQn = self.intrs.pop(0)
self.clear_pending(IRQn)
self.interrupt_handler(self.ql, IRQn)
@QlPeripheral.monitor()
def METHOD_NAME(self, offset: int, size: int) -> int:
buf = ctypes.create_string_buffer(size)
ctypes.memmove(buf, ctypes.addressof(self.instance) + offset, size)
return int.from_bytes(buf.raw, byteorder='little')
@QlPeripheral.monitor()
def write(self, offset: int, size: int, value: int):
def write_byte(ofs, byte):
for var, func in self.triggers:
if var.offset <= ofs < var.offset + var.size:
for i in range(8):
if (byte >> i) & 1:
func(i + (ofs - var.offset) * 8)
break
else:
ipr = self.struct.IPR
if ipr.offset <= ofs < ipr.offset + ipr.size:
byte &= 0xf0 # IPR[3: 0] reserved
ctypes.memmove(ctypes.addressof(self.instance) + ofs, bytes([byte]), 1)
for ofs in range(offset, offset + size):
write_byte(ofs, value & 0xff)
value >>= 8
@property
def region(self):
        return [(0, self.struct.RESERVED5.offset), (self.struct.STIR.offset, ctypes.sizeof(self.struct))]
|
4,139 |
test false bool home
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Unit tests for the `iris.io.expand_filespecs` function."""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests # isort:skip
import os
from pathlib import Path
import shutil
import tempfile
import textwrap
import iris.io as iio
class TestExpandFilespecs(tests.IrisTest):
def setUp(self):
tests.IrisTest.setUp(self)
self.tmpdir = os.path.realpath(tempfile.mkdtemp())
self.fnames = ["a.foo", "b.txt"]
for fname in self.fnames:
with open(os.path.join(self.tmpdir, fname), "w") as fh:
fh.write("anything")
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test_absolute_path(self):
result = iio.expand_filespecs([os.path.join(self.tmpdir, "*")])
expected = [os.path.join(self.tmpdir, fname) for fname in self.fnames]
self.assertEqual(result, expected)
def test_double_slash(self):
product = iio.expand_filespecs(["//" + os.path.join(self.tmpdir, "*")])
predicted = [os.path.join(self.tmpdir, fname) for fname in self.fnames]
self.assertEqual(product, predicted)
def test_relative_path(self):
cwd = os.getcwd()
try:
os.chdir(self.tmpdir)
item_out = iio.expand_filespecs(["*"])
item_in = [
os.path.join(self.tmpdir, fname) for fname in self.fnames
]
self.assertEqual(item_out, item_in)
finally:
os.chdir(cwd)
def test_return_order(self):
# It is really quite important what order we return the
# files. They should be in the order that was provided,
# so that we can control the order of load (for instance,
# this can be used with PP files to ensure that there is
# a surface reference).
patterns = [
os.path.join(self.tmpdir, "a.*"),
os.path.join(self.tmpdir, "b.*"),
]
expected = [
os.path.join(self.tmpdir, fname) for fname in ["a.foo", "b.txt"]
]
result = iio.expand_filespecs(patterns)
self.assertEqual(result, expected)
result = iio.expand_filespecs(patterns[::-1])
self.assertEqual(result, expected[::-1])
def test_no_files_found(self):
msg = r"\/no_exist.txt\" didn\'t match any files"
with self.assertRaisesRegex(IOError, msg):
iio.expand_filespecs([os.path.join(self.tmpdir, "no_exist.txt")])
def test_files_and_none(self):
with self.assertRaises(IOError) as err:
iio.expand_filespecs(
[
os.path.join(self.tmpdir, "does_not_exist.txt"),
os.path.join(self.tmpdir, "*"),
]
)
expected = (
textwrap.dedent(
"""
One or more of the files specified did not exist:
* "{0}/does_not_exist.txt" didn\'t match any files
- "{0}/*" matched 2 file(s)
"""
)
.strip()
.format(self.tmpdir)
)
self.assertMultiLineEqual(str(err.exception), expected)
def test_false_bool_absolute(self):
tempdir = self.tmpdir
msg = os.path.join(tempdir, "no_exist.txt")
(result,) = iio.expand_filespecs([msg], False)
self.assertEqual(result, msg)
def METHOD_NAME(self):
# ensure that not only does files_expected not error,
# but that the path is still expanded from a ~
msg = str(Path().home() / "no_exist.txt")
(result,) = iio.expand_filespecs(["~/no_exist.txt"], False)
self.assertEqual(result, msg)
def test_false_bool_relative(self):
cwd = os.getcwd()
try:
os.chdir(self.tmpdir)
item_out = iio.expand_filespecs(["no_exist.txt"], False)
item_in = [os.path.join(self.tmpdir, "no_exist.txt")]
self.assertEqual(item_out, item_in)
finally:
os.chdir(cwd)
if __name__ == "__main__":
tests.main()
|
4,140 |
inductor aten mm
|
import torch
import torch._dynamo
import torch._dynamo.config
import torch._inductor.config as config
import triton
from benchmark_helper import time_with_torch_timer
# The flag below controls whether to allow TF32 on matmul. This flag defaults to True.
torch.backends.cuda.matmul.allow_tf32 = True
# The flag below controls whether to allow TF32 on cuDNN. This flag defaults to True.
torch.backends.cudnn.allow_tf32 = True
@torch._dynamo.optimize("inductor", nopython=True)
def METHOD_NAME(a, b):
return torch.mm(a, b)
@torch._dynamo.optimize("inductor", nopython=True)
def inductor_triton_mm(a, b):
return torch.mm(a, b)
def torch_mm(a, b):
return torch.mm(a, b)
def triton_mm(a, b):
return triton.ops.matmul(a, b)
def test_total_time(shapes):
print("shape; torch mm; triton mm; inductor aten mm; inductor triton mm")
for i in range(len(shapes)):
a_shape, b_shape = shapes[i]
print(a_shape, "x", b_shape, end="; ")
a = torch.randn(a_shape, device="cuda", dtype=torch.float16)
b = torch.randn(b_shape, device="cuda", dtype=a.dtype)
config.triton.mm = "aten"
METHOD_NAME(a, b)
config.triton.mm = "triton"
inductor_triton_mm(a, b)
torch_ms = time_with_torch_timer(torch_mm, (a, b)).mean * 1000
triton_ms = time_with_torch_timer(triton_mm, (a, b)).mean * 1000
config.triton.mm = "aten"
ind_aten_ms = time_with_torch_timer(METHOD_NAME, (a, b)).mean * 1000
config.triton.mm = "triton"
ind_triton_ms = time_with_torch_timer(inductor_triton_mm, (a, b)).mean * 1000
print(torch_ms, triton_ms, ind_aten_ms, ind_triton_ms, sep="; ")
torch._dynamo.reset()
def test_GPU_time(shapes):
print("shape; torch mm; triton mm; inductor aten mm; inductor triton mm")
for i in range(len(shapes)):
a_shape, b_shape = shapes[i]
print(a_shape, "x", b_shape, end="; ")
a = torch.randn(a_shape, device="cuda", dtype=torch.float16)
b = torch.randn(b_shape, device="cuda", dtype=a.dtype)
config.triton.mm = "aten"
METHOD_NAME(a, b)
config.triton.mm = "triton"
inductor_triton_mm(a, b)
torch_ms, _, _ = triton.testing.do_bench(lambda: torch_mm(a, b))
triton_ms, _, _ = triton.testing.do_bench(lambda: triton_mm(a, b))
ind_aten_ms, _, _ = triton.testing.do_bench(lambda: METHOD_NAME(a, b))
ind_triton_ms, _, _ = triton.testing.do_bench(lambda: inductor_triton_mm(a, b))
print(torch_ms, triton_ms, ind_aten_ms, ind_triton_ms, sep="; ")
torch._dynamo.reset()
if __name__ == "__main__":
shapes = [
# alexnet
([128, 9216], [9216, 4096]),
([128, 4096], [4096, 4096]),
([128, 4096], [4096, 1000]),
# BERT
([2048, 768], [768, 768]),
([2048, 768], [768, 3072]),
([2048, 3072], [3072, 768]),
# hf_GPT2
([1024, 768], [768, 768]),
([1024, 768], [768, 3072]),
([1024, 3072], [3072, 768]),
([1024, 768], [768, 2304]),
]
print("test total time")
test_total_time(shapes)
print("test GPU time")
test_GPU_time(shapes)
# Results Preview on AWS AI cluster
"""
test total time
shape; torch mm; triton mm; inductor aten mm; inductor triton mm
[128, 9216] x [9216, 4096]; 0.07240759208798409; 0.10885953903198242; 0.20063146017491817; 0.20054904278367758
[128, 4096] x [4096, 4096]; 0.03640300128608942; 0.10960095096379519; 0.09948539081960917; 0.0996188772842288
[128, 4096] x [4096, 1000]; 0.02215010579675436; 0.12592008337378502; 0.031120930798351765; 0.0370654184371233
[2048, 768] x [768, 768]; 0.023501068353652954; 0.10804693214595318; 0.03004650119692087; 0.0276932492852211
[2048, 768] x [768, 3072]; 0.045639658346772194; 0.10883208829909563; 0.062736920081079; 0.06480381824076176
[2048, 3072] x [3072, 768]; 0.054093082435429096; 0.10804777964949608; 0.08744294755160809; 0.07766005117446184
[1024, 768] x [768, 768]; 0.021525858901441097; 0.10909941978752613; 0.02656651195138693; 0.02683836966753006
[1024, 768] x [768, 3072]; 0.027319076471030712; 0.10825308971107006; 0.040118801407516; 0.039282338693737984
[1024, 3072] x [3072, 768]; 0.034132059663534164; 0.10594133753329515; 0.05069758277386427; 0.04572632722556591
[1024, 768] x [768, 2304]; 0.02529360819607973; 0.10486091021448374; 0.03724239766597748; 0.036449190229177475
test GPU time
shape; torch mm; triton mm; inductor aten mm; inductor triton mm
[128, 9216] x [9216, 4096]; 0.09113600105047226; 0.09011200070381165; 0.21606400609016418; 0.21606400609016418
[128, 4096] x [4096, 4096]; 0.053247999399900436; 0.05222399905323982; 0.1157120019197464; 0.1157120019197464
[128, 4096] x [4096, 1000]; 0.026623999699950218; 0.02969600073993206; 0.04710400104522705; 0.05222399905323982
[2048, 768] x [768, 768]; 0.02457600086927414; 0.020479999482631683; 0.04095999896526337; 0.03993599861860275
[2048, 768] x [768, 3072]; 0.05119999870657921; 0.05222399905323982; 0.07475200295448303; 0.07577600330114365
[2048, 3072] x [3072, 768]; 0.05939200147986412; 0.05222399905323982; 0.09830400347709656; 0.0870399996638298
[1024, 768] x [768, 768]; 0.01945599913597107; 0.016383999958634377; 0.03276799991726875; 0.03276799991726875
[1024, 768] x [768, 3072]; 0.03174399957060814; 0.03276799991726875; 0.053247999399900436; 0.053247999399900436
[1024, 3072] x [3072, 768]; 0.04403200000524521; 0.03379200026392937; 0.06860800087451935; 0.062463998794555664
[1024, 768] x [768, 2304]; 0.02969600073993206; 0.02969600073993206; 0.04915200173854828; 0.048128001391887665
"""
|
4,141 |
set target region
|
#
# AUTHOR(S): Caitlin Haedrich <caitlin DOT haedrich AT gmail>
#
# PURPOSE: This module contains utility functions for InteractiveMap.
#
# COPYRIGHT: (C) 2021-2022 Caitlin Haedrich, and by the GRASS Development Team
#
# This program is free software under the GNU General Public
# License (>=v2). Read the file COPYING that comes with GRASS
# for details.
"""Utility functions warpping existing processes in a suitable way"""
import grass.script as gs
def get_region(env=None):
"""Returns current computational region as dictionary.
Additionally, it adds long key names.
"""
region = gs.region(env=env)
region["east"] = region["e"]
region["west"] = region["w"]
region["north"] = region["n"]
region["south"] = region["s"]
return region
def get_location_proj_string(env=None):
"""Returns projection of environment in PROJ.4 format"""
out = gs.read_command("g.proj", flags="jf", env=env)
return out.strip()
def reproject_region(region, from_proj, to_proj):
"""Reproject boundary of region from one projection to another.
:param dict region: region to reproject as a dictionary with long key names
output of get_region
:param str from_proj: PROJ.4 string of region; output of get_location_proj_string
:param str in_proj: PROJ.4 string of target location;
output of get_location_proj_string
:return dict region: reprojected region as a dictionary with long key names
"""
region = region.copy()
# reproject all corners, otherwise reproj. region may be underestimated
# even better solution would be reprojecting vector region like in r.import
proj_input = (
f"{region['east']} {region['north']}\n"
f"{region['west']} {region['north']}\n"
f"{region['east']} {region['south']}\n"
f"{region['west']} {region['south']}\n"
)
proc = gs.start_command(
"m.proj",
input="-",
separator=" , ",
proj_in=from_proj,
proj_out=to_proj,
flags="d",
stdin=gs.PIPE,
stdout=gs.PIPE,
stderr=gs.PIPE,
)
proc.stdin.write(gs.encode(proj_input))
proc.stdin.close()
proc.stdin = None
proj_output, stderr = proc.communicate()
if proc.returncode:
raise RuntimeError(
_("Encountered error while running m.proj: {}").format(stderr)
)
output = gs.decode(proj_output).splitlines()
# get the largest bbox
latitude_list = []
longitude_list = []
for row in output:
longitude, latitude, unused = row.split(" ")
longitude_list.append(float(longitude))
latitude_list.append(float(latitude))
region["east"] = max(longitude_list)
region["north"] = max(latitude_list)
region["west"] = min(longitude_list)
region["south"] = min(latitude_list)
return region
def estimate_resolution(raster, mapset, location, dbase, env):
"""Estimates resolution of reprojected raster.
:param str raster: name of raster
:param str mapset: mapset of raster
:param str location: name of source location
:param str dbase: path to source database
:param dict env: target environment
:return float estimate: estimated resolution of raster in destination
environment
"""
output = gs.read_command(
"r.proj",
flags="g",
input=raster,
mapset=mapset,
location=location,
dbase=dbase,
env=env,
).strip()
params = gs.parse_key_val(output, vsep=" ")
output = gs.read_command("g.region", flags="ug", env=env, **params)
output = gs.parse_key_val(output, val_type=float)
cell_ns = (output["n"] - output["s"]) / output["rows"]
cell_ew = (output["e"] - output["w"]) / output["cols"]
estimate = (cell_ew + cell_ns) / 2.0
return estimate
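# Worked example of the estimate above: if the reprojected extent spans 1000 m
# north-south over 100 rows and 2000 m east-west over 200 cols, then
# cell_ns == cell_ew == 10, giving an estimated resolution of 10.0.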
def setup_location(name, path, epsg, src_env):
"""Setup temporary location with different projection but
same computational region as source location
:param str name: name of new location
:param path path: path to new location's database
:param str epsg: EPSG code
:param dict src_env: source environment
:return str rcfile: name of new locations rcfile
:return dict new_env: new environment
"""
# Create new environment
rcfile, new_env = gs.create_environment(path, name, "PERMANENT")
# Location and mapset
gs.create_location(path, name, epsg=epsg, overwrite=True)
# Reproject region
METHOD_NAME(src_env, new_env)
return rcfile, new_env
def METHOD_NAME(src_env, tgt_env):
"""Set target region based on source region.
Number of rows and columns is preserved.
"""
region = get_region(env=src_env)
from_proj = get_location_proj_string(src_env)
to_proj = get_location_proj_string(env=tgt_env)
new_region = reproject_region(region, from_proj, to_proj)
# Set region to match original region extent
gs.run_command(
"g.region",
n=new_region["north"],
s=new_region["south"],
e=new_region["east"],
w=new_region["west"],
rows=new_region["rows"],
cols=new_region["cols"],
env=tgt_env,
)
def get_map_name_from_d_command(module, **kwargs):
"""Returns map name from display command.
Assumes only positional parameters.
When more maps are present (e.g., d.rgb), it returns only 1.
Returns empty string if fails to find it.
"""
special = {"d.his": "hue", "d.legend": "raster", "d.rgb": "red", "d.shade": "shade"}
parameter = special.get(module, "map")
return kwargs.get(parameter, "")
def get_rendering_size(region, width, height, default_width=600, default_height=400):
"""Returns the rendering width and height based
on the region aspect ratio.
:param dict region: region dictionary
:param integer width: rendering width (can be None)
:param integer height: rendering height (can be None)
:param integer default_width: default rendering width (can be None)
:param integer default_height: default rendering height (can be None)
:return tuple (width, height): adjusted width and height
When both width and height are provided, values are returned without
adjustment. When one value is provided, the other is computed
based on the region aspect ratio. When no dimension is given,
the default width or height is used and the other dimension computed.
"""
if width and height:
return (width, height)
region_width = region["e"] - region["w"]
region_height = region["n"] - region["s"]
if width:
return (width, round(width * region_height / region_width))
if height:
return (round(height * region_width / region_height), height)
if region_height > region_width:
return (round(default_height * region_width / region_height), default_height)
return (default_width, round(default_width * region_height / region_width))
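# Worked example with the defaults above: for a region twice as wide as it is tall,
# e.g. region = {"n": 10, "s": 0, "e": 20, "w": 0}, get_rendering_size(region, None, None)
# keeps the default width and scales the height to preserve the aspect ratio,
# returning (600, 300).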
|
4,142 |
values or range
|
from portality.lib import dates, plugin
import json
from portality.lib.dates import FMT_DATE_STD
DO_TYPE_TO_JSON_TYPE = {
"str": "string",
"utcdatetime": "timestamp",
"integer": 0,
"bool": True,
"float": 0.0,
"isolang": "string",
"url": "string",
"isolang_2letter": "string",
"bigenddate" : "datestamp"
}
DO_TYPE_TO_DATATYPE = {
"str": "str",
"utcdatetime": "str",
"integer": "int",
"bool": "bool",
"float": "float",
"isolang": "str",
"url": "str",
"isolang_2letter": "str",
"bigenddate" : "str"
}
DO_TYPE_TO_FORMAT = {
"str": "",
"utcdatetime": "UTC ISO formatted date: YYYY-MM-DDTHH:MM:SSZ",
"integer": "",
"bool": "",
"float": "",
"isolang": "3 letter ISO language code",
"url": "URL",
"isolang_2letter": "2 letter ISO language code",
"bigenddate" : "Date, year first: YYYY-MM-DD"
}
def format(klazz, example, fields):
title = "# " + klazz.__name__
intro = "The JSON structure of the model is as follows:"
struct = "```json\n" + json.dumps(example, indent=4, sort_keys=True) + "\n```"
table_intro = "Each of the fields is defined as laid out in the table below. All fields are optional unless otherwise specified:"
table = "| Field | Description | Datatype | Format | Allowed Values |\n"
table += "| ----- | ----------- | -------- | ------ | -------------- |\n"
keys = list(fields.keys())
keys.sort()
for k in keys:
desc, datatype, format, values = fields.get(k)
table += "| {field} | {desc} | {datatype} | {format} | {values} |\n".format(field=k, desc=desc, datatype=datatype, format=format, values=values)
return title + "\n\n" + intro + "\n\n" + struct + "\n\n" + table_intro + "\n\n" + table
def document(klazz, field_descriptions):
inst = klazz()
base_struct = inst.__seamless_struct__.raw
fields = {}
def do_document(path, struct, fields):
example = {}
# first do all the fields at this level
for simple_field, instructions in struct.get('fields', {}).items():
example[simple_field] = type_map(instructions.get("coerce"))
fields[path + simple_field] = (field_descriptions.get(path + simple_field, ""), datatype(instructions.get("coerce")), form(instructions.get("coerce")), METHOD_NAME(instructions.get("allowed_values"), instructions.get("allowed_range")))
# now do all the objects at this level
for obj in struct.get('objects', []):
newpath = obj + "." if not path else path + obj + "."
instructions = struct.get('structs', {}).get(obj, {})
example[obj] = do_document(newpath, instructions, fields)
# finally do all the lists at this level
for l, instructions in struct.get('lists', {}).items():
if instructions['contains'] == 'field':
example[l] = [type_map(instructions.get("coerce"))]
fields[path + l] = (field_descriptions.get(path + l, ""), datatype(instructions.get("coerce")), form(instructions.get("coerce")), METHOD_NAME(instructions.get("allowed_values"), instructions.get("allowed_range")))
elif instructions['contains'] == 'object':
newpath = l + "." if not path else path + l + "."
inst = struct.get('structs', {}).get(l, {})
example[l] = [do_document(newpath, inst, fields)]
return example
example = do_document("", base_struct, fields)
return example, fields
def type_map(t):
type = DO_TYPE_TO_JSON_TYPE.get(t, "string")
if type == "timestamp":
return dates.now_str()
elif type == "datestamp":
return dates.now_str(FMT_DATE_STD)
return type
def datatype(t):
return DO_TYPE_TO_DATATYPE.get(t, "str")
def form(t):
return DO_TYPE_TO_FORMAT.get(t, "")
def METHOD_NAME(vals, range):
if vals is not None:
return ", ".join(vals)
if range is not None:
lower, upper = range
if lower is not None and upper is not None:
return lower + " to " + upper
        elif lower is not None and upper is None:
            return "greater than " + lower
        elif lower is None and upper is not None:
            return "less than " + upper
return ""
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-k", "--klazz", help="class to document")
parser.add_argument("-o", "--out", help="output file")
parser.add_argument("-f", "--fields", action="append", help="field descriptions table(s)")
args = parser.parse_args()
descriptions = {}
if args.fields:
for field_file in args.fields:
with open(field_file) as f:
fds = f.read()
lines = fds.split("\n")
for line in lines:
sep = line.find(":")
descriptions[line[:sep]] = line[sep + 1:].strip()
k = plugin.load_class_raw(args.klazz)
example, fields = document(k, descriptions)
doc = format(k, example, fields)
with open(args.out, "w") as f:
f.write(doc)
|
4,143 |
last modified at
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'AmountResponse',
'SystemDataResponse',
]
@pulumi.output_type
class AmountResponse(dict):
"""
The amount.
"""
def __init__(__self__, *,
currency: Optional[str] = None,
value: Optional[float] = None):
"""
The amount.
:param str currency: The type of currency being used for the value.
:param float value: Amount value.
"""
if currency is not None:
pulumi.set(__self__, "currency", currency)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def currency(self) -> Optional[str]:
"""
The type of currency being used for the value.
"""
return pulumi.get(self, "currency")
@property
@pulumi.getter
def value(self) -> Optional[float]:
"""
Amount value.
"""
return pulumi.get(self, "value")
@pulumi.output_type
class SystemDataResponse(dict):
"""
Metadata pertaining to creation and last modification of the resource.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "createdAt":
suggest = "created_at"
elif key == "createdBy":
suggest = "created_by"
elif key == "createdByType":
suggest = "created_by_type"
elif key == "lastModifiedAt":
suggest = "last_modified_at"
elif key == "lastModifiedBy":
suggest = "last_modified_by"
elif key == "lastModifiedByType":
suggest = "last_modified_by_type"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SystemDataResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SystemDataResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SystemDataResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
created_at: Optional[str] = None,
created_by: Optional[str] = None,
created_by_type: Optional[str] = None,
METHOD_NAME: Optional[str] = None,
last_modified_by: Optional[str] = None,
last_modified_by_type: Optional[str] = None):
"""
Metadata pertaining to creation and last modification of the resource.
:param str created_at: The timestamp of resource creation (UTC).
:param str created_by: The identity that created the resource.
:param str created_by_type: The type of identity that created the resource.
:param str last_modified_at: The timestamp of resource last modification (UTC)
:param str last_modified_by: The identity that last modified the resource.
:param str last_modified_by_type: The type of identity that last modified the resource.
"""
if created_at is not None:
pulumi.set(__self__, "created_at", created_at)
if created_by is not None:
pulumi.set(__self__, "created_by", created_by)
if created_by_type is not None:
pulumi.set(__self__, "created_by_type", created_by_type)
if METHOD_NAME is not None:
pulumi.set(__self__, "last_modified_at", METHOD_NAME)
if last_modified_by is not None:
pulumi.set(__self__, "last_modified_by", last_modified_by)
if last_modified_by_type is not None:
pulumi.set(__self__, "last_modified_by_type", last_modified_by_type)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> Optional[str]:
"""
The timestamp of resource creation (UTC).
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter(name="createdBy")
def created_by(self) -> Optional[str]:
"""
The identity that created the resource.
"""
return pulumi.get(self, "created_by")
@property
@pulumi.getter(name="createdByType")
def created_by_type(self) -> Optional[str]:
"""
The type of identity that created the resource.
"""
return pulumi.get(self, "created_by_type")
@property
@pulumi.getter(name="lastModifiedAt")
def METHOD_NAME(self) -> Optional[str]:
"""
The timestamp of resource last modification (UTC)
"""
return pulumi.get(self, "last_modified_at")
@property
@pulumi.getter(name="lastModifiedBy")
def last_modified_by(self) -> Optional[str]:
"""
The identity that last modified the resource.
"""
return pulumi.get(self, "last_modified_by")
@property
@pulumi.getter(name="lastModifiedByType")
def last_modified_by_type(self) -> Optional[str]:
"""
The type of identity that last modified the resource.
"""
return pulumi.get(self, "last_modified_by_type")
|
4,144 |
safe int
|
# Copyright (c) 2016 Tomas Machalek <[email protected]>
# Copyright (c) 2016 Charles University, Faculty of Arts,
# Institute of the Czech National Corpus
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# dated June, 1991.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
import collections
from functools import partial
from corplib.corpus import AbstractKCorpus
from .cache import TextTypesCache
class StructNormsCalc(object):
"""
Adds size information of texts related to the respective attribute values.
An instance is always bound to a concrete structure and required value type.
"""
def __init__(self, corpus: AbstractKCorpus, structname: str, subcnorm: str):
"""
arguments:
corpus --
structname -- a name of a corpus structure
subcnorm -- a type of value to be collected (allowed values: freq, tokens)
"""
self._corp = corpus
self._structname = structname
self._struct = self._corp.get_struct(structname)
self._subcnorm = subcnorm
self._normvals = None
@property
def normvals(self):
if self._normvals is None:
self._normvals = self._calc_normvals()
return self._normvals
def _calc_normvals(self):
if self._subcnorm == 'freq':
normvals = dict((self._struct.beg(i), 1) for i in range(self._struct.size()))
elif self._subcnorm == 'tokens':
normvals = dict((self._struct.beg(i), self._struct.end(i) - self._struct.beg(i))
for i in range(self._struct.size()))
else:
nas = self._struct.get_attr(self._subcnorm).pos2str
normvals = dict((self._struct.beg(i), self.METHOD_NAME(nas(i)))
for i in range(self._struct.size()))
return normvals
@staticmethod
def METHOD_NAME(s):
try:
return int(s)
except ValueError:
return 0
async def compute_norm(self, attrname, value):
attr = self._struct.get_attr(attrname)
valid = attr.str2id(value)
r = self._corp.filter_query(self._struct.attr_val(attrname, valid))
cnt = 0
while not r.end():
cnt += self.normvals[r.peek_beg()]
r.next()
return cnt
class CachedStructNormsCalc(StructNormsCalc):
"""
A caching variant of StructNormsCalc. Uses 'db' key=>value plug-in to
store values.
"""
def __init__(self, corpus: AbstractKCorpus, structname: str, subcnorm: str, tt_cache: TextTypesCache):
"""
arguments:
corpus --
structname -- a name of a corpus structure
subcnorm -- a type of value to be collected (allowed values: freq, tokens)
tt_cache -- a TextTypesCache instance used to store computed values
"""
super().__init__(corpus, structname, subcnorm)
self._tt_cache = tt_cache
self._data = None
async def _init_data(self):
mkdict = partial(collections.defaultdict, lambda: {})
try:
self._data = mkdict(await self._tt_cache.get_attr_values(self._corp.corpname, self._structname, self._subcnorm))
except (IOError, TypeError):
self._data = mkdict()
async def compute_norm(self, attrname, value):
if self._data is None:
await self._init_data()
if attrname not in self._data or value not in self._data[attrname]:
self._data[attrname][value] = await super(
CachedStructNormsCalc, self).compute_norm(attrname, value)
await self._tt_cache.set_attr_values(self._corp.corpname, self._structname, self._subcnorm, self._data)
return self._data[attrname][value]
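# Minimal sketch (not part of the original module) of the fallback behaviour of the
# static helper StructNormsCalc.METHOD_NAME above: structure attribute values that are
# not numeric simply contribute a norm of 0 instead of raising.
def _demo_safe_int():
    assert StructNormsCalc.METHOD_NAME("1024") == 1024
    assert StructNormsCalc.METHOD_NAME("n/a") == 0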
|
4,145 |
new main mod
|
#
# Copyright (c) 2009- Spyder Kernels Contributors
#
# Licensed under the terms of the MIT License
# (see spyder_kernels/__init__.py for details)
import linecache
import os.path
import types
import sys
def METHOD_NAME(filename, modname):
"""
Reimplemented from IPython/core/interactiveshell.py to avoid caching
and clearing recursive namespace.
"""
filename = os.path.abspath(filename)
main_mod = types.ModuleType(
modname,
doc="Module created for script run in IPython")
main_mod.__file__ = filename
# It seems pydoc (and perhaps others) needs any module instance to
# implement a __nonzero__ method
main_mod.__nonzero__ = lambda: True
return main_mod
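# Hedged sketch (illustrative only) of the helper above: it returns a throwaway
# "__main__"-style module whose __dict__ can serve as a fresh run namespace.
def _demo_main_mod():
    mod = METHOD_NAME("/tmp/fake_script.py", "__main__")
    assert mod.__file__.endswith("fake_script.py")
    return mod.__dict__  # usable as ns_globals by NamespaceManager below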
class NamespaceManager:
"""
Get a namespace and set __file__ to filename for this namespace.
The namespace is either namespace, the current namespace if
current_namespace is True, or a new namespace.
"""
def __init__(
self,
shell,
filename,
current_namespace=False,
file_code=None,
context_locals=None,
context_globals=None,
):
self.shell = shell
self.filename = filename
self.ns_globals = None
self.ns_locals = None
self.current_namespace = current_namespace
self._previous_filename = None
self._previous_main = None
self._reset_main = False
self._file_code = file_code
if context_globals is None:
context_globals = shell.user_ns
self.context_globals = context_globals
self.context_locals = context_locals
def __enter__(self):
"""
Prepare the namespace.
"""
# Save previous __file__
if self.current_namespace:
self.ns_globals = self.context_globals
self.ns_locals = self.context_locals
if '__file__' in self.ns_globals:
self._previous_filename = self.ns_globals['__file__']
self.ns_globals['__file__'] = self.filename
else:
main_mod = METHOD_NAME(self.filename, '__main__')
self.ns_globals = main_mod.__dict__
self.ns_locals = None
# Needed to allow pickle to reference main
if '__main__' in sys.modules:
self._previous_main = sys.modules['__main__']
sys.modules['__main__'] = main_mod
self._reset_main = True
# Save current namespace for access by variable explorer
self.shell.add_namespace_manager(self)
if (self._file_code is not None
and isinstance(self._file_code, bytes)):
try:
self._file_code = self._file_code.decode()
except UnicodeDecodeError:
# Setting the cache is not supported for non utf-8 files
self._file_code = None
if self._file_code is not None:
# '\n' is used instead of the native line endings. (see linecache)
# mtime is set to None to avoid a cache update.
linecache.cache[self.filename] = (
len(self._file_code), None,
[line + '\n' for line in self._file_code.splitlines()],
self.filename)
return self.ns_globals, self.ns_locals
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Reset the namespace.
"""
self.shell.remove_namespace_manager(self)
if self._previous_filename:
self.ns_globals['__file__'] = self._previous_filename
elif '__file__' in self.ns_globals:
self.ns_globals.pop('__file__')
if not self.current_namespace:
if self.context_locals is not None:
self.context_locals.update(self.ns_globals)
else:
self.context_globals.update(self.ns_globals)
if self._previous_main:
sys.modules['__main__'] = self._previous_main
elif '__main__' in sys.modules and self._reset_main:
del sys.modules['__main__']
if self.filename in linecache.cache and os.path.exists(self.filename):
linecache.cache.pop(self.filename)
|
4,146 |
update data
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from __future__ import annotations
import logging
from schema import Schema, Optional
from typing_extensions import Literal
from nni import ClassArgsValidator
from nni.assessor import Assessor, AssessResult
from nni.utils import extract_scalar_history
logger = logging.getLogger('medianstop_Assessor')
class MedianstopClassArgsValidator(ClassArgsValidator):
def validate_class_args(self, **kwargs):
Schema({
Optional('optimize_mode'): self.choices('optimize_mode', 'maximize', 'minimize'),
Optional('start_step'): self.range('start_step', int, 0, 9999),
}).validate(kwargs)
class MedianstopAssessor(Assessor):
"""
The median stopping rule stops a pending trial X at step S
if the trial’s best objective value by step S is strictly worse than the median value
of the running averages of all completed trials’ objectives reported up to step S
Paper: `Google Vizier: A Service for Black-Box Optimization
<https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46180.pdf>`__
Examples
--------
.. code-block::
config.assessor.name = 'Medianstop'
config.assessor.class_args = {
'optimize_mode': 'maximize',
'start_step': 5
}
Parameters
----------
optimize_mode
Whether optimize to minimize or maximize trial result.
start_step
A trial is determined to be stopped or not
only after receiving start_step number of reported intermediate results.
"""
def __init__(self, optimize_mode: Literal['minimize', 'maximize'] = 'maximize', start_step: int = 0):
self._start_step = start_step
self._running_history = dict()
self._completed_avg_history = dict()
if optimize_mode == 'maximize':
self._high_better = True
elif optimize_mode == 'minimize':
self._high_better = False
else:
self._high_better = True
logger.warning('unrecognized optimize_mode %s', optimize_mode)
def METHOD_NAME(self, trial_job_id, trial_history):
"""update data
Parameters
----------
trial_job_id : int
trial job id
trial_history : list
The history performance matrix of each trial
"""
if trial_job_id not in self._running_history:
self._running_history[trial_job_id] = []
self._running_history[trial_job_id].extend(trial_history[len(self._running_history[trial_job_id]):])
def trial_end(self, trial_job_id, success):
if trial_job_id in self._running_history:
if success:
cnt = 0
history_sum = 0
self._completed_avg_history[trial_job_id] = []
for each in self._running_history[trial_job_id]:
cnt += 1
history_sum += each
self._completed_avg_history[trial_job_id].append(history_sum / cnt)
self._running_history.pop(trial_job_id)
else:
logger.warning('trial_end: trial_job_id does not exist in running_history')
def assess_trial(self, trial_job_id, trial_history):
curr_step = len(trial_history)
if curr_step < self._start_step:
return AssessResult.Good
scalar_trial_history = extract_scalar_history(trial_history)
self.METHOD_NAME(trial_job_id, scalar_trial_history)
if self._high_better:
best_history = max(scalar_trial_history)
else:
best_history = min(scalar_trial_history)
avg_array = []
for id_ in self._completed_avg_history:
if len(self._completed_avg_history[id_]) >= curr_step:
avg_array.append(self._completed_avg_history[id_][curr_step - 1])
if avg_array:
avg_array.sort()
if self._high_better:
median = avg_array[(len(avg_array)-1) // 2]
return AssessResult.Bad if best_history < median else AssessResult.Good
else:
median = avg_array[len(avg_array) // 2]
return AssessResult.Bad if best_history > median else AssessResult.Good
else:
return AssessResult.Good
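# Standalone, hedged illustration of the incremental-update idiom used by the
# data-update method (METHOD_NAME) above: only the not-yet-seen tail of
# trial_history is appended to the running history.
def _demo_incremental_extend():
    running = [0.1, 0.2]
    trial_history = [0.1, 0.2, 0.3, 0.4]
    running.extend(trial_history[len(running):])
    return running  # [0.1, 0.2, 0.3, 0.4]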
|
4,147 |
test provides user
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Tests `requires_provides.py`.
"""
import unittest
from antlir.compiler.requires_provides import (
_normalize_path,
ProvidesDirectory,
ProvidesDoNotAccess,
ProvidesFile,
ProvidesGroup,
ProvidesKey,
ProvidesSymlink,
ProvidesUser,
RequireDirectory,
RequireFile,
RequireGroup,
RequireKey,
RequireSymlink,
RequireUser,
)
from antlir.fs_utils import Path
class RequiresProvidesTestCase(unittest.TestCase):
def test_normalize_path(self) -> None:
self.assertEqual(Path("/a"), _normalize_path(Path("a//.")))
self.assertEqual(Path("/b/d"), _normalize_path(Path("/b/c//../d")))
self.assertEqual(Path("/x/y"), _normalize_path(Path("///x/./y/")))
def test_path_normalization(self) -> None:
self.assertEqual(Path("/a"), RequireDirectory(path=Path("a//.")).path)
self.assertEqual(
Path("/b/d"), ProvidesDirectory(path=Path("/b/c//../d")).req.path
)
self.assertEqual(Path("/x/y"), ProvidesFile(path=Path("///x/./y/")).req.path)
def test_provides_requires(self) -> None:
pf1 = ProvidesFile(path=Path("f"))
pf2 = ProvidesFile(path=Path("f/b"))
pf3 = ProvidesFile(path=Path("f/b/c"))
pd1 = ProvidesDirectory(path=Path("a"))
pd2 = ProvidesDirectory(path=Path("a/b"))
pd3 = ProvidesDirectory(path=Path("a/b/c"))
provides = [pf1, pf2, pf3, pd1, pd2, pd3]
rf1 = RequireFile(path=Path("f"))
rf2 = RequireFile(path=Path("f/b"))
rf3 = RequireFile(path=Path("f/b/c"))
rd1 = RequireDirectory(path=Path("a"))
rd2 = RequireDirectory(path=Path("a/b"))
rd3 = RequireDirectory(path=Path("a/b/c"))
requires = [rf1, rf2, rf3, rd1, rd2, rd3]
for p in provides:
for r in requires:
self.assertEqual(
p.req.path == r.path,
p.provides(r),
f"{p}.provides({r})",
)
def test_provides_do_not_access(self) -> None:
self.assertFalse(
ProvidesDoNotAccess(path=Path("//a/b")).provides(
RequireFile(path=Path("/a/b"))
)
)
def test_with_new_path(self) -> None:
for new_path in ["b", "b/", "/b", "/../a/../b/c/.."]:
self.assertEqual(
ProvidesDirectory(path=Path("unused")).with_new_path(Path(new_path)),
ProvidesDirectory(path=Path("b")),
)
def test_provides_path_object_path(self) -> None:
p = Path("/a/b/c")
self.assertEqual(p, ProvidesDirectory(p).path())
self.assertEqual(p, ProvidesDirectory(p).path())
def test_require_group(self) -> None:
groupname = "foo"
g = RequireGroup(groupname)
self.assertEqual(g.name, groupname)
def test_provides_group(self) -> None:
groupname = "foo"
pg = ProvidesGroup(groupname)
# pyre-fixme[16]: `Requirement` has no attribute `name`.
self.assertEqual(pg.req.name, groupname)
self.assertTrue(pg.provides(RequireGroup(groupname)))
def test_require_user(self) -> None:
username = "user"
ru = RequireUser(username)
self.assertEqual(ru.name, username)
ru2 = RequireUser(username)
self.assertEqual(ru, ru2)
def METHOD_NAME(self) -> None:
username = "user"
pu = ProvidesUser(username)
# pyre-fixme[16]: `Requirement` has no attribute `name`.
self.assertEqual(pu.req.name, username)
self.assertTrue(pu.provides(RequireUser(username)))
self.assertFalse(pu.provides(RequireUser("user2")))
def test_require_symlink(self) -> None:
path = Path("/foo")
target = Path("/bar")
rs = RequireSymlink(path=path, target=target)
self.assertEqual(rs.path, path)
self.assertEqual(rs.target, target)
def test_provides_symlink(self) -> None:
path = Path("/foo")
target = Path("/bar")
ps = ProvidesSymlink(path=path, target=target)
rs = RequireSymlink(path=path, target=target)
self.assertEqual(ps.req, rs)
self.assertTrue(ps.provides(rs))
# Symlinks and files/dirs are different now
self.assertFalse(ps.provides(RequireFile(path)))
self.assertFalse(ps.provides(RequireDirectory(path)))
new_path = Path("/baz")
ps2 = ps.with_new_path(new_path)
rs2 = RequireSymlink(path=new_path, target=target)
self.assertEqual(ps2.req, rs2)
self.assertFalse(ps2.provides(rs))
self.assertTrue(ps2.provides(rs2))
def test_require_key(self) -> None:
rk = RequireKey(key="key")
self.assertEqual(rk.key, "key")
def test_provides_key(self) -> None:
pk = ProvidesKey(key="key")
rk = RequireKey(key="key")
self.assertTrue(pk.provides(rk))
|
4,148 |
get qt ui
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 30 13:13:27 2015
"""
from nplab.utils.gui import QtWidgets, uic, QtCore
import os
from nplab.instrument.visa_instrument import VisaInstrument
from nplab.instrument.temperatureControl import TemperatureControlMixin
class OxfordITC(VisaInstrument, TemperatureControlMixin):
def __init__(self, address, **kwargs):
TemperatureControlMixin.__init__(self)
if 'GPIB' in address:
VisaInstrument.__init__(self, address, settings=dict(timeout=10000, read_termination='\r',
write_termination='\r'))
else:
VisaInstrument.__init__(self, address, settings=dict(baud_rate=9600, read_termination='\r',
write_termination='\r', timeout=1000))
self.setControlMode(3)
self.params = {'T': 0, 'SetT': 0, 'PID': [0, 0, 0]}
self.flush_input_buffer()
self.clear_read_buffer()
self.get_temperature()
self.get_target_temperature()
def __del__(self):
try:
self.heaterOff()
self.setControlMode(0)
self.instr.close()
except:
self._logger.warn("Couldn't close %s on port %s" % (self.__class__.__name__, self._address))
def get_temperature(self):
temp = self.query('R1', delay=1)
temp = float(temp[1:len(temp)]) # Remove the first character ('R')
self.params['T'] = temp
return temp
def setControlMode(self, mode):
"""
Sets the operation mode (local or remote)
:param mode:
0 LOCAL & LOCKED (Default State),
1 REMOTE & LOCKED (Front Panel Disabled),
2 LOCAL & UNLOCKED,
3 REMOTE & UNLOCKED (Front Panel Active)
:return:
"""
if (mode not in [0, 1, 2, 3]):
raise Exception('valid modes are 0-3, see documentation')
self.write('C' + str(mode))
def get_target_temperature(self):
temp = self.query('R0')
temp = float(temp[1:len(temp)]) # Remove the first character ('R')
self.params['SetT'] = temp
return temp
def set_target_temperature(self, temp):
"""
Sets the set temperature
:param temp: Temperature in Kelvin (int)
:return:
"""
self.params['SetT'] = temp
self.write('T' + str(int(temp)))
def setHeaterMode(self, mode):
"""
Sets the heater mode (auto, manual)
:param mode:
0 HEATER MANUAL - GAS MANUAL,
1 HEATER AUTO - GAS MANUAL,
2 HEATER MANUAL - GAS AUTO,
3 HEATER AUTO - GAS AUTO
:return:
"""
if (mode not in [0, 1, 2, 3]):
raise Exception('valid modes are 0-3, see documentation')
self.write('A' + str(mode))
self.params['Heater'] = mode
def setHeaterPower(self, power):
self.params['HeaterPower'] = power
self.write('O' + str(int(power)))
def heaterOff(self):
self.setHeaterMode(0)
self.setHeaterPower(0)
def setAutoPID(self, mode):
"""
Sets the PID mode (auto or manual)
:param mode:
0 disable auto-PID,
1 enable auto-PID
:return:
"""
if (mode not in [0, 1]):
raise Exception('valid modes are 0 (off) or 1 (on)')
self.write('L' + str(mode))
self.params['autoPID'] = mode
def setPID(self, P, I, D):
"""
Sets the PID parameters for manual PID control
:param P: PROPORTIONAL BAND in Kelvin (resolution 0.001K, ideally 5 to 50K)
:param I: INTEGRAL ACTION TIME in minutes (0 to 140, ideally 1 to 10)
:param D: DERIVATIVE ACTION TIME in minutes (0 to 273, can be left at 0)
:return:
"""
self.write('P' + str(P))
self.write('I' + str(I))
self.write('D' + str(D))
self.params['PID'] = [P, I, D]
def METHOD_NAME(self):
return OxfordITCUI(self)
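# Hedged usage sketch (kept as comments because it needs real hardware on a VISA/GPIB
# address; the address and set-point below are made-up examples):
#
#     itc = OxfordITC('GPIB0::24::INSTR')
#     itc.set_target_temperature(77)   # Kelvin set-point
#     itc.setHeaterMode(1)             # heater auto, gas manual
#     itc.setPID(10, 5, 0)             # manual PID terms
#     print(itc.get_temperature())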
class OxfordITCUI(QtWidgets.QWidget):
updateGUI = QtCore.Signal()
def __init__(self, itc):
assert isinstance(itc, OxfordITC), "instrument must be an Oxford ITC"
super(OxfordITCUI, self).__init__()
self.ITC = itc
uic.loadUi(os.path.join(os.path.dirname(__file__), 'OxfordITC.ui'), self)
self.lineEditSetT.returnPressed.connect(self.setT)
self.updateGUI.connect(self.SentUpdateGUI)
self.SentUpdateGUI()
def SentUpdateGUI(self):
self.textEditT.setText(str(self.ITC.params['T']))
self.lineEditSetT.setText(str(self.ITC.params['SetT']))
self.lineEditP.setText(str(self.ITC.params['PID'][0]))
self.lineEditI.setText(str(self.ITC.params['PID'][1]))
self.lineEditD.setText(str(self.ITC.params['PID'][2]))
return
def setT(self):
temp = float(self.lineEditSetT.text())
self.ITC.set_target_temperature(temp)
if __name__ == '__main__':
ITC = OxfordITC('GPIB0::24::INSTR')
ITC.show_gui()
|
4,149 |
get dimension config
|
# -*- coding: utf-8 -*-
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from pipeline.core.flow.io import ObjectItemSchema, StringItemSchema
from api.collections.monitor import BKMonitorClient
from pipeline_plugins.base.utils.inject import supplier_account_for_business
from pipeline_plugins.components.collections.sites.open.monitor.base import (
MonitorBaseService,
)
from pipeline_plugins.components.utils.sites.open.choose_time_tools import choose_time
from pipeline_plugins.components.utils.sites.open.utils import (
get_module_id_list_by_name,
)
from pipeline_plugins.variables.utils import (
get_list_by_selected_names,
get_service_template_list,
get_service_template_list_by_names,
get_set_list,
)
ALL_SELECTED_STR = "all"
class MonitorAlarmShieldServiceBase(MonitorBaseService):
def inputs_format(self):
return [
self.InputItem(
name=_("屏蔽范围类型"),
key="bk_alarm_shield_info",
type="object",
schema=ObjectItemSchema(description=_("屏蔽范围类型"), property_schemas={}),
),
self.InputItem(
name=_("策略 ID"),
key="bk_alarm_shield_target",
type="string",
schema=StringItemSchema(description=_("需要执行屏蔽的指标")),
),
self.InputItem(
name=_("时间选择"),
key="bk_alarm_time_type",
type="string",
schema=StringItemSchema(description=_("开始屏蔽的时间")),
),
self.InputItem(
name=_("屏蔽开始时间"),
key="bk_alarm_shield_begin_time",
type="string",
schema=StringItemSchema(description=_("开始屏蔽的时间")),
),
self.InputItem(
name=_("屏蔽结束时间"),
key="bk_alarm_end_time",
type="string",
schema=StringItemSchema(description=_("结束屏蔽的时间")),
),
self.InputItem(
name=_("屏蔽持续时间"),
key="bk_alarm_shield_duration",
type="string",
schema=StringItemSchema(description=_("屏蔽持续的时间")),
),
]
def get_scope_value(self, bk_biz_id, scope_type, combine):
scope = {"business": "bk_alarm_shield_business", "IP": "bk_alarm_shield_IP", "node": "bk_alarm_shield_node"}
scope_value = combine.get(scope[scope_type])
return scope_value
def execute(self, data, parent_data):
bk_biz_id = parent_data.get_one_of_inputs("biz_cc_id")
executor = parent_data.get_one_of_inputs("executor")
client = BKMonitorClient(username=executor)
combine = data.get_one_of_inputs("bk_alarm_shield_info")
scope_type = combine.get("bk_alarm_shield_scope")
scope_value = self.get_scope_value(bk_biz_id, scope_type, combine)
target = data.get_one_of_inputs("bk_alarm_shield_target")
begin_time = data.get_one_of_inputs("bk_alarm_shield_begin_time")
end_time = data.get_one_of_inputs("bk_alarm_shield_end_time")
time_type = int(data.get_one_of_inputs("bk_alarm_time_type"))
shield_duration = data.get_one_of_inputs("bk_alarm_shield_duration")
try:
begin_time, end_time = choose_time(time_type, begin_time, end_time, shield_duration)
except ValueError:
return False
if parent_data.get_one_of_inputs("language"):
setattr(client, "language", parent_data.get_one_of_inputs("language"))
translation.activate(parent_data.get_one_of_inputs("language"))
supplier_account = supplier_account_for_business(bk_biz_id)
request_body = self.get_request_body(
bk_biz_id, begin_time, end_time, scope_type, scope_value, executor, supplier_account
)
if "all" not in target:
request_body["dimension_config"].update({"metric_id": target})
result_flag = self.send_request(request_body, data, client)
return result_flag
def METHOD_NAME(self, shied_type, shied_value, bk_biz_id, username, bk_supplier_account):
dimension_map = {
"business": self.get_biz_dimension,
"IP": self.get_ip_dimension,
"node": self.get_node_dimension,
}
return dimension_map[shied_type](shied_value, bk_biz_id, username, bk_supplier_account)
def get_request_body(self, bk_biz_id, begin_time, end_time, shied_type, shied_value, username, bk_supplier_account):
dimension_config = self.METHOD_NAME(shied_type, shied_value, bk_biz_id, username, bk_supplier_account)
request_body = self.build_request_body(
begin_time=begin_time,
bk_biz_id=bk_biz_id,
shied_type=shied_type,
dimension_config=dimension_config,
end_time=end_time,
)
return request_body
def get_ip_dimension(self, scope_value, bk_biz_id, username, bk_supplier_account):
ip_dimension = super(MonitorAlarmShieldServiceBase, self).get_ip_dimension_config(
scope_value, bk_biz_id, username
)
return ip_dimension
@staticmethod
def get_biz_dimension(scope_value, bk_biz_id, username, bk_supplier_account):
return {"scope_type": "biz"}
@staticmethod
def get_node_dimension(scope_value, bk_biz_id, username, bk_supplier_account):
bk_set_method = scope_value["bk_set_method"]
if bk_set_method == "select":
bk_set_value = scope_value["bk_set_select"]
else:
bk_set_value = scope_value["bk_set_text"]
bk_module_method = scope_value["bk_module_method"]
if bk_module_method == "select":
bk_module_value = scope_value["bk_module_select"]
else:
bk_module_value = scope_value["bk_module_text"]
# Get the full list of sets
set_list = get_set_list(username, bk_biz_id, bk_supplier_account)
# Sets "select all": if the filter condition is not empty, call the API to get the set id list
if ALL_SELECTED_STR not in bk_set_value:
selected_set_names = bk_set_value
# Get the selected sets according to the selected set names
set_list = get_list_by_selected_names(selected_set_names, set_list)
# Get the full list of service templates
service_template_list = get_service_template_list(username, bk_biz_id, bk_supplier_account)
# Service templates "select all": call the API to get the service template list
if ALL_SELECTED_STR not in bk_module_value:
selected_service_template_names = bk_module_value
# Get the template list from the selected or manually entered templates
service_template_list = get_service_template_list_by_names(
selected_service_template_names, service_template_list
)
# Get the list of module ids
module_ids = get_module_id_list_by_name(bk_biz_id, username, set_list, service_template_list)
target = [{"bk_obj_id": "module", "bk_inst_id": module_id["bk_module_id"]} for module_id in module_ids]
return {"scope_type": "node", "target": target}
|
4,150 |
options
|
"""Init file for Supervisor Supervisor RESTful API."""
import asyncio
from collections.abc import Awaitable
import logging
from typing import Any
from aiohttp import web
import voluptuous as vol
from ..const import (
ATTR_ADDONS,
ATTR_ADDONS_REPOSITORIES,
ATTR_ARCH,
ATTR_AUTO_UPDATE,
ATTR_BLK_READ,
ATTR_BLK_WRITE,
ATTR_CHANNEL,
ATTR_CONTENT_TRUST,
ATTR_CPU_PERCENT,
ATTR_DEBUG,
ATTR_DEBUG_BLOCK,
ATTR_DIAGNOSTICS,
ATTR_FORCE_SECURITY,
ATTR_HEALTHY,
ATTR_ICON,
ATTR_IP_ADDRESS,
ATTR_LOGGING,
ATTR_MEMORY_LIMIT,
ATTR_MEMORY_PERCENT,
ATTR_MEMORY_USAGE,
ATTR_NAME,
ATTR_NETWORK_RX,
ATTR_NETWORK_TX,
ATTR_REPOSITORY,
ATTR_SLUG,
ATTR_STATE,
ATTR_SUPPORTED,
ATTR_TIMEZONE,
ATTR_UPDATE_AVAILABLE,
ATTR_VERSION,
ATTR_VERSION_LATEST,
ATTR_WAIT_BOOT,
LogLevel,
UpdateChannel,
)
from ..coresys import CoreSysAttributes
from ..exceptions import APIError
from ..store.validate import repositories
from ..utils.sentry import close_sentry, init_sentry
from ..utils.validate import validate_timezone
from ..validate import version_tag, wait_boot
from .const import CONTENT_TYPE_BINARY
from .utils import api_process, api_process_raw, api_validate
_LOGGER: logging.Logger = logging.getLogger(__name__)
# pylint: disable=no-value-for-parameter
SCHEMA_OPTIONS = vol.Schema(
{
vol.Optional(ATTR_CHANNEL): vol.Coerce(UpdateChannel),
vol.Optional(ATTR_ADDONS_REPOSITORIES): repositories,
vol.Optional(ATTR_TIMEZONE): validate_timezone,
vol.Optional(ATTR_WAIT_BOOT): wait_boot,
vol.Optional(ATTR_LOGGING): vol.Coerce(LogLevel),
vol.Optional(ATTR_DEBUG): vol.Boolean(),
vol.Optional(ATTR_DEBUG_BLOCK): vol.Boolean(),
vol.Optional(ATTR_DIAGNOSTICS): vol.Boolean(),
vol.Optional(ATTR_CONTENT_TRUST): vol.Boolean(),
vol.Optional(ATTR_FORCE_SECURITY): vol.Boolean(),
vol.Optional(ATTR_AUTO_UPDATE): vol.Boolean(),
}
)
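# Hedged example (illustrative values only; nothing here is sent or validated) of a
# body that the SCHEMA_OPTIONS schema above is meant to accept.
_EXAMPLE_OPTIONS_BODY = {
    ATTR_CHANNEL: "beta",
    ATTR_DEBUG: True,
    ATTR_AUTO_UPDATE: False,
}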
SCHEMA_VERSION = vol.Schema({vol.Optional(ATTR_VERSION): version_tag})
class APISupervisor(CoreSysAttributes):
"""Handle RESTful API for Supervisor functions."""
@api_process
async def ping(self, request):
"""Return ok for signal that the API is ready."""
return True
@api_process
async def info(self, request: web.Request) -> dict[str, Any]:
"""Return host information."""
return {
ATTR_VERSION: self.sys_supervisor.version,
ATTR_VERSION_LATEST: self.sys_supervisor.latest_version,
ATTR_UPDATE_AVAILABLE: self.sys_supervisor.need_update,
ATTR_CHANNEL: self.sys_updater.channel,
ATTR_ARCH: self.sys_supervisor.arch,
ATTR_SUPPORTED: self.sys_core.supported,
ATTR_HEALTHY: self.sys_core.healthy,
ATTR_IP_ADDRESS: str(self.sys_supervisor.ip_address),
ATTR_TIMEZONE: self.sys_config.timezone,
ATTR_LOGGING: self.sys_config.logging,
ATTR_DEBUG: self.sys_config.debug,
ATTR_DEBUG_BLOCK: self.sys_config.debug_block,
ATTR_DIAGNOSTICS: self.sys_config.diagnostics,
ATTR_AUTO_UPDATE: self.sys_updater.auto_update,
# Deprecated
ATTR_WAIT_BOOT: self.sys_config.wait_boot,
ATTR_ADDONS: [
{
ATTR_NAME: addon.name,
ATTR_SLUG: addon.slug,
ATTR_VERSION: addon.version,
ATTR_VERSION_LATEST: addon.latest_version,
ATTR_UPDATE_AVAILABLE: addon.need_update,
ATTR_STATE: addon.state,
ATTR_REPOSITORY: addon.repository,
ATTR_ICON: addon.with_icon,
}
for addon in self.sys_addons.local.values()
],
ATTR_ADDONS_REPOSITORIES: [
{ATTR_NAME: store.name, ATTR_SLUG: store.slug}
for store in self.sys_store.all
],
}
@api_process
async def METHOD_NAME(self, request: web.Request) -> None:
"""Set Supervisor options."""
body = await api_validate(SCHEMA_OPTIONS, request)
if ATTR_CHANNEL in body:
self.sys_updater.channel = body[ATTR_CHANNEL]
if ATTR_TIMEZONE in body:
self.sys_config.timezone = body[ATTR_TIMEZONE]
if ATTR_DEBUG in body:
self.sys_config.debug = body[ATTR_DEBUG]
if ATTR_DEBUG_BLOCK in body:
self.sys_config.debug_block = body[ATTR_DEBUG_BLOCK]
if ATTR_DIAGNOSTICS in body:
self.sys_config.diagnostics = body[ATTR_DIAGNOSTICS]
self.sys_dbus.agent.diagnostics = body[ATTR_DIAGNOSTICS]
if body[ATTR_DIAGNOSTICS]:
init_sentry(self.coresys)
else:
close_sentry()
if ATTR_LOGGING in body:
self.sys_config.logging = body[ATTR_LOGGING]
if ATTR_AUTO_UPDATE in body:
self.sys_updater.auto_update = body[ATTR_AUTO_UPDATE]
# Deprecated
if ATTR_WAIT_BOOT in body:
self.sys_config.wait_boot = body[ATTR_WAIT_BOOT]
# Save changes before processing addons in case of errors
self.sys_updater.save_data()
self.sys_config.save_data()
# Remove: 2022.9
if ATTR_ADDONS_REPOSITORIES in body:
await asyncio.shield(
self.sys_store.update_repositories(set(body[ATTR_ADDONS_REPOSITORIES]))
)
await self.sys_resolution.evaluate.evaluate_system()
@api_process
async def stats(self, request: web.Request) -> dict[str, Any]:
"""Return resource information."""
stats = await self.sys_supervisor.stats()
return {
ATTR_CPU_PERCENT: stats.cpu_percent,
ATTR_MEMORY_USAGE: stats.memory_usage,
ATTR_MEMORY_LIMIT: stats.memory_limit,
ATTR_MEMORY_PERCENT: stats.memory_percent,
ATTR_NETWORK_RX: stats.network_rx,
ATTR_NETWORK_TX: stats.network_tx,
ATTR_BLK_READ: stats.blk_read,
ATTR_BLK_WRITE: stats.blk_write,
}
@api_process
async def update(self, request: web.Request) -> None:
"""Update Supervisor OS."""
body = await api_validate(SCHEMA_VERSION, request)
# This option is useless outside of DEV
if not self.sys_dev and not self.sys_supervisor.need_update:
raise APIError(
f"No supervisor update available - {self.sys_supervisor.version!s}"
)
if self.sys_dev:
version = body.get(ATTR_VERSION, self.sys_updater.version_supervisor)
else:
version = self.sys_updater.version_supervisor
await asyncio.shield(self.sys_supervisor.update(version))
@api_process
def reload(self, request: web.Request) -> Awaitable[None]:
"""Reload add-ons, configuration, etc."""
return asyncio.shield(
asyncio.wait(
[
self.sys_create_task(coro)
for coro in [
self.sys_updater.reload(),
self.sys_homeassistant.secrets.reload(),
self.sys_resolution.evaluate.evaluate_system(),
]
]
)
)
@api_process
def repair(self, request: web.Request) -> Awaitable[None]:
"""Try to repair the local setup / overlayfs."""
return asyncio.shield(self.sys_core.repair())
@api_process
def restart(self, request: web.Request) -> Awaitable[None]:
"""Soft restart Supervisor."""
return asyncio.shield(self.sys_supervisor.restart())
@api_process_raw(CONTENT_TYPE_BINARY)
def logs(self, request: web.Request) -> Awaitable[bytes]:
"""Return supervisor Docker logs."""
return self.sys_supervisor.logs()
|
4,151 |
test read unknown error
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for the internal implementation details of L{twisted.internet.udp}.
"""
import socket
from twisted.internet import udp
from twisted.internet.protocol import DatagramProtocol
from twisted.python.runtime import platformType
from twisted.trial import unittest
if platformType == "win32":
from errno import WSAEWOULDBLOCK as EWOULDBLOCK # type: ignore[attr-defined]
else:
from errno import EWOULDBLOCK
class StringUDPSocket:
"""
A fake UDP socket object, which returns a fixed sequence of strings and/or
socket errors. Useful for testing.
@ivar retvals: A C{list} containing either strings or C{socket.error}s.
@ivar connectedAddr: The address the socket is connected to.
"""
def __init__(self, retvals):
self.retvals = retvals
self.connectedAddr = None
def connect(self, addr):
self.connectedAddr = addr
def recvfrom(self, size):
"""
Return (or raise) the next value from C{self.retvals}.
"""
ret = self.retvals.pop(0)
if isinstance(ret, socket.error):
raise ret
return ret, None
class KeepReads(DatagramProtocol):
"""
Accumulate reads in a list.
"""
def __init__(self):
self.reads = []
def datagramReceived(self, data, addr):
self.reads.append(data)
class ErrorsTests(unittest.SynchronousTestCase):
"""
Error handling tests for C{udp.Port}.
"""
def test_socketReadNormal(self):
"""
Socket reads with some good data followed by an ignorable socket error
cause reading to stop, and no log messages are logged.
"""
# Add a fake error to the list of ignorables:
udp._sockErrReadIgnore.append(-7000)
self.addCleanup(udp._sockErrReadIgnore.remove, -7000)
protocol = KeepReads()
port = udp.Port(None, protocol)
# Normal result, no errors
port.socket = StringUDPSocket(
[b"result", b"123", socket.error(-7000), b"456", socket.error(-7000)]
)
port.doRead()
# Read stops on error:
self.assertEqual(protocol.reads, [b"result", b"123"])
port.doRead()
self.assertEqual(protocol.reads, [b"result", b"123", b"456"])
def test_readImmediateError(self):
"""
If the socket is unconnected, socket reads with an immediate
connection refusal are ignored, and reading stops. The protocol's
C{connectionRefused} method is not called.
"""
# Add a fake error to the list of those that count as connection
# refused:
udp._sockErrReadRefuse.append(-6000)
self.addCleanup(udp._sockErrReadRefuse.remove, -6000)
protocol = KeepReads()
# Fail if connectionRefused is called:
protocol.connectionRefused = lambda: 1 / 0
port = udp.Port(None, protocol)
# Try an immediate "connection refused"
port.socket = StringUDPSocket(
[b"a", socket.error(-6000), b"b", socket.error(EWOULDBLOCK)]
)
port.doRead()
# Read stops on error:
self.assertEqual(protocol.reads, [b"a"])
# Read again:
port.doRead()
self.assertEqual(protocol.reads, [b"a", b"b"])
def test_connectedReadImmediateError(self):
"""
If the socket is connected, socket reads with an immediate
connection refusal are ignored, and reading stops. The protocol's
C{connectionRefused} method is called.
"""
# Add a fake error to the list of those that count as connection
# refused:
udp._sockErrReadRefuse.append(-6000)
self.addCleanup(udp._sockErrReadRefuse.remove, -6000)
protocol = KeepReads()
refused = []
protocol.connectionRefused = lambda: refused.append(True)
port = udp.Port(None, protocol)
port.socket = StringUDPSocket(
[b"a", socket.error(-6000), b"b", socket.error(EWOULDBLOCK)]
)
port.connect("127.0.0.1", 9999)
# Read stops on error:
port.doRead()
self.assertEqual(protocol.reads, [b"a"])
self.assertEqual(refused, [True])
# Read again:
port.doRead()
self.assertEqual(protocol.reads, [b"a", b"b"])
self.assertEqual(refused, [True])
def METHOD_NAME(self):
"""
Socket reads with an unknown socket error are raised.
"""
protocol = KeepReads()
port = udp.Port(None, protocol)
# Some good data, followed by an unknown error
port.socket = StringUDPSocket([b"good", socket.error(-1337)])
self.assertRaises(socket.error, port.doRead)
self.assertEqual(protocol.reads, [b"good"])
|
4,152 |
read
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
# http://www.apache.org/licenses/LICENSE-2.0
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Image utils
"""
import base64
import sys
from io import BytesIO
import mxnet as mx
import numpy as np
from PIL import Image
from mxnet import image as img
def transform_shape(img_arr, dim_order='NCHW'):
"""
Rearrange image NDArray shape to 'NCHW' or 'NHWC' which
is valid for MXNet model input.
Input image NDArray should have dim_order of 'HWC'.
:param img_arr: NDArray
Image in NDArray format with shape (channel, width, height)
:param dim_order: str
Output image dimension order. Valid values are 'NCHW' and 'NHWC'
:return: NDArray
Image in NDArray format with dim_order shape
"""
assert dim_order in ('NCHW', 'NHWC'), "dim_order must be 'NCHW' or 'NHWC'."
if dim_order == 'NCHW':
img_arr = mx.nd.transpose(img_arr, (2, 0, 1))
output = mx.nd.expand_dims(img_arr, axis=0)
return output
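# Minimal runnable sketch (synthetic data, assumes MXNet is importable as above)
# of the HWC -> NCHW batching performed by transform_shape:
def _demo_transform_shape():
    arr = mx.nd.uniform(0, 255, shape=(32, 32, 3))   # fake HWC image
    batch = transform_shape(arr)                      # -> (1, 3, 32, 32)
    assert batch.shape == (1, 3, 32, 32)
    return batch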
def METHOD_NAME(buf, flag=1, to_rgb=True, out=None):
"""
Read and decode an image to an NDArray.
Input image NDArray should have dim_order of 'HWC'.
Note: `imread` uses OpenCV (not the CV2 Python library).
MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.
:param buf: str/bytes or numpy.ndarray
Binary image data as string or numpy ndarray.
:param flag: {0, 1}, default 1
1 for three channel color output. 0 for grayscale output.
:param to_rgb: bool, default True
True for RGB formatted output (MXNet default).
False for BGR formatted output (OpenCV default).
:param out: NDArray, optional
Output buffer. Use `None` for automatic allocation.
:return: NDArray
An `NDArray` containing the image.
Example
-------
>>> buf = open("flower.jpg", 'rb').read()
>>> image.read(buf)
<NDArray 224x224x3 @cpu(0)>
"""
return img.imdecode(buf, flag, to_rgb, out)
def write(img_arr, flag=1, output_format='jpeg', dim_order='CHW'):
"""
Write an NDArray to a base64 string.
:param img_arr: NDArray
Image in NDArray format with shape (channel, width, height).
:param flag: {0, 1}, default 1
1 for three channel color output. 0 for grayscale output.
:param output_format: str
Output image format.
:param dim_order: str
Input image dimension order. Valid values are 'CHW' and 'HWC'
:return: str
Image in base64 string format
"""
assert dim_order in ('CHW', 'HWC'), "dim_order must be 'CHW' or 'HWC'."
if dim_order == 'CHW':
img_arr = mx.nd.transpose(img_arr, (1, 2, 0))
if flag == 1:
mode = 'RGB'
else:
mode = 'L'
img_arr = mx.nd.reshape(img_arr, (img_arr.shape[0], img_arr.shape[1]))
img_arr = img_arr.astype(np.uint8).asnumpy()
image = Image.fromarray(img_arr, mode)
output = BytesIO()
image.save(output, format=output_format)
output.seek(0)
if sys.version_info[0] < 3:
return base64.b64encode(output.getvalue())
else:
return base64.b64encode(output.getvalue()).decode("utf-8")
def resize(src, new_width, new_height, interp=2):
"""
Resizes image to new_width and new_height.
Input image NDArray should have dim_order of 'HWC'.
:param src: NDArray
Source image in NDArray format
:param new_width: int
Width in pixel for resized image
:param new_height: int
Height in pixel for resized image
:param interp: int
interpolation method for all resizing operations
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
2: Area-based (resampling using pixel area relation). It may be a
preferred method for image decimation, as it gives moire-free
results. But when the image is zoomed, it is similar to the Nearest
Neighbors method. (used by default).
3: Bicubic interpolation over 4x4 pixel neighborhood.
4: Lanczos interpolation over 8x8 pixel neighborhood.
9: Cubic for enlarge, area for shrink, bilinear for others
10: Randomly select from the interpolation methods mentioned above.
Note:
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
More details can be found in the documentation of OpenCV, please refer to
http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.
:return: NDArray
An `NDArray` containing the resized image.
"""
return img.imresize(src, new_width, new_height, interp)
def fixed_crop(src, x0, y0, w, h, size=None, interp=2):
"""
Crop src at fixed location, and (optionally) resize it to size.
Input image NDArray should have dim_order of 'HWC'.
:param src: NDArray
Input image
:param x0: int
Left boundary of the cropping area
:param y0 : int
Top boundary of the cropping area
:param w : int
Width of the cropping area
:param h : int
Height of the cropping area
:param size : tuple of (w, h)
Optional, resize to new size after cropping
:param interp : int, optional, default=2
Interpolation method. See resize for details.
:return: NDArray
An `NDArray` containing the cropped image.
"""
return img.fixed_crop(src, x0, y0, w, h, size, interp)
def color_normalize(src, mean, std=None):
"""
Normalize src with mean and std.
:param src : NDArray
Input image
:param mean : NDArray
RGB mean to be subtracted
:param std : NDArray
RGB standard deviation to be divided
:return: NDArray
An `NDArray` containing the normalized image.
"""
src = src.astype(np.float32)
return img.color_normalize(src, mean, std)
|
4,153 |
retrieve inventory
|
# Copyright 2019 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#Noncritical:
# - One or more temperature sensors is in the warning range;
# - A panic dump exists in flash.
#Critical:
# - One or more temperature sensors is in the failure range;
# - One or more fans are running < 100 RPM;
# - One power supply is off.
import eventlet
import eventlet.queue as queue
import confluent.exceptions as exc
webclient = eventlet.import_patched('pyghmi.util.webclient')
import confluent.messages as msg
import confluent.util as util
class SwitchSensor(object):
def __init__(self, name, states, value=None, health=None):
self.name = name
self.value = value
self.states = states
self.health = health
def cnos_login(node, configmanager, creds):
wc = webclient.SecureHTTPConnection(node, port=443, verifycallback=util.TLSCertVerifier(
configmanager, node, 'pubkeys.tls_hardwaremanager').verify_cert)
wc.set_basic_credentials(creds[node]['secret.hardwaremanagementuser']['value'], creds[node]['secret.hardwaremanagementpassword']['value'])
wc.request('GET', '/nos/api/login/')
rsp = wc.getresponse()
body = rsp.read()
if rsp.status == 401: # CNOS gives 401 on first attempt...
wc.request('GET', '/nos/api/login/')
rsp = wc.getresponse()
body = rsp.read()
if rsp.status >= 200 and rsp.status < 300:
return wc
raise exc.TargetEndpointBadCredentials('Unable to authenticate')
def update(nodes, element, configmanager, inputdata):
for node in nodes:
yield msg.ConfluentNodeError(node, 'Not Implemented')
def delete(nodes, element, configmanager, inputdata):
for node in nodes:
yield msg.ConfluentNodeError(node, 'Not Implemented')
def create(nodes, element, configmanager, inputdata):
for node in nodes:
yield msg.ConfluentNodeError(node, 'Not Implemented')
def retrieve(nodes, element, configmanager, inputdata):
results = queue.LightQueue()
workers = set([])
if element == ['power', 'state']:
for node in nodes:
yield msg.PowerState(node=node, state='on')
return
elif element == ['health', 'hardware']:
creds = configmanager.get_node_attributes(
nodes, ['secret.hardwaremanagementuser', 'secret.hardwaremanagementpassword'], decrypt=True)
for node in nodes:
workers.add(eventlet.spawn(retrieve_health, configmanager, creds,
node, results))
elif element[:3] == ['inventory', 'hardware', 'all']:
creds = configmanager.get_node_attributes(
nodes, ['secret.hardwaremanagementuser', 'secret.hardwaremanagementpassword'], decrypt=True)
for node in nodes:
workers.add(eventlet.spawn(METHOD_NAME, configmanager,
creds, node, results, element))
elif element[:3] == ['inventory', 'firmware', 'all']:
creds = configmanager.get_node_attributes(
nodes, ['secret.hardwaremanagementuser', 'secret.hardwaremanagementpassword'], decrypt=True)
for node in nodes:
workers.add(eventlet.spawn(retrieve_firmware, configmanager,
creds, node, results, element))
else:
for node in nodes:
yield msg.ConfluentNodeError(node, 'Not Implemented')
return
currtimeout = 10
while workers:
try:
datum = results.get(10)
while datum:
if datum:
yield datum
datum = results.get_nowait()
except queue.Empty:
pass
eventlet.sleep(0.001)
for t in list(workers):
if t.dead:
workers.discard(t)
try:
while True:
datum = results.get_nowait()
if datum:
yield datum
except queue.Empty:
pass
def METHOD_NAME(configmanager, creds, node, results, element):
if len(element) == 3:
results.put(msg.ChildCollection('all'))
results.put(msg.ChildCollection('system'))
return
wc = cnos_login(node, configmanager, creds)
sysinfo = wc.grab_json_response('/nos/api/sysinfo/inventory')
invinfo = {
'inventory': [{
'name': 'System',
'present': True,
'information': {
'Product name': sysinfo['Model'],
'Serial Number': sysinfo['Electronic Serial Number'],
'Board Serial Number': sysinfo['Serial Number'],
'Manufacturer': 'Lenovo',
'Model': sysinfo['Machine Type Model'],
'FRU Number': sysinfo['FRU'].strip(),
}
}]
}
results.put(msg.KeyValueData(invinfo, node))
def retrieve_firmware(configmanager, creds, node, results, element):
if len(element) == 3:
results.put(msg.ChildCollection('all'))
return
wc = cnos_login(node, configmanager, creds)
sysinfo = wc.grab_json_response('/nos/api/sysinfo/inventory')
items = [{
'Software': {'version': sysinfo['Software Revision']},
},
{
'BIOS': {'version': sysinfo['BIOS Revision']},
}]
results.put(msg.Firmware(items, node))
def retrieve_health(configmanager, creds, node, results):
wc = cnos_login(node, configmanager, creds)
hinfo = wc.grab_json_response('/nos/api/sysinfo/globalhealthstatus')
summary = hinfo['status'].lower()
if summary == 'noncritical':
summary = 'warning'
results.put(msg.HealthSummary(summary, name=node))
state = None
badreadings = []
if summary != 'ok': # temperature or dump or fans or psu
wc.grab_json_response('/nos/api/sysinfo/panic_dump')
switchinfo = wc.grab_json_response('/nos/api/sysinfo/panic_dump')
if switchinfo:
badreadings.append(
SwitchSensor('Panicdump', ['Present'], health='warning'))
switchinfo = wc.grab_json_response('/nos/api/sysinfo/temperatures')
for temp in switchinfo:
if temp == 'Temperature threshold':
continue
if switchinfo[temp]['State'] != 'OK':
temphealth = switchinfo[temp]['State'].lower()
if temphealth == 'noncritical':
temphealth = 'warning'
tempval = switchinfo[temp]['Temp']
badreadings.append(
SwitchSensor(temp, [], value=tempval, health=temphealth))
switchinfo = wc.grab_json_response('/nos/api/sysinfo/fans')
for fan in switchinfo:
if switchinfo[fan]['speed-rpm'] < 100:
badreadings.append(
SwitchSensor(fan, [], value=switchinfo[fan]['speed-rpm'],
health='critical'))
switchinfo = wc.grab_json_response('/nos/api/sysinfo/power')
for psu in switchinfo:
if switchinfo[psu]['State'] != 'Normal ON':
psuname = switchinfo[psu]['Name']
badreadings.append(
SwitchSensor(psuname, states=[switchinfo[psu]['State']],
health='critical'))
results.put(msg.SensorReadings(badreadings, name=node))
|
4,154 |
handle subawards
|
import copy
import logging
from sys import maxsize
from django.conf import settings
from django.db.models import Count
from rest_framework.response import Response
from rest_framework.views import APIView
from elasticsearch_dsl import Q
from usaspending_api.awards.v2.filters.sub_award import subaward_filter
from usaspending_api.awards.v2.lookups.lookups import all_award_types_mappings
from usaspending_api.common.api_versioning import api_transformations, API_TRANSFORM_FUNCTIONS
from usaspending_api.common.cache_decorator import cache_response
from usaspending_api.common.elasticsearch.search_wrappers import AwardSearch
from usaspending_api.common.exceptions import InvalidParameterException
from usaspending_api.common.helpers.generic_helper import (
deprecated_district_field_in_location_object,
get_generic_filters_message,
)
from usaspending_api.common.query_with_filters import QueryWithFilters
from usaspending_api.common.validator.award_filter import AWARD_FILTER_NO_RECIPIENT_ID
from usaspending_api.common.validator.pagination import PAGINATION
from usaspending_api.common.validator.tinyshield import TinyShield
from usaspending_api.search.filters.elasticsearch.filter import _QueryType
from usaspending_api.search.filters.time_period.decorators import NewAwardsOnlyTimePeriod
from usaspending_api.search.filters.time_period.query_types import AwardSearchTimePeriod
logger = logging.getLogger(__name__)
@api_transformations(api_version=settings.API_VERSION, function_list=API_TRANSFORM_FUNCTIONS)
class SpendingByAwardCountVisualizationViewSet(APIView):
"""This route takes award filters, and returns the number of awards in each award type.
(Contracts, Loans, Grants, etc.)
"""
endpoint_doc = "usaspending_api/api_contracts/contracts/v2/search/spending_by_award_count.md"
@cache_response()
def post(self, request):
models = [
{"name": "subawards", "key": "subawards", "type": "boolean", "default": False},
{
"name": "object_class",
"key": "filter|object_class",
"type": "array",
"array_type": "text",
"text_type": "search",
},
{
"name": "program_activity",
"key": "filter|program_activity",
"type": "array",
"array_type": "integer",
"array_max": maxsize,
},
]
models.extend(copy.deepcopy(AWARD_FILTER_NO_RECIPIENT_ID))
models.extend(copy.deepcopy(PAGINATION))
self.original_filters = request.data.get("filters")
json_request = TinyShield(models).block(request.data)
subawards = json_request["subawards"]
filters = json_request.get("filters", None)
if filters is None:
raise InvalidParameterException("Missing required request parameters: 'filters'")
if "award_type_codes" in filters and "no intersection" in filters["award_type_codes"]:
# "Special case": there will never be results when the website provides this value
empty_results = {"contracts": 0, "idvs": 0, "grants": 0, "direct_payments": 0, "loans": 0, "other": 0}
if subawards:
empty_results = {"subcontracts": 0, "subgrants": 0}
results = empty_results
elif subawards:
results = self.METHOD_NAME(filters)
else:
results = self.query_elasticsearch_for_prime_awards(filters)
raw_response = {
"results": results,
"messages": get_generic_filters_message(
self.original_filters.keys(), [elem["name"] for elem in AWARD_FILTER_NO_RECIPIENT_ID]
),
}
# Add filter field deprecation notices
# TODO: To be removed in DEV-9966
messages = raw_response.get("messages", [])
deprecated_district_field_in_location_object(messages, self.original_filters)
raw_response["messages"] = messages
return Response(raw_response)
@staticmethod
def METHOD_NAME(filters: dict) -> dict:
"""Turn the filters into the result dictionary when dealing with Sub-Awards
Note: Due to how the Django ORM joins to the awards table as an
INNER JOIN, it is necessary to explicitly enforce the aggregations
to only count Sub-Awards that are linked to a Prime Award.
Remove the filter and update if we can move away from this behavior.
"""
queryset = (
subaward_filter(filters)
.filter(award_id__isnull=False)
.values("prime_award_group")
.annotate(count=Count("broker_subaward_id"))
)
results = {}
results["subgrants"] = sum([sub["count"] for sub in queryset if sub["prime_award_group"] == "grant"])
results["subcontracts"] = sum([sub["count"] for sub in queryset if sub["prime_award_group"] == "procurement"])
return results
def query_elasticsearch_for_prime_awards(self, filters) -> list:
filter_options = {}
time_period_obj = AwardSearchTimePeriod(
default_end_date=settings.API_MAX_DATE, default_start_date=settings.API_SEARCH_MIN_DATE
)
new_awards_only_decorator = NewAwardsOnlyTimePeriod(
time_period_obj=time_period_obj, query_type=_QueryType.AWARDS
)
filter_options["time_period_obj"] = new_awards_only_decorator
filter_query = QueryWithFilters.generate_awards_elasticsearch_query(filters, **filter_options)
s = AwardSearch().filter(filter_query)
s.aggs.bucket(
"types",
"filters",
filters={category: Q("terms", type=types) for category, types in all_award_types_mappings.items()},
)
results = s.handle_execute()
contracts = results.aggregations.types.buckets.contracts.doc_count
idvs = results.aggregations.types.buckets.idvs.doc_count
grants = results.aggregations.types.buckets.grants.doc_count
direct_payments = results.aggregations.types.buckets.direct_payments.doc_count
loans = results.aggregations.types.buckets.loans.doc_count
other = results.aggregations.types.buckets.other_financial_assistance.doc_count
response = {
"contracts": contracts,
"direct_payments": direct_payments,
"grants": grants,
"idvs": idvs,
"loans": loans,
"other": other,
}
return response
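# Hedged example (hypothetical filter values; the award type codes and keyword are
# illustrative only) of a POST body this count endpoint expects: a "filters" object
# plus the optional "subawards" flag.
_EXAMPLE_REQUEST_BODY = {
    "filters": {"award_type_codes": ["A", "B", "C", "D"], "keywords": ["transport"]},
    "subawards": False,
}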
|
4,155 |
test encode input data
|
import pytest
import torch
from ignite.distributed.comp_models.base import _SerialModel, ComputationModel
def test_serial_model():
_SerialModel.create_from_backend()
model = _SerialModel.create_from_context()
assert model.get_local_rank() == 0
assert model.get_rank() == 0
assert model.get_world_size() == 1
assert model.get_nproc_per_node() == 1
assert model.get_nnodes() == 1
assert model.get_node_rank() == 0
if torch.cuda.is_available():
assert model.device().type == "cuda"
else:
assert model.device().type == "cpu"
assert model.backend() is None
model.finalize()
with pytest.raises(NotImplementedError, match=r"Serial computation model does not implement spawn method"):
model.spawn()
model.all_reduce(1)
model.all_gather(1)
model.broadcast(1)
assert model._do_all_reduce(torch.tensor(1)) == torch.tensor(1)
assert model._do_all_gather(torch.tensor(1)) == torch.tensor(1)
assert model._do_broadcast(torch.tensor(1), src=0) == torch.tensor(1)
model.barrier()
def test__encode_str__decode_str():
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
s = "test-abcedfg"
encoded_s = ComputationModel._encode_str(s, device, 1024)
assert isinstance(encoded_s, torch.Tensor) and encoded_s.shape == (1, 1025)
decoded_s = ComputationModel._decode_str(encoded_s)
assert isinstance(decoded_s, list) and len(decoded_s) == 1
assert decoded_s[0] == s
def METHOD_NAME():
encoded_msg = ComputationModel._encode_input_data(None, is_src=True)
assert encoded_msg == [-1] * 512
encoded_msg = ComputationModel._encode_input_data(12.0, is_src=True)
assert encoded_msg == [1] + [-1] * 511
encoded_msg = ComputationModel._encode_input_data("abc", is_src=True)
assert encoded_msg == [2] + [-1] * 511
t = torch.rand(2, 512, 32, 32, 64)
encoded_msg = ComputationModel._encode_input_data(t, is_src=True)
dtype_str = str(t.dtype)
true_msg = [0, 5, 2, 512, 32, 32, 64, len(dtype_str), *list(bytearray(dtype_str, "utf-8"))]
assert encoded_msg == true_msg + [-1] * (512 - len(true_msg))
t = torch.randint(-1235, 1233, size=(2, 512, 32, 32, 64))
encoded_msg = ComputationModel._encode_input_data(t, is_src=True)
dtype_str = str(t.dtype)
true_msg = [0, 5, 2, 512, 32, 32, 64, len(dtype_str), *list(bytearray(dtype_str, "utf-8"))]
assert encoded_msg == true_msg + [-1] * (512 - len(true_msg))
t = torch.tensor(12)
encoded_msg = ComputationModel._encode_input_data(t, is_src=True)
dtype_str = str(t.dtype)
true_msg = [0, 0, len(dtype_str), *list(bytearray(dtype_str, "utf-8"))]
assert encoded_msg == true_msg + [-1] * (512 - len(true_msg))
for t in [None, "abc", torch.rand(2, 512, 32, 32, 64), 12.34, object()]:
encoded_msg = ComputationModel._encode_input_data(t, is_src=False)
assert encoded_msg == [-1] * 512
def test__decode_as_placeholder():
device = torch.device("cpu")
encoded_msg = [-1] * 512
encoded_msg[0] = 1
res = ComputationModel._decode_as_placeholder(encoded_msg, device)
assert isinstance(res, float) and res == 0.0
encoded_msg = [-1] * 512
encoded_msg[0] = 2
res = ComputationModel._decode_as_placeholder(encoded_msg, device)
assert isinstance(res, str) and res == ""
encoded_msg = [-1] * 512
encoded_msg[0] = 0
encoded_msg[1 : 1 + 7] = [6, 2, 3, 4, 5, 6, 7]
dtype_str = "torch.int64"
payload = [len(dtype_str), *list(bytearray(dtype_str, "utf-8"))]
encoded_msg[1 + 7 : 1 + 7 + len(payload)] = payload
res = ComputationModel._decode_as_placeholder(encoded_msg, device)
assert isinstance(res, torch.Tensor) and res.dtype == torch.int64 and res.shape == (2, 3, 4, 5, 6, 7)
encoded_msg = [-1] * 512
with pytest.raises(RuntimeError, match="Internal error: unhandled dtype"):
ComputationModel._decode_as_placeholder(encoded_msg, device)
t = torch.rand(2, 512, 32, 32, 64)
encoded_msg = ComputationModel._encode_input_data(t, True)
res = ComputationModel._decode_as_placeholder(encoded_msg, device)
assert isinstance(res, torch.Tensor) and res.dtype == t.dtype and res.shape == t.shape
t = torch.tensor(12)
encoded_msg = ComputationModel._encode_input_data(t, True)
res = ComputationModel._decode_as_placeholder(encoded_msg, device)
assert isinstance(res, torch.Tensor) and res.dtype == t.dtype and res.shape == t.shape
def test__setup_placeholder():
device = torch.device("cpu")
from ignite.distributed.utils import _model
for t in [torch.rand(2, 3, 4), "abc", 123.45]:
data = _model._setup_placeholder(t, device, True)
assert isinstance(data, type(t))
if isinstance(data, torch.Tensor):
assert (data == t).all()
else:
assert data == t
|
4,156 |
find config templates
|
#!/usr/bin/env python
"""script to concatenate the dirac.cfg file's Systems sections with the content of the ConfigTemplate.cfg files."""
from collections import OrderedDict
import logging
import os
import re
import textwrap
import sys
import shutil
from diracdoctools.Config import Configuration
from diracdoctools.Utilities import makeLogger
from diraccfg import CFG
# Try/except for python3 compatibility to ignore errors in ``import DIRAC`` while they last
# ultimate protection against not having the symbols imported is also done in the ``run`` function
try:
from DIRAC import S_OK, S_ERROR
except ImportError:
pass
LOG = makeLogger("ConcatCFG")
class ConcatCFG:
def __init__(self, configFile="docs.conf"):
self.config = Configuration(configFile, sections=["CFG"])
self.retVal = 0
def prepareDiracCFG(self):
"""Copy dirac.cfg file to source dir."""
LOG.info("Copy %r to source directory", self.config.cfg_baseFile)
shutil.copy(self.config.cfg_baseFile, "/".join([self.config.docsPath, "source/"]))
def updateCompleteDiracCFG(self):
"""Read the dirac.cfg and update the Systems sections from the ConfigTemplate.cfg files."""
compCfg = CFG()
mainDiracCfgPath = self.config.cfg_baseFile
if not os.path.exists(mainDiracCfgPath):
LOG.error("Failed to find Main Dirac cfg at %r", mainDiracCfgPath)
return 1
self.prepareDiracCFG()
LOG.info("Extracting default configuration from %r", mainDiracCfgPath)
loadCFG = CFG()
loadCFG.loadFromFile(mainDiracCfgPath)
compCfg = loadCFG.mergeWith(compCfg)
cfg = self.getSystemsCFG()
compCfg = compCfg.mergeWith(cfg)
diracCfgOutput = self.config.cfg_targetFile
LOG.info("Writing output to %r", diracCfgOutput)
with open(diracCfgOutput, "w") as rst:
rst.write(
textwrap.dedent(
"""
.. _full_configuration_example:
==========================
Full Configuration Example
==========================
.. This file is created by docs/Tools/UpdateDiracCFG.py
Below is a complete example configuration with annotations for some sections::
"""
)
)
# indent the cfg text
cfgString = "".join(" " + line for line in str(compCfg).splitlines(True))
# fix the links, add back the # for targets
# match .html with following character using positive look ahead
htmlMatch = re.compile(r"\.html(?=[a-zA-Z0-9])")
cfgString = re.sub(htmlMatch, ".html#", cfgString)
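# Illustrative effect of the substitution above (the example path is assumed):
#   "Systems/Configuration.htmlAgents" -> "Systems/Configuration.html#Agents"
# while ".html" followed by whitespace or punctuation is left untouched.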
rst.write(cfgString)
return self.retVal
def getSystemsCFG(self):
"""Find all the ConfigTemplates and collate them into one CFG object."""
cfg = CFG()
cfg.createNewSection("/Systems")
templateLocations = self.METHOD_NAME()
for templatePath in templateLocations:
cfgRes = self.parseConfigTemplate(templatePath, cfg)
if cfgRes["OK"]:
cfg = cfgRes["Value"]
return cfg
def METHOD_NAME(self):
"""Traverse folders in DIRAC and find ConfigTemplate.cfg files."""
configTemplates = dict()
for baseDirectory, _subdirectories, files in os.walk(self.config.sourcePath):
LOG.debug("Looking in %r", baseDirectory)
if "ConfigTemplate.cfg" in files:
system = baseDirectory.rsplit("/", 1)[1]
LOG.info("Found Template for %r in %r", system, baseDirectory)
configTemplates[system] = baseDirectory
return OrderedDict(sorted(configTemplates.items(), key=lambda t: t[0])).values()
def parseConfigTemplate(self, templatePath, cfg):
"""Parse the ConfigTemplate.cfg files.
:param str templatePath: path to the folder containing a ConfigTemplate.cfg file
:param CFG cfg: cfg to merge with the systems config
:returns: CFG object
"""
system = os.path.split(templatePath.rstrip("/"))[1]
if system.lower().endswith("system"):
system = system[: -len("System")]
templatePath = os.path.join(templatePath, "ConfigTemplate.cfg")
if not os.path.exists(templatePath):
return S_ERROR(f"File not found: {templatePath}")
loadCfg = CFG()
try:
loadCfg.loadFromFile(templatePath)
except ValueError as err:
LOG.error("Failed loading file %r: %r", templatePath, err)
self.retVal = 1
return S_ERROR()
cfg.createNewSection(f"/Systems/{system}", contents=loadCfg)
return S_OK(cfg)
def run(configFile="docs.conf", logLevel=logging.INFO, debug=False):
"""Add sections from System/ConfigTemplates to main dirac.cfg file
:param str configFile: path to the configFile
:param logLevel: logging level to use
:param bool debug: unused
:returns: return value 1 or 0
"""
try:
logging.getLogger().setLevel(logLevel)
concat = ConcatCFG(configFile=configFile)
return concat.updateCompleteDiracCFG()
except (ImportError, NameError):
return 1
if __name__ == "__main__":
sys.exit(run())
|
4,157 |
get render dir
|
"""
Flowblade Movie Editor is a nonlinear video editor.
Copyright 2012 Janne Liljeblad.
This file is part of Flowblade Movie Editor <https://github.com/jliljebl/flowblade/>.
Flowblade Movie Editor is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Flowblade Movie Editor is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Flowblade Movie Editor. If not, see <http://www.gnu.org/licenses/>.
"""
from gi.repository import GLib
import os
import threading
import appconsts
import projectdatavault
_init_error = None
_xdg_config_dir = None
_xdg_data_dir = None
_xdg_cache_dir = None
# --------------------------------------------------------- interface
def init():
global _init_error
# Get user folder locations
global _xdg_config_dir, _xdg_data_dir, _xdg_cache_dir
# XDG folders
_xdg_config_dir = os.path.join(GLib.get_user_config_dir(), "flowblade")
_xdg_data_dir = os.path.join(GLib.get_user_data_dir(), "flowblade")
_xdg_cache_dir = os.path.join(GLib.get_user_cache_dir(), "flowblade")
print("XDG Config", _xdg_config_dir)
print("XDG Data", _xdg_data_dir)
print("XDG Cache",_xdg_cache_dir)
# Make sure XDG dirs data is available and usable by trying to create XDG folders
try:
_maybe_create_xdg_dirs()
except Exception as e:
_init_error = "Error message: " + str(e) + "\n\n"
_init_error += "XDG Config dir: " + _xdg_config_dir + "\n"
_init_error += "XDG Data dir: " + _xdg_data_dir + "\n"
_init_error += "XDG Cache dir: " + _xdg_cache_dir + "\n"
return
# --------------------------------------------------------- dirs paths
def get_config_dir():
return _xdg_config_dir + "/"
def get_data_dir():
return _xdg_data_dir + "/"
def get_cache_dir():
return _xdg_cache_dir + "/"
def METHOD_NAME(force_legacy=False):
if projectdatavault.vault_data_exists_for_project() == False or force_legacy == True:
return get_data_dir() + appconsts.RENDERED_CLIPS_DIR + "/"
return projectdatavault.get_render_folder()
def get_legacy_render_dir():
# For accessing legacy render data created prior to 2.12.
return get_data_dir() + appconsts.RENDERED_CLIPS_DIR
def get_container_clips_dir():
if projectdatavault.vault_data_exists_for_project() == False:
return get_data_dir() + appconsts.CONTAINER_CLIPS_DIR + "/"
return projectdatavault.get_containers_folder()
def get_container_clips_unrendered_dir():
if projectdatavault.vault_data_exists_for_project() == False:
return get_data_dir() + appconsts.CONTAINER_CLIPS_UNRENDERED + "/"
return projectdatavault.get_container_clips_unrendered_folder()
def get_proxies_dir():
if projectdatavault.vault_data_exists_for_project() == False:
return METHOD_NAME() + appconsts.PROXIES_DIR
return projectdatavault.get_proxies_folder()
def get_ingest_dir():
return projectdatavault.get_ingest_folder()
def get_audio_levels_dir():
if projectdatavault.vault_data_exists_for_project() == False:
return get_cache_dir() + appconsts.AUDIO_LEVELS_DIR
return projectdatavault.get_audio_levels_folder()
def get_thumbnail_dir():
if projectdatavault.vault_data_exists_for_project() == False:
return get_cache_dir() + appconsts.THUMBNAILS_DIR + "/"
return projectdatavault.get_thumbnails_folder()
def get_temp_render_dir():
return get_cache_dir() + appconsts.TEMP_RENDER_DIR
def get_hidden_screenshot_dir_path():
return get_cache_dir() + "screenshot/"
#------------------------------------------------------ state functions
def get_init_error():
return _init_error
# ---------------------------------------------------------------- internal functions
def _maybe_create_xdg_dirs():
# ---------------------- CONFIG
# Prefs and recents files
if not os.path.exists(_xdg_config_dir):
print("CREATED XDG CONFIG DIR.")
os.mkdir(_xdg_config_dir)
# --------------------- DATA
# Data that can break projects and cannot be regenerated by app
# Data root folder
if not os.path.exists(_xdg_data_dir):
print("CREATED XDG DATA DIR.")
os.mkdir(_xdg_data_dir)
# Data individual folders
if not os.path.exists(get_data_dir() + appconsts.USER_PROFILES_DIR):
os.mkdir(get_data_dir() + appconsts.USER_PROFILES_DIR)
"""
Legacy data folders where data was kept prior to 2.12.
These can be accessed if existing after 2.12, but will not be created anymore
when applications are installed fresh.
if not os.path.exists(get_render_dir()):
os.mkdir(get_render_dir())
if not os.path.exists(get_data_dir() + appconsts.CONTAINER_CLIPS_DIR):
os.mkdir(get_data_dir() + appconsts.CONTAINER_CLIPS_DIR)
if not os.path.exists(get_data_dir() + appconsts.CONTAINER_CLIPS_UNRENDERED):
os.mkdir(get_data_dir() + appconsts.CONTAINER_CLIPS_UNRENDERED)
if not os.path.exists(get_render_dir() + "/" + appconsts.PROXIES_DIR):
os.mkdir(get_render_dir() + "/" + appconsts.PROXIES_DIR)
"""
if not os.path.exists(get_data_dir() + "/" + appconsts.USER_SHORTCUTS_DIR):
os.mkdir(get_data_dir() + "/" + appconsts.USER_SHORTCUTS_DIR)
#----------------- CACHE
# Data that can be regenerated by app or is transient
# Cache root folder
if not os.path.exists(_xdg_cache_dir):
print("CREATED XDG CACHE DIR.")
os.mkdir(_xdg_cache_dir)
# Cache individual folders
if not os.path.exists(get_cache_dir() + appconsts.AUTOSAVE_DIR):
os.mkdir(get_cache_dir() + appconsts.AUTOSAVE_DIR)
"""
Legacy data folder.
if not os.path.exists(get_cache_dir() + appconsts.THUMBNAILS_DIR):
os.mkdir(get_cache_dir() + appconsts.THUMBNAILS_DIR)
"""
if not os.path.exists(get_cache_dir() + appconsts.GMIC_DIR):
os.mkdir(get_cache_dir() + appconsts.GMIC_DIR)
"""
Legacy data folder.
if not os.path.exists(get_cache_dir() + appconsts.AUDIO_LEVELS_DIR):
os.mkdir(get_cache_dir() + appconsts.AUDIO_LEVELS_DIR)
"""
if not os.path.exists(get_cache_dir() + appconsts.TRIM_VIEW_DIR):
os.mkdir(get_cache_dir() + appconsts.TRIM_VIEW_DIR)
if not os.path.exists(get_cache_dir() + appconsts.BATCH_DIR):
os.mkdir(get_cache_dir() + appconsts.BATCH_DIR)
if not os.path.exists(get_hidden_screenshot_dir_path()):
os.mkdir(get_hidden_screenshot_dir_path())
if not os.path.exists(get_cache_dir() + appconsts.SCRIP_TOOL_DIR):
os.mkdir(get_cache_dir() + appconsts.SCRIP_TOOL_DIR)
if not os.path.exists(get_temp_render_dir()):
os.mkdir(get_temp_render_dir())
|
4,158 |
pieslice
|
#
# The Python Imaging Library
# $Id$
#
# WCK-style drawing interface operations
#
# History:
# 2003-12-07 fl created
# 2005-05-15 fl updated; added to PIL as ImageDraw2
# 2005-05-15 fl added text support
# 2005-05-20 fl added arc/chord/pieslice support
#
# Copyright (c) 2003-2005 by Secret Labs AB
# Copyright (c) 2003-2005 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
"""
(Experimental) WCK-style drawing interface operations
.. seealso:: :py:mod:`PIL.ImageDraw`
"""
from . import Image, ImageColor, ImageDraw, ImageFont, ImagePath
class Pen:
"""Stores an outline color and width."""
def __init__(self, color, width=1, opacity=255):
self.color = ImageColor.getrgb(color)
self.width = width
class Brush:
"""Stores a fill color"""
def __init__(self, color, opacity=255):
self.color = ImageColor.getrgb(color)
class Font:
"""Stores a TrueType font and color"""
def __init__(self, color, file, size=12):
# FIXME: add support for bitmap fonts
self.color = ImageColor.getrgb(color)
self.font = ImageFont.truetype(file, size)
class Draw:
"""
(Experimental) WCK-style drawing interface
"""
def __init__(self, image, size=None, color=None):
if not hasattr(image, "im"):
image = Image.new(image, size, color)
self.draw = ImageDraw.Draw(image)
self.image = image
self.transform = None
def flush(self):
return self.image
def render(self, op, xy, pen, brush=None):
# handle color arguments
outline = fill = None
width = 1
if isinstance(pen, Pen):
outline = pen.color
width = pen.width
elif isinstance(brush, Pen):
outline = brush.color
width = brush.width
if isinstance(brush, Brush):
fill = brush.color
elif isinstance(pen, Brush):
fill = pen.color
# handle transformation
if self.transform:
xy = ImagePath.Path(xy)
xy.transform(self.transform)
# render the item
if op == "line":
self.draw.line(xy, fill=outline, width=width)
else:
getattr(self.draw, op)(xy, fill=fill, outline=outline)
def settransform(self, offset):
"""Sets a transformation offset."""
(xoffset, yoffset) = offset
self.transform = (1, 0, xoffset, 0, 1, yoffset)
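# Note (sketch, following PIL's affine convention): the 6-tuple (a, b, c, d, e, f)
# maps (x, y) -> (a*x + b*y + c, d*x + e*y + f), so (1, 0, xoffset, 0, 1, yoffset)
# simply shifts every point by (xoffset, yoffset).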
def arc(self, xy, start, end, *options):
"""
Draws an arc (a portion of a circle outline) between the start and end
angles, inside the given bounding box.
.. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.arc`
"""
self.render("arc", xy, start, end, *options)
def chord(self, xy, start, end, *options):
"""
Same as :py:meth:`~PIL.ImageDraw2.Draw.arc`, but connects the end points
with a straight line.
.. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.chord`
"""
self.render("chord", xy, start, end, *options)
def ellipse(self, xy, *options):
"""
Draws an ellipse inside the given bounding box.
.. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.ellipse`
"""
self.render("ellipse", xy, *options)
def line(self, xy, *options):
"""
Draws a line between the coordinates in the ``xy`` list.
.. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.line`
"""
self.render("line", xy, *options)
def METHOD_NAME(self, xy, start, end, *options):
"""
Same as arc, but also draws straight lines between the end points and the
center of the bounding box.
.. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.pieslice`
"""
self.render("pieslice", xy, start, end, *options)
def polygon(self, xy, *options):
"""
Draws a polygon.
The polygon outline consists of straight lines between the given
coordinates, plus a straight line between the last and the first
coordinate.
.. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.polygon`
"""
self.render("polygon", xy, *options)
def rectangle(self, xy, *options):
"""
Draws a rectangle.
.. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.rectangle`
"""
self.render("rectangle", xy, *options)
def text(self, xy, text, font):
"""
Draws the string at the given position.
.. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.text`
"""
if self.transform:
xy = ImagePath.Path(xy)
xy.transform(self.transform)
self.draw.text(xy, text, font=font.font, fill=font.color)
def textbbox(self, xy, text, font):
"""
Returns bounding box (in pixels) of given text.
:return: ``(left, top, right, bottom)`` bounding box
.. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.textbbox`
"""
if self.transform:
xy = ImagePath.Path(xy)
xy.transform(self.transform)
return self.draw.textbbox(xy, text, font=font.font)
def textlength(self, text, font):
"""
Returns length (in pixels) of given text.
This is the amount by which following text should be offset.
.. seealso:: :py:meth:`PIL.ImageDraw.ImageDraw.textlength`
"""
return self.draw.textlength(text, font=font.font)
|
4,159 |
test hackrf class send
|
import time
import unittest
import os
import tempfile
import numpy as np
from urh.util import util
util.set_shared_library_path()
from urh.dev.native.lib import hackrf
from urh.dev.native.Rad1o import Rad1o
class TestHackRF(unittest.TestCase):
def callback_fun(self, buffer):
print(buffer)
for idx in range(0, len(buffer), 4):
try:
r = np.fromstring(buffer[idx:idx + 2], dtype=np.float16) / 32767.5
i = np.fromstring(buffer[idx + 2:idx + 4], dtype=np.float16) / 32767.5
except ValueError:
continue
if r and i:
print(r, i)
# out.append(complex(float(buffer[i:i+1])/32767.5, float(buffer[i+2:i+3])/32767.5))
return 0
def test_fromstring(self):
buffer = b'\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfd\xff\xfd\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfd\xfe\xfd\xfe\xff\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfd\xfe'
r = np.empty(len(buffer) // 2, dtype=np.float32)
i = np.empty(len(buffer) // 2, dtype=np.float32)
c = np.empty(len(buffer) // 2, dtype=np.complex64)
# dtype =
unpacked = np.frombuffer(buffer, dtype=[('r', np.uint8), ('i', np.uint8)])
ru = unpacked['r'] / 128.0
iu = unpacked['i'] / 128.0
# for j in range(0, len(buffer)-1, 2):
# r[j//2] = np.frombuffer(buffer[j:j + 1], dtype=np.int8) / 128.0
# i[j//2] = np.frombuffer(buffer[j + 1:j + 2], dtype=np.int8) / 128.0
# r2 = np.fromstring(buffer[], dtype=np.float16) / 32767.5
c.real = ru
c.imag = iu
print(c)
# x,y = np.frombuffer(buffer, dtype=[('x', np.float16), ('y', np.float16)])
def test_fromstring2(self):
buffer = b'\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfd\xff\xfd\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfd\xfe\xfd\xfe\xff\xfe\xfe\xfe\xfe\xfe\xfe\xfe\xfd\xfe'
c = np.empty(len(buffer) // 2, dtype=np.complex64)
# dtype =
unpacked = np.frombuffer(buffer, dtype="<h") # cast in short
print(unpacked)
f = 1.0/32767.5
for i in range(0, len(unpacked)-1,2):
c[i] = complex(float(unpacked[i]*f), float(unpacked[i+1]*f))
print(c)
# x,y = np.frombuffer(buffer, dtype=[('x', np.float16), ('y', np.float16)])
def test_hackrf_class_recv(self):
hfc = Rad1o(433.92e6, 1e6, 1e6, 20)
hfc.start_rx_mode()
i = 0
TIME_TOTAL = 5
while i <TIME_TOTAL:
print("{0}/{1}".format(i+1, TIME_TOTAL))
time.sleep(1)
i+=1
print("{0:,}".format(hfc.current_recv_index))
hfc.received_data.tofile(os.path.join(tempfile.gettempdir(), "rad1o.complex"))
print("Wrote Data")
hfc.stop_rx_mode("Finished test")
def METHOD_NAME(self):
hfc = Rad1o(433.92e6, 1e6, 1e6, 20)
hfc.start_tx_mode(np.fromfile(os.path.join(tempfile.gettempdir(), "rad1o.complex"),
dtype=np.complex64), repeats=1)
while not hfc.sending_finished:
print("Repeat: {0} Current Sample: {1}/{2}".format(hfc.current_sending_repeat+1,
hfc.current_sent_sample,
len(hfc.samples_to_send)))
time.sleep(1)
hfc.stop_tx_mode("Test finished")
def test_hackrf_pack_unpack(self):
arr = np.array([-128, -128, -0.5, -0.5, -3, -3, 127, 127], dtype=np.int8)
self.assertEqual(arr[0], -128)
self.assertEqual(arr[1], -128)
self.assertEqual(arr[-1], 127)
received = arr.tobytes()
self.assertEqual(len(received), len(arr))
self.assertEqual(np.int8(received[0]), -128)
self.assertEqual(np.int8(received[1]), -128)
unpacked = Rad1o.bytes_to_iq(received)
self.assertEqual(complex(unpacked[0][0], unpacked[0][1]), complex(-128, -128))
self.assertAlmostEqual(complex(unpacked[1][0], unpacked[1][1]), complex(0, 0), places=1)
self.assertAlmostEqual(complex(unpacked[2][0], unpacked[2][1]), complex(-3, -3), places=1)
self.assertEqual(complex(unpacked[3][0], unpacked[3][1]), complex(127, 127))
packed = Rad1o.iq_to_bytes(unpacked)
self.assertEqual(received, bytearray(packed))
def test_c_api(self):
def callback(n):
print("called")
return np.array([1], dtype=np.ubyte)
print("init", hackrf.init())
print("open", hackrf.open())
print("start_tx", hackrf.start_tx_mode(callback))
time.sleep(1)
print("stop_tx", hackrf.stop_tx_mode())
print("close", hackrf.close())
print("exit", hackrf.exit())
def test_device_list(self):
print(hackrf.get_device_list())
if __name__ == "__main__":
unittest.main()
|
4,160 |
render
|
from typing import cast, List, Optional, TYPE_CHECKING, Union
from ._spinners import SPINNERS
from .measure import Measurement
from .table import Table
from .text import Text
if TYPE_CHECKING:
from .console import Console, ConsoleOptions, RenderResult, RenderableType
from .style import StyleType
class Spinner:
"""A spinner animation.
Args:
name (str): Name of spinner (run python -m rich.spinner).
text (RenderableType, optional): A renderable to display at the right of the spinner (str or Text typically). Defaults to "".
style (StyleType, optional): Style for spinner animation. Defaults to None.
speed (float, optional): Speed factor for animation. Defaults to 1.0.
Raises:
KeyError: If name isn't one of the supported spinner animations.
"""
def __init__(
self,
name: str,
text: "RenderableType" = "",
*,
style: Optional["StyleType"] = None,
speed: float = 1.0,
) -> None:
try:
spinner = SPINNERS[name]
except KeyError:
raise KeyError(f"no spinner called {name!r}")
self.text: "Union[RenderableType, Text]" = (
Text.from_markup(text) if isinstance(text, str) else text
)
self.frames = cast(List[str], spinner["frames"])[:]
self.interval = cast(float, spinner["interval"])
self.start_time: Optional[float] = None
self.style = style
self.speed = speed
self.frame_no_offset: float = 0.0
self._update_speed = 0.0
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
yield self.METHOD_NAME(console.get_time())
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> Measurement:
text = self.METHOD_NAME(0)
return Measurement.get(console, options, text)
def METHOD_NAME(self, time: float) -> "RenderableType":
"""Render the spinner for a given time.
Args:
time (float): Time in seconds.
Returns:
RenderableType: A renderable containing animation frame.
"""
if self.start_time is None:
self.start_time = time
frame_no = ((time - self.start_time) * self.speed) / (
self.interval / 1000.0
) + self.frame_no_offset
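# Illustrative reading of the expression above (sketch): with interval=80 (milliseconds)
# and speed=1.0, frame_no advances by one roughly every 0.08 s of wall-clock time.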
frame = Text(
self.frames[int(frame_no) % len(self.frames)], style=self.style or ""
)
if self._update_speed:
self.frame_no_offset = frame_no
self.start_time = time
self.speed = self._update_speed
self._update_speed = 0.0
if not self.text:
return frame
elif isinstance(self.text, (str, Text)):
return Text.assemble(frame, " ", self.text)
else:
table = Table.grid(padding=1)
table.add_row(frame, self.text)
return table
def update(
self,
*,
text: "RenderableType" = "",
style: Optional["StyleType"] = None,
speed: Optional[float] = None,
) -> None:
"""Updates attributes of a spinner after it has been started.
Args:
text (RenderableType, optional): A renderable to display at the right of the spinner (str or Text typically). Defaults to "".
style (StyleType, optional): Style for spinner animation. Defaults to None.
speed (float, optional): Speed factor for animation. Defaults to None.
"""
if text:
self.text = Text.from_markup(text) if isinstance(text, str) else text
if style:
self.style = style
if speed:
self._update_speed = speed
if __name__ == "__main__": # pragma: no cover
from time import sleep
from .columns import Columns
from .panel import Panel
from .live import Live
all_spinners = Columns(
[
Spinner(spinner_name, text=Text(repr(spinner_name), style="green"))
for spinner_name in sorted(SPINNERS.keys())
],
column_first=True,
expand=True,
)
with Live(
Panel(all_spinners, title="Spinners", border_style="blue"),
refresh_per_second=20,
) as live:
while True:
sleep(0.1)
|
4,161 |
test pyobj constructor
|
import neuron
import pytest
def test_hocbase():
class MyList(neuron.HocBaseObject, hoc_type=neuron.h.Vector):
pass
assert issubclass(MyList, neuron.hoc.HocObject)
assert issubclass(MyList, neuron.HocBaseObject)
assert MyList._hoc_type == neuron.h.Vector
def test_hoc_template_hclass():
neuron.h(
"""
begintemplate A
public x, s, o, xa, oa, f, p
strdef s
objref o, oa[2]
double xa[3]
proc init() { \
x = $1 \
}
func f() { return $1*xa[$2] }
proc p() { x += 1 }
endtemplate A
"""
)
class A1(neuron.hclass(neuron.h.A)):
def __new__(cls, arg):
return super().__new__(cls, arg)
def __init__(self, arg):
self.bp = self.baseattr("p")
def p(self):
self.bp()
return self.x
a = A1(5)
assert a.x == 5.0
assert a.p() == 6.0
b = A1(4)
a.s = "one"
b.s = "two"
assert a.s == "one"
assert b.s == "two"
assert neuron.h.A[0].s == "one"
assert a.p() == 7.0
assert b.p() == 5.0
a.a = 2
b.a = 3
assert a.a == 2
assert b.a == 3
assert neuron.h.List("A").count() == 2
a = 1
b = 1
assert neuron.h.List("A").count() == 0
def METHOD_NAME():
# Test that __new__ is required when __init__ is overridden
with pytest.raises(TypeError):
class PyObj(neuron.HocBaseObject, hoc_type=neuron.h.List):
def __init__(self, first):
super().__init__()
self.append(first)
class PyObj(neuron.HocBaseObject, hoc_type=neuron.h.List):
def __new__(cls, first):
return super().__new__(cls)
def __init__(self, first):
super().__init__()
self.append(first)
p = PyObj(neuron.h.List())
assert p.count() == 1
def test_pyobj_def():
class PyObj(neuron.HocBaseObject, hoc_type=neuron.h.List):
def my_method(self, a):
return a * 2
p = PyObj()
assert p.my_method(4) == 8
def test_pyobj_overloading():
class PyObj(neuron.HocBaseObject, hoc_type=neuron.h.List):
def append(self, i):
self.last_appended = i
return self.baseattr("append")(i)
p = PyObj()
p2 = PyObj()
assert p.append(p) == 1
assert p.count() == 1
assert p[0] == p
def test_pyobj_inheritance():
class PyObj(neuron.HocBaseObject, hoc_type=neuron.h.List):
pass
class MyObj(PyObj):
pass
with pytest.raises(TypeError):
class MyObj2(PyObj):
def __init__(self, arg):
pass
class List(neuron.HocBaseObject, hoc_type=neuron.h.List):
def __new__(cls, *args, **kwargs):
return super().__new__(cls)
class InitList(List):
def __init__(self, *args):
super().__init__()
for arg in args:
self.append(arg)
l = InitList(neuron.h.List(), neuron.h.List())
def test_pyobj_composition():
class A(neuron.HocBaseObject, hoc_type=neuron.h.List):
pass
class B(neuron.HocBaseObject, hoc_type=neuron.h.List):
pass
class C(neuron.HocBaseObject, hoc_type=neuron.h.Vector):
pass
with pytest.raises(TypeError):
# Composition of different HOC types is impossible.
class D(A, C):
pass
class E(A, B):
pass
assert E._hoc_type == neuron.h.List
class PickleTest(neuron.HocBaseObject, hoc_type=neuron.h.NetStim):
def __new__(cls, *args, **kwargs):
return super().__new__(cls)
def __init__(self, start, number, interval, noise):
self.start = start
self.number = number
self.interval = interval
self.noise = noise
def __reduce__(self):
return (
self.__class__,
(self.start, self.number, self.interval, self.noise),
)
def test_pyobj_pickle():
import pickle
p = pickle.loads(pickle.dumps(PickleTest(10, 100, 1, 0)))
assert p.__class__ is PickleTest
assert p.start == 10
|
4,162 |
tool activated
|
import unicodedata
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QColor, QKeySequence, QPainterPath
from PyQt5.QtWidgets import QApplication
from trufont.drawingTools.baseTool import BaseTool
_path = QPainterPath()
_path.moveTo(5.29, 17.96)
_path.lineTo(6.94, 17.96)
_path.lineTo(7.36, 21.5)
_path.lineTo(7.78, 21.95)
_path.lineTo(12.4, 21.95)
_path.lineTo(12.4, 6.35)
_path.lineTo(11.86, 5.93)
_path.lineTo(9.7, 5.78)
_path.lineTo(9.7, 4.45)
_path.lineTo(18.3, 4.45)
_path.lineTo(18.3, 5.78)
_path.lineTo(16.14, 5.93)
_path.lineTo(15.6, 6.35)
_path.lineTo(15.6, 21.95)
_path.lineTo(20.22, 21.95)
_path.lineTo(20.64, 21.5)
_path.lineTo(21.06, 17.96)
_path.lineTo(22.71, 17.96)
_path.lineTo(22.71, 23.58)
_path.lineTo(5.29, 23.58)
_path.closeSubpath()
def _isUnicodeChar(text):
return len(text) and unicodedata.category(text) != "Cc"
# XXX: rewind the shaped string when metrics change/anchors are moved
class TextTool(BaseTool):
icon = _path
name = QApplication.translate("TextTool", "Text")
shortcut = "T"
grabKeyboard = True
def __init__(self, parent=None):
super().__init__(parent)
@property
def _layoutManager(self):
return self.parent().layoutManager()
# TODO: we might want to fold this into LayoutManager
def _insertUnicodings(self, text):
unicodeData = self._font.unicodeData
for c in text:
glyphName = unicodeData.glyphNameForUnicode(ord(c))
if glyphName is not None:
self._layoutManager.insert(glyphName)
# methods
def METHOD_NAME(self):
widget = self.parent()
# XXX: don't disable tool on setGlyphs, then uncomment this
# self._layoutManager.initCaret()
widget.update()
def toolDisabled(self):
self.parent().update()
def drawingAttribute(self, attr, flags):
if flags.isActiveLayer:
return attr in ("showGlyphFill", "showGlyphComponentFill")
return False
def drawingColor(self, attr, flags):
if attr == "componentFillColor":
return Qt.black
return None
# events
def keyPressEvent(self, event):
key = event.key()
if event.matches(QKeySequence.Paste):
# XXX: the menu item should also go down this codepath
clipboard = QApplication.clipboard()
mimeData = clipboard.mimeData()
if mimeData.hasText():
self._insertUnicodings(mimeData.text())
elif key == Qt.Key_Left:
# TODO: we'll probably need to reform this stuff for RTL
self._layoutManager.caretPrevious()
elif key == Qt.Key_Right:
self._layoutManager.caretNext()
elif key in (Qt.Key_Backspace, Qt.Key_Delete):
self._layoutManager.delete(forward=(key == Qt.Key_Delete))
else:
text = event.text()
if not _isUnicodeChar(text):
return
# text should be just one codepoint, but be safe
self._insertUnicodings(text)
def mousePressEvent(self, event):
if event.button() == Qt.LeftButton:
pos = self.parent().mapFromCanvas(event.localPos())
self._layoutManager.setCaretFromPos(pos)
else:
super().mousePressEvent(event)
def paintBackground(self, painter, index):
# XXX: we can't currently draw the caret when there's no
# glyph on canvas
offset = self._layoutManager.drawingOffset(index)
if offset is None:
return
dx, dy = offset
bottom, top = self.parent().verticalBounds()
upm = top - bottom
painter.save()
pen = painter.pen()
pen.setColor(QColor(90, 90, 90))
pen.setWidth(0)
painter.setPen(pen)
painter.translate(dx, bottom + dy)
painter.drawLine(-30, -25, 0, 0)
painter.drawLine(0, 0, 30, -25)
painter.drawLine(0, 0, 0, upm)
painter.drawLine(-30, upm + 25, 0, upm)
painter.drawLine(0, upm, 30, upm + 25)
painter.restore()
|
4,163 |
get author
|
# Code generated by sqlc. DO NOT EDIT.
# versions:
# sqlc v1.21.0
# source: query.sql
import dataclasses
import datetime
from typing import AsyncIterator, List, Optional
import sqlalchemy
import sqlalchemy.ext.asyncio
from booktest import models
BOOKS_BY_TAGS = """-- name: books_by_tags \\:many
SELECT
book_id,
title,
name,
isbn,
tags
FROM books
LEFT JOIN authors ON books.author_id = authors.author_id
WHERE tags && :p1\\:\\:varchar[]
"""
@dataclasses.dataclass()
class BooksByTagsRow:
book_id: int
title: str
name: Optional[str]
isbn: str
tags: List[str]
BOOKS_BY_TITLE_YEAR = """-- name: books_by_title_year \\:many
SELECT book_id, author_id, isbn, book_type, title, year, available, tags FROM books
WHERE title = :p1 AND year = :p2
"""
CREATE_AUTHOR = """-- name: create_author \\:one
INSERT INTO authors (name) VALUES (:p1)
RETURNING author_id, name
"""
CREATE_BOOK = """-- name: create_book \\:one
INSERT INTO books (
author_id,
isbn,
book_type,
title,
year,
available,
tags
) VALUES (
:p1,
:p2,
:p3,
:p4,
:p5,
:p6,
:p7
)
RETURNING book_id, author_id, isbn, book_type, title, year, available, tags
"""
@dataclasses.dataclass()
class CreateBookParams:
author_id: int
isbn: str
book_type: models.BookType
title: str
year: int
available: datetime.datetime
tags: List[str]
DELETE_BOOK = """-- name: delete_book \\:exec
DELETE FROM books
WHERE book_id = :p1
"""
GET_AUTHOR = """-- name: get_author \\:one
SELECT author_id, name FROM authors
WHERE author_id = :p1
"""
GET_BOOK = """-- name: get_book \\:one
SELECT book_id, author_id, isbn, book_type, title, year, available, tags FROM books
WHERE book_id = :p1
"""
UPDATE_BOOK = """-- name: update_book \\:exec
UPDATE books
SET title = :p1, tags = :p2
WHERE book_id = :p3
"""
UPDATE_BOOK_ISBN = """-- name: update_book_isbn \\:exec
UPDATE books
SET title = :p1, tags = :p2, isbn = :p4
WHERE book_id = :p3
"""
class AsyncQuerier:
def __init__(self, conn: sqlalchemy.ext.asyncio.AsyncConnection):
self._conn = conn
async def books_by_tags(self, *, dollar_1: List[str]) -> AsyncIterator[BooksByTagsRow]:
result = await self._conn.stream(sqlalchemy.text(BOOKS_BY_TAGS), {"p1": dollar_1})
async for row in result:
yield BooksByTagsRow(
book_id=row[0],
title=row[1],
name=row[2],
isbn=row[3],
tags=row[4],
)
async def books_by_title_year(self, *, title: str, year: int) -> AsyncIterator[models.Book]:
result = await self._conn.stream(sqlalchemy.text(BOOKS_BY_TITLE_YEAR), {"p1": title, "p2": year})
async for row in result:
yield models.Book(
book_id=row[0],
author_id=row[1],
isbn=row[2],
book_type=row[3],
title=row[4],
year=row[5],
available=row[6],
tags=row[7],
)
async def create_author(self, *, name: str) -> Optional[models.Author]:
row = (await self._conn.execute(sqlalchemy.text(CREATE_AUTHOR), {"p1": name})).first()
if row is None:
return None
return models.Author(
author_id=row[0],
name=row[1],
)
async def create_book(self, arg: CreateBookParams) -> Optional[models.Book]:
row = (await self._conn.execute(sqlalchemy.text(CREATE_BOOK), {
"p1": arg.author_id,
"p2": arg.isbn,
"p3": arg.book_type,
"p4": arg.title,
"p5": arg.year,
"p6": arg.available,
"p7": arg.tags,
})).first()
if row is None:
return None
return models.Book(
book_id=row[0],
author_id=row[1],
isbn=row[2],
book_type=row[3],
title=row[4],
year=row[5],
available=row[6],
tags=row[7],
)
async def delete_book(self, *, book_id: int) -> None:
await self._conn.execute(sqlalchemy.text(DELETE_BOOK), {"p1": book_id})
async def METHOD_NAME(self, *, author_id: int) -> Optional[models.Author]:
row = (await self._conn.execute(sqlalchemy.text(GET_AUTHOR), {"p1": author_id})).first()
if row is None:
return None
return models.Author(
author_id=row[0],
name=row[1],
)
async def get_book(self, *, book_id: int) -> Optional[models.Book]:
row = (await self._conn.execute(sqlalchemy.text(GET_BOOK), {"p1": book_id})).first()
if row is None:
return None
return models.Book(
book_id=row[0],
author_id=row[1],
isbn=row[2],
book_type=row[3],
title=row[4],
year=row[5],
available=row[6],
tags=row[7],
)
async def update_book(self, *, title: str, tags: List[str], book_id: int) -> None:
await self._conn.execute(sqlalchemy.text(UPDATE_BOOK), {"p1": title, "p2": tags, "p3": book_id})
async def update_book_isbn(self, *, title: str, tags: List[str], book_id: int, isbn: str) -> None:
await self._conn.execute(sqlalchemy.text(UPDATE_BOOK_ISBN), {
"p1": title,
"p2": tags,
"p3": book_id,
"p4": isbn,
})
|
4,164 |
test unpassed entrance exam
|
"""
Milestone related tests for the mobile_api
"""
from unittest.mock import patch
from crum import set_current_request
from django.conf import settings
from common.djangoapps.util.milestones_helpers import add_prerequisite_course, fulfill_course_milestone
from lms.djangoapps.courseware.access_response import MilestoneAccessError
from lms.djangoapps.courseware.tests.test_entrance_exam import add_entrance_exam_milestone, answer_entrance_exam_problem
from openedx.core.djangolib.testing.utils import get_mock_request
from xmodule.modulestore.tests.factories import CourseFactory, BlockFactory # lint-amnesty, pylint: disable=wrong-import-order
class MobileAPIMilestonesMixin:
"""
Tests the Mobile API decorators for milestones.
The two milestones currently supported in these tests are entrance exams and
pre-requisite courses. If either of these milestones are unfulfilled,
the mobile api will appropriately block content until the milestone is
fulfilled.
"""
ALLOW_ACCESS_TO_MILESTONE_COURSE = False
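# Usage sketch (assumed, not from this file): a concrete API test combines this mixin
# with its endpoint test case and overrides the flag when the endpoint should remain
# reachable despite unfulfilled milestones, e.g.
#   class TestCourseEnrollmentList(MobileAPIMilestonesMixin, MobileAPITestCase):
#       ALLOW_ACCESS_TO_MILESTONE_COURSE = True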
@patch.dict(settings.FEATURES, {
'ENABLE_PREREQUISITE_COURSES': True,
'ENABLE_MKTG_SITE': True,
})
def test_unfulfilled_prerequisite_course(self):
""" Tests the case for an unfulfilled pre-requisite course """
self._add_prerequisite_course()
self.init_course_access()
self._verify_unfulfilled_milestone_response()
@patch.dict(settings.FEATURES, {'ENABLE_PREREQUISITE_COURSES': True, 'ENABLE_MKTG_SITE': True})
def test_unfulfilled_prerequisite_course_for_staff(self):
self._add_prerequisite_course()
self.user.is_staff = True
self.user.save()
self.init_course_access()
self.api_response()
@patch.dict(settings.FEATURES, {'ENABLE_PREREQUISITE_COURSES': True, 'ENABLE_MKTG_SITE': True})
def test_fulfilled_prerequisite_course(self):
"""
Tests the case when a user fulfills existing pre-requisite course
"""
self._add_prerequisite_course()
add_prerequisite_course(self.course.id, self.prereq_course.id)
fulfill_course_milestone(self.prereq_course.id, self.user)
self.init_course_access()
self.api_response()
@patch.dict(settings.FEATURES, {'ENTRANCE_EXAMS': True, 'ENABLE_MKTG_SITE': True})
def METHOD_NAME(self):
"""
Tests the case where the user has not passed the entrance exam
"""
self._add_entrance_exam()
self.init_course_access()
self._verify_unfulfilled_milestone_response()
@patch.dict(settings.FEATURES, {'ENTRANCE_EXAMS': True, 'ENABLE_MKTG_SITE': True})
def test_unpassed_entrance_exam_for_staff(self):
self._add_entrance_exam()
self.user.is_staff = True
self.user.save()
self.init_course_access()
self.api_response()
@patch.dict(settings.FEATURES, {'ENTRANCE_EXAMS': True, 'ENABLE_MKTG_SITE': True})
def test_passed_entrance_exam(self):
"""
Tests access when user has passed the entrance exam
"""
self.addCleanup(set_current_request, None)
self._add_entrance_exam()
self._pass_entrance_exam()
self.init_course_access()
self.api_response()
def _add_entrance_exam(self):
""" Sets up entrance exam """
with self.store.bulk_operations(self.course.id):
self.course.entrance_exam_enabled = True
self.entrance_exam = BlockFactory.create(
parent=self.course,
category="chapter",
display_name="Entrance Exam Chapter",
is_entrance_exam=True,
in_entrance_exam=True,
)
self.subsection_1 = BlockFactory.create(
parent=self.entrance_exam,
category='sequential',
display_name="The Only Exam Sequential",
graded=True,
in_entrance_exam=True,
)
self.problem_1 = BlockFactory.create(
parent=self.subsection_1,
category='problem',
display_name="The Only Exam Problem",
graded=True,
in_entrance_exam=True,
)
add_entrance_exam_milestone(self.course, self.entrance_exam)
self.course.entrance_exam_minimum_score_pct = 0.50
self.course.entrance_exam_id = str(self.entrance_exam.location)
self.store.update_item(self.course, self.user.id)
def _add_prerequisite_course(self):
""" Helper method to set up the prerequisite course """
self.prereq_course = CourseFactory.create()
add_prerequisite_course(self.course.id, self.prereq_course.id)
def _pass_entrance_exam(self):
""" Helper function to pass the entrance exam """
request = get_mock_request(self.user)
answer_entrance_exam_problem(self.course, request, self.problem_1)
def _verify_unfulfilled_milestone_response(self):
"""
Verifies the response depending on ALLOW_ACCESS_TO_MILESTONE_COURSE
Since different endpoints will have different behaviours towards milestones,
setting ALLOW_ACCESS_TO_MILESTONE_COURSE (default is False) to True will
not return a 404. For example, when getting a list of courses a user is
enrolled in, although a user may have unfulfilled milestones, the course
should still show up in the course enrollments list.
"""
if self.ALLOW_ACCESS_TO_MILESTONE_COURSE:
self.api_response()
else:
response = self.api_response(expected_response_code=404)
assert response.data == MilestoneAccessError().to_json()
|
4,165 |
check exists
|
import re
from pathlib import PurePosixPath
from typing import TYPE_CHECKING, Optional, Type
from lisa.executable import Tool
from lisa.tools.ls import Ls
from lisa.tools.mkdir import Mkdir
from lisa.tools.powershell import PowerShell
from lisa.tools.rm import Rm
from lisa.util import LisaException, is_valid_url
if TYPE_CHECKING:
from lisa.operating_system import Posix
class Wget(Tool):
__pattern_path = re.compile(
r"([\w\W]*?)(-|File) (‘|')(?P<path>.+?)(’|') (saved|already there)"
)
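# Illustrative wget output lines this pattern is intended to match (assumed formats):
#   "... (5.1 MB/s) - ‘/tmp/pkg.tar.gz’ saved [1024/1024]"
#   "File ‘/tmp/pkg.tar.gz’ already there; not retrieving."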
@property
def command(self) -> str:
return "wget"
@property
def can_install(self) -> bool:
return True
def install(self) -> bool:
posix_os: Posix = self.node.os # type: ignore
posix_os.install_packages([self])
return self.METHOD_NAME()
def get(
self,
url: str,
file_path: str = "",
filename: str = "",
overwrite: bool = True,
executable: bool = False,
sudo: bool = False,
force_run: bool = False,
timeout: int = 600,
) -> str:
is_valid_url(url)
# combine download file path
# TODO: support current lisa folder in pathlib.
# So that here can use the corresponding path format.
if file_path:
# create folder when it doesn't exist
self.node.shell.mkdir(PurePosixPath(file_path), exist_ok=True)
download_path = f"{file_path}/{filename}"
else:
download_path = f"{self.node.working_path}/{filename}"
# remove existing file and dir to download again.
download_pure_path = self.node.get_pure_path(download_path)
if overwrite and self.node.shell.exists(download_pure_path):
self.node.shell.remove(download_pure_path, recursive=True)
force_run = True
command = f"'{url}' --no-check-certificate"
if filename:
command = f"{command} -O {download_path}"
else:
command = f"{command} -P {download_path}"
command_result = self.run(
command,
no_error_log=True,
shell=True,
sudo=sudo,
force_run=force_run,
timeout=timeout,
)
matched_result = self.__pattern_path.match(command_result.stdout)
if matched_result:
download_file_path = matched_result.group("path")
else:
raise LisaException(
f"cannot find file path in stdout of '{command}', it may be caused "
" due to failed download or pattern mismatch."
f" stdout: {command_result.stdout}"
)
actual_file_path = self.node.execute(
f"ls {download_file_path}",
shell=True,
sudo=sudo,
expected_exit_code=0,
expected_exit_code_failure_message="File path does not exist, "
f"{download_file_path}",
)
if executable:
self.node.execute(f"chmod +x {actual_file_path}", sudo=sudo)
return actual_file_path.stdout
def verify_internet_access(self) -> bool:
try:
result = self.get("https://www.azure.com", force_run=True)
if result:
return True
except Exception as e:
self._log.debug(
f"Internet is not accessible, exception occurred with wget {e}"
)
return False
@classmethod
def _windows_tool(cls) -> Optional[Type[Tool]]:
return WindowsWget
class WindowsWget(Wget):
@property
def command(self) -> str:
return ""
def METHOD_NAME(self) -> bool:
return True
def get(
self,
url: str,
file_path: str = "",
filename: str = "",
overwrite: bool = True,
executable: bool = False,
sudo: bool = False,
force_run: bool = False,
timeout: int = 600,
) -> str:
ls = self.node.tools[Ls]
fullpath = f"{file_path}\\{filename}"
# return if file exists and not overwrite
if ls.path_exists(fullpath, sudo=sudo) and not overwrite:
self._log.debug(
f"File {fullpath} already exists and overwrite is set to False"
)
return fullpath
# create directory if it doesn't exist
self.node.tools[Mkdir].create_directory(file_path, sudo=sudo)
# TODO: add support for executables
# remove existing file if present and download
self.node.tools[Rm].remove_file(fullpath, sudo=sudo)
self.node.tools[PowerShell].run_cmdlet(
f"$ProgressPreference = 'SilentlyContinue'; Invoke-WebRequest -Uri '{url}'"
f" -OutFile '{fullpath}'",
sudo=sudo,
force_run=force_run,
timeout=timeout,
)
return fullpath
|
4,166 |
create annotation table
|
"""create annotation table
Revision ID: 7f8b8920355f
Revises: c1d316c60985
Create Date: 2022-01-29 20:23:29.996133
"""
from alembic import op
import click
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "7f8b8920355f"
down_revision = "c1d316c60985"
branch_labels = None
depends_on = None
def upgrade():
METHOD_NAME()
create_annotation_account_relationship_table()
create_annotation_asset_relationship_table()
create_annotation_sensor_relationship_table()
create_user_roles_unique_constraints()
create_account_roles_unique_constraints()
def downgrade():
click.confirm(
"This downgrade drops the tables 'annotations_accounts', 'annotations_assets', 'annotations_sensors' and 'annotation'. Continue?",
abort=True,
)
op.drop_constraint(
op.f("roles_accounts_role_id_key"),
"roles_accounts",
type_="unique",
)
op.drop_constraint(
op.f("roles_users_role_id_key"),
"roles_users",
type_="unique",
)
op.drop_constraint(
op.f("annotations_accounts_annotation_id_key"),
"annotations_accounts",
type_="unique",
)
op.drop_constraint(
op.f("annotations_assets_annotation_id_key"),
"annotations_assets",
type_="unique",
)
op.drop_constraint(
op.f("annotations_sensors_annotation_id_key"),
"annotations_sensors",
type_="unique",
)
op.drop_table("annotations_accounts")
op.drop_table("annotations_assets")
op.drop_table("annotations_sensors")
op.drop_constraint(op.f("annotation_content_key"), "annotation", type_="unique")
op.drop_table("annotation")
op.execute("DROP TYPE annotation_type;")
def create_account_roles_unique_constraints():
"""Remove any duplicate relationships, then constrain any new relationships to be unique."""
op.execute(
"DELETE FROM roles_accounts WHERE id in (SELECT r1.id FROM roles_accounts r1, roles_accounts r2 WHERE r1.id > r2.id AND r1.role_id = r2.role_id and r1.account_id = r2.account_id);"
)
op.create_unique_constraint(
op.f("roles_accounts_role_id_key"),
"roles_accounts",
["role_id", "account_id"],
)
def create_user_roles_unique_constraints():
"""Remove any duplicate relationships, then constrain any new relationships to be unique."""
op.execute(
"DELETE FROM roles_users WHERE id in (SELECT r1.id FROM roles_users r1, roles_users r2 WHERE r1.id > r2.id AND r1.role_id = r2.role_id and r1.user_id = r2.user_id);"
)
op.create_unique_constraint(
op.f("roles_users_role_id_key"),
"roles_users",
["role_id", "user_id"],
)
def create_annotation_sensor_relationship_table():
op.create_table(
"annotations_sensors",
sa.Column("id", sa.Integer(), primary_key=True),
sa.Column("sensor_id", sa.Integer()),
sa.Column("annotation_id", sa.Integer()),
sa.ForeignKeyConstraint(("sensor_id",), ["sensor.id"]),
sa.ForeignKeyConstraint(("annotation_id",), ["annotation.id"]),
)
op.create_unique_constraint(
op.f("annotations_sensors_annotation_id_key"),
"annotations_sensors",
["annotation_id", "sensor_id"],
)
def create_annotation_asset_relationship_table():
op.create_table(
"annotations_assets",
sa.Column("id", sa.Integer(), primary_key=True),
sa.Column("generic_asset_id", sa.Integer()),
sa.Column("annotation_id", sa.Integer()),
sa.ForeignKeyConstraint(("generic_asset_id",), ["generic_asset.id"]),
sa.ForeignKeyConstraint(("annotation_id",), ["annotation.id"]),
)
op.create_unique_constraint(
op.f("annotations_assets_annotation_id_key"),
"annotations_assets",
["annotation_id", "generic_asset_id"],
)
def create_annotation_account_relationship_table():
op.create_table(
"annotations_accounts",
sa.Column("id", sa.Integer(), primary_key=True),
sa.Column("account_id", sa.Integer()),
sa.Column("annotation_id", sa.Integer()),
sa.ForeignKeyConstraint(("account_id",), ["account.id"]),
sa.ForeignKeyConstraint(("annotation_id",), ["annotation.id"]),
)
op.create_unique_constraint(
op.f("annotations_accounts_annotation_id_key"),
"annotations_accounts",
["annotation_id", "account_id"],
)
def METHOD_NAME():
op.create_table(
"annotation",
sa.Column(
"id", sa.Integer(), nullable=False, autoincrement=True, primary_key=True
),
sa.Column("start", sa.DateTime(timezone=True), nullable=False),
sa.Column("end", sa.DateTime(timezone=True), nullable=False),
sa.Column("belief_time", sa.DateTime(timezone=True), nullable=True),
sa.Column("source_id", sa.Integer(), nullable=False),
sa.Column(
"type",
sa.Enum("alert", "holiday", "label", "feedback", name="annotation_type"),
nullable=False,
),
sa.Column("content", sa.String(1024), nullable=False),
sa.ForeignKeyConstraint(("source_id",), ["data_source.id"]),
)
op.create_unique_constraint(
op.f("annotation_content_key"),
"annotation",
["content", "start", "belief_time", "source_id", "type"],
)
|
4,167 |
test task postrun handler
|
import uuid
from unittest import mock
import celery
from django.test import TestCase
from documents.data_models import ConsumableDocument
from documents.data_models import DocumentSource
from documents.models import PaperlessTask
from documents.signals.handlers import before_task_publish_handler
from documents.signals.handlers import task_failure_handler
from documents.signals.handlers import task_postrun_handler
from documents.signals.handlers import task_prerun_handler
from documents.tests.test_consumer import fake_magic_from_file
from documents.tests.utils import DirectoriesMixin
@mock.patch("documents.consumer.magic.from_file", fake_magic_from_file)
class TestTaskSignalHandler(DirectoriesMixin, TestCase):
def util_call_before_task_publish_handler(self, headers_to_use, body_to_use):
"""
Simple utility to call the pre-run handle and ensure it created a single task
instance
"""
self.assertEqual(PaperlessTask.objects.all().count(), 0)
before_task_publish_handler(headers=headers_to_use, body=body_to_use)
self.assertEqual(PaperlessTask.objects.all().count(), 1)
def test_before_task_publish_handler_consume(self):
"""
GIVEN:
- A celery task is started via the consume folder
WHEN:
- Task before publish handler is called
THEN:
- The task is created and marked as pending
"""
headers = {
"id": str(uuid.uuid4()),
"task": "documents.tasks.consume_file",
}
body = (
# args
(
ConsumableDocument(
source=DocumentSource.ConsumeFolder,
original_file="/consume/hello-999.pdf",
),
None,
),
# kwargs
{},
# celery stuff
{"callbacks": None, "errbacks": None, "chain": None, "chord": None},
)
self.util_call_before_task_publish_handler(
headers_to_use=headers,
body_to_use=body,
)
task = PaperlessTask.objects.get()
self.assertIsNotNone(task)
self.assertEqual(headers["id"], task.task_id)
self.assertEqual("hello-999.pdf", task.task_file_name)
self.assertEqual("documents.tasks.consume_file", task.task_name)
self.assertEqual(celery.states.PENDING, task.status)
def test_task_prerun_handler(self):
"""
GIVEN:
- A celery task is started via the consume folder
WHEN:
- Task starts execution
THEN:
- The task is marked as started
"""
headers = {
"id": str(uuid.uuid4()),
"task": "documents.tasks.consume_file",
}
body = (
# args
(
ConsumableDocument(
source=DocumentSource.ConsumeFolder,
original_file="/consume/hello-99.pdf",
),
None,
),
# kwargs
{},
# celery stuff
{"callbacks": None, "errbacks": None, "chain": None, "chord": None},
)
self.util_call_before_task_publish_handler(
headers_to_use=headers,
body_to_use=body,
)
task_prerun_handler(task_id=headers["id"])
task = PaperlessTask.objects.get()
self.assertEqual(celery.states.STARTED, task.status)
def METHOD_NAME(self):
"""
GIVEN:
- A celery task is started via the consume folder
WHEN:
- Task finished execution
THEN:
- The task is marked as succeeded
"""
headers = {
"id": str(uuid.uuid4()),
"task": "documents.tasks.consume_file",
}
body = (
# args
(
ConsumableDocument(
source=DocumentSource.ConsumeFolder,
original_file="/consume/hello-9.pdf",
),
None,
),
# kwargs
{},
# celery stuff
{"callbacks": None, "errbacks": None, "chain": None, "chord": None},
)
self.util_call_before_task_publish_handler(
headers_to_use=headers,
body_to_use=body,
)
task_postrun_handler(
task_id=headers["id"],
retval="Success. New document id 1 created",
state=celery.states.SUCCESS,
)
task = PaperlessTask.objects.get()
self.assertEqual(celery.states.SUCCESS, task.status)
def test_task_failure_handler(self):
"""
GIVEN:
- A celery task is started via the consume folder
WHEN:
- Task failed execution
THEN:
- The task is marked as failed
"""
headers = {
"id": str(uuid.uuid4()),
"task": "documents.tasks.consume_file",
}
body = (
# args
(
ConsumableDocument(
source=DocumentSource.ConsumeFolder,
original_file="/consume/hello-9.pdf",
),
None,
),
# kwargs
{},
# celery stuff
{"callbacks": None, "errbacks": None, "chain": None, "chord": None},
)
self.util_call_before_task_publish_handler(
headers_to_use=headers,
body_to_use=body,
)
task_failure_handler(
task_id=headers["id"],
exception="Example failure",
)
task = PaperlessTask.objects.get()
self.assertEqual(celery.states.FAILURE, task.status)
|
4,168 |
test compute angstrom coeff
|
from __future__ import annotations
import numpy as np
import pytest
from pyaerocom.aux_var_helpers import (
_calc_od_helper,
calc_abs550aer,
calc_ang4487aer,
calc_od550aer,
calc_od550gt1aer,
calc_od550lt1aer,
compute_angstrom_coeff,
compute_od_from_angstromexp,
vmrx_to_concx,
)
def test_calc_ang4487aer():
data = dict(od440aer=0.2, od870aer=0.1)
vals = calc_ang4487aer(data)
assert vals == pytest.approx(1, abs=0.05)
@pytest.mark.parametrize(
"data",
[
pytest.param(dict(), id="missing both"),
pytest.param(dict(od440aer=0.2), id="missing od870aer"),
pytest.param(dict(od870aer=0.1), id="missing od440aer"),
],
)
def test_calc_ang4487aer_error(data: dict):
error = "Either of the two (or both) required variables (od440aer, od870aer) are not available in data"
with pytest.raises(AttributeError) as e:
calc_ang4487aer(data)
assert str(e.value) == error
@pytest.mark.parametrize(
"data,args,kwargs,result",
[
pytest.param(
dict(od500aer=0.1, ang4487aer=1),
("od440aer",),
dict(
to_lambda=0.44, od_ref="od500aer", lambda_ref=0.5, use_angstrom_coeff="ang4487aer"
),
0.11,
id="1 data point",
),
pytest.param(
dict(
od500aer=np.asarray([2, 2, np.nan, 2, np.nan, np.nan]),
od440aer=np.ones(6) * 4,
ang4487aer=np.zeros(6),
),
("od550aer", 0.55, "od500aer", 0.5, "od440aer", 0.44, "ang4487aer"),
{},
[2, 2, 4, 2, 4, 4],
id="6 data points",
),
],
)
def test__calc_od_helper(data: dict, args: tuple, kwargs: dict, result: float | list[float]):
aod = _calc_od_helper(data, *args, **kwargs)
assert aod == pytest.approx(result, abs=0.05)
@pytest.mark.parametrize(
"data,result",
[
pytest.param(dict(od500aer=0.1, ang4487aer=1), 0.09, id="simple"),
pytest.param(
dict(
od440aer=_calc_od_helper(
dict(od500aer=0.1, ang4487aer=1),
"od440aer",
to_lambda=0.44,
od_ref="od500aer",
lambda_ref=0.5,
use_angstrom_coeff="ang4487aer",
),
ang4487aer=1,
),
0.09,
id="complicated",
),
],
)
def test_calc_od550aer(data: dict, result: float):
aod = calc_od550aer(data)
assert aod == pytest.approx(result, abs=0.05)
def test_calc_od550gt1aer():
data = dict(od500gt1aer=0.1, ang4487aer=1)
aod = calc_od550gt1aer(data)
assert aod == pytest.approx(0.09, abs=0.05)
def test_calc_od550lt1aer():
data = dict(od500lt1aer=0.1, ang4487aer=1)
aod = calc_od550lt1aer(data)
assert aod == pytest.approx(0.09, abs=0.05)
@pytest.mark.parametrize(
"od1,od2,lambda1,lambda2,result",
[
(0.1, 0.2, 0.6, 0.3, 1),
(0.1, 0.2, 0.3, 0.6, -1),
],
)
def METHOD_NAME(
od1: float, od2: float, lambda1: float, lambda2: float, result: float
):
assert compute_angstrom_coeff(od1, od2, lambda1, lambda2) == result
@pytest.mark.parametrize("angs,result", [(4, 0.77), (0, 0.1)])
def test_compute_od_from_angstromexp(angs: float, result: float):
aod = compute_od_from_angstromexp(
to_lambda=0.3, od_ref=0.1, lambda_ref=0.5, angstrom_coeff=angs
)
assert aod == pytest.approx(result, abs=0.05)
@pytest.mark.parametrize(
"inputval,p,T,vmr_unit,mmol_var,mmol_air,to_unit,desired",
[
(1, 101300, 293, "nmol mol-1", 48, None, "ug m-3", 1.9959),
(1, 101300, 273, "nmol mol-1", 48, None, "ug m-3", 2.1421),
(1, 101300, 273, "nmol mol-1", 48, None, "kg m-3", 2.1421e-9),
(1, 101300, 273, "mol mol-1", 48, None, "kg m-3", 2.1421),
(1, 98000, 273, "mol mol-1", 48, None, "kg m-3", 2.0724),
],
)
def test_vmrx_to_concx(
inputval: float,
p: float,
T: float,
vmr_unit: str,
mmol_var: float,
mmol_air: float | None,
to_unit: str,
desired: float,
):
val = vmrx_to_concx(inputval, p, T, vmr_unit, mmol_var, mmol_air, to_unit)
assert val == pytest.approx(desired, rel=1e-4)
@pytest.mark.parametrize(
"data,expected_result",
[
pytest.param(
dict(od500aer=0.1, ang4487aer=1, abs440aer=1, angabs4487aer=1),
0.7999999999999999,
),
pytest.param(
dict(od500aer=0.1, ang4487aer=1, abs440aer=1, angabs4487aer=0),
1,
),
],
)
def test_calc_abs550aer(data, expected_result):
result = calc_abs550aer(data=data)
assert result == pytest.approx(expected_result, rel=1e-4)
@pytest.mark.parametrize(
"od_ref, use_angstrom_coeff",
[
pytest.param(None, "ang4487aer"),
pytest.param("od500aer", None),
],
)
def test__calc_od_helper_raise_error(od_ref, use_angstrom_coeff):
data = dict(od500aer=0.1, ang4487aer=1)
with pytest.raises(AttributeError) as e:
_calc_od_helper(
data=data,
var_name="od550lt1ang",
to_lambda=0.55,
od_ref=od_ref,
lambda_ref=0.50,
lambda_ref_alt=0.44,
use_angstrom_coeff=use_angstrom_coeff,
treshold_angstrom=1.0,
)
assert e.type is AttributeError
|
4,169 |
extract into event
|
import json
from copy import deepcopy
from sentry_sdk.hub import Hub, _should_send_default_pii
from sentry_sdk.utils import AnnotatedValue
from sentry_sdk._compat import text_type, iteritems
from sentry_sdk._types import TYPE_CHECKING
if TYPE_CHECKING:
import sentry_sdk
from typing import Any
from typing import Dict
from typing import Optional
from typing import Union
SENSITIVE_ENV_KEYS = (
"REMOTE_ADDR",
"HTTP_X_FORWARDED_FOR",
"HTTP_SET_COOKIE",
"HTTP_COOKIE",
"HTTP_AUTHORIZATION",
"HTTP_X_API_KEY",
"HTTP_X_FORWARDED_FOR",
"HTTP_X_REAL_IP",
)
SENSITIVE_HEADERS = tuple(
x[len("HTTP_") :] for x in SENSITIVE_ENV_KEYS if x.startswith("HTTP_")
)
def request_body_within_bounds(client, content_length):
# type: (Optional[sentry_sdk.Client], int) -> bool
if client is None:
return False
bodies = client.options["max_request_body_size"]
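# "small" keeps bodies up to 10**3 bytes and "medium" up to 10**4; any other
# configured value (such as "always") is not size-limited by this check.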
return not (
bodies == "never"
or (bodies == "small" and content_length > 10**3)
or (bodies == "medium" and content_length > 10**4)
)
class RequestExtractor(object):
def __init__(self, request):
# type: (Any) -> None
self.request = request
def METHOD_NAME(self, event):
# type: (Dict[str, Any]) -> None
client = Hub.current.client
if client is None:
return
data = None # type: Optional[Union[AnnotatedValue, Dict[str, Any]]]
content_length = self.content_length()
request_info = event.get("request", {})
if _should_send_default_pii():
request_info["cookies"] = dict(self.cookies())
if not request_body_within_bounds(client, content_length):
data = AnnotatedValue.removed_because_over_size_limit()
else:
parsed_body = self.parsed_body()
if parsed_body is not None:
data = parsed_body
elif self.raw_data():
data = AnnotatedValue.removed_because_raw_data()
else:
data = None
if data is not None:
request_info["data"] = data
event["request"] = deepcopy(request_info)
def content_length(self):
# type: () -> int
try:
return int(self.env().get("CONTENT_LENGTH", 0))
except ValueError:
return 0
def cookies(self):
# type: () -> Dict[str, Any]
raise NotImplementedError()
def raw_data(self):
# type: () -> Optional[Union[str, bytes]]
raise NotImplementedError()
def form(self):
# type: () -> Optional[Dict[str, Any]]
raise NotImplementedError()
def parsed_body(self):
# type: () -> Optional[Dict[str, Any]]
form = self.form()
files = self.files()
if form or files:
data = dict(iteritems(form))
for key, _ in iteritems(files):
data[key] = AnnotatedValue.removed_because_raw_data()
return data
return self.json()
def is_json(self):
# type: () -> bool
return _is_json_content_type(self.env().get("CONTENT_TYPE"))
def json(self):
# type: () -> Optional[Any]
try:
if not self.is_json():
return None
raw_data = self.raw_data()
if raw_data is None:
return None
if isinstance(raw_data, text_type):
return json.loads(raw_data)
else:
return json.loads(raw_data.decode("utf-8"))
except ValueError:
pass
return None
def files(self):
# type: () -> Optional[Dict[str, Any]]
raise NotImplementedError()
def size_of_file(self, file):
# type: (Any) -> int
raise NotImplementedError()
def env(self):
# type: () -> Dict[str, Any]
raise NotImplementedError()
def _is_json_content_type(ct):
# type: (Optional[str]) -> bool
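# Accepts plain "application/json" as well as structured-suffix media types such as
# "application/vnd.api+json"; any ";charset=..." parameter is stripped before matching.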
mt = (ct or "").split(";", 1)[0]
return (
mt == "application/json"
or (mt.startswith("application/") and mt.endswith("+json"))
)
def _filter_headers(headers):
# type: (Dict[str, str]) -> Dict[str, str]
if _should_send_default_pii():
return headers
return {
k: (
v
if k.upper().replace("-", "_") not in SENSITIVE_HEADERS
else AnnotatedValue.removed_because_over_size_limit()
)
for k, v in iteritems(headers)
}
|
4,170 |
printf
|
#!/usr/bin/env python3
#
# Copyright (c) 2014-2017, Linaro Limited
#
# SPDX-License-Identifier: BSD-3-Clause
import argparse
import os
import subprocess
import sys
def get_args():
parser = argparse.ArgumentParser(description='Shows the memory usage '
'of an OP-TEE based on ELF sections')
parser.add_argument('tee_elf', help='the OP-TEE ELF file (tee.elf)')
parser.add_argument('-a', '--all', action='store_true',
help=' same as -i -p -u -U')
parser.add_argument('-n', '--no-map', action='store_true',
help=' do not show the detailed section mappings and '
'RAM usage')
parser.add_argument('-i', '--init', action='store_true',
help='report the total size of the .*_init sections')
parser.add_argument('-p', '--paged', action='store_true',
help='report the total size of the .*_pageable '
'sections')
parser.add_argument('-u', '--unpaged', action='store_true',
help='report the total size of the unpaged sections, '
'that is, all sections but the ones in --init or '
'--paged')
parser.add_argument('-U', '--unpaged-no-heap', action='store_true',
help='report the size of all unpaged sections '
'excluding heap space. Reflects the size of unpaged '
'code and data (.text, .rodata, .data, .bss, .nozi '
'and possibly unwind tables)')
parser.add_argument('-r', '--raw', action='store_true',
help='when processing -i, -p, -u, or -U, show only '
'the size (in decimal) and no other text')
return parser.parse_args()
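# Illustrative invocations, assuming this script is saved as mem_usage.py and the
# ELF path below is hypothetical:
#   ./mem_usage.py out/arm/core/tee.elf        # section map and RAM usage
#   ./mem_usage.py -a out/arm/core/tee.elf     # also print init/paged/unpaged totals
#   ./mem_usage.py -r -U out/arm/core/tee.elf  # raw size of unpaged sections, heap excluded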
def METHOD_NAME(format, *args):
sys.stdout.write(format % args)
def print_sect(name, addr, size, round_up=False, print_num_pages=False):
if args.no_map:
return
if size == 0:
size_kib = 0
num_pages = 0
else:
if round_up:
size_kib = (size - 1) / 1024 + 1
else:
size_kib = size / 1024
num_pages = (size - 1) / 4096 + 1
METHOD_NAME('%-16s %.8X - %.8X size %.8X %3d KiB', name, addr, addr + size,
size, size_kib)
if print_num_pages:
METHOD_NAME(' %d pages', num_pages)
METHOD_NAME('\n')
def print_pager_stat(name, size):
size_kib = size / 1024
if args.raw:
METHOD_NAME('%d ', size)
else:
METHOD_NAME('%-36s size %.8X %3d KiB\n', name, size, size_kib)
def readelf_cmd():
return os.getenv('CROSS_COMPILE', '') + 'readelf'
def main():
global args
in_shdr = False
sects = []
init_size = 0
paged_size = 0
unpaged_size = 0
unpaged_no_heap_size = 0
args = get_args()
env = os.environ.copy()
env['LC_ALL'] = 'C'
readelf = subprocess.Popen(str.split(readelf_cmd()) + ['-s',
args.tee_elf],
stdout=subprocess.PIPE, env=env,
universal_newlines=True)
for line in iter(readelf.stdout.readline, ''):
words = line.split()
if len(words) == 8 and words[7] == '_end_of_ram':
end_of_ram = int(words[1], 16)
break
readelf.terminate()
readelf = subprocess.Popen(str.split(readelf_cmd()) + ['-S', '-W',
args.tee_elf],
stdout=subprocess.PIPE, env=env,
universal_newlines=True)
for line in iter(readelf.stdout.readline, ''):
if 'Section Headers:' in line:
in_shdr = True
continue
if 'Key to Flags:' in line:
in_shdr = False
continue
if in_shdr:
words = line.split()
if words[0] == '[':
words.pop(0)
try:
(_, name, _, addr, offs, size, _,
flags) = words[:8]
except BaseException:
continue
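# Only allocatable sections (readelf flag 'A') occupy memory at runtime, so keep just those.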
if ('A' in flags):
sects.append({'name': name, 'addr': addr,
'offs': offs, 'size': size})
first_addr = None
for sect in sects:
if sect['addr'] != 0:
addr = sect['addr']
if not first_addr:
first_addr = addr
if int(addr, 16) >= end_of_ram:
break
last_addr = addr
last_size = sect['size']
ram_usage = int(last_addr, 16) + int(last_size, 16) - int(first_addr, 16)
print_sect('RAM Usage', int(first_addr, 16), ram_usage, True, True)
last_addr = 0
last_size = 0
for sect in sects:
name = sect['name']
addr = int(sect['addr'], 16)
size = int(sect['size'], 16)
if addr >= end_of_ram:
break
if last_addr != 0 and addr != last_addr + last_size:
print_sect('*hole*', last_addr + last_size,
addr - (last_addr + last_size))
print_sect(name, addr, size)
if name.endswith('_init'):
init_size += size
elif name.endswith('_pageable'):
paged_size += size
else:
if not name.startswith('.heap'):
unpaged_no_heap_size += size
unpaged_size += size
last_addr = addr
last_size = size
if args.all or args.init:
print_pager_stat('Init sections (.*_init)', init_size)
if args.all or args.paged:
print_pager_stat('Paged sections (.*_pageable)', paged_size)
if args.all or args.unpaged:
print_pager_stat('Unpaged sections ', unpaged_size)
if args.all or args.unpaged_no_heap:
print_pager_stat('Unpaged sections (heap excluded)',
unpaged_no_heap_size)
if (args.raw and (args.all or args.init or args.paged or
args.unpaged or args.unpaged_no_heap)):
METHOD_NAME('\n')
if __name__ == "__main__":
main()
|
4,171 |
check escalation finished task
|
import datetime
import typing
import requests
from celery import shared_task
from django.conf import settings
from django.db.models import Q
from django.utils import timezone
from apps.alerts.tasks.task_logger import task_logger
from common.database import get_random_readonly_database_key_if_present_otherwise_default
if typing.TYPE_CHECKING:
from apps.alerts.models.alert_group import AlertGroup
class AlertGroupEscalationPolicyExecutionAuditException(BaseException):
"""This exception is raised when an alert group's escalation policy did not execute execute properly for some reason"""
def send_alert_group_escalation_auditor_task_heartbeat() -> None:
heartbeat_url = settings.ALERT_GROUP_ESCALATION_AUDITOR_CELERY_TASK_HEARTBEAT_URL
if heartbeat_url:
task_logger.info(f"Sending heartbeat to configured URL: {heartbeat_url}")
requests.get(heartbeat_url).raise_for_status()
task_logger.info(f"Heartbeat successfully sent to {heartbeat_url}")
else:
task_logger.info("Skipping sending heartbeat as no heartbeat URL is configured")
def audit_alert_group_escalation(alert_group: "AlertGroup") -> None:
escalation_snapshot = alert_group.escalation_snapshot
alert_group_id = alert_group.id
base_msg = f"Alert group {alert_group_id}"
if not alert_group.escalation_chain_exists:
task_logger.info(
f"{base_msg} does not have an escalation chain associated with it, and therefore it is expected "
"that it will not have an escalation snapshot, skipping further validation"
)
return
if not escalation_snapshot:
msg = f"{base_msg} does not have an escalation snapshot associated with it, this should never occur"
task_logger.warning(msg)
raise AlertGroupEscalationPolicyExecutionAuditException(msg)
task_logger.info(f"{base_msg} has an escalation snapshot associated with it, auditing if it executed properly")
escalation_policies_snapshots = escalation_snapshot.escalation_policies_snapshots
if not escalation_policies_snapshots:
task_logger.info(
f"{base_msg}'s escalation snapshot has an empty escalation_policies_snapshots, skipping further validation"
)
return
task_logger.info(
f"{base_msg}'s escalation snapshot has a populated escalation_policies_snapshots, continuing validation"
)
if escalation_snapshot.next_step_eta_is_valid() is False:
msg = (
f"{base_msg}'s escalation snapshot does not have a valid next_step_eta: {escalation_snapshot.next_step_eta}"
)
task_logger.warning(msg)
raise AlertGroupEscalationPolicyExecutionAuditException(msg)
task_logger.info(f"{base_msg}'s escalation snapshot has a valid next_step_eta: {escalation_snapshot.next_step_eta}")
executed_escalation_policy_snapshots = escalation_snapshot.executed_escalation_policy_snapshots
num_of_executed_escalation_policy_snapshots = len(executed_escalation_policy_snapshots)
if num_of_executed_escalation_policy_snapshots == 0:
task_logger.info(
f"{base_msg}'s escalation snapshot does not have any executed escalation policies, skipping further validation"
)
else:
task_logger.info(
f"{base_msg}'s escalation snapshot has {num_of_executed_escalation_policy_snapshots} executed escalation policies"
)
task_logger.info(f"{base_msg} passed the audit checks")
@shared_task
def METHOD_NAME() -> None:
"""
For every alert group with an active escalation, this task checks that an escalation snapshot with
escalation policies was created and that the next escalation step ETA is not more than 5 minutes in
the past, which means that escalations are proceeding as expected.
If any alert groups fail the check, an exception is raised; otherwise a heartbeat is sent. A missing
heartbeat raises an alert.
Attention: don't retry this task, the idea is to be alerted of failures
"""
from apps.alerts.models import AlertGroup
now = timezone.now() - datetime.timedelta(minutes=5)
two_days_ago = now - datetime.timedelta(days=2)
alert_groups = AlertGroup.objects.using(get_random_readonly_database_key_if_present_otherwise_default()).filter(
~Q(silenced=True, silenced_until__isnull=True), # filter silenced forever alert_groups
# here we should query maintenance_uuid rather than joining on channel__integration
# and checking for something like ~Q(channel__integration=AlertReceiveChannel.INTEGRATION_MAINTENANCE)
# this avoids an unnecessary join
maintenance_uuid__isnull=True,
is_escalation_finished=False,
resolved=False,
acknowledged=False,
root_alert_group=None,
started_at__range=(two_days_ago, now),
)
task_logger.info(
f"There are {len(alert_groups)} alert group(s) to audit"
if alert_groups.exists()
else "There are no alert groups to audit, everything is good :)"
)
alert_group_ids_that_failed_audit: typing.List[str] = []
for alert_group in alert_groups:
try:
audit_alert_group_escalation(alert_group)
except AlertGroupEscalationPolicyExecutionAuditException:
alert_group_ids_that_failed_audit.append(str(alert_group.id))
if alert_group_ids_that_failed_audit:
msg = f"The following alert group id(s) failed auditing: {', '.join(alert_group_ids_that_failed_audit)}"
task_logger.warning(msg)
raise AlertGroupEscalationPolicyExecutionAuditException(msg)
task_logger.info("There were no alert groups that failed auditing")
send_alert_group_escalation_auditor_task_heartbeat()
|
4,172 |
not acceptable
|
import warnings
import pytest
from starlette.exceptions import HTTPException, WebSocketException
from starlette.middleware.exceptions import ExceptionMiddleware
from starlette.requests import Request
from starlette.responses import JSONResponse, PlainTextResponse
from starlette.routing import Route, Router, WebSocketRoute
def raise_runtime_error(request):
raise RuntimeError("Yikes")
def METHOD_NAME(request):
raise HTTPException(status_code=406)
def no_content(request):
raise HTTPException(status_code=204)
def not_modified(request):
raise HTTPException(status_code=304)
def with_headers(request):
raise HTTPException(status_code=200, headers={"x-potato": "always"})
class BadBodyException(HTTPException):
pass
async def read_body_and_raise_exc(request: Request):
await request.body()
raise BadBodyException(422)
async def handler_that_reads_body(
request: Request, exc: BadBodyException
) -> JSONResponse:
body = await request.body()
return JSONResponse(status_code=422, content={"body": body.decode()})
class HandledExcAfterResponse:
async def __call__(self, scope, receive, send):
response = PlainTextResponse("OK", status_code=200)
await response(scope, receive, send)
raise HTTPException(status_code=406)
router = Router(
routes=[
Route("/runtime_error", endpoint=raise_runtime_error),
Route("/not_acceptable", endpoint=METHOD_NAME),
Route("/no_content", endpoint=no_content),
Route("/not_modified", endpoint=not_modified),
Route("/with_headers", endpoint=with_headers),
Route("/handled_exc_after_response", endpoint=HandledExcAfterResponse()),
WebSocketRoute("/runtime_error", endpoint=raise_runtime_error),
Route(
"/consume_body_in_endpoint_and_handler",
endpoint=read_body_and_raise_exc,
methods=["POST"],
),
]
)
app = ExceptionMiddleware(
router,
handlers={BadBodyException: handler_that_reads_body}, # type: ignore[dict-item]
)
@pytest.fixture
def client(test_client_factory):
with test_client_factory(app) as client:
yield client
def test_not_acceptable(client):
response = client.get("/not_acceptable")
assert response.status_code == 406
assert response.text == "Not Acceptable"
def test_no_content(client):
response = client.get("/no_content")
assert response.status_code == 204
assert "content-length" not in response.headers
def test_not_modified(client):
response = client.get("/not_modified")
assert response.status_code == 304
assert response.text == ""
def test_with_headers(client):
response = client.get("/with_headers")
assert response.status_code == 200
assert response.headers["x-potato"] == "always"
def test_websockets_should_raise(client):
with pytest.raises(RuntimeError):
with client.websocket_connect("/runtime_error"):
pass # pragma: nocover
def test_handled_exc_after_response(test_client_factory, client):
# A 406 HttpException is raised *after* the response has already been sent.
# The exception middleware should raise a RuntimeError.
with pytest.raises(RuntimeError):
client.get("/handled_exc_after_response")
# If `raise_server_exceptions=False` then the test client will still allow
# us to see the response as it will have been seen by the client.
allow_200_client = test_client_factory(app, raise_server_exceptions=False)
response = allow_200_client.get("/handled_exc_after_response")
assert response.status_code == 200
assert response.text == "OK"
def test_force_500_response(test_client_factory):
# use a sentinel variable to make sure we actually
# make it into the endpoint and don't get a 500
# from an incorrect ASGI app signature or something
called = False
async def app(scope, receive, send):
nonlocal called
called = True
raise RuntimeError()
force_500_client = test_client_factory(app, raise_server_exceptions=False)
response = force_500_client.get("/")
assert called
assert response.status_code == 500
assert response.text == ""
def test_http_str():
assert str(HTTPException(status_code=404)) == "404: Not Found"
assert str(HTTPException(404, "Not Found: foo")) == "404: Not Found: foo"
assert str(HTTPException(404, headers={"key": "value"})) == "404: Not Found"
def test_http_repr():
assert repr(HTTPException(404)) == (
"HTTPException(status_code=404, detail='Not Found')"
)
assert repr(HTTPException(404, detail="Not Found: foo")) == (
"HTTPException(status_code=404, detail='Not Found: foo')"
)
class CustomHTTPException(HTTPException):
pass
assert repr(CustomHTTPException(500, detail="Something custom")) == (
"CustomHTTPException(status_code=500, detail='Something custom')"
)
def test_websocket_str():
assert str(WebSocketException(1008)) == "1008: "
assert str(WebSocketException(1008, "Policy Violation")) == "1008: Policy Violation"
def test_websocket_repr():
assert repr(WebSocketException(1008, reason="Policy Violation")) == (
"WebSocketException(code=1008, reason='Policy Violation')"
)
class CustomWebSocketException(WebSocketException):
pass
assert (
repr(CustomWebSocketException(1013, reason="Something custom"))
== "CustomWebSocketException(code=1013, reason='Something custom')"
)
def test_exception_middleware_deprecation() -> None:
# this test should be removed once the deprecation shim is removed
with pytest.warns(DeprecationWarning):
from starlette.exceptions import ExceptionMiddleware # noqa: F401
with warnings.catch_warnings():
warnings.simplefilter("error")
import starlette.exceptions
with pytest.warns(DeprecationWarning):
starlette.exceptions.ExceptionMiddleware
def test_request_in_app_and_handler_is_the_same_object(client) -> None:
response = client.post("/consume_body_in_endpoint_and_handler", content=b"Hello!")
assert response.status_code == 422
assert response.json() == {"body": "Hello!"}
|
4,173 |
test trigger set math1
|
#
# @file TestTrigger.py
# @brief SBML Trigger unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Sarah Keating
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestTrigger.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestTrigger(unittest.TestCase):
global D
D = None
def setUp(self):
self.D = libsbml.Trigger(2,4)
if (self.D == None):
pass
pass
def tearDown(self):
_dummyList = [ self.D ]; _dummyList[:] = []; del _dummyList
pass
def test_Trigger_create(self):
self.assertTrue( self.D.getTypeCode() == libsbml.SBML_TRIGGER )
self.assertTrue( self.D.getMetaId() == "" )
self.assertTrue( self.D.getNotes() == None )
self.assertTrue( self.D.getAnnotation() == None )
self.assertTrue( self.D.getMath() == None )
pass
def test_Trigger_createWithNS(self):
xmlns = libsbml.XMLNamespaces()
xmlns.add( "http://www.sbml.org", "testsbml")
sbmlns = libsbml.SBMLNamespaces(2,1)
sbmlns.addNamespaces(xmlns)
object = libsbml.Trigger(sbmlns)
self.assertTrue( object.getTypeCode() == libsbml.SBML_TRIGGER )
self.assertTrue( object.getMetaId() == "" )
self.assertTrue( object.getNotes() == None )
self.assertTrue( object.getAnnotation() == None )
self.assertTrue( object.getLevel() == 2 )
self.assertTrue( object.getVersion() == 1 )
self.assertTrue( object.getNamespaces() != None )
self.assertTrue( object.getNamespaces().getLength() == 2 )
_dummyList = [ object ]; _dummyList[:] = []; del _dummyList
pass
def test_Trigger_free_NULL(self):
_dummyList = [ None ]; _dummyList[:] = []; del _dummyList
pass
def test_Trigger_setMath(self):
math = libsbml.parseFormula("lambda(x, x^3)")
self.D.setMath(math)
math1 = self.D.getMath()
self.assertTrue( math1 != None )
formula = libsbml.formulaToString(math1)
self.assertTrue( formula != None )
self.assertTrue(( "lambda(x, x^3)" == formula ))
self.assertTrue( self.D.getMath() != math )
self.assertEqual( True, self.D.isSetMath() )
self.D.setMath(self.D.getMath())
math1 = self.D.getMath()
self.assertTrue( math1 != None )
formula = libsbml.formulaToString(math1)
self.assertTrue( formula != None )
self.assertTrue(( "lambda(x, x^3)" == formula ))
self.D.setMath(None)
self.assertEqual( False, self.D.isSetMath() )
if (self.D.getMath() != None):
pass
pass
def METHOD_NAME(self):
math = libsbml.parseFormula("2 * k")
i = self.D.setMath(math)
self.assertTrue( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertTrue( self.D.getMath() != math )
self.assertEqual( True, self.D.isSetMath() )
i = self.D.setMath(None)
self.assertTrue( i == libsbml.LIBSBML_OPERATION_SUCCESS )
self.assertTrue( self.D.getMath() == None )
self.assertEqual( False, self.D.isSetMath() )
_dummyList = [ math ]; _dummyList[:] = []; del _dummyList
pass
def test_Trigger_setMath2(self):
math = libsbml.ASTNode(libsbml.AST_DIVIDE)
i = self.D.setMath(math)
self.assertTrue( i == libsbml.LIBSBML_INVALID_OBJECT )
self.assertEqual( False, self.D.isSetMath() )
_dummyList = [ math ]; _dummyList[:] = []; del _dummyList
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestTrigger))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
|
4,174 |
kill process
|
"""Internal py2/3 compatibility library. A little more than six."""
import inspect
import os
import shlex
import sys
import threading
import time
from contextlib import contextmanager
from datetime import timezone
from types import ModuleType
from typing import Any, Callable, List, Sequence, Type
import pendulum
from typing_extensions import TypeGuard
from .compat.pendulum import PendulumDateTime as PendulumDateTime # re-exported
from .json import (
JSONDecodeError as JSONDecodeError,
dump as dump,
dumps as dumps,
)
from .temp_dir import get_system_temp_directory as get_system_temp_directory
IS_WINDOWS = os.name == "nt"
# TODO implement a generic import by name -- see https://stackoverflow.com/questions/301134/how-to-import-a-module-given-its-name
# https://stackoverflow.com/a/67692/324449
def import_module_from_path(module_name: str, path_to_file: str) -> ModuleType:
import importlib.util
spec = importlib.util.spec_from_file_location(module_name, path_to_file)
if spec is None:
raise Exception(
"Can not import module {module_name} from path {path_to_file}, unable to load spec."
.format(module_name=module_name, path_to_file=path_to_file)
)
if sys.modules.get(spec.name) and spec.origin:
f = sys.modules[spec.name].__file__
# __file__ can be relative depending on current working directory
if f and os.path.abspath(f) == os.path.abspath(spec.origin):
return sys.modules[spec.name]
module = importlib.util.module_from_spec(spec)
sys.modules[spec.name] = module
assert spec.loader
spec.loader.exec_module(module)
return module
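# Minimal usage sketch (module name and path below are hypothetical):
#   mod = import_module_from_path("my_plugin", "/tmp/my_plugin.py")
#   mod.main()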
def is_ascii(str_):
if sys.version_info.major == 3 and sys.version_info.minor < 7:
try:
str_.encode("ascii")
return True
except UnicodeEncodeError:
return False
else:
return str_.isascii()
time_fn = time.perf_counter
def get_arg_names(callable_: Callable[..., Any]) -> Sequence[str]:
return [
parameter.name
for parameter in inspect.signature(callable_).parameters.values()
if parameter.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
]
def wait_for_process(process, timeout=30):
# Using Popen.communicate instead of Popen.wait since the latter
# can deadlock, see https://docs.python.org/3/library/subprocess.html#subprocess.Popen.wait
if not timeout:
process.communicate()
elif sys.version_info.major >= 3:
process.communicate(timeout=timeout)
else:
timed_out_event = threading.Event()
def _wait_timeout():
timed_out_event.set()
process.kill()
timer = threading.Timer(timeout, _wait_timeout)
try:
timer.start()
process.wait()
finally:
timer.cancel()
if timed_out_event.is_set():
raise Exception("Timed out waiting for process to finish")
def METHOD_NAME(process):
import multiprocessing
if not isinstance(process, multiprocessing.Process):
raise Exception("invalid process argument passed to kill_process")
if sys.version_info >= (3, 7):
# Kill added in 3.7
process.kill()
else:
process.terminate()
# https://stackoverflow.com/a/58437485/324449
def is_module_available(module_name: str) -> bool:
# python 3.4 and above
import importlib.util
loader = importlib.util.find_spec(module_name)
return loader is not None
def builtin_print() -> str:
return "builtins.print"
def get_current_datetime_in_utc() -> Any:
return pendulum.now("UTC")
def get_timestamp_from_utc_datetime(utc_datetime):
if isinstance(utc_datetime, PendulumDateTime):
return utc_datetime.timestamp()
if utc_datetime.tzinfo != timezone.utc:
raise Exception("Must pass in a UTC timezone to compute UNIX timestamp")
return utc_datetime.timestamp()
def is_lambda(target: object) -> TypeGuard[Callable[..., Any]]:
return callable(target) and getattr(target, "__name__", None) == "<lambda>"
def is_function_or_decorator_instance_of(
target: object, kls: Type[Any]
) -> TypeGuard[Callable[..., Any]]:
return inspect.isfunction(target) or (isinstance(target, kls) and hasattr(target, "__name__"))
def qualname_differs(target: object) -> bool:
return hasattr(target, "__qualname__") and getattr(target, "__qualname__") != getattr(
target, "__name__"
)
def xplat_shlex_split(s: str) -> List[str]:
if IS_WINDOWS:
return shlex.split(s, posix=False)
return shlex.split(s)
def get_import_error_message(import_error: ImportError) -> str:
return import_error.msg
# Stand-in for contextlib.nullcontext, but available in python 3.6
@contextmanager
def nullcontext():
yield
def is_subclass(child_type: Type[Any], parent_type: Type[Any]):
"""Due to some pathological interactions betwen bugs in the Python typing library
(https://github.com/python/cpython/issues/88459 and
https://github.com/python/cpython/issues/89010), some types (list[str] in Python 3.9, for
example) pass inspect.isclass check above but then raise an exception if issubclass is called
with the same class. This function provides a workaround for that issue.
"""
if not inspect.isclass(child_type):
return False
try:
return issubclass(child_type, parent_type)
except TypeError:
return False
|
4,175 |
communicated node list
|
#-------------------------------------------------------------------------------
# DistributedBoundary
#-------------------------------------------------------------------------------
from PYB11Generator import *
from Boundary import *
from BoundaryAbstractMethods import *
@PYB11template("Dimension")
class DistributedBoundary(Boundary):
"""DistributedBoundary -- Base class for distributed parallel boundary
conditions, connecting NodeLists across parallel domains."""
PYB11typedefs = """
typedef typename %(Dimension)s::Scalar Scalar;
typedef typename %(Dimension)s::Vector Vector;
typedef typename %(Dimension)s::Tensor Tensor;
typedef typename %(Dimension)s::SymTensor SymTensor;
typedef typename %(Dimension)s::ThirdRankTensor ThirdRankTensor;
typedef typename %(Dimension)s::FourthRankTensor FourthRankTensor;
typedef typename %(Dimension)s::FifthRankTensor FifthRankTensor;
typedef typename %(Dimension)s::FacetedVolume FacetedVolume;
typedef typename DistributedBoundary<%(Dimension)s>::DomainBoundaryNodes DomainBoundaryNodes;
typedef std::map<int, DomainBoundaryNodes> DomainBoundaryNodeMap;
typedef std::map<NodeList<%(Dimension)s>*, DomainBoundaryNodeMap> NodeListDomainBoundaryNodeMap;
"""
#...........................................................................
class DomainBoundaryNodes:
sendNodes = PYB11readwrite()
receiveNodes = PYB11readwrite()
#...........................................................................
# Constructors
def pyinit(self):
"Default constructor"
#...........................................................................
# Methods
@PYB11const
def METHOD_NAME(self,
nodeList = "const NodeList<%(Dimension)s>&"):
"Test if the given NodeList is communicated on this domain or not."
return "bool"
@PYB11const
def nodeListSharedWithDomain(self,
nodeList = "const NodeList<%(Dimension)s>&",
neighborDomainID = "int"):
"Test if the given NodeList is communicated with the given domain."
return "bool"
@PYB11const
@PYB11returnpolicy("reference_internal")
def domainBoundaryNodeMap(self,
nodeList = "const NodeList<%(Dimension)s>&"):
return "const DomainBoundaryNodeMap&"
@PYB11const
@PYB11returnpolicy("reference_internal")
def domainBoundaryNodes(self,
nodeList = "const NodeList<%(Dimension)s>&",
neighborDomainID = "int"):
return "const DomainBoundaryNodes&"
@PYB11const
def communicatedProcs(self,
sendProcs = "std::vector<int>&",
recvProcs = "std::vector<int>&"):
"Extract the current set of processors we're communicating with."
return "void"
#...........................................................................
# Virtual methods
@PYB11pure_virtual
def setAllGhostNodes(self,
dataBase = "DataBase<%(Dimension)s>&"):
"Descendent Distributed Neighbors are required to provide the setGhostNodes method for DataBases."
return "void"
@PYB11virtual
def cullGhostNodes(self,
flagSet = "const FieldList<%(Dimension)s, int>&",
old2newIndexMap = "FieldList<%(Dimension)s, int>&",
numNodesRemoved = "std::vector<int>&"):
"Override the Boundary method for culling ghost nodes."
return "void"
@PYB11virtual
@PYB11const
def finalizeGhostBoundary(self):
"Override the base method to finalize ghost boundaries."
return "void"
@PYB11virtual
@PYB11const
def meshGhostNodes(self):
"We do not want to use the parallel ghost nodes as generators."
return "bool"
#...........................................................................
# Non-blocking exchanges
@PYB11const
def beginExchangeFieldFixedSize(self,
field = "FieldBase<%(Dimension)s>&"):
"Start a non-blocking Field exchange"
return "void"
@PYB11const
def beginExchangeFieldVariableSize(self,
field = "FieldBase<%(Dimension)s>&"):
"Start a non-blocking Field exchange"
return "void"
def finalizeExchanges(self):
"Force the exchanges which have been registered to execute."
return "void"
@PYB11const
def unpackField(self,
field = "FieldBase<%(Dimension)s>&",
packedValues = "const std::list< std::vector<char> >&"):
"Unpack a packed set of Field values back into the Field."
return "void"
def setControlAndGhostNodes(self):
"Update the control and ghost nodes of the base class"
return "void"
#...........................................................................
# Protected methods
@PYB11protected
@PYB11returnpolicy("reference_internal")
def accessNodeListDomainBoundaryNodeMap(self):
"Descendent classes get read/write access to the communication maps."
return "NodeListDomainBoundaryNodeMap&"
@PYB11protected
@PYB11returnpolicy("reference_internal")
def accessDomainBoundaryNodeMap(self,
nodeList = "const NodeList<%(Dimension)s>&"):
"Descendent classes get read/write access to the communication maps."
return "DomainBoundaryNodeMap&"
@PYB11protected
@PYB11returnpolicy("reference_internal")
def accessDomainBoundaryNodes(self,
nodeList = "const NodeList<%(Dimension)s>&",
neighborDomainID = "int"):
return "DomainBoundaryNodes&"
@PYB11protected
@PYB11returnpolicy("reference_internal")
def openDomainBoundaryNodes(self,
nodeListPtr = "NodeList<%(Dimension)s>*",
domainID = "const int"):
"""Convenience method to return an iterator to the DomainBoundaryNodes for the
given NodeList and domain pair. If there isn't an entry for this pair already,
this method creates one."""
return "DomainBoundaryNodes&"
@PYB11protected
def removeDomainBoundaryNodes(self,
nodeListPtr = "NodeList<%(Dimension)s>*",
domainID = "const int"):
"Inverse of above -- remove the DomainBoundaryNodes for a NodeList/procID pair."
return "void"
@PYB11virtual
@PYB11protected
def reset(self,
dataBase = "const DataBase<%(Dimension)s>&"):
"Override the Boundary method for clearing the maps."
return "void"
@PYB11protected
def buildReceiveAndGhostNodes(self,
dataBase = "const DataBase<%(Dimension)s>&"):
"""This handy helper method will build receive and ghost nodes on all each
domain based on send nodes that have already been filled in."""
return "void"
#...........................................................................
# Properties
domainID = PYB11property("int")
numDomains = PYB11property("int")
nodeListDomainBoundaryNodeMap = PYB11property("const NodeListDomainBoundaryNodeMap&", returnpolicy="reference_internal")
#-------------------------------------------------------------------------------
# Inject methods
#-------------------------------------------------------------------------------
PYB11inject(BoundaryAbstractMethods, DistributedBoundary, virtual=True, pure_virtual=False)
|
4,176 |
get application output
|
# coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetApplicationResult',
'AwaitableGetApplicationResult',
'get_application',
'get_application_output',
]
@pulumi.output_type
class GetApplicationResult:
"""
The HDInsight cluster application
"""
def __init__(__self__, etag=None, id=None, name=None, properties=None, system_data=None, tags=None, type=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
The ETag for the application
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.ApplicationPropertiesResponse':
"""
The properties of the application.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
The tags for the application.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetApplicationResult(GetApplicationResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetApplicationResult(
etag=self.etag,
id=self.id,
name=self.name,
properties=self.properties,
system_data=self.system_data,
tags=self.tags,
type=self.type)
def get_application(application_name: Optional[str] = None,
cluster_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApplicationResult:
"""
Gets properties of the specified application.
:param str application_name: The constant value for the application name.
:param str cluster_name: The name of the cluster.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['applicationName'] = application_name
__args__['clusterName'] = cluster_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:hdinsight/v20230815preview:getApplication', __args__, opts=opts, typ=GetApplicationResult).value
return AwaitableGetApplicationResult(
etag=pulumi.get(__ret__, 'etag'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
properties=pulumi.get(__ret__, 'properties'),
system_data=pulumi.get(__ret__, 'system_data'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_application)
def METHOD_NAME(application_name: Optional[pulumi.Input[str]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetApplicationResult]:
"""
Gets properties of the specified application.
:param str application_name: The constant value for the application name.
:param str cluster_name: The name of the cluster.
:param str resource_group_name: The name of the resource group.
"""
...
|
4,177 |
report error
|
#!/usr/bin/env python3
import requests
import os
import sys
CURDIR = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(CURDIR, "helpers"))
from pure_http_client import ClickHouseClient
class Tester:
"""
- Creates test table with multiple integer columns
- Runs read queries with multiple range conditions on different columns in PREWHERE and checks that the result is correct
"""
def __init__(self, session, url, index_granularity, total_rows):
self.session = session
self.url = url
self.index_granularity = index_granularity
self.total_rows = total_rows
self.reported_errors = set()
self.repro_queries = []
def METHOD_NAME(self):
print("Repro steps:", "\n\n\t".join(self.repro_queries))
exit(1)
def query(self, query_text, include_in_repro_steps=True, expected_data=None):
self.repro_queries.append(query_text)
resp = self.session.post(self.url, data=query_text)
if resp.status_code != 200:
# Group similar errors
error = resp.text[0:40]
if error not in self.reported_errors:
self.reported_errors.add(error)
print("Code:", resp.status_code)
print("Result:", resp.text)
self.METHOD_NAME()
result = resp.text
# Check that the result is as expected
if (expected_data is not None) and (int(result) != len(expected_data)):
print("Expected {} rows, got {}".format(len(expected_data), result))
print("Expected data:" + str(expected_data))
self.METHOD_NAME()
if not include_in_repro_steps:
self.repro_queries.pop()
def check_data(
self, all_data, c_range_start, c_range_end, d_range_start, d_range_end
):
for to_select in [
"count()",
"sum(e)",
]: # Test reading with and without column with default value
self.query("SELECT {} FROM tab_02473;".format(to_select), False, all_data)
delta = 10
for b_range_start in [0, delta]:
for b_range_end in [self.total_rows - delta]: # , self.total_rows]:
expected = all_data[
(all_data.a == 0)
& (all_data.b > b_range_start)
& (all_data.b <= b_range_end)
]
self.query(
"SELECT {} from tab_02473 PREWHERE b > {} AND b <= {} WHERE a == 0;".format(
to_select, b_range_start, b_range_end
),
False,
expected,
)
expected = all_data[
(all_data.a == 0)
& (all_data.b > b_range_start)
& (all_data.b <= b_range_end)
& (all_data.c > c_range_start)
& (all_data.c <= c_range_end)
]
self.query(
"SELECT {} from tab_02473 PREWHERE b > {} AND b <= {} AND c > {} AND c <= {} WHERE a == 0;".format(
to_select,
b_range_start,
b_range_end,
c_range_start,
c_range_end,
),
False,
expected,
)
expected = all_data[
(all_data.a == 0)
& (all_data.b > b_range_start)
& (all_data.b <= b_range_end)
& (all_data.c > c_range_start)
& (all_data.c <= c_range_end)
& (all_data.d > d_range_start)
& (all_data.d <= d_range_end)
]
self.query(
"SELECT {} from tab_02473 PREWHERE b > {} AND b <= {} AND c > {} AND c <= {} AND d > {} AND d <= {} WHERE a == 0;".format(
to_select,
b_range_start,
b_range_end,
c_range_start,
c_range_end,
d_range_start,
d_range_end,
),
False,
expected,
)
def run_test(self, c_range_start, c_range_end, d_range_start, d_range_end):
self.repro_queries = []
self.query(
"""
CREATE TABLE tab_02473 (a Int8, b Int32, c Int32, d Int32, PRIMARY KEY (a))
ENGINE = MergeTree() ORDER BY (a, b)
SETTINGS min_bytes_for_wide_part = 0, index_granularity = {};""".format(
self.index_granularity
)
)
self.query(
"INSERT INTO tab_02473 select 0, number+1, number+1, number+1 FROM numbers({});".format(
self.total_rows
)
)
client = ClickHouseClient()
all_data = client.query_return_df(
"SELECT a, b, c, d, 1 as e FROM tab_02473 FORMAT TabSeparatedWithNames;"
)
self.query("OPTIMIZE TABLE tab_02473 FINAL SETTINGS mutations_sync=2;")
# After all data has been written add a column with default value
self.query("ALTER TABLE tab_02473 ADD COLUMN e Int64 DEFAULT 1;")
self.check_data(
all_data, c_range_start, c_range_end, d_range_start, d_range_end
)
self.query("DROP TABLE tab_02473;")
def main():
# Enable multiple prewhere read steps
url = (
os.environ["CLICKHOUSE_URL"]
+ "&enable_multiple_prewhere_read_steps=1&move_all_conditions_to_prewhere=0&max_threads=1"
)
default_index_granularity = 10
total_rows = 8 * default_index_granularity
step = default_index_granularity
session = requests.Session()
for index_granularity in [default_index_granularity - 1, default_index_granularity]:
tester = Tester(session, url, index_granularity, total_rows)
# Test combinations of ranges of columns c and d
for c_range_start in range(0, total_rows, int(2.3 * step)):
for c_range_end in range(
c_range_start + 3 * step, total_rows, int(2.1 * step)
):
for d_range_start in range(
int(0.5 * step), total_rows, int(2.7 * step)
):
for d_range_end in range(
d_range_start + 3 * step, total_rows, int(2.2 * step)
):
tester.run_test(
c_range_start, c_range_end, d_range_start, d_range_end
)
if __name__ == "__main__":
main()
|
4,178 |
test user can read all
|
from rest_framework import status
from care.users.models import GENDER_CHOICES, User
from care.utils.tests.test_base import TestBase
class TestSuperUser(TestBase):
def setUp(self):
"""
Run once before every test
- login the super user
"""
self.client.force_authenticate(self.super_user)
def get_detail_representation(self, obj=None) -> dict:
return {
"username": obj.username,
"first_name": obj.first_name,
"last_name": obj.last_name,
"email": obj.email,
"user_type": obj.get_user_type_display(),
"created_by": obj.created_by,
"phone_number": obj.phone_number,
"alt_phone_number": obj.alt_phone_number,
"age": obj.age,
"gender": GENDER_CHOICES[obj.gender - 1][1],
"home_facility": None,
"home_facility_object": None,
"is_superuser": obj.is_superuser,
"verified": obj.verified,
"pf_endpoint": obj.pf_endpoint,
"pf_p256dh": obj.pf_p256dh,
"pf_auth": obj.pf_auth,
"doctor_experience_commenced_on": obj.doctor_experience_commenced_on,
"doctor_medical_council_registration": obj.doctor_medical_council_registration,
"doctor_qualification": obj.doctor_qualification,
"weekly_working_hours": obj.weekly_working_hours,
**self.get_local_body_district_state_representation(obj),
}
def test_superuser_can_access_url_by_location(self):
"""Test super user can access the url by location"""
response = self.client.get(f"/api/v1/users/{self.user.username}/")
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_superuser_can_view(self):
"""Test super user can view all of their profile"""
response = self.client.get(f"/api/v1/users/{self.user.username}/")
res_data_json = response.json()
res_data_json.pop("id")
data = self.user_data.copy()
data.pop("password")
self.assertDictEqual(
res_data_json,
self.get_detail_representation(self.user),
)
def test_superuser_can_modify(self):
"""Test superusers can modify the attributes for other users"""
username = self.user.username
data = self.user_data.copy()
data["district"] = data["district"].id
data["state"] = data["state"].id
response = self.client.patch(
f"/api/v1/users/{username}/",
{"age": 31},
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
# test the value from api
self.assertEqual(response.json()["age"], 31)
# test value at the backend
self.assertEqual(User.objects.get(username=username).age, 31)
def test_superuser_can_delete(self):
"""Test superuser can delete other users"""
response = self.client.delete(f"/api/v1/users/{self.user.username}/")
# test response code
self.assertEqual(response.status_code, 204)
# test backend response
with self.assertRaises(expected_exception=User.DoesNotExist):
User.objects.get(
username=self.user_data["username"],
is_active=True,
deleted=False,
)
class TestUser(TestBase):
def get_detail_representation(self, obj=None) -> dict:
return {
"username": obj.username,
"user_type": obj.get_user_type_display(),
"is_superuser": obj.is_superuser,
"gender": obj.get_gender_display(),
"email": obj.email,
"phone_number": obj.phone_number,
"first_name": obj.first_name,
"last_name": obj.last_name,
"age": obj.age,
**self.get_local_body_district_state_representation(obj),
}
@classmethod
def setUpClass(cls) -> None:
"""
Runs once per class
Creates 3 users
- 2 users initialized by setUpClass of TestBase
- 1 will be used to check if they can tinker with attributes of the others
"""
super(TestUser, cls).setUpClass()
cls.data_2 = cls.get_user_data()
cls.data_2.update({"username": "user_2", "password": "password"})
cls.user_2 = cls.create_user(district=cls.district, username="user_2")
def setUp(self):
"""
Run once before every test
- login the user
"""
self.client.force_login(self.user)
def test_user_can_access_url(self):
"""Test user can access the url by location"""
username = self.user.username
response = self.client.get(f"/api/v1/users/{username}/")
self.assertEqual(response.status_code, status.HTTP_200_OK)
def METHOD_NAME(self):
"""Test user can read all"""
response = self.client.get("/api/v1/users/")
# test response code
self.assertEqual(response.status_code, status.HTTP_200_OK)
res_data_json = response.json()
# test total user count
self.assertEqual(res_data_json["count"], 3) # 3 existing, plus the new one
results = res_data_json["results"]
# test presence of usernames
self.assertIn(self.user.id, {r["id"] for r in results})
self.assertIn(self.user_2.id, {r["id"] for r in results})
def test_user_can_modify_themselves(self):
"""Test user can modify the attributes for themselves"""
password = "new_password"
username = self.user.username
response = self.client.patch(
f"/api/v1/users/{username}/",
{
"age": 31,
"password": password,
},
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
# test the value from api
self.assertEqual(response.json()["age"], 31)
# test value at the backend
self.assertEqual(User.objects.get(username=username).age, 31)
def test_user_cannot_read_others(self):
"""Test 1 user can read the attributes of the other user"""
username = self.data_2["username"]
response = self.client.get(f"/api/v1/users/{username}/")
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_user_cannot_modify_others(self):
"""Test a user can't modify others"""
username = self.data_2["username"]
password = self.data_2["password"]
response = self.client.patch(
f"/api/v1/users/{username}/",
{
"age": 31,
"password": password,
},
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_user_cannot_delete_others(self):
"""Test a user can't delete others"""
field = "username"
response = self.client.delete(f"/api/v1/users/{self.data_2[field]}/")
# test response code
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
# test backend response(user_2 still exists)
self.assertEqual(
self.data_2[field],
User.objects.get(username=self.data_2[field]).username,
)
|
4,179 |
run
|
# -*- coding: utf-8 -*-
"""
amanzi_xml
~~~~~~~~~~
Original code from sphinx.directives.code - literalinclude directive
:copyright: Copyright 2007-2011 by the Sphinx team, see Sphinx AUTHORS.
:license: BSD, see Sphinx LICENSE for details.
Derivative work - custom amanzi_xml_include directive
:copyright: Copyright 2013 by the Amanzi team, see AUTHORS.
:license: Three-clause BSD, see COPYRIGHT for details.
"""
import sys
import codecs
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from sphinx import addnodes
from sphinx.util import parselinenos
from sphinx.util.nodes import set_source_info
class AmanziXMLInclude(Directive):
"""
Based on the Sphinx ``literalinclude``, this directive customizes output for
showing snippets of Amanzi XML files in our Tutorials.
Like ``.. include:: :literal:``, but only warns if the include file is
not found, and does not raise errors. Also has several options for
selecting what to include.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
'linenos': directives.flag,
'encoding': directives.encoding,
'lines': directives.unchanged_required,
'string': directives.unchanged_required,
'indent': directives.unchanged_required,
'start': directives.unchanged_required,
'end': directives.unchanged_required,
'prepend': directives.unchanged_required,
'append': directives.unchanged_required,
}
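# Illustrative reStructuredText usage (the file path and start/end markers are hypothetical):
#   .. amanzi_xml_include:: ../input/example.xml
#      :linenos:
#      :start: <ParameterList name="Flow"
#      :end: </ParameterList>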
def METHOD_NAME(self):
# grab state of the document - not sure all this holds
document = self.state.document
# Ensure lines of the XML file can be inserted
if not document.settings.file_insertion_enabled:
return [document.reporter.warning('File insertion disabled',
line=self.lineno)]
env = document.settings.env
rel_filename, filename = env.relfn2path(self.arguments[0])
#
# It looks like lines and start,end,append, and prepend do not
# work together?? Should probably add more tests here.
#
# extract the encoding
encoding = self.options.get('encoding', env.config.source_encoding)
# set the codec
codec_info = codecs.lookup(encoding)
# Read the xml file
try:
f = codecs.StreamReaderWriter(open(filename, 'rb'),
codec_info[2], codec_info[3], 'strict')
lines = f.readlines()
f.close()
except (IOError, OSError):
return [document.reporter.warning(
'Include file %r not found or reading it failed' % filename,
line=self.lineno)]
except UnicodeError:
return [document.reporter.warning(
'Encoding %r used for reading included file %r seems to '
'be wrong, try giving an :encoding: option' %
(encoding, filename))]
#
# Collect lines specified in the "lines" option
#
linespec = self.options.get('lines')
if linespec is not None:
try:
linelist = parselinenos(linespec, len(lines))
except ValueError as err:
return [document.reporter.warning(str(err), line=self.lineno)]
# just ignore nonexisting lines
nlines = len(lines)
lines = [lines[i] for i in linelist if i < nlines]
if not lines:
return [document.reporter.warning(
'Line spec %r: no lines pulled from include file %r' %
(linespec, filename), line=self.lineno)]
#
# Collect lines from section specified with [start,end]
#
start = self.options.get('start')
end = self.options.get('end')
prepend = self.options.get('prepend')
append = self.options.get('append')
if start is not None or end is not None:
use = not start
res = []
for line in lines:
if not use and start and start in line:
use = True
res.append(line)
elif use and end and end in line:
use = False
res.append(line)
break
elif use:
res.append(line)
lines = res
# Prepend and Append if options used
if prepend:
lines.insert(0, prepend + '\n')
if append:
lines.append(append + '\n')
#
# Adjust indent of single line strings
#
indent=self.options.get('indent')
if indent:
instr=" "*int(indent)
else:
instr=""
#
# Collect the first line containing the string
#
line_string = self.options.get('string')
if line_string:
result=[]
for line in lines:
if line_string in line:
if indent:
result.append(instr+line.lstrip())
else:
result.append(line)
break
lines = result
# Join lines into a single string
text = ''.join(lines)
# Insert text in docutils node
retnode = nodes.literal_block(text, text, source=filename)
# Set some info for Sphinx (not sure what this really does).
set_source_info(self, retnode)
# Language: xml
retnode['language']='xml'
# Display line numbers
if 'linenos' in self.options:
retnode['linenos'] = True
# Record dependency for Sphinx build
# (note: env = document.settings.env from sphinx)
env.note_dependency(rel_filename)
# return
return [retnode]
#
# Add extension to Sphinx
#
def setup(app):
app.add_directive('amanzi_xml_include', AmanziXMLInclude)
|
4,180 |
test cache list
|
"""
:codeauthor: Jayesh Kariya <[email protected]>
Test cases for salt.modules.npm
"""
import textwrap
import pytest
import salt.modules.npm as npm
import salt.utils.json
from salt.exceptions import CommandExecutionError
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
with patch("salt.modules.npm._check_valid_version", MagicMock(return_value=True)):
return {npm: {}}
# 'install' function tests: 4
def test_install():
"""
Test if it installs an NPM package.
"""
mock = MagicMock(return_value={"retcode": 1, "stderr": "error"})
with patch.dict(npm.__salt__, {"cmd.run_all": mock}):
pytest.raises(CommandExecutionError, npm.install, "coffee-script")
# This is at least somewhat closer to the actual output format.
mock_json_out = textwrap.dedent(
"""\
[
{
"salt": "SALT"
}
]"""
)
# Successful run, expected output format
mock = MagicMock(return_value={"retcode": 0, "stderr": "", "stdout": mock_json_out})
with patch.dict(npm.__salt__, {"cmd.run_all": mock}):
assert npm.install("coffee-script") == [{"salt": "SALT"}]
mock_json_out_extra = textwrap.dedent(
"""\
Compilation output here
[bcrypt] Success: "/tmp/node_modules/bcrypt/foo" is installed via remote"
[grpc] Success: "/usr/lib/node_modules/@foo/bar" is installed via remote"
[
{
"from" : "express@",
"name" : "express",
"dependencies" : {
"escape-html" : {
"from" : "escape-html@~1.0.3",
"dependencies" : {},
"version" : "1.0.3"
}
},
"version" : "4.16.3"
}
]"""
)
extra_expected = [
{
"dependencies": {
"escape-html": {
"dependencies": {},
"from": "escape-html@~1.0.3",
"version": "1.0.3",
}
},
"from": "express@",
"name": "express",
"version": "4.16.3",
}
]
# Successful run, expected output format with additional leading text
mock = MagicMock(
return_value={"retcode": 0, "stderr": "", "stdout": mock_json_out_extra}
)
with patch.dict(npm.__salt__, {"cmd.run_all": mock}):
assert npm.install("coffee-script") == extra_expected
# Successful run, unexpected output format
mock = MagicMock(return_value={"retcode": 0, "stderr": "", "stdout": "SALT"})
with patch.dict(npm.__salt__, {"cmd.run_all": mock}):
mock_err = MagicMock(side_effect=ValueError())
# When JSON isn't successfully parsed, return should equal input
with patch.object(salt.utils.json, "loads", mock_err):
assert npm.install("coffee-script") == "SALT"
# 'uninstall' function tests: 1
def test_uninstall():
"""
Test if it uninstalls an NPM package.
"""
mock = MagicMock(return_value={"retcode": 1, "stderr": "error"})
with patch.dict(npm.__salt__, {"cmd.run_all": mock}):
assert not npm.uninstall("coffee-script")
mock = MagicMock(return_value={"retcode": 0, "stderr": ""})
with patch.dict(npm.__salt__, {"cmd.run_all": mock}):
assert npm.uninstall("coffee-script")
# 'list_' function tests: 1
def test_list():
"""
Test if it list installed NPM packages.
"""
mock = MagicMock(return_value={"retcode": 1, "stderr": "error"})
with patch.dict(npm.__salt__, {"cmd.run_all": mock}):
pytest.raises(CommandExecutionError, npm.list_, "coffee-script")
mock = MagicMock(
return_value={
"retcode": 0,
"stderr": "error",
"stdout": '{"salt": ["SALT"]}',
}
)
with patch.dict(npm.__salt__, {"cmd.run_all": mock}):
mock_err = MagicMock(return_value={"dependencies": "SALT"})
with patch.object(salt.utils.json, "loads", mock_err):
assert npm.list_("coffee-script") == "SALT"
# 'cache_clean' function tests: 1
def test_cache_clean():
"""
Test if it cleans the cached NPM packages.
"""
mock = MagicMock(return_value={"retcode": 1, "stderr": "error"})
with patch.dict(npm.__salt__, {"cmd.run_all": mock}):
assert not npm.cache_clean()
mock = MagicMock(return_value={"retcode": 0})
with patch.dict(npm.__salt__, {"cmd.run_all": mock}):
assert npm.cache_clean()
mock = MagicMock(return_value={"retcode": 0})
with patch.dict(npm.__salt__, {"cmd.run_all": mock}):
assert npm.cache_clean("coffee-script")
# 'cache_list' function tests: 1
def METHOD_NAME():
"""
Test if it lists the NPM cache.
"""
mock = MagicMock(return_value={"retcode": 1, "stderr": "error"})
with patch.dict(npm.__salt__, {"cmd.run_all": mock}):
pytest.raises(CommandExecutionError, npm.cache_list)
mock = MagicMock(
return_value={"retcode": 0, "stderr": "error", "stdout": ["~/.npm"]}
)
with patch.dict(npm.__salt__, {"cmd.run_all": mock}):
assert npm.cache_list() == ["~/.npm"]
mock = MagicMock(return_value={"retcode": 0, "stderr": "error", "stdout": ""})
with patch.dict(npm.__salt__, {"cmd.run_all": mock}):
assert npm.cache_list("coffee-script") == ""
# 'cache_path' function tests: 1
def test_cache_path():
"""
Test if it prints the NPM cache path.
"""
mock = MagicMock(return_value={"retcode": 1, "stderr": "error"})
with patch.dict(npm.__salt__, {"cmd.run_all": mock}):
assert npm.cache_path() == "error"
mock = MagicMock(
return_value={"retcode": 0, "stderr": "error", "stdout": "/User/salt/.npm"}
)
with patch.dict(npm.__salt__, {"cmd.run_all": mock}):
assert npm.cache_path() == "/User/salt/.npm"
|
4,181 |
build update
|
#!/usr/bin/env python
"""
Module running the set command
==============================
Update data of experiments and trials in the storage
"""
import argparse
import logging
import sys
# pylint:disable=consider-using-from-import
import orion.core.io.experiment_builder as experiment_builder
from orion.core.utils.pptree import print_tree
from orion.core.utils.terminal import confirm_name
logger = logging.getLogger(__name__)
DESCRIPTION = """
Command to update trial attributes.
To change a trial status, simply give the experiment name,
trial id and status. (use `orion status --all` to get trial ids)
$ orion db set my-exp-name id=3cc91e851e13281ca2152c19d888e937 status=interrupted
To change all trials from a given status to another, simply give the two status
$ orion db set my-exp-name status=broken status=interrupted
Or `*` to apply the change to all trials
$ orion db set my-exp-name '*' status=interrupted
By default, trials of the last version of the experiment are selected.
Add --version to select a prior version. Note that the modification
is applied recursively to all child experiment, but not to the parents.
$ orion db set my-exp-name --version 1 status=broken status=interrupted
"""
CONFIRM_MESSAGE = """
Trials matching the query `{query}` for all experiments listed above
will be modified with `{update}`.
To select a specific version use --version <VERSION>.
Make sure to stop any worker currently executing one of these experiment.
To proceed, type again the name of the experiment: """
def add_subparser(parser):
"""Return the parser that needs to be used for this command"""
set_parser = parser.add_parser(
"set",
description=DESCRIPTION,
help="Update trials' attributes",
formatter_class=argparse.RawTextHelpFormatter,
)
set_parser.set_defaults(func=main)
set_parser.add_argument("name", help="Name of the experiment to delete.")
set_parser.add_argument(
"-c",
"--config",
type=argparse.FileType("r"),
metavar="path-to-config",
help="user provided orion configuration file",
)
set_parser.add_argument(
"-v",
"--version",
type=int,
default=None,
help="specific version of experiment to fetch; "
"(default: last version matching.)",
)
set_parser.add_argument(
"-f",
"--force",
action="store_true",
help="Force modify without asking to enter experiment name twice.",
)
set_parser.add_argument(
"query",
help=(
f"Query for trials to update. Can be `*` to update all trials. "
f"Otherwise format must be <attribute>=<value>. Ex: status=broken. "
f"Supported attributes are {VALID_QUERY_ATTRS}"
),
)
set_parser.add_argument(
"update",
help=(
f"Update for trials. Format must be <attribute>=<value>. "
f"Ex: status=interrupted. "
f"Supported attributes are {VALID_UPDATE_ATTRS}"
),
)
return set_parser
def process_updates(storage, root, query, update):
"""Update the matching trials of the given experiment and its children."""
trials_total = 0
for node in root:
count = storage.update_trials(node.item, where=query, **update)
logger.debug(
"%d trials modified in experiment %s-v%d",
count,
node.item.name,
node.item.version,
)
trials_total += count
print(f"{trials_total} trials modified")
VALID_QUERY_ATTRS = ["status", "id"]
VALID_UPDATE_ATTRS = ["status"]
def build_query(experiment, query):
"""Convert query string to dict format
String format must be <attr name>=<value>
"""
if query.strip() == "*":
return {}
attribute, value = query.split("=")
if attribute not in VALID_QUERY_ATTRS:
raise ValueError(
f"Invalid query attribute `{attribute}`. Must be one of {VALID_QUERY_ATTRS}"
)
query = {attribute: value}
if attribute == "id":
query["experiment"] = experiment.id
return query
def METHOD_NAME(update):
"""Convert update string to dict format
String format must be <attr name>=<value>
"""
attribute, value = update.split("=")
if attribute not in VALID_UPDATE_ATTRS:
raise ValueError(
f"Invalid update attribute `{attribute}`. Must be one of {VALID_UPDATE_ATTRS}"
)
return {attribute: value}
def main(args):
"""Remove the experiment(s) or trial(s)."""
config = experiment_builder.get_cmd_config(args)
builder = experiment_builder.ExperimentBuilder(config.get("storage"))
# Find root experiment
root = builder.load(name=args["name"], version=args.get("version", None)).node
try:
query = build_query(root.item, args["query"])
update = METHOD_NAME(args["update"])
except ValueError as e:
print(f"Error: {e}", file=sys.stderr)
return 1
# List all experiments with children
print_tree(root, nameattr="tree_name")
confirmed = confirm_name(
CONFIRM_MESSAGE.format(query=args["query"], update=args["update"]),
args["name"],
args["force"],
)
if not confirmed:
print("Confirmation failed, aborting operation.")
return 1
process_updates(builder.storage, root, query, update)
return 0
|
4,182 |
test angled drill hole
|
#**************************************************************************
# Copyright (c) 2017 Kurt Kremitzki <[email protected]> *
# *
# This file is part of the FreeCAD CAx development system. *
# *
# This program is free software; you can redistribute it and/or modify *
# it under the terms of the GNU Lesser General Public License (LGPL) *
# as published by the Free Software Foundation; either version 2 of *
# the License, or (at your option) any later version. *
# for detail see the LICENCE text file. *
# *
# FreeCAD is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# GNU Library General Public License for more details. *
# *
# You should have received a copy of the GNU Library General Public *
# License along with FreeCAD; if not, write to the Free Software *
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# USA *
#**************************************************************************
from math import pi
import unittest
import FreeCAD
import TestSketcherApp
App = FreeCAD
class TestHole(unittest.TestCase):
def setUp(self):
self.Doc = FreeCAD.newDocument("PartDesignTestHole")
self.Body = self.Doc.addObject('PartDesign::Body','Body')
self.Box = self.Doc.addObject('PartDesign::AdditiveBox','Box')
self.Box.Length=10
self.Box.Width=10
self.Box.Height=10
self.Body.addObject(self.Box)
self.Doc.recompute()
self.HoleSketch = self.Doc.addObject('Sketcher::SketchObject', 'SketchHole')
self.HoleSketch.Support = (self.Doc.XY_Plane, [''])
self.HoleSketch.MapMode = 'FlatFace'
self.HoleSketch.MapReversed = True
self.Body.addObject(self.HoleSketch)
TestSketcherApp.CreateCircleSketch(self.HoleSketch, (-5, 5), 1)
self.Doc.recompute()
self.Hole = self.Doc.addObject("PartDesign::Hole", "Hole")
self.Hole.Profile = self.HoleSketch
self.Body.addObject(self.Hole)
self.Doc.recompute()
def testPlainHole(self):
self.Hole.Diameter = 6
self.Hole.Depth = 10
# self.Hole.DrillPointAngle = 118.000000
# self.Hole.TaperedAngle = 90
self.Hole.ThreadType = 0
self.Hole.HoleCutType = 0 # 1 = Counterbore, 2 = Countersink
# self.Hole.HoleCutDiameter = 5
# self.Hole.HoleCutCountersinkAngle = 90
# self.Hole.HoleCutDepth = 2 # Counterbore
self.Hole.DepthType = 0 # 1 = Through all
self.Hole.DrillPoint = 0 # 1 = Angled
self.Hole.Tapered = 0 # On/off
self.Doc.recompute()
self.assertAlmostEqual(self.Hole.Shape.Volume, 10**3 - pi * 3**2 * 10)
def testTaperedHole(self):
self.Hole.Diameter = 6
self.Hole.Depth = 5
self.Hole.TaperedAngle = 60
self.Hole.ThreadType = 0
self.Hole.HoleCutType = 0
self.Hole.DepthType = 0
self.Hole.DrillPoint = 0
self.Hole.Tapered = 1
self.Doc.recompute()
self.assertEqual(len(self.Hole.Shape.Faces), 8)
def METHOD_NAME(self):
self.Hole.Diameter = 6
self.Hole.Depth = 10
self.Hole.DrillPointAngle = 118
self.Hole.ThreadType = 0
self.Hole.HoleCutType = 0
self.Hole.DepthType = 0
self.Hole.DrillPoint = 1
self.Hole.Tapered = 0
self.Hole.DrillForDepth = 1
self.Doc.recompute()
self.assertEqual(len(self.Hole.Shape.Faces), 8)
def testCounterboreHole(self):
self.Hole.Diameter = 6
self.Hole.Depth = 10
self.Hole.ThreadType = 0
self.Hole.HoleCutType = 1
self.Hole.HoleCutDiameter = 8
self.Hole.HoleCutDepth = 5
self.Hole.DepthType = 0
self.Hole.DrillPoint = 0
self.Hole.Tapered = 0
self.Doc.recompute()
self.assertAlmostEqual(self.Hole.Shape.Volume, 10**3 - pi * 3**2 * 5 - pi * 4**2 * 5)
def testCountersinkHole(self):
self.Hole.Diameter = 6
self.Hole.Depth = 10
self.Hole.ThreadType = 0
self.Hole.HoleCutType = 2
self.Hole.HoleCutDiameter = 9
self.Hole.HoleCutCountersinkAngle = 90
self.Hole.DepthType = 0
self.Hole.DrillPoint = 0
self.Hole.Tapered = 0
self.Doc.recompute()
self.assertAlmostEqual(self.Hole.Shape.Volume, 10**3 - pi * 3**2 * 10 - 24.7400421)
def tearDown(self):
#closing doc
FreeCAD.closeDocument("PartDesignTestHole")
#print ("omit closing document for debugging")
|
4,183 |
parse file entry
|
# -*- coding: utf-8 -*-
"""File system stat object parser."""
import pytsk3
from dfvfs.lib import definitions as dfvfs_definitions
from plaso.containers import events
from plaso.parsers import interface
from plaso.parsers import manager
class FileStatEventData(events.EventData):
"""File system stat event data.
Attributes:
access_time (dfdatetime.DateTimeValues): file entry last access date
and time.
added_time (dfdatetime.DateTimeValues): file entry added date and time.
attribute_names ([str]): extended attribute names.
backup_time (dfdatetime.DateTimeValues): file entry backup date and time.
change_time (dfdatetime.DateTimeValues): file entry inode change
(or metadata last modification) date and time.
creation_time (dfdatetime.DateTimeValues): file entry creation date
and time.
deletion_time (dfdatetime.DateTimeValues): file entry deletion date
and time.
display_name (str): display name.
file_entry_type (int): dfVFS file entry type.
file_size (int): file size in bytes.
file_system_type (str): file system type.
filename (str): name of the file.
group_identifier (int): group identifier (GID), equivalent to st_gid.
inode (int): inode of the file.
is_allocated (bool): True if the file is allocated.
mode (int): access mode, equivalent to st_mode & 0x0fff.
modification_time (dfdatetime.DateTimeValues): file entry last modification
date and time.
number_of_links (int): number of hard links, equivalent to st_nlink.
owner_identifier (int): user identifier (UID) of the owner, equivalent to
st_uid.
"""
DATA_TYPE = 'fs:stat'
def __init__(self):
"""Initializes event data."""
super(FileStatEventData, self).__init__(data_type=self.DATA_TYPE)
self.access_time = None
self.added_time = None
self.attribute_names = None
self.backup_time = None
self.change_time = None
self.creation_time = None
self.deletion_time = None
self.display_name = None
self.file_entry_type = None
self.file_size = None
self.file_system_type = None
self.filename = None
self.group_identifier = None
self.inode = None
self.is_allocated = None
self.mode = None
self.modification_time = None
self.number_of_links = None
self.owner_identifier = None
class FileStatParser(interface.FileEntryParser):
"""Parses file system stat object."""
NAME = 'filestat'
DATA_FORMAT = 'file system stat information'
_TSK_FS_TYPE_MAP = {}
# Maps SleuthKit file system type enumeration values to a string.
_TSK_FS_TYPE_MAP = {
pytsk3.TSK_FS_TYPE_EXFAT: 'exFAT',
pytsk3.TSK_FS_TYPE_EXT2: 'EXT2',
pytsk3.TSK_FS_TYPE_EXT3: 'EXT3',
pytsk3.TSK_FS_TYPE_EXT4: 'EXT4',
pytsk3.TSK_FS_TYPE_EXT_DETECT: 'EXT',
pytsk3.TSK_FS_TYPE_FAT12: 'FAT12',
pytsk3.TSK_FS_TYPE_FAT16: 'FAT16',
pytsk3.TSK_FS_TYPE_FAT32: 'FAT32',
pytsk3.TSK_FS_TYPE_FAT_DETECT: 'FAT',
pytsk3.TSK_FS_TYPE_FFS1B: 'FFS1b',
pytsk3.TSK_FS_TYPE_FFS1: 'FFS1',
pytsk3.TSK_FS_TYPE_FFS2: 'FFS2',
pytsk3.TSK_FS_TYPE_FFS_DETECT: 'FFS',
pytsk3.TSK_FS_TYPE_HFS: 'HFS',
pytsk3.TSK_FS_TYPE_HFS_DETECT: 'HFS',
pytsk3.TSK_FS_TYPE_ISO9660: 'ISO9660',
pytsk3.TSK_FS_TYPE_ISO9660_DETECT: 'ISO9660',
pytsk3.TSK_FS_TYPE_NTFS: 'NTFS',
pytsk3.TSK_FS_TYPE_NTFS_DETECT: 'NTFS',
pytsk3.TSK_FS_TYPE_YAFFS2: 'YAFFS2',
pytsk3.TSK_FS_TYPE_YAFFS2_DETECT: 'YAFFS2'}
def _GetFileSystemTypeFromFileEntry(self, file_entry):
"""Retrieves the file system type indicator of a file entry.
Args:
file_entry (dfvfs.FileEntry): a file entry.
Returns:
str: file system type.
"""
if file_entry.type_indicator != dfvfs_definitions.TYPE_INDICATOR_TSK:
type_string = file_entry.type_indicator
else:
file_system = file_entry.GetFileSystem()
tsk_fs_type = file_system.GetFsType()
try:
type_string = self._TSK_FS_TYPE_MAP.get(tsk_fs_type, None)
except TypeError:
# Older version of pytsk3 can raise:
# TypeError: unhashable type: 'pytsk3.TSK_FS_TYPE_ENUM'
type_string = '{0!s}'.format(tsk_fs_type)
if type_string.startswith('TSK_FS_TYPE_'):
type_string = type_string[12:]
if type_string.endswith('_DETECT'):
type_string = type_string[:-7]
if not type_string:
type_string = 'UNKNOWN'
return type_string
def METHOD_NAME(self, parser_mediator, file_entry):
"""Parses a file entry.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfVFS.
file_entry (dfvfs.FileEntry): a file entry.
"""
file_system_type = self._GetFileSystemTypeFromFileEntry(file_entry)
stat_attribute = file_entry.GetStatAttribute()
attribute_names = []
for attribute in file_entry.attributes:
attribute_name = getattr(attribute, 'name', None)
if isinstance(attribute_name, bytes):
attribute_name = attribute_name.decode('utf-8')
if file_system_type != 'NTFS' and attribute_name:
attribute_names.append(attribute_name)
event_data = FileStatEventData()
event_data.access_time = file_entry.access_time
event_data.added_time = file_entry.added_time
event_data.attribute_names = attribute_names or None
event_data.backup_time = file_entry.backup_time
event_data.creation_time = file_entry.creation_time
event_data.change_time = file_entry.change_time
event_data.deletion_time = file_entry.deletion_time
event_data.display_name = parser_mediator.GetDisplayNameForPathSpec(
file_entry.path_spec)
event_data.file_entry_type = file_entry.entry_type
event_data.file_size = file_entry.size
event_data.file_system_type = file_system_type
event_data.filename = parser_mediator.GetRelativePathForPathSpec(
file_entry.path_spec)
event_data.is_allocated = file_entry.IsAllocated()
event_data.modification_time = file_entry.modification_time
if stat_attribute:
event_data.group_identifier = stat_attribute.group_identifier
event_data.inode = stat_attribute.inode_number
if stat_attribute.mode is not None:
event_data.mode = stat_attribute.mode & 0x0fff
event_data.number_of_links = stat_attribute.number_of_links
event_data.owner_identifier = stat_attribute.owner_identifier
parser_mediator.ProduceEventData(event_data)
manager.ParsersManager.RegisterParser(FileStatParser)
|
4,184 |
monkey patch
|
# SPDX-FileCopyrightText: Red Hat, Inc.
# SPDX-License-Identifier: GPL-2.0-or-later
from __future__ import absolute_import
from __future__ import division
from contextlib import contextmanager
from functools import wraps
import inspect
import six
#
# Monkey patch.
#
# Usage:
# ---
# import monkeypatch
#
# class TestSomething():
#
# def __init__(self):
# self.patch = monkeypatch.Patch([
# (subprocess, 'Popen', lambda x: None),
# (os, 'chown', lambda *x: 0)
# ])
#
# def setUp(self):
# self.patch.apply()
#
# def tearDown(self):
# self.patch.revert()
#
# def testThis(self):
# # using patched functions
#
# def testThat(self):
# # using patched functions
# ---
#
class Patch(object):
def __init__(self, what):
self.what = what
self.old = []
@staticmethod
def _is_static_method(cls, method_name, method):
is_static_py2 = six.PY2 and inspect.isfunction(method)
# In Python 3, static methods are returned as 'function' and lose
# 'staticmethod' class relationship when returned by 'getattr'
# so we have to reach to __dict__ directly. Calling 'inspect.ismethod'
# to differentiate between a regular method and a static method won't
# work without referring to *bound* method and thus, creating
# an instance of a class.
is_static_py3 = six.PY3 and isinstance(cls.__dict__[method_name],
staticmethod)
return is_static_py2 or is_static_py3
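    # Illustration (hypothetical class, not part of this module) of why the
    # Python 3 branch reads cls.__dict__ rather than using getattr():
    #
    #     class C:
    #         @staticmethod
    #         def f(): ...
    #
    #     getattr(C, "f")   # -> plain function object in Python 3
    #     C.__dict__["f"]   # -> staticmethod object, detectable with isinstance()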
@staticmethod
def _is_class_method(method):
return (inspect.ismethod(method) and
getattr(method, '__self__', None) is not None)
def apply(self):
assert self.old == []
for module, name, that in self.what:
old = getattr(module, name)
self.old.append((module, name, old))
# The following block is done so that if it is a method we are
# patching in, that it will have the same type as the method it
# replaced.
if inspect.isclass(module):
if self._is_static_method(module, name, old):
that = staticmethod(that)
elif self._is_class_method(old):
that = classmethod(that)
setattr(module, name, that)
def revert(self):
assert self.old != []
while self.old:
module, name, that = self.old.pop()
# Python 2 wrongly sets the function `that' as an instancemethod
# instead of keeping it as staticmethod.
if inspect.isclass(module) and self._is_static_method(module,
name, that):
that = staticmethod(that)
setattr(module, name, that)
#
# Monkey patch scope.
#
# Usage:
# ---
# from monkeypatch import MonkeyPatchScope
#
# def test():
# with MonkeyPatchScope([
# (subprocess, 'Popen', lambda x: None),
# (os, 'chown', lambda *x: 0)
# ])
# logic
# ---
#
@contextmanager
def MonkeyPatchScope(what):
patch = Patch(what)
patch.apply()
try:
yield {}
finally:
patch.revert()
#
# Monkey patch function decorator.
#
# Usage:
# ---
# from monkeypatch import MonkeyPatch
#
# @MonkeyPatch(subprocess, 'Popen', lambda x: None)
# @MonkeyPatch(os, 'chown', lambda *x: 0)
# def test():
# logic
# ---
#
def METHOD_NAME(module, name, that):
def decorator(f):
@wraps(f)
def wrapper(*args, **kw):
with MonkeyPatchScope([(module, name, that)]):
return f(*args, **kw)
return wrapper
return decorator
#
# Monkey patch class decorator.
#
# Usage:
# ---
# from monkeypatch import MonkeyClass
#
# @MonkeyClass(subprocess, 'Popen', lambda x: None)
# @MonkeyClass(os, 'chown', lambda *x: 0)
# class TestSomething():
#
# def testThis(self):
# # using patched functions
#
# def testThat(self):
# # using patched functions
# ---
#
def MonkeyClass(module, name, that):
def setup_decorator(func):
@wraps(func)
def setup(self, *a, **kw):
if not hasattr(self, '__monkeystack__'):
self.__monkeystack__ = []
patch = Patch([(module, name, that)])
self.__monkeystack__.append(patch)
patch.apply()
return func(self, *a, **kw)
return setup
def teardown_decorator(func):
@wraps(func)
def teardown(self, *a, **kw):
patch = self.__monkeystack__.pop()
patch.revert()
return func(self, *a, **kw)
return teardown
def wrapper(cls):
cls.setUp = setup_decorator(cls.setUp)
cls.tearDown = teardown_decorator(cls.tearDown)
return cls
return wrapper
|
4,185 |
get resolvable version
|
import platform
import json
import argparse
import urllib
import urllib.request
from subprocess import check_call, CalledProcessError
import sys
import os
import zipfile
import tarfile
import time
from packaging.version import Version, InvalidVersion
# SOURCE OF THIS FILE: https://github.com/actions/python-versions
# this is the official mapping file for gh-actions to retrieve python installers
MANIFEST_LOCATION = "https://raw.githubusercontent.com/actions/python-versions/main/versions-manifest.json"
MAX_INSTALLER_RETRY = 3
CURRENT_UBUNTU_VERSION = "22.04" # full title is ubuntu-20.04
MAX_PRECACHED_VERSION = (
"3.11.1" # reference: https://github.com/actions/runner-images/blob/main/images/linux/Ubuntu2004-Readme.md#python
)
UNIX_INSTALL_ARRAY = ["sh", "setup.sh"]
WIN_INSTALL_ARRAY = ["pwsh", "setup.ps1"]
def download_installer(remote_path, local_path):
retries = 0
while True:
try:
urllib.request.urlretrieve(remote_path, local_path)
break
except Exception as e:
print(e)
retries += 1
if retries >= MAX_INSTALLER_RETRY:
print("Unable to recover after attempting to download {} {} times".format(remote_path, retries))
exit(1)
time.sleep(10)
def install_selected_python_version(installer_url, installer_folder):
current_plat = platform.system().lower()
installer_folder = os.path.normpath(os.path.abspath(installer_folder))
if not os.path.exists(installer_folder):
os.mkdir(installer_folder)
local_installer_ref = os.path.join(
installer_folder,
"local" + (".zip" if installer_folder.endswith("zip") else ".tar.gz"),
)
download_installer(installer_url, local_installer_ref)
if current_plat == "windows":
with zipfile.ZipFile(local_installer_ref, "r") as zip_file:
zip_file.extractall(installer_folder)
try:
check_call(WIN_INSTALL_ARRAY, cwd=installer_folder)
except CalledProcessError as err:
print(err)
exit(1)
else:
with tarfile.open(local_installer_ref) as tar_file:
tar_file.extractall(installer_folder)
try:
check_call(UNIX_INSTALL_ARRAY, cwd=installer_folder)
except CalledProcessError as err:
print(err)
exit(1)
# when given a string with major.minor only (the devops/gh standard) we need to find the latest one
# in the manifest for the version we're requesting.
def METHOD_NAME(requested_version, version_manifest):
target = Version(requested_version)
if len(requested_version.split(".")) > 2:
return requested_version
else:
target_versions = [
Version(version) for version in version_manifest.keys() if version.startswith(requested_version)
]
if target_versions:
return target_versions[0]
else:
print(f'Unable to select a valid version from manifest for version "{requested_version}"')
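# Resolution sketch for the helper above (hypothetical manifest contents, not fetched here):
# with manifest keys ["3.12.0", "3.11.1", "3.11.0", ...] ordered newest-first (as the
# upstream versions-manifest.json is), a request for "3.11" resolves to the first
# matching entry, Version("3.11.1"), while a fully qualified request such as "3.11.0"
# is returned unchanged.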
def get_installer_url(requested_version, version_manifest):
current_plat = platform.system().lower()
print("Current Platform Is {}".format(platform.platform()))
actual_requested_version = METHOD_NAME(requested_version, version_manifest)
if actual_requested_version in version_manifest:
found_installers = version_manifest[actual_requested_version]["files"]
# filter anything that's not x64. we don't care.
x64_installers = [file_def for file_def in found_installers if file_def["arch"] == "x64"]
if current_plat == "windows":
return [installer for installer in x64_installers if installer["platform"] == "win32"][0]
elif current_plat == "darwin":
return [installer for installer in x64_installers if installer["platform"] == current_plat][0]
else:
return [
installer
for installer in x64_installers
if installer["platform"] == "linux" and installer["platform_version"] == CURRENT_UBUNTU_VERSION
][0]
else:
print(
f"Requested version {actual_requested_version} is not available from the manifest at {MANIFEST_LOCATION}."
)
def necessary_to_install(version_requested) -> bool:
version_from_spec = Version(version_requested)
precached_version = Version(MAX_PRECACHED_VERSION)
precached = True
# Azure Devops UsePythonVersion@0 task issues a warning if the input python version is an exact value like "3.11.1" or "3.9.4."
#
# As a result, this script needs to verify that the major/minor combo is present on the box. Unfortunately, one cannot
    # safely compare just against the MAX_PRECACHED_VERSION, as Version("3.11") evaluates to a version with value "3.11.0".
# 3.11.0 is _not_ greater than 3.11.1, and as such will fail an easy version comparison against max_precached_version.
#
# Instead, if we detect an input that has major/minor only, we compare that against major/minor of max_precached_version only.
#
# We do not include >= because if the input major.minor == the max_precached major.minor, then we know that input
# is already present.
#
# In cases where we _are_ given a full input, we can simply check against the max precached version.
if len(version_requested.split(".")) <= 2:
if version_from_spec > Version(f"{precached_version.major}.{precached_version.minor}"):
precached = False
else:
precached = version_from_spec <= precached_version
return not precached
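# Worked example for the check above (hypothetical inputs, MAX_PRECACHED_VERSION == "3.11.1"):
#   necessary_to_install("3.11")   -> False  (major.minor only, 3.11 <= 3.11, already cached)
#   necessary_to_install("3.10.4") -> False  (3.10.4 <= 3.11.1)
#   necessary_to_install("3.11.2") -> True   (3.11.2 > 3.11.1)
#   necessary_to_install("3.12")   -> True   (3.12 > 3.11)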
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="This python script ensures that a requested python version is present in the hostedtoolcache on azure devops agents. It does this by retrieving new versions of python from the gh-actions python manifest."
)
parser.add_argument(
"version_spec",
nargs="?",
help=("The version specifier passed in to the UsePythonVersion extended task."),
)
parser.add_argument(
"--installer_folder",
dest="installer_folder",
help=("The folder where the found installer will be extracted into and run from."),
)
args = parser.parse_args()
try:
version_from_spec = Version(args.version_spec)
except InvalidVersion:
print("Invalid Version Spec. Skipping custom install.")
exit(0)
if necessary_to_install(args.version_spec):
with urllib.request.urlopen(MANIFEST_LOCATION) as url:
version_manifest = json.load(url)
version_dict = {i["version"]: i for i in version_manifest}
print("Requested version {} is newer than versions pre-cached on agent. Invoking.".format(args.version_spec))
install_file_details = get_installer_url(args.version_spec, version_dict)
install_selected_python_version(install_file_details["download_url"], args.installer_folder)
else:
        print(f'Requested version "{args.version_spec}" is precached on the current agent. Skipping installation.')
|
4,186 |
connect
|
# tbot, Embedded Automation Tool
# Copyright (C) 2019 Harald Seiler
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import abc
import getpass
import paramiko
import pathlib
import typing
import contextlib
import tbot
from .. import channel
from ..linux import auth
from . import connector
Self = typing.TypeVar("Self", bound="ParamikoConnector")
class ParamikoConnector(connector.Connector):
"""
Connect to an ssh server using `Paramiko`_.
.. _Paramiko: https://www.paramiko.org/
When inheriting from this connector, you should overwrite the attributes
documented below to make it connect to your remote.
**Example**:
.. code-block:: python
from tbot.machine import connector, linux
class MyMachine(
connector.ParamikoConnector,
linux.Bash,
):
hostname = "78.79.32.85"
username = "tbot-user"
with MyMachine() as remotehost:
remotehost.exec0("uname", "-a")
"""
__slots__ = ("_client", "_config")
@property
@abc.abstractmethod
def hostname(self) -> str:
"""
Hostname of this remote.
You must always specify this parameter in your Lab config!
"""
pass
@property
def username(self) -> str:
"""
Username to log in as.
Defaults to the username from ``~/.ssh/config`` or the local username.
"""
if "user" in self._config:
assert isinstance(self._config["user"], str)
return self._config["user"]
else:
return getpass.getuser()
@property
def authenticator(self) -> auth.Authenticator:
"""
Return an authenticator that allows logging in on this machine.
See :mod:`tbot.machine.linux.auth` for available authenticators.
:rtype: tbot.machine.linux.auth.Authenticator
"""
if "identityfile" in self._config:
assert isinstance(self._config["identityfile"], list)
return auth.PrivateKeyAuthenticator(
pathlib.Path(self._config["identityfile"][0])
)
return auth.NoneAuthenticator()
@property
def port(self) -> int:
"""
Port the remote SSH server is listening on.
Defaults to ``22`` or the value of ``Port`` in ``~/.ssh/config``.
"""
if "port" in self._config:
assert isinstance(self._config["port"], str)
return int(self._config["port"])
else:
return 22
@property
def ignore_hostkey(self) -> bool:
"""
Ignore remote host key.
Set this to true if the remote changes its host key often.
Defaults to ``False`` or the value of ``StrictHostKeyChecking`` in
``~/.ssh/config``.
"""
if "stricthostkeychecking" in self._config:
assert isinstance(self._config["stricthostkeychecking"], str)
return self._config["stricthostkeychecking"] == "no"
else:
return False
def __repr__(self) -> str:
return (
f"<{self.__class__.__name__} {self.username}@{self.hostname}:{self.port}>"
)
def __init__(self, other: "typing.Optional[ParamikoConnector]" = None) -> None:
"""
:param ParamikoConnector other: Build this connection by opening a new
channel in an existing ssh-connection.
"""
self._client: typing.Optional[paramiko.SSHClient] = None
self._config: typing.Dict[str, typing.Union[str, typing.List[str]]] = {}
if other is not None:
self._client = other._client
self._config = other._config
@classmethod
@contextlib.contextmanager
def from_context(
cls: typing.Type[Self], ctx: "tbot.Context"
) -> typing.Iterator[Self]:
with cls() as m:
yield m
def METHOD_NAME(self) -> channel.Channel:
if self._client is None:
self._client = paramiko.SSHClient()
try:
c = paramiko.config.SSHConfig()
with open(pathlib.Path.home() / ".ssh" / "config") as cfg:
c.parse(cfg)
self._config = c.lookup(self.hostname)
except FileNotFoundError:
# Config file does not exist
pass
except Exception as e:
# Invalid config
tbot.log.warning(
tbot.log.c("Invalid").red + f" .ssh/config: {str(e):s}"
)
raise
if self.ignore_hostkey:
self._client.set_missing_host_key_policy(
paramiko.client.AutoAddPolicy()
)
else:
self._client.load_system_host_keys()
password = None
key_file = None
authenticator = self.authenticator
if isinstance(authenticator, auth.NoneAuthenticator):
pass
elif isinstance(authenticator, auth.PrivateKeyAuthenticator):
key_file = authenticator.get_key_for_host(None)
elif isinstance(authenticator, auth.PasswordAuthenticator):
password = authenticator.password
else:
if typing.TYPE_CHECKING:
authenticator._undefined_marker
raise ValueError(f"Unknown authenticator {authenticator!r}")
tbot.log.message(
"Logging in on "
+ tbot.log.c(f"{self.username}@{self.hostname}:{self.port}").yellow
+ " ...",
verbosity=tbot.log.Verbosity.COMMAND,
)
if "hostname" in self._config:
hostname = str(self._config["hostname"])
else:
hostname = self.hostname
self._client.connect(
hostname,
username=self.username,
port=self.port,
password=password,
key_filename=key_file,
)
return channel.ParamikoChannel(self._client.get_transport().open_session())
def clone(self: Self) -> Self:
"""
Clone this machine.
        Note that an ssh-session cannot hold an unlimited number of channels so
cloning too much might lead to issues. The exact limit is dependent on
the server configuration.
"""
new = type(self)(self)
new._orig = self._orig or self
return new
|
4,187 |
word problem
|
"""
Abelian group elements
AUTHORS:
- David Joyner (2006-02); based on free_abelian_monoid_element.py, written by David Kohel.
- David Joyner (2006-05); bug fix in order
- David Joyner (2006-08); bug fix+new method in pow for negatives+fixed corresponding examples.
- David Joyner (2009-02): Fixed bug in order.
- Volker Braun (2012-11) port to new Parent base. Use tuples for immutables.
EXAMPLES:
Recall an example from abelian groups::
sage: F = AbelianGroup(5,[4,5,5,7,8],names = list("abcde"))
sage: (a,b,c,d,e) = F.gens()
sage: x = a*b^2*e*d^20*e^12
sage: x
a*b^2*d^6*e^5
sage: x = a^10*b^12*c^13*d^20*e^12
sage: x
a^2*b^2*c^3*d^6*e^4
sage: y = a^13*b^19*c^23*d^27*e^72
sage: y
a*b^4*c^3*d^6
sage: x*y
a^3*b*c*d^5*e^4
sage: x.list()
[2, 2, 3, 6, 4]
"""
###########################################################################
# Copyright (C) 2006 William Stein <[email protected]>
# Copyright (C) 2006 David Joyner <[email protected]>
# Copyright (C) 2012 Volker Braun <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
# https://www.gnu.org/licenses/
###########################################################################
from sage.groups.abelian_gps.element_base import AbelianGroupElementBase
def is_AbelianGroupElement(x):
"""
Return true if x is an abelian group element, i.e., an element of
type AbelianGroupElement.
EXAMPLES: Though the integer 3 is in the integers, and the integers
have an abelian group structure, 3 is not an AbelianGroupElement::
sage: from sage.groups.abelian_gps.abelian_group_element import is_AbelianGroupElement
sage: is_AbelianGroupElement(3)
False
sage: F = AbelianGroup(5, [3,4,5,8,7], 'abcde')
sage: is_AbelianGroupElement(F.0)
True
"""
return isinstance(x, AbelianGroupElement)
class AbelianGroupElement(AbelianGroupElementBase):
"""
Elements of an
:class:`~sage.groups.abelian_gps.abelian_group.AbelianGroup`
INPUT:
- ``x`` -- list/tuple/iterable of integers (the element vector)
- ``parent`` -- the parent
:class:`~sage.groups.abelian_gps.abelian_group.AbelianGroup`
EXAMPLES::
sage: F = AbelianGroup(5, [3,4,5,8,7], 'abcde')
sage: a, b, c, d, e = F.gens()
sage: a^2 * b^3 * a^2 * b^-4
a*b^3
sage: b^-11
b
sage: a^-11
a
sage: a*b in F
True
"""
def as_permutation(self):
r"""
Return the element of the permutation group G (isomorphic to the
abelian group A) associated to a in A.
EXAMPLES::
sage: G = AbelianGroup(3,[2,3,4],names="abc"); G
Multiplicative Abelian group isomorphic to C2 x C3 x C4
sage: a,b,c = G.gens()
sage: Gp = G.permutation_group(); Gp
Permutation Group with generators [(6,7,8,9), (3,4,5), (1,2)]
sage: a.as_permutation()
(1,2)
sage: ap = a.as_permutation(); ap
(1,2)
sage: ap in Gp
True
"""
from sage.libs.gap.libgap import libgap
G = self.parent()
A = libgap.AbelianGroup(G.gens_orders())
phi = libgap.IsomorphismPermGroup(A)
gens = libgap.GeneratorsOfGroup(A)
L2 = libgap.Product([geni**Li for geni, Li in zip(gens, self.list())])
pg = libgap.Image(phi, L2)
return G.permutation_group()(pg)
def METHOD_NAME(self, words):
"""
TODO - this needs a rewrite - see stuff in the matrix_grp
directory.
G and H are abelian groups, g in G, H is a subgroup of G generated
by a list (words) of elements of G. If self is in H, return the
expression for self as a word in the elements of (words).
This function does not solve the word problem in Sage. Rather
it pushes it over to GAP, which has optimized (non-deterministic)
algorithms for the word problem.
.. warning::
Don't use E (or other GAP-reserved letters) as a generator
name.
EXAMPLES::
sage: G = AbelianGroup(2,[2,3], names="xy")
sage: x,y = G.gens()
sage: x.word_problem([x,y])
[[x, 1]]
sage: y.word_problem([x,y])
[[y, 1]]
sage: v = (y*x).word_problem([x,y]); v #random
[[x, 1], [y, 1]]
sage: prod([x^i for x,i in v]) == y*x
True
"""
from sage.groups.abelian_gps.abelian_group import METHOD_NAME
return METHOD_NAME(words, self)
|
4,188 |
is random datapipe
|
import inspect
import warnings
from typing import Any, List, Optional, Set
import torch
from torch.utils.data.datapipes.iter.sharding import (
_ShardingIterDataPipe,
SHARDING_PRIORITIES,
)
from torch.utils.data.graph import DataPipe, DataPipeGraph, traverse_dps
__all__ = [
"apply_random_seed",
"apply_sharding",
"apply_shuffle_seed",
"apply_shuffle_settings",
"get_all_graph_pipes",
]
def get_all_graph_pipes(graph: DataPipeGraph) -> List[DataPipe]:
return _get_all_graph_pipes_helper(graph, set())
def _get_all_graph_pipes_helper(graph: DataPipeGraph, id_cache: Set[int]) -> List[DataPipe]:
results: List[DataPipe] = []
for dp_id, (datapipe, sub_graph) in graph.items():
if dp_id in id_cache:
continue
id_cache.add(dp_id)
results.append(datapipe)
results.extend(_get_all_graph_pipes_helper(sub_graph, id_cache))
return results
def _is_sharding_datapipe(datapipe: DataPipe) -> bool:
if isinstance(datapipe, _ShardingIterDataPipe):
return True
if hasattr(datapipe, "apply_sharding") and inspect.ismethod(datapipe.apply_sharding):
return True
return False
def apply_sharding(datapipe: DataPipe,
num_of_instances: int,
instance_id: int,
sharding_group=SHARDING_PRIORITIES.DEFAULT) -> DataPipe:
r"""
Apply dynamic sharding over the ``sharding_filter`` DataPipe that has a method ``apply_sharding``.
RuntimeError will be raised when multiple ``sharding_filter`` are presented in the same branch.
"""
graph = traverse_dps(datapipe)
def _helper(graph, prev_applied=None):
for (dp, sub_graph) in graph.values():
applied = None
if _is_sharding_datapipe(dp):
if prev_applied is not None:
raise RuntimeError("Sharding twice on a single pipeline is likely unintended and will cause data loss. "
f"Sharding already applied to {prev_applied} while trying to apply to {dp}")
# For BC, only provide sharding_group if accepted
sig = inspect.signature(dp.apply_sharding)
if len(sig.parameters) < 3:
dp.apply_sharding(num_of_instances, instance_id)
else:
dp.apply_sharding(num_of_instances, instance_id, sharding_group=sharding_group)
applied = dp
if applied is None:
applied = prev_applied
_helper(sub_graph, applied)
_helper(graph)
return datapipe
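# Minimal usage sketch (assumes the functional ``sharding_filter`` datapipe API,
# as registered by torch.utils.data.datapipes):
#
#   from torch.utils.data.datapipes.iter import IterableWrapper
#   dp = IterableWrapper(range(10)).sharding_filter()
#   apply_sharding(dp, num_of_instances=2, instance_id=0)
#   assert list(dp) == [0, 2, 4, 6, 8]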
def _is_shuffle_datapipe(datapipe: DataPipe) -> bool:
if not hasattr(datapipe, "set_shuffle") or not hasattr(datapipe, "set_seed"):
return False
if not inspect.ismethod(datapipe.set_shuffle) or not inspect.ismethod(datapipe.set_seed):
return False
return True
def apply_shuffle_settings(datapipe: DataPipe, shuffle: Optional[bool] = None) -> DataPipe:
r"""
Traverse the graph of ``DataPipes`` to find and set shuffle attribute
to each `DataPipe` that has APIs of ``set_shuffle`` and ``set_seed``.
Args:
datapipe: DataPipe that needs to set shuffle attribute
shuffle: Shuffle option (default: ``None`` and no-op to the graph)
"""
if shuffle is None:
return datapipe
graph = traverse_dps(datapipe)
all_pipes = get_all_graph_pipes(graph)
shufflers = [pipe for pipe in all_pipes if _is_shuffle_datapipe(pipe)]
if not shufflers and shuffle:
warnings.warn(
"`shuffle=True` was set, but the datapipe does not contain a `Shuffler`. Adding one at the end. "
"Be aware that the default buffer size might not be sufficient for your task."
)
datapipe = datapipe.shuffle()
shufflers = [datapipe, ] # type: ignore[list-item]
for shuffler in shufflers:
shuffler.set_shuffle(shuffle)
return datapipe
def apply_shuffle_seed(datapipe: DataPipe, rng: Any) -> DataPipe:
warnings.warn(
"`apply_shuffle_seed` is deprecated since 1.12 and will be removed in the future releases."
"\nPlease use `apply_random_seed` instead."
)
return apply_random_seed(datapipe, rng)
def METHOD_NAME(datapipe: DataPipe) -> bool:
if hasattr(datapipe, "set_seed") and inspect.ismethod(datapipe.set_seed):
return True
return False
def apply_random_seed(datapipe: DataPipe, rng: torch.Generator) -> DataPipe:
r"""
Traverse the graph of ``DataPipes`` to find random ``DataPipe`` with an API of
``set_seed`` then set the random seed based on the provided RNG.
Args:
datapipe: DataPipe that needs to set randomness
rng: Random number generator to generate random seeds
"""
graph = traverse_dps(datapipe)
all_pipes = get_all_graph_pipes(graph)
# Using a set to track id of DataPipe to prevent setting randomness per DataPipe more than once.
# And, `id` is used in case of unhashable DataPipe
cache = set()
random_datapipes = []
for pipe in all_pipes:
if id(pipe) in cache:
continue
if METHOD_NAME(pipe):
random_datapipes.append(pipe)
cache.add(id(pipe))
for pipe in random_datapipes:
random_seed = int(torch.empty((), dtype=torch.int64).random_(generator=rng).item())
pipe.set_seed(random_seed)
return datapipe
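# Usage sketch (assumes a shuffler is present in the graph, e.g. added via ``.shuffle()``):
#
#   rng = torch.Generator()
#   rng.manual_seed(0)
#   dp = IterableWrapper(range(10)).shuffle()
#   apply_random_seed(dp, rng)  # seeds the Shuffler deterministically before iteration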
|
4,189 |
scan completed
|
import logging
from ipaddress import IPv4Address
from queue import Queue
from threading import Barrier, Event
from typing import Callable, Collection, Iterable, Sequence, Tuple
from unittest.mock import MagicMock
import pytest
from tests.unit_tests.infection_monkey.master.mock_puppet import MockPuppet
from common import OperatingSystem
from common.agent_configuration import AgentConfiguration, ExploitationConfiguration
from infection_monkey.i_puppet import TargetHost
from infection_monkey.master import Exploiter
logger = logging.getLogger()
@pytest.fixture(autouse=True)
def patch_queue_timeout(monkeypatch):
monkeypatch.setattr("infection_monkey.master.exploiter.QUEUE_TIMEOUT", 0.001)
@pytest.fixture
def METHOD_NAME() -> Event:
return Event()
@pytest.fixture
def stop() -> Event:
return Event()
@pytest.fixture
def callback() -> Callable:
return MagicMock()
EXPLOITERS = {
"SSHExploiter": {},
"ZerologonExploiter": {},
"Exploiter1": {"timeout": 10},
}
@pytest.fixture(params=[EXPLOITERS])
def exploiter_config(
request, default_agent_configuration: AgentConfiguration
) -> ExploitationConfiguration:
return ExploitationConfiguration(
options=default_agent_configuration.propagation.exploitation.options,
exploiters=request.param,
)
@pytest.fixture
def hosts() -> Iterable[TargetHost]:
host_1 = TargetHost(ip=IPv4Address("10.0.0.1"))
host_2 = TargetHost(ip=IPv4Address("10.0.0.3"))
return [host_1, host_2]
@pytest.fixture
def hosts_to_exploit(hosts: Iterable[TargetHost]):
return enqueue_hosts(hosts)
def enqueue_hosts(hosts: Iterable[TargetHost]) -> Queue:
q: Queue = Queue()
for h in hosts:
q.put(h)
return q
def get_host_exploit_combos_from_call_args_list(
call_args_list: Sequence,
) -> Collection[Tuple[str, TargetHost]]:
host_exploit_combos = set()
for call_args in call_args_list:
exploiter_name = call_args[0][0]
target_host = call_args[0][1]
host_exploit_combos.add((exploiter_name, target_host))
return host_exploit_combos
CREDENTIALS_FOR_PROPAGATION = {"usernames": ["m0nk3y", "user"], "passwords": ["1234", "pword"]}
SERVERS = ["127.0.0.1:5000", "10.10.10.10:5007"]
@pytest.fixture
def run_exploiters(
hosts_to_exploit,
exploiter_config,
callback,
METHOD_NAME,
stop,
):
def inner(puppet, num_workers, hosts=hosts_to_exploit, exploiter_config=exploiter_config):
# Set this so that Exploiter() exits once it has processed all victims
METHOD_NAME.set()
e = Exploiter(puppet, num_workers)
e.exploit_hosts(exploiter_config, hosts, 1, SERVERS, callback, METHOD_NAME, stop)
return inner
def test_exploiter(callback, hosts, run_exploiters):
run_exploiters(MockPuppet(), 2)
assert callback.call_count == 6
host_exploit_combos = get_host_exploit_combos_from_call_args_list(callback.call_args_list)
assert ("ZerologonExploiter", hosts[0]) in host_exploit_combos
assert ("SSHExploiter", hosts[0]) in host_exploit_combos
assert ("Exploiter1", hosts[0]) in host_exploit_combos
assert ("ZerologonExploiter", hosts[1]) in host_exploit_combos
assert ("Exploiter1", hosts[1]) in host_exploit_combos
assert ("SSHExploiter", hosts[1]) in host_exploit_combos
def test_exploiter_order(callback, run_exploiters):
run_exploiters(MockPuppet(), 1)
assert callback.call_args_list[0][0][0] == "SSHExploiter"
assert callback.call_args_list[1][0][0] == "ZerologonExploiter"
assert callback.call_args_list[2][0][0] == "Exploiter1"
assert callback.call_args_list[3][0][0] == "SSHExploiter"
assert callback.call_args_list[4][0][0] == "ZerologonExploiter"
assert callback.call_args_list[5][0][0] == "Exploiter1"
def test_stop_after_callback(
exploiter_config,
callback,
METHOD_NAME,
stop,
hosts_to_exploit,
):
callback_barrier_count = 2
def _callback(*_):
# Block all threads here until 2 threads reach this barrier, then set stop
# and test that neither thread continues to scan.
_callback.barrier.wait()
stop.set()
_callback.barrier = Barrier(callback_barrier_count)
stoppable_callback = MagicMock(side_effect=_callback)
# Intentionally NOT setting scan_completed.set(); _callback() will set stop
e = Exploiter(MockPuppet(), callback_barrier_count + 2)
e.exploit_hosts(
exploiter_config, hosts_to_exploit, 1, SERVERS, stoppable_callback, METHOD_NAME, stop
)
assert stoppable_callback.call_count == 2
def test_exploiter_raises_exception(callback, hosts, hosts_to_exploit, run_exploiters):
error_message = "Unexpected error"
mock_puppet = MockPuppet()
mock_puppet.exploit_host = MagicMock(side_effect=Exception(error_message))
run_exploiters(mock_puppet, 3)
assert callback.call_count == 6
for i in range(0, 6):
exploit_result_data = callback.call_args_list[i][0][2]
assert exploit_result_data.exploitation_success is False
assert exploit_result_data.propagation_success is False
assert error_message in exploit_result_data.error_message
def test_windows_exploiters_run_on_windows_host(callback, hosts, hosts_to_exploit, run_exploiters):
host = TargetHost(ip=IPv4Address("10.0.0.1"), operating_system=OperatingSystem.WINDOWS)
q = enqueue_hosts([host])
run_exploiters(MockPuppet(), 1, q)
assert callback.call_count == 2
host_exploit_combos = get_host_exploit_combos_from_call_args_list(callback.call_args_list)
assert ("SSHExploiter", host) not in host_exploit_combos
def test_linux_exploiters_run_on_linux_host(callback, hosts, hosts_to_exploit, run_exploiters):
host = TargetHost(ip=IPv4Address("10.0.0.1"), operating_system=OperatingSystem.LINUX)
q = enqueue_hosts([host])
run_exploiters(MockPuppet(), 1, q)
assert callback.call_count == 1
host_exploit_combos = get_host_exploit_combos_from_call_args_list(callback.call_args_list)
assert ("SSHExploiter", host) in host_exploit_combos
def test_all_exploiters_run_on_unknown_host(callback, hosts, hosts_to_exploit, run_exploiters):
host = TargetHost(ip=IPv4Address("10.0.0.1"))
q = enqueue_hosts([host])
run_exploiters(MockPuppet(), 1, q)
assert callback.call_count == 3
host_exploit_combos = get_host_exploit_combos_from_call_args_list(callback.call_args_list)
assert ("ZerologonExploiter", hosts[0]) in host_exploit_combos
assert ("SSHExploiter", host) in host_exploit_combos
assert ("Exploiter1", hosts[0]) in host_exploit_combos
def test_callback_skipped_on_rejected_request(callback, run_exploiters, exploiter_config):
exploiter_config.exploiters = {"ZerologonExploiter": {}}
host = TargetHost(ip=IPv4Address("10.0.0.1"), operating_system=OperatingSystem.LINUX)
q = enqueue_hosts([host])
run_exploiters(MockPuppet(), 1, q, exploiter_config)
assert callback.call_count == 0
|
4,190 |
service loop
|
#
# Copyright (c) 2011-2014 Red Hat, Inc. <http://www.redhat.com>
# This file is part of GlusterFS.
# This file is licensed to you under your choice of the GNU Lesser
# General Public License, version 3 or any later version (LGPLv3 or
# later), or the GNU General Public License, version 2 (GPLv2), in all
# cases as published by the Free Software Foundation.
#
import os
import sys
import time
import logging
from threading import Condition
try:
import _thread as thread
except ImportError:
import thread
try:
from queue import Queue
except ImportError:
from Queue import Queue
try:
import cPickle as pickle
except ImportError:
import pickle
from syncdutils import Thread, select, lf
pickle_proto = 2
repce_version = 1.0
def ioparse(i, o):
if isinstance(i, int):
i = os.fdopen(i, 'rb')
# rely on duck typing for recognizing
# streams as that works uniformly
# in py2 and py3
if hasattr(o, 'fileno'):
o = o.fileno()
return (i, o)
def send(out, *args):
"""pickle args and write out wholly in one syscall
ie. not use the ability of pickle to dump directly to
a stream, as that would potentially mess up messages
by interleaving them
"""
os.write(out, pickle.dumps(args, pickle_proto))
def recv(inf):
"""load an object from input stream
python2 and python3 compatibility, inf is sys.stdin
and is opened as text stream by default. Hence using the
buffer attribute in python3
"""
if hasattr(inf, "buffer"):
return pickle.load(inf.buffer)
else:
return pickle.load(inf)
class RepceServer(object):
"""RePCe is Hungarian for canola, http://hu.wikipedia.org/wiki/Repce
... also our homebrewed RPC backend where the transport layer is
reduced to a pair of filehandles.
This is the server component.
"""
def __init__(self, obj, i, o, wnum=6):
"""register a backend object .obj to which incoming messages
are dispatched, also incoming/outcoming streams
"""
self.obj = obj
self.inf, self.out = ioparse(i, o)
self.wnum = wnum
self.q = Queue()
def METHOD_NAME(self):
"""fire up worker threads, get messages and dispatch among them"""
for i in range(self.wnum):
t = Thread(target=self.worker)
t.start()
try:
while True:
self.q.put(recv(self.inf))
except EOFError:
logging.info("terminating on reaching EOF.")
def worker(self):
"""life of a worker
Get message, extract its id, method name and arguments
(kwargs not supported), call method on .obj.
Send back message id + return value.
If method call throws an exception, rescue it, and send
back the exception as result (with flag marking it as
exception).
"""
while True:
in_data = self.q.get(True)
rid = in_data[0]
rmeth = in_data[1]
exc = False
if rmeth == '__repce_version__':
res = repce_version
else:
try:
res = getattr(self.obj, rmeth)(*in_data[2:])
except:
res = sys.exc_info()[1]
exc = True
logging.exception("call failed: ")
send(self.out, rid, exc, res)
class RepceJob(object):
"""class representing message status we can use
for waiting on reply"""
def __init__(self, cbk):
"""
- .rid: (process-wise) unique id
- .cbk: what we do upon receiving reply
"""
self.rid = (os.getpid(), thread.get_ident(), time.time())
self.cbk = cbk
self.lever = Condition()
self.done = False
def __repr__(self):
return ':'.join([str(x) for x in self.rid])
def wait(self):
self.lever.acquire()
if not self.done:
self.lever.wait()
self.lever.release()
return self.result
def wakeup(self, data):
self.result = data
self.lever.acquire()
self.done = True
self.lever.notify()
self.lever.release()
class RepceClient(object):
"""RePCe is Hungarian for canola, http://hu.wikipedia.org/wiki/Repce
... also our homebrewed RPC backend where the transport layer is
reduced to a pair of filehandles.
This is the client component.
"""
def __init__(self, i, o):
self.inf, self.out = ioparse(i, o)
self.jtab = {}
t = Thread(target=self.listen)
t.start()
def listen(self):
while True:
select((self.inf,), (), ())
rid, exc, res = recv(self.inf)
rjob = self.jtab.pop(rid)
if rjob.cbk:
rjob.cbk(rjob, [exc, res])
def push(self, meth, *args, **kw):
"""wrap arguments in a RepceJob, send them to server
and return the RepceJob
@cbk to pass on RepceJob can be given as kwarg.
"""
cbk = kw.get('cbk')
if not cbk:
def cbk(rj, res):
if res[0]:
raise res[1]
rjob = RepceJob(cbk)
self.jtab[rjob.rid] = rjob
logging.debug("call %s %s%s ..." % (repr(rjob), meth, repr(args)))
send(self.out, rjob.rid, meth, *args)
return rjob
def __call__(self, meth, *args):
"""RePCe client is callabe, calling it implements a synchronous
remote call.
We do a .push with a cbk which does a wakeup upon receiving answer,
then wait on the RepceJob.
"""
rjob = self.push(
meth, *args, **{'cbk': lambda rj, res: rj.wakeup(res)})
exc, res = rjob.wait()
if exc:
logging.error(lf('call failed',
call=repr(rjob),
method=meth,
error=str(type(res).__name__)))
raise res
logging.debug("call %s %s -> %s" % (repr(rjob), meth, repr(res)))
return res
class mprx(object):
"""method proxy, standard trick to implement rubyesque
method_missing in Python
A class is a closure factory, you know what I mean, or go read
some SICP.
"""
def __init__(self, ins, meth):
self.ins = ins
self.meth = meth
def __call__(self, *a):
return self.ins(self.meth, *a)
def __getattr__(self, meth):
"""this implements transparent method dispatch to remote object,
so that you don't need to call the RepceClient instance like
rclient('how_old_are_you_if_born_in', 1979)
but you can make it into an ordinary method call like
rclient.how_old_are_you_if_born_in(1979)
"""
return self.mprx(self, meth)
def __version__(self):
"""used in handshake to verify compatibility"""
d = {'proto': self('__repce_version__')}
try:
d['object'] = self('version')
except AttributeError:
pass
return d
|
4,191 |
get form kwargs
|
from django.contrib.auth.mixins import PermissionRequiredMixin, LoginRequiredMixin
from django.db import transaction
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django.views.generic import (
CreateView,
UpdateView,
ListView,
)
from tapir.accounts.models import TapirUser
from tapir.core.views import TapirFormMixin
from tapir.log.util import freeze_for_log
from tapir.log.views import UpdateViewLogMixin
from tapir.settings import PERMISSION_SHIFTS_EXEMPTIONS
from tapir.shifts.forms import (
ShiftExemptionForm,
)
from tapir.shifts.models import (
ShiftAttendance,
ShiftAttendanceTemplate,
ShiftUserData,
ShiftExemption,
CreateExemptionLogEntry,
UpdateExemptionLogEntry,
)
from tapir.utils.user_utils import UserUtils
class CreateShiftExemptionView(
LoginRequiredMixin, PermissionRequiredMixin, TapirFormMixin, CreateView
):
model = ShiftExemption
form_class = ShiftExemptionForm
permission_required = PERMISSION_SHIFTS_EXEMPTIONS
def get_target_user_data(self) -> ShiftUserData:
return ShiftUserData.objects.get(pk=self.kwargs["shift_user_data_pk"])
def METHOD_NAME(self, *args, **kwargs):
self.object = self.model()
self.object.shift_user_data = self.get_target_user_data()
# Will pass the object to the form
return super().METHOD_NAME(*args, **kwargs)
def form_valid(self, form):
with transaction.atomic():
exemption: ShiftExemption = form.instance
self.cancel_attendances_covered_by_exemption(exemption)
CreateExemptionLogEntry().populate(
start_date=exemption.start_date,
end_date=exemption.end_date,
actor=self.request.user,
tapir_user=exemption.shift_user_data.user,
).save()
return super().form_valid(form)
@staticmethod
def cancel_attendances_covered_by_exemption(exemption: ShiftExemption):
user = exemption.shift_user_data.user
for attendance in ShiftExemption.get_attendances_cancelled_by_exemption(
user=user,
start_date=exemption.start_date,
end_date=exemption.end_date,
):
attendance.state = ShiftAttendance.State.CANCELLED
attendance.excused_reason = (
_("Is covered by shift exemption: ") + exemption.description
)
attendance.save()
if ShiftExemption.must_unregister_from_abcd_shift(
start_date=exemption.start_date, end_date=exemption.end_date
):
ShiftAttendanceTemplate.objects.filter(user=user).delete()
def get_success_url(self):
return self.get_target_user_data().user.get_absolute_url()
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
tapir_user = self.get_target_user_data().user
context["page_title"] = _("Shift exemption: %(name)s") % {
"name": UserUtils.build_display_name_for_viewer(
tapir_user, self.request.user
)
}
context["card_title"] = _("Create shift exemption for: %(link)s") % {
"link": UserUtils.build_html_link_for_viewer(tapir_user, self.request.user)
}
return context
class EditShiftExemptionView(
LoginRequiredMixin,
PermissionRequiredMixin,
TapirFormMixin,
UpdateViewLogMixin,
UpdateView,
):
model = ShiftExemption
form_class = ShiftExemptionForm
permission_required = PERMISSION_SHIFTS_EXEMPTIONS
def get_success_url(self):
return reverse("shifts:shift_exemption_list")
def form_valid(self, form):
with transaction.atomic():
response = super().form_valid(form)
exemption: ShiftExemption = form.instance
CreateShiftExemptionView.cancel_attendances_covered_by_exemption(exemption)
new_frozen = freeze_for_log(form.instance)
if self.old_object_frozen != new_frozen:
UpdateExemptionLogEntry().populate(
old_frozen=self.old_object_frozen,
new_frozen=new_frozen,
tapir_user=ShiftUserData.objects.get(
shift_exemptions=self.kwargs["pk"]
).user,
actor=self.request.user,
).save()
return response
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
tapir_user: TapirUser = self.object.shift_user_data.user
context["page_title"] = _("Shift exemption: %(name)s") % {
"name": UserUtils.build_display_name_for_viewer(
tapir_user, self.request.user
)
}
context["card_title"] = _("Edit shift exemption for: %(link)s") % {
"link": UserUtils.build_html_link_for_viewer(tapir_user, self.request.user)
}
return context
class ShiftExemptionListView(LoginRequiredMixin, ListView):
model = ShiftExemption
def get_queryset(self):
queryset = super().get_queryset()
if not self.request.user.has_perm(PERMISSION_SHIFTS_EXEMPTIONS):
queryset = queryset.filter(
shift_user_data__id=self.request.user.shift_user_data.id
)
shift_user_data_id = self.request.GET.get("shift_user_data_id", None)
if shift_user_data_id is not None:
queryset = queryset.filter(shift_user_data__id=shift_user_data_id)
return queryset
def get_context_data(self, **kwargs):
context_data = super().get_context_data(**kwargs)
shift_user_data_id = self.request.GET.get("shift_user_data_id", None)
if shift_user_data_id is not None:
context_data["shift_user_data"] = ShiftUserData.objects.get(
pk=shift_user_data_id
)
return context_data
|
4,192 |
ensure localedir
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2018 (ita)
"""
Support for translation tools such as msgfmt and intltool
Usage::
def configure(conf):
conf.load('gnu_dirs intltool')
def build(bld):
# process the .po files into .gmo files, and install them in LOCALEDIR
bld(features='intltool_po', appname='myapp', podir='po', install_path="${LOCALEDIR}")
# process an input file, substituting the translations from the po dir
bld(
features = "intltool_in",
podir = "../po",
style = "desktop",
flags = ["-u"],
source = 'kupfer.desktop.in',
install_path = "${DATADIR}/applications",
)
Usage of the :py:mod:`waflib.Tools.gnu_dirs` is recommended, but not obligatory.
"""
from __future__ import with_statement
import os, re
from waflib import Context, Task, Utils, Logs
import waflib.Tools.ccroot
from waflib.TaskGen import feature, before_method, taskgen_method
from waflib.Logs import error
from waflib.Configure import conf
_style_flags = {
'ba': '-b',
'desktop': '-d',
'keys': '-k',
'quoted': '--quoted-style',
'quotedxml': '--quotedxml-style',
'rfc822deb': '-r',
'schemas': '-s',
'xml': '-x',
}
@taskgen_method
def METHOD_NAME(self):
"""
Expands LOCALEDIR from DATAROOTDIR/locale if possible, or falls back to PREFIX/share/locale
"""
# use the tool gnu_dirs to provide options to define this
if not self.env.LOCALEDIR:
if self.env.DATAROOTDIR:
self.env.LOCALEDIR = os.path.join(self.env.DATAROOTDIR, 'locale')
else:
self.env.LOCALEDIR = os.path.join(self.env.PREFIX, 'share', 'locale')
@before_method('process_source')
@feature('intltool_in')
def apply_intltool_in_f(self):
"""
Creates tasks to translate files by intltool-merge::
def build(bld):
bld(
features = "intltool_in",
podir = "../po",
style = "desktop",
flags = ["-u"],
source = 'kupfer.desktop.in',
install_path = "${DATADIR}/applications",
)
:param podir: location of the .po files
:type podir: string
:param source: source files to process
:type source: list of string
:param style: the intltool-merge mode of operation, can be one of the following values:
``ba``, ``desktop``, ``keys``, ``quoted``, ``quotedxml``, ``rfc822deb``, ``schemas`` and ``xml``.
See the ``intltool-merge`` man page for more information about supported modes of operation.
:type style: string
:param flags: compilation flags ("-quc" by default)
:type flags: list of string
:param install_path: installation path
:type install_path: string
"""
try:
self.meths.remove('process_source')
except ValueError:
pass
self.METHOD_NAME()
podir = getattr(self, 'podir', '.')
podirnode = self.path.find_dir(podir)
if not podirnode:
error("could not find the podir %r" % podir)
return
cache = getattr(self, 'intlcache', '.intlcache')
self.env.INTLCACHE = [os.path.join(str(self.path.get_bld()), podir, cache)]
self.env.INTLPODIR = podirnode.bldpath()
self.env.append_value('INTLFLAGS', getattr(self, 'flags', self.env.INTLFLAGS_DEFAULT))
if '-c' in self.env.INTLFLAGS:
self.bld.fatal('Redundant -c flag in intltool task %r' % self)
style = getattr(self, 'style', None)
if style:
try:
style_flag = _style_flags[style]
except KeyError:
self.bld.fatal('intltool_in style "%s" is not valid' % style)
self.env.append_unique('INTLFLAGS', [style_flag])
for i in self.to_list(self.source):
node = self.path.find_resource(i)
task = self.create_task('intltool', node, node.change_ext(''))
inst = getattr(self, 'install_path', None)
if inst:
self.add_install_files(install_to=inst, install_from=task.outputs)
@feature('intltool_po')
def apply_intltool_po(self):
"""
Creates tasks to process po files::
def build(bld):
bld(features='intltool_po', appname='myapp', podir='po', install_path="${LOCALEDIR}")
The relevant task generator arguments are:
:param podir: directory of the .po files
:type podir: string
:param appname: name of the application
:type appname: string
:param install_path: installation directory
:type install_path: string
The file LINGUAS must be present in the directory pointed by *podir* and list the translation files to process.
"""
try:
self.meths.remove('process_source')
except ValueError:
pass
self.METHOD_NAME()
appname = getattr(self, 'appname', getattr(Context.g_module, Context.APPNAME, 'set_your_app_name'))
podir = getattr(self, 'podir', '.')
inst = getattr(self, 'install_path', '${LOCALEDIR}')
linguas = self.path.find_node(os.path.join(podir, 'LINGUAS'))
if linguas:
# scan LINGUAS file for locales to process
with open(linguas.abspath()) as f:
langs = []
for line in f.readlines():
# ignore lines containing comments
if not line.startswith('#'):
langs += line.split()
re_linguas = re.compile('[-a-zA-Z_@.]+')
for lang in langs:
# Make sure that we only process lines which contain locales
if re_linguas.match(lang):
node = self.path.find_resource(os.path.join(podir, re_linguas.match(lang).group() + '.po'))
task = self.create_task('po', node, node.change_ext('.mo'))
if inst:
filename = task.outputs[0].name
(langname, ext) = os.path.splitext(filename)
inst_file = inst + os.sep + langname + os.sep + 'LC_MESSAGES' + os.sep + appname + '.mo'
self.add_install_as(install_to=inst_file, install_from=task.outputs[0],
chmod=getattr(self, 'chmod', Utils.O644))
else:
Logs.pprint('RED', "Error no LINGUAS file found in po directory")
class po(Task.Task):
"""
Compiles .po files into .gmo files
"""
run_str = '${MSGFMT} -o ${TGT} ${SRC}'
color = 'BLUE'
class intltool(Task.Task):
"""
Calls intltool-merge to update translation files
"""
run_str = '${INTLTOOL} ${INTLFLAGS} ${INTLCACHE_ST:INTLCACHE} ${INTLPODIR} ${SRC} ${TGT}'
color = 'BLUE'
@conf
def find_msgfmt(conf):
"""
Detects msgfmt and sets the ``MSGFMT`` variable
"""
conf.find_program('msgfmt', var='MSGFMT')
@conf
def find_intltool_merge(conf):
"""
Detects intltool-merge
"""
if not conf.env.PERL:
conf.find_program('perl', var='PERL')
conf.env.INTLCACHE_ST = '--cache=%s'
conf.env.INTLFLAGS_DEFAULT = ['-q', '-u']
conf.find_program('intltool-merge', interpreter='PERL', var='INTLTOOL')
def configure(conf):
"""
Detects the program *msgfmt* and set *conf.env.MSGFMT*.
Detects the program *intltool-merge* and set *conf.env.INTLTOOL*.
It is possible to set INTLTOOL in the environment, but it must not have spaces in it::
$ INTLTOOL="/path/to/the program/intltool" waf configure
If a C/C++ compiler is present, execute a compilation test to find the header *locale.h*.
"""
conf.find_msgfmt()
conf.find_intltool_merge()
if conf.env.CC or conf.env.CXX:
conf.check(header_name='locale.h')
|
4,193 |
forward
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from .conv_tbc import ConvTBC
from typing import Dict, Optional
from torch import Tensor
@with_incremental_state
class LinearizedConvolution(ConvTBC):
"""An optimized version of nn.Conv1d.
At training time, this module uses ConvTBC, which is an optimized version
of Conv1d. At inference time, it optimizes incremental generation (i.e.,
one time step at a time) by replacing the convolutions with linear layers.
Note that the input order changes from training to inference.
"""
def __init__(self, in_channels, out_channels, kernel_size, **kwargs):
super().__init__(in_channels, out_channels, kernel_size, **kwargs)
self._linearized_weight = None
self.register_backward_hook(self._clear_linearized_weight)
def state_dict(self, destination=None, prefix="", keep_vars=False):
state = ConvTBC.state_dict(self, destination, prefix, keep_vars=keep_vars)
# don't store redundant _linearized_weight in checkpoints
if prefix + "_linearized_weight" in state:
del state[prefix + "_linearized_weight"]
return state
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + "." if name != "" else ""
if prefix + "_linearized_weight" in state_dict:
del state_dict[prefix + "_linearized_weight"]
@torch.jit.export
def METHOD_NAME(
self,
input,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
):
"""
Args:
incremental_state: Used to buffer signal; if not None, then input is
expected to contain a single frame. If the input order changes
between time steps, call reorder_incremental_state.
Input:
Time x Batch x Channel during training
Batch x Time x Channel during inference
"""
if incremental_state is None:
output = self.conv_tbc(input)
if self.kernel_size[0] > 1 and self.padding[0] > 0:
# remove future timesteps added by padding
output = output[: -self.padding[0], :, :]
return output
# reshape weight
weight = self._get_linearized_weight()
kw = self.kernel_size[0]
bsz = input.size(0) # input: bsz x len x dim
if kw > 1:
input = input.data
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is None:
input_buffer = input.new(bsz, kw, input.size(2)).zero_()
self._set_input_buffer(incremental_state, input_buffer)
else:
# shift buffer
input_buffer[:, :-1, :] = input_buffer[:, 1:, :].clone()
# append next input
input_buffer[:, -1, :] = input[:, -1, :]
input = input_buffer
with torch.no_grad():
output = F.linear(input.view(bsz, -1), weight, self.bias)
return output.view(bsz, 1, -1)
@torch.jit.unused
def reorder_incremental_state(
self,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
new_order,
):
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
input_buffer = input_buffer.index_select(0, new_order)
self._set_input_buffer(incremental_state, input_buffer)
@torch.jit.unused
def _get_input_buffer(
self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
):
return utils.get_incremental_state(self, incremental_state, "input_buffer")
@torch.jit.unused
def _set_input_buffer(
self,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
new_buffer,
):
return utils.set_incremental_state(
self, incremental_state, "input_buffer", new_buffer
)
@torch.jit.unused
def _get_linearized_weight(self):
if self._linearized_weight is None:
kw = self.kernel_size[0]
weight = self.weight.transpose(2, 1).transpose(1, 0).contiguous()
assert weight.size() == (self.out_channels, kw, self.in_channels)
return weight.view(self.out_channels, -1)
return self._linearized_weight
@torch.jit.unused
def _clear_linearized_weight(self, *args):
self._linearized_weight = None
|
4,194 |
test record deletion status default
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2023 TU Wien.
#
# Invenio-RDM-Records is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Tests for the tombstone field and record deletion field."""
import datetime
import pytest
from invenio_requests.resolvers.registry import ResolverRegistry
from invenio_rdm_records.records.api import RDMRecord
from invenio_rdm_records.records.systemfields.deletion_status import (
RecordDeletionStatus,
RecordDeletionStatusEnum,
)
from invenio_rdm_records.records.systemfields.tombstone import Tombstone
#
# Tombstone
#
def test_tombstone_creation(app):
"""Test the normal creation of a tombstone."""
t = Tombstone({})
assert t.removed_by is None
assert t.removal_reason is None
assert t.note == ""
assert t.removal_date
assert isinstance(t.removal_date, str)
assert t.citation_text == ""
assert t.is_visible
data = {
"removed_by": {"user": "1"},
"removal_reason": {"id": "spam"},
"note": "nothing in particular",
"removal_date": datetime.datetime.utcnow(),
"citation_text": "No citation available, sorry",
"is_visible": False,
}
t = Tombstone(data)
assert t.removed_by == data["removed_by"]
assert t.removal_reason == data["removal_reason"]
assert t.note == data["note"]
assert t.removal_date == data["removal_date"].isoformat()
assert t.citation_text == data["citation_text"]
assert not t.is_visible
def test_tombstone_invalid_removed_by(app):
"""Test the failure of tombstone creation if the `removed_by` entry is invalid."""
for invalid_value in [[], datetime.datetime.utcnow()]:
with pytest.raises(ValueError):
Tombstone({"removed_by": invalid_value})
def test_tombstone_valid_removed_by(app, users):
"""Test various ways to set the `removed_by` value for a tombstone."""
user = users[0]
# if the assigned value is an int or string, we assume it's a user ID
t = Tombstone({})
t.removed_by = user.id
assert t.removed_by == {"user": str(user.id)}
assert t.removed_by_proxy.resolve() == user
# this comes in handy for setting the system to be the one who deleted the record
t.removed_by = "system"
assert t.removed_by == {"user": "system"}
assert t.removed_by_proxy.resolve() == {
"id": "system",
"is_ghost": True,
"profile": {"full_name": "System"},
"username": "System",
}
# None should work as expected
t.removed_by = None
assert t.removed_by is None
assert t.removed_by_proxy is None
# setting a referenceable entity should use the `ResolverRegistry`
t.removed_by = user
assert ResolverRegistry.reference_entity(user) is not None
assert t.removed_by == {"user": str(user.id)}
assert t.removed_by_proxy.resolve() == user
#
# Record deletion status
#
def METHOD_NAME(app):
"""Test the default value of the deletion status."""
deletion_status = RecordDeletionStatus(None)
assert deletion_status.status == RecordDeletionStatusEnum.PUBLISHED.value
assert not deletion_status.is_deleted
def test_record_deletion_status_valid_values(app):
"""Test setting the deletion status to valid values."""
deletion_status = RecordDeletionStatus(RecordDeletionStatusEnum.PUBLISHED)
assert deletion_status.status == RecordDeletionStatusEnum.PUBLISHED.value
assert not deletion_status.is_deleted
deletion_status.status = RecordDeletionStatusEnum.DELETED
assert deletion_status.status == RecordDeletionStatusEnum.DELETED.value
assert deletion_status.is_deleted
deletion_status.status = RecordDeletionStatusEnum.MARKED
assert deletion_status.status == RecordDeletionStatusEnum.MARKED.value
assert deletion_status.is_deleted
def test_record_deletion_status_invalid_values(app):
"""Test setting the record deletion status to invalid values."""
for invalid_value in ["s", 1, []]:
with pytest.raises(ValueError):
RecordDeletionStatus(invalid_value)
def test_record_deletion_status(app, minimal_record):
"""Test if changing the record deletion status impacts the DB model."""
record = RDMRecord.create(minimal_record)
assert record.deletion_status.status == RecordDeletionStatusEnum.PUBLISHED.value
assert record.deletion_status._status == RecordDeletionStatusEnum.PUBLISHED
assert (
RecordDeletionStatusEnum(record.model.deletion_status)
== RecordDeletionStatusEnum.PUBLISHED
)
assert not record.deletion_status.is_deleted
assert not record.is_deleted
# NOTE that the `record.deletion_status.is_deleted` relates to the
# RDM record deletion workflow, while `record.is_deleted` is defined
# in `invenio_records.api.RecordBase` and marks the absolute deletion
# of the record
record.deletion_status = RecordDeletionStatusEnum.DELETED
assert record.deletion_status.status == RecordDeletionStatusEnum.DELETED.value
assert record.deletion_status._status == RecordDeletionStatusEnum.DELETED
assert (
RecordDeletionStatusEnum(record.model.deletion_status)
== RecordDeletionStatusEnum.DELETED
)
assert record.deletion_status.is_deleted
assert not record.is_deleted
|
4,195 |
uploaded image file
|
import os
from contextlib import contextmanager
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.files.base import ContentFile
from grandchallenge.algorithms.models import Algorithm, AlgorithmImage
from grandchallenge.archives.models import Archive, ArchiveItem
from grandchallenge.cases.models import Image, ImageFile
from grandchallenge.challenges.models import Challenge
from grandchallenge.components.models import (
ComponentInterface,
ComponentInterfaceValue,
)
from grandchallenge.core.fixtures import create_uploaded_image
from grandchallenge.evaluation.models import Method, Phase
from grandchallenge.evaluation.utils import SubmissionKindChoices
from grandchallenge.workstations.models import Workstation
def run():
print("👷 Creating Algorithm Evaluation Fixtures")
users = _get_users()
inputs = _get_inputs()
outputs = _get_outputs()
challenge_count = Challenge.objects.count()
archive = _create_archive(
creator=users["demo"], interfaces=inputs, suffix=challenge_count
)
_create_challenge(
creator=users["demo"],
participant=users["demop"],
archive=archive,
suffix=challenge_count,
inputs=inputs,
outputs=outputs,
)
_create_algorithm(
creator=users["demop"],
inputs=inputs,
outputs=outputs,
suffix=f"Image {challenge_count}",
)
_create_algorithm(
creator=users["demop"],
inputs=_get_json_file_inputs(),
outputs=outputs,
suffix=f"File {challenge_count}",
)
def _get_users():
users = get_user_model().objects.filter(username__in=["demo", "demop"])
return {u.username: u for u in users}
def _get_inputs():
return ComponentInterface.objects.filter(
slug__in=["generic-medical-image"]
)
def _get_json_file_inputs():
return [
ComponentInterface.objects.get_or_create(
title="JSON File",
relative_path="json-file",
kind=ComponentInterface.Kind.ANY,
store_in_database=False,
)[0]
]
def _get_outputs():
return ComponentInterface.objects.filter(
slug__in=["generic-medical-image", "results-json-file"]
)
def _create_archive(*, creator, interfaces, suffix, items=5):
a = Archive.objects.create(
title=f"Algorithm Evaluation {suffix} Test Set",
logo=create_uploaded_image(),
workstation=Workstation.objects.get(
slug=settings.DEFAULT_WORKSTATION_SLUG
),
)
a.add_editor(creator)
for n in range(items):
ai = ArchiveItem.objects.create(archive=a)
for interface in interfaces:
v = ComponentInterfaceValue.objects.create(interface=interface)
im = Image.objects.create(
name=f"Test Image {n}", width=10, height=10
)
im_file = ImageFile.objects.create(image=im)
with METHOD_NAME() as f:
im_file.file.save(f"test_image_{n}.mha", f)
im_file.save()
v.image = im
v.save()
ai.values.add(v)
return a
def _create_challenge(
*, creator, participant, archive, suffix, inputs, outputs
):
c = Challenge.objects.create(
short_name=f"algorithm-evaluation-{suffix}",
creator=creator,
hidden=False,
logo=create_uploaded_image(),
)
c.add_participant(participant)
p = Phase.objects.create(challenge=c, title="Phase 1")
p.algorithm_inputs.set(inputs)
p.algorithm_outputs.set(outputs)
p.title = "Algorithm Evaluation"
p.submission_kind = SubmissionKindChoices.ALGORITHM
p.archive = archive
p.score_jsonpath = "score"
p.submissions_limit_per_user_per_period = 10
p.save()
m = Method(creator=creator, phase=p)
with _uploaded_container_image() as container:
m.image.save("algorithm_io.tar", container)
def _create_algorithm(*, creator, inputs, outputs, suffix):
algorithm = Algorithm.objects.create(
title=f"Test Algorithm Evaluation {suffix}",
logo=create_uploaded_image(),
)
algorithm.inputs.set(inputs)
algorithm.outputs.set(outputs)
algorithm.add_editor(creator)
algorithm_image = AlgorithmImage(creator=creator, algorithm=algorithm)
with _uploaded_container_image() as container:
algorithm_image.image.save("algorithm_io.tar", container)
@contextmanager
def _uploaded_container_image():
path = "scripts/algorithm_io.tar"
yield from _uploaded_file(path=path)
@contextmanager
def METHOD_NAME():
path = "scripts/image10x10x10.mha"
yield from _uploaded_file(path=path)
def _uploaded_file(*, path):
with open(os.path.join(settings.SITE_ROOT, path), "rb") as f:
with ContentFile(f.read()) as content:
yield content
|
4,196 |
balance
|
from PyQt5.QtCore import pyqtProperty, pyqtSignal, pyqtSlot, QObject
from electrum.logging import get_logger
from .auth import auth_protect, AuthMixin
from .qetransactionlistmodel import QETransactionListModel
from .qetypes import QEAmount
from .qewallet import QEWallet
class QEAddressDetails(AuthMixin, QObject):
_logger = get_logger(__name__)
detailsChanged = pyqtSignal()
def __init__(self, parent=None):
super().__init__(parent)
self._wallet = None
self._address = None
self._label = None
self._frozen = False
self._scriptType = None
self._status = None
self._balance = QEAmount()
self._pubkeys = None
self._privkey = None
self._derivationPath = None
self._numtx = 0
self._historyModel = None
walletChanged = pyqtSignal()
@pyqtProperty(QEWallet, notify=walletChanged)
def wallet(self):
return self._wallet
@wallet.setter
def wallet(self, wallet: QEWallet):
if self._wallet != wallet:
self._wallet = wallet
self.walletChanged.emit()
addressChanged = pyqtSignal()
@pyqtProperty(str, notify=addressChanged)
def address(self):
return self._address
@address.setter
def address(self, address: str):
if self._address != address:
self._logger.debug('address changed')
self._address = address
self.addressChanged.emit()
self.update()
@pyqtProperty(str, notify=detailsChanged)
def scriptType(self):
return self._scriptType
@pyqtProperty(QEAmount, notify=detailsChanged)
def METHOD_NAME(self):
return self._balance
@pyqtProperty('QStringList', notify=detailsChanged)
def pubkeys(self):
return self._pubkeys
@pyqtProperty(str, notify=detailsChanged)
def privkey(self):
return self._privkey
@pyqtProperty(str, notify=detailsChanged)
def derivationPath(self):
return self._derivationPath
@pyqtProperty(int, notify=detailsChanged)
def numTx(self):
return self._numtx
frozenChanged = pyqtSignal()
@pyqtProperty(bool, notify=frozenChanged)
def isFrozen(self):
return self._frozen
labelChanged = pyqtSignal()
@pyqtProperty(str, notify=labelChanged)
def label(self):
return self._label
@pyqtSlot(bool)
def freeze(self, freeze: bool):
if freeze != self._frozen:
self._wallet.wallet.set_frozen_state_of_addresses([self._address], freeze=freeze)
self._frozen = freeze
self.frozenChanged.emit()
self._wallet.balanceChanged.emit()
@pyqtSlot(str)
def setLabel(self, label: str):
if label != self._label:
self._wallet.wallet.set_label(self._address, label)
self._label = label
self.labelChanged.emit()
historyModelChanged = pyqtSignal()
@pyqtProperty(QETransactionListModel, notify=historyModelChanged)
def historyModel(self):
if self._historyModel is None:
self._historyModel = QETransactionListModel(self._wallet.wallet,
onchain_domain=[self._address], include_lightning=False)
return self._historyModel
@pyqtSlot()
def requestShowPrivateKey(self):
self.retrieve_private_key()
@auth_protect(method='wallet')
def retrieve_private_key(self):
try:
self._privkey = self._wallet.wallet.export_private_key(self._address, self._wallet.password)
except Exception:
self._privkey = ''
self.detailsChanged.emit()
def update(self):
if self._wallet is None:
self._logger.error('wallet undefined')
return
self._frozen = self._wallet.wallet.is_frozen_address(self._address)
self.frozenChanged.emit()
self._scriptType = self._wallet.wallet.get_txin_type(self._address)
self._label = self._wallet.wallet.get_label_for_address(self._address)
c, u, x = self._wallet.wallet.get_addr_balance(self._address)
self._balance = QEAmount(amount_sat=c + u + x)
self._pubkeys = self._wallet.wallet.get_public_keys(self._address)
self._derivationPath = self._wallet.wallet.get_address_path_str(self._address)
if self._wallet.derivationPrefix:
self._derivationPath = self._derivationPath.replace('m', self._wallet.derivationPrefix)
self._numtx = self._wallet.wallet.adb.get_address_history_len(self._address)
self.detailsChanged.emit()
|
4,197 |
test composition level stateful function resets
|
import psyneulink as pnl
import pytest
import numpy as np
@pytest.mark.composition
class TestCompositionMethods:
def test_get_output_values_prop(self):
A = pnl.ProcessingMechanism()
c = pnl.Composition()
c.add_node(A)
result = c.run(inputs={A: [1]}, num_trials=2)
assert result == c.output_values == [np.array([1])]
def test_add_pathway_methods_return_pathway(self):
c = pnl.Composition()
p = c.add_linear_processing_pathway(pathway=[pnl.ProcessingMechanism(), pnl.ProcessingMechanism()])
assert isinstance(p, pnl.Pathway)
c = pnl.Composition()
p = c.add_linear_learning_pathway(pathway=[pnl.ProcessingMechanism(), pnl.ProcessingMechanism()],
learning_function=pnl.BackPropagation)
assert isinstance(p, pnl.Pathway)
# test whether xor model created as autodiff composition learns properly
@pytest.mark.pytorch
@pytest.mark.parametrize("minibatch_size", [1, 2, 3, 4])
def test_learning_output_shape(self, autodiff_mode, minibatch_size):
'''
Tests for correct output from composition.learn
Expected: All results from last epoch
'''
xor_in = pnl.TransferMechanism(name='xor_in',
default_variable=np.zeros(2))
xor_hid = pnl.TransferMechanism(name='xor_hid',
default_variable=np.zeros(10),
function=pnl.Logistic())
xor_out = pnl.TransferMechanism(name='xor_out',
default_variable=np.zeros(1),
function=pnl.Logistic())
hid_map = pnl.MappingProjection(matrix=np.random.rand(2,10), sender=xor_in, receiver=xor_hid)
out_map = pnl.MappingProjection(matrix=np.random.rand(10,1))
xor = pnl.AutodiffComposition()
xor.add_node(xor_in)
xor.add_node(xor_hid)
xor.add_node(xor_out)
xor.add_projection(sender=xor_in, projection=hid_map, receiver=xor_hid)
xor.add_projection(sender=xor_hid, projection=out_map, receiver=xor_out)
xor_inputs = np.array( # the inputs we will provide to the model
[[0, 0], [0, 1], [1, 0], [1, 1]])
xor_targets = np.array( # the outputs we wish to see from the model
[[0], [1], [1], [0]])
results = xor.learn(inputs={"inputs": {xor_in:xor_inputs},
"targets": {xor_out:xor_targets},
"epochs": 10
},
minibatch_size=minibatch_size,
execution_mode=autodiff_mode)
assert len(results) == 1
assert len(xor.learning_results) == 4 // minibatch_size
def METHOD_NAME(self):
A = pnl.TransferMechanism(
name='A',
integrator_mode=True,
integration_rate=0.5
)
B = pnl.TransferMechanism(
name='B',
integrator_mode=True,
integration_rate=0.5
)
C = pnl.TransferMechanism(name='C')
comp = pnl.Composition(
pathways=[[A, C], [B, C]]
)
A.log.set_log_conditions('value')
B.log.set_log_conditions('value')
comp.run(
inputs={A: [1.0],
B: [1.0]},
reset_stateful_functions_when={
A: pnl.AtTrial(3),
B: pnl.AtTrial(4)
},
reset_stateful_functions_to = {
A: 0.5,
B: 0.5
},
num_trials = 5
)
# Mechanism A - resets to 0.5 at the beginning of Trial 3. Its value at the end of Trial 3 will
# be exactly one step of integration forward from 0.5.
# Trial 0: 0.5, Trial 1: 0.75, Trial 2: 0.875, Trial 3: 0.75, Trial 4: 0.875
np.testing.assert_allclose(
A.log.nparray_dictionary('value')[comp.default_execution_id]['value'],
[
[np.array([0.5])],
[np.array([0.75])],
[np.array([0.875])],
[np.array([0.75])],
[np.array([0.875])]
]
)
# Mechanism B - resets to 0.5 at the beginning of Trial 4. Its value at the end of Trial 4 will
# be exactly one step of integration forward from 0.5.
# Trial 0: 0.5, Trial 1: 0.75, Trial 2: 0.875, Trial 3: 0.9375. Trial 4: 0.75
np.testing.assert_allclose(
B.log.nparray_dictionary('value')[comp.default_execution_id]['value'],
[
[np.array([0.5])],
[np.array([0.75])],
[np.array([0.875])],
[np.array([0.9375])],
[np.array([0.75])]
]
)
comp.reset()
comp.run(
inputs={A: [1.0],
B: [1.0]}
)
# Mechanisms A and B should have been reset, so verify that their new values are equal to their values at the
# end of trial 0
assert A.parameters.value.get(comp) == B.parameters.value.get(comp) == [[0.5]]
|
4,198 |
main
|
"""Image classification benchmark.
This script runs image classification benchmark with "dogs vs cats" datasets.
It supports the following 3 models:
- EfficientNetV2B0
- Xception
- ResNet50V2
To run the benchmark, make sure you are in model_benchmark/ directory, and run
the command below:
python3 -m model_benchmark.image_classification_benchmark \
--model="EfficientNetV2B0" \
--epochs=2 \
--batch_size=32 \
--mixed_precision_policy="mixed_float16"
"""
import time
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from absl import app
from absl import flags
from absl import logging
from model_benchmark.benchmark_utils import BenchmarkMetricsCallback
import keras_core as keras
flags.DEFINE_string("model", "EfficientNetV2B0", "The model to benchmark.")
flags.DEFINE_integer("epochs", 1, "The number of epochs.")
flags.DEFINE_integer("batch_size", 4, "Batch Size.")
flags.DEFINE_string(
"mixed_precision_policy",
"mixed_float16",
"The global precision policy to use, e.g., 'mixed_float16' or 'float32'.",
)
FLAGS = flags.FLAGS
BATCH_SIZE = 32
IMAGE_SIZE = (224, 224)
CHANNELS = 3
MODEL_MAP = {
"EfficientNetV2B0": keras.applications.EfficientNetV2B0,
"Xception": keras.applications.Xception,
"ResNet50V2": keras.applications.ResNet50V2,
}
def load_data():
# Load cats vs dogs dataset, and split into train and validation sets.
train_dataset, val_dataset = tfds.load(
"cats_vs_dogs", split=["train[:90%]", "train[90%:]"], as_supervised=True
)
resizing = keras.layers.Resizing(
IMAGE_SIZE[0], IMAGE_SIZE[1], crop_to_aspect_ratio=True
)
def preprocess_inputs(image, label):
image = tf.cast(image, "float32")
return resizing(image), label
train_dataset = (
train_dataset.map(
preprocess_inputs, num_parallel_calls=tf.data.AUTOTUNE
)
.batch(FLAGS.batch_size)
.prefetch(tf.data.AUTOTUNE)
)
val_dataset = (
val_dataset.map(preprocess_inputs, num_parallel_calls=tf.data.AUTOTUNE)
.batch(FLAGS.batch_size)
.cache()
.prefetch(tf.data.AUTOTUNE)
)
return train_dataset, val_dataset
def load_model():
model_class = MODEL_MAP[FLAGS.model]
# Load the model selected via the --model flag and add a classification head.
model = model_class(include_top=False, weights="imagenet")
classifier = keras.models.Sequential(
[
keras.Input([IMAGE_SIZE[0], IMAGE_SIZE[1], CHANNELS]),
model,
keras.layers.GlobalAveragePooling2D(),
keras.layers.Dense(2),
]
)
return classifier
def METHOD_NAME(_):
keras.mixed_precision.set_dtype_policy(FLAGS.mixed_precision_policy)
logging.info(
"Benchmarking configs...\n"
"=========================\n"
f"MODEL: {FLAGS.model}\n"
f"TASK: image classification/dogs-vs-cats \n"
f"BATCH_SIZE: {FLAGS.batch_size}\n"
f"EPOCHS: {FLAGS.epochs}\n"
"=========================\n"
)
# Load datasets.
train_ds, validation_ds = load_data()
# Load the model.
classifier = load_model()
lr = keras.optimizers.schedules.PolynomialDecay(
5e-4,
decay_steps=train_ds.cardinality() * FLAGS.epochs,
end_learning_rate=0.0,
)
optimizer = keras.optimizers.Adam(lr)
loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
benchmark_metrics_callback = BenchmarkMetricsCallback(
start_batch=1,
stop_batch=train_ds.cardinality().numpy() - 1,
)
classifier.compile(
optimizer=optimizer,
loss=loss,
metrics=["sparse_categorical_accuracy"],
)
# Start training.
logging.info("Starting Training...")
st = time.time()
history = classifier.fit(
train_ds,
validation_data=validation_ds,
epochs=FLAGS.epochs,
callbacks=[benchmark_metrics_callback],
)
wall_time = time.time() - st
validation_accuracy = history.history["val_sparse_categorical_accuracy"][-1]
examples_per_second = (
np.mean(np.array(benchmark_metrics_callback.state["throughput"]))
* FLAGS.batch_size
)
logging.info("Training Finished!")
logging.info(f"Wall Time: {wall_time:.4f} seconds.")
logging.info(f"Validation Accuracy: {validation_accuracy:.4f}")
logging.info(f"examples_per_second: {examples_per_second:.4f}")
if __name__ == "__main__":
app.run(METHOD_NAME)
|
4,199 |
json
|
"""
*******************************************************************
Copyright (c) 2017, 2019 IBM Corp.
All rights reserved. This program and the accompanying materials
are made available under the terms of the Eclipse Public License v2.0
and Eclipse Distribution License v1.0 which accompany this distribution.
The Eclipse Public License is available at
http://www.eclipse.org/legal/epl-v10.html
and the Eclipse Distribution License is available at
http://www.eclipse.org/org/documents/edl-v10.php.
Contributors:
Ian Craggs - initial implementation and/or documentation
*******************************************************************
"""
import sys
class MQTTException(Exception):
pass
class SubscribeOptions(object):
"""The MQTT v5.0 subscribe options class.
The options are:
qos: As in MQTT v3.1.1.
noLocal: True or False. If set to True, the subscriber will not receive its own publications.
retainAsPublished: True or False. If set to True, the retain flag on received publications will be as set
by the publisher.
retainHandling: RETAIN_SEND_ON_SUBSCRIBE, RETAIN_SEND_IF_NEW_SUB or RETAIN_DO_NOT_SEND
Controls when the broker should send retained messages:
- RETAIN_SEND_ON_SUBSCRIBE: on any successful subscribe request
- RETAIN_SEND_IF_NEW_SUB: only if the subscribe request is new
- RETAIN_DO_NOT_SEND: never send retained messages
"""
# retain handling options
RETAIN_SEND_ON_SUBSCRIBE, RETAIN_SEND_IF_NEW_SUB, RETAIN_DO_NOT_SEND = range(
0, 3)
def __init__(self, qos=0, noLocal=False, retainAsPublished=False, retainHandling=RETAIN_SEND_ON_SUBSCRIBE):
"""
qos: 0, 1 or 2. 0 is the default.
noLocal: True or False. False is the default and corresponds to MQTT v3.1.1 behavior.
retainAsPublished: True or False. False is the default and corresponds to MQTT v3.1.1 behavior.
retainHandling: RETAIN_SEND_ON_SUBSCRIBE, RETAIN_SEND_IF_NEW_SUB or RETAIN_DO_NOT_SEND
RETAIN_SEND_ON_SUBSCRIBE is the default and corresponds to MQTT v3.1.1 behavior.
"""
object.__setattr__(self, "names",
["QoS", "noLocal", "retainAsPublished", "retainHandling"])
self.QoS = qos # bits 0,1
self.noLocal = noLocal # bit 2
self.retainAsPublished = retainAsPublished # bit 3
self.retainHandling = retainHandling # bits 4 and 5: 0, 1 or 2
assert self.QoS in [0, 1, 2]
assert self.retainHandling in [
0, 1, 2], "Retain handling should be 0, 1 or 2"
def __setattr__(self, name, value):
if name not in self.names:
raise MQTTException(
name + " Attribute name must be one of "+str(self.names))
object.__setattr__(self, name, value)
def pack(self):
assert self.QoS in [0, 1, 2]
assert self.retainHandling in [
0, 1, 2], "Retain handling should be 0, 1 or 2"
noLocal = 1 if self.noLocal else 0
retainAsPublished = 1 if self.retainAsPublished else 0
data = [(self.retainHandling << 4) | (retainAsPublished << 3) |
(noLocal << 2) | self.QoS]
if sys.version_info[0] >= 3:
buffer = bytes(data)
else:
buffer = bytearray(data)
return buffer
def unpack(self, buffer):
b0 = buffer[0]
self.retainHandling = ((b0 >> 4) & 0x03)
self.retainAsPublished = True if ((b0 >> 3) & 0x01) == 1 else False
self.noLocal = True if ((b0 >> 2) & 0x01) == 1 else False
self.QoS = (b0 & 0x03)
assert self.retainHandling in [
0, 1, 2], "Retain handling should be 0, 1 or 2, not %d" % self.retainHandling
assert self.QoS in [
0, 1, 2], "QoS should be 0, 1 or 2, not %d" % self.QoS
return 1
def __repr__(self):
return str(self)
def __str__(self):
return "{QoS="+str(self.QoS)+", noLocal="+str(self.noLocal) +\
", retainAsPublished="+str(self.retainAsPublished) +\
", retainHandling="+str(self.retainHandling)+"}"
def METHOD_NAME(self):
data = {
"QoS": self.QoS,
"noLocal": self.noLocal,
"retainAsPublished": self.retainAsPublished,
"retainHandling": self.retainHandling,
}
return data
|