id (int64, 0-6k) | code (string, 4k-8k chars) | code_compressed (null)
---|---|---|
900 |
import random
class Colour:
BLACK = "\033[30m"
RED = "\033[91m"
GREEN = "\033[32m"
END = "\033[0m"
suits = (
Colour.RED + "Hearts" + Colour.END,
Colour.RED + "Diamonds" + Colour.END,
Colour.BLACK + "Spades" + Colour.END,
Colour.BLACK + "Clubs" + Colour.END,
)
ranks = (
"Two",
"Three",
"Four",
"Five",
"Six",
"Seven",
"Eight",
"Nine",
"Ten",
"Jack",
"Queen",
"King",
"Ace",
)
values = {
"Two": 2,
"Three": 3,
"Four": 4,
"Five": 5,
"Six": 6,
"Seven": 7,
"Eight": 8,
"Nine": 9,
"Ten": 10,
"Jack": 10,
"Queen": 10,
"King": 10,
"Ace": 11,
}
playing = True
class Card:
def __init__(self, suit, rank):
self.suit = suit
self.rank = rank
def __str__(self):
return self.rank + " of " + self.suit
class Deck:
def __init__(self):
self.deck = []
for suit in suits:
for rank in ranks:
self.deck.append(Card(suit, rank))
def __str__(self):
deck_comp = ""
for card in self.deck:
deck_comp += "\n " + card.__str__()
return "The deck has:" + deck_comp
def shuffle(self):
random.shuffle(self.deck)
def deal(self):
single_card = self.deck.pop()
return single_card
class Hand:
def __init__(self):
self.cards = []
self.value = 0
self.aces = 0 # to keep track of aces
def add_card(self, card):
self.cards.append(card)
self.value += values[card.rank]
if card.rank == "Ace":
self.aces += 1
def adjust_for_ace(self):
while self.value > 21 and self.aces:
self.value -= 10
self.aces -= 1
class Chips:
def __init__(self):
self.total = 100
self.bet = 0
def win_bet(self):
self.total += self.bet
def lose_bet(self):
self.total -= self.bet
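# Ask the player for a bet and keep prompting until it is a positive integer no larger than their chip balance.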
def take_bet(chips):
while True:
try:
chips.bet = int(input("How many chips would you like to bet? "))
except ValueError:
print("Your bet must be an integer! Try again.")
else:
if chips.bet > chips.total or chips.bet <= 0:
print(
"Your bet must be a positive whole number no greater than your balance! Your current balance is: ",
chips.total,
)
else:
break
def hit(deck, hand):
hand.add_card(deck.deal())
hand.adjust_for_ace()
def hit_or_stand(deck, hand):
global playing
while True:
x = input("Would you like to Hit or Stand? Enter '1' or '0' ")
if x.lower() == "1":
hit(deck, hand)
elif x.lower() == "0":
print("You chose to stand. Dealer will hit.")
playing = False
else:
print("Wrong input, please try again.")
continue
break
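# During the player's turn only one dealer card is shown; show_all reveals both hands at the end of the round.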
def show_some(player, dealer):
print("\nDealer's Hand:")
print(" { hidden card }")
print("", dealer.cards[1])
print("\nYour Hand:", *player.cards, sep="\n ")
def show_all(player, dealer):
print("\nDealer's Hand:", *dealer.cards, sep="\n ")
print("Dealer's Hand =", dealer.value)
print("\nYour Hand:", *player.cards, sep="\n ")
print("Your Hand =", player.value)
def player_busts(player, dealer, chips):
print("You are BUSTED !")
chips.lose_bet()
def player_wins(player, dealer, chips):
print("You are the winner!")
chips.win_bet()
def METHOD_NAME(player, dealer, chips):
print("Dealer has BUSTED !")
chips.win_bet()
def dealer_wins(player, dealer, chips):
print("Dealer is the winner!")
chips.lose_bet()
def push(player, dealer):
print("The match is a tie!")
# GAMEPLAY
player_chips = Chips()
while True:
print("\t **********************************************************")
print(
"\t Welcome to the game Casino - BLACK JACK ! "
)
print("\t **********************************************************")
print(Colour.BLACK + "\t ***************")
print("\t * A *")
print("\t * *")
print("\t * * *")
print("\t * *** *")
print("\t * ***** *")
print("\t * *** *")
print("\t * * *")
print("\t * *")
print("\t * *")
print("\t ***************" + Colour.END)
print(
"\nRULES: Get as close to 21 as you can but if you get more than 21 you will lose!\n Aces count as 1 or 11."
)
deck = Deck()
deck.shuffle()
player_hand = Hand()
player_hand.add_card(deck.deal())
player_hand.add_card(deck.deal())
dealer_hand = Hand()
dealer_hand.add_card(deck.deal())
dealer_hand.add_card(deck.deal())
take_bet(player_chips)
show_some(player_hand, dealer_hand)
while playing:
hit_or_stand(deck, player_hand)
show_some(player_hand, dealer_hand)
if player_hand.value > 21:
player_busts(player_hand, dealer_hand, player_chips)
break
if player_hand.value <= 21:
while dealer_hand.value < 17:
hit(deck, dealer_hand)
show_all(player_hand, dealer_hand)
if dealer_hand.value > 21:
METHOD_NAME(player_hand, dealer_hand, player_chips)
elif dealer_hand.value > player_hand.value:
dealer_wins(player_hand, dealer_hand, player_chips)
elif dealer_hand.value < player_hand.value:
player_wins(player_hand, dealer_hand, player_chips)
else:
push(player_hand, dealer_hand)
print("\nYour current balance stands at", player_chips.total)
if player_chips.total > 0:
new_game = input("Would you like to play another hand? Enter '1' or '0' ")
if new_game.lower() == "1":
playing = True
continue
else:
print(
"Thanks for playing!\n"
+ Colour.GREEN
+ "\t$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n \t Congratulations! You won "
+ str(player_chips.total)
+ " coins!\n\t$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n "
+ Colour.END
)
break
else:
print(
"Oops! You have bet all your chips and we are sorry you can't play more.\nThanks for playing! Do come again to Casino BLACK JACK!"
)
break
| null |
901 |
# Drakkar-Software OctoBot-Tentacles
# Copyright (c) Drakkar-Software, All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
import asyncio
import time
import asyncprawcore.exceptions
import logging
import octobot_commons.constants as commons_constants
import octobot_services.channel as services_channel
import octobot_services.constants as services_constants
import octobot_services.service_feeds as service_feeds
import tentacles.Services.Services_bases as Services_bases
class RedditServiceFeedChannel(services_channel.AbstractServiceFeedChannel):
pass
class RedditServiceFeed(service_feeds.AbstractServiceFeed):
FEED_CHANNEL = RedditServiceFeedChannel
REQUIRED_SERVICES = [Services_bases.RedditService]
MAX_CONNECTION_ATTEMPTS = 10
def __init__(self, config, main_async_loop, bot_id):
service_feeds.AbstractServiceFeed.__init__(self, config, main_async_loop, bot_id)
self.subreddits = None
self.counter = 0
self.connect_attempts = 0
self.credentials_ok = False
self.listener_task = None
# merge new config into existing config
def update_feed_config(self, config):
if services_constants.CONFIG_REDDIT_SUBREDDITS in self.feed_config:
self.feed_config[services_constants.CONFIG_REDDIT_SUBREDDITS] = {
**self.feed_config[services_constants.CONFIG_REDDIT_SUBREDDITS],
**config[services_constants.CONFIG_REDDIT_SUBREDDITS]}
else:
self.feed_config[services_constants.CONFIG_REDDIT_SUBREDDITS] = config[
services_constants.CONFIG_REDDIT_SUBREDDITS]
def _init_subreddits(self):
self.subreddits = ""
for symbol in self.feed_config[services_constants.CONFIG_REDDIT_SUBREDDITS]:
for subreddit in self.feed_config[services_constants.CONFIG_REDDIT_SUBREDDITS][symbol]:
if subreddit not in self.subreddits:
if self.subreddits:
self.subreddits = self.subreddits + "+" + subreddit
else:
self.subreddits = self.subreddits + subreddit
def _initialize(self):
if not self.subreddits:
self._init_subreddits()
def _something_to_watch(self):
return services_constants.CONFIG_REDDIT_SUBREDDITS in self.feed_config and self.feed_config[
services_constants.CONFIG_REDDIT_SUBREDDITS]
@staticmethod
def METHOD_NAME(entry_age):
if entry_age > 0:
# entry in history => weight proportional to entry's age
# last 12 hours: weight = 4
# last 2 days: weight = 3
# last 7 days: weight = 2
# older: weight = 1
if entry_age / commons_constants.HOURS_TO_SECONDS <= 12:
return 4
elif entry_age / commons_constants.DAYS_TO_SECONDS <= 2:
return 3
elif entry_age / commons_constants.DAYS_TO_SECONDS <= 7:
return 2
else:
return 1
# new entry => max weight
return 5
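# Stream submissions from the configured subreddits and forward each one to consumers with an age-based weight.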
async def _start_listener(self):
# avoid debug log at each asyncprawcore fetch
logging.getLogger("asyncprawcore").setLevel(logging.WARNING)
subreddit = await self.services[0].get_endpoint().subreddit(self.subreddits)
start_time = time.time()
async for entry in subreddit.stream.submissions():
self.credentials_ok = True
self.connect_attempts = 0
self.counter += 1
# check if the entry is in the last-100 history or if it's a new entry (new posts are more valuable)
# the older the entry is, the less weight it gets
entry_age_when_feed_started_in_sec = start_time - entry.created_utc
entry_weight = self.METHOD_NAME(entry_age_when_feed_started_in_sec)
await self._async_notify_consumers(
{
services_constants.FEED_METADATA: entry.subreddit.display_name.lower(),
services_constants.CONFIG_REDDIT_ENTRY: entry,
services_constants.CONFIG_REDDIT_ENTRY_WEIGHT: entry_weight
}
)
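# Keep the listener running, retrying on recoverable errors, until a stop is requested or too many connection attempts fail.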
async def _start_listener_task(self):
while not self.should_stop and self.connect_attempts < self.MAX_CONNECTION_ATTEMPTS:
try:
await self._start_listener()
except asyncprawcore.exceptions.RequestException:
# probably a connection loss, try again
time.sleep(self._SLEEPING_TIME_BEFORE_RECONNECT_ATTEMPT_SEC)
except asyncprawcore.exceptions.InvalidToken as e:
# expired, try again
self.logger.exception(e, True, f"Error when receiving Reddit feed: '{e}'")
self.logger.info(f"Try to continue after {self._SLEEPING_TIME_BEFORE_RECONNECT_ATTEMPT_SEC} seconds.")
time.sleep(self._SLEEPING_TIME_BEFORE_RECONNECT_ATTEMPT_SEC)
except asyncprawcore.exceptions.ServerError as e:
# server error, try again
self.logger.exception(e, True, f"Error when receiving Reddit feed: '{e}'")
self.logger.info(f"Try to continue after {self._SLEEPING_TIME_BEFORE_RECONNECT_ATTEMPT_SEC} seconds.")
time.sleep(self._SLEEPING_TIME_BEFORE_RECONNECT_ATTEMPT_SEC)
except asyncprawcore.exceptions.OAuthException as e:
self.logger.exception(e, True, f"Error when receiving Reddit feed: '{e}' this may mean that the Reddit "
f"login info in config.json is wrong")
self.keep_running = False
self.should_stop = True
except asyncprawcore.exceptions.ResponseException as e:
message_complement = "that the Reddit login info in config.json is invalid." \
if not self.credentials_ok else \
f"a temporary issue; will try to continue after {self._SLEEPING_TIME_BEFORE_RECONNECT_ATTEMPT_SEC} seconds."
self.logger.exception(e, True,
f"Error when receiving Reddit feed: '{e}' this may mean {message_complement}")
if not self.credentials_ok:
self.connect_attempts += 1
else:
self.connect_attempts += 0.1
time.sleep(self._SLEEPING_TIME_BEFORE_RECONNECT_ATTEMPT_SEC)
except Exception as e:
self.logger.error(f"Error when receiving Reddit feed: '{e}'")
self.logger.exception(e, True, f"Error when receiving Reddit feed: '{e}'")
self.keep_running = False
self.should_stop = True
return False
async def _start_service_feed(self):
self.listener_task = asyncio.create_task(self._start_listener_task())
return True
async def stop(self):
await super().stop()
if self.listener_task is not None:
self.listener_task.cancel()
self.listener_task = None
| null |
902 |
################################################################################
# Creme is a free/open-source Customer Relationship Management software
# Copyright (C) 2015-2023 Hybird
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
################################################################################
from __future__ import annotations
import re
from html.entities import entitydefs as html_entities
from typing import TYPE_CHECKING
import bleach
from bleach.css_sanitizer import CSSSanitizer
from django.conf import settings
from django.utils.encoding import force_str
from django.utils.html import mark_safe
if TYPE_CHECKING:
from typing import Callable, Dict, Sequence, Union
AllowedAttributesDict = Dict[str, Union[Sequence[str], Callable[[str, str, str], bool]]]
IMG_SAFE_ATTRIBUTES = {'title', 'alt', 'width', 'height'}
ALLOWED_ATTRIBUTES: AllowedAttributesDict = {
**bleach.ALLOWED_ATTRIBUTES,
'*': ['style', 'class'],
'a': ['href', 'rel'],
'img': ['src', *IMG_SAFE_ATTRIBUTES], # NB: 'filter_img_src' can be used here
}
ALLOWED_TAGS = {
'a', 'abbr', 'acronym', 'address', 'area',
'article', 'aside', # 'audio',
'b', 'big', 'blockquote', 'br', 'button',
'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1',
'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins',
'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter',
'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option',
'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong',
'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot',
'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', # 'video',
'html', 'head', 'title', 'body',
# 'style' # TODO: if we allow <style>, we have to sanitize the inline CSS (it's hard)
}
# TODO: see html5lib: mathml_elements, svg_elements ??
ALLOWED_STYLES = {
# 'azimuth',
'background-color',
'border-bottom-color', 'border-collapse', 'border-color',
'border-left-color', 'border-right-color', 'border-top-color',
'clear',
'color',
# 'cursor',
'direction', 'display', 'elevation', 'float',
'font', 'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
'height', 'letter-spacing', 'line-height', 'overflow',
# 'pause', 'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
# 'speak', 'speak-header', 'speak-numeral', 'speak-punctuation', 'speech-rate',
# 'stress',
'text-align', 'text-decoration', 'text-indent',
'unicode-bidi', 'vertical-align',
# 'voice-family', 'volume',
'white-space', 'width',
}
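# Attribute filter for <img> tags: keep only the safe attributes, and accept "src" only when it points to our own MEDIA_URL.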
def filter_img_src(tag, attr, value):
if attr in IMG_SAFE_ATTRIBUTES:
return True
# XXX: this feature is probably broken (& not used) -- see urls.py
# TODO: remove the external image feature ??
if attr == 'src':
return value.startswith(settings.MEDIA_URL)
return False
def sanitize_html(html: str, allow_external_img: bool = False) -> str:
attributes: AllowedAttributesDict = (
ALLOWED_ATTRIBUTES
if allow_external_img else
{**ALLOWED_ATTRIBUTES, 'img': filter_img_src}
)
return bleach.clean(
html,
tags=ALLOWED_TAGS, attributes=attributes,
# styles=ALLOWED_STYLES,
css_sanitizer=CSSSanitizer(allowed_css_properties=ALLOWED_STYLES),
strip=True,
)
JSON_ESCAPES = {
ord('\\'): '\\u005C',
ord('>'): '\\u003E',
ord('<'): '\\u003C',
ord('&'): '\\u0026',
ord('\u2028'): '\\u2028',
ord('\u2029'): '\\u2029'
}
# Escape every ASCII character with a value less than 32.
# JSON_ESCAPES.update((ord('%c' % z), '\\u%04X' % z) for z in range(32))
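# Escape characters that could break out of a JSON value embedded in an HTML <script> block.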
def escapejson(value: str) -> str:
return mark_safe(force_str(value).translate(JSON_ESCAPES))
def METHOD_NAME(text: str) -> str:
""" Remove HTML markup from a string and replace HTML entities with their unicode equivalents.
Thanks to:
http://effbot.org/zone/re-sub.htm#strip-html
"""
def fix_up(m):
sub_text = m.group(0)
startswith = sub_text.startswith
if startswith('<'):
return '' # ignore tags
if startswith('&'):
if startswith('&#'):
try:
if startswith('&#x'):
return chr(int(sub_text[3:-1], 16))
else:
return chr(int(sub_text[2:-1]))
except ValueError:
pass
else:
entity = html_entities.get(sub_text[1:-1])
if entity:
return entity # TODO: encode ?
return sub_text # Leave as is
return re.sub(r'(?s)<[^>]*>|&#?\w+;', fix_up, text)
| null |
903 |
from datetime import timedelta
from os.path import exists
from django.db.transaction import atomic
from creme.creme_core.creme_jobs import temp_files_cleaner_type
from creme.creme_core.models import (
FakeDocument,
FakeFileComponent,
FakeFolder,
FileRef,
Job,
)
from creme.creme_core.utils.date_period import date_period_registry
from .. import base
class FileRefTestCase(base.CremeTestCase):
def _get_job(self, days=1):
job = self.get_object_or_fail(Job, type_id=temp_files_cleaner_type.id)
job.data = {'delay': date_period_registry.get_period('days', days).as_dict()}
job.save()
return job
@staticmethod
def _oldify_temp_file(temp_file, days):
"Make the instance older."
FileRef.objects.filter(
id=temp_file.id,
).update(
created=(
temp_file.created
- date_period_registry.get_period('days', days).as_timedelta()
- timedelta(hours=1)
),
)
def test_basename01(self):
path = self.create_uploaded_file(
file_name='FileRefTestCase_test_basename01.txt',
dir_name='models',
)
with self.assertNoException():
FileRef.objects.create(filedata=path, basename='test_basename01.txt')
def test_basename02(self):
name = 'FileRefTestCase_test_basename02.txt'
path = self.create_uploaded_file(file_name=name, dir_name='models')
with self.assertNoException():
file_ref = FileRef.objects.create(filedata=path)
self.assertEqual(name, file_ref.basename)
def test_job01(self):
"File is too young to be deleted (just created)"
job = self._get_job(days=1)
path = self.create_uploaded_file(
file_name='FileRefTestCase_test_job01.txt',
dir_name='models',
)
temp_file = FileRef.objects.create(filedata=path)
self.assertIs(temp_file.temporary, True)
self.assertIsNone(temp_file.user)
temp_files_cleaner_type.execute(job)
self.assertStillExists(temp_file)
self.assertTrue(exists(temp_file.filedata.path))
def test_job02(self):
"File is old enough to be deleted."
days = 1
job = self._get_job(days=days)
path = self.create_uploaded_file(
file_name='FileRefTestCase_test_job02.txt',
dir_name='models',
)
file_ref = FileRef.objects.create(filedata=path)
full_path = file_ref.filedata.path
self._oldify_temp_file(file_ref, days)
temp_files_cleaner_type.execute(job)
self.assertDoesNotExist(file_ref)
self.assertFalse(exists(full_path))
def test_job03(self):
"File is too young to be deleted."
job = self._get_job(days=2)
path = self.create_uploaded_file(
file_name='FileRefTestCase_test_job03.txt',
dir_name='models',
)
file_ref = FileRef.objects.create(filedata=path)
self._oldify_temp_file(file_ref, days=1)
temp_files_cleaner_type.execute(job)
self.assertStillExists(file_ref)
def METHOD_NAME(self):
"File is not temporary."
job = self._get_job(days=1)
path = self.create_uploaded_file(
file_name='FileRefTestCase_test_job04.txt',
dir_name='models',
)
file_ref = FileRef.objects.create(filedata=path, temporary=False)
self._oldify_temp_file(file_ref, days=2)
temp_files_cleaner_type.execute(job)
self.assertStillExists(file_ref)
def test_create_at_deletion01(self):
user = self.get_root_user()
existing_ids = [*FileRef.objects.values_list('id', flat=True)]
path = self.create_uploaded_file(
file_name='FileRefTestCase_test_create_at_deletion.txt',
dir_name='models',
)
folder = FakeFolder.objects.create(user=user, title='X-files')
doc = FakeDocument.objects.create(
user=user,
title='Roswell.txt',
linked_folder=folder,
filedata=path,
)
full_path = doc.filedata.path
doc.delete()
self.assertDoesNotExist(doc)
file_ref = self.get_alone_element(FileRef.objects.exclude(id__in=existing_ids))
self.assertTrue(file_ref.temporary)
self.assertIsNone(file_ref.user)
self.assertEqual(full_path, file_ref.filedata.path)
self.assertTrue(exists(full_path))
def test_create_at_deletion02(self):
"Empty FileField."
existing_ids = [*FileRef.objects.values_list('id', flat=True)]
embed_doc = FakeFileComponent.objects.create()
self.assertNoException(embed_doc.delete)
self.assertFalse(FileRef.objects.exclude(id__in=existing_ids))
class FileRefTestDeleteCase(base.CremeTransactionTestCase):
def test_delete_model_with_file01(self):
user = self.create_user()
existing_ids = [*FileRef.objects.values_list('id', flat=True)]
path = self.create_uploaded_file(
file_name='FileRefTestDeleteCase_test_delete_model_with_file01.txt',
dir_name='models',
)
folder = FakeFolder.objects.create(user=user, title='X-files')
doc = FakeDocument.objects.create(
user=user, title='Roswell.txt', linked_folder=folder, filedata=path,
)
full_path = doc.filedata.path
with atomic():
doc.delete()
self.assertDoesNotExist(doc)
file_ref = self.get_alone_element(FileRef.objects.exclude(id__in=existing_ids))
self.assertTrue(file_ref.temporary)
self.assertIsNone(file_ref.user)
self.assertEqual(full_path, file_ref.filedata.path)
self.assertTrue(exists(full_path))
def test_delete_model_with_file02(self):
user = self.create_user()
existing_ids = [*FileRef.objects.values_list('id', flat=True)]
path = self.create_uploaded_file(
file_name='FileRefTestDeleteCase_test_delete_model_with_file02.txt',
dir_name='models',
)
folder = FakeFolder.objects.create(user=user, title='X-files')
doc = FakeDocument.objects.create(
user=user, title='Roswell.txt', linked_folder=folder, filedata=path,
)
full_path = doc.filedata.path
try:
with atomic():
doc.delete()
raise ValueError('I cause rollback')
except ValueError:
pass
doc = self.get_object_or_fail(FakeDocument, id=doc.id)
self.assertEqual(full_path, doc.filedata.path)
self.assertTrue(exists(full_path))
self.assertFalse(FileRef.objects.exclude(id__in=existing_ids))
| null |
904 |
# coding=utf-8
# Copyright 2023 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_datasets.core.dataset_builder_read."""
from unittest import mock
import pytest
import tensorflow as tf
from tensorflow_datasets import testing
from tensorflow_datasets.core import dataset_builder
from tensorflow_datasets.core import dataset_utils
from tensorflow_datasets.core import logging as tfds_logging
from tensorflow_datasets.core import utils
from tensorflow_datasets.core.utils import read_config as read_config_lib
def METHOD_NAME(dummy_dataset: dataset_builder.DatasetBuilder):
"""Tests `add_tfds_id=True`."""
read_config = read_config_lib.ReadConfig(add_tfds_id=True)
ds = dummy_dataset.as_dataset(split='train', read_config=read_config)
assert ds.element_spec == {
'id': tf.TensorSpec(shape=(), dtype=tf.int64),
'tfds_id': tf.TensorSpec(shape=(), dtype=tf.string),
}
assert list(dataset_utils.as_numpy(ds)) == [
{'id': 0, 'tfds_id': b'dummy_dataset-train.tfrecord-00000-of-00001__0'},
{'id': 1, 'tfds_id': b'dummy_dataset-train.tfrecord-00000-of-00001__1'},
{'id': 2, 'tfds_id': b'dummy_dataset-train.tfrecord-00000-of-00001__2'},
]
# Subsplit API works too
ds = dummy_dataset.as_dataset(split='train[1:]', read_config=read_config)
assert ds.element_spec == {
'id': tf.TensorSpec(shape=(), dtype=tf.int64),
'tfds_id': tf.TensorSpec(shape=(), dtype=tf.string),
}
assert list(dataset_utils.as_numpy(ds)) == [
{'id': 1, 'tfds_id': b'dummy_dataset-train.tfrecord-00000-of-00001__1'},
{'id': 2, 'tfds_id': b'dummy_dataset-train.tfrecord-00000-of-00001__2'},
]
def test_add_tfds_id_as_supervised(
dummy_dataset: dataset_builder.DatasetBuilder,
):
"""Tests `add_tfds_id=True` with `as_supervised=True`."""
read_config = read_config_lib.ReadConfig(add_tfds_id=True)
ds = dummy_dataset.as_dataset(
split='train',
read_config=read_config,
as_supervised=True,
)
# `add_tfds_id=True` is ignored when `as_supervised=True`
assert ds.element_spec == (
tf.TensorSpec(shape=(), dtype=tf.int64),
tf.TensorSpec(shape=(), dtype=tf.int64),
)
def test_registered_logger_is_called(
dummy_dataset: dataset_builder.DatasetBuilder,
):
logger = mock.MagicMock()
tfds_logging.register(logger)
read_config = read_config_lib.ReadConfig(add_tfds_id=True)
read_config.try_autocache = False
read_config.num_parallel_calls_for_decode = 42
ds = dummy_dataset.as_dataset(
split='train',
read_config=read_config,
as_supervised=True,
)
# Logging doesn't change the result:
assert ds.element_spec == (
tf.TensorSpec(shape=(), dtype=tf.int64),
tf.TensorSpec(shape=(), dtype=tf.int64),
)
# Logger was indeed called:
assert logger.as_dataset.call_args_list == [
mock.call(
metadata=mock.ANY,
name='dummy_dataset',
config_name='',
version='1.0.0',
data_path=mock.ANY,
split='train',
batch_size=None,
shuffle_files=None,
read_config=read_config,
as_supervised=True,
decoders=None,
)
]
def test_canonical_version_for_config():
get_version = dataset_builder.canonical_version_for_config
# No config
version = get_version(testing.DummyDataset)
assert version == utils.Version('1.0.0')
class DummyDatasetWithConfig(testing.DummyDataset, skip_registration=True):
BUILDER_CONFIGS = [
dataset_builder.BuilderConfig(name='x', version='2.0.0'),
dataset_builder.BuilderConfig(name='y'),
]
with pytest.raises(ValueError, match='Cannot infer version'):
version = get_version(DummyDatasetWithConfig)
version = get_version(
DummyDatasetWithConfig,
DummyDatasetWithConfig.builder_configs['x'],
)
assert version == utils.Version('2.0.0')
version = get_version(
DummyDatasetWithConfig,
DummyDatasetWithConfig.builder_configs['y'],
)
assert version == utils.Version('1.0.0')
| null |
905 |
'''
Copyright (C) 2021 Gitcoin Core
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import argparse
import csv
import datetime
import os
import re
from io import StringIO
from django.conf import settings
from django.core.management.base import BaseCommand
import boto
from boto.s3.key import Key
from dashboard.models import Bounty, Profile
from dashboard.utils import all_sendcryptoasset_models
from economy.utils import convert_amount
from marketing.mails import send_mail
DATE_FORMAT = '%Y/%m/%d'
DATE_FORMAT_HYPHENATED = '%Y-%m-%d'
REPORT_URL_EXPIRATION_TIME = 60 * 60 * 24 * 30 # seconds
GITHUB_REPO_PATTERN = re.compile(r'github.com/[\w-]+/([\w-]+)')
imap = map
def get_bio(handle):
try:
profile = Profile.objects.filter(handle=handle.replace('@', '')).first()
return profile.data.get('location', 'unknown'), profile.data.get('bio', 'unknown')
except Exception:
return 'unknown', 'unknown'
def valid_date(v):
try:
return datetime.datetime.strptime(v, DATE_FORMAT)
except ValueError:
raise argparse.ArgumentTypeError('%s is not a date in YYYY/MM/DD format' % v)
class Command(BaseCommand):
help = 'emails activity report of tips and bounties to settings.CONTACT_EMAIL'
def add_arguments(self, parser):
parser.add_argument('start_date', type=valid_date, help='Start of date range (inclusive) in YYYY/MM/DD format for activities to be collected')
parser.add_argument('end_date', type=valid_date, help='End of date range (inclusive) in YYYY/MM/DD format for activities to be collected')
def extract_github_repo(self, url):
match = GITHUB_REPO_PATTERN.search(url)
if not match:
self.stdout.write(self.style.WARNING('WARNING: malformed github url: %s, using value as is' % url))
return url
return match.groups()[0]
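# Flatten a bounty (and its accepted fulfillment, if any) into a dict matching the CSV report columns.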
def METHOD_NAME(self, bounty):
from dashboard.models import BountyFulfillment
try:
bounty_fulfillment = bounty.fulfillments.filter(accepted=True).latest('created_on')
claimee_address = bounty_fulfillment.fulfiller_address
fulfiller_github_username = bounty_fulfillment.fulfiller_github_username
except BountyFulfillment.DoesNotExist:
claimee_address = ''
fulfiller_github_username = ''
location, bio = get_bio(fulfiller_github_username)
return {
'type': 'bounty',
'created_on': bounty.web3_created,
'last_activity': bounty.modified_on,
'amount': bounty.get_natural_value(),
'denomination': bounty.token_name,
'amount_eth': bounty.value_in_eth / 10**18 if bounty.value_in_eth else None,
'amount_usdt': bounty.value_in_usdt,
'from_address': bounty.bounty_owner_address,
'claimee_address': claimee_address,
'repo': self.extract_github_repo(bounty.github_url),
'from_username': bounty.bounty_owner_github_username or '',
'fulfiller_github_username': fulfiller_github_username,
'status': bounty.status,
'comments': bounty.github_url,
'payee_bio': bio,
'payee_location': location,
}
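# Flatten a tip/crypto-asset transfer into the same CSV row format as bounties.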
def format_cryptoasset(self, ca):
_type = type(ca)
location, bio = get_bio(ca.username)
return {
'type': _type,
'created_on': ca.created_on,
'last_activity': ca.modified_on,
'amount': ca.amount_in_whole_units,
'denomination': ca.tokenName,
'amount_eth': ca.value_in_eth,
'amount_usdt': ca.value_in_usdt,
'from_address': ca.from_address,
'claimee_address': ca.receive_address,
'repo': self.extract_github_repo(ca.github_url) if ca.github_url else '',
'from_username': ca.from_name,
'fulfiller_github_username': ca.username,
'status': ca.status,
'comments': ca.github_url,
'payee_bio': bio,
'payee_location': location,
}
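# Upload the generated CSV to S3 and return a time-limited signed URL for the report.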
def upload_to_s3(self, filename, contents):
s3 = boto.connect_s3(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
bucket = s3.get_bucket(settings.S3_REPORT_BUCKET)
key = Key(bucket)
key.key = os.path.join(settings.S3_REPORT_PREFIX, filename)
key.set_contents_from_string(contents)
return key.generate_url(expires_in=REPORT_URL_EXPIRATION_TIME)
def handle(self, *args, **options):
bounties = Bounty.objects.prefetch_related('fulfillments').current().filter(
network='mainnet',
web3_created__gte=options['start_date'],
web3_created__lte=options['end_date']
).order_by('web3_created', 'id')
formatted_bounties = imap(self.METHOD_NAME, bounties)
all_scram = []
for _class in all_sendcryptoasset_models():
objs = _class.objects.filter(
network='mainnet',
created_on__gte=options['start_date'],
created_on__lte=options['end_date']
).send_success().order_by('created_on', 'id')
objs = imap(self.format_cryptoasset, objs)
objs = [x for x in objs]
all_scram += objs
# map() is lazy in Python 3, so materialize the results into a list
formatted_bounties = [x for x in formatted_bounties]
all_items = formatted_bounties + all_scram
csvfile = StringIO()
csvwriter = csv.DictWriter(csvfile, fieldnames=[
'type', 'created_on', 'last_activity', 'amount', 'denomination', 'amount_eth',
'amount_usdt', 'from_address', 'claimee_address', 'repo', 'from_username',
'fulfiller_github_username', 'status', 'comments', 'payee_bio', 'payee_location'])
csvwriter.writeheader()
items = sorted(all_items, key=lambda x: x['created_on'])
has_rows = False
for item in items:
has_rows = True
csvwriter.writerow(item)
start = options['start_date'].strftime(DATE_FORMAT_HYPHENATED)
end = options['end_date'].strftime(DATE_FORMAT_HYPHENATED)
now = str(datetime.datetime.now())
if has_rows:
subject = f'Gitcoin Activity report from {start} to {end}'
url = self.upload_to_s3(f'activity_report_{start}_{end}_generated_on_{now}.csv', csvfile.getvalue())
body = f'<a href="{url}">{url}</a>'
print(url)
send_mail(
settings.CONTACT_EMAIL,
settings.CONTACT_EMAIL,
subject,
body='',
html=body,
categories=['admin', 'activity_report'],
)
self.stdout.write(
self.style.SUCCESS('Sent activity report from %s to %s to %s' % (start, end, settings.CONTACT_EMAIL))
)
else:
self.stdout.write(self.style.WARNING('No activity from %s to %s to report' % (start, end)))
| null |
906 |
# A versioning object representing the wavelength level in the .xinfo
# hierarchy. This will include all of the methods for performing operations
# on a wavelength as well as stuff for integration with the rest of the
# .xinfo hierarchy.
#
# The following are properties defined for an XWavelength object:
#
# wavelength
# f_pr
# f_prpr
#
# However, these objects are not versioned, since they do not (in the current
# implementation) impact on the data reduction process. These are mostly
# passed through.
#
# FIXME 05/SEP/06 this also needs to be able to handle the information
# pertaining to the lattice, because it is critical that
# all of the sweeps for a wavelength share the same
# lattice.
#
# FIXME 05/SEP/06 also don't forget about ordering the sweeps in collection
# order for the data reduction, to make sure that we
# reduce the least damaged data first.
from __future__ import annotations
import inspect
import logging
from xia2.Handlers.Phil import PhilIndex
from xia2.Schema.XSweep import XSweep
logger = logging.getLogger("xia2.Schema.XWavelength")
class XWavelength:
"""An object representation of a wavelength, which will after data
reduction correspond to an MTZ hierarchy dataset."""
def __init__(
self, name, crystal, wavelength, f_pr=0.0, f_prpr=0.0, dmin=0.0, dmax=0.0
):
"""Create a new wavelength named name, belonging to XCrystal object
crystal, with wavelength and optionally f_pr, f_prpr assigned."""
# set up this object
self._name = name
self._crystal = crystal
self._wavelength = wavelength
self._f_pr = f_pr
self._f_prpr = f_prpr
self._resolution_high = dmin
self._resolution_low = dmax
# then create space to store things which are contained
# in here - the sweeps
self._sweeps = []
# serialization functions
def to_dict(self):
obj = {"__id__": "XWavelength"}
attributes = inspect.getmembers(self, lambda m: not (inspect.isroutine(m)))
for a in attributes:
if a[0] == "_sweeps":
sweeps = []
for sweep in a[1]:
sweeps.append(sweep.to_dict())
obj[a[0]] = sweeps
elif a[0] == "_crystal":
# don't serialize this since the parent xwavelength *should* contain
# the reference to the child xsweep
continue
elif a[0].startswith("__"):
continue
else:
obj[a[0]] = a[1]
return obj
@classmethod
def from_dict(cls, obj):
assert obj["__id__"] == "XWavelength"
return_obj = cls(name=None, crystal=None, wavelength=None)
for k, v in obj.items():
if k == "_sweeps":
v = [XSweep.from_dict(s_dict) for s_dict in v]
for sweep in v:
sweep._wavelength = return_obj
setattr(return_obj, k, v)
return return_obj
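# Build a human-readable report of this wavelength and its sweeps, dropping sweeps that fail when failover is enabled.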
def get_output(self):
result = "Wavelength name: %s\n" % self._name
result += "Wavelength %7.5f\n" % self._wavelength
if self._f_pr != 0.0 and self._f_prpr != 0.0:
result += f"F', F'' = ({self._f_pr:5.2f}, {self._f_prpr:5.2f})\n"
result += "Sweeps:\n"
remove = []
params = PhilIndex.get_python_object()
failover = params.xia2.settings.failover
for s in self._sweeps:
# would be nice to put this somewhere else in the hierarchy - not
# sure how to do that though (should be handled in Interfaces?)
try:
result += "%s\n" % s.get_output()
except Exception as e:
if failover:
logger.warning(
"Processing sweep %s failed: %s", s.get_name(), str(e)
)
remove.append(s)
else:
raise
for s in remove:
self._sweeps.remove(s)
return result[:-1]
def summarise(self):
summary = [f"Wavelength: {self._name} ({self._wavelength:7.5f})"]
for s in self._sweeps:
for record in s.summarise():
summary.append(record)
return summary
def get_wavelength(self):
return self._wavelength
def set_wavelength(self, wavelength):
if self._wavelength != 0.0:
raise RuntimeError("setting wavelength when already set")
self._wavelength = wavelength
def set_resolution_high(self, resolution_high):
self._resolution_high = resolution_high
def set_resolution_low(self, resolution_low):
self._resolution_low = resolution_low
def get_resolution_high(self):
return self._resolution_high
def get_resolution_low(self):
return self._resolution_low
def get_f_pr(self):
return self._f_pr
def get_f_prpr(self):
return self._f_prpr
def METHOD_NAME(self):
return self._crystal
def get_name(self):
return self._name
def get_all_image_names(self):
"""Get a full list of all images in this wavelength..."""
# for RD analysis ...
result = []
for sweep in self._sweeps:
result.extend(sweep.get_all_image_names())
return result
def add_sweep(
self,
name,
sample,
directory=None,
image=None,
beam=None,
reversephi=False,
distance=None,
gain=0.0,
dmin=0.0,
dmax=0.0,
polarization=0.0,
frames_to_process=None,
user_lattice=None,
user_cell=None,
epoch=0,
ice=False,
excluded_regions=None,
):
"""Add a sweep to this wavelength."""
if excluded_regions is None:
excluded_regions = []
xsweep = XSweep(
name,
self,
sample=sample,
directory=directory,
image=image,
beam=beam,
reversephi=reversephi,
distance=distance,
gain=gain,
dmin=dmin,
dmax=dmax,
polarization=polarization,
frames_to_process=frames_to_process,
user_lattice=user_lattice,
user_cell=user_cell,
epoch=epoch,
ice=ice,
excluded_regions=excluded_regions,
)
self._sweeps.append(xsweep)
return xsweep
def get_sweeps(self):
return self._sweeps
def remove_sweep(self, sweep):
"""Remove a sweep object from this wavelength."""
try:
self._sweeps.remove(sweep)
except ValueError:
pass
def _get_integraters(self):
return [s._get_integrater() for s in self._sweeps]
def _get_indexers(self):
return [s._get_indexer() for s in self._sweeps]
| null |
907 |
import unittest
from utils import ApproxComparisonTestCase
import meep as mp
class TestHoleyWvgCavity(ApproxComparisonTestCase):
def setUp(self):
eps = 13
self.w = 1.2
r = 0.36
d = 1.4
N = 3
sy = 6
pad = 2
self.dpml = 1
self.sx = (2 * (pad + self.dpml + N)) + d - 1
self.fcen = 0.25
self.df = 0.2
self.nfreq = 500
cell = mp.Vector3(self.sx, sy, 0)
blk = mp.Block(
size=mp.Vector3(mp.inf, self.w, mp.inf), material=mp.Medium(epsilon=eps)
)
geometry = [blk]
geometry.extend(mp.Cylinder(r, center=mp.Vector3(d / 2 + i)) for i in range(3))
for i in range(3):
geometry.append(mp.Cylinder(r, center=mp.Vector3(d / -2 - i)))
self.sim = mp.Simulation(
cell_size=cell,
geometry=geometry,
sources=[],
boundary_layers=[mp.PML(self.dpml)],
resolution=20,
)
@classmethod
def setUpClass(cls):
cls.temp_dir = mp.make_output_directory()
@classmethod
def tearDownClass(cls):
mp.delete_directory(cls.temp_dir)
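# Excite the cavity with a Gaussian Hz point source and extract the resonant mode parameters with Harminv.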
def test_resonant_modes(self):
self.sim.sources = [
mp.Source(mp.GaussianSource(self.fcen, fwidth=self.df), mp.Hz, mp.Vector3())
]
self.sim.symmetries = [mp.Mirror(mp.Y, phase=-1), mp.Mirror(mp.X, phase=-1)]
self.sim.use_output_directory(self.temp_dir)
h = mp.Harminv(mp.Hz, mp.Vector3(), self.fcen, self.df)
self.sim.run(
mp.at_beginning(mp.output_epsilon),
mp.after_sources(h),
until_after_sources=400,
)
expected = [
0.23445415346009466,
-3.147812367338531e-4,
372.40808234438254,
5.8121430334347135,
-3.763107485715599,
-4.429450156854109,
]
m = h.modes[0]
res = [m.freq, m.decay, m.Q, abs(m.amp), m.amp.real, m.amp.imag]
tol = 1e-6 if mp.is_single_precision() else 1e-8
self.assertClose(expected, res, epsilon=tol)
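# Transmission spectrum check: drive the waveguide with an Ey source and compare the flux at each frequency against reference values.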
def METHOD_NAME(self):
expected = [
(0.15, 7.218492264696595e-6),
(0.1504008016032064, 6.445696315927592e-6),
(0.1508016032064128, 5.140949243632777e-6),
(0.15120240480961922, 3.6159747936427164e-6),
(0.15160320641282563, 2.263940553705969e-6),
(0.15200400801603203, 1.4757165844336744e-6),
(0.15240480961923844, 1.5491803919142815e-6),
(0.15280561122244485, 2.612053246626972e-6),
(0.15320641282565126, 4.577504371188737e-6),
(0.15360721442885766, 7.1459089162998185e-6),
(0.15400801603206407, 9.856622013418823e-6),
(0.15440881763527048, 1.2182309227954296e-5),
(0.1548096192384769, 1.3647726444709649e-5),
(0.1552104208416833, 1.3947420613633674e-5),
(0.1556112224448897, 1.303466755716231e-5),
(0.1560120240480961, 1.115807915037775e-5),
(0.15641282565130252, 8.832335196969796e-6),
(0.15681362725450892, 6.743645773127985e-6),
(0.15721442885771533, 5.605913756087576e-6),
(0.15761523046092174, 5.996668564026961e-6),
(0.15801603206412815, 8.209400611614078e-6),
(0.15841683366733456, 1.2158641936828497e-5),
(0.15881763527054096, 1.73653230513453e-5),
(0.15921843687374737, 2.303382576477893e-5),
(0.15961923847695378, 2.821180350795834e-5),
(0.1600200400801602, 3.200359292911769e-5),
(0.1604208416833666, 3.3792624373001934e-5),
(0.160821643286573, 3.342171394788991e-5),
(0.1612224448897794, 3.1284866146526904e-5),
(0.16162324649298582, 2.830022088581398e-5),
(0.16202404809619222, 2.5758413657344014e-5),
(0.16242484969939863, 2.506899997971769e-5),
(0.16282565130260504, 2.7453508915303887e-5),
(0.16322645290581145, 3.365089813497114e-5),
(0.16362725450901786, 4.370486834112e-5),
(0.16402805611222426, 5.689050715055283e-5),
(0.16442885771543067, 7.181133157470506e-5),
(0.16482965931863708, 8.666168027415369e-5),
(0.16523046092184349, 9.961094123261317e-5),
(0.1656312625250499, 1.0923388232657953e-4),
(0.1660320641282563, 1.1489334204708105e-4),
(0.1664328657314627, 1.1698318060032011e-4),
(0.16683366733466912, 1.169621456132733e-4),
(0.16723446893787552, 1.1714995241571987e-4),
(0.16763527054108193, 1.2030783847222252e-4),
(0.16803607214428834, 1.2907652919660887e-4),
]
self.sim.sources = [
mp.Source(
mp.GaussianSource(self.fcen, fwidth=self.df),
mp.Ey,
mp.Vector3(self.dpml + (-0.5 * self.sx)),
size=mp.Vector3(0, self.w),
)
]
self.sim.symmetries = [mp.Mirror(mp.Y, phase=-1)]
freg = mp.FluxRegion(
center=mp.Vector3((0.5 * self.sx) - self.dpml - 0.5),
size=mp.Vector3(0, 2 * self.w),
)
trans = self.sim.add_flux(
self.fcen, self.df, self.nfreq, freg, decimation_factor=1
)
self.sim.run(
until_after_sources=mp.stop_when_fields_decayed(
50, mp.Ey, mp.Vector3((0.5 * self.sx) - self.dpml - 0.5, 0), 1e-1
)
)
res = zip(mp.get_flux_freqs(trans), mp.get_fluxes(trans))
tol = 1e-8 if mp.is_single_precision() else 1e-10
for e, r in zip(expected, res):
self.assertClose(e, r, epsilon=tol)
if __name__ == "__main__":
unittest.main()
| null |
908 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CreateLoadBalancerHTTPListenerRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ens', '2017-11-10', 'CreateLoadBalancerHTTPListener','ens')
self.set_method('POST')
def get_ListenerForward(self): # String
return self.get_query_params().get('ListenerForward')
def set_ListenerForward(self, ListenerForward): # String
self.add_query_param('ListenerForward', ListenerForward)
def get_HealthCheckTimeout(self): # Integer
return self.get_query_params().get('HealthCheckTimeout')
def set_HealthCheckTimeout(self, HealthCheckTimeout): # Integer
self.add_query_param('HealthCheckTimeout', HealthCheckTimeout)
def get_XForwardedFor(self): # String
return self.get_query_params().get('XForwardedFor')
def set_XForwardedFor(self, XForwardedFor): # String
self.add_query_param('XForwardedFor', XForwardedFor)
def get_HealthCheckURI(self): # String
return self.get_query_params().get('HealthCheckURI')
def set_HealthCheckURI(self, HealthCheckURI): # String
self.add_query_param('HealthCheckURI', HealthCheckURI)
def get_HealthCheck(self): # String
return self.get_query_params().get('HealthCheck')
def set_HealthCheck(self, HealthCheck): # String
self.add_query_param('HealthCheck', HealthCheck)
def get_HealthCheckMethod(self): # String
return self.get_query_params().get('HealthCheckMethod')
def set_HealthCheckMethod(self, HealthCheckMethod): # String
self.add_query_param('HealthCheckMethod', HealthCheckMethod)
def get_HealthCheckDomain(self): # String
return self.get_query_params().get('HealthCheckDomain')
def set_HealthCheckDomain(self, HealthCheckDomain): # String
self.add_query_param('HealthCheckDomain', HealthCheckDomain)
def get_RequestTimeout(self): # Integer
return self.get_query_params().get('RequestTimeout')
def set_RequestTimeout(self, RequestTimeout): # Integer
self.add_query_param('RequestTimeout', RequestTimeout)
def get_LoadBalancerId(self): # String
return self.get_query_params().get('LoadBalancerId')
def set_LoadBalancerId(self, LoadBalancerId): # String
self.add_query_param('LoadBalancerId', LoadBalancerId)
def get_HealthCheckInterval(self): # Integer
return self.get_query_params().get('HealthCheckInterval')
def set_HealthCheckInterval(self, HealthCheckInterval): # Integer
self.add_query_param('HealthCheckInterval', HealthCheckInterval)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_UnhealthyThreshold(self): # Integer
return self.get_query_params().get('UnhealthyThreshold')
def set_UnhealthyThreshold(self, UnhealthyThreshold): # Integer
self.add_query_param('UnhealthyThreshold', UnhealthyThreshold)
def get_HealthyThreshold(self): # Integer
return self.get_query_params().get('HealthyThreshold')
def set_HealthyThreshold(self, HealthyThreshold): # Integer
self.add_query_param('HealthyThreshold', HealthyThreshold)
def get_Scheduler(self): # String
return self.get_query_params().get('Scheduler')
def set_Scheduler(self, Scheduler): # String
self.add_query_param('Scheduler', Scheduler)
def get_ForwardPort(self): # Integer
return self.get_query_params().get('ForwardPort')
def set_ForwardPort(self, ForwardPort): # Integer
self.add_query_param('ForwardPort', ForwardPort)
def get_ListenerPort(self): # Integer
return self.get_query_params().get('ListenerPort')
def set_ListenerPort(self, ListenerPort): # Integer
self.add_query_param('ListenerPort', ListenerPort)
def get_IdleTimeout(self): # Integer
return self.get_query_params().get('IdleTimeout')
def set_IdleTimeout(self, IdleTimeout): # Integer
self.add_query_param('IdleTimeout', IdleTimeout)
def get_HealthCheckConnectPort(self): # Integer
return self.get_query_params().get('HealthCheckConnectPort')
def set_HealthCheckConnectPort(self, HealthCheckConnectPort): # Integer
self.add_query_param('HealthCheckConnectPort', HealthCheckConnectPort)
def METHOD_NAME(self): # String
return self.get_query_params().get('HealthCheckHttpCode')
def set_HealthCheckHttpCode(self, HealthCheckHttpCode): # String
self.add_query_param('HealthCheckHttpCode', HealthCheckHttpCode)
| null |
909 |
from methods.regular.regular_api import *
from default.tests.test_utils import testing_setup
from shared.tests.test_utils import common_actions, data_mocking
from base64 import b64encode
from shared.regular import regular_log
from methods.video import sequence_preview_create
class TestSequencePreviewCreate(testing_setup.DiffgramBaseTestCase):
"""
"""
def setUp(self):
# TODO: this test is assuming the 'my-sandbox-project' exists and some object have been previously created.
# For future tests a mechanism of setting up and tearing down the database should be created.
super(TestSequencePreviewCreate, self).setUp()
project_data = data_mocking.create_project_with_context(
{
'users': [
{'username': 'Test',
'email': '[email protected]',
'password': 'diffgram123',
}
]
},
self.session
)
project_data2 = data_mocking.create_project_with_context(
{
'users': [
{'username': 'Test',
'email': '[email protected]',
'password': 'diffgram123',
}
]
},
self.session
)
self.project2 = project_data2['project']
self.project = project_data['project']
def METHOD_NAME(self):
label = data_mocking.create_label({
'name': 'apple',
}, self.session)
label_file = data_mocking.create_label_file({
'label': label,
'project_id': self.project.id
}, self.session)
video_file = data_mocking.create_file({
'project_id': self.project.id,
'type': 'video'
}, self.session)
sequence = data_mocking.create_sequence({
'label_file_id': label_file.id,
'video_file_id': video_file.id,
'cache_expiry': time.time() + 500000,
'number': 1,
}, self.session)
video_file_bad = data_mocking.create_file({
'project_id': self.project2.id,
'type': 'video'
}, self.session)
preview_url = 'https://picsum.photos/200/300'
instance = data_mocking.create_instance({
'project_id': self.project.id,
'type': 'box',
'x_min': 0,
'x_max': 0,
'y_min': 0,
'y_max': 0,
'file_id': video_file.id,
'soft_delete': False,
'sequence_id': sequence.id,
'preview_image_url': preview_url,
'preview_image_url_expiry': 900000000,
}, self.session)
sequence.instance_preview_cache = {
'id': instance.id,
'file_id': sequence.video_file.id,
'preview_image_url': preview_url,
}
self.session.commit()
endpoint = "/api/project/{}/sequence/{}/create-preview".format(
self.project.project_string_id,
sequence.id,
)
auth_api = common_actions.create_project_auth(project = self.project, session = self.session)
credentials = b64encode(f"{auth_api.client_id}:{auth_api.client_secret}".encode()).decode('utf-8')
response = self.client.post(
endpoint,
data = json.dumps({}),
headers = {
'directory_id': str(self.project.directory_default_id),
'Authorization': f"Basic {credentials}"
}
)
data = response.json
self.assertTrue('result' in data)
self.assertTrue('log' in data)
self.assertFalse(len(data['log']['error'].keys()), 0)
self.assertEqual(data['result']['instance_preview']['id'], instance.id)
self.assertEqual(data['result']['instance_preview']['file_id'], video_file.id)
self.assertEqual(data['result']['instance_preview']['preview_image_url'], preview_url)
# Error case
sequence2 = data_mocking.create_sequence({
'label_file_id': label_file.id,
'video_file_id': video_file_bad.id,
'cache_expiry': time.time() + 500000,
'number': 1,
}, self.session)
result, log = sequence_preview_create.create_sequence_preview_core(
session = self.session,
log = regular_log.default(),
project = self.project,
sequence_id = sequence2.id
)
self.assertEqual(len(log['error'].keys()), 1)
self.assertTrue('project_id' in log['error'])
def test_create_sequence_preview_core(self):
label = data_mocking.create_label({
'name': 'apple',
}, self.session)
label_file = data_mocking.create_label_file({
'label': label,
'project_id': self.project.id
}, self.session)
video_file = data_mocking.create_file({
'project_id': self.project.id,
'type': 'video'
}, self.session)
video_file_bad = data_mocking.create_file({
'project_id': self.project2.id,
'type': 'video'
}, self.session)
sequence = data_mocking.create_sequence({
'label_file_id': label_file.id,
'video_file_id': video_file.id,
'cache_expiry': time.time() + 500000,
'number': 1,
}, self.session)
sequence2 = data_mocking.create_sequence({
'label_file_id': label_file.id,
'video_file_id': video_file_bad.id,
'cache_expiry': time.time() + 500000,
'number': 1,
}, self.session)
preview_url = 'https://picsum.photos/200/300'
instance = data_mocking.create_instance({
'project_id': self.project.id,
'type': 'box',
'x_min': 0,
'x_max': 0,
'y_min': 0,
'y_max': 0,
'file_id': video_file.id,
'soft_delete': False,
'sequence_id': sequence.id,
'preview_image_url': preview_url,
'preview_image_url_expiry': 900000000,
}, self.session)
sequence.instance_preview_cache = {
'id': instance.id,
'file_id': sequence.video_file.id,
'preview_image_url': preview_url,
}
self.session.commit()
result, log = sequence_preview_create.create_sequence_preview_core(
session = self.session,
log = regular_log.default(),
project = self.project,
sequence_id = sequence.id
)
self.assertFalse(len(log['error'].keys()), 0)
self.assertEqual(result['instance_preview']['id'], instance.id)
self.assertEqual(result['instance_preview']['file_id'], video_file.id)
self.assertEqual(result['instance_preview']['preview_image_url'], preview_url)
# Error case
result, log = sequence_preview_create.create_sequence_preview_core(
session = self.session,
log = regular_log.default(),
project = self.project,
sequence_id = sequence2.id
)
self.assertEqual(len(log['error'].keys()), 1)
self.assertTrue('project_id' in log['error'])
| null |
910 |
# coding=utf-8
# Copyright 2023 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for preprocessing utilities."""
import io
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import preprocess_utils # local file import from experimental.multimodal
class PreprocessUtilsTest(tf.test.TestCase):
def get_data(self):
return {
"image": tf.random.uniform([640, 480, 3], 0, 255),
"rng": tf.random.create_rng_state(42, 2)
}
def test_resize(self):
data = self.get_data()
data = preprocess_utils.Resize([120, 80])(data)
self.assertEqual(data["image"].numpy().shape, (120, 80, 3))
def test_resize_small(self):
data = self.get_data()
data = preprocess_utils.ResizeSmall(240)(data)
self.assertEqual(data["image"].numpy().shape, (320, 240, 3))
def test_inception_crop(self):
data = self.get_data()
data = preprocess_utils.InceptionCrop()(data)
self.assertEqual(data["image"].numpy().shape[-1], 3)
def test_decode_jpeg_and_inception_crop(self):
f = io.BytesIO()
plt.imsave(
f,
np.random.randint(0, 256, [224, 224, 3]).astype("uint8"),
format="jpg")
data = self.get_data()
data["image"] = f.getvalue()
data = preprocess_utils.DecodeJpegAndInceptionCrop()(data)
self.assertEqual(data["image"].numpy().shape[-1], 3)
def test_random_crop(self):
data = self.get_data()
data = preprocess_utils.RandomCrop([120, 80])(data)
self.assertEqual(data["image"].numpy().shape, (120, 80, 3))
def test_central_crop(self):
data = self.get_data()
data = preprocess_utils.CentralCrop([120, 80])(data)
self.assertEqual(data["image"].numpy().shape, (120, 80, 3))
def test_flip_lr(self):
data = self.get_data()
data_after_pp = preprocess_utils.FlipLr()(data)
self.assertTrue(
np.all(data["image"].numpy() == data_after_pp["image"].numpy()) or
np.all(data["image"][:, ::-1].numpy() ==
data_after_pp["image"].numpy()))
def test_value_range(self):
data = self.get_data()
data = preprocess_utils.ValueRange(-0.5, 0.5)(data)
self.assertLessEqual(np.max(data["image"].numpy()), 0.5)
self.assertGreaterEqual(np.min(data["image"].numpy()), -0.5)
def test_value_range_custom_input_range(self):
data = self.get_data()
data = preprocess_utils.ValueRange(-0.5, 0.5, -256, 255, True)(data)
self.assertLessEqual(np.max(data["image"].numpy()), 0.5)
self.assertGreaterEqual(np.min(data["image"].numpy()), 0.0)
def test_keep(self):
data = {"image": 1, "labels": 2, "something": 3}
data_keep = preprocess_utils.Keep(["image", "labels"])(data)
self.assertAllEqual(list(data_keep.keys()), ["image", "labels"])
def METHOD_NAME(self):
data = {"labels": tf.constant(np.asarray(2), dtype=tf.int64)}
output_data = preprocess_utils.Onehot(4, multi=True, key="labels")(data)
self.assertAllClose(output_data["labels"].numpy(), np.asarray(
[0., 0., 1., 0.], dtype=np.float32))
def test_onehot_multi(self):
data = {"labels": tf.constant(np.asarray([2, 3, 0]), dtype=tf.int64)}
output_data = preprocess_utils.Onehot(4, multi=False, key="labels")(data)
self.assertAllClose(output_data["labels"].numpy(), np.asarray([
[0., 0., 1., 0.],
[0., 0., 0., 1.],
[1., 0., 0., 0.]], dtype=np.float32))
data = {"labels": tf.constant(np.asarray([2, 3, 0]), dtype=tf.int64)}
output_data = preprocess_utils.Onehot(4, multi=True, key="labels")(data)
self.assertAllClose(output_data["labels"].numpy(),
np.asarray([1., 0., 1., 1.], dtype=np.float32))
def test_onehot_smoothing(self):
data = {"labels": tf.constant(np.asarray([2, 3, 0]), dtype=tf.int64)}
output_data = preprocess_utils.Onehot(
4, multi=False, on=0.8, off=0.1, key="labels")(
data)
self.assertAllClose(output_data["labels"].numpy(), np.asarray([
[0.1, 0.1, 0.8, 0.1],
[0.1, 0.1, 0.1, 0.8],
[0.8, 0.1, 0.1, 0.1]], dtype=np.float32))
data = {"labels": tf.constant(np.asarray([2, 3, 0]), dtype=tf.int64)}
output_data = preprocess_utils.Onehot(
4, multi=True, on=0.8, off=0.1, key="labels")(
data)
self.assertAllClose(output_data["labels"].numpy(),
np.asarray([0.8, 0.1, 0.8, 0.8], dtype=np.float32))
def test_get_coco_captions(self):
data = {
"captions": {
"text": [tf.constant(x) for x in ["a", "b", "c", "d", "e"]]
},
"rng": tf.random.create_rng_state(42, 2)
}
output_data = preprocess_utils.GetCocoCaptions()(data)
self.assertEqual(output_data["text"], tf.constant("b"))
def test_clip_i1k_label_names(self):
data = {"labels": tf.constant([0, 1])}
output_data = preprocess_utils.ClipI1kLabelNames(key="labels")(data)
labels = output_data["labels"].numpy().tolist()
self.assertAllEqual(labels, ["tench", "goldfish"])
def test_randaug(self):
rng = tf.random.create_rng_state(42, 2)
data = {
"image": tf.cast(tf.clip_by_value(tf.random.stateless_uniform(
[5, 5, 3], rng, 0, 255), 0.0, 255.0), tf.uint8),
"rng": rng
}
output_data = preprocess_utils.Randaug()(data)
expected_output = np.array(
[[[136, 241, 255], [9, 38, 255], [255, 83, 60], [24, 255, 110],
[229, 117, 255]],
[[255, 255, 81], [252, 17, 0], [115, 224, 255], [255, 255, 89],
[255, 60, 255]],
[[146, 255, 255], [255, 100, 255], [255, 255, 255], [0, 255, 255],
[255, 255, 255]],
[[255, 255, 3], [255, 0, 255], [1, 0, 17], [197, 255, 255],
[102, 58, 255]],
[[136, 255, 68], [255, 255, 91], [255, 255, 93], [119, 184, 255],
[255, 140, 218]]],
dtype=np.uint8)
self.assertAllClose(output_data["image"].numpy(), expected_output)
def test_clip_tokenize(self):
data = {
"text": tf.constant("This is a test string.", dtype=tf.string),
}
output_data = preprocess_utils.ClipTokenize(
key="text",
max_len=10,
bpe_path="third_party/py/uncertainty_baselines/experimental/multimodal/bpe_simple_vocab_16e6.txt.gz"
)(data)
expected_output = np.array(
[49406, 589, 533, 320, 1628, 9696, 269, 49407, 0, 0], dtype=np.uint32)
self.assertAllClose(output_data["text"].numpy(), expected_output)
def test_normalize(self):
data = {
"image": tf.constant([[1, 2, 3], [4, 5, 6]], dtype=tf.uint8),
}
output_data = preprocess_utils.Normalize(
mean=[2.5, 3.5, 4.5],
std=[0.5, 0.5, 0.5]
)(data)
expected_output = np.array([[-3., -3., -3.], [3., 3., 3.]],
dtype=np.float32)
self.assertAllClose(output_data["image"].numpy(), expected_output)
def test_shuffle_join(self):
data = {
"text": [tf.constant(x) for x in ["a", "b", "c", "d", "e"]],
"rng": tf.random.create_rng_state(42, 2)
}
output_data = preprocess_utils.ShuffleJoin(
key="text",
)(data)
print(output_data["text"])
expected_output = tf.constant("e. d. c. a. b", dtype=tf.string)
self.assertEqual(output_data["text"], expected_output)
if __name__ == "__main__":
tf.test.main()
| null |
911 |
import IMP
import IMP.test
import IMP.algebra
import math
import pickle
class Tests(IMP.test.TestCase):
def test_trivial_constructor(self):
"""Test trivial Transformation2D constructor"""
t = IMP.algebra.Transformation2D()
def test_identity(self):
"""Test Transformation2D identity"""
t = IMP.algebra.get_identity_transformation_2d()
self.assertAlmostEqual(t.get_rotation().get_angle(), 0., delta=1e-4)
self.assertLess(IMP.algebra.get_distance(
t.get_translation(), IMP.algebra.Vector2D(0, 0)), 1e-4)
def test_rotate_about_point(self):
"""Test rotation about a 2D point"""
p = IMP.algebra.Vector2D(1., 2.)
r = IMP.algebra.Rotation2D(math.pi / 2.)
t = IMP.algebra.get_rotation_about_point(p, r)
n = t * IMP.algebra.Vector2D(2., 3.)
self.assertLess(IMP.algebra.get_distance(
n, IMP.algebra.Vector2D(0, 3)), 1e-4)
def test_inverse(self):
"""Test inverse of Transformation2D"""
t = IMP.algebra.Transformation2D(IMP.algebra.Rotation2D(math.pi / 2.),
IMP.algebra.Vector2D(1, 2))
t2 = t.get_inverse()
self.assertAlmostEqual(t2.get_rotation().get_angle(), -math.pi / 2.,
delta=1e-4)
self.assertLess(IMP.algebra.get_distance(
t2.get_translation(), IMP.algebra.Vector2D(-2, 1)), 1e-4)
t2 = t * t.get_inverse()
self.assertAlmostEqual(t2.get_rotation().get_angle(), 0., delta=1e-4)
self.assertLess(IMP.algebra.get_distance(
t2.get_translation(), IMP.algebra.Vector2D(0, 0)), 1e-4)
def test_compose(self):
"""Test compose of Transformation2Ds"""
t = IMP.algebra.Transformation2D(IMP.algebra.Rotation2D(math.pi / 2.),
IMP.algebra.Vector2D(1, 2))
t2 = IMP.algebra.Transformation2D(IMP.algebra.Rotation2D(math.pi / 2.),
IMP.algebra.Vector2D(4, 3))
t3 = IMP.algebra.compose(t, t2)
self.assertAlmostEqual(t3.get_rotation().get_angle(), math.pi,
delta=1e-4)
self.assertLess(IMP.algebra.get_distance(
t3.get_translation(), IMP.algebra.Vector2D(-2, 6)), 1e-4)
def test_operations(self):
"""Test operations on a Transformation2D"""
v = IMP.algebra.Vector2D(1, 2)
r = IMP.algebra.Rotation2D(math.pi / 2.)
t = IMP.algebra.Transformation2D(r, v)
t.show()
t2 = IMP.algebra.Transformation2D(IMP.algebra.Rotation2D(math.pi / 2.),
IMP.algebra.Vector2D(4, 3))
o = t.get_transformed(IMP.algebra.Vector2D(3, 4))
self.assertLess(IMP.algebra.get_distance(
o, IMP.algebra.Vector2D(-3, 5)), 1e-4)
o = t * IMP.algebra.Vector2D(3, 4)
self.assertLess(IMP.algebra.get_distance(
o, IMP.algebra.Vector2D(-3, 5)), 1e-4)
tt2 = t * t2
self.assertAlmostEqual(tt2.get_rotation().get_angle(), math.pi,
delta=1e-4)
self.assertLess(IMP.algebra.get_distance(
tt2.get_translation(), IMP.algebra.Vector2D(-2, 6)), 1e-4)
t *= t2
self.assertAlmostEqual(t.get_rotation().get_angle(), math.pi,
delta=1e-4)
self.assertLess(IMP.algebra.get_distance(
t.get_translation(), IMP.algebra.Vector2D(-2, 6)), 1e-4)
t = IMP.algebra.Transformation2D(r, v)
tt2 = t / t2
self.assertAlmostEqual(tt2.get_rotation().get_angle(), 0., delta=1e-4)
self.assertLess(IMP.algebra.get_distance(
tt2.get_translation(), IMP.algebra.Vector2D(-3, -1)), 1e-4)
t /= t2
self.assertAlmostEqual(t.get_rotation().get_angle(), 0., delta=1e-4)
self.assertLess(IMP.algebra.get_distance(
t.get_translation(), IMP.algebra.Vector2D(-3, -1)), 1e-4)
def METHOD_NAME(self):
"""Check building a Transformation2D from point sets"""
x1 = IMP.algebra.Vector2D(1, 2)
x2 = IMP.algebra.Vector2D(6, 8)
angle_applied = math.pi / 4.
shift_applied = IMP.algebra.Vector2D(-2, 4)
R = IMP.algebra.Rotation2D(angle_applied)
y1 = R.get_rotated(x1) + shift_applied
y2 = R.get_rotated(x2) + shift_applied
set1 = [x1, x2]
set2 = [y1, y2]
self.assertRaisesUsageException(
IMP.algebra.get_transformation_aligning_pair, set1, [y1])
T = IMP.algebra.get_transformation_aligning_pair(set1, set2)
self.assertAlmostEqual(angle_applied, T.get_rotation().get_angle(),
delta=.01)
self.assertAlmostEqual(shift_applied[0], T.get_translation()[0],
delta=.01)
self.assertAlmostEqual(shift_applied[1], T.get_translation()[1],
delta=.01)
def test_pickle(self):
"""Test (un-)pickle of Transformation2D"""
t1 = IMP.algebra.Transformation2D(IMP.algebra.Rotation2D(math.pi / 4.),
IMP.algebra.Vector2D(1, 2))
t2 = IMP.algebra.Transformation2D(IMP.algebra.Rotation2D(math.pi / 2.),
IMP.algebra.Vector2D(4, 3))
t2.foo = 'bar'
tdump = pickle.dumps((t1, t2))
newt1, newt2 = pickle.loads(tdump)
self.assertLess(
IMP.algebra.get_distance(t1.get_translation(),
newt1.get_translation()), 1e-4)
self.assertAlmostEqual(t1.get_rotation().get_angle(),
newt1.get_rotation().get_angle(),
delta=.01)
self.assertLess(
IMP.algebra.get_distance(t2.get_translation(),
newt2.get_translation()), 1e-4)
self.assertAlmostEqual(t2.get_rotation().get_angle(),
newt2.get_rotation().get_angle(),
delta=.01)
self.assertEqual(newt2.foo, 'bar')
if __name__ == '__main__':
IMP.test.main()
| null |
912 |
# This file is part of h5py, a Python interface to the HDF5 library.
#
# http://www.h5py.org
#
# Copyright 2008-2013 Andrew Collette and contributors
#
# License: Standard 3-clause BSD; see "license.txt" for full license terms
# and contributor agreement.
import unittest as ut
from h5py import h5p, h5f, version
from .common import TestCase
class TestLibver(TestCase):
"""
Feature: Setting/getting lib ver bounds
"""
def test_libver(self):
""" Test libver bounds set/get """
plist = h5p.create(h5p.FILE_ACCESS)
plist.set_libver_bounds(h5f.LIBVER_EARLIEST, h5f.LIBVER_LATEST)
self.assertEqual((h5f.LIBVER_EARLIEST, h5f.LIBVER_LATEST),
plist.get_libver_bounds())
@ut.skipIf(version.hdf5_version_tuple < (1, 10, 2),
'Requires HDF5 1.10.2 or later')
def test_libver_v18(self):
""" Test libver bounds set/get for H5F_LIBVER_V18"""
plist = h5p.create(h5p.FILE_ACCESS)
plist.set_libver_bounds(h5f.LIBVER_EARLIEST, h5f.LIBVER_V18)
self.assertEqual((h5f.LIBVER_EARLIEST, h5f.LIBVER_V18),
plist.get_libver_bounds())
@ut.skipIf(version.hdf5_version_tuple < (1, 10, 2),
'Requires HDF5 1.10.2 or later')
def test_libver_v110(self):
""" Test libver bounds set/get for H5F_LIBVER_V110"""
plist = h5p.create(h5p.FILE_ACCESS)
plist.set_libver_bounds(h5f.LIBVER_V18, h5f.LIBVER_V110)
self.assertEqual((h5f.LIBVER_V18, h5f.LIBVER_V110),
plist.get_libver_bounds())
@ut.skipIf(version.hdf5_version_tuple < (1, 11, 4),
'Requires HDF5 1.11.4 or later')
def test_libver_v112(self):
""" Test libver bounds set/get for H5F_LIBVER_V112"""
plist = h5p.create(h5p.FILE_ACCESS)
plist.set_libver_bounds(h5f.LIBVER_V18, h5f.LIBVER_V112)
self.assertEqual((h5f.LIBVER_V18, h5f.LIBVER_V112),
plist.get_libver_bounds())
class TestDA(TestCase):
'''
Feature: setting/getting chunk cache size on a dataset access property list
'''
def test_chunk_cache(self):
'''test get/set chunk cache '''
dalist = h5p.create(h5p.DATASET_ACCESS)
nslots = 10000 # 40kb hash table
nbytes = 1000000 # 1MB cache size
w0 = .5 # even blend of eviction strategy
dalist.set_chunk_cache(nslots, nbytes, w0)
self.assertEqual((nslots, nbytes, w0),
dalist.get_chunk_cache())
@ut.skipIf(version.hdf5_version_tuple < (1, 8, 17),
'Requires HDF5 1.8.17 or later')
def METHOD_NAME(self):
'''test get/set efile prefix '''
dalist = h5p.create(h5p.DATASET_ACCESS)
self.assertEqual(dalist.get_efile_prefix().decode(), '')
efile_prefix = "path/to/external/dataset"
dalist.set_efile_prefix(efile_prefix.encode('utf-8'))
self.assertEqual(dalist.get_efile_prefix().decode(),
efile_prefix)
efile_prefix = "${ORIGIN}"
dalist.set_efile_prefix(efile_prefix.encode('utf-8'))
self.assertEqual(dalist.get_efile_prefix().decode(),
efile_prefix)
@ut.skipIf(version.hdf5_version_tuple < (1, 10, 2),
'Requires HDF5 1.10.2 or later')
def test_virtual_prefix(self):
'''test get/set virtual prefix '''
dalist = h5p.create(h5p.DATASET_ACCESS)
self.assertEqual(dalist.get_virtual_prefix().decode(), '')
virtual_prefix = "path/to/virtual/dataset"
dalist.set_virtual_prefix(virtual_prefix.encode('utf-8'))
self.assertEqual(dalist.get_virtual_prefix().decode(),
virtual_prefix)
class TestFA(TestCase):
'''
Feature: setting/getting mdc config on a file access property list
'''
def test_mdc_config(self):
'''test get/set mdc config '''
falist = h5p.create(h5p.FILE_ACCESS)
config = falist.get_mdc_config()
falist.set_mdc_config(config)
def test_set_alignment(self):
        '''test get/set alignment '''
falist = h5p.create(h5p.FILE_ACCESS)
threshold = 10 * 1024 # threshold of 10kiB
        alignment = 1024 * 1024 # alignment of 1 MiB
falist.set_alignment(threshold, alignment)
self.assertEqual((threshold, alignment),
falist.get_alignment())
@ut.skipUnless(
version.hdf5_version_tuple >= (1, 12, 1) or
(version.hdf5_version_tuple[:2] == (1, 10) and version.hdf5_version_tuple[2] >= 7),
'Requires HDF5 1.12.1 or later or 1.10.x >= 1.10.7')
def test_set_file_locking(self):
'''test get/set file locking'''
falist = h5p.create(h5p.FILE_ACCESS)
use_file_locking = False
ignore_when_disabled = False
falist.set_file_locking(use_file_locking, ignore_when_disabled)
self.assertEqual((use_file_locking, ignore_when_disabled),
falist.get_file_locking())
class TestPL(TestCase):
def test_obj_track_times(self):
"""
tests if the object track times set/get
"""
# test for groups
gcid = h5p.create(h5p.GROUP_CREATE)
gcid.set_obj_track_times(False)
self.assertEqual(False, gcid.get_obj_track_times())
gcid.set_obj_track_times(True)
self.assertEqual(True, gcid.get_obj_track_times())
# test for datasets
dcid = h5p.create(h5p.DATASET_CREATE)
dcid.set_obj_track_times(False)
self.assertEqual(False, dcid.get_obj_track_times())
dcid.set_obj_track_times(True)
self.assertEqual(True, dcid.get_obj_track_times())
# test for generic objects
ocid = h5p.create(h5p.OBJECT_CREATE)
ocid.set_obj_track_times(False)
self.assertEqual(False, ocid.get_obj_track_times())
ocid.set_obj_track_times(True)
self.assertEqual(True, ocid.get_obj_track_times())
def test_link_creation_tracking(self):
"""
tests the link creation order set/get
"""
gcid = h5p.create(h5p.GROUP_CREATE)
gcid.set_link_creation_order(0)
self.assertEqual(0, gcid.get_link_creation_order())
flags = h5p.CRT_ORDER_TRACKED | h5p.CRT_ORDER_INDEXED
gcid.set_link_creation_order(flags)
self.assertEqual(flags, gcid.get_link_creation_order())
# test for file creation
fcpl = h5p.create(h5p.FILE_CREATE)
fcpl.set_link_creation_order(flags)
self.assertEqual(flags, fcpl.get_link_creation_order())
def test_attr_phase_change(self):
"""
test the attribute phase change
"""
cid = h5p.create(h5p.OBJECT_CREATE)
# test default value
ret = cid.get_attr_phase_change()
self.assertEqual((8,6), ret)
# max_compact must < 65536 (64kb)
with self.assertRaises(ValueError):
cid.set_attr_phase_change(65536, 6)
# Using dense attributes storage to avoid 64kb size limitation
# for a single attribute in compact attribute storage.
cid.set_attr_phase_change(0, 0)
self.assertEqual((0,0), cid.get_attr_phase_change())
| null |
913 |
import pytest
from lhotse.testing.dummies import dummy_cut, dummy_multi_cut, dummy_supervision
# Note: dummy_cut, dummy_multi_cut, and dummy_supervision have a duration of 1.0 by default.
@pytest.mark.parametrize(
"cut",
[
# MonoCut with single supervision
dummy_cut(0, supervisions=[dummy_supervision(0)]),
# MultiCut with single supervision
dummy_multi_cut(0, supervisions=[dummy_supervision(0)]),
],
)
def test_cut_fill_supervision_identity(cut):
fcut = cut.fill_supervision()
assert cut == fcut
@pytest.mark.parametrize(
"cut",
[
# MonoCut with single supervision
dummy_cut(0, supervisions=[dummy_supervision(0)]),
# MultiCut with single supervision
dummy_multi_cut(0, supervisions=[dummy_supervision(0)]),
],
)
def test_cut_fill_supervision_expand(cut):
cut.duration = 7.51
fcut = cut.fill_supervision()
# Original is not modified
assert cut.supervisions[0].start == 0
assert cut.supervisions[0].duration == 1
# Result is modified
assert fcut.supervisions[0].start == 0
assert fcut.supervisions[0].duration == 7.51
@pytest.mark.parametrize(
"cut",
[
# MonoCut with single supervision
dummy_cut(0, supervisions=[dummy_supervision(0)]),
# MultiCut with single supervision
dummy_multi_cut(0, supervisions=[dummy_supervision(0)]),
],
)
def test_cut_fill_supervision_shrink(cut):
cut.duration = 0.5
fcut = cut.fill_supervision(shrink_ok=True)
# Original is not modified
assert cut.supervisions[0].start == 0
assert cut.supervisions[0].duration == 1
# Result is modified
assert fcut.supervisions[0].start == 0
assert fcut.supervisions[0].duration == 0.5
@pytest.mark.parametrize(
"cut",
[
# MonoCut with single supervision
dummy_cut(0, supervisions=[dummy_supervision(0)]),
# MultiCut with single supervision
dummy_multi_cut(0, supervisions=[dummy_supervision(0)]),
],
)
def test_cut_fill_supervision_shrink_raises_default(cut):
cut.duration = 0.5
with pytest.raises(ValueError):
fcut = cut.fill_supervision()
@pytest.mark.parametrize(
"cut",
[
# MonoCut with no supervision
dummy_cut(0, supervisions=[]),
# MultiCut with no supervision
dummy_multi_cut(0, supervisions=[]),
],
)
def METHOD_NAME(cut):
fcut = cut.fill_supervision()
# Original is not modified
assert len(cut.supervisions) == 0
# Result is modified
assert fcut.supervisions[0].start == 0
assert fcut.supervisions[0].duration == 1
@pytest.mark.parametrize(
"cut",
[
# MonoCut with no supervision
dummy_cut(0, supervisions=[]),
# MultiCut with no supervision
dummy_multi_cut(0, supervisions=[]),
],
)
def test_cut_fill_supervision_add_empty_false(cut):
fcut = cut.fill_supervision(add_empty=False)
assert cut == fcut
def test_mono_cut_fill_supervision_raises_on_two_supervisions():
cut = dummy_cut(0, supervisions=[dummy_supervision(0), dummy_supervision(1)])
with pytest.raises(AssertionError):
fcut = cut.fill_supervision()
def test_multi_cut_fill_supervision_raises_on_two_supervisions():
cut = dummy_multi_cut(0, supervisions=[dummy_supervision(0), dummy_supervision(1)])
with pytest.raises(AssertionError):
fcut = cut.fill_supervision()
def test_mixed_cut_fill_supervision_identity():
cut = dummy_cut(0, supervisions=[dummy_supervision(0)])
cut = cut.mix(dummy_cut(1)) # cuts are 100% overlapping
fcut = cut.fill_supervision()
assert cut == fcut
def test_mixed_cut_fill_supervision_expand():
cut = dummy_cut(0, supervisions=[dummy_supervision(0)])
cut = cut.pad(duration=7.51)
fcut = cut.fill_supervision()
# Original is not modified
assert cut.supervisions[0].start == 0
assert cut.supervisions[0].duration == 1
# Result is modified
assert fcut.supervisions[0].start == 0
assert fcut.supervisions[0].duration == 7.51
def test_mixed_cut_fill_supervision_shrink():
cut = dummy_cut(0, supervisions=[dummy_supervision(0)])
cut = cut.mix(dummy_cut(1)).truncate(duration=0.5) # cuts are 100% overlapping
fcut = cut.fill_supervision(shrink_ok=True)
# Original is not modified
assert cut.supervisions[0].start == 0
assert cut.supervisions[0].duration == 1
# Result is modified
assert fcut.supervisions[0].start == 0
assert fcut.supervisions[0].duration == 0.5
def test_mixed_cut_fill_supervision_shrink_raises_default():
cut = dummy_cut(0, supervisions=[dummy_supervision(0)])
cut = cut.mix(dummy_cut(1)).truncate(duration=0.5) # cuts are 100% overlapping
with pytest.raises(ValueError):
fcut = cut.fill_supervision()
def test_mixed_cut_fill_supervision_add_empty_true():
cut = dummy_cut(0)
cut = cut.pad(duration=10)
fcut = cut.fill_supervision()
# Original is not modified
assert len(cut.supervisions) == 0
# Result is modified
assert fcut.supervisions[0].start == 0
assert fcut.supervisions[0].duration == 10
def test_mixed_cut_fill_supervision_add_empty_false():
cut = dummy_cut(0)
cut = cut.pad(duration=10)
fcut = cut.fill_supervision(add_empty=False)
assert cut == fcut
def test_mixed_cut_fill_supervision_raises_on_two_supervisions():
cut = dummy_cut(0, supervisions=[dummy_supervision(0), dummy_supervision(1)])
cut = cut.pad(duration=10)
with pytest.raises(AssertionError):
fcut = cut.fill_supervision()
| null |
914 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import abc
import logging
from typing import Callable, Optional
import gym
import numpy as np
import reagent.core.types as rlt
import torch
from gym import spaces
from reagent.core.dataclasses import dataclass
from reagent.core.parameters import CONTINUOUS_TRAINING_ACTION_RANGE
from reagent.core.registry_meta import RegistryMeta
from reagent.training.utils import rescale_actions
# types for reference
ObsPreprocessor = Callable[[np.ndarray], rlt.FeatureData]
ServingObsPreprocessor = Callable[[np.ndarray], rlt.ServingFeatureData]
ActionExtractor = Callable[[rlt.ActorOutput], np.ndarray]
ServingActionExtractor = ActionExtractor
CONTINUOUS_MODEL_LOW = torch.tensor(CONTINUOUS_TRAINING_ACTION_RANGE[0])
CONTINUOUS_MODEL_HIGH = torch.tensor(CONTINUOUS_TRAINING_ACTION_RANGE[1])
logger = logging.getLogger(__name__)
@dataclass
class EnvWrapper(gym.core.Wrapper, metaclass=RegistryMeta):
"""Wrapper around it's environment, to simplify configuration."""
def __post_init_post_parse__(self):
super().__init__(self.make())
logger.info(
f"Env: {self.env};\n"
f"observation_space: {self.env.observation_space};\n"
f"action_space: {self.env.action_space};"
)
@abc.abstractmethod
def make(self) -> gym.Env:
pass
@abc.abstractmethod
def obs_preprocessor(self, obs: np.ndarray) -> rlt.FeatureData:
pass
@abc.abstractmethod
def serving_obs_preprocessor(self, obs: np.ndarray) -> rlt.ServingFeatureData:
pass
def get_obs_preprocessor(self, *ctor_args, **ctor_kwargs):
# ctor_args go to .to call
ctor_kwargs["non_blocking"] = True
return lambda *args, **kwargs: self.obs_preprocessor(*args, **kwargs).to(
*ctor_args, **ctor_kwargs
)
def get_serving_obs_preprocessor(self):
return lambda *args, **kwargs: self.serving_obs_preprocessor(*args, **kwargs)
def action_extractor(self, actor_output: rlt.ActorOutput) -> torch.Tensor:
action = actor_output.action
action_space = self.action_space
# Canonical rule to return one-hot encoded actions for discrete
assert (
len(action.shape) == 2 and action.shape[0] == 1
), f"{action} (shape: {action.shape}) is not a single action!"
if isinstance(action_space, spaces.Discrete):
return action.squeeze(0).argmax()
elif isinstance(action_space, spaces.MultiDiscrete):
return action.squeeze(0)
# Canonical rule to scale actions to CONTINUOUS_TRAINING_ACTION_RANGE
elif isinstance(action_space, spaces.Box):
assert len(action_space.shape) == 1, f"{action_space} not supported."
return rescale_actions(
action.squeeze(0),
new_min=torch.tensor(action_space.low),
new_max=torch.tensor(action_space.high),
prev_min=CONTINUOUS_MODEL_LOW,
prev_max=CONTINUOUS_MODEL_HIGH,
)
else:
raise NotImplementedError(f"Unsupported action space: {action_space}")
def METHOD_NAME(self, actor_output: rlt.ActorOutput) -> torch.Tensor:
action = actor_output.action
action_space = self.action_space
assert (
len(action.shape) == 2 and action.shape[0] == 1
), f"{action.shape} isn't (1, action_dim)"
if isinstance(action_space, spaces.Discrete):
return action.squeeze(0).argmax().view([])
elif isinstance(action_space, spaces.MultiDiscrete):
return action.squeeze(0)
elif isinstance(action_space, spaces.Box):
assert (
len(action_space.shape) == 1
), f"Unsupported Box with shape {action_space.shape}"
return action.squeeze(0)
else:
raise NotImplementedError(f"Unsupported action space: {action_space}")
def get_action_extractor(self):
return (
lambda *args, **kwargs: self.action_extractor(*args, **kwargs).cpu().numpy()
)
def get_serving_action_extractor(self):
return (
lambda *args, **kwargs: self.METHOD_NAME(*args, **kwargs)
.cpu()
.numpy()
)
# TODO: add more methods to simplify gym code
# e.g. normalization, specific preprocessor, etc.
# This can move a lot of the if statements from create_from_env methods.
@property
def max_steps(self) -> Optional[int]:
possible_keys = [
# gym should have _max_episode_steps
"_max_episode_steps",
# Minigrid should have max_steps
"max_steps",
]
for key in possible_keys:
res = getattr(self.env, key, None)
if res is not None:
return res
return None
@property
def possible_actions_mask(self) -> Optional[np.ndarray]:
ret = getattr(self.env, "possible_actions_mask", None)
if ret is not None:
ret = ret.copy()
return ret
| null |
915 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkoutboundbot.endpoint import endpoint_data
class SearchTaskRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'OutboundBot', '2019-12-26', 'SearchTask')
self.set_method('GET')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ActualTimeLte(self): # Long
return self.get_query_params().get('ActualTimeLte')
def set_ActualTimeLte(self, ActualTimeLte): # Long
self.add_query_param('ActualTimeLte', ActualTimeLte)
def get_OtherId(self): # String
return self.get_query_params().get('OtherId')
def set_OtherId(self, OtherId): # String
self.add_query_param('OtherId', OtherId)
def get_TaskCreateTimeLte(self): # Long
return self.get_query_params().get('TaskCreateTimeLte')
def set_TaskCreateTimeLte(self, TaskCreateTimeLte): # Long
self.add_query_param('TaskCreateTimeLte', TaskCreateTimeLte)
def get_JobId(self): # String
return self.get_query_params().get('JobId')
def set_JobId(self, JobId): # String
self.add_query_param('JobId', JobId)
def get_TaskCreateTimeGte(self): # Long
return self.get_query_params().get('TaskCreateTimeGte')
def set_TaskCreateTimeGte(self, TaskCreateTimeGte): # Long
self.add_query_param('TaskCreateTimeGte', TaskCreateTimeGte)
def get_CalledNumber(self): # String
return self.get_query_params().get('CalledNumber')
def set_CalledNumber(self, CalledNumber): # String
self.add_query_param('CalledNumber', CalledNumber)
def get_UserIdMatch(self): # String
return self.get_query_params().get('UserIdMatch')
def set_UserIdMatch(self, UserIdMatch): # String
self.add_query_param('UserIdMatch', UserIdMatch)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_ScriptNameQuery(self): # String
return self.get_query_params().get('ScriptNameQuery')
def set_ScriptNameQuery(self, ScriptNameQuery): # String
self.add_query_param('ScriptNameQuery', ScriptNameQuery)
def get_PageIndex(self): # Integer
return self.get_query_params().get('PageIndex')
def set_PageIndex(self, PageIndex): # Integer
self.add_query_param('PageIndex', PageIndex)
def get_SortOrder(self): # String
return self.get_query_params().get('SortOrder')
def set_SortOrder(self, SortOrder): # String
self.add_query_param('SortOrder', SortOrder)
def get_TaskStatusStringList(self): # String
return self.get_query_params().get('TaskStatusStringList')
def set_TaskStatusStringList(self, TaskStatusStringList): # String
self.add_query_param('TaskStatusStringList', TaskStatusStringList)
def get_JobGroupNameQuery(self): # String
return self.get_query_params().get('JobGroupNameQuery')
def set_JobGroupNameQuery(self, JobGroupNameQuery): # String
self.add_query_param('JobGroupNameQuery', JobGroupNameQuery)
def get_TaskId(self): # String
return self.get_query_params().get('TaskId')
def set_TaskId(self, TaskId): # String
self.add_query_param('TaskId', TaskId)
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
def get_RecordingDurationGte(self): # Long
return self.get_query_params().get('RecordingDurationGte')
def set_RecordingDurationGte(self, RecordingDurationGte): # Long
self.add_query_param('RecordingDurationGte', RecordingDurationGte)
def get_CallDurationLte(self): # Long
return self.get_query_params().get('CallDurationLte')
def set_CallDurationLte(self, CallDurationLte): # Long
self.add_query_param('CallDurationLte', CallDurationLte)
def get_JobGroupId(self): # String
return self.get_query_params().get('JobGroupId')
def set_JobGroupId(self, JobGroupId): # String
self.add_query_param('JobGroupId', JobGroupId)
def get_SortBy(self): # String
return self.get_query_params().get('SortBy')
def set_SortBy(self, SortBy): # String
self.add_query_param('SortBy', SortBy)
def get_JobStatusStringList(self): # String
return self.get_query_params().get('JobStatusStringList')
def set_JobStatusStringList(self, JobStatusStringList): # String
self.add_query_param('JobStatusStringList', JobStatusStringList)
def get_ActualTimeGte(self): # Long
return self.get_query_params().get('ActualTimeGte')
def set_ActualTimeGte(self, ActualTimeGte): # Long
self.add_query_param('ActualTimeGte', ActualTimeGte)
def get_CallDurationGte(self): # Long
return self.get_query_params().get('CallDurationGte')
def METHOD_NAME(self, CallDurationGte): # Long
self.add_query_param('CallDurationGte', CallDurationGte)
def get_RecordingDurationLte(self): # Long
return self.get_query_params().get('RecordingDurationLte')
def set_RecordingDurationLte(self, RecordingDurationLte): # Long
self.add_query_param('RecordingDurationLte', RecordingDurationLte)
| null |
916 |
# Copyright (c) 2012 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from topologies.BaseTopology import BaseTopology
class Cluster(BaseTopology):
"""A cluster is a group of nodes which are all one hop from eachother
Clusters can also contain other clusters
When creating this kind of topology, return a single cluster (usually
the root cluster) from create_system in configs/ruby/<protocol>.py
"""
_num_int_links = 0
_num_ext_links = 0
_num_routers = 0
# Below methods for auto counting
@classmethod
def num_int_links(cls):
cls._num_int_links += 1
return cls._num_int_links - 1
@classmethod
def num_ext_links(cls):
cls._num_ext_links += 1
return cls._num_ext_links - 1
@classmethod
def num_routers(cls):
cls._num_routers += 1
return cls._num_routers - 1
def __init__(self, intBW=0, extBW=0, intLatency=0, extLatency=0):
"""internalBandwidth is bandwidth of all links within the cluster
externalBandwidth is bandwidth from this cluster to any cluster
connecting to it.
internal/externalLatency are similar
**** When creating a cluster with sub-clusters, the sub-cluster
external bandwidth overrides the internal bandwidth of the
super cluster
"""
self.nodes = []
self.router = None # created in makeTopology
self.intBW = intBW
self.extBW = extBW
self.intLatency = intLatency
self.extLatency = extLatency
def add(self, node):
self.nodes.append(node)
def METHOD_NAME(self, options, network, IntLink, ExtLink, Router):
"""Recursively make all of the links and routers"""
# make a router to connect all of the nodes
self.router = Router(router_id=self.num_routers())
network.routers.append(self.router)
for node in self.nodes:
if type(node) == Cluster:
node.METHOD_NAME(options, network, IntLink, ExtLink, Router)
# connect this cluster to the router
link_out = IntLink(
link_id=self.num_int_links(),
src_node=self.router,
dst_node=node.router,
)
link_in = IntLink(
link_id=self.num_int_links(),
src_node=node.router,
dst_node=self.router,
)
if node.extBW:
link_out.bandwidth_factor = node.extBW
link_in.bandwidth_factor = node.extBW
# if there is an internal b/w for this node
# and no ext b/w to override
elif self.intBW:
link_out.bandwidth_factor = self.intBW
link_in.bandwidth_factor = self.intBW
if node.extLatency:
link_out.latency = node.extLatency
link_in.latency = node.extLatency
elif self.intLatency:
link_out.latency = self.intLatency
link_in.latency = self.intLatency
network.int_links.append(link_out)
network.int_links.append(link_in)
else:
# node is just a controller,
                # connect it to the router via an ext_link
link = ExtLink(
link_id=self.num_ext_links(),
ext_node=node,
int_node=self.router,
)
if self.intBW:
link.bandwidth_factor = self.intBW
if self.intLatency:
link.latency = self.intLatency
network.ext_links.append(link)
def __len__(self):
return len([i for i in self.nodes if type(i) != Cluster]) + sum(
[len(i) for i in self.nodes if type(i) == Cluster]
)
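# Hedged usage sketch (not part of the upstream file): one way nested clusters
# might be assembled in a configs/ruby/<protocol>.py create_system(); the
# cpu_controllers and dir_controllers names below are hypothetical placeholders.
#
#   root = Cluster(intBW=64, intLatency=1)
#   for cntrl in cpu_controllers:
#       sub = Cluster(extBW=16, extLatency=4)
#       sub.add(cntrl)
#       root.add(sub)
#   for cntrl in dir_controllers:
#       root.add(cntrl)
#   # The sub-clusters' extBW/extLatency override root.intBW/intLatency on the
#   # links that connect them, as handled by the method above.
#   return root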
| null |
917 |
"""
This type stub file was generated by pyright.
"""
from collections import UserDict
from celery.utils.serialization import strtobool
"""Worker remote control command implementations."""
__all__ = ("Panel",)
DEFAULT_TASK_INFO_ITEMS = ...
logger = ...
controller_info_t = ...
def ok(value): ...
def nok(value): ...
class Panel(UserDict):
"""Global registry of remote control commands."""
data = ...
meta = ...
@classmethod
def register(cls, *args, **kwargs): ...
def METHOD_NAME(**kwargs): ...
def inspect_command(**kwargs): ...
@inspect_command()
def report(state): # -> dict[str, Unknown]:
"""Information about Celery installation for bug reports."""
...
@inspect_command(
alias="dump_conf",
signature="[include_defaults=False]",
args=[("with_defaults", strtobool)],
)
def conf(
state, with_defaults=..., **kwargs
): # -> dict[Unknown, Unknown | Any] | list[Unknown] | dict[Unknown, Unknown] | str:
"""List configuration."""
...
@inspect_command(variadic="ids", signature="[id1 [id2 [... [idN]]]]")
def query_task(
state, ids, **kwargs
): # -> dict[Unknown, tuple[Literal['active', 'reserved', 'ready'], Unknown]]:
"""Query for task information by id."""
...
@METHOD_NAME(variadic="task_id", signature="[id1 [id2 [... [idN]]]]")
def revoke(state, task_id, terminate=..., signal=..., **kwargs): # -> dict[str, str]:
"""Revoke task by task id (or list of ids).
Keyword Arguments:
terminate (bool): Also terminate the process if the task is active.
signal (str): Name of signal to use for terminate (e.g., ``KILL``).
"""
...
@METHOD_NAME(
variadic="task_id",
args=[("signal", str)],
signature="<signal> [id1 [id2 [... [idN]]]]",
)
def terminate(state, signal, task_id, **kwargs): # -> dict[str, str]:
"""Terminate task by task id (or list of ids)."""
...
@METHOD_NAME(
args=[("task_name", str), ("rate_limit", str)],
signature="<task_name> <rate_limit (e.g., 5/s | 5/m | 5/h)>",
)
def rate_limit(state, task_name, rate_limit, **kwargs): # -> dict[str, str]:
"""Tell worker(s) to modify the rate limit for a task by type.
See Also:
:attr:`celery.app.task.Task.rate_limit`.
Arguments:
task_name (str): Type of task to set rate limit for.
rate_limit (int, str): New rate limit.
"""
...
@METHOD_NAME(
args=[("task_name", str), ("soft", float), ("hard", float)],
signature="<task_name> <soft_secs> [hard_secs]",
)
def time_limit(state, task_name=..., hard=..., soft=..., **kwargs): # -> dict[str, str]:
"""Tell worker(s) to modify the time limit for task by type.
Arguments:
task_name (str): Name of task to change.
hard (float): Hard time limit.
soft (float): Soft time limit.
"""
...
@inspect_command()
def clock(state, **kwargs): # -> dict[str, Unknown]:
"""Get current logical clock value."""
...
@METHOD_NAME()
def election(state, id, topic, action=..., **kwargs): # -> None:
"""Hold election.
Arguments:
id (str): Unique election id.
topic (str): Election topic.
action (str): Action to take for elected actor.
"""
...
@METHOD_NAME()
def enable_events(state): # -> dict[str, str]:
"""Tell worker(s) to send task-related events."""
...
@METHOD_NAME()
def disable_events(state): # -> dict[str, str]:
"""Tell worker(s) to stop sending task-related events."""
...
@METHOD_NAME()
def heartbeat(state): # -> None:
"""Tell worker(s) to send event heartbeat immediately."""
...
@inspect_command(visible=False)
def hello(
state, from_node, revoked=..., **kwargs
): # -> dict[str, Unknown | dict[Unknown, Unknown]] | None:
"""Request mingle sync-data."""
...
@inspect_command(default_timeout=0.2)
def ping(state, **kwargs): # -> dict[str, str]:
"""Ping worker(s)."""
...
@inspect_command()
def stats(state, **kwargs):
"""Request worker statistics/information."""
...
@inspect_command(alias="dump_schedule")
def scheduled(
state, **kwargs
): # -> list[dict[str, Unknown | str | dict[str, Unknown | bool | dict[str, Unknown | Any | None] | None] | None]]:
"""List of currently scheduled ETA/countdown tasks."""
...
@inspect_command(alias="dump_reserved")
def reserved(state, **kwargs): # -> list[Unknown]:
"""List of currently reserved tasks, not including scheduled/active."""
...
@inspect_command(alias="dump_active")
def active(state, safe=..., **kwargs): # -> list[Unknown]:
"""List of tasks currently being executed."""
...
@inspect_command(alias="dump_revoked")
def revoked(state, **kwargs): # -> list[Unknown]:
"""List of revoked task-ids."""
...
@inspect_command(
alias="dump_tasks",
variadic="taskinfoitems",
signature="[attr1 [attr2 [... [attrN]]]]",
)
def registered(
state, taskinfoitems=..., builtins=..., **kwargs
): # -> list[str | Unknown]:
"""List of registered tasks.
Arguments:
taskinfoitems (Sequence[str]): List of task attributes to include.
Defaults to ``exchange,routing_key,rate_limit``.
builtins (bool): Also include built-in tasks.
"""
...
@inspect_command(
default_timeout=60,
args=[("type", str), ("num", int), ("max_depth", int)],
signature="[object_type=Request] [num=200 [max_depth=10]]",
)
def objgraph(state, num=..., max_depth=..., type=...): # -> dict[str, str]:
"""Create graph of uncollected objects (memory-leak debugging).
Arguments:
num (int): Max number of objects to graph.
max_depth (int): Traverse at most n levels deep.
type (str): Name of object to graph. Default is ``"Request"``.
"""
...
@inspect_command()
def memsample(state, **kwargs): # -> str | None:
"""Sample current RSS memory usage."""
...
@inspect_command(args=[("samples", int)], signature="[n_samples=10]")
def memdump(state, samples=..., **kwargs): # -> str:
"""Dump statistics of previous memsample requests."""
...
@METHOD_NAME(args=[("n", int)], signature="[N=1]")
def pool_grow(state, n=..., **kwargs): # -> dict[str, str]:
"""Grow pool by n processes/threads."""
...
@METHOD_NAME(args=[("n", int)], signature="[N=1]")
def pool_shrink(state, n=..., **kwargs): # -> dict[str, str]:
"""Shrink pool by n processes/threads."""
...
@METHOD_NAME()
def pool_restart(
state, modules=..., reload=..., reloader=..., **kwargs
): # -> dict[str, str]:
"""Restart execution pool."""
...
@METHOD_NAME(args=[("max", int), ("min", int)], signature="[max [min]]")
def autoscale(state, max=..., min=...): # -> dict[str, str]:
"""Modify autoscale settings."""
...
@METHOD_NAME()
def shutdown(state, msg=..., **kwargs):
"""Shutdown worker(s)."""
...
@METHOD_NAME(
args=[
("queue", str),
("exchange", str),
("exchange_type", str),
("routing_key", str),
],
signature="<queue> [exchange [type [routing_key]]]",
)
def add_consumer(
state, queue, exchange=..., exchange_type=..., routing_key=..., **options
): # -> dict[str, str]:
"""Tell worker(s) to consume from task queue by name."""
...
@METHOD_NAME(args=[("queue", str)], signature="<queue>")
def cancel_consumer(state, queue, **_): # -> dict[str, str]:
"""Tell worker(s) to stop consuming from task queue by name."""
...
@inspect_command()
def active_queues(state): # -> list[dict[Unknown, Unknown]]:
"""List the task queues a worker is currently consuming from."""
...
| null |
918 |
import search_quality_test
from termcolor import colored
def print_specific_queries_result(
current: list[search_quality_test.Evaluation],
comparison: list[search_quality_test.Evaluation],
) -> None:
"""Print the results of the specific queries"""
comparison_dict = {comp.query: comp for comp in comparison}
for search in current:
comp = comparison_dict[search.query]
s_pos_indicator = _gen_pos_indicator(search)
# Grade
s_grade = {
"1.0": colored("1.0", "green", attrs=["bold"]),
"2.0": colored("2.0", "green", attrs=[]),
"3.0": colored("3.0", "yellow", attrs=["bold"]),
"4.0": colored("4.0", "yellow", attrs=[]),
"4.7": colored("4.7", "red", attrs=[]),
"5.0": colored("5.0", "red", attrs=["bold"]),
}[str(round(search.grade, 1))]
# Grade cmp
s_cmp = METHOD_NAME(search, comp)
s_query = _gen_colored_query(search)
s_stats = _gen_colored_stats(search)
print(f"{s_pos_indicator} {s_grade}{s_cmp} {s_query} {s_stats}")
num_searches = sum(len(s.query.query) + 1 for s in current)
print(f"Performed {num_searches} searches")
def _gen_colored_query(search: search_quality_test.Evaluation) -> str:
"""
Generates the colored Query
- Green indicates when a better position is reached
- White (not formatted) indicates minimum to reach top 5
- Underline indicates minimum to reach final position
"""
green_end_pos = (
search.len_to_best_pos
if (
search.best_pos is not None
and search.len_to_best_pos is not None
and search.full_search.target_pos is not None
and (not search.full_search.was_successful or search.best_pos < search.full_search.target_pos)
)
else 0
)
white_end_pos = search.len_to_reach_top_5 if search.len_to_reach_top_5 is not None else 0
underline_end_pos = search.len_to_reach_final if search.len_to_reach_final is not None else 0
s_query = ""
for i, query_char in enumerate(search.query.query):
# This is not the best way of formatting, but sufficient here
if i >= green_end_pos and i >= white_end_pos:
s_query += colored(
str(query_char),
color="white", # this is gray
attrs=(["underline"] if i < underline_end_pos else []),
)
elif green_end_pos < white_end_pos:
s_query += colored(
str(query_char),
color="green" if i < green_end_pos else None,
attrs=(["underline"] if i < underline_end_pos else []),
)
else:
s_query += colored(
str(query_char),
color=None if i < white_end_pos else "green",
attrs=(["underline"] if i < underline_end_pos else []),
)
s_query += " " * max(0, 50 - len(search.query.query))
return s_query
def METHOD_NAME(search: search_quality_test.Evaluation, comp: search_quality_test.Evaluation) -> str:
grade_diff = abs(round(search.grade - comp.grade, 1))
if comp.grade > search.grade:
return colored(f" +{grade_diff}", "red", attrs=["bold"])
if comp.grade < search.grade:
return colored(f" -{grade_diff}", "green", attrs=["bold"])
return " "
def _gen_colored_stats(search: search_quality_test.Evaluation) -> str:
"""Generate the colored statistics"""
num_results = search.full_search.num_results
return f"{num_results:>4}" + colored(" hits, target: '", "white") + search.query.target + colored("')", "white")
def _gen_pos_indicator(search: search_quality_test.Evaluation) -> str:
"""The position indicator shows rougly how the results looked like and where the target entry was located"""
if search.full_search.was_top5:
target_pos = search.full_search.target_pos or 0
return (
colored("[", "white")
+ " " * target_pos
+ colored("*", "cyan", attrs=["bold"])
+ " " * (min(search.full_search.num_results, 5) - target_pos - 1)
+ colored("]", "white")
+ colored("-" * (5 - min(search.full_search.num_results, 5)), "white")
+ " "
)
if search.full_search.was_top20:
return colored("[ ]", "white") + colored(">", "yellow") + " "
if search.full_search.num_results > 0:
return (
colored("[", "white")
+ colored("x" * min(search.full_search.num_results, 5), "red")
+ colored("]", "white")
+ colored("-" * (5 - min(search.full_search.num_results, 5)), "white")
+ " "
)
return colored("[]-----", "red") + " "
| null |
919 |
# Copyright (c) ZenML GmbH 2022. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Test framework configuration."""
from typing import TYPE_CHECKING, Any, List, Sequence, Tuple
from pydantic import BaseModel, Field
from tests.harness.model.base import BaseTestConfigModel
from tests.harness.model.deployment import DeploymentConfig
from tests.harness.model.environment import EnvironmentConfig
from tests.harness.model.requirements import TestRequirements
from tests.harness.model.secret import Secret
from tests.harness.model.test import TestConfig
if TYPE_CHECKING:
from tests.harness.harness import TestHarness
class Configuration(BaseTestConfigModel):
"""ZenML configuration settings."""
deployments: List[DeploymentConfig] = Field(default_factory=list)
secrets: List[Secret] = Field(default_factory=list)
tests: List[TestConfig] = Field(default_factory=list)
requirements: List[TestRequirements] = Field(default_factory=list)
environments: List[EnvironmentConfig] = Field(default_factory=list)
_config_file: str
def __init__(self, config_file: str, **data: Any) -> None:
"""Initializes the ZenML test configuration.
Args:
config_file: The path to the configuration file from which this
configuration was loaded.
**data: configuration data keyword arguments.
"""
self._config_file = config_file
super().__init__(**data)
def merge(self, config: "Configuration") -> None:
"""Updates the configuration with the contents of another configuration.
Args:
config: The configuration to merge into this one.
Raises:
ValueError: If there are duplicate keys in the lists of
deployments, secrets, tests, requirements or environments.
"""
def check_duplicate_keys(
entries: Sequence[BaseModel], key_attr: str
) -> List[str]:
"""Checks for duplicated keys.
Args:
entries: List of pydantic objects.
key_attr: The attribute to use as key.
Returns:
A list of duplicate keys.
"""
keys = [getattr(entry, key_attr) for entry in entries]
return [key for key in keys if keys.count(key) > 1]
self.deployments += config.deployments
self.secrets += config.secrets
self.tests += config.tests
self.requirements += config.requirements
self.environments += config.environments
# Check each list for duplicate keys after merging
lists_to_check: Sequence[Tuple[List[Any], str, str]] = [
(self.deployments, "name", "deployment"),
(self.secrets, "name", "secret"),
(self.tests, "module", "test requirement"),
(self.requirements, "name", "global requirement"),
(self.environments, "name", "environment"),
]
for item_list, key_attr, item_name in lists_to_check:
duplicates = check_duplicate_keys(item_list, key_attr)
if duplicates:
raise ValueError(
f"Configuration error: {item_name}s with duplicate "
f"names loaded from configuration file "
f"`{self._config_file}`: {', '.join(duplicates)}"
)
def METHOD_NAME(self, harness: "TestHarness") -> None:
"""Validates and compiles the configuration.
Args:
harness: The test harness to validate against.
"""
lists_to_compile: Sequence[List[Any]] = [
self.secrets,
self.deployments,
self.requirements,
self.environments,
self.tests,
]
for item_list in lists_to_compile:
for item in item_list:
item.METHOD_NAME(harness)
| null |
920 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdomain.endpoint import endpoint_data
class SaveTaskForUpdatingRegistrantInfoByIdentityCredentialRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Domain', '2018-01-29', 'SaveTaskForUpdatingRegistrantInfoByIdentityCredential')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Country(self): # String
return self.get_query_params().get('Country')
def set_Country(self, Country): # String
self.add_query_param('Country', Country)
def get_IdentityCredentialType(self): # String
return self.get_query_params().get('IdentityCredentialType')
def set_IdentityCredentialType(self, IdentityCredentialType): # String
self.add_query_param('IdentityCredentialType', IdentityCredentialType)
def get_City(self): # String
return self.get_query_params().get('City')
def set_City(self, City): # String
self.add_query_param('City', City)
def get_IdentityCredential(self): # String
return self.get_body_params().get('IdentityCredential')
def set_IdentityCredential(self, IdentityCredential): # String
self.add_body_params('IdentityCredential', IdentityCredential)
def get_TransferOutProhibited(self): # Boolean
return self.get_query_params().get('TransferOutProhibited')
def set_TransferOutProhibited(self, TransferOutProhibited): # Boolean
self.add_query_param('TransferOutProhibited', TransferOutProhibited)
def get_ZhCity(self): # String
return self.get_query_params().get('ZhCity')
def set_ZhCity(self, ZhCity): # String
self.add_query_param('ZhCity', ZhCity)
def get_TelExt(self): # String
return self.get_query_params().get('TelExt')
def set_TelExt(self, TelExt): # String
self.add_query_param('TelExt', TelExt)
def get_Province(self): # String
return self.get_query_params().get('Province')
def set_Province(self, Province): # String
self.add_query_param('Province', Province)
def get_ZhRegistrantName(self): # String
return self.get_query_params().get('ZhRegistrantName')
def set_ZhRegistrantName(self, ZhRegistrantName): # String
self.add_query_param('ZhRegistrantName', ZhRegistrantName)
def get_PostalCode(self): # String
return self.get_query_params().get('PostalCode')
def set_PostalCode(self, PostalCode): # String
self.add_query_param('PostalCode', PostalCode)
def get_Lang(self): # String
return self.get_query_params().get('Lang')
def set_Lang(self, Lang): # String
self.add_query_param('Lang', Lang)
def get_Email(self): # String
return self.get_query_params().get('Email')
def set_Email(self, Email): # String
self.add_query_param('Email', Email)
def get_ZhRegistrantOrganization(self): # String
return self.get_query_params().get('ZhRegistrantOrganization')
def set_ZhRegistrantOrganization(self, ZhRegistrantOrganization): # String
self.add_query_param('ZhRegistrantOrganization', ZhRegistrantOrganization)
def get_Address(self): # String
return self.get_query_params().get('Address')
def set_Address(self, Address): # String
self.add_query_param('Address', Address)
def get_TelArea(self): # String
return self.get_query_params().get('TelArea')
def set_TelArea(self, TelArea): # String
self.add_query_param('TelArea', TelArea)
def get_ZhAddress(self): # String
return self.get_query_params().get('ZhAddress')
def set_ZhAddress(self, ZhAddress): # String
self.add_query_param('ZhAddress', ZhAddress)
def get_RegistrantType(self): # String
return self.get_query_params().get('RegistrantType')
def set_RegistrantType(self, RegistrantType): # String
self.add_query_param('RegistrantType', RegistrantType)
def get_DomainNames(self): # RepeatList
return self.get_query_params().get('DomainName')
def set_DomainNames(self, DomainName): # RepeatList
for depth1 in range(len(DomainName)):
self.add_query_param('DomainName.' + str(depth1 + 1), DomainName[depth1])
def get_Telephone(self): # String
return self.get_query_params().get('Telephone')
def set_Telephone(self, Telephone): # String
self.add_query_param('Telephone', Telephone)
def get_ZhProvince(self): # String
return self.get_query_params().get('ZhProvince')
def set_ZhProvince(self, ZhProvince): # String
self.add_query_param('ZhProvince', ZhProvince)
def get_RegistrantOrganization(self): # String
return self.get_query_params().get('RegistrantOrganization')
def set_RegistrantOrganization(self, RegistrantOrganization): # String
self.add_query_param('RegistrantOrganization', RegistrantOrganization)
def get_UserClientIp(self): # String
return self.get_query_params().get('UserClientIp')
def set_UserClientIp(self, UserClientIp): # String
self.add_query_param('UserClientIp', UserClientIp)
def METHOD_NAME(self): # String
return self.get_query_params().get('IdentityCredentialNo')
def set_IdentityCredentialNo(self, IdentityCredentialNo): # String
self.add_query_param('IdentityCredentialNo', IdentityCredentialNo)
def get_RegistrantName(self): # String
return self.get_query_params().get('RegistrantName')
def set_RegistrantName(self, RegistrantName): # String
self.add_query_param('RegistrantName', RegistrantName)
| null |
921 |
# Copyright 2021-2023 AIPlan4EU project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import List, Set, Optional, Tuple, Iterable
from unified_planning.environment import get_environment
from unified_planning.model import FNode, TimepointKind, Timing, StartTiming, EndTiming
from unified_planning.model.walkers import AnyChecker
class TemporalConstraints:
"""A set of temporal constraints on a set of timepoints.
`TotalOrder` and `PartialOrder` are two special cases that are represented as subclasses.
"""
def __init__(self, constraints: List[FNode]):
self.constraints = constraints
def __repr__(self):
return str(self.constraints)
class PartialOrder(TemporalConstraints):
"""A purely qualitative set of constraints that define a partial order on its elements."""
def __init__(self, precedences: List[Tuple[str, str]]):
self.precedences = precedences
constraints = [
get_environment().expression_manager.LT(
EndTiming(container=a), StartTiming(container=b)
)
for (a, b) in precedences
]
super().__init__(constraints)
def __repr__(self):
precs = map(lambda p: f"{p[0]} < {p[1]}", self.precedences)
return f"[{', '.join(precs)}]"
class TotalOrder(PartialOrder):
"""A purely qualitative set of constraints that define a total order on its elements."""
def __init__(self, order: List[str]):
self.order = order
precedences = [(order[i - 1], order[i]) for i in range(1, len(order))]
super().__init__(precedences)
def __repr__(self):
return f"[{', '.join(self.order)}]"
def METHOD_NAME(
task_ids: Iterable[str], time_constraints: List[FNode]
) -> TemporalConstraints:
has_time = AnyChecker(predicate=lambda e: e.is_timing_exp())
assert all(
has_time.any(c) for c in time_constraints
), "A temporal constraint has no time expression"
precedences = []
# try transforming all constraints into precedences
for c in time_constraints:
if not c.is_lt():
break
lhs = c.arg(0)
rhs = c.arg(1)
if not lhs.is_timing_exp() or not rhs.is_timing_exp():
break
lhs = lhs.timing() # type: ignore
rhs = rhs.timing() # type: ignore
if lhs.delay != 0 or rhs.delay != 0: # type: ignore
break
lhs = lhs.timepoint # type: ignore
rhs = rhs.timepoint # type: ignore
if lhs.kind != TimepointKind.END or rhs.kind != TimepointKind.START: # type: ignore
break
if lhs.container is None or rhs.container is None: # type: ignore
break
precedences.append((lhs.container, rhs.container)) # type: ignore
qualitative = len(precedences) == len(time_constraints)
if not qualitative:
# At least one constraint cannot be encoded as a precedence
return TemporalConstraints(time_constraints)
else:
to = _build_total_order(set(task_ids), precedences)
if to is not None:
return TotalOrder(to)
else:
return PartialOrder(precedences)
def _build_total_order(
tasks: Set[str], precedences: List[Tuple[str, str]]
) -> Optional[List[str]]:
"""Returns a total order over all elements, or None if the given precedences are not sufficient to impose a total order."""
order = []
pending_tasks = tasks.copy()
pending_precedences = precedences.copy()
while len(pending_tasks) > 0:
# find all elements with no predecessors
firsts = [
t
for t in pending_tasks
if all(tgt != t for (src, tgt) in pending_precedences)
]
if len(firsts) != 1:
return None # not exactly one leading element => not a total order
first = firsts[0]
order.append(first)
# remove `first` from the pending tasks/constraints before continuing
pending_tasks.remove(first)
pending_precedences = [
(src, tgt) for (src, tgt) in pending_precedences if src != first
]
assert len(pending_tasks) == 0
assert len(order) == len(tasks)
assert set(order) == tasks
return order
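# Worked sketch (assumed inputs): with tasks {"a", "b", "c"} and precedences
# [("a", "b"), ("b", "c")], the loop peels off "a", then "b", then "c" and returns
# ["a", "b", "c"]; with only [("a", "b")], both "a" and "c" lack a predecessor on the
# first pass, so len(firsts) != 1 and the function returns None (the caller then falls
# back to a PartialOrder).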
| null |
922 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvpc.endpoint import endpoint_data
class ModifyVpcPrefixListRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Vpc', '2016-04-28', 'ModifyVpcPrefixList','vpc')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_MaxEntries(self): # Integer
return self.get_query_params().get('MaxEntries')
def set_MaxEntries(self, MaxEntries): # Integer
self.add_query_param('MaxEntries', MaxEntries)
def get_RemovePrefixListEntrys(self): # RepeatList
return self.get_query_params().get('RemovePrefixListEntry')
def set_RemovePrefixListEntrys(self, RemovePrefixListEntry): # RepeatList
for depth1 in range(len(RemovePrefixListEntry)):
if RemovePrefixListEntry[depth1].get('Cidr') is not None:
self.add_query_param('RemovePrefixListEntry.' + str(depth1 + 1) + '.Cidr', RemovePrefixListEntry[depth1].get('Cidr'))
if RemovePrefixListEntry[depth1].get('Description') is not None:
self.add_query_param('RemovePrefixListEntry.' + str(depth1 + 1) + '.Description', RemovePrefixListEntry[depth1].get('Description'))
def get_PrefixListId(self): # String
return self.get_query_params().get('PrefixListId')
def set_PrefixListId(self, PrefixListId): # String
self.add_query_param('PrefixListId', PrefixListId)
def get_DryRun(self): # Boolean
return self.get_query_params().get('DryRun')
def set_DryRun(self, DryRun): # Boolean
self.add_query_param('DryRun', DryRun)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def METHOD_NAME(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_AddPrefixListEntrys(self): # RepeatList
return self.get_query_params().get('AddPrefixListEntry')
def set_AddPrefixListEntrys(self, AddPrefixListEntry): # RepeatList
for depth1 in range(len(AddPrefixListEntry)):
if AddPrefixListEntry[depth1].get('Cidr') is not None:
self.add_query_param('AddPrefixListEntry.' + str(depth1 + 1) + '.Cidr', AddPrefixListEntry[depth1].get('Cidr'))
if AddPrefixListEntry[depth1].get('Description') is not None:
self.add_query_param('AddPrefixListEntry.' + str(depth1 + 1) + '.Description', AddPrefixListEntry[depth1].get('Description'))
def get_PrefixListName(self): # String
return self.get_query_params().get('PrefixListName')
def set_PrefixListName(self, PrefixListName): # String
self.add_query_param('PrefixListName', PrefixListName)
def get_PrefixListDescription(self): # String
return self.get_query_params().get('PrefixListDescription')
def set_PrefixListDescription(self, PrefixListDescription): # String
self.add_query_param('PrefixListDescription', PrefixListDescription)
| null |
923 |
from __future__ import print_function
from six.moves import range
__author__ = 'pbmanis'
"""
Copyright 2014 Paul Manis and Luke Campagnola
Distributed under MIT/X11 license. See license.txt for more information.
"""
import re
from acq4.util import Qt
import pyqtgraph as pg
try:
import matplotlib as MP
from matplotlib.ticker import FormatStrFormatter
import matplotlib.pyplot as PL
import matplotlib.gridspec as gridspec
import matplotlib.gridspec as GS
HAVE_MPL = True
except ImportError:
HAVE_MPL = False
if HAVE_MPL:
MP.use('TKAgg')
# Do not modify the following code
# sets up matplotlib with sans-serif plotting...
PL.rcParams['text.usetex'] = True
PL.rcParams['interactive'] = False
PL.rcParams['font.family'] = 'sans-serif'
PL.rcParams['font.sans-serif'] = 'Arial'
PL.rcParams['mathtext.default'] = 'sf'
PL.rcParams['figure.facecolor'] = 'white'
# next setting allows pdf font to be readable in Adobe Illustrator
PL.rcParams['pdf.fonttype'] = 42
PL.rcParams['text.dvipnghack'] = True
# to here (matplotlib stuff - touchy!)
stdFont = 'Arial'
def cleanRepl(matchobj):
"""
Clean up a directory name so that it can be written to a
matplotlib title without encountering LaTeX escape sequences
Replace backslashes with forward slashes
replace underscores (subscript) with escaped underscores
"""
if matchobj.group(0) == '\\':
return '/'
if matchobj.group(0) == '_':
        return '\\_'
if matchobj.group(0) == '/':
return '/'
else:
return ''
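# Hedged example of how cleanRepl is applied below: re.sub(re.compile(r'[\\/_]'),
# cleanRepl, r'data\sub_dir') yields 'data/sub\_dir' -- backslashes become forward
# slashes and underscores are escaped so matplotlib's title rendering does not
# treat them as LaTeX subscripts.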
def METHOD_NAME(gridlayout=None, title=None):
"""
Constructs a matplotlib window that shows the current plots laid out in the same
format as the pyqtgraph window
You might use this for publication purposes, since matplotlib allows export
of the window to a variety of formats, and will contain proper fonts (not "outlined").
Also can be used for automatic generation of PDF files with savefig.
:param: QtGridLayout object that specifies how the grid was built
The layout will contain pyqtgraph widgets added with .addLayout
:return: nothing
"""
if not HAVE_MPL:
raise Exception("Method matplotlibExport requires matplotlib; not importable.")
if gridlayout is None or gridlayout.__class__ != Qt.QGridLayout().__class__:
raise Exception("Method matplotlibExport requires a QGridLayout")
fig = PL.figure()
PL.rcParams['text.usetex'] = False
# escape filename information so it can be rendered by removing
# common characters that trip up latex...:
    escs = re.compile(r'[\\/_]')
print(title)
if title is not None:
tiname = '%r' % title
tiname = re.sub(escs, cleanRepl, tiname)[1:-1]
fig.suptitle(r''+tiname)
PL.autoscale(enable=True, axis='both', tight=None)
# build the plot based on the grid layout
gs = gridspec.GridSpec(gridlayout.rowCount(), gridlayout.columnCount()) # build matplotlib gridspec
for i in range(gridlayout.count()):
w = gridlayout.itemAt(i).widget() # retrieve the plot widget...
        (x, y, c, r) = gridlayout.getItemPosition(i)   # and gridspec parameters
mplax = PL.subplot(gs[x:(c+x), y:(r+y)]) # map to mpl subplot geometry
export_panel(w, mplax) # now fill the plot
gs.update(wspace=0.25, hspace=0.5) # adjust spacing
# PL.draw()
# hook to save figure - not used here
# PL.savefig(os.path.join(self.commonPrefix, self.protocolfile))
PL.show()
def export_panel(pgitem, ax):
"""
export_panel recreates the contents of one pyqtgraph plot item into a specified
matplotlib axis item
:param fileName:
:return:
"""
# get labels from the pyqtgraph graphic item
plitem = pgitem.getPlotItem()
xlabel = plitem.axes['bottom']['item'].label.toPlainText()
ylabel = plitem.axes['left']['item'].label.toPlainText()
title = plitem.titleLabel.text
fn = pg.functions
ax.clear()
cleanAxes(ax) # make a "nice" plot
for item in plitem.curves:
x, y = item.getData()
opts = item.opts
pen = fn.mkPen(opts['pen'])
if pen.style() == Qt.Qt.NoPen:
linestyle = ''
else:
linestyle = '-'
color = tuple([c/255. for c in fn.colorTuple(pen.color())])
symbol = opts['symbol']
if symbol == 't':
symbol = '^'
symbolPen = fn.mkPen(opts['symbolPen'])
symbolBrush = fn.mkBrush(opts['symbolBrush'])
markeredgecolor = tuple([c/255. for c in fn.colorTuple(symbolPen.color())])
markerfacecolor = tuple([c/255. for c in fn.colorTuple(symbolBrush.color())])
markersize = opts['symbolSize']
if opts['fillLevel'] is not None and opts['fillBrush'] is not None:
fillBrush = fn.mkBrush(opts['fillBrush'])
fillcolor = tuple([c/255. for c in fn.colorTuple(fillBrush.color())])
ax.fill_between(x=x, y1=y, y2=opts['fillLevel'], facecolor=fillcolor)
pl = ax.plot(x, y, marker=symbol, color=color, linewidth=pen.width(),
linestyle=linestyle, markeredgecolor=markeredgecolor, markerfacecolor=markerfacecolor,
markersize=markersize)
xr, yr = plitem.viewRange()
ax.set_xbound(*xr)
ax.set_ybound(*yr)
ax.set_xlabel(xlabel) # place the labels.
ax.set_ylabel(ylabel)
# for matplotlib cleanup:
# These were borrowed from Manis' "PlotHelpers.py"
#
def cleanAxes(axl):
if type(axl) is not list:
axl = [axl]
for ax in axl:
for loc, spine in ax.spines.items():
if loc in ['left', 'bottom']:
pass
elif loc in ['right', 'top']:
spine.set_color('none') # do not draw the spine
else:
raise ValueError('Unknown spine location: %s' % loc)
# turn off ticks when there is no spine
ax.xaxis.set_ticks_position('bottom')
# stopped working in matplotlib 1.10
ax.yaxis.set_ticks_position('left')
update_font(ax)
def update_font(axl, size=6, font=stdFont):
if type(axl) is not list:
axl = [axl]
fontProperties = {'family': 'sans-serif', 'sans-serif': [font],
'weight': 'normal', 'font-size': size}
for ax in axl:
for tick in ax.xaxis.get_major_ticks():
tick.label1.set_family('sans-serif')
tick.label1.set_fontname(stdFont)
tick.label1.set_size(size)
for tick in ax.yaxis.get_major_ticks():
tick.label1.set_family('sans-serif')
tick.label1.set_fontname(stdFont)
tick.label1.set_size(size)
# xlab = ax.axes.get_xticklabels()
# print xlab
# print dir(xlab)
# for x in xlab:
# x.set_fontproperties(fontProperties)
# ylab = ax.axes.get_yticklabels()
# for y in ylab:
# y.set_fontproperties(fontProperties)
#ax.set_xticklabels(ax.get_xticks(), fontProperties)
#ax.set_yticklabels(ax.get_yticks(), fontProperties)
ax.xaxis.set_smart_bounds(True)
ax.yaxis.set_smart_bounds(True)
ax.tick_params(axis='both', labelsize=9)
def formatTicks(axl, axis='xy', fmt='%d', font='Arial'):
"""
    Convert tick labels to integers
to do just one axis, set axis = 'x' or 'y'
control the format with the formatting string
"""
if type(axl) is not list:
axl = [axl]
majorFormatter = FormatStrFormatter(fmt)
for ax in axl:
if 'x' in axis:
ax.xaxis.set_major_formatter(majorFormatter)
if 'y' in axis:
ax.yaxis.set_major_formatter(majorFormatter)
| null |
924 |
import functools
import os
import shutil
from typing import (
Any,
Dict,
List,
Optional,
)
from typing_extensions import Unpack
from galaxy import exceptions
from galaxy.util.path import (
safe_contains,
safe_path,
safe_walk,
)
from . import (
BaseFilesSource,
FilesSourceOptions,
FilesSourceProperties,
)
DEFAULT_ENFORCE_SYMLINK_SECURITY = True
DEFAULT_DELETE_ON_REALIZE = False
DEFAULT_ALLOW_SUBDIR_CREATION = True
class PosixFilesSourceProperties(FilesSourceProperties, total=False):
root: str
enforce_symlink_security: bool
delete_on_realize: bool
allow_subdir_creation: bool
class PosixFilesSource(BaseFilesSource):
plugin_type = "posix"
    # If this were a PyFilesystem2FilesSource, all that would be needed is the
    # following, but then we couldn't enforce security our way, I suspect.
# def _open_fs(self):
# from fs.osfs import OSFS
# handle = OSFS(**self._props)
# return handle
def __init__(self, **kwd: Unpack[PosixFilesSourceProperties]):
props = self._parse_common_config_opts(kwd)
self.root = props.get("root")
if not self.root:
self.writable = False
self.enforce_symlink_security = props.get("enforce_symlink_security", DEFAULT_ENFORCE_SYMLINK_SECURITY)
self.delete_on_realize = props.get("delete_on_realize", DEFAULT_DELETE_ON_REALIZE)
self.allow_subdir_creation = props.get("allow_subdir_creation", DEFAULT_ALLOW_SUBDIR_CREATION)
def METHOD_NAME(self, path="/", recursive=True, user_context=None, opts: Optional[FilesSourceOptions] = None):
if not self.root:
raise exceptions.ItemAccessibilityException("Listing files at file:// URLs has been disabled.")
dir_path = self._to_native_path(path, user_context=user_context)
if not self._safe_directory(dir_path):
raise exceptions.ObjectNotFound(f"The specified directory does not exist [{dir_path}].")
if recursive:
res: List[Dict[str, Any]] = []
effective_root = self._effective_root(user_context)
for p, dirs, files in safe_walk(dir_path, allowlist=self._allowlist):
rel_dir = os.path.relpath(p, effective_root)
to_dict = functools.partial(self._resource_info_to_dict, rel_dir, user_context=user_context)
res.extend(map(to_dict, dirs))
res.extend(map(to_dict, files))
return res
else:
res = os.listdir(dir_path)
to_dict = functools.partial(self._resource_info_to_dict, path, user_context=user_context)
return list(map(to_dict, res))
def _realize_to(
self, source_path: str, native_path: str, user_context=None, opts: Optional[FilesSourceOptions] = None
):
if not self.root and (not user_context or not user_context.is_admin):
raise exceptions.ItemAccessibilityException("Writing to file:// URLs has been disabled.")
effective_root = self._effective_root(user_context)
source_native_path = self._to_native_path(source_path, user_context=user_context)
if self.enforce_symlink_security:
if not safe_contains(effective_root, source_native_path, allowlist=self._allowlist):
raise Exception("Operation not allowed.")
else:
source_native_path = os.path.normpath(source_native_path)
assert source_native_path.startswith(os.path.normpath(effective_root))
if not self.delete_on_realize:
shutil.copyfile(source_native_path, native_path)
else:
shutil.move(source_native_path, native_path)
def _write_from(
self, target_path: str, native_path: str, user_context=None, opts: Optional[FilesSourceOptions] = None
):
effective_root = self._effective_root(user_context)
target_native_path = self._to_native_path(target_path, user_context=user_context)
if self.enforce_symlink_security:
if not safe_contains(effective_root, target_native_path, allowlist=self._allowlist):
raise Exception("Operation not allowed.")
else:
target_native_path = os.path.normpath(target_native_path)
assert target_native_path.startswith(os.path.normpath(effective_root))
target_native_path_parent = os.path.dirname(target_native_path)
if not os.path.exists(target_native_path_parent):
if self.allow_subdir_creation:
os.makedirs(target_native_path_parent)
else:
raise Exception("Parent directory does not exist.")
shutil.copyfile(native_path, target_native_path)
def _to_native_path(self, source_path: str, user_context=None):
source_path = os.path.normpath(source_path)
if source_path.startswith("/"):
source_path = source_path[1:]
return os.path.join(self._effective_root(user_context), source_path)
def _effective_root(self, user_context=None):
return self._evaluate_prop(self.root or "/", user_context=user_context)
def _resource_info_to_dict(self, dir: str, name: str, user_context=None):
rel_path = os.path.normpath(os.path.join(dir, name))
full_path = self._to_native_path(rel_path, user_context=user_context)
uri = self.uri_from_path(rel_path)
if os.path.isdir(full_path):
return {"class": "Directory", "name": name, "uri": uri, "path": rel_path}
else:
statinfo = os.lstat(full_path)
return {
"class": "File",
"name": name,
"size": statinfo.st_size,
"ctime": self.to_dict_time(statinfo.st_ctime),
"uri": uri,
"path": rel_path,
}
def _safe_directory(self, directory):
if self.enforce_symlink_security:
if not safe_path(directory, allowlist=self._allowlist):
raise exceptions.ConfigDoesNotAllowException(
f"directory ({directory}) is a symlink to a location not on the allowlist"
)
if not os.path.exists(directory):
return False
return True
def _serialization_props(self, user_context=None) -> PosixFilesSourceProperties:
return {
# abspath needed because will be used by external Python from
# a job working directory
"root": os.path.abspath(self._effective_root(user_context)),
"enforce_symlink_security": self.enforce_symlink_security,
"delete_on_realize": self.delete_on_realize,
"allow_subdir_creation": self.allow_subdir_creation,
}
@property
def _allowlist(self):
return self._file_sources_config.symlink_allowlist
def score_url_match(self, url: str):
# For security, we need to ensure that a partial match doesn't work. e.g. file://{root}something/myfiles
if self.root and (
            url.startswith(f"{self.get_uri_root()}://{self.root}/") or url == f"{self.get_uri_root()}://{self.root}"
        ):
            return len(f"{self.get_uri_root()}://{self.root}")
elif self.root and (url.startswith(f"file://{self.root}/") or url == f"file://{self.root}"):
return len(f"file://{self.root}")
elif not self.root and url.startswith("file://"):
return len("file://")
else:
return super().score_url_match(url)
def to_relative_path(self, url: str) -> str:
if url.startswith(f"file://{self.root}"):
return url[len(f"file://{self.root}") :]
elif url.startswith("file://"):
return url[7:]
else:
return super().to_relative_path(url)
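    # Rough usage sketch (assuming root="/data" and a file:// style URL): for
    # "file:///data/subdir/a.txt", score_url_match returns len("file:///data") and
    # to_relative_path returns "/subdir/a.txt"; URLs that don't match the root fall
    # through to the base class behaviour.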
__all__ = ("PosixFilesSource",)
| null |
925 |
import pytest
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from ai.backend.manager.models.minilang.ordering import QueryOrderParser
from ai.backend.manager.models.utils import agg_to_array
@pytest.fixture
async def virtual_grid_db(database_engine):
base = declarative_base()
metadata = base.metadata
grid = sa.Table(
"test_query_order_users",
metadata,
sa.Column("id", sa.Integer, sa.Sequence("user_id_seq"), primary_key=True),
sa.Column("data1", sa.Integer),
sa.Column("data2", sa.Float),
sa.Column("data3", sa.String(10)),
)
foreign_grid = sa.Table(
"test_query_order_dogs",
metadata,
sa.Column("id", sa.Integer, sa.Sequence("dog_id_seq"), primary_key=True),
sa.Column("user_id", sa.ForeignKey("test_query_order_users.id")),
sa.Column("name", sa.String(10)),
)
def _create_tables(conn, *args, **kwargs):
return metadata.create_all(conn, [grid, foreign_grid])
def METHOD_NAME(conn, *args, **kwargs):
return metadata.drop_all(conn, [grid, foreign_grid])
async with database_engine.begin() as conn:
await conn.run_sync(_create_tables)
await conn.execute(
grid.insert(),
[
{"data1": 10, "data2": 0.2, "data3": "a"},
{"data1": 10, "data2": 0.1, "data3": "c"},
{"data1": 20, "data2": 0.0, "data3": "b"},
{"data1": 20, "data2": -0.1, "data3": "d"},
],
)
await conn.execute(
foreign_grid.insert(),
[
{"user_id": 1, "name": "b"},
{"user_id": 1, "name": "c"},
{"user_id": 2, "name": "a"},
],
)
try:
yield conn, grid, foreign_grid
finally:
await conn.run_sync(METHOD_NAME)
async def test_select_queries(virtual_grid_db) -> None:
conn, grid, _ = virtual_grid_db
parser = QueryOrderParser()
sa_query = parser.append_ordering(
sa.select([grid.c.id]).select_from(grid),
"+data1",
)
actual_ret = list(await conn.execute(sa_query))
test_ret = [(1,), (2,), (3,), (4,)]
assert test_ret == actual_ret
sa_query = parser.append_ordering(
sa.select([grid.c.id]).select_from(grid),
"-data1",
)
actual_ret = list(await conn.execute(sa_query))
test_ret = [(3,), (4,), (1,), (2,)]
assert test_ret == actual_ret
sa_query = parser.append_ordering(
sa.select([grid.c.id]).select_from(grid),
"-data1,+data2",
)
actual_ret = list(await conn.execute(sa_query))
test_ret = [(4,), (3,), (2,), (1,)]
assert test_ret == actual_ret
sa_query = parser.append_ordering(
sa.select([grid.c.id]).select_from(grid),
"-data1,+data3,-data2",
)
actual_ret = list(await conn.execute(sa_query))
test_ret = [(3,), (4,), (1,), (2,)]
assert test_ret == actual_ret
# default ordering
sa_query = parser.append_ordering(
sa.select([grid.c.id]).select_from(grid),
"",
)
actual_ret = list(await conn.execute(sa_query))
test_ret = [(1,), (2,), (3,), (4,)]
assert test_ret == actual_ret
# without order marks, it's assumed to be ascending
sa_query = parser.append_ordering(
sa.select([grid.c.id]).select_from(grid),
"data3,-data2,data1",
)
actual_ret = list(await conn.execute(sa_query))
test_ret = [(1,), (3,), (2,), (4,)]
assert test_ret == actual_ret
# invalid syntax
with pytest.raises(ValueError):
parser.append_ordering(
sa.select([grid.c.id]).select_from(grid),
"xxx",
)
async def test_column_map(virtual_grid_db) -> None:
conn, grid, _ = virtual_grid_db
parser = QueryOrderParser(
{
"v1": ("data1", None),
"v2": ("data2", None),
"v3": ("data3", None),
}
)
sa_query = parser.append_ordering(
sa.select([grid.c.id]).select_from(grid),
"-v3",
)
actual_ret = list(await conn.execute(sa_query))
test_ret = [(4,), (2,), (3,), (1,)]
assert test_ret == actual_ret
# non-existent column in the column map
with pytest.raises(ValueError):
parser.append_ordering(
sa.select([grid.c.id]).select_from(grid),
"-data1,+data2",
)
async def test_aggregated_foreign_fields(virtual_grid_db) -> None:
conn, grid, foreign_grid = virtual_grid_db
parser = QueryOrderParser(
{
"dogs_name": ("test_query_order_dogs_name", agg_to_array),
}
)
orig_query = (
sa.select(
[
grid.c.id,
agg_to_array(foreign_grid.c.name).label("dogs_name"),
]
)
.select_from(sa.join(grid, foreign_grid, grid.c.id == foreign_grid.c.user_id))
.group_by(grid)
)
sa_query = parser.append_ordering(
orig_query,
"dogs_name",
)
actual_ret = list(await conn.execute(sa_query))
test_ret = [(2, ["a"]), (1, ["b", "c"])]
assert test_ret == actual_ret
| null |
926 |
##########################################################################
#
# Copyright (c) 2007-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import os.path
import IECore
import IECoreImage
import IECoreGL
IECoreGL.init( False )
class TestToGLTexureConverter( unittest.TestCase ) :
def METHOD_NAME( self ) :
""" Test conversion from an ImagePrimitive """
i = IECore.Reader.create( os.path.join( os.path.dirname( __file__ ), "images", "colorBarsWithAlphaF512x512.exr" ) ).read()
t = IECoreGL.ToGLTextureConverter( i ).convert()
self.assertFalse( not t.isInstanceOf( IECoreGL.Texture.staticTypeId() ) )
ii = t.imagePrimitive()
res = IECoreImage.ImageDiffOp()(
imageA = i,
imageB = ii,
maxError = 0.01,
skipMissingChannels = False
)
self.assertFalse( res.value )
def testFromCompoundData( self ) :
""" Test conversion from a CompoundData representation of an ImagePrimitive """
i = IECore.Reader.create( os.path.join( os.path.dirname( __file__ ), "images", "colorBarsWithAlphaF512x512.exr" ) ).read()
cd = IECore.CompoundData()
cd["displayWindow"] = IECore.Box2iData( i.displayWindow )
cd["dataWindow"] = IECore.Box2iData( i.dataWindow )
cnd = IECore.CompoundData()
for channel in i.channelNames() :
cnd[ channel ] = i[ channel ]
cd["channels"] = cnd
t = IECoreGL.ToGLTextureConverter( cd ).convert()
self.assertFalse( not t.isInstanceOf( IECoreGL.Texture.staticTypeId() ) )
ii = t.imagePrimitive()
res = IECoreImage.ImageDiffOp()(
imageA = i,
imageB = ii,
maxError = 0.01,
skipMissingChannels = False
)
self.assertFalse( res.value )
def testMissingChannelCreation( self ) :
""" Test the creation of missing channels """
i = IECore.Reader.create( os.path.join( os.path.dirname( __file__ ), "images", "colorBarsWithAlphaF512x512.exr" ) ).read()
cd = IECore.CompoundData()
cd["displayWindow"] = IECore.Box2iData( i.displayWindow )
cd["dataWindow"] = IECore.Box2iData( i.dataWindow )
cnd = IECore.CompoundData()
cnd[ "R" ] = i[ "R" ]
cd["channels"] = cnd
# We are missing a channel and so an exception should be thrown if we try to convert it with the default arguments.
self.assertRaises( RuntimeError, IECoreGL.ToGLTextureConverter( cd ).convert )
t = IECoreGL.ToGLTextureConverter( cd, True ).convert()
ii = t.imagePrimitive()
self.assertTrue( "R" in ii.channelNames() )
self.assertTrue( "G" in ii.channelNames() )
self.assertTrue( "B" in ii.channelNames() )
if __name__ == "__main__":
unittest.main()
| null |
927 |
## \example domino/six_particles_optimization.py
# Optimize six particles on a 2D unit grid. In order to remove translation
# degrees of freedom, the 0th particle is pinned at the origin by allowing
# it only a single conformation. To remove flips, the first particle is
# restrained to have a positive x coordinate.
from __future__ import print_function
import IMP
import IMP.domino
import IMP.core
import IMP.container
import sys
IMP.setup_from_argv(sys.argv, "six particles optimization")
# set restraints
def METHOD_NAME(m, ps):
pairs = [[0, 1], [0, 2], [1, 3], [2, 3], [3, 4], [4, 5], [1, 5]]
# we will restrain various pairs to be 1 apart
score = IMP.core.HarmonicDistancePairScore(1, 1)
# the restraint will be broken apart during optimization
# map the indices above to actual particles
pc = IMP.container.ListPairContainer(m,
[(ps[p[0]], ps[p[1]]) for p in pairs],
"Restrained pairs")
pr = IMP.container.PairsRestraint(score, pc)
pr.set_maximum_score(.01)
d = IMP.core.DistanceToSingletonScore(IMP.core.HarmonicUpperBound(2, 1),
IMP.algebra.Vector3D(2, 0, 0))
# force ps[1] to be on the positive side to remove flip degree of freedom
dr = IMP.core.SingletonRestraint(m, d, ps[1])
# we are not interested in conformations which don't fit the distances
# exactly, but using 0 is tricky
dr.set_maximum_score(.01)
return [pr, dr]
def create_representation(m):
ps = []
    # create six particles; initial coordinates don't matter.
for i in range(0, 6):
p = m.add_particle("P%d" % i)
IMP.core.XYZ.setup_particle(m, p, IMP.algebra.Vector3D(i, 0., 0.))
ps.append(p)
return ps
def create_discrete_states(m, ps):
pst = IMP.domino.ParticleStatesTable()
vs = [IMP.algebra.Vector3D(1, 0, 0),
IMP.algebra.Vector3D(0, 1, 0),
IMP.algebra.Vector3D(1, 1, 0),
IMP.algebra.Vector3D(2, 1, 0),
IMP.algebra.Vector3D(2, 0, 0)]
vs = vs + [-v for v in vs]
print(len(vs), "states for each particle")
states = IMP.domino.XYZStates(vs)
    # special case ps[0] to remove a sliding degree of freedom:
    # pin it at the origin by giving it only a single allowed state
    pst.set_particle_states(m.get_particle(ps[0]),
                            IMP.domino.XYZStates([IMP.algebra.Vector3D(0, 0, 0)]))
    # all other particles are given the same set of states
    for p in ps[1:]:
        pst.set_particle_states(m.get_particle(p), states)
return pst
def create_sampler(m, r, pst):
    # create the sampler and pass it the states for each particle
s = IMP.domino.DominoSampler(m, pst)
s.set_restraints(r)
# the following lines recreate the defaults and so are optional
filters = []
# create a restraint cache to avoid re-evaluating restraints
rc = IMP.domino.RestraintCache(pst)
# add the list of restraints we want to use
rc.add_restraints(r)
# do not allow particles with the same ParticleStates object
# to have the same state index
filters.append(IMP.domino.ExclusionSubsetFilterTable(pst))
# filter states that score worse than the cutoffs in the Model
filters.append(IMP.domino.RestraintScoreSubsetFilterTable(rc))
filters[-1].set_log_level(IMP.SILENT)
# try to be intelligent about enumerating the states in each subset
states = IMP.domino.BranchAndBoundAssignmentsTable(pst, filters)
states.set_log_level(IMP.SILENT)
s.set_assignments_table(states)
s.set_subset_filter_tables(filters)
return s
IMP.set_log_level(IMP.TERSE)
m = IMP.Model()
# don't print information during Model.evaluate
m.set_log_level(IMP.SILENT)
print("creating representation")
ps = create_representation(m)
print("creating discrete states")
pst = create_discrete_states(m, ps)
print("creating score function")
rs = METHOD_NAME(m, ps)
print("creating sampler")
s = create_sampler(m, rs, pst)
print("sampling")
# get an IMP.ConfigurationSet with the sampled states. If there are very
# many, it might be better to use s.get_sample_states() and then
# IMP.domino.load_particle_states() to handle the states as that takes
# much less memory, and time.
cs = s.create_sample()
print("found ", cs.get_number_of_configurations(), "solutions")
sf = IMP.core.RestraintsScoringFunction(rs)
for i in range(cs.get_number_of_configurations()):
cs.load_configuration(i)
print("solution number:", i, " is:", sf.evaluate(False))
for p in ps:
print(IMP.core.XYZ(m, p).get_x(), IMP.core.XYZ(m, p).get_y())
| null |
928 |
from __future__ import annotations
import warnings
import pytest
from packaging.version import parse as parse_version
scipy = pytest.importorskip("scipy")
import numpy as np
import dask.array as da
import dask.array.stats
from dask.array.utils import allclose, assert_eq
from dask.delayed import Delayed
@pytest.mark.parametrize(
"kind, kwargs", [("skew", {}), ("kurtosis", {}), ("kurtosis", {"fisher": False})]
)
@pytest.mark.parametrize("single_dim", [True, False])
def test_measures(kind, kwargs, single_dim):
np.random.seed(seed=1337)
if single_dim:
x = np.random.random(size=(30,))
else:
x = np.random.random(size=(30, 2))
y = da.from_array(x, 3)
dfunc = getattr(dask.array.stats, kind)
sfunc = getattr(scipy.stats, kind)
expected = sfunc(x, **kwargs)
result = dfunc(y, **kwargs)
if np.isscalar(expected):
# make it an array to account for possible numeric errors
expected = np.array(expected)
assert_eq(result, expected)
assert isinstance(result, da.Array)
def test_bias_raises():
x = np.random.random(size=(30, 2))
y = da.from_array(x, 3)
with pytest.raises(NotImplementedError):
dask.array.stats.skew(y, bias=False)
with pytest.raises(NotImplementedError):
dask.array.stats.kurtosis(y, bias=False)
@pytest.mark.parametrize(
"kind", ["chisquare", "power_divergence", "normaltest", "skewtest", "kurtosistest"]
)
def test_one(kind):
a = np.random.random(size=30)
a_ = da.from_array(a, 3)
dask_test = getattr(dask.array.stats, kind)
scipy_test = getattr(scipy.stats, kind)
result = dask_test(a_)
expected = scipy_test(a)
assert isinstance(result, Delayed)
assert allclose(result.compute(), expected)
@pytest.mark.parametrize(
"kind, kwargs",
[
("ttest_ind", {}),
("ttest_ind", {"equal_var": False}),
pytest.param(
"ttest_1samp",
{},
marks=pytest.mark.xfail(
                # NOTE: using nested `parse_version` calls here to handle nightly scipy releases
parse_version(parse_version(scipy.__version__).base_version)
>= parse_version("1.10.0"),
reason="https://github.com/dask/dask/issues/9499",
),
),
("ttest_rel", {}),
("chisquare", {}),
("power_divergence", {}),
("power_divergence", {"lambda_": 0}),
("power_divergence", {"lambda_": -1}),
("power_divergence", {"lambda_": "neyman"}),
],
)
def METHOD_NAME(kind, kwargs):
# The sums of observed and expected frequencies must match
a = np.random.random(size=30)
b = a[::-1]
a_ = da.from_array(a, 3)
b_ = da.from_array(b, 3)
dask_test = getattr(dask.array.stats, kind)
scipy_test = getattr(scipy.stats, kind)
with warnings.catch_warnings(): # maybe overflow warning (power_divergence)
warnings.simplefilter("ignore", category=RuntimeWarning)
result = dask_test(a_, b_, **kwargs)
expected = scipy_test(a, b, **kwargs)
assert isinstance(result, Delayed)
assert allclose(result.compute(), expected)
# fails occasionally. shouldn't this be exact?
# assert dask.compute(*result) == expected
@pytest.mark.parametrize("k", range(5))
def test_moments(k):
x = np.random.random(size=(30, 2))
y = da.from_array(x, 3)
expected = scipy.stats.moment(x, k)
result = dask.array.stats.moment(y, k)
assert_eq(result, expected)
def test_anova():
np_args = [i * np.random.random(size=(30,)) for i in range(4)]
da_args = [da.from_array(x, chunks=10) for x in np_args]
result = dask.array.stats.f_oneway(*da_args)
expected = scipy.stats.f_oneway(*np_args)
assert allclose(result.compute(), expected)
@pytest.mark.parametrize(
"func, nargs",
[
(dask.array.stats.ttest_1samp, 2),
(dask.array.stats.ttest_rel, 2),
(dask.array.stats.skewtest, 1),
(dask.array.stats.kurtosis, 1),
(dask.array.stats.kurtosistest, 1),
(dask.array.stats.normaltest, 1),
(dask.array.stats.moment, 1),
],
)
@pytest.mark.parametrize("nan_policy", ["omit", "raise"])
def test_nan_raises(func, nargs, nan_policy):
with pytest.raises(NotImplementedError):
func(*(None,) * nargs, nan_policy=nan_policy)
def test_power_divergence_invalid():
a = np.random.random(size=30)
a_ = da.from_array(a, 3)
with pytest.raises(ValueError):
dask.array.stats.power_divergence(a_, lambda_="wrong")
def test_skew_raises():
a = da.ones((7,), chunks=(7,))
with pytest.raises(ValueError, match="7 samples"):
dask.array.stats.skewtest(a)
def test_skew_single_return_type():
"""This function tests the return type for the skew method for a 1d array."""
numpy_array = np.random.random(size=(30,))
dask_array = da.from_array(numpy_array, 3)
result = dask.array.stats.skew(dask_array).compute()
assert isinstance(result, np.float64)
def test_kurtosis_single_return_type():
"""This function tests the return type for the kurtosis method for a 1d array."""
numpy_array = np.random.random(size=(30,))
dask_array = da.from_array(numpy_array, 3)
result = dask.array.stats.kurtosis(dask_array).compute()
result_non_fisher = dask.array.stats.kurtosis(dask_array, fisher=False).compute()
assert isinstance(result, np.float64)
assert isinstance(result_non_fisher, np.float64)
| null |
929 |
##########################################################################
#
# Copyright (c) 2007-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import os.path
import tempfile
import shutil
import IECore
import IECoreGL
IECoreGL.init( False )
class ShaderLoaderTest( unittest.TestCase ) :
def test( self ) :
sp = IECore.SearchPath( os.path.join( os.path.dirname( __file__ ), "shaders" ) )
l = IECoreGL.ShaderLoader( sp )
s = l.load( "3dLabs/Toon" )
self.assertTrue( s.typeName()=="IECoreGL::Shader" )
ss = l.load( "3dLabs/Toon" )
self.assertTrue( s.isSame( ss ) )
# shader is too complicated for my graphics card
s = l.load( "3dLabs/Mandel" )
self.assertTrue( s.typeName()=="IECoreGL::Shader" )
self.assertTrue( IECoreGL.ShaderLoader.defaultShaderLoader().isSame( IECoreGL.ShaderLoader.defaultShaderLoader() ) )
def METHOD_NAME( self ) :
sp = IECore.SearchPath( os.path.join( os.path.dirname( __file__ ), "shaders" ) )
psp = IECore.SearchPath( os.path.join( os.path.dirname( __file__ ), "shaders", "include" ) )
# this should work
l = IECoreGL.ShaderLoader( sp, psp )
s = l.load( "failWithoutPreprocessing" )
# but turning off preprocessing should cause a throw
l = IECoreGL.ShaderLoader( sp )
self.assertRaises( RuntimeError, l.load, "failWithoutPreprocessing" )
def testPreprocessingAllowsVersionAndExtension( self ) :
sp = IECore.SearchPath( os.path.join( os.path.dirname( __file__ ), "shaders" ) )
psp = IECore.SearchPath( os.path.join( os.path.dirname( __file__ ), "shaders", "include" ) )
l = IECoreGL.ShaderLoader( sp, psp )
l.load( "versionAndExtension" )
def testPreprocessingThrowsOnBadDirective( self ) :
sp = IECore.SearchPath( os.path.join( os.path.dirname( __file__ ), "shaders" ) )
psp = IECore.SearchPath( os.path.join( os.path.dirname( __file__ ), "shaders", "include" ) )
l = IECoreGL.ShaderLoader( sp, psp )
self.assertRaises( RuntimeError, l.load, "badPreprocessingDirective" )
def testLoadSourceMessagesAndCaching( self ) :
sp = IECore.SearchPath( os.path.join( os.path.dirname( __file__ ), "shaders" ) )
psp = IECore.SearchPath( os.path.join( os.path.dirname( __file__ ), "shaders", "include" ) )
l = IECoreGL.ShaderLoader( sp, psp )
with IECore.CapturingMessageHandler() as mh :
source = l.loadSource( "thisShaderDoesntExist" )
self.assertEqual( source, ( "", "", "" ) )
source = l.loadSource( "thisShaderDoesntExist" )
self.assertEqual( source, ( "", "", "" ) )
# we don't want messages over and over for repeated failures to
# load.
self.assertEqual( len( mh.messages ), 1 )
# but we do want a nice sensible message the first time.
self.assertTrue( "thisShaderDoesntExist" in mh.messages[0].message )
def testClear( self ) :
temporaryDirectory = tempfile.mkdtemp( prefix="IECoreGL" )
sp = IECore.SearchPath( temporaryDirectory )
l = IECoreGL.ShaderLoader( sp )
f = open( os.path.join( temporaryDirectory, "testShader.frag" ), 'w' )
f.write(
"""void main()
{
gl_FragColor = vec4( 1.0, 0.0, 0.0, 1.0 );
}"""
)
f.close()
s = l.load( "testShader" )
f = open(os.path.join( temporaryDirectory, "testShader.frag" ), 'w' )
f.write(
"""void main()
{
gl_FragColor = vec4( 0.0, 1.0, 0.0, 1.0 );
}"""
)
f.close()
# Source is updated, but we will still reuse the cache
s2 = l.load( "testShader" )
self.assertTrue( s.isSame( s2 ) )
l.clear()
# After clearing, the shader is now updated. ( Ideally we would test the modified functionality of the shader here, but that seems hard. )
s3 = l.load( "testShader" )
self.assertTrue( not s.isSame( s3 ) )
shutil.rmtree( temporaryDirectory )
if __name__ == "__main__":
unittest.main()
| null |
930 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkhbr.endpoint import endpoint_data
class CreateHanaRestoreRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'hbr', '2017-09-08', 'CreateHanaRestore')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_SidAdmin(self): # String
return self.get_query_params().get('SidAdmin')
def set_SidAdmin(self, SidAdmin): # String
self.add_query_param('SidAdmin', SidAdmin)
def get_RecoveryPointInTime(self): # Long
return self.get_query_params().get('RecoveryPointInTime')
def set_RecoveryPointInTime(self, RecoveryPointInTime): # Long
self.add_query_param('RecoveryPointInTime', RecoveryPointInTime)
def get_LogPosition(self): # Long
return self.get_query_params().get('LogPosition')
def set_LogPosition(self, LogPosition): # Long
self.add_query_param('LogPosition', LogPosition)
def get_Source(self): # String
return self.get_query_params().get('Source')
def set_Source(self, Source): # String
self.add_query_param('Source', Source)
def METHOD_NAME(self): # Boolean
return self.get_query_params().get('ClearLog')
def set_ClearLog(self, ClearLog): # Boolean
self.add_query_param('ClearLog', ClearLog)
def get_Mode(self): # String
return self.get_query_params().get('Mode')
def set_Mode(self, Mode): # String
self.add_query_param('Mode', Mode)
def get_CheckAccess(self): # Boolean
return self.get_query_params().get('CheckAccess')
def set_CheckAccess(self, CheckAccess): # Boolean
self.add_query_param('CheckAccess', CheckAccess)
def get_BackupId(self): # Long
return self.get_query_params().get('BackupId')
def set_BackupId(self, BackupId): # Long
self.add_query_param('BackupId', BackupId)
def get_ClusterId(self): # String
return self.get_query_params().get('ClusterId')
def set_ClusterId(self, ClusterId): # String
self.add_query_param('ClusterId', ClusterId)
def get_UseDelta(self): # Boolean
return self.get_query_params().get('UseDelta')
def set_UseDelta(self, UseDelta): # Boolean
self.add_query_param('UseDelta', UseDelta)
def get_UseCatalog(self): # Boolean
return self.get_query_params().get('UseCatalog')
def set_UseCatalog(self, UseCatalog): # Boolean
self.add_query_param('UseCatalog', UseCatalog)
def get_BackupPrefix(self): # String
return self.get_query_params().get('BackupPrefix')
def set_BackupPrefix(self, BackupPrefix): # String
self.add_query_param('BackupPrefix', BackupPrefix)
def get_DatabaseName(self): # String
return self.get_query_params().get('DatabaseName')
def set_DatabaseName(self, DatabaseName): # String
self.add_query_param('DatabaseName', DatabaseName)
def get_VolumeId(self): # Integer
return self.get_query_params().get('VolumeId')
def set_VolumeId(self, VolumeId): # Integer
self.add_query_param('VolumeId', VolumeId)
def get_SourceClusterId(self): # String
return self.get_query_params().get('SourceClusterId')
def set_SourceClusterId(self, SourceClusterId): # String
self.add_query_param('SourceClusterId', SourceClusterId)
def get_SystemCopy(self): # Boolean
return self.get_query_params().get('SystemCopy')
def set_SystemCopy(self, SystemCopy): # Boolean
self.add_query_param('SystemCopy', SystemCopy)
| null |
931 |
"""Test schedulers."""
# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import pytest
from otx.algorithms.segmentation.adapters.mmseg.models.schedulers import (
ConstantScalarScheduler,
PolyScalarScheduler,
StepScalarScheduler,
)
class TestSchedulers:
"""Test schedulers."""
def test_constant_scalar_scheduler(self):
"""Test constant scalar scheduler.
Learning rate should not change over time.
"""
scheduler = ConstantScalarScheduler(scale=30.0)
assert scheduler(0, 1) == 30.0
assert scheduler(1, 1) == 30.0
assert scheduler(2, 10) == 30.0
def test_constant_scalar_scheduler_invalid_scale(self):
"""Test constant scalar scheduler with invalid scale."""
with pytest.raises(AssertionError):
ConstantScalarScheduler(scale=-1.0)
@pytest.mark.xfail
def METHOD_NAME(self):
"""Test constant scalar scheduler with invalid step.
        TODO: ConstantScalarScheduler should be modified to raise this error
"""
scheduler = ConstantScalarScheduler(scale=30.0)
with pytest.raises(AssertionError):
scheduler(-1, 1)
def test_poly_scalar_scheduler_by_epoch_false(self):
"""Test poly scalar scheduler."""
# By epoch is False
scheduler = PolyScalarScheduler(
start_scale=30.0,
end_scale=0.0,
num_iters=100,
power=0.9,
by_epoch=False,
)
# learning rate should decrease over time
assert scheduler(0, 1) == 30.0
assert scheduler(1, 1) < 30.0
assert scheduler(2, 1) < scheduler(1, 1)
assert scheduler(3, 1) < scheduler(2, 1)
assert scheduler(50, 10) == scheduler(50, 1) # as this is not by epoch
# learning rate should not change after num_iters
assert scheduler(100, 1) == 0.0
assert scheduler(101, 1) == 0.0
assert scheduler(102, 1) == 0.0
def test_poly_scalar_scheduler_by_epoch_true(self):
scheduler = PolyScalarScheduler(
start_scale=30.0,
end_scale=0.0,
num_iters=100,
power=0.9,
by_epoch=True,
)
# learning rate should decrease over time
assert scheduler(0, 1) == 30.0
assert scheduler(1, 1) < 30.0
assert scheduler(2, 1) < scheduler(1, 1)
assert scheduler(3, 1) < scheduler(2, 1)
assert scheduler(50, 10) != scheduler(50, 1) # as this is by epoch
# learning rate should not change after num_iters
assert scheduler(100, 1) == 0.0
assert scheduler(101, 1) == 0.0
assert scheduler(102, 1) == 0.0
def test_step_scalar_scheduler_by_epoch_false(self):
"""Test step scalar scheduler."""
# By epoch is False
scheduler = StepScalarScheduler(
scales=[30.0, 20.0, 10.0, 5.0],
num_iters=[2, 3, 4],
by_epoch=False,
)
# learning rate should decrease over time as a step function
assert scheduler(0, 1) == 30.0
assert scheduler(1, 1) == 30.0
assert scheduler(2, 1) < scheduler(1, 1)
assert scheduler(3, 1) < scheduler(2, 1)
assert scheduler(50, 10) == scheduler(50, 1)
assert scheduler(5, 2) == 5.0
assert scheduler(5, 0) == scheduler(10, 1)
assert scheduler(10, 1) == 5.0 # steps greater than total num_iters
def test_step_scalar_scheduler_by_epoch_true(self):
# By epoch is True
scheduler = StepScalarScheduler(
scales=[30.0, 20.0, 10.0, 5.0],
num_iters=[2, 3, 4],
by_epoch=True,
)
# learning rate should decrease over time as a step function
assert scheduler(0, 1) == 30.0
assert scheduler(1, 1) == 30.0
assert scheduler(2, 1) < scheduler(1, 1)
assert scheduler(3, 1) < scheduler(2, 1)
assert scheduler(9, 5) == 30.0
assert scheduler(5, 2) == 20.0
assert scheduler(5, 2) < scheduler(10, 11)
| null |
932 |
"""
Views related to personal access tokens. Intended for OSF internal use only
"""
from django.db.models import Q
from rest_framework.exceptions import APIException, NotFound
from rest_framework import generics
from rest_framework import permissions as drf_permissions
from api.base.renderers import JSONAPIRenderer, JSONRendererWithESISupport
from framework.auth import cas
from framework.auth.oauth_scopes import CoreScopes
from api.base.filters import ListFilterMixin
from api.base.utils import get_object_or_error
from api.base.views import JSONAPIBaseView
from api.base.parsers import JSONAPIMultipleRelationshipsParser, JSONAPIMultipleRelationshipsParserForRegularJSON
from api.base import permissions as base_permissions
from api.scopes.serializers import ScopeSerializer
from api.tokens.serializers import ApiOAuth2PersonalTokenWritableSerializer
from osf.models import ApiOAuth2PersonalToken
class TokenList(JSONAPIBaseView, generics.ListCreateAPIView, ListFilterMixin):
"""
Get a list of personal access tokens that the user has registered
"""
permission_classes = (
drf_permissions.IsAuthenticated,
base_permissions.OwnerOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.TOKENS_READ]
required_write_scopes = [CoreScopes.TOKENS_WRITE]
serializer_class = ApiOAuth2PersonalTokenWritableSerializer
view_category = 'tokens'
view_name = 'token-list'
renderer_classes = [JSONRendererWithESISupport, JSONAPIRenderer, ] # Hide from web-browsable API tool
ordering = ('-id',)
parser_classes = (JSONAPIMultipleRelationshipsParser, JSONAPIMultipleRelationshipsParserForRegularJSON,)
def get_default_queryset(self):
return ApiOAuth2PersonalToken.objects.filter(owner=self.request.user, is_active=True)
# overrides ListAPIView
def get_queryset(self):
return self.get_queryset_from_request()
def METHOD_NAME(self, serializer):
"""Add user to the created object"""
serializer.validated_data['owner'] = self.request.user
serializer.save()
class TokenDetail(JSONAPIBaseView, generics.RetrieveUpdateDestroyAPIView):
"""
Get information about a specific personal access token that the user has registered
Should not return information if the token belongs to a different user
"""
permission_classes = (
drf_permissions.IsAuthenticated,
base_permissions.OwnerOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.TOKENS_READ]
required_write_scopes = [CoreScopes.TOKENS_WRITE]
serializer_class = ApiOAuth2PersonalTokenWritableSerializer
view_category = 'tokens'
view_name = 'token-detail'
renderer_classes = [JSONRendererWithESISupport, JSONAPIRenderer, ] # Hide from web-browsable API tool
parser_classes = (JSONAPIMultipleRelationshipsParser, JSONAPIMultipleRelationshipsParserForRegularJSON,)
# overrides RetrieveAPIView
def get_object(self):
try:
obj = get_object_or_error(ApiOAuth2PersonalToken, Q(_id=self.kwargs['_id'], is_active=True), self.request)
except ApiOAuth2PersonalToken.DoesNotExist:
raise NotFound
self.check_object_permissions(self.request, obj)
return obj
# overrides DestroyAPIView
def perform_destroy(self, instance):
"""Instance is not actually deleted from DB- just flagged as inactive, which hides it from views"""
obj = self.get_object()
try:
obj.deactivate(save=True)
except cas.CasHTTPError:
raise APIException('Could not revoke tokens; please try again later')
def perform_update(self, serializer):
"""Necessary to prevent owner field from being blanked on updates"""
serializer.validated_data['owner'] = self.request.user
serializer.save(owner=self.request.user)
class TokenScopesList(JSONAPIBaseView, generics.ListAPIView):
"""
Get information about the scopes associated with a personal access token
Should not return information if the token belongs to a different user
"""
permission_classes = (
drf_permissions.IsAuthenticated,
base_permissions.OwnerOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.TOKENS_READ]
required_write_scopes = [CoreScopes.TOKENS_WRITE]
serializer_class = ScopeSerializer
view_category = 'tokens'
view_name = 'token-scopes-list'
renderer_classes = [JSONRendererWithESISupport, JSONAPIRenderer, ] # Hide from web-browsable API tool
def get_default_queryset(self):
try:
obj = get_object_or_error(ApiOAuth2PersonalToken, Q(_id=self.kwargs['_id'], is_active=True), self.request)
except ApiOAuth2PersonalToken.DoesNotExist:
raise NotFound
self.check_object_permissions(self.request, obj)
return obj.scopes.all()
# overrides ListAPIView
def get_queryset(self):
return self.get_default_queryset()
| null |
933 |
import os
import tempfile
from unittest.mock import patch
from galaxy.exceptions import (
ObjectNotFound,
ReferenceDataError,
)
from galaxy_test.driver import integration_util
BUILDS_DATA = (
"?\tunspecified (?)",
"hg_test\tdescription of hg_test",
"hg_test_nolen\tdescription of hg_test_nolen",
)
LEN_DATA = (
"chr1\t248956422",
"chr2\t242193529",
"chr3\t198295559",
)
def METHOD_NAME(has_len_file=True):
pos = 1 if has_len_file else 2
return BUILDS_DATA[pos].split("\t")[0]
class TestGenomes(integration_util.IntegrationTestCase):
@classmethod
def handle_galaxy_config_kwds(cls, config):
super().handle_galaxy_config_kwds(config)
genomes_dir = cls.temp_config_dir("test_genomes")
os.makedirs(genomes_dir)
cls._setup_builds_file(config, genomes_dir)
cls._setup_len_file(config, genomes_dir)
@classmethod
def _setup_builds_file(cls, config, genomes_dir):
"""Create builds file + set config option."""
builds_file_path = os.path.join(genomes_dir, "builds.txt")
config["builds_file_path"] = builds_file_path
with open(builds_file_path, "w") as f:
f.write("\n".join(BUILDS_DATA))
@classmethod
def _setup_len_file(cls, config, genomes_dir):
"""Create len file + set config option."""
config["len_file_path"] = genomes_dir # the config option is a dir
key = METHOD_NAME()
len_file_path = os.path.join(genomes_dir, f"{key}.len")
with open(len_file_path, "w") as f:
f.write("\n".join(LEN_DATA))
def test_index(self):
response = self._get("genomes")
self._assert_status_code_is(response, 200)
rval = response.json()
expected_data = [item.split("\t")[::-1] for item in BUILDS_DATA]
assert rval == expected_data
def test_show_valid(self):
key = METHOD_NAME()
response = self._get(f"genomes/{key}")
self._assert_status_code_is(response, 200)
rval = response.json()
assert rval["id"] == key
assert len(rval["chrom_info"]) == len(LEN_DATA)
def test_show_valid_no_refdata(self):
key = METHOD_NAME(has_len_file=False)
response = self._get(f"genomes/{key}")
self._assert_status_code_is(response, 500)
assert response.json()["err_code"] == ReferenceDataError.err_code.code
def test_show_invalid(self):
response = self._get("genomes/invalid")
self._assert_status_code_is(response, 404)
assert response.json()["err_code"] == ObjectNotFound.err_code.code
def test_sequences(self):
class RefDataMock:
sequence = "test-value"
key = METHOD_NAME()
with patch.object(self._app.genomes, "has_reference_data", return_value=True), patch.object(
self._app.genomes, "_get_reference_data", return_value=RefDataMock()
):
response = self._get(f"genomes/{key}/sequences")
self._assert_status_code_is(response, 200)
assert response.content == bytes(RefDataMock.sequence, "utf-8")
def test_sequences_no_data(self):
key = METHOD_NAME()
with patch.object(self._app.genomes, "has_reference_data", return_value=False):
response = self._get(f"genomes/{key}/sequences")
self._assert_status_code_is(response, 500)
assert response.json()["err_code"] == ReferenceDataError.err_code.code
def test_indexes(self):
mock_key, mock_content, index_type, suffix = "mykey", "mydata", "fasta_indexes", ".fai"
# write some data to a tempfile
with tempfile.NamedTemporaryFile(dir=self._tempdir, suffix=suffix, mode="w", delete=False) as tf:
tf.write(mock_content)
# make a mock containing the path to the tempfile
        tmpfile_path = tf.name[: -len(suffix)]  # chop off the extension
mock_data = [[mock_key, tmpfile_path]]
with patch.object(self._app.tool_data_tables.data_tables[index_type], "data", new=mock_data):
response = self._get(f"genomes/{mock_key}/indexes?type={index_type}")
self._assert_status_code_is(response, 200)
assert response.content == bytes(mock_content, "utf-8")
| null |
934 |
""" handle reading a csv from an external service, defaults are from Goodreads """
import csv
from datetime import timedelta
from typing import Iterable, Optional
from django.utils import timezone
from bookwyrm.models import ImportJob, ImportItem, SiteSettings, User
class Importer:
"""Generic class for csv data import from an outside service"""
service = "Import"
delimiter = ","
encoding = "UTF-8"
# these are from Goodreads
row_mappings_guesses = [
("id", ["id", "book id"]),
("title", ["title"]),
("authors", ["author", "authors", "primary author"]),
("isbn_10", ["isbn10", "isbn", "isbn/uid"]),
("isbn_13", ["isbn13", "isbn", "isbns", "isbn/uid"]),
("shelf", ["shelf", "exclusive shelf", "read status", "bookshelf"]),
("review_name", ["review name"]),
("review_body", ["my review", "review"]),
("rating", ["my rating", "rating", "star rating"]),
("date_added", ["date added", "entry date", "added"]),
("date_started", ["date started", "started"]),
("date_finished", ["date finished", "last date read", "date read", "finished"]),
]
date_fields = ["date_added", "date_started", "date_finished"]
shelf_mapping_guesses = {
"to-read": ["to-read", "want to read"],
"read": ["read", "already read"],
"reading": ["currently-reading", "reading", "currently reading"],
}
# pylint: disable=too-many-locals
def create_job(
self, user: User, csv_file: Iterable[str], include_reviews: bool, privacy: str
) -> ImportJob:
"""check over a csv and creates a database entry for the job"""
csv_reader = csv.DictReader(csv_file, delimiter=self.delimiter)
rows = list(csv_reader)
if len(rows) < 1:
raise ValueError("CSV file is empty")
mappings = (
self.create_row_mappings(list(fieldnames))
if (fieldnames := csv_reader.fieldnames)
else {}
)
job = ImportJob.objects.create(
user=user,
include_reviews=include_reviews,
privacy=privacy,
mappings=mappings,
source=self.service,
)
enforce_limit, allowed_imports = self.get_import_limit(user)
if enforce_limit and allowed_imports <= 0:
job.complete_job()
return job
for index, entry in enumerate(rows):
if enforce_limit and index >= allowed_imports:
break
self.create_item(job, index, entry)
return job
def update_legacy_job(self, job: ImportJob) -> None:
"""patch up a job that was in the old format"""
items = job.items
first_item = items.first()
if first_item is None:
return
headers = list(first_item.data.keys())
job.mappings = self.create_row_mappings(headers)
job.updated_date = timezone.now()
job.save()
for item in items.all():
normalized = self.normalize_row(item.data, job.mappings)
normalized["shelf"] = self.get_shelf(normalized)
item.normalized_data = normalized
item.save()
def create_row_mappings(self, headers: list[str]) -> dict[str, Optional[str]]:
"""guess what the headers mean"""
mappings = {}
for (key, guesses) in self.row_mappings_guesses:
values = [h for h in headers if h.lower() in guesses]
value = values[0] if len(values) else None
if value:
headers.remove(value)
mappings[key] = value
return mappings
def create_item(self, job: ImportJob, index: int, data: dict[str, str]) -> None:
"""creates and saves an import item"""
normalized = self.normalize_row(data, job.mappings)
normalized["shelf"] = self.get_shelf(normalized)
ImportItem(job=job, index=index, data=data, normalized_data=normalized).save()
def get_shelf(self, normalized_row: dict[str, Optional[str]]) -> Optional[str]:
"""determine which shelf to use"""
shelf_name = normalized_row.get("shelf")
if not shelf_name:
return None
shelf_name = shelf_name.lower()
shelf = [
s for (s, gs) in self.shelf_mapping_guesses.items() if shelf_name in gs
]
return shelf[0] if shelf else None
# pylint: disable=no-self-use
def normalize_row(
self, entry: dict[str, str], mappings: dict[str, Optional[str]]
) -> dict[str, Optional[str]]:
"""use the dataclass to create the formatted row of data"""
return {k: entry.get(v) if v else None for k, v in mappings.items()}
# pylint: disable=no-self-use
def get_import_limit(self, user: User) -> tuple[int, int]:
"""check if import limit is set and return how many imports are left"""
site_settings = SiteSettings.objects.get()
import_size_limit = site_settings.import_size_limit
import_limit_reset = site_settings.import_limit_reset
enforce_limit = import_size_limit and import_limit_reset
allowed_imports = 0
if enforce_limit:
time_range = timezone.now() - timedelta(days=import_limit_reset)
import_jobs = ImportJob.objects.filter(
user=user, created_date__gte=time_range
)
# pylint: disable=consider-using-generator
imported_books = sum([job.successful_item_count for job in import_jobs])
allowed_imports = import_size_limit - imported_books
return enforce_limit, allowed_imports
def METHOD_NAME(
self, user: User, original_job: ImportJob, items: list[ImportItem]
) -> ImportJob:
"""retry items that didn't import"""
job = ImportJob.objects.create(
user=user,
include_reviews=original_job.include_reviews,
privacy=original_job.privacy,
source=original_job.source,
# TODO: allow users to adjust mappings
mappings=original_job.mappings,
retry=True,
)
enforce_limit, allowed_imports = self.get_import_limit(user)
if enforce_limit and allowed_imports <= 0:
job.complete_job()
return job
for index, item in enumerate(items):
if enforce_limit and index >= allowed_imports:
break
# this will re-normalize the raw data
self.create_item(job, item.index, item.data)
return job
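# --- Illustrative sketch (not part of the original module): a service-specific
# importer usually only overrides the csv dialect and relies on the header guesses
# defined above. The service name, delimiter and encoding here are example values,
# not a statement about any particular export format.
class ExampleTabSeparatedImporter(Importer):
    """hypothetical importer for a tab-separated export"""

    service = "ExampleService"
    delimiter = "\t"
    encoding = "ISO-8859-1"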
| null |
935 |
# Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import logging
import os
import random
import sys
import tempfile
from uuid import uuid4
from toil.common import getNodeID
from toil.lib.exceptions import panic, raise_
from toil.lib.io import AtomicFileCreate, atomic_install, atomic_tmp_file
from toil.lib.misc import CalledProcessErrorStderr, call_command
from toil.test import ToilTest, slow
log = logging.getLogger(__name__)
logging.basicConfig()
class MiscTests(ToilTest):
"""
This class contains miscellaneous tests that don't have enough content to be their own test
file, and that don't logically fit in with any of the other test suites.
"""
def METHOD_NAME(self):
super().METHOD_NAME()
self.testDir = self._createTempDir()
    def testIDStability(self):
        prevNodeID = None
        for _ in range(10):
            nodeID = getNodeID()
            if prevNodeID is not None:
                self.assertEqual(nodeID, prevNodeID)
            prevNodeID = nodeID
@slow
def testGetSizeOfDirectoryWorks(self):
'''A test to make sure toil.common.getDirSizeRecursively does not
underestimate the amount of disk space needed.
Disk space allocation varies from system to system. The computed value
should always be equal to or slightly greater than the creation value.
This test generates a number of random directories and randomly sized
files to test this using getDirSizeRecursively.
'''
from toil.common import getDirSizeRecursively
# a list of the directories used in the test
directories = [self.testDir]
# A dict of {FILENAME: FILESIZE} for all files used in the test
files = {}
# Create a random directory structure
for i in range(0,10):
directories.append(tempfile.mkdtemp(dir=random.choice(directories), prefix='test'))
# Create 50 random file entries in different locations in the directories. 75% of the time
# these are fresh files of size [1, 10] MB and 25% of the time they are hard links to old
# files.
while len(files) <= 50:
fileName = os.path.join(random.choice(directories), self._getRandomName())
if random.randint(0,100) < 75:
# Create a fresh file in the range of 1-10 MB
fileSize = int(round(random.random(), 2) * 10 * 1024 * 1024)
with open(fileName, 'wb') as fileHandle:
fileHandle.write(os.urandom(fileSize))
files[fileName] = fileSize
else:
# Link to one of the previous files
if len(files) == 0:
continue
linkSrc = random.choice(list(files.keys()))
os.link(linkSrc, fileName)
files[fileName] = 'Link to %s' % linkSrc
computedDirectorySize = getDirSizeRecursively(self.testDir)
totalExpectedSize = sum(x for x in list(files.values()) if isinstance(x, int))
self.assertGreaterEqual(computedDirectorySize, totalExpectedSize)
@staticmethod
def _getRandomName():
return uuid4().hex
def _get_test_out_file(self, tail):
outf = os.path.join(self.testDir, self.id() + "." + tail)
if os.path.exists(outf):
os.unlink(outf)
return outf
def _write_test_file(self, outf_tmp):
with open(outf_tmp, "w") as fh:
fh.write(self.id() + '\n')
def test_atomic_install(self):
outf = self._get_test_out_file(".foo.gz")
outf_tmp = atomic_tmp_file(outf)
self._write_test_file(outf_tmp)
atomic_install(outf_tmp, outf)
self.assertTrue(os.path.exists(outf))
def test_atomic_install_dev(self):
devn = '/dev/null'
tmp = atomic_tmp_file(devn)
self.assertEqual(tmp, devn)
atomic_install(tmp, devn)
def test_atomic_context_ok(self):
outf = self._get_test_out_file(".tar")
with AtomicFileCreate(outf) as outf_tmp:
self._write_test_file(outf_tmp)
self.assertTrue(os.path.exists(outf))
def test_atomic_context_error(self):
outf = self._get_test_out_file(".tar")
try:
with AtomicFileCreate(outf) as outf_tmp:
self._write_test_file(outf_tmp)
raise Exception("stop!")
except Exception as ex:
self.assertEqual(str(ex), "stop!")
self.assertFalse(os.path.exists(outf))
def test_call_command_ok(self):
o = call_command(["echo", "Fred"])
self.assertEqual("Fred\n", o)
self.assertTrue(isinstance(o, str), str(type(o)))
def test_call_command_err(self):
with self.assertRaisesRegex(CalledProcessErrorStderr,
"^Command '\\['cat', '/dev/Frankenheimer']' exit status 1: cat: /dev/Frankenheimer: No such file or directory\n$"):
call_command(["cat", "/dev/Frankenheimer"])
class TestPanic(ToilTest):
def test_panic_by_hand(self):
try:
self.try_and_panic_by_hand()
except:
self.__assert_raised_exception_is_primary()
def test_panic(self):
try:
self.try_and_panic()
except:
self.__assert_raised_exception_is_primary()
def test_panic_with_secondary(self):
try:
self.try_and_panic_with_secondary()
except:
self.__assert_raised_exception_is_primary()
def test_nested_panic(self):
try:
self.try_and_nested_panic_with_secondary()
except:
self.__assert_raised_exception_is_primary()
def try_and_panic_by_hand(self):
try:
self.line_of_primary_exc = inspect.currentframe().f_lineno + 1
raise ValueError("primary")
except Exception:
exc_type, exc_value, traceback = sys.exc_info()
try:
raise RuntimeError("secondary")
except Exception:
pass
raise_(exc_type, exc_value, traceback)
def try_and_panic(self):
try:
self.line_of_primary_exc = inspect.currentframe().f_lineno + 1
raise ValueError("primary")
except:
with panic(log):
pass
def try_and_panic_with_secondary(self):
try:
self.line_of_primary_exc = inspect.currentframe().f_lineno + 1
raise ValueError("primary")
except:
with panic( log ):
raise RuntimeError("secondary")
def try_and_nested_panic_with_secondary(self):
try:
self.line_of_primary_exc = inspect.currentframe().f_lineno + 1
raise ValueError("primary")
except:
with panic( log ):
with panic( log ):
raise RuntimeError("secondary")
def __assert_raised_exception_is_primary(self):
exc_type, exc_value, exc_traceback = sys.exc_info()
self.assertEqual(exc_type, ValueError)
self.assertEqual(str(exc_value), "primary")
while exc_traceback.tb_next is not None:
exc_traceback = exc_traceback.tb_next
self.assertEqual(exc_traceback.tb_lineno, self.line_of_primary_exc)
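# --- Illustrative sketch (not part of the original tests): how panic() is meant to
# be used in application code. `resource` is a hypothetical object; errors raised by
# the cleanup inside the `with panic(log):` block are logged as secondary while the
# primary exception keeps propagating.
def _example_cleanup_with_panic(resource):
    try:
        resource.do_work()
    except Exception:
        with panic(log):
            resource.cleanup()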
| null |
936 |
######################################################################
# BioSimSpace: Making biomolecular simulation a breeze!
#
# Copyright: 2017-2023
#
# Authors: Lester Hedges <[email protected]>
#
# BioSimSpace is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BioSimSpace is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BioSimSpace. If not, see <http://www.gnu.org/licenses/>.
#####################################################################
"""Functionality for configuring restraints on collective variables."""
__author__ = "Lester Hedges"
__email__ = "[email protected]"
__all__ = ["Restraint"]
from ..Types._type import Type as _Type
class Restraint:
def __init__(self, value, force_constant=100.0, slope=0.0):
"""
Constructor.
Set a restraint on the value of a collective variable.
The expression for the bias is:
.. math::
k/2 (x - a)^2 + m (x - a)
The default restraint is purely harmonic.
Parameters
----------
value : int, float, :class:`Type <BioSimSpace.Types>`
The value of the restraint. Use 'int' or 'float' for dimensionless
collective variables.
force_constant : float
The force constant (k) for the harmonic term of the restraint.
slope : float
            The slope (m) for the linear term of the restraint.
"""
self.setValue(value)
self.setForceConstant(force_constant)
self.setSlope(slope)
def __str__(self):
"""Return a human readable string representation of the object."""
return (
"<BioSimSpace.Metadynamics.Restraint: value=%s, force_constant=%s, slope=%s>"
% (self._value, self._force_constant, self._slope)
)
def __repr__(self):
"""Return a human readable string representation of the object."""
return self.__str__()
def setValue(self, value):
"""
Set the value of the bound.
Parameters
----------
value : int, float, :class:`Type <BioSimSpace.Types>`
The value of the bound.
"""
if not isinstance(value, (float, _Type)) and not type(value) is int:
raise TypeError(
"'value' must be of type 'int', 'float', or 'BioSimSpace.Types._type.Type'"
)
self._value = value
def getValue(self):
"""
Get the value of the bound.
Returns
-------
value : int, float, :class:`Type <BioSimSpace.Types>`
The value of the bound.
"""
return self._value
def setForceConstant(self, force_constant):
"""
Set the force constant (k) for the harmonic term of the restraint.
Parameters
----------
force_constant : float
The force constant for the harmonic term of the restraint.
"""
try:
self._force_constant = float(force_constant)
except:
raise TypeError("'force_constant' must be of type 'float'")
def getForceConstant(self):
"""
Get the force constant (k) for the harmonic term of the restraint.
Returns
-------
force_constant : float
The force constant for the harmonic term of the restraint.
"""
return self._force_constant
def setSlope(self, slope):
"""
Set the slope (m) for the linear term of the restraint.
Parameters
----------
slope : float
The slope for the linear term of the restraint.
"""
try:
self._slope = float(slope)
except:
raise TypeError("'slope' must be of type 'float'")
def METHOD_NAME(self):
"""
Get the slope (m) for the linear term of the restraint.
Returns
-------
slope : float
The slope for the linear term of the restraint.
"""
return self._slope
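# --- Illustrative sketch (not part of the original module): a purely harmonic
# restraint on a dimensionless collective variable. The numbers are arbitrary
# example values; a dimensioned collective variable would pass a BioSimSpace type
# for 'value' instead of a plain float.
def _example_restraint():
    restraint = Restraint(value=1.5, force_constant=250.0)
    restraint.setSlope(-5.0)  # add a linear term to the bias
    return restraint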
| null |
937 |
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
from binstar_client.errors import NotFound
from pytest_mock import MockerFixture
from conda_env.env import Environment
from conda_env.specs.binstar import BinstarSpec
def test_name_not_present():
"""No name provided."""
spec = BinstarSpec()
assert not spec.package
assert not spec.can_handle()
assert spec.msg == "Can't process without a name"
def test_invalid_name():
"""Invalid name provided."""
spec = BinstarSpec("invalid-name")
assert not spec.package
assert not spec.can_handle()
assert spec.msg == "Invalid name 'invalid-name', try the format: user/package"
def test_package_not_exist(mocker: MockerFixture):
"""Package doesn't exist."""
mocker.patch(
"conda_env.specs.binstar.BinstarSpec.binstar",
new_callable=mocker.PropertyMock,
return_value=mocker.MagicMock(
package=mocker.MagicMock(side_effect=NotFound("msg"))
),
)
spec = BinstarSpec("darth/no-exist")
assert not spec.package
assert not spec.can_handle()
def test_package_without_environment_file(mocker: MockerFixture):
"""Package exists but no environment file is present."""
mocker.patch(
"conda_env.specs.binstar.BinstarSpec.binstar",
new_callable=mocker.PropertyMock,
return_value=mocker.MagicMock(
package=mocker.MagicMock(return_value={"files": []})
),
)
spec = BinstarSpec("darth/no-env-file")
assert spec.package
assert not spec.can_handle()
def test_download_environment(mocker: MockerFixture):
"""Package exists with an environment file."""
mocker.patch(
"conda_env.specs.binstar.BinstarSpec.binstar",
new_callable=mocker.PropertyMock,
return_value=mocker.MagicMock(
package=mocker.MagicMock(
return_value={
"files": [
{"type": "env", "version": "1", "basename": "environment.yml"}
],
},
),
download=mocker.MagicMock(return_value=mocker.MagicMock(text="name: env")),
),
)
spec = BinstarSpec("darth/env-file")
assert spec.package
assert spec.can_handle()
assert isinstance(spec.environment, Environment)
def test_environment_version_sorting(mocker: MockerFixture):
"""Package exists with multiple environment files, get latest version."""
downloader = mocker.MagicMock(return_value=mocker.MagicMock(text="name: env"))
mocker.patch(
"conda_env.specs.binstar.BinstarSpec.binstar",
new_callable=mocker.PropertyMock,
return_value=mocker.MagicMock(
package=mocker.MagicMock(
return_value={
"files": [
{
"type": "env",
"version": "0.1.1",
"basename": "environment.yml",
},
{
"type": "env",
"version": "0.1a.2",
"basename": "environment.yml",
},
{
"type": "env",
"version": "0.2.0",
"basename": "environment.yml",
},
],
},
),
download=downloader,
),
)
spec = BinstarSpec("darth/env-file")
assert spec.package
assert spec.can_handle()
assert isinstance(spec.environment, Environment)
downloader.assert_called_with("darth", "env-file", "0.2.0", "environment.yml")
def METHOD_NAME(mocker: MockerFixture):
"""Mock anaconda-client not installed."""
mocker.patch(
"conda_env.specs.binstar.BinstarSpec.binstar",
new_callable=mocker.PropertyMock,
return_value=None,
)
spec = BinstarSpec("user/package")
assert not spec.package
assert not spec.can_handle()
assert spec.msg == (
"Anaconda Client is required to interact with anaconda.org or an Anaconda API. "
"Please run `conda install anaconda-client -n base`."
)
| null |
938 |
import re
import pytest
from alfasim_sdk._internal.types import MultipleReference
from alfasim_sdk._internal.types import Reference
@pytest.mark.parametrize("expression_type", ["enable_expr", "visible_expr"])
def METHOD_NAME(expression_type):
from alfasim_sdk._internal.types import String
inputs = {"value": "value", "caption": "caption", expression_type: ""}
with pytest.raises(TypeError, match=f"'{expression_type}' must be callable"):
String(**inputs)
def function_definition(): # pragma: no cover
pass
valid_input_1 = {"value": "value", "caption": "caption", expression_type: None}
valid_input_2 = {
"value": "value",
"caption": "caption",
expression_type: function_definition,
}
String(**valid_input_1)
String(**valid_input_2)
def test_string():
from alfasim_sdk._internal.types import String
with pytest.raises(
TypeError, match="missing 1 required keyword-only argument: 'caption'"
):
String(value="acme")
with pytest.raises(
TypeError, match=re.escape("'caption' must be 'str' (got 1 that is a 'int')")
):
String(value="acme", caption=1)
with pytest.raises(
TypeError, match=re.escape("'value' must be 'str' (got 1 that is a 'int')")
):
String(value=1, caption="caption")
def test_enum():
from alfasim_sdk._internal.types import Enum
with pytest.raises(
TypeError, match="missing 1 required keyword-only argument: 'caption'"
):
Enum(values=["s"], initial="")
with pytest.raises(TypeError, match="values must be a list, got a 'str'."):
Enum(values="", caption="caption")
with pytest.raises(
TypeError, match="values must be a list of strings, the item '1' is a 'int'"
):
Enum(values=[1], caption="caption")
with pytest.raises(
ValueError, match='Enum type cannot have an empty string on field "values"'
):
Enum(values=[""], caption="caption")
enum = Enum(values=["value"], caption="caption")
assert enum.initial is None
enum = Enum(values=["value"], initial="value", caption="caption")
assert enum.initial == "value"
with pytest.raises(
TypeError, match="The initial condition must be within the declared values"
):
Enum(values=["value1, value2"], initial="", caption="caption")
@pytest.mark.parametrize("class_", [Reference, MultipleReference])
def test_reference(class_):
from alfasim_sdk._internal.types import TracerType
from alfasim_sdk._internal.models import data_model, container_model
@data_model(caption="caption")
class Data:
pass
@container_model(caption="caption", model=Data, icon="")
class DataContainer:
pass
class InvalidClass:
pass
with pytest.raises(
TypeError, match="missing 1 required keyword-only argument: 'caption'"
):
class_(ref_type="")
with pytest.raises(TypeError, match="ref_type must be a class"):
class_(ref_type="", caption="caption")
with pytest.raises(
TypeError,
match="ref_type must be an ALFAsim type or a class decorated with 'data_model'",
):
class_(ref_type=InvalidClass, caption="caption")
error_msg = "ref_type must be an ALFAsim type or a class decorated with 'data_model', got a class decorated with 'container_model'"
with pytest.raises(TypeError, match=error_msg):
class_(ref_type=DataContainer, caption="caption")
error_msg = "The container_type field must be given when ref_type is a class decorated with 'data_model'"
with pytest.raises(TypeError, match=error_msg):
class_(ref_type=Data, caption="caption")
with pytest.raises(ValueError, match='The field "container_type" cannot be empty'):
class_(ref_type=Data, container_type="", caption="caption")
assert (
class_(ref_type=Data, container_type="DataContainer", caption="caption")
is not None
)
assert class_(ref_type=TracerType, caption="caption") is not None
def test_quantity():
from alfasim_sdk._internal.types import Quantity
with pytest.raises(
TypeError, match="missing 1 required keyword-only argument: 'caption'"
):
Quantity(value="", unit="")
with pytest.raises(TypeError, match="'value' must be <class 'numbers.Real'>"):
Quantity(value="", unit="", caption="caption")
with pytest.raises(
TypeError, match=re.escape("'unit' must be 'str' (got 1 that is a 'int')")
):
Quantity(value=1, unit=1, caption="caption")
def test_table():
from alfasim_sdk._internal.types import Table
with pytest.raises(
TypeError, match="missing 1 required keyword-only argument: 'caption'"
):
Table(rows=[])
with pytest.raises(TypeError, match="rows must be a list with TableColumn."):
Table(rows=[], caption="caption")
with pytest.raises(TypeError, match="rows must be a list of TableColumn."):
Table(rows=[""], caption="caption")
def test_table_column():
from alfasim_sdk._internal.types import TableColumn, Quantity
with pytest.raises(
TypeError, match="value must be a Quantity, got a <class 'str'>."
):
TableColumn(id="id", value="")
column = TableColumn(
id="id", value=Quantity(value=1, unit="m", caption="CAPTION FOR COLUMN")
)
assert column.caption == column.value.caption
def test_boolean():
from alfasim_sdk._internal.types import Boolean
with pytest.raises(
TypeError, match="missing 1 required keyword-only argument: 'caption'"
):
Boolean(value="")
with pytest.raises(TypeError, match="'value' must be <class 'bool'"):
Boolean(value=1, caption="caption")
def test_file_content():
from alfasim_sdk._internal.types import FileContent
FileContent(caption="Test")
def test_tooltips():
from alfasim_sdk._internal.types import Boolean
field = Boolean(value=True, caption="caption")
assert field.tooltip == ""
field = Boolean(value=True, caption="caption", tooltip="Test123")
assert field.tooltip == "Test123"
expected_msg = re.escape(
"'tooltip' must be <class 'str'> (got 2 that is a <class 'int'>)."
)
with pytest.raises(TypeError, match=expected_msg):
Boolean(value=True, caption="caption", tooltip=2)
field = Boolean(value=True, caption="caption", tooltip="∩ ∪ ∫ ∬ ∭ ∮")
assert field.tooltip == "∩ ∪ ∫ ∬ ∭ ∮"
| null |
939 |
import datetime
import getpass
import logging
import os
import random
import socket
import subprocess
import sys
import time
import typing
from contextlib import closing
from typing import Iterator, List, Optional, Union
import pytz
logger = logging.getLogger(__name__)
def get_public_ip() -> str:
"""Get the IP that this machine uses to contact the internet.
If behind a NAT, this will still be this computer's IP, and not the router's."""
try:
# Try to get the internet-facing IP by attempting a connection
# to a non-existent server and reading what IP was used.
ip = '127.0.0.1'
with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as sock:
# 203.0.113.0/24 is reserved as TEST-NET-3 by RFC 5737, so
# there is guaranteed to be no one listening on the other
# end (and we won't accidentally DOS anyone).
sock.connect(('203.0.113.1', 1))
ip = sock.getsockname()[0]
return ip
except:
# Something went terribly wrong. Just give loopback rather
# than killing everything, because this is often called just
# to provide a default argument
return '127.0.0.1'
def get_user_name() -> str:
"""
Get the current user name, or a suitable substitute string if the user name
is not available.
"""
try:
try:
return getpass.getuser()
except KeyError:
# This is expected if the user isn't in /etc/passwd, such as in a
# Docker container when running as a weird UID. Make something up.
return 'UnknownUser' + str(os.getuid())
except Exception as e:
# We can't get the UID, or something weird has gone wrong.
logger.error('Unexpected error getting user name: %s', e)
return 'UnknownUser'
def utc_now() -> datetime.datetime:
"""Return a datetime in the UTC timezone corresponding to right now."""
return datetime.datetime.utcnow().replace(tzinfo=pytz.UTC)
def METHOD_NAME() -> float:
"""Return the current time in milliseconds since the Unix epoch."""
return time.time() * 1000
def slow_down(seconds: float) -> float:
"""
Toil jobs that have completed are not allowed to have taken 0 seconds, but
Kubernetes timestamps round things to the nearest second. It is possible in
some batch systems for a pod to have identical start and end timestamps.
This function takes a possibly 0 job length in seconds and enforces a
minimum length to satisfy Toil.
:param float seconds: Timestamp difference
:return: seconds, or a small positive number if seconds is 0
:rtype: float
"""
return max(seconds, sys.float_info.epsilon)
def printq(msg: str, quiet: bool) -> None:
if not quiet:
print(msg)
def truncExpBackoff() -> Iterator[float]:
# as recommended here https://forums.aws.amazon.com/thread.jspa?messageID=406788#406788
# and here https://cloud.google.com/storage/docs/xml-api/reference-status
yield 0
t = 1
while t < 1024:
# google suggests this dither
yield t + random.random()
t *= 2
while True:
yield t
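# --- Illustrative sketch (not part of the original module): consuming the
# truncated-exponential-backoff generator to retry a flaky operation. The `attempt`
# callable and the retry cap are assumptions for demonstration only.
def _example_retry_with_backoff(attempt: typing.Callable[[], bool], max_tries: int = 8) -> bool:
    for tries, delay in enumerate(truncExpBackoff()):
        if tries >= max_tries:
            return False
        time.sleep(delay)  # first delay is 0, then ~1, ~2, ~4, ... seconds, capped at 1024
        if attempt():
            return True
    return False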
class CalledProcessErrorStderr(subprocess.CalledProcessError):
"""Version of CalledProcessError that include stderr in the error message if it is set"""
def __str__(self) -> str:
        if (self.returncode < 0) or (self.stderr is None):
            return super().__str__()
else:
err = self.stderr if isinstance(self.stderr, str) else self.stderr.decode("ascii", errors="replace")
return "Command '%s' exit status %d: %s" % (self.cmd, self.returncode, err)
def call_command(cmd: List[str], *args: str, input: Optional[str] = None, timeout: Optional[float] = None,
useCLocale: bool = True, env: Optional[typing.Dict[str, str]] = None, quiet: Optional[bool] = False) -> str:
"""
Simplified calling of external commands.
If the process fails, CalledProcessErrorStderr is raised.
    The captured stderr is always printed, regardless of
    whether an exception occurs, so it can be logged.
Always logs the command at debug log level.
:param quiet: If True, do not log the command output. If False (the
default), do log the command output at debug log level.
:param useCLocale: If True, C locale is forced, to prevent failures that
can occur in some batch systems when using UTF-8 locale.
:returns: Command standard output, decoded as utf-8.
"""
# NOTE: Interface MUST be kept in sync with call_sacct and call_scontrol in
# test_slurm.py, which monkey-patch this!
# using non-C locales can cause GridEngine commands, maybe other to
# generate errors
if useCLocale:
env = dict(os.environ) if env is None else dict(env) # copy since modifying
env["LANGUAGE"] = env["LC_ALL"] = "C"
logger.debug("run command: {}".format(" ".join(cmd)))
start_time = datetime.datetime.now()
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
encoding='utf-8', errors="replace", env=env)
stdout, stderr = proc.communicate(input=input, timeout=timeout)
end_time = datetime.datetime.now()
runtime = (end_time - start_time).total_seconds()
sys.stderr.write(stderr)
if proc.returncode != 0:
logger.debug("command failed in {}s: {}: {}".format(runtime, " ".join(cmd), stderr.rstrip()))
raise CalledProcessErrorStderr(proc.returncode, cmd, output=stdout, stderr=stderr)
logger.debug("command succeeded in {}s: {}{}".format(runtime, " ".join(cmd), (': ' + stdout.rstrip()) if not quiet else ''))
return stdout
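# --- Illustrative sketch (not part of the original module): a caller that wants the
# stderr-aware error handling. The command and log message are examples only.
def _example_list_directory(path: str) -> str:
    try:
        return call_command(["ls", path], quiet=True)
    except CalledProcessErrorStderr as e:
        logger.warning("Listing %s failed: %s", path, e)
        return ""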
| null |
940 |
#!/usr/bin/env python
import argparse
import logging
from glob import glob
from html.parser import HTMLParser
import requests
from .util import MULLED_SOCKET_TIMEOUT
QUAY_API_ENDPOINT = "https://quay.io/api/v1/repository"
def get_quay_containers(repository="biocontainers"):
"""
Get all quay containers in the biocontainers repo
"""
containers = []
repos_parameters = {"public": "true", "namespace": repository}
repos_headers = {"Accept-encoding": "gzip", "Accept": "application/json"}
repos_response = requests.get(QUAY_API_ENDPOINT, headers=repos_headers, params=repos_parameters, timeout=12)
repos = repos_response.json()["repositories"]
for repo in repos:
logging.info(repo)
tags_response = requests.get(f"{QUAY_API_ENDPOINT}/{repository}/{repo['name']}", timeout=MULLED_SOCKET_TIMEOUT)
tags = tags_response.json()["tags"]
for tag in tags:
containers.append(f"{repo['name']}:{tag}")
return containers
def get_singularity_containers():
"""
Get all existing singularity containers from "https://depot.galaxyproject.org/singularity/"
"""
class GetContainerNames(HTMLParser): # small parser which gets list of containers
def __init__(self):
HTMLParser.__init__(self)
self.containers = []
def handle_starttag(self, tag, attrs):
try:
for attr in attrs:
if attr[0] == "href" and attr[1] != "../":
self.containers.append(attr[1].replace("%3A", ":"))
except IndexError:
pass
parser = GetContainerNames()
index = requests.get("https://depot.galaxyproject.org/singularity/", timeout=MULLED_SOCKET_TIMEOUT)
parser.feed(index.text)
return parser.containers
def METHOD_NAME(filepath):
"""
Get list of already existing envs
"""
return [n.split("__")[-1].replace("@", ":") for n in glob(f"{filepath}/*")]
def get_missing_containers(quay_list, singularity_list, blocklist_file=None):
r"""
    Return list of quay containers that do not exist as singularity containers. Containers listed in the blocklist file will be ignored
"""
blocklist = []
if blocklist_file:
with open(blocklist_file) as fh:
blocklist = fh.read().split("\n")
return [n for n in quay_list if n not in singularity_list and n not in blocklist]
def get_missing_envs(quay_list, conda_list, blocklist_file=None):
r"""
Compares list of conda envs and docker containers and returns missing conda envs
"""
blocklist = []
if blocklist_file:
with open(blocklist_file) as fh:
blocklist = fh.read().split("\n")
return [n for n in quay_list if n.split("--")[0] not in conda_list and n.split("--")[0] not in blocklist]
def main():
parser = argparse.ArgumentParser(
description="Returns list of Docker containers in the quay.io biocontainers repository."
)
parser.add_argument("--source", "-s", help="Docker, Singularity or Conda.")
parser.add_argument(
"--not-singularity",
dest="not_singularity",
action="store_true",
help="Exclude Docker containers from which Singularity containers have already been built.",
)
parser.add_argument(
"--not-conda",
dest="not_conda",
action="store_true",
help="Exclude Docker containers from which Conda environments have already been extracted.",
)
parser.add_argument(
"--conda-filepath",
dest="conda_filepath",
default=None,
help="If searching for conda environments or employing the --not-conda option, a filepath where the environments are located.",
)
parser.add_argument(
"-b",
"--blocklist",
"--blacklist",
dest="blocklist",
default=None,
help="Provide a 'blocklist file' containing containers which should be excluded from the list.",
)
parser.add_argument(
"-f",
"--file",
dest="output",
default=None,
help="File to write list to. If not given output will be returned on the command line.",
)
args = parser.parse_args()
if args.source == "docker":
containers = get_quay_containers()
if args.not_singularity:
containers = get_missing_containers(containers, get_singularity_containers(), args.blocklist)
if args.not_conda:
containers = get_missing_envs(containers, METHOD_NAME(args.conda_filepath), args.blocklist)
elif args.source == "singularity":
containers = get_singularity_containers()
elif args.source == "conda":
containers = METHOD_NAME(args.conda_filepath)
else:
print("The 'source' argument was not understood.")
return
if args.output:
with open(args.output, "a") as f:
for container in containers:
f.write(f"{container}\n")
else:
print(containers)
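# --- Illustrative sketch (not part of the original script): the set-difference
# helpers can also be used directly on in-memory lists, without a blocklist file.
def _example_missing_containers():
    quay = ["samtools:1.9--h8571acd_11", "bwa:0.7.17--hed695b0_7"]
    singularity = ["samtools:1.9--h8571acd_11"]
    return get_missing_containers(quay, singularity)  # -> ["bwa:0.7.17--hed695b0_7"]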
if __name__ == "__main__":
main()
| null |
941 |
from typing import Any, Tuple
from AnyQt.QtCore import Qt, QSize, QAbstractItemModel, Property
from AnyQt.QtWidgets import (
QWidget, QSlider, QFormLayout, QComboBox, QStyle, QSizePolicy
)
from AnyQt.QtCore import Signal
from Orange.widgets.utils import itemmodels, colorpalettes
from Orange.widgets.utils.spinbox import DoubleSpinBox, DBL_MIN, DBL_MAX
from Orange.widgets.utils.intervalslider import IntervalSlider
class ColorGradientSelection(QWidget):
activated = Signal(int)
currentIndexChanged = Signal(int)
thresholdsChanged = Signal(float, float)
centerChanged = Signal(float)
def __init__(self, *args, thresholds=(0.0, 1.0), center=None, **kwargs):
super().__init__(*args, **kwargs)
low = round(clip(thresholds[0], 0., 1.), 2)
high = round(clip(thresholds[1], 0., 1.), 2)
high = max(low, high)
self.__threshold_low, self.__threshold_high = low, high
self.__center = center
form = QFormLayout(
formAlignment=Qt.AlignLeft,
labelAlignment=Qt.AlignLeft,
fieldGrowthPolicy=QFormLayout.AllNonFixedFieldsGrow
)
form.setContentsMargins(0, 0, 0, 0)
self.gradient_cb = QComboBox(
None, objectName="gradient-combo-box",
)
self.gradient_cb.setAttribute(Qt.WA_LayoutUsesWidgetRect)
icsize = self.style().pixelMetric(
QStyle.PM_SmallIconSize, None, self.gradient_cb
)
self.gradient_cb.setIconSize(QSize(64, icsize))
model = itemmodels.ContinuousPalettesModel()
model.setParent(self)
self.gradient_cb.setModel(model)
self.gradient_cb.activated[int].connect(self.activated)
self.gradient_cb.currentIndexChanged.connect(self.currentIndexChanged)
self.gradient_cb.currentIndexChanged.connect(
self.__update_center_visibility)
form.setWidget(0, QFormLayout.SpanningRole, self.gradient_cb)
def on_center_spin_value_changed(value):
if self.__center != value:
self.__center = value
self.centerChanged.emit(self.__center)
if center is not None:
self.center_edit = DoubleSpinBox(
value=self.__center,
minimum=DBL_MIN, maximum=DBL_MAX, minimumStep=0.01,
minimumContentsLenght=8, alignment=Qt.AlignRight,
stepType=DoubleSpinBox.AdaptiveDecimalStepType,
keyboardTracking=False,
sizePolicy=QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed),
)
self.center_edit.valueChanged.connect(on_center_spin_value_changed)
else:
self.center_edit = None
slider = self.slider = IntervalSlider(
int(low * 100), int(high * 100), minimum=0, maximum=100,
tickPosition=QSlider.NoTicks,
toolTip=self.tr("Low gradient threshold"),
whatsThis=self.tr("Applying a low threshold will squeeze the "
"gradient from the lower end")
)
form.addRow(self.tr("Range:"), slider)
self.slider.intervalChanged.connect(self.__on_slider_moved)
self.setLayout(form)
def setModel(self, model: QAbstractItemModel) -> None:
self.gradient_cb.setModel(model)
def model(self) -> QAbstractItemModel:
return self.gradient_cb.model()
def findData(self, data: Any, role: Qt.ItemDataRole) -> int:
return self.gradient_cb.findData(data, role)
def setCurrentIndex(self, index: int) -> None:
self.gradient_cb.setCurrentIndex(index)
self.__update_center_visibility()
def currentIndex(self) -> int:
return self.gradient_cb.currentIndex()
currentIndex_ = Property(
int, currentIndex, setCurrentIndex, notify=currentIndexChanged)
def currentData(self, role=Qt.UserRole) -> Any:
return self.gradient_cb.currentData(role)
def thresholds(self) -> Tuple[float, float]:
return self.__threshold_low, self.__threshold_high
thresholds_ = Property(object, thresholds, notify=thresholdsChanged)
def thresholdLow(self) -> float:
return self.__threshold_low
def setThresholdLow(self, low: float) -> None:
self.METHOD_NAME(low, max(self.__threshold_high, low))
thresholdLow_ = Property(
float, thresholdLow, setThresholdLow, notify=thresholdsChanged)
def thresholdHigh(self) -> float:
return self.__threshold_high
def setThresholdHigh(self, high: float) -> None:
self.METHOD_NAME(min(self.__threshold_low, high), high)
    thresholdHigh_ = Property(
        float, thresholdHigh, setThresholdHigh, notify=thresholdsChanged)
def __on_slider_moved(self, low: int, high: int) -> None:
old = self.__threshold_low, self.__threshold_high
self.__threshold_low = low / 100.
self.__threshold_high = high / 100.
new = self.__threshold_low, self.__threshold_high
if new != old:
self.thresholdsChanged.emit(*new)
def METHOD_NAME(self, low: float, high: float) -> None:
low = round(clip(low, 0., 1.), 2)
high = round(clip(high, 0., 1.), 2)
if low > high:
high = low
if self.__threshold_low != low or self.__threshold_high != high:
self.__threshold_high = high
self.__threshold_low = low
self.slider.setInterval(int(low * 100), int(high * 100))
            self.thresholdsChanged.emit(low, high)
def __update_center_visibility(self):
palette = self.currentData()
if self.center_edit is None or \
(visible := self.center_edit.parent() is not None) \
== bool(isinstance(palette, colorpalettes.Palette)
and palette.flags & palette.Flags.Diverging):
return
if visible:
self.layout().takeRow(1).labelItem.widget().setParent(None)
self.center_edit.setParent(None)
else:
self.layout().insertRow(1, "Center at:", self.center_edit)
def center(self) -> float:
return self.__center
def setCenter(self, center: float) -> None:
if self.__center != center:
self.__center = center
self.center_edit.setValue(center)
self.centerChanged.emit(center)
center_ = Property(float, center, setCenter, notify=centerChanged)
def clip(a, amin, amax):
return min(max(a, amin), amax)
| null |
942 |
# TODO: Remove all TODO comments once the implementation is complete.
"""
TODO: Add the Paper Title on this line.
TODO: Add the paper's PDF URL (preferably from arXiv) on this line.
TODO: Write a Short Description of the task.
Homepage: TODO: Add the URL to the task's Homepage here.
"""
from lm_eval.base import Task
# TODO: Add the BibTeX citation for the task.
_CITATION = """
"""
# TODO: Replace `NewTask` with the name of your Task.
class NewTask(Task):
VERSION = 0
# TODO: Add the `DATASET_PATH` string. This will be the name of the `Task`
# dataset as denoted in HuggingFace `datasets`.
DATASET_PATH = ""
# TODO: Add the `DATASET_NAME` string. This is the name of a subset within
# `DATASET_PATH`. If there aren't specific subsets you need, leave this as `None`.
DATASET_NAME = None
def has_training_docs(self):
# TODO: Fill in the return with `True` if the Task has training data; else `False`.
return False
def METHOD_NAME(self):
# TODO: Fill in the return with `True` if the Task has validation data; else `False`.
return False
def has_test_docs(self):
# TODO: Fill in the return with `True` if the Task has test data; else `False`.
return False
def training_docs(self):
if self.has_training_docs():
# We cache training documents in `self._training_docs` for faster
# few-shot processing. If the data is too large to fit in memory,
# return the training data as a generator instead of a list.
if self._training_docs is None:
# TODO: Return the training document generator from `self.dataset`.
# If you need to process the data, `map` over the documents with
# the custom processing function, `self._process_doc`. E.g.
# `map(self._process_doc, self.dataset["validation"])`
                # In most cases you can leave this as is unless the dataset split is
# named differently than the default `"train"`.
self._training_docs = list(self.dataset["train"])
return self._training_docs
def validation_docs(self):
if self.METHOD_NAME():
# TODO: Return the validation document generator from `self.dataset`.
# If you need to process the data, `map` over the documents with the
# custom processing function, `self._process_doc`. E.g.
# `map(self._process_doc, self.dataset["validation"])`
            # In most cases you can leave this as is unless the dataset split is
# named differently than the default `"validation"`.
return self.dataset["validation"]
def test_docs(self):
if self.has_test_docs():
# TODO: Return the test document generator from `self.dataset`.
# If you need to process the data, `map` over the documents with the
# custom processing function, `self._process_doc`. E.g.
# `map(self._process_doc, self.dataset["test"])`
            # In most cases you can leave this as is unless the dataset split is
# named differently than the default `"test"`.
return self.dataset["test"]
def _process_doc(self, doc):
# TODO: Process (detokenize, strip, replace etc.) each individual `doc`
# with this function. You can map this across the docs in each available
# dataset split. See the TODOs in `train_docs`, `validation_docs`, and
# `test_docs` for snippets.
# NOTE: DELETE THIS FUNCTION IF UNUSED.
return doc
def doc_to_text(self, doc):
# TODO: Format the query prompt portion of the document example.
return ""
def doc_to_target(self, doc):
# TODO: Fill in the `target` ("gold answer") variable.
# The prepended `" "` is required to space out the `doc_to_text` and
# `doc_to_target` strings.
target = ""
return " " + target
def construct_requests(self, doc, ctx):
"""Uses RequestFactory to construct Requests and returns an iterable of
Requests which will be sent to the LM.
:param doc:
The document as returned from training_docs, validation_docs, or
test_docs.
:param ctx: str
The context string, generated by fewshot_context. This includes the natural
language description, as well as the few shot examples, and the question
part of the document for `doc`.
"""
# TODO: Construct your language model requests with the request factory, `rf`,
# and return them as an iterable.
return []
def process_results(self, doc, results):
"""Take a single document and the LM results and evaluates, returning a
dict where keys are the names of submetrics and values are the values of
the metric for that one document
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param results:
The results of the requests created in construct_requests.
"""
# TODO: For each (sub)metric in the task evaluation, add a key-value pair
# with the metric name as key and the corresponding metric result as value
# for the current `doc`.
return {}
def aggregation(self):
"""
:returns: {str: [metric_score] -> float}
A dictionary where keys are the names of submetrics and values are
functions that aggregate a list of metric scores
"""
# TODO: For each (sub)metric in the task evaluation, add a key-value pair
# with the metric name as key and an aggregation function as value which
# determines how to combine results from each document in the dataset.
# Check `lm_eval.metrics` to find built-in aggregation functions.
return {}
def higher_is_better(self):
# TODO: For each (sub)metric in the task evaluation, add a key-value pair
# with the metric name as key and a `bool` value determining whether or
# not higher values of that metric are deemed better.
return {}
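# --- Illustrative sketch (not part of the template): one way the TODO hooks might be
# filled in for a hypothetical yes/no dataset. The dataset path, field names
# ("question", "answer") and prompt format are assumptions made purely to show the
# intent of the hooks; adapt them to the real dataset, splits and metrics.
from lm_eval.base import rf
from lm_eval.metrics import mean


class ExampleYesNoTask(NewTask):
    DATASET_PATH = "example/yes-no"  # hypothetical dataset identifier

    def doc_to_text(self, doc):
        return f"Question: {doc['question']}\nAnswer:"

    def doc_to_target(self, doc):
        return " " + doc["answer"]

    def construct_requests(self, doc, ctx):
        ll_yes, _ = rf.loglikelihood(ctx, " yes")
        ll_no, _ = rf.loglikelihood(ctx, " no")
        return ll_yes, ll_no

    def process_results(self, doc, results):
        ll_yes, ll_no = results
        prediction = "yes" if ll_yes > ll_no else "no"
        return {"acc": float(prediction == doc["answer"])}

    def aggregation(self):
        return {"acc": mean}

    def higher_is_better(self):
        return {"acc": True}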
| null |
943 |
import asyncio
import functools
from collections import defaultdict, deque
from unittest.mock import AsyncMock, PropertyMock
import aiohttp
class NetworkMockingAssistant:
def __init__(self):
super().__init__()
self._response_text_queues = defaultdict(asyncio.Queue)
self._response_json_queues = defaultdict(asyncio.Queue)
self._response_status_queues = defaultdict(deque)
self._sent_http_requests = defaultdict(asyncio.Queue)
self._incoming_websocket_json_queues = defaultdict(asyncio.Queue)
self._all_incoming_websocket_json_delivered_event = defaultdict(asyncio.Event)
self._incoming_websocket_text_queues = defaultdict(asyncio.Queue)
self._all_incoming_websocket_text_delivered_event = defaultdict(asyncio.Event)
self._incoming_websocket_aiohttp_queues = defaultdict(asyncio.Queue)
self._all_incoming_websocket_aiohttp_delivered_event = defaultdict(asyncio.Event)
self._sent_websocket_json_messages = defaultdict(list)
self._sent_websocket_text_messages = defaultdict(list)
self._ev_loop = asyncio.get_event_loop()
@staticmethod
def METHOD_NAME(function, *args, **kwargs):
async def partial_func(*args2, **kwargs2):
result = function(*args, *args2, **kwargs, **kwargs2)
if asyncio.iscoroutinefunction(function):
result = await result
return result
return partial_func
def _get_next_api_response_status(self, http_mock):
return self._response_status_queues[http_mock].popleft()
async def _get_next_api_response_json(self, http_mock):
ret = await self._response_json_queues[http_mock].get()
return ret
async def _get_next_api_response_text(self, http_mock):
return await self._response_text_queues[http_mock].get()
def _handle_http_request(self, http_mock, url, headers=None, params=None, data=None, *args, **kwargs):
response = AsyncMock()
type(response).status = PropertyMock(side_effect=functools.partial(
self._get_next_api_response_status, http_mock))
response.json.side_effect = self.METHOD_NAME(self._get_next_api_response_json, http_mock)
response.text.side_effect = self.METHOD_NAME(self._get_next_api_response_text, http_mock)
response.__aenter__.return_value = response
components = params if params else data
self._sent_http_requests[http_mock].put_nowait((url, headers, components))
return response
def configure_http_request_mock(self, http_request_mock):
http_request_mock.side_effect = functools.partial(self._handle_http_request, http_request_mock)
def add_http_response(self, http_request_mock, response_status, response_json=None, response_text=None):
self._response_status_queues[http_request_mock].append(response_status)
if response_json is not None:
self._response_json_queues[http_request_mock].put_nowait(response_json)
if response_text is not None:
self._response_text_queues[http_request_mock].put_nowait(response_text)
async def next_sent_request_data(self, http_request_mock):
return await self._sent_http_requests[http_request_mock].get()
async def _get_next_websocket_json_message(self, websocket_mock, *args, **kwargs):
queue = self._incoming_websocket_json_queues[websocket_mock]
message = await queue.get()
if queue.empty():
self._all_incoming_websocket_json_delivered_event[websocket_mock].set()
return message
async def _get_next_websocket_aiohttp_message(self, websocket_mock, *args, **kwargs):
queue = self._incoming_websocket_aiohttp_queues[websocket_mock]
message = await queue.get()
if queue.empty():
self._all_incoming_websocket_aiohttp_delivered_event[websocket_mock].set()
return message
async def _get_next_websocket_text_message(self, websocket_mock, *args, **kwargs):
queue = self._incoming_websocket_text_queues[websocket_mock]
message = await queue.get()
if queue.empty():
self._all_incoming_websocket_text_delivered_event[websocket_mock].set()
return message
def create_websocket_mock(self):
ws = AsyncMock()
ws.__aenter__.return_value = ws
ws.send_json.side_effect = lambda sent_message: self._sent_websocket_json_messages[ws].append(sent_message)
ws.send.side_effect = lambda sent_message: self._sent_websocket_text_messages[ws].append(sent_message)
ws.send_str.side_effect = lambda sent_message: self._sent_websocket_text_messages[ws].append(sent_message)
ws.receive_json.side_effect = self.METHOD_NAME(self._get_next_websocket_json_message, ws)
ws.receive_str.side_effect = self.METHOD_NAME(self._get_next_websocket_text_message, ws)
ws.receive.side_effect = self.METHOD_NAME(self._get_next_websocket_aiohttp_message, ws)
ws.recv.side_effect = self.METHOD_NAME(self._get_next_websocket_text_message, ws)
return ws
def add_websocket_json_message(self, websocket_mock, message):
self._incoming_websocket_json_queues[websocket_mock].put_nowait(message)
self._all_incoming_websocket_json_delivered_event[websocket_mock].clear()
def add_websocket_text_message(self, websocket_mock, message):
self._incoming_websocket_text_queues[websocket_mock].put_nowait(message)
self._all_incoming_websocket_text_delivered_event[websocket_mock].clear()
def add_websocket_aiohttp_message(
self, websocket_mock, message, message_type: aiohttp.WSMsgType = aiohttp.WSMsgType.TEXT
):
msg = aiohttp.WSMessage(message_type, message, extra=None)
self._incoming_websocket_aiohttp_queues[websocket_mock].put_nowait(msg)
self._all_incoming_websocket_aiohttp_delivered_event[websocket_mock].clear()
def json_messages_sent_through_websocket(self, websocket_mock):
return self._sent_websocket_json_messages[websocket_mock]
def text_messages_sent_through_websocket(self, websocket_mock):
return self._sent_websocket_text_messages[websocket_mock]
def run_until_all_text_messages_delivered(self, websocket_mock, timeout: int = 1):
all_delivered = self._all_incoming_websocket_text_delivered_event[websocket_mock]
self._ev_loop.run_until_complete(asyncio.wait_for(all_delivered.wait(), timeout))
def run_until_all_json_messages_delivered(self, websocket_mock, timeout: int = 1):
all_delivered = self._all_incoming_websocket_json_delivered_event[websocket_mock]
self._ev_loop.run_until_complete(asyncio.wait_for(all_delivered.wait(), timeout))
def run_until_all_aiohttp_messages_delivered(self, websocket_mock, timeout: int = 1):
all_delivered = self._all_incoming_websocket_aiohttp_delivered_event[websocket_mock]
self._ev_loop.run_until_complete(asyncio.wait_for(all_delivered.wait(), timeout))
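# --- Illustrative sketch (not part of the original helper): preparing a mocked
# websocket that will deliver one JSON message to the code under test. The payload
# is an arbitrary example.
def _example_prepare_websocket_mock():
    assistant = NetworkMockingAssistant()
    ws_mock = assistant.create_websocket_mock()
    assistant.add_websocket_json_message(ws_mock, {"event": "trade", "price": "1.0"})
    return ws_mock  # `await ws_mock.receive_json()` now yields the queued message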
| null |
944 |
# Copyright 2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Provide data iterator for CIFAR10 examples.
'''
from contextlib import contextmanager
import numpy as np
import struct
import tarfile
import zlib
import time
import os
import errno
from nnabla.logger import logger
from nnabla.utils.data_iterator import data_iterator
from nnabla.utils.data_source import DataSource
from nnabla.utils.data_source_loader import download, get_data_home
class Cifar10DataSource(DataSource):
'''
    Get data directly from the CIFAR-10 dataset on the Internet (www.cs.toronto.edu).
'''
def _get_data(self, position):
image = self._images[self._indexes[position]]
label = self._labels[self._indexes[position]]
return (image, label)
def __init__(self, train=True, shuffle=False, rng=None):
super(Cifar10DataSource, self).__init__(shuffle=shuffle, rng=rng)
self._train = train
data_uri = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
logger.info('Getting labeled data from {}.'.format(data_uri))
r = download(data_uri) # file object returned
with tarfile.open(fileobj=r, mode="r:gz") as fpin:
# Training data
if train:
images = []
labels = []
for member in fpin.getmembers():
if "data_batch" not in member.name:
continue
fp = fpin.extractfile(member)
data = np.load(fp, encoding="bytes", allow_pickle=True)
images.append(data[b"data"])
labels.append(data[b"labels"])
self._size = 50000
self._images = np.concatenate(
images).reshape(self._size, 3, 32, 32)
self._labels = np.concatenate(labels).reshape(-1, 1)
# Validation data
else:
for member in fpin.getmembers():
if "test_batch" not in member.name:
continue
fp = fpin.extractfile(member)
data = np.load(fp, encoding="bytes", allow_pickle=True)
images = data[b"data"]
labels = data[b"labels"]
self._size = 10000
self._images = images.reshape(self._size, 3, 32, 32)
self._labels = np.array(labels).reshape(-1, 1)
r.close()
logger.info('Getting labeled data from {}.'.format(data_uri))
self._size = self._labels.size
self._variables = ('x', 'y')
if rng is None:
rng = np.random.RandomState(313)
self.rng = rng
self.METHOD_NAME()
def METHOD_NAME(self):
if self._shuffle:
self._indexes = self.rng.permutation(self._size)
else:
self._indexes = np.arange(self._size)
super(Cifar10DataSource, self).METHOD_NAME()
@property
def images(self):
"""Get copy of whole data with a shape of (N, 1, H, W)."""
return self._images.copy()
@property
def labels(self):
"""Get copy of whole label with a shape of (N, 1)."""
return self._labels.copy()
def data_iterator_cifar10(batch_size,
train=True,
rng=None,
shuffle=True,
with_memory_cache=False,
with_file_cache=False):
'''
Provide DataIterator with :py:class:`Cifar10DataSource`
    The default value of both the with_memory_cache and with_file_cache options is False,
    because :py:class:`Cifar10DataSource` is able to store all data in memory.
'''
return data_iterator(Cifar10DataSource(train=train, shuffle=shuffle, rng=rng),
batch_size,
rng,
with_memory_cache,
with_file_cache)
| null |
945 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkiot.endpoint import endpoint_data
class CreateDataAPIServiceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Iot', '2018-01-20', 'CreateDataAPIService')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_RequestParams(self):
return self.get_body_params().get('RequestParam')
def set_RequestParams(self, RequestParams):
for depth1 in range(len(RequestParams)):
if RequestParams[depth1].get('Name') is not None:
self.add_body_params('RequestParam.' + str(depth1 + 1) + '.Name', RequestParams[depth1].get('Name'))
if RequestParams[depth1].get('Type') is not None:
self.add_body_params('RequestParam.' + str(depth1 + 1) + '.Type', RequestParams[depth1].get('Type'))
if RequestParams[depth1].get('Desc') is not None:
self.add_body_params('RequestParam.' + str(depth1 + 1) + '.Desc', RequestParams[depth1].get('Desc'))
if RequestParams[depth1].get('Example') is not None:
self.add_body_params('RequestParam.' + str(depth1 + 1) + '.Example', RequestParams[depth1].get('Example'))
if RequestParams[depth1].get('Required') is not None:
self.add_body_params('RequestParam.' + str(depth1 + 1) + '.Required', RequestParams[depth1].get('Required'))
def get_IotInstanceId(self):
return self.get_body_params().get('IotInstanceId')
def set_IotInstanceId(self,IotInstanceId):
self.add_body_params('IotInstanceId', IotInstanceId)
def get_ApiPath(self):
return self.get_body_params().get('ApiPath')
def set_ApiPath(self,ApiPath):
self.add_body_params('ApiPath', ApiPath)
def get_TemplateSql(self):
return self.get_body_params().get('TemplateSql')
def set_TemplateSql(self,TemplateSql):
self.add_body_params('TemplateSql', TemplateSql)
def get_ResponseParams(self):
return self.get_body_params().get('ResponseParam')
def METHOD_NAME(self, ResponseParams):
for depth1 in range(len(ResponseParams)):
if ResponseParams[depth1].get('Name') is not None:
self.add_body_params('ResponseParam.' + str(depth1 + 1) + '.Name', ResponseParams[depth1].get('Name'))
if ResponseParams[depth1].get('Type') is not None:
self.add_body_params('ResponseParam.' + str(depth1 + 1) + '.Type', ResponseParams[depth1].get('Type'))
if ResponseParams[depth1].get('Desc') is not None:
self.add_body_params('ResponseParam.' + str(depth1 + 1) + '.Desc', ResponseParams[depth1].get('Desc'))
if ResponseParams[depth1].get('Example') is not None:
self.add_body_params('ResponseParam.' + str(depth1 + 1) + '.Example', ResponseParams[depth1].get('Example'))
if ResponseParams[depth1].get('Required') is not None:
self.add_body_params('ResponseParam.' + str(depth1 + 1) + '.Required', ResponseParams[depth1].get('Required'))
def get_OriginSql(self):
return self.get_body_params().get('OriginSql')
def set_OriginSql(self,OriginSql):
self.add_body_params('OriginSql', OriginSql)
def get_DisplayName(self):
return self.get_body_params().get('DisplayName')
def set_DisplayName(self,DisplayName):
self.add_body_params('DisplayName', DisplayName)
def get_Desc(self):
return self.get_body_params().get('Desc')
def set_Desc(self,Desc):
		self.add_body_params('Desc', Desc)
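# --- Illustrative sketch (not part of the original module): sending the request with
# an aliyunsdkcore AcsClient. Credentials, region and all parameter values are
# placeholders for demonstration only.
def _example_create_data_api(client):
	"""'client' is assumed to be an authenticated aliyunsdkcore.client.AcsClient."""
	request = CreateDataAPIServiceRequest()
	request.set_ApiPath('/device/status')
	request.set_DisplayName('Device status API')
	request.set_OriginSql("SELECT status FROM device_table")
	request.set_TemplateSql("SELECT status FROM device_table WHERE iot_id = ${iotId}")
	return client.do_action_with_exception(request)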
| null |
946 |
import re
from typing import (
List,
Optional,
Union,
)
from pcs import settings
from pcs.common import reports
from pcs.common.reports.item import (
ReportItem,
ReportItemList,
)
from pcs.common.tools import timeout_to_seconds
from pcs.lib.errors import LibraryError
from pcs.lib.external import CommandRunner
_BOOLEAN_TRUE = frozenset(["true", "on", "yes", "y", "1"])
_BOOLEAN_FALSE = frozenset(["false", "off", "no", "n", "0"])
BOOLEAN_VALUES = _BOOLEAN_TRUE | _BOOLEAN_FALSE
_ID_FIRST_CHAR_NOT_RE = re.compile("[^a-zA-Z_]")
_ID_REST_CHARS_NOT_RE = re.compile("[^a-zA-Z0-9_.-]")
SCORE_INFINITY = "INFINITY"
def is_boolean(val: str) -> bool:
"""
Does pacemaker consider a value to be a boolean?
    Pacemaker ignores case of these values.
See crm_str_to_boolean in pacemaker/lib/common/strings.c
val -- checked value
"""
return val.lower() in BOOLEAN_VALUES
def is_true(val: str) -> bool:
"""
Does pacemaker consider a value to be true?
    Pacemaker ignores case of these values.
See crm_str_to_boolean in pacemaker/lib/common/strings.c
val -- checked value
"""
return val.lower() in _BOOLEAN_TRUE
def is_false(val: str) -> bool:
"""
Does pacemaker consider a value to be false?
    Pacemaker ignores case of these values.
See crm_str_to_boolean in pacemaker/lib/common/strings.c
val -- checked value
"""
return val.lower() in _BOOLEAN_FALSE
def is_score(value: str) -> bool:
if not value:
return False
unsigned_value = value[1:] if value[0] in ("+", "-") else value
return unsigned_value == SCORE_INFINITY or unsigned_value.isdigit()
def is_duration(runner: CommandRunner, value: str) -> bool:
cmd = [settings.iso8601_exec, "--duration", value]
_, _, retval = runner.run(cmd)
return retval == 0
def get_valid_timeout_seconds(
timeout_candidate: Union[str, int, None],
) -> Optional[int]:
"""
Transform pacemaker style timeout to number of seconds, raise LibraryError
on invalid timeout
timeout_candidate timeout string or None
"""
if timeout_candidate is None:
return None
wait_timeout = timeout_to_seconds(timeout_candidate)
if wait_timeout is None:
raise LibraryError(
ReportItem.error(
reports.messages.InvalidTimeoutValue(str(timeout_candidate))
)
)
return wait_timeout
def validate_id(
id_candidate: str,
description: Optional[str] = None,
reporter: Union[None, List, ReportItemList] = None,
):
"""
Validate a pacemaker id, raise LibraryError on invalid id.
id_candidate id's value
description id's role description (default "id")
"""
# see NCName definition
# http://www.w3.org/TR/REC-xml-names/#NT-NCName
# http://www.w3.org/TR/REC-xml/#NT-Name
description = "id" if not description else description # for mypy
if not id_candidate:
report_item = ReportItem.error(
reports.messages.InvalidIdIsEmpty(description)
)
if reporter is None:
# we check for None so it works with an empty list as well
raise LibraryError(report_item)
reporter.append(report_item)
return
if _ID_FIRST_CHAR_NOT_RE.match(id_candidate[0]):
report_item = ReportItem.error(
reports.messages.InvalidIdBadChar(
id_candidate,
description,
id_candidate[0],
True,
)
)
if reporter is not None:
reporter.append(report_item)
else:
raise LibraryError(report_item)
for char in id_candidate[1:]:
if _ID_REST_CHARS_NOT_RE.match(char):
report_item = ReportItem.error(
reports.messages.InvalidIdBadChar(
id_candidate,
description,
char,
False,
)
)
if reporter is not None:
reporter.append(report_item)
else:
raise LibraryError(report_item)
def METHOD_NAME(id_candidate, replacement=""):
if not id_candidate:
return id_candidate
return "".join(
[
""
if _ID_FIRST_CHAR_NOT_RE.match(id_candidate[0])
else id_candidate[0],
_ID_REST_CHARS_NOT_RE.sub(replacement, id_candidate[1:]),
]
)
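# Standalone illustration (not part of the pcs API): it reuses the regexes and
# helpers defined above to show what the boolean, score and id rules accept.
# The sanitizing helper below is local to this sketch, not a pcs function.
if __name__ == "__main__":
    def _sketch_sanitize(candidate: str, replacement: str = "") -> str:
        if not candidate:
            return candidate
        first = "" if _ID_FIRST_CHAR_NOT_RE.match(candidate[0]) else candidate[0]
        return first + _ID_REST_CHARS_NOT_RE.sub(replacement, candidate[1:])
    print(is_true("YES"))  # True - the boolean check is case-insensitive
    print(is_score("-INFINITY"))  # True - signed INFINITY is a valid score
    print(_sketch_sanitize("1bad id!"))  # "badid" - leading digit and bad chars dropped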
| null |
947 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class CopySnapshotRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'CopySnapshot','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def METHOD_NAME(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_SnapshotId(self): # String
return self.get_query_params().get('SnapshotId')
def set_SnapshotId(self, SnapshotId): # String
self.add_query_param('SnapshotId', SnapshotId)
def get_DestinationRegionId(self): # String
return self.get_query_params().get('DestinationRegionId')
def set_DestinationRegionId(self, DestinationRegionId): # String
self.add_query_param('DestinationRegionId', DestinationRegionId)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
def get_Arns(self): # RepeatList
return self.get_query_params().get('Arn')
def set_Arns(self, Arn): # RepeatList
for depth1 in range(len(Arn)):
if Arn[depth1].get('RoleType') is not None:
self.add_query_param('Arn.' + str(depth1 + 1) + '.RoleType', Arn[depth1].get('RoleType'))
if Arn[depth1].get('Rolearn') is not None:
self.add_query_param('Arn.' + str(depth1 + 1) + '.Rolearn', Arn[depth1].get('Rolearn'))
if Arn[depth1].get('AssumeRoleFor') is not None:
self.add_query_param('Arn.' + str(depth1 + 1) + '.AssumeRoleFor', Arn[depth1].get('AssumeRoleFor'))
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_DestinationSnapshotName(self): # String
return self.get_query_params().get('DestinationSnapshotName')
def set_DestinationSnapshotName(self, DestinationSnapshotName): # String
self.add_query_param('DestinationSnapshotName', DestinationSnapshotName)
def get_DestinationSnapshotDescription(self): # String
return self.get_query_params().get('DestinationSnapshotDescription')
def set_DestinationSnapshotDescription(self, DestinationSnapshotDescription): # String
self.add_query_param('DestinationSnapshotDescription', DestinationSnapshotDescription)
def get_Encrypted(self): # Boolean
return self.get_query_params().get('Encrypted')
def set_Encrypted(self, Encrypted): # Boolean
self.add_query_param('Encrypted', Encrypted)
def get_RetentionDays(self): # Integer
return self.get_query_params().get('RetentionDays')
def set_RetentionDays(self, RetentionDays): # Integer
self.add_query_param('RetentionDays', RetentionDays)
def get_KMSKeyId(self): # String
return self.get_query_params().get('KMSKeyId')
def set_KMSKeyId(self, KMSKeyId): # String
self.add_query_param('KMSKeyId', KMSKeyId)
def get_DestinationStorageLocationArn(self): # String
return self.get_query_params().get('DestinationStorageLocationArn')
def set_DestinationStorageLocationArn(self, DestinationStorageLocationArn): # String
self.add_query_param('DestinationStorageLocationArn', DestinationStorageLocationArn)
| null |
948 |
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines FHIR-specific Python exceptions."""
import abc
import collections
from typing import List, Tuple
import logging
def _create_event_frequency_table(frequency_list: List[Tuple[str, int]]) -> str:
"""Returns a string depicting unique log events and their counts."""
result: List[str] = []
max_event_len = max(len(key) for key, _ in frequency_list)
for event, count in frequency_list:
result.append(f'{event:<{max_event_len}}: {count}')
return '\n'.join(result)
def METHOD_NAME(events: List[str]) -> List[Tuple[str, int]]:
"""Returns a list of tuples: (event string, count of times they appear).
The list is sorted descending by count and then ascending by event string.
Args:
events: A list of strings defining an event (either an error or a warning).
Returns:
List of tuples : (event string, number of times they appear).
"""
frequency_list = collections.Counter(events)
# Sort descending by count then ascending by event string.
return sorted(frequency_list.items(), key=lambda x: (-x[1], x[0]))
class InvalidFhirError(Exception):
"""Invalid FHIR data was encountered."""
pass
class ErrorReporter(abc.ABC):
"""An abstract base class for FHIRPath encoding errors."""
@abc.abstractmethod
def report_conversion_error(self, element_path: str, msg: str) -> None:
"""Reports the given error during FHIR conversion.
This indicates that the resource does not fully comply with the FHIR
specification or profile, and the field could not be converted to the target
structure. Data may have been lost during the conversion.
Args:
element_path: The path to the field where the issue occurred.
msg: The error message produced.
"""
@abc.abstractmethod
def report_validation_error(self, element_path: str, msg: str) -> None:
"""Reports the given error during FHIR validation.
This indicates that the resource does not fully comply with the FHIR
specification or profile.
Args:
element_path: The path to the field where the issue occurred.
msg: The error message produced.
"""
@abc.abstractmethod
def report_validation_warning(self, element_path: str, msg: str) -> None:
"""Reports the given warning during FHIR validation.
This indicates that the element complies with the FHIR specification, but
may be missing some desired-but-not-required property, like additional
fields that are useful to consumers.
Args:
element_path: The path to the field where the issue occurred.
msg: The warning message that was produced.
"""
@abc.abstractmethod
def report_fhir_path_error(self, element_path: str, fhir_path_constraint: str,
msg: str) -> None:
"""Reports a FHIRPath constraint error during validation and/or encoding.
The base implementation logs to the `error` context and raises `e` by
default. Subclasses should override this behavior as necessary.
Args:
element_path: The path to the field that the constraint is defined on.
fhir_path_constraint: The FHIRPath constraint expression.
msg: The error message produced.
"""
@abc.abstractmethod
def report_fhir_path_warning(self, element_path: str,
fhir_path_constraint: str, msg: str) -> None:
"""Reports a FHIRPath constraint warning during validation and/or encoding.
Args:
element_path: The path to the field that the constraint is defined on.
fhir_path_constraint: The FHIRPath constraint expression.
msg: The warning message produced.
"""
class ListErrorReporter(ErrorReporter):
"""A delegate for FHIRPath encoding errors.
Errors are logged to the corresponding `logging` context (e.g. "warning" or
"error") and any encountered messages are stored in the corresponding
attribute (`warnings` and `errors`, respectively). These can then be retrieved
by the caller within the context of the larger system.
Attributes:
errors: A list of error messages encountered.
warnings: A list of warning messages encountered.
"""
def __init__(self) -> None:
self.errors: List[str] = []
self.warnings: List[str] = []
def report_conversion_error(self, element_path: str, msg: str) -> None:
"""Logs to the `error` context and stores `msg` in `errors`."""
full_msg = f'Conversion Error: {element_path}; {msg}'
logging.error(full_msg)
self.errors.append(full_msg)
def report_validation_error(self, element_path: str, msg: str) -> None:
"""Logs to the `error` context and stores `msg` in `errors`."""
full_msg = f'Validation Error: {element_path}; {msg}'
logging.error(full_msg)
self.errors.append(full_msg)
def report_validation_warning(self, element_path: str, msg: str) -> None:
"""Logs to the `warning` context and stores `msg` in `warnings`."""
full_msg = f'Validation Warning: {element_path}; {msg}'
logging.warning(full_msg)
self.warnings.append(full_msg)
def report_fhir_path_error(self, element_path: str, fhir_path_constraint: str,
msg: str) -> None:
"""Logs to the `error` context and stores `msg` in `errors`."""
full_msg = _build_fhir_path_message('Error', element_path,
fhir_path_constraint, msg)
logging.error(full_msg)
self.errors.append(full_msg)
def report_fhir_path_warning(self, element_path: str,
fhir_path_constraint: str, msg: str) -> None:
"""Logs to the `warning` context and stores `msg` in `warnings`."""
full_msg = _build_fhir_path_message('Warning', element_path,
fhir_path_constraint, msg)
logging.warning(full_msg)
self.warnings.append(full_msg)
def get_error_report(self) -> str:
"""Returns an aggregated report of warnings and errors encountered."""
report = ''
if self.errors:
errors_freq_tbl = _create_event_frequency_table(
METHOD_NAME(self.errors))
report += f'Encountered {len(self.errors)} errors:\n{errors_freq_tbl}'
if self.warnings:
warnings_freq_tbl = _create_event_frequency_table(
METHOD_NAME(self.warnings))
report += (f'\n\nEncountered {len(self.warnings)} warnings:\n'
f'{warnings_freq_tbl}')
return report
def _build_fhir_path_message(level: str, element_path: str,
fhir_path_constraint: str, msg: str) -> str:
"""Builds a FHIR Path error message from the given components."""
return (f'FHIR Path {level}: {element_path + "; " if element_path else ""}'
f'{fhir_path_constraint}; {msg}')
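# Standalone illustration (not part of the FHIR library API): it demonstrates the
# frequency-table ordering used above -- descending by count, then ascending by
# event text -- on made-up event strings.
if __name__ == "__main__":
    sample_events = ["missing field", "bad code", "missing field", "bad code", "bad unit"]
    freq = sorted(collections.Counter(sample_events).items(), key=lambda x: (-x[1], x[0]))
    print(_create_event_frequency_table(freq))
    # bad code     : 2
    # missing field: 2
    # bad unit     : 1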
| null |
949 |
# coding=utf-8
# Copyright 2023 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ProteinNet dataset."""
from __future__ import annotations
import os
from typing import Dict, Iterator, List, Optional, Sequence, Tuple, Union
import urllib
import numpy as np
from tensorflow_datasets.core.utils.lazy_imports_utils import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_Example = Dict[str, Union[int, str, np.ndarray, List]]
_ExampleIterator = Iterator[Tuple[str, _Example]]
_PROTEINNET_HOMEPAGE = 'https://github.com/aqlaboratory/proteinnet'
_LINES_PER_ENTRY = 33
def _parse_array(lines: Sequence[str]) -> np.ndarray:
"""Parse lines of tab-separated numbers into an array."""
lines = [x.split('\t') for x in lines]
return np.array(lines, dtype=np.float32)
def _parse_mask(line: str) -> np.ndarray:
"""Parse a string of `+` and `-` into a bool array."""
return np.array([ch == '+' for ch in line], dtype=bool)
def _read_entry(fin: tf.io.gfile.GFile) -> Optional[Tuple[str, _Example]]:
"""Read an example from an input file.
Args:
fin: Input file object for reading dataset entries.
Returns:
    The read example and its name, or None in case of EOF.
Raises:
    ValueError: If the entry format is incorrect.
"""
lines = []
for _ in range(_LINES_PER_ENTRY):
line = fin.readline().strip()
lines.append(line)
if all(not line for line in lines): # EOF?
return None
# Check structure.
if (
lines[0] != '[ID]'
or lines[2] != '[PRIMARY]'
or lines[4] != '[EVOLUTIONARY]'
or lines[26] != '[TERTIARY]'
or lines[30] != '[MASK]'
or lines[32]
):
raise ValueError('Incorrect data formatting.')
lines = lines[:-1] # Discard last empty (spacer) line.
  # The transposes below are required because TFDS allows unknown tensor
# dimensions only in the first axis.
key = lines[1]
example = {
'id': key,
'primary': list(lines[3]),
'evolutionary': _parse_array(lines[5:26]).transpose(),
'tertiary': _parse_array(lines[27:30]).transpose(),
'mask': _parse_mask(lines[31]),
'length': len(lines[3]),
}
return key, example
class Builder(tfds.core.GeneratorBasedBuilder):
"""DatasetBuilder for the ProteinNet dataset."""
URL = (
'https://sharehost.hms.harvard.edu/sysbio/alquraishi/proteinnet'
'/human_readable/'
)
FILES = {
'casp7': 'casp7.tar.gz',
'casp8': 'casp8.tar.gz',
'casp9': 'casp9.tar.gz',
'casp10': 'casp10.tar.gz',
'casp11': 'casp11.tar.gz',
'casp12': 'casp12.tar.gz',
}
THRESHOLDS = [30, 50, 70, 90, 95, 100]
AMINOACIDS = [
'A',
'C',
'D',
'E',
'F',
'G',
'H',
'I',
'K',
'L',
'M',
'N',
'P',
'Q',
'R',
'S',
'T',
'V',
'W',
'Y',
]
BUILDER_CONFIGS = [
tfds.core.BuilderConfig(name='casp7'),
tfds.core.BuilderConfig(name='casp8'),
tfds.core.BuilderConfig(name='casp9'),
tfds.core.BuilderConfig(name='casp10'),
tfds.core.BuilderConfig(name='casp11'),
tfds.core.BuilderConfig(name='casp12'),
]
VERSION = tfds.core.Version('1.0.0')
RELEASE_NOTES = {
'1.0.0': 'Initial release.',
}
def METHOD_NAME(self) -> tfds.core.DatasetInfo:
"""Returns the dataset metadata."""
return self.dataset_info_from_configs(
features=tfds.features.FeaturesDict({
'id': tfds.features.Text(),
'primary': tfds.features.Sequence(
tfds.features.ClassLabel(names=self.AMINOACIDS)
),
'evolutionary': tfds.features.Tensor(
shape=(None, 21), dtype=np.float32
),
'tertiary': tfds.features.Tensor(shape=(None, 3), dtype=np.float32),
'mask': tfds.features.Tensor(shape=(None,), dtype=np.bool_),
'length': tfds.features.Tensor(shape=(), dtype=np.int32),
}),
supervised_keys=('primary', 'tertiary'),
homepage=_PROTEINNET_HOMEPAGE,
)
def _split_generators(
self, dl_manager: tfds.download.DownloadManager
) -> Dict[Union[str, tfds.Split], _ExampleIterator]:
"""Returns SplitGenerators."""
name = self.builder_config.name # Configurable dataset (config) name.
path = dl_manager.download_and_extract(
urllib.parse.urljoin(self.URL, self.FILES[name])
)
splits = {
tfds.Split.VALIDATION: self._generate_examples(
os.path.join(path, name, 'validation')
),
tfds.Split.TEST: self._generate_examples(
os.path.join(path, name, 'testing')
),
}
for threshold in self.THRESHOLDS: # Train splits.
split_path = os.path.join(path, name, f'training_{threshold}')
splits[f'train_{threshold}'] = self._generate_examples(split_path)
return splits
def _generate_examples(self, filename: str) -> _ExampleIterator:
"""Yields examples."""
with tf.io.gfile.GFile(filename, mode='r') as fin:
while True:
example = _read_entry(fin)
if example is None:
break
yield example
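# Standalone illustration (not part of the TFDS builder API): it shows what the
# small parsing helpers above produce for a toy fragment of an entry.
if __name__ == "__main__":
  print(_parse_mask("++-+"))  # [ True  True False  True]
  print(_parse_array(["1.0\t2.0", "3.0\t4.0"]).shape)  # (2, 2)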
| null |
950 |
import logging
import operator
from functools import reduce
from time import sleep
from typing import Any, Dict, List
import boto3
import botocore.loaders as boto_loader
import botocore.regions as boto_regions
from botocore.config import Config as BotoConfig
from botocore.exceptions import ClientError, NoCredentialsError, ProfileNotFound
from taskcat.exceptions import TaskCatException
LOG = logging.getLogger(__name__)
REGIONAL_ENDPOINT_SERVICES = ["sts"]
class Boto3Cache:
RETRIES = 10
BACKOFF = 2
DELAY = 0.1
CLIENT_THROTTLE_RETRIES = 20
def __init__(self, _boto3=boto3):
self._boto3 = _boto3
self._session_cache: Dict[str, Dict[str, boto3.Session]] = {}
self._client_cache: Dict[str, Dict[str, Dict[str, boto3.client]]] = {}
self._resource_cache: Dict[str, Dict[str, Dict[str, boto3.resource]]] = {}
self._account_info: Dict[str, Dict[str, str]] = {}
self._lock_cache_update = False
def session(self, profile: str = "default", region: str = None) -> boto3.Session:
region = self._get_region(region, profile)
try:
session = self._cache_lookup(
self._session_cache,
[profile, region],
self._boto3.Session,
[],
{"region_name": region, "profile_name": profile},
)
except ProfileNotFound:
if profile != "default":
raise
session = self._boto3.Session(region_name=region)
self._cache_set(self._session_cache, [profile, region], session)
return session
def client(
self, service: str, profile: str = "default", region: str = None
) -> boto3.client:
region = self._get_region(region, profile)
session = self.session(profile, region)
kwargs = {"config": BotoConfig(retries={"max_attempts": 20})}
if service in REGIONAL_ENDPOINT_SERVICES:
kwargs.update({"endpoint_url": self._get_endpoint_url(service, region)})
return self._cache_lookup(
self._client_cache,
[profile, region, service],
session.client,
[service],
kwargs,
)
def resource(
self, service: str, profile: str = "default", region: str = None
) -> boto3.resource:
region = self._get_region(region, profile)
session = self.session(profile, region)
return self._cache_lookup(
self._resource_cache,
[profile, region, service],
session.resource,
[service],
)
def partition(self, profile: str = "default") -> str:
return self._cache_lookup(
self._account_info, [profile], self._get_account_info, [profile]
)["partition"]
def account_id(self, profile: str = "default") -> str:
return self._cache_lookup(
self._account_info, [profile], self._get_account_info, [profile]
)["account_id"]
def _get_account_info(self, profile):
partition, region = self._get_partition(profile)
session = self.session(profile, region)
sts_client = session.client("sts", region_name=region)
try:
account_id = sts_client.get_caller_identity()["Account"]
except ClientError as e:
if e.response["Error"]["Code"] == "AccessDenied":
# pylint: disable=raise-missing-from
raise TaskCatException(
f"Not able to fetch account number from {region} using profile "
f"{profile}. {str(e)}"
)
raise
except NoCredentialsError as e:
# pylint: disable=raise-missing-from
raise TaskCatException(
f"Not able to fetch account number from {region} using profile "
f"{profile}. {str(e)}"
)
except ProfileNotFound as e:
# pylint: disable=raise-missing-from
raise TaskCatException(
f"Not able to fetch account number from {region} using profile "
f"{profile}. {str(e)}"
)
return {"partition": partition, "account_id": account_id}
def METHOD_NAME(self, cache: dict, keys: list):
if keys:
if not cache.get(keys[0]):
cache[keys[0]] = {}
self.METHOD_NAME(cache[keys[0]], keys[1:])
def _cache_lookup(self, cache, key_list, create_func, args=None, kwargs=None):
try:
value = self._cache_get(cache, key_list)
except KeyError:
args = [] if not args else args
kwargs = {} if not kwargs else kwargs
value = self._get_with_retry(create_func, args, kwargs)
self._cache_set(cache, key_list, value)
return value
def _get_with_retry(self, create_func, args, kwargs):
retries = self.RETRIES
delay = self.DELAY
while retries:
try:
return create_func(*args, **kwargs)
except KeyError as e:
if str(e) not in ["'credential_provider'", "'endpoint_resolver'"]:
raise
                backoff = (self.RETRIES - retries + delay) * self.BACKOFF
                sleep(backoff)
                # Count the failed attempt so the loop gives up after RETRIES
                # tries instead of retrying a persistent KeyError forever.
                retries -= 1
@staticmethod
def _get_endpoint_url(service, region):
data = boto_loader.create_loader().load_data("endpoints")
endpoint_data = boto_regions.EndpointResolver(data).construct_endpoint(
service, region
)
if not endpoint_data:
raise TaskCatException(
f"unable to resolve endpoint for {service} in {region}"
)
return f"https://{service}.{region}.{endpoint_data['dnsSuffix']}"
@staticmethod
def _cache_get(cache: dict, key_list: List[str]):
return reduce(operator.getitem, key_list, cache)
def _cache_set(self, cache: dict, key_list: list, value: Any):
self.METHOD_NAME(cache, key_list[:-1])
self._cache_get(cache, key_list[:-1])[key_list[-1]] = value
def _get_region(self, region, profile):
if not region:
region = self.get_default_region(profile)
return region
def _get_partition(self, profile):
partition_regions = [
("aws", "us-east-1"),
("aws-cn", "cn-north-1"),
("aws-us-gov", "us-gov-west-1"),
]
for partition, region in partition_regions:
try:
self.session(profile, region).client(
"sts", region_name=region
).get_caller_identity()
return (partition, region)
except ClientError as e:
if "InvalidClientTokenId" in str(e):
continue
raise
raise ValueError("cannot find suitable AWS partition")
def get_default_region(self, profile_name="default") -> str:
try:
if profile_name != "default":
region = self._boto3.session.Session(
profile_name=profile_name
).region_name
else:
region = self._boto3.session.Session().region_name
except ProfileNotFound:
if profile_name != "default":
raise
region = self._boto3.session.Session().region_name
if not region:
_, region = self._get_partition(profile_name)
LOG.warning(
"Region not set in credential chain, defaulting to {}".format(region)
)
return region
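# Standalone illustration (not part of taskcat's API): it shows the nested-dict
# cache pattern Boto3Cache relies on, using plain strings in place of boto3
# sessions/clients so the lookup and set behaviour is easy to follow.
if __name__ == "__main__":
    sketch_cache: Dict[str, Any] = {}
    def _sketch_set(target, keys, value):
        node = target
        for key in keys[:-1]:
            node = node.setdefault(key, {})
        node[keys[-1]] = value
    _sketch_set(sketch_cache, ["default", "us-east-1", "s3"], "fake-client")
    # _cache_get walks the same key path with reduce(operator.getitem, ...).
    print(reduce(operator.getitem, ["default", "us-east-1", "s3"], sketch_cache))  # fake-client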
| null |
951 |
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://github.com/openapi-json-schema-tools/openapi-json-schema-generator
"""
from __future__ import annotations
from petstore_api.shared_imports.schema_imports import * # pyright: ignore [reportWildcardImportFromLibrary]
Name: typing_extensions.TypeAlias = schemas.StrSchema
Properties = typing.TypedDict(
'Properties',
{
"name": typing.Type[Name],
}
)
class _1Dict(schemas.immutabledict[str, str]):
__required_keys__: typing.FrozenSet[str] = frozenset({
"test",
})
__optional_keys__: typing.FrozenSet[str] = frozenset({
"name",
})
def __new__(
cls,
*,
test: typing.Union[
schemas.INPUT_TYPES_ALL,
schemas.OUTPUT_BASE_TYPES
],
METHOD_NAME: typing.Union[
str,
schemas.Unset
] = schemas.unset,
configuration_: typing.Optional[schema_configuration.SchemaConfiguration] = None,
**kwargs: schemas.INPUT_TYPES_ALL,
):
arg_: typing.Dict[str, typing.Any] = {
"test": test,
}
for key, val in (
("name", METHOD_NAME),
):
if isinstance(val, schemas.Unset):
continue
arg_[key] = val
arg_.update(kwargs)
used_arg_ = typing.cast(_1DictInput, arg_)
return _1.validate(used_arg_, configuration=configuration_)
@staticmethod
def from_dict_(
arg: typing.Union[
_1DictInput,
_1Dict
],
configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None
) -> _1Dict:
return _1.validate(arg, configuration=configuration)
@property
def test(self) -> schemas.OUTPUT_BASE_TYPES:
return typing.cast(
schemas.OUTPUT_BASE_TYPES,
self.__getitem__("test")
)
@property
def METHOD_NAME(self) -> typing.Union[str, schemas.Unset]:
val = self.get("name", schemas.unset)
if isinstance(val, schemas.Unset):
return val
return typing.cast(
str,
val
)
def get_additional_property_(self, METHOD_NAME: str) -> typing.Union[schemas.OUTPUT_BASE_TYPES, schemas.Unset]:
schemas.raise_if_key_known(METHOD_NAME, self.__required_keys__, self.__optional_keys__)
return self.get(METHOD_NAME, schemas.unset)
_1DictInput = typing.Mapping[str, schemas.INPUT_TYPES_ALL]
@dataclasses.dataclass(frozen=True)
class _1(
schemas.Schema[_1Dict, tuple]
):
types: typing.FrozenSet[typing.Type] = frozenset({schemas.immutabledict})
required: typing.FrozenSet[str] = frozenset({
"test",
})
properties: Properties = dataclasses.field(default_factory=lambda: schemas.typed_dict_to_instance(Properties)) # type: ignore
type_to_output_cls: typing.Mapping[
typing.Type,
typing.Type
] = dataclasses.field(
default_factory=lambda: {
schemas.immutabledict: _1Dict
}
)
@classmethod
def validate(
cls,
arg: typing.Union[
_1DictInput,
_1Dict,
],
configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None
) -> _1Dict:
return super().validate_base(
arg,
configuration=configuration,
)
@dataclasses.dataclass(frozen=True)
class ObjectWithAllOfWithReqTestPropFromUnsetAddProp(
schemas.AnyTypeSchema[schemas.immutabledict[str, schemas.OUTPUT_BASE_TYPES], typing.Tuple[schemas.OUTPUT_BASE_TYPES, ...]],
):
"""NOTE: This class is auto generated by OpenAPI JSON Schema Generator.
Ref: https://github.com/openapi-json-schema-tools/openapi-json-schema-generator
Do not edit the class manually.
"""
# any type
all_of: AllOf = dataclasses.field(default_factory=lambda: schemas.tuple_to_instance(AllOf)) # type: ignore
from petstore_api.components.schema import object_with_optional_test_prop
AllOf = typing.Tuple[
typing.Type[object_with_optional_test_prop.ObjectWithOptionalTestProp],
typing.Type[_1],
]
| null |
952 |
# Copyright (c) ZenML GmbH 2023. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Endpoint definitions for pipeline run secrets."""
from typing import Optional
from uuid import UUID
from fastapi import APIRouter, Depends, Security
from zenml.constants import API, SECRETS, VERSION_1
from zenml.enums import PermissionType
from zenml.models.page_model import Page
from zenml.models.secret_models import (
SecretFilterModel,
SecretResponseModel,
SecretUpdateModel,
)
from zenml.zen_server.auth import AuthContext, authorize
from zenml.zen_server.exceptions import error_response
from zenml.zen_server.utils import (
handle_exceptions,
make_dependable,
zen_store,
)
router = APIRouter(
prefix=API + VERSION_1 + SECRETS,
tags=["secrets"],
responses={401: error_response},
)
@router.get(
"",
response_model=Page[SecretResponseModel],
responses={401: error_response, 404: error_response, 422: error_response},
)
@handle_exceptions
def list_secrets(
secret_filter_model: SecretFilterModel = Depends(
make_dependable(SecretFilterModel)
),
auth_context: AuthContext = Security(
authorize, scopes=[PermissionType.READ]
),
) -> Page[SecretResponseModel]:
"""Gets a list of secrets.
Args:
secret_filter_model: Filter model used for pagination, sorting,
filtering
auth_context: Authentication context.
Returns:
List of secret objects.
"""
secrets = zen_store().list_secrets(secret_filter_model=secret_filter_model)
# Remove secrets from the response if the user does not have write
# permissions.
if PermissionType.WRITE not in auth_context.permissions:
for secret in secrets.items:
secret.remove_secrets()
return secrets
@router.get(
"/{secret_id}",
response_model=SecretResponseModel,
responses={401: error_response, 404: error_response, 422: error_response},
)
@handle_exceptions
def get_secret(
secret_id: UUID,
auth_context: AuthContext = Security(
authorize, scopes=[PermissionType.READ]
),
) -> SecretResponseModel:
"""Gets a specific secret using its unique id.
Args:
secret_id: ID of the secret to get.
auth_context: Authentication context.
Returns:
A specific secret object.
"""
secret = zen_store().get_secret(secret_id=secret_id)
# Remove secrets from the response if the user does not have write
# permissions.
if PermissionType.WRITE not in auth_context.permissions:
secret.remove_secrets()
return secret
@router.put(
"/{secret_id}",
response_model=SecretResponseModel,
responses={401: error_response, 404: error_response, 422: error_response},
)
@handle_exceptions
def METHOD_NAME(
secret_id: UUID,
secret_update: SecretUpdateModel,
patch_values: Optional[bool] = False,
_: AuthContext = Security(authorize, scopes=[PermissionType.WRITE]),
) -> SecretResponseModel:
"""Updates the attribute on a specific secret using its unique id.
Args:
secret_id: ID of the secret to get.
secret_update: the model containing the attributes to update.
patch_values: Whether to patch the secret values or replace them.
Returns:
The updated secret object.
"""
if not patch_values:
# If patch_values is False, interpret the update values as a complete
# replacement of the existing secret values. The only adjustment we
# need to make is to set the value of any keys that are not present in
# the update to None, so that they are deleted.
secret = zen_store().get_secret(secret_id=secret_id)
for key in secret.values.keys():
if key not in secret_update.values:
secret_update.values[key] = None
return zen_store().METHOD_NAME(
secret_id=secret_id, secret_update=secret_update
)
@router.delete(
"/{secret_id}",
responses={401: error_response, 404: error_response, 422: error_response},
)
@handle_exceptions
def delete_secret(
secret_id: UUID,
_: AuthContext = Security(authorize, scopes=[PermissionType.WRITE]),
) -> None:
"""Deletes a specific secret using its unique id.
Args:
secret_id: ID of the secret to delete.
"""
zen_store().delete_secret(secret_id=secret_id)
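# Standalone illustration (not part of the ZenML API): it shows the replace-vs-patch
# semantics implemented in the update endpoint above. When patching is disabled,
# keys absent from the update are explicitly set to None so the store deletes them.
if __name__ == "__main__":
    existing_values = {"username": "svc", "password": "old", "token": "abc"}
    update_values = {"password": "new"}
    patch_values = False
    if not patch_values:
        for key in existing_values:
            if key not in update_values:
                update_values[key] = None
    print(update_values)  # {'password': 'new', 'username': None, 'token': None}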
| null |
953 |
import textwrap
import unittest
import machine_common_sense as mcs
class TestObjectMetadata(unittest.TestCase):
str_output = ''' {
"uuid": "",
"dimensions": [],
"direction": {},
"distance": -1.0,
"distance_in_steps": -1.0,
"distance_in_world": -1.0,
"held": false,
"mass": 0.0,
"material_list": [],
"position": {},
"rotation": {},
"segment_color": {},
"shape": "",
"state_list": [],
"texture_color_list": [],
"visible": false,
"is_open": false,
"openable": false,
"locked": false,
"associated_with_agent": "",
"simulation_agent_held_object": "",
"simulation_agent_is_holding_held_object": false
}'''
@classmethod
def setUpClass(cls):
cls.object_metadata = mcs.ObjectMetadata()
@classmethod
def tearDownClass(cls):
# nothing to do
pass
def test_uuid(self):
self.assertEqual(self.object_metadata.uuid, "")
self.assertIsInstance(self.object_metadata.uuid, str)
def test_associated_with_agent(self):
self.assertEqual(self.object_metadata.associated_with_agent, "")
self.assertIsInstance(self.object_metadata.associated_with_agent, str)
def test_simulation_agent_held_object(self):
self.assertEqual(self.object_metadata.simulation_agent_held_object, "")
self.assertIsInstance(
self.object_metadata.simulation_agent_held_object, str)
def test_simulation_agent_is_holding_held_object(self):
self.assertFalse(
self.object_metadata.simulation_agent_is_holding_held_object)
self.assertIsInstance(
self.object_metadata.simulation_agent_is_holding_held_object, bool)
def test_dimensions(self):
self.assertFalse(self.object_metadata.dimensions)
self.assertIsInstance(self.object_metadata.dimensions, list)
def test_direction(self):
self.assertFalse(self.object_metadata.direction)
self.assertIsInstance(self.object_metadata.direction, dict)
def test_distance(self):
self.assertAlmostEqual(self.object_metadata.distance, -1.0)
self.assertIsInstance(self.object_metadata.distance, float)
def test_distance_in_steps(self):
self.assertAlmostEqual(self.object_metadata.distance_in_steps, -1.0)
self.assertIsInstance(self.object_metadata.distance_in_steps, float)
def test_distance_in_world(self):
self.assertAlmostEqual(self.object_metadata.distance_in_world, -1.0)
self.assertIsInstance(self.object_metadata.distance_in_world, float)
def test_held(self):
self.assertFalse(self.object_metadata.held)
self.assertIsInstance(self.object_metadata.held, bool)
def METHOD_NAME(self):
self.assertAlmostEqual(self.object_metadata.mass, 0.0)
self.assertIsInstance(self.object_metadata.mass, float)
def test_material_list(self):
self.assertFalse(self.object_metadata.material_list)
self.assertIsInstance(self.object_metadata.material_list, list)
def test_position(self):
self.assertFalse(self.object_metadata.position)
self.assertIsInstance(self.object_metadata.position, dict)
def test_rotation(self):
self.assertFalse(self.object_metadata.rotation)
self.assertIsInstance(self.object_metadata.rotation, dict)
def test_segment_color(self):
self.assertFalse(self.object_metadata.segment_color)
self.assertIsInstance(self.object_metadata.segment_color, dict)
def test_shape(self):
self.assertEqual(self.object_metadata.shape, "")
self.assertIsInstance(self.object_metadata.shape, str)
def test_state_list(self):
self.assertFalse(self.object_metadata.state_list)
self.assertIsInstance(self.object_metadata.state_list, list)
def test_texture_color_list(self):
self.assertFalse(self.object_metadata.texture_color_list)
self.assertIsInstance(self.object_metadata.texture_color_list, list)
def test_visible(self):
self.assertIsInstance(self.object_metadata.visible, bool)
self.assertFalse(self.object_metadata.visible)
def test_str(self):
self.assertEqual(str(self.object_metadata),
textwrap.dedent(self.str_output))
if __name__ == '__main__':
unittest.main()
| null |
954 |
import meep as mp
try:
import meep.adjoint as mpa
except:
import adjoint as mpa
import unittest
import numpy as np
from scipy.ndimage import gaussian_filter
def compute_transmittance(matgrid_symmetry=False):
resolution = 25
cell_size = mp.Vector3(6, 6, 0)
boundary_layers = [mp.PML(thickness=1.0)]
matgrid_size = mp.Vector3(2, 2, 0)
matgrid_resolution = 2 * resolution
Nx, Ny = int(matgrid_size.x * matgrid_resolution), int(
matgrid_size.y * matgrid_resolution
)
# ensure reproducible results
rng = np.random.RandomState(2069588)
w = rng.rand(Nx, Ny)
weights = w if matgrid_symmetry else 0.5 * (w + np.fliplr(w))
matgrid = mp.MaterialGrid(
mp.Vector3(Nx, Ny),
mp.air,
mp.Medium(index=3.5),
weights=weights,
do_averaging=False,
grid_type="U_MEAN",
)
geometry = [
mp.Block(
center=mp.Vector3(),
size=mp.Vector3(mp.inf, 1.0, mp.inf),
material=mp.Medium(index=3.5),
),
mp.Block(
center=mp.Vector3(),
size=mp.Vector3(matgrid_size.x, matgrid_size.y, 0),
material=matgrid,
),
]
if matgrid_symmetry:
geometry.append(
mp.Block(
center=mp.Vector3(),
size=mp.Vector3(matgrid_size.x, matgrid_size.y, 0),
material=matgrid,
e2=mp.Vector3(y=-1),
)
)
eig_parity = mp.ODD_Y + mp.EVEN_Z
fcen = 0.65
df = 0.2 * fcen
sources = [
mp.EigenModeSource(
src=mp.GaussianSource(fcen, fwidth=df),
center=mp.Vector3(-2.0, 0),
size=mp.Vector3(0, 4.0),
eig_parity=eig_parity,
)
]
sim = mp.Simulation(
resolution=resolution,
cell_size=cell_size,
boundary_layers=boundary_layers,
sources=sources,
geometry=geometry,
)
mode_mon = sim.add_flux(
fcen, 0, 1, mp.FluxRegion(center=mp.Vector3(2.0, 0), size=mp.Vector3(0, 4.0))
)
sim.run(until_after_sources=mp.stop_when_dft_decayed())
mode_coeff = sim.get_eigenmode_coefficients(mode_mon, [1], eig_parity).alpha[
0, :, 0
][0]
tran = np.power(np.abs(mode_coeff), 2)
print(f'tran:, {"sym" if matgrid_symmetry else "nosym"}, {tran}')
return tran
def compute_resonant_mode_2d(res, default_mat=False):
cell_size = mp.Vector3(1, 1, 0)
rad = 0.301943
fcen = 0.3
df = 0.2 * fcen
sources = [
mp.Source(
mp.GaussianSource(fcen, fwidth=df),
component=mp.Hz,
center=mp.Vector3(-0.1057, 0.2094, 0),
)
]
k_point = mp.Vector3(0.3892, 0.1597, 0)
matgrid_size = mp.Vector3(1, 1, 0)
matgrid_resolution = 1200
# for a fixed resolution, compute the number of grid points
# necessary which are defined on the corners of the voxels
Nx, Ny = int(matgrid_size.x * matgrid_resolution), int(
matgrid_size.y * matgrid_resolution
)
x = np.linspace(-0.5 * matgrid_size.x, 0.5 * matgrid_size.x, Nx)
y = np.linspace(-0.5 * matgrid_size.y, 0.5 * matgrid_size.y, Ny)
xv, yv = np.meshgrid(x, y)
weights = np.sqrt(np.square(xv) + np.square(yv)) < rad
filtered_weights = gaussian_filter(weights, sigma=3.0, output=np.double)
matgrid = mp.MaterialGrid(
mp.Vector3(Nx, Ny),
mp.air,
mp.Medium(index=3.5),
weights=filtered_weights,
do_averaging=True,
beta=1000,
eta=0.5,
)
geometry = [
mp.Block(
center=mp.Vector3(),
size=mp.Vector3(matgrid_size.x, matgrid_size.y, 0),
material=matgrid,
)
]
sim = mp.Simulation(
resolution=res,
cell_size=cell_size,
default_material=matgrid if default_mat else mp.Medium(),
geometry=[] if default_mat else geometry,
sources=sources,
k_point=k_point,
)
h = mp.Harminv(mp.Hz, mp.Vector3(0.3718, -0.2076), fcen, df)
sim.run(mp.after_sources(h), until_after_sources=200)
try:
for m in h.modes:
print(f"harminv:, {res}, {m.freq}, {m.Q}")
freq = h.modes[0].freq
except:
raise RuntimeError("No resonant modes found.")
return freq
def compute_resonant_mode_3d(use_matgrid=True):
resolution = 25
wvl = 1.27
fcen = 1 / wvl
df = 0.02 * fcen
nSi = 3.45
Si = mp.Medium(index=nSi)
nSiO2 = 1.45
SiO2 = mp.Medium(index=nSiO2)
s = 1.0
cell_size = mp.Vector3(s, s, s)
rad = 0.34 # radius of sphere
if use_matgrid:
matgrid_resolution = 2 * resolution
N = int(s * matgrid_resolution)
coord = np.linspace(-0.5 * s, 0.5 * s, N)
xv, yv, zv = np.meshgrid(coord, coord, coord)
weights = np.sqrt(np.square(xv) + np.square(yv) + np.square(zv)) < rad
filtered_weights = gaussian_filter(
weights, sigma=4 / resolution, output=np.double
)
matgrid = mp.MaterialGrid(
mp.Vector3(N, N, N),
SiO2,
Si,
weights=filtered_weights,
do_averaging=True,
beta=1000,
eta=0.5,
)
geometry = [mp.Block(center=mp.Vector3(), size=cell_size, material=matgrid)]
else:
geometry = [mp.Sphere(center=mp.Vector3(), radius=rad, material=Si)]
sources = [
mp.Source(
src=mp.GaussianSource(fcen, fwidth=df),
size=mp.Vector3(),
center=mp.Vector3(0.13, 0.25, 0.06),
component=mp.Ez,
)
]
k_point = mp.Vector3(0.23, -0.17, 0.35)
sim = mp.Simulation(
resolution=resolution,
cell_size=cell_size,
sources=sources,
default_material=SiO2,
k_point=k_point,
geometry=geometry,
)
h = mp.Harminv(mp.Ez, mp.Vector3(-0.2684, 0.1185, 0.0187), fcen, df)
sim.run(mp.after_sources(h), until_after_sources=200)
try:
for m in h.modes:
print(f"harminv:, {resolution}, {m.freq}, {m.Q}")
freq = h.modes[0].freq
except:
raise RuntimeError("No resonant modes found.")
return freq
class TestMaterialGrid(unittest.TestCase):
def METHOD_NAME(self):
# "exact" frequency computed using MaterialGrid at resolution = 300
freq_ref = 0.29826813873225283
res = [25, 50]
freq_matgrid = []
for r in res:
freq_matgrid.append(compute_resonant_mode_2d(r))
# verify that the frequency of the resonant mode is
# approximately equal to the reference value
self.assertAlmostEqual(freq_ref, freq_matgrid[-1], 2)
# verify that the relative error is decreasing with increasing resolution
# and is better than linear convergence because of subpixel smoothing
self.assertLess(
abs(freq_matgrid[1] - freq_ref) * (res[1] / res[0]),
abs(freq_matgrid[0] - freq_ref),
)
freq_matgrid_default_mat = compute_resonant_mode_2d(res[0], True)
self.assertAlmostEqual(freq_matgrid[0], freq_matgrid_default_mat)
def test_matgrid_3d(self):
freq_matgrid = compute_resonant_mode_3d(True)
freq_geomobj = compute_resonant_mode_3d(False)
self.assertAlmostEqual(freq_matgrid, freq_geomobj, places=2)
def test_symmetry(self):
tran_nosym = compute_transmittance(False)
tran_sym = compute_transmittance(True)
self.assertAlmostEqual(tran_nosym, tran_sym, places=5)
if __name__ == "__main__":
unittest.main()
| null |
955 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcbn.endpoint import endpoint_data
class CreateTransitRouterVpnAttachmentRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cbn', '2017-09-12', 'CreateTransitRouterVpnAttachment')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_CenId(self): # String
return self.get_query_params().get('CenId')
def set_CenId(self, CenId): # String
self.add_query_param('CenId', CenId)
def get_TransitRouterAttachmentName(self): # String
return self.get_query_params().get('TransitRouterAttachmentName')
def set_TransitRouterAttachmentName(self, TransitRouterAttachmentName): # String
self.add_query_param('TransitRouterAttachmentName', TransitRouterAttachmentName)
def get_Zones(self): # RepeatList
return self.get_query_params().get('Zone')
def set_Zones(self, Zone): # RepeatList
for depth1 in range(len(Zone)):
if Zone[depth1].get('ZoneId') is not None:
self.add_query_param('Zone.' + str(depth1 + 1) + '.ZoneId', Zone[depth1].get('ZoneId'))
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
def get_AutoPublishRouteEnabled(self): # Boolean
return self.get_query_params().get('AutoPublishRouteEnabled')
def set_AutoPublishRouteEnabled(self, AutoPublishRouteEnabled): # Boolean
self.add_query_param('AutoPublishRouteEnabled', AutoPublishRouteEnabled)
def METHOD_NAME(self): # Boolean
return self.get_query_params().get('DryRun')
def set_DryRun(self, DryRun): # Boolean
self.add_query_param('DryRun', DryRun)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_TransitRouterId(self): # String
return self.get_query_params().get('TransitRouterId')
def set_TransitRouterId(self, TransitRouterId): # String
self.add_query_param('TransitRouterId', TransitRouterId)
def get_TransitRouterAttachmentDescription(self): # String
return self.get_query_params().get('TransitRouterAttachmentDescription')
def set_TransitRouterAttachmentDescription(self, TransitRouterAttachmentDescription): # String
self.add_query_param('TransitRouterAttachmentDescription', TransitRouterAttachmentDescription)
def get_VpnOwnerId(self): # Long
return self.get_query_params().get('VpnOwnerId')
def set_VpnOwnerId(self, VpnOwnerId): # Long
self.add_query_param('VpnOwnerId', VpnOwnerId)
def get_ChargeType(self): # String
return self.get_query_params().get('ChargeType')
def set_ChargeType(self, ChargeType): # String
self.add_query_param('ChargeType', ChargeType)
def get_VpnId(self): # String
return self.get_query_params().get('VpnId')
def set_VpnId(self, VpnId): # String
self.add_query_param('VpnId', VpnId)
| null |
956 |
from django.urls import reverse
from django.utils import timezone
from django.contrib.auth.models import Permission
from .. import models
from .generators import OrgFactory, Event2019Factory
from data.tests.util import ViewTestCase
import logging
logging.disable(logging.WARNING)
class OrgViewTest(ViewTestCase):
def setUp(self):
super(OrgViewTest, self).setUp()
self.o1 = OrgFactory.create(name="ababab")
def test_list(self):
# Should not have permission by default
self.assertOk(self.client.get(reverse("orgs:list")), 403)
permission = Permission.objects.get(codename="view_org")
self.user.user_permissions.add(permission)
response = self.client.get(reverse("orgs:list"))
self.assertContains(response, self.o1.name)
def test_detail(self):
# Set up an event to be displayed on the detail page
event = Event2019Factory(event_name="Test Event")
self.o1.events.add(event)
# Should not have permission by default
self.assertOk(self.client.get(reverse("orgs:detail", args=[self.o1.pk])), 403)
permission = Permission.objects.get(codename="view_org")
self.user.user_permissions.add(permission)
# Make sure everything loads ok
response = self.client.get(reverse("orgs:detail", args=[self.o1.pk]))
self.assertContains(response, self.o1.name)
self.assertNotContains(response, "Test Event")
# Will need permission to view events
self.o1.associated_users.add(self.user)
response = self.client.get(reverse("orgs:detail", args=[self.o1.pk]))
self.assertContains(response, "Test Event")
def test_add_org(self):
# Should not have permission by default
self.assertOk(self.client.get(reverse("orgs:add")), 403)
permission = Permission.objects.get(codename="view_org")
self.user.user_permissions.add(permission)
# Will need transfer_org_ownership, list_org_members, and edit_org permissions
permission = Permission.objects.get(codename="transfer_org_ownership")
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename="list_org_members")
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename="edit_org")
self.user.user_permissions.add(permission)
# Show and edit org billing permissions are required to update workday info
permission = Permission.objects.get(codename="edit_org_billing")
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename="show_org_billing")
self.user.user_permissions.add(permission)
self.assertOk(self.client.get(reverse("orgs:add")))
self.assertOk(self.client.post(reverse("orgs:add")))
# ie. with invalid data, it still reports the errors back with a valid page.
# Test invalid worktag
sample_data = {'name': "SAMPLE",
"user_in_charge": str(self.user.pk),
"phone": "(800) 123 4567",
'exec_email': '[email protected]',
"worktag": "test"}
self.assertOk(self.client.post(reverse("orgs:add"), sample_data))
sample_data['worktag'] = "1234-AB"
self.assertRedirects(self.client.post(reverse("orgs:add"), sample_data), reverse("orgs:detail", args=[2]))
# ie. it is valid and redirects to the detail page
self.assertTrue(models.Organization.objects.filter(**sample_data).exists())
# successfully created it
def test_edit_org(self):
        # Will not have view_org permission by default
self.assertOk(self.client.get(reverse("orgs:edit", args=[self.o1.pk])), 403)
permission = Permission.objects.get(codename="view_org")
self.user.user_permissions.add(permission)
# Will need at least edit_org and transfer_org_ownership
permission = Permission.objects.get(codename="edit_org")
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename="transfer_org_ownership")
self.user.user_permissions.add(permission)
self.assertOk(self.client.get(reverse("orgs:edit", args=[self.o1.pk])))
invalid_data = {
"name": "",
"user_in_charge": str(self.user.pk),
"phone": "(800) 123 4567",
"exec_email": "",
}
self.assertOk(self.client.post(reverse("orgs:edit", args=[self.o1.pk]), invalid_data))
# ie. with invalid data, it still reports the errors back with a valid page.
# Show and edit org billing permissions are required to update workday info
permission = Permission.objects.get(codename="edit_org_billing")
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename="show_org_billing")
self.user.user_permissions.add(permission)
# Test with invalid worktag
invalid_data = {
"name": "SAMPLE",
"user_in_charge": str(self.user.pk),
"phone": "(800) 123 4567",
"exec_email": "[email protected]",
"worktag": "test"
}
self.assertOk(self.client.post(reverse("orgs:edit", args=[self.o1.pk]), invalid_data))
sample_data = {'name': "SAMPLE",
"user_in_charge": str(self.user.pk),
"phone": "(800) 123 4567",
'exec_email': '[email protected]',
"worktag": "1234-AB"}
self.assertRedirects(self.client.post(reverse("orgs:edit", args=[self.o1.pk]), sample_data),
reverse("orgs:detail", args=[self.o1.pk]))
# ie. it is valid and redirects to the detail page
self.assertEqual(models.Organization.objects.filter(pk=self.o1.pk).first().name, "SAMPLE")
# successfully edited it
def test_verify(self):
# Should not have permission by default
self.assertOk(self.client.get(reverse("orgs:verify", args=[self.o1.pk])), 403)
permission = Permission.objects.get(codename="create_verifications")
self.user.user_permissions.add(permission)
# Will also need view_org permission for redirect
permission = Permission.objects.get(codename="view_org")
self.user.user_permissions.add(permission)
self.assertOk(self.client.get(reverse("orgs:verify", args=[self.o1.pk])))
valid_data = {
"date": timezone.now().date(),
"verified_by": str(self.user.pk),
"note": "",
"save": "Verify"
}
self.assertRedirects(self.client.post(reverse("orgs:verify", args=[self.o1.pk]), valid_data),
reverse("orgs:detail", args=[self.o1.pk]))
def METHOD_NAME(self):
# By default, should not have permission
self.assertOk(self.client.get(reverse("my:org-transfer", args=[self.o1.pk])), 403)
permission = Permission.objects.get(codename="transfer_org_ownership")
self.user.user_permissions.add(permission)
self.assertOk(self.client.get(reverse("my:org-transfer", args=[self.o1.pk])))
self.o1.associated_users.add(self.user)
valid_data = {
"new_user_in_charge": self.user.pk,
"save": "Submit Transfer"
}
self.assertRedirects(self.client.post(reverse("my:org-transfer", args=[self.o1.pk]), valid_data),
reverse("orgs:detail", args=[self.o1.pk]))
| null |
957 |
#!/usr/bin/env python3
import os
import time
import numpy
from tempfile import TemporaryDirectory
import logging
logger = logging.getLogger(__name__)
import h5py
#Needed for mutithreading:
from queue import Queue
from threading import Thread, Event
import multiprocessing
class Reader(Thread):
"""Thread executing tasks from a given tasks queue"""
def __init__(self, queue_in, queue_out, quit_event):
Thread.__init__(self)
self._queue_in = queue_in
self._queue_out = queue_out
self._quit_event = quit_event
self.daemon = True
self.start()
def run(self):
while not self._quit_event.is_set():
task = self._queue_in.get()
if task:
fn, ds, position = task
else:
logger.debug("Swallow a bitter pill: %s", task)
break
try:
r = fn(ds, position)
self._queue_out.put((position, r))
except Exception as e:
raise(e)
finally:
self._queue_in.task_done()
class SlicingBenchmark:
"""
    Benchmark for reading slices in the most pathological way in a chunked dataset.
    Allows testing how chunk size and compression settings affect read performance.
"""
def __init__(self, ndim=3, size=1024, chunk=64, dtype="float32", precision=16, compression_kwargs=None):
"""
Defines some parameters for the benchmark, can be tuned later on.
:param ndim: work in 3D datasets
:param size: Volume size 1024**3 elements
:param chunk: size of one chunk, with itemsize = 32bits this makes block size of 1MB by default
:param dtype: the type of data to be stored
:param precision: to gain a bit in compression, number of trailing bits to be zeroed.
:param compression_kwargs: a dict with all options for configuring the compression
"""
self.ndim = ndim
self.size = size
self.dtype = numpy.dtype(dtype)
self.chunk = chunk
self.precision = precision
self.tmpdir = None
self.filename = None
self.h5path = "data"
self.total_size = self.size ** self.ndim * self.dtype.itemsize
self.needed_memory = self.size ** (self.ndim-1) * self.dtype.itemsize * self.chunk
if compression_kwargs is None:
self.compression = {}
else:
self.compression = dict(compression_kwargs)
def setup(self):
self.tmpdir = TemporaryDirectory()
self.filename = os.path.join(self.tmpdir.name, "benchmark_slicing.h5")
logger.info("Saving data in %s", self.filename)
logger.info("Total size: %i^%i volume size: %.3fGB, Needed memory: %.3fGB",
self.size, self.ndim, self.total_size/1e9, self.needed_memory/1e9)
shape = [self.size] * self.ndim
chunks = (self.chunk,) * self.ndim
if self.precision and self.dtype.char in "df":
if self.dtype.itemsize == 4:
mask = numpy.uint32(((1<<32) - (1<<(self.precision))))
elif self.dtype.itemsize == 8:
mask = numpy.uint64(((1<<64) - (1<<(self.precision))))
else:
logger.warning("Precision reduction: only float32 and float64 are supported")
else:
self.precision = 0
t0 = time.time()
with h5py.File(self.filename, 'w') as h:
ds = h.create_dataset(self.h5path,
shape,
chunks=chunks,
**self.compression)
for i in range(0, self.size, self.chunk):
x, y, z = numpy.ogrid[i:i+self.chunk, :self.size, :self.size]
data = (numpy.sin(x/3)*numpy.sin(y/5)*numpy.sin(z/7)).astype(self.dtype)
if self.precision:
idata = data.view(mask.dtype)
idata &= mask # mask out the last XX bits
ds[i:i+self.chunk] = data
t1 = time.time()
dt = t1 - t0
filesize = os.stat(self.filename).st_size
logger.info("Compression: %.3f time %.3fs uncompressed data saving speed %.3f MB/s effective write speed %.3f MB/s ",
self.total_size/filesize, dt, self.total_size/dt/1e6, filesize/dt/1e6)
def teardown(self):
self.tmpdir.cleanup()
self.filename = None
@staticmethod
def read_slice(dataset, position):
"""This reads all hyperplans crossing at the given position:
enforces many reads of different chunks,
Probably one of the most pathlogical use-case"""
assert dataset.ndim == len(position)
l = len(position)
res = []
noneslice = slice(None)
for i, w in enumerate(position):
where = [noneslice]*i + [w] + [noneslice]*(l - 1 - i)
res.append(dataset[tuple(where)])
return res
def METHOD_NAME(self, nb_read=64):
"Perform the reading of many orthogonal hyperplanes"
where = [[(i*(self.chunk+1+j))%self.size for j in range(self.ndim)] for i in range(nb_read)]
with h5py.File(self.filename, "r") as h:
ds = h[self.h5path]
t0 = time.time()
for i in where:
self.read_slice(ds, i)
t1 = time.time()
dt = t1 - t0
logger.info("Time for reading %sx%s slices: %.3fs fps: %.3f "%(self.ndim, nb_read, dt, self.ndim*nb_read/dt) +
"Uncompressed data read speed %.3f MB/s"%(self.ndim*nb_read*self.needed_memory/dt/1e6))
return dt
def time_threaded_reads(self, nb_read=64, nthreads=multiprocessing.cpu_count()):
"Perform the reading of many orthogonal hyperplanes, threaded version"
where = [[(i*(self.chunk+1+j))%self.size for j in range(self.ndim)] for i in range(nb_read)]
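        # One input queue feeds the reader threads; results come back on a second queue tagged with their position.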
tasks = Queue()
results = Queue()
quitevent = Event()
pool = [Reader(tasks, results, quitevent) for i in range(nthreads)]
res = []
with h5py.File(self.filename, "r") as h:
ds = h[self.h5path]
t0 = time.time()
for i in where:
tasks.put((self.read_slice, ds, i))
for i in where:
a = results.get()
res.append(a[0])
results.task_done()
tasks.join()
results.join()
t1 = time.time()
# destroy the threads in the pool
quitevent.set()
for i in range(nthreads):
tasks.put(None)
dt = t1 - t0
logger.info("Time for %s-threaded reading %sx%s slices: %.3fs fps: %.3f "%(nthreads, self.ndim, nb_read, dt, self.ndim*nb_read/dt) +
"Uncompressed data read speed %.3f MB/s"%(self.ndim*nb_read*self.needed_memory/dt/1e6))
return dt
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
    benchmark = SlicingBenchmark()
    benchmark.setup()
    benchmark.METHOD_NAME()
    benchmark.time_threaded_reads()
    benchmark.teardown()
| null |
958 |
import copy
import os
import json
import sys
import pytest
parent_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, f'{parent_path}/src')
sys.path.insert(0, parent_path)
from cosalib import meta
from cosalib.cmdlib import get_basearch, load_json
from jsonschema import ValidationError
TEST_META_PATH = os.environ.get(
"COSA_TEST_META_PATH", "/usr/lib/coreos-assembler/fixtures")
TEST_SCHEMA = os.environ.get(
"COSA_META_SCHEMA", "/usr/lib/coreos-assembler/cosalib/v1.json")
def _create_test_files(tmpdir, meta_data=None):
"""
Creates test data for each run.
"""
builds = {
"schema-version": "1.0.0",
"builds": [
{
"id": "1.2.3",
"arches": [
get_basearch()
]
}
],
"timestamp": "2019-01-1T15:19:45Z"
}
if meta_data is None:
meta_data = {
'test': 'data',
'name': 'fedora-coreos',
'a': {
'b': 'c',
}
}
buildsdir = os.path.join(tmpdir, 'builds')
os.makedirs(buildsdir, exist_ok=True)
with open(os.path.join(buildsdir, 'builds.json'), 'w') as f:
f.write(json.dumps(builds))
metadir = os.path.join(
tmpdir, 'builds', '1.2.3', get_basearch())
os.makedirs(metadir, exist_ok=True)
with open(os.path.join(metadir, 'meta.json'), 'w') as f:
f.write(json.dumps(meta_data))
return tmpdir
def test_init(tmpdir):
m = meta.GenericBuildMeta(_create_test_files(tmpdir), '1.2.3', schema=None)
assert m['test'] is not None
def test_get(tmpdir):
m = meta.GenericBuildMeta(_create_test_files(tmpdir), '1.2.3', schema=None)
assert m.get('test') == 'data'
assert m.get('nope', 'default') == 'default'
assert m.get(['a', 'b']) == 'c'
assert m.get(['a', 'd'], 'nope') == 'nope'
def test_set(tmpdir):
"""
Verify setting works as expected.
"""
m = meta.GenericBuildMeta(_create_test_files(tmpdir), '1.2.3', schema=None)
m.set('test', 'changed')
m.write()
m.read()
assert m.get('test') == 'changed'
m.read()
m.set(['a', 'b'], 'z')
m.write()
assert m.get(['a', 'b']) == 'z'
assert m['a']['b'] == 'z'
with pytest.raises(Exception):
m.set(['i', 'donot', 'exist'], 'boom')
def METHOD_NAME(tmpdir):
"""
Verifies the string representation is exactly the same as the
instance dict.
"""
m = meta.GenericBuildMeta(_create_test_files(tmpdir), '1.2.3', schema=None)
assert dict(m) == json.loads(str(m))
def test_valid_schema(tmpdir):
"""
Verifies that schema testing is enforced and checked against a known-good
meta.json.
"""
for meta_f in os.listdir(TEST_META_PATH):
print(f"Validating {meta_f}")
test_meta = os.path.join(TEST_META_PATH, meta_f)
with open(test_meta, 'r') as valid_data:
td = json.load(valid_data)
_ = meta.GenericBuildMeta(_create_test_files(tmpdir, meta_data=td),
'1.2.3')
def test_invalid_schema(tmpdir):
"""
Verifies that schema testing is enforced and checked against a known-good
meta.json.
"""
with pytest.raises(ValidationError):
_ = meta.GenericBuildMeta(_create_test_files(tmpdir), '1.2.3')
def test_merge_meta(tmpdir):
"""
Verifies merging meta.json works as expected.
"""
x = None
y = None
aws = {
"path": "/dev/null",
"size": 99999999,
"sha256": "ff279bc0207964d96571adfd720b1af1b65e587e589eee528d0315b7fb298773"
}
def get_aws(x, key="path"):
return x.get("images", {}).get("aws", {}).get(key)
for meta_f in os.listdir(TEST_META_PATH):
test_meta = os.path.join(TEST_META_PATH, meta_f)
with open(test_meta, 'r') as valid_data:
td = json.load(valid_data)
m = meta.GenericBuildMeta(_create_test_files(tmpdir, meta_data=td),
'1.2.3')
w = meta.GenericBuildMeta(_create_test_files(tmpdir, meta_data=td),
'1.2.3')
# create working copies
if x is None:
x = copy.deepcopy(m)
else:
y = copy.deepcopy(m)
# add the stamp
m.write()
old_stamp = m.get(meta.COSA_VER_STAMP)
assert old_stamp is not None
# check merging old into new
m["images"]["aws"] = aws
m[meta.COSA_VER_STAMP] = 10
m.write()
new_stamp = m.get(meta.COSA_VER_STAMP)
assert new_stamp > old_stamp
assert get_aws(m) != aws["path"]
# Now go full yolo and attempt to merge RHCOS into FCOS
        # Srsly? Who's going to do this...
y._meta_path = x.path
with pytest.raises(meta.COSAMergeError):
x.write()
#### Artifact merging tests
# clear the meta.json that's been corrupted
os.unlink(x.path)
# test that write went to meta.json
maws = x.write()
assert x.path == maws
# test that write went to meta.aws.json
x.set("coreos-assembler.delayed-meta-merge", True)
maws = x.write(artifact_name="aws")
assert maws.endswith("aws.json")
# make sure that meta.json != meta.aws.json
x.read()
d = load_json(maws)
assert get_aws(m) != get_aws(d)
# test that the write went to meta.<TS>.json
tnw = x.write()
assert maws != tnw
| null |
959 |
"""Report Generating for OTX CLI."""
# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import sys
from collections import defaultdict
from pathlib import Path
from pprint import pformat
from typing import Any, Dict, Union
import torch
import otx
from otx.api.entities.model_template import ModelTemplate
def get_otx_report(
model_template: ModelTemplate,
task_config: Dict[str, Any],
data_config: Dict[str, Dict[str, str]],
results: Dict[str, Any],
output_path: Union[str, Path],
):
"""Generate CLI reports."""
dash_line = "-" * 60 + "\n\n"
# Header
report_str = get_otx_cli_ascii_banner()
report_str += dash_line
report_str += f"Current path: {Path.cwd()}\n"
report_str += f"sys.argv: {sys.argv}\n"
report_str += f"OTX: {otx.__version__}\n"
# 1. Machine Environment
report_str += sub_title_to_str("Running Environments")
report_str += env_info_to_str()
# 2. Task Information (Task, Train-type, Etc.)
if model_template and task_config:
report_str += sub_title_to_str("Template Information")
report_str += template_info_to_str(model_template)
# 3. Dataset Configuration
if data_config:
report_str += sub_title_to_str("Dataset Information")
report_str += METHOD_NAME(data_config)
# 4. Configurations
report_str += sub_title_to_str("Configurations")
report_str += task_config_to_str(task_config)
# 5. Result Summary
report_str += sub_title_to_str("Results")
for key, value in results.items():
report_str += f"\t{key}: {pformat(value)}\n"
Path(output_path).write_text(report_str, encoding="UTF-8")
def sub_title_to_str(title: str):
"""Add sub title for report."""
dash_line = "-" * 60
report_str = ""
report_str += dash_line + "\n\n"
report_str += title + "\n\n"
report_str += dash_line + "\n"
return report_str
def env_info_to_str():
"""Get Environments."""
report_str = ""
env_info = {}
try:
from mmcv.utils.env import collect_env
env_info = collect_env()
if "PyTorch compiling details" in env_info:
env_info.pop("PyTorch compiling details")
except ModuleNotFoundError:
env_info["sys.platform"] = sys.platform
env_info["Python"] = sys.version.replace("\n", "")
cuda_available = torch.cuda.is_available()
env_info["CUDA available"] = cuda_available
if cuda_available:
devices = defaultdict(list)
for k in range(torch.cuda.device_count()):
devices[torch.cuda.get_device_name(k)].append(str(k))
for name, device_ids in devices.items():
env_info["GPU " + ",".join(device_ids)] = name
env_info["PyTorch"] = torch.__version__
for key, value in env_info.items():
report_str += f"\t{key}: {value}\n"
return report_str
def template_info_to_str(model_template: ModelTemplate):
"""Get Template information."""
report_str = ""
for key, value in model_template.__dict__.items():
report_str += f"\t{key}: {pformat(value)}\n"
return report_str
def METHOD_NAME(data_config: Dict[str, Dict[str, str]]):
"""Get Dataset configuration."""
report_str = ""
for subset_key, subset_value in data_config.items():
report_str += f"{subset_key}:\n"
for key, value in subset_value.items():
report_str += f"\t{key}: {value}\n"
return report_str
def task_config_to_str(task_config: Dict[str, Any]):
"""Get Task configuration."""
report_str = ""
not_target = ["log_config"]
for target, value in task_config.items():
# Remove otx_dataset from the report as it is unnecessary.
if target == "data" and isinstance(value, dict):
for item in value.values():
if isinstance(item, dict) and "otx_dataset" in item:
del item["otx_dataset"]
if target not in not_target:
report_str += target + ": "
model_str = pformat(value)
report_str += model_str + "\n"
return report_str
def get_otx_cli_ascii_banner():
"""Get OTX ASCII banner."""
return """
██████╗ ████████╗ ██╗ ██╗
██╔═══██╗ ╚══██╔══╝ ╚██╗██╔╝
██║ ██║ ██║ ╚███╔╝
██║ ██║ ██║ ██╔██╗
╚██████╔╝ ██║ ██╔╝ ██╗
╚═════╝ ╚═╝ ╚═╝ ╚═╝
"""
| null |
960 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeInstancesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ens', '2017-11-10', 'DescribeInstances','ens')
self.set_method('POST')
def get_OrderByParams(self): # String
return self.get_query_params().get('OrderByParams')
def set_OrderByParams(self, OrderByParams): # String
self.add_query_param('OrderByParams', OrderByParams)
def get_EnsRegionId(self): # String
return self.get_query_params().get('EnsRegionId')
def set_EnsRegionId(self, EnsRegionId): # String
self.add_query_param('EnsRegionId', EnsRegionId)
def get_InstanceResourceType(self): # String
return self.get_query_params().get('InstanceResourceType')
def set_InstanceResourceType(self, InstanceResourceType): # String
self.add_query_param('InstanceResourceType', InstanceResourceType)
def get_EnsServiceId(self): # String
return self.get_query_params().get('EnsServiceId')
def set_EnsServiceId(self, EnsServiceId): # String
self.add_query_param('EnsServiceId', EnsServiceId)
def get_VSwitchId(self): # String
return self.get_query_params().get('VSwitchId')
def set_VSwitchId(self, VSwitchId): # String
self.add_query_param('VSwitchId', VSwitchId)
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
def get_InstanceName(self): # String
return self.get_query_params().get('InstanceName')
def set_InstanceName(self, InstanceName): # String
self.add_query_param('InstanceName', InstanceName)
def METHOD_NAME(self): # String
return self.get_query_params().get('InstanceIds')
def set_InstanceIds(self, InstanceIds): # String
self.add_query_param('InstanceIds', InstanceIds)
def get_NetworkId(self): # String
return self.get_query_params().get('NetworkId')
def set_NetworkId(self, NetworkId): # String
self.add_query_param('NetworkId', NetworkId)
def get_Status(self): # String
return self.get_query_params().get('Status')
def set_Status(self, Status): # String
self.add_query_param('Status', Status)
def get_IntranetIp(self): # String
return self.get_query_params().get('IntranetIp')
def set_IntranetIp(self, IntranetIp): # String
self.add_query_param('IntranetIp', IntranetIp)
def get_ImageId(self): # String
return self.get_query_params().get('ImageId')
def set_ImageId(self, ImageId): # String
self.add_query_param('ImageId', ImageId)
def get_SecurityGroupId(self): # String
return self.get_query_params().get('SecurityGroupId')
def set_SecurityGroupId(self, SecurityGroupId): # String
self.add_query_param('SecurityGroupId', SecurityGroupId)
def get_SearchKey(self): # String
return self.get_query_params().get('SearchKey')
def set_SearchKey(self, SearchKey): # String
self.add_query_param('SearchKey', SearchKey)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_PageSize(self): # String
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # String
self.add_query_param('PageSize', PageSize)
def get_EnsRegionIds(self): # String
return self.get_query_params().get('EnsRegionIds')
def set_EnsRegionIds(self, EnsRegionIds): # String
self.add_query_param('EnsRegionIds', EnsRegionIds)
| null |
961 |
import logging
import operator
from functools import reduce
from time import sleep
from typing import Any, Dict, List
import boto3
import botocore.loaders as boto_loader
import botocore.regions as boto_regions
from botocore.config import Config as BotoConfig
from botocore.exceptions import ClientError, NoCredentialsError, ProfileNotFound
from taskcat.exceptions import TaskCatException
LOG = logging.getLogger(__name__)
REGIONAL_ENDPOINT_SERVICES = ["sts"]
class Boto3Cache:
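    """Caches boto3 sessions, clients and resources per profile/region/service to avoid rebuilding them."""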
RETRIES = 10
BACKOFF = 2
DELAY = 0.1
CLIENT_THROTTLE_RETRIES = 20
def __init__(self, _boto3=boto3):
self._boto3 = _boto3
self._session_cache: Dict[str, Dict[str, boto3.Session]] = {}
self._client_cache: Dict[str, Dict[str, Dict[str, boto3.client]]] = {}
self._resource_cache: Dict[str, Dict[str, Dict[str, boto3.resource]]] = {}
self._account_info: Dict[str, Dict[str, str]] = {}
self._lock_cache_update = False
def METHOD_NAME(self, profile: str = "default", region: str = None) -> boto3.Session:
region = self._get_region(region, profile)
try:
METHOD_NAME = self._cache_lookup(
self._session_cache,
[profile, region],
self._boto3.Session,
[],
{"region_name": region, "profile_name": profile},
)
except ProfileNotFound:
if profile != "default":
raise
METHOD_NAME = self._boto3.Session(region_name=region)
self._cache_set(self._session_cache, [profile, region], METHOD_NAME)
return METHOD_NAME
def client(
self, service: str, profile: str = "default", region: str = None
) -> boto3.client:
region = self._get_region(region, profile)
METHOD_NAME = self.METHOD_NAME(profile, region)
kwargs = {"config": BotoConfig(retries={"max_attempts": 20})}
if service in REGIONAL_ENDPOINT_SERVICES:
kwargs.update({"endpoint_url": self._get_endpoint_url(service, region)})
return self._cache_lookup(
self._client_cache,
[profile, region, service],
METHOD_NAME.client,
[service],
kwargs,
)
def resource(
self, service: str, profile: str = "default", region: str = None
) -> boto3.resource:
region = self._get_region(region, profile)
METHOD_NAME = self.METHOD_NAME(profile, region)
return self._cache_lookup(
self._resource_cache,
[profile, region, service],
METHOD_NAME.resource,
[service],
)
def partition(self, profile: str = "default") -> str:
return self._cache_lookup(
self._account_info, [profile], self._get_account_info, [profile]
)["partition"]
def account_id(self, profile: str = "default") -> str:
return self._cache_lookup(
self._account_info, [profile], self._get_account_info, [profile]
)["account_id"]
def _get_account_info(self, profile):
partition, region = self._get_partition(profile)
METHOD_NAME = self.METHOD_NAME(profile, region)
sts_client = METHOD_NAME.client("sts", region_name=region)
try:
account_id = sts_client.get_caller_identity()["Account"]
except ClientError as e:
if e.response["Error"]["Code"] == "AccessDenied":
# pylint: disable=raise-missing-from
raise TaskCatException(
f"Not able to fetch account number from {region} using profile "
f"{profile}. {str(e)}"
)
raise
except NoCredentialsError as e:
# pylint: disable=raise-missing-from
raise TaskCatException(
f"Not able to fetch account number from {region} using profile "
f"{profile}. {str(e)}"
)
except ProfileNotFound as e:
# pylint: disable=raise-missing-from
raise TaskCatException(
f"Not able to fetch account number from {region} using profile "
f"{profile}. {str(e)}"
)
return {"partition": partition, "account_id": account_id}
def _make_parent_keys(self, cache: dict, keys: list):
if keys:
if not cache.get(keys[0]):
cache[keys[0]] = {}
self._make_parent_keys(cache[keys[0]], keys[1:])
def _cache_lookup(self, cache, key_list, create_func, args=None, kwargs=None):
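        # Return the cached value when present; otherwise create it (with retries) and cache it.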
try:
value = self._cache_get(cache, key_list)
except KeyError:
args = [] if not args else args
kwargs = {} if not kwargs else kwargs
value = self._get_with_retry(create_func, args, kwargs)
self._cache_set(cache, key_list, value)
return value
    def _get_with_retry(self, create_func, args, kwargs):
        retries = self.RETRIES
        delay = self.DELAY
        while retries:
            try:
                return create_func(*args, **kwargs)
            except KeyError as e:
                # boto3 session/client creation is not thread safe and can raise
                # KeyError on these internal components when raced; back off and retry.
                if str(e) not in ["'credential_provider'", "'endpoint_resolver'"]:
                    raise
                retries -= 1  # decrement so the loop terminates and the backoff grows
                backoff = (self.RETRIES - retries + delay) * self.BACKOFF
                sleep(backoff)
@staticmethod
def _get_endpoint_url(service, region):
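        # Build the regional endpoint URL from botocore's bundled endpoint data.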
data = boto_loader.create_loader().load_data("endpoints")
endpoint_data = boto_regions.EndpointResolver(data).construct_endpoint(
service, region
)
if not endpoint_data:
raise TaskCatException(
f"unable to resolve endpoint for {service} in {region}"
)
return f"https://{service}.{region}.{endpoint_data['dnsSuffix']}"
@staticmethod
def _cache_get(cache: dict, key_list: List[str]):
return reduce(operator.getitem, key_list, cache)
def _cache_set(self, cache: dict, key_list: list, value: Any):
self._make_parent_keys(cache, key_list[:-1])
self._cache_get(cache, key_list[:-1])[key_list[-1]] = value
def _get_region(self, region, profile):
if not region:
region = self.get_default_region(profile)
return region
def _get_partition(self, profile):
partition_regions = [
("aws", "us-east-1"),
("aws-cn", "cn-north-1"),
("aws-us-gov", "us-gov-west-1"),
]
for partition, region in partition_regions:
try:
self.METHOD_NAME(profile, region).client(
"sts", region_name=region
).get_caller_identity()
return (partition, region)
except ClientError as e:
if "InvalidClientTokenId" in str(e):
continue
raise
raise ValueError("cannot find suitable AWS partition")
def get_default_region(self, profile_name="default") -> str:
try:
if profile_name != "default":
region = self._boto3.METHOD_NAME.Session(
profile_name=profile_name
).region_name
else:
region = self._boto3.METHOD_NAME.Session().region_name
except ProfileNotFound:
if profile_name != "default":
raise
region = self._boto3.METHOD_NAME.Session().region_name
if not region:
_, region = self._get_partition(profile_name)
LOG.warning(
"Region not set in credential chain, defaulting to {}".format(region)
)
return region
| null |
962 |
# coding=utf-8
# Copyright 2018-2023 EvaDB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from copy import copy
from mock import MagicMock
from evadb.optimizer.group_expression import GroupExpression
from evadb.optimizer.operators import Operator
from evadb.optimizer.optimizer_context import OptimizerContext
from evadb.optimizer.optimizer_tasks import OptimizeGroup
from evadb.optimizer.plan_generator import PlanGenerator
from evadb.optimizer.property import PropertyType
class CostModel(unittest.TestCase):
def execute_task_stack(self, task_stack):
while not task_stack.empty():
task = task_stack.pop()
task.execute()
def METHOD_NAME(self):
# mocking the cost model
def side_effect_func(value):
if value is grp_expr1:
return 1
elif value is grp_expr2:
return 2
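        # The test case object doubles as the cost model here; its calculate_cost is mocked below.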
cm = CostModel()
cm.calculate_cost = MagicMock(side_effect=side_effect_func)
opt_cxt = OptimizerContext(MagicMock(), cm)
grp_expr1 = GroupExpression(MagicMock())
grp_expr1.opr.is_logical = lambda: False
grp_expr2 = GroupExpression(MagicMock())
grp_expr2.opr.is_logical = lambda: False
opt_cxt.memo.add_group_expr(grp_expr1)
opt_cxt.memo.add_group_expr(grp_expr2, grp_expr1.group_id)
grp = opt_cxt.memo.get_group_by_id(grp_expr1.group_id)
opt_cxt.task_stack.push(OptimizeGroup(grp, opt_cxt))
self.execute_task_stack(opt_cxt.task_stack)
plan = PlanGenerator(MagicMock()).build_optimal_physical_plan(
grp_expr1.group_id, opt_cxt
)
self.assertEqual(plan, grp_expr1.opr)
self.assertEqual(grp.get_best_expr_cost(PropertyType.DEFAULT), 1)
def test_should_select_cheap_plan_with_tree(self):
# mocking the cost model
def side_effect_func(value):
cost = dict(
{
grp_expr00: 1,
grp_expr01: 2,
grp_expr10: 4,
grp_expr11: 3,
grp_expr20: 5,
}
)
return cost[value]
cm = CostModel()
cm.calculate_cost = MagicMock(side_effect=side_effect_func)
opt_cxt = OptimizerContext(MagicMock(), cm)
# group 0
grp_expr00 = GroupExpression(Operator(MagicMock()))
grp_expr00.opr.is_logical = lambda: False
grp_expr01 = GroupExpression(Operator(MagicMock()))
grp_expr01.opr.is_logical = lambda: False
opt_cxt.memo.add_group_expr(grp_expr00)
opt_cxt.memo.add_group_expr(grp_expr01, grp_expr00.group_id)
# group 1
grp_expr10 = GroupExpression(Operator(MagicMock()))
grp_expr10.opr.is_logical = lambda: False
opt_cxt.memo.add_group_expr(grp_expr10)
grp_expr11 = GroupExpression(Operator(MagicMock()))
grp_expr11.opr.is_logical = lambda: False
opt_cxt.memo.add_group_expr(grp_expr11, grp_expr10.group_id)
# group 2
grp_expr20 = GroupExpression(Operator(MagicMock()))
grp_expr20.opr.is_logical = lambda: False
opt_cxt.memo.add_group_expr(grp_expr20)
grp = opt_cxt.memo.get_group_by_id(grp_expr20.group_id)
# tree: 2->1->0
grp_expr10.children = [grp_expr01.group_id]
grp_expr11.children = [grp_expr01.group_id]
grp_expr20.children = [grp_expr10.group_id]
opt_cxt.task_stack.push(OptimizeGroup(grp, opt_cxt))
self.execute_task_stack(opt_cxt.task_stack)
plan = PlanGenerator(MagicMock()).build_optimal_physical_plan(
grp_expr20.group_id, opt_cxt
)
subplan = copy(grp_expr11.opr)
subplan.children = [copy(grp_expr01.opr)]
expected_plan = copy(grp_expr20.opr)
expected_plan.children = [subplan]
self.assertEqual(plan, expected_plan)
self.assertEqual(grp.get_best_expr_cost(PropertyType.DEFAULT), 9)
| null |
963 |
import importlib
import inspect
import pkgutil
import sys
import arm
import arm.logicnode.arm_nodes as arm_nodes
from arm.logicnode.arm_props import *
import arm.logicnode.arm_sockets as arm_sockets
from arm.logicnode.replacement import NodeReplacement
if arm.is_reload(__name__):
arm_nodes = arm.reload_module(arm_nodes)
arm.logicnode.arm_props = arm.reload_module(arm.logicnode.arm_props)
from arm.logicnode.arm_props import *
arm_sockets = arm.reload_module(arm_sockets)
arm.logicnode.replacement = arm.reload_module(arm.logicnode.replacement)
from arm.logicnode.replacement import NodeReplacement
HAS_RELOADED = True
else:
arm.enable_reload(__name__)
def init_categories():
"""Register default node menu categories."""
arm_nodes.add_category('Logic', icon='OUTLINER', section="basic",
description="Logic nodes are used to control execution flow using branching, loops, gates etc.")
arm_nodes.add_category('Event', icon='INFO', section="basic")
arm_nodes.add_category('Input', icon='GREASEPENCIL', section="basic")
arm_nodes.add_category('Native', icon='MEMORY', section="basic",
description="The Native category contains nodes which interact with the system (Input/Output functionality, etc.) or Haxe.")
arm_nodes.add_category('Camera', icon='OUTLINER_OB_CAMERA', section="data")
arm_nodes.add_category('Material', icon='MATERIAL', section="data")
arm_nodes.add_category('Light', icon='LIGHT', section="data")
arm_nodes.add_category('Object', icon='OBJECT_DATA', section="data")
arm_nodes.add_category('Scene', icon='SCENE_DATA', section="data")
arm_nodes.add_category('Trait', icon='NODETREE', section="data")
arm_nodes.add_category('Network', icon='WORLD', section="data")
arm_nodes.add_category('Animation', icon='SEQUENCE', section="motion")
arm_nodes.add_category('Navmesh', icon='UV_VERTEXSEL', section="motion")
arm_nodes.add_category('Transform', icon='TRANSFORM_ORIGINS', section="motion")
arm_nodes.add_category('Physics', icon='PHYSICS', section="motion")
arm_nodes.add_category('Array', icon='LIGHTPROBE_GRID', section="values")
arm_nodes.add_category('Map', icon='SHORTDISPLAY', section="values")
arm_nodes.add_category('Math', icon='FORCE_HARMONIC', section="values")
arm_nodes.add_category('Random', icon='SEQ_HISTOGRAM', section="values")
arm_nodes.add_category('String', icon='SORTALPHA', section="values")
arm_nodes.add_category('Variable', icon='OPTIONS', section="values")
arm_nodes.add_category('Draw', icon='GREASEPENCIL', section="graphics")
arm_nodes.add_category('Canvas', icon='RENDERLAYERS', section="graphics",
description="Note: To get the canvas, be sure that the node(s) and the canvas (UI) is attached to the same object.")
arm_nodes.add_category('Postprocess', icon='FREEZE', section="graphics")
arm_nodes.add_category('Renderpath', icon='STICKY_UVS_LOC', section="graphics")
arm_nodes.add_category('Sound', icon='OUTLINER_OB_SPEAKER', section="sound")
arm_nodes.add_category('Miscellaneous', icon='RESTRICT_COLOR_ON', section="misc")
arm_nodes.add_category('Layout', icon='SEQ_STRIP_DUPLICATE', section="misc")
# Make sure that logic node extension packs are displayed at the end
# of the menu by default unless they declare it otherwise
arm_nodes.add_category_section('default')
def METHOD_NAME(base_path=__path__, base_package=__package__, subpackages_only=False):
"""Calls the `on_register()` method on all logic nodes in a given
`base_package` and all its sub-packages relative to the given
`base_path`, in order to initialize them and to register them to Armory.
Be aware that calling this function will import all modules in the
given package, so module-level code will be executed.
If `subpackages_only` is true, modules directly inside the root of
the base package are not searched and imported.
"""
for loader, module_name, is_pkg in pkgutil.walk_packages(base_path, base_package + '.'):
if is_pkg:
# The package must be loaded as well so that the modules from that package can be accessed (see the
# pkgutil.walk_packages documentation for more information on this)
loader.find_module(module_name).load_module(module_name)
# Only look at modules in sub packages if specified
elif not subpackages_only or module_name.rsplit('.', 1)[0] != base_package:
if 'HAS_RELOADED' not in globals() or module_name not in sys.modules:
_module = importlib.import_module(module_name)
else:
# Reload the module if the SDK was reloaded at least once
_module = importlib.reload(sys.modules[module_name])
for name, obj in inspect.getmembers(_module, inspect.isclass):
if name in ("ArmLogicTreeNode", "ArmLogicVariableNodeMixin"):
continue
if issubclass(obj, arm_nodes.ArmLogicTreeNode):
obj.on_register()
| null |
964 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdataworks_public.endpoint import endpoint_data
class CreateFileRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'dataworks-public', '2020-05-18', 'CreateFile')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_FileType(self): # Integer
return self.get_body_params().get('FileType')
def set_FileType(self, FileType): # Integer
self.add_body_params('FileType', FileType)
def get_DependentNodeIdList(self): # String
return self.get_body_params().get('DependentNodeIdList')
def set_DependentNodeIdList(self, DependentNodeIdList): # String
self.add_body_params('DependentNodeIdList', DependentNodeIdList)
def get_Content(self): # String
return self.get_body_params().get('Content')
def METHOD_NAME(self, Content): # String
self.add_body_params('Content', Content)
def get_ProjectIdentifier(self): # String
return self.get_body_params().get('ProjectIdentifier')
def set_ProjectIdentifier(self, ProjectIdentifier): # String
self.add_body_params('ProjectIdentifier', ProjectIdentifier)
def get_ResourceGroupId(self): # Long
return self.get_body_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # Long
self.add_body_params('ResourceGroupId', ResourceGroupId)
def get_StartImmediately(self): # Boolean
return self.get_body_params().get('StartImmediately')
def set_StartImmediately(self, StartImmediately): # Boolean
self.add_body_params('StartImmediately', StartImmediately)
def get_ProjectId(self): # Long
return self.get_body_params().get('ProjectId')
def set_ProjectId(self, ProjectId): # Long
self.add_body_params('ProjectId', ProjectId)
def get_AdvancedSettings(self): # String
return self.get_body_params().get('AdvancedSettings')
def set_AdvancedSettings(self, AdvancedSettings): # String
self.add_body_params('AdvancedSettings', AdvancedSettings)
def get_StartEffectDate(self): # Long
return self.get_body_params().get('StartEffectDate')
def set_StartEffectDate(self, StartEffectDate): # Long
self.add_body_params('StartEffectDate', StartEffectDate)
def get_CycleType(self): # String
return self.get_body_params().get('CycleType')
def set_CycleType(self, CycleType): # String
self.add_body_params('CycleType', CycleType)
def get_Owner(self): # String
return self.get_body_params().get('Owner')
def set_Owner(self, Owner): # String
self.add_body_params('Owner', Owner)
def get_AutoRerunIntervalMillis(self): # Integer
return self.get_body_params().get('AutoRerunIntervalMillis')
def set_AutoRerunIntervalMillis(self, AutoRerunIntervalMillis): # Integer
self.add_body_params('AutoRerunIntervalMillis', AutoRerunIntervalMillis)
def get_InputList(self): # String
return self.get_body_params().get('InputList')
def set_InputList(self, InputList): # String
self.add_body_params('InputList', InputList)
def get_CreateFolderIfNotExists(self): # Boolean
return self.get_body_params().get('CreateFolderIfNotExists')
def set_CreateFolderIfNotExists(self, CreateFolderIfNotExists): # Boolean
self.add_body_params('CreateFolderIfNotExists', CreateFolderIfNotExists)
def get_RerunMode(self): # String
return self.get_body_params().get('RerunMode')
def set_RerunMode(self, RerunMode): # String
self.add_body_params('RerunMode', RerunMode)
def get_ConnectionName(self): # String
return self.get_body_params().get('ConnectionName')
def set_ConnectionName(self, ConnectionName): # String
self.add_body_params('ConnectionName', ConnectionName)
def get_OutputParameters(self): # String
return self.get_body_params().get('OutputParameters')
def set_OutputParameters(self, OutputParameters): # String
self.add_body_params('OutputParameters', OutputParameters)
def get_ParaValue(self): # String
return self.get_body_params().get('ParaValue')
def set_ParaValue(self, ParaValue): # String
self.add_body_params('ParaValue', ParaValue)
def get_ResourceGroupIdentifier(self): # String
return self.get_body_params().get('ResourceGroupIdentifier')
def set_ResourceGroupIdentifier(self, ResourceGroupIdentifier): # String
self.add_body_params('ResourceGroupIdentifier', ResourceGroupIdentifier)
def get_AutoRerunTimes(self): # Integer
return self.get_body_params().get('AutoRerunTimes')
def set_AutoRerunTimes(self, AutoRerunTimes): # Integer
self.add_body_params('AutoRerunTimes', AutoRerunTimes)
def get_CronExpress(self): # String
return self.get_body_params().get('CronExpress')
def set_CronExpress(self, CronExpress): # String
self.add_body_params('CronExpress', CronExpress)
def get_IgnoreParentSkipRunningProperty(self): # Boolean
return self.get_body_params().get('IgnoreParentSkipRunningProperty')
def set_IgnoreParentSkipRunningProperty(self, IgnoreParentSkipRunningProperty): # Boolean
self.add_body_params('IgnoreParentSkipRunningProperty', IgnoreParentSkipRunningProperty)
def get_EndEffectDate(self): # Long
return self.get_body_params().get('EndEffectDate')
def set_EndEffectDate(self, EndEffectDate): # Long
self.add_body_params('EndEffectDate', EndEffectDate)
def get_FileName(self): # String
return self.get_body_params().get('FileName')
def set_FileName(self, FileName): # String
self.add_body_params('FileName', FileName)
def get_InputParameters(self): # String
return self.get_body_params().get('InputParameters')
def set_InputParameters(self, InputParameters): # String
self.add_body_params('InputParameters', InputParameters)
def get_Stop(self): # Boolean
return self.get_body_params().get('Stop')
def set_Stop(self, Stop): # Boolean
self.add_body_params('Stop', Stop)
def get_DependentType(self): # String
return self.get_body_params().get('DependentType')
def set_DependentType(self, DependentType): # String
self.add_body_params('DependentType', DependentType)
def get_FileFolderPath(self): # String
return self.get_body_params().get('FileFolderPath')
def set_FileFolderPath(self, FileFolderPath): # String
self.add_body_params('FileFolderPath', FileFolderPath)
def get_FileDescription(self): # String
return self.get_body_params().get('FileDescription')
def set_FileDescription(self, FileDescription): # String
self.add_body_params('FileDescription', FileDescription)
def get_AutoParsing(self): # Boolean
return self.get_body_params().get('AutoParsing')
def set_AutoParsing(self, AutoParsing): # Boolean
self.add_body_params('AutoParsing', AutoParsing)
def get_SchedulerType(self): # String
return self.get_body_params().get('SchedulerType')
def set_SchedulerType(self, SchedulerType): # String
self.add_body_params('SchedulerType', SchedulerType)
| null |
965 |
# Drakkar-Software OctoBot-Tentacles
# Copyright (c) Drakkar-Software, All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
import hashlib
import uuid
import octobot_services.constants as services_constants
import octobot_services.services as services
import octobot.constants as constants
class TradingViewService(services.AbstractService):
def __init__(self):
super().__init__()
self.requires_token = None
self.token = None
self._webhook_url = None
@staticmethod
def is_setup_correctly(config):
return True
@staticmethod
def get_is_enabled(config):
return True
def has_required_configuration(self):
return True
def get_required_config(self):
return [services_constants.CONFIG_REQUIRE_TRADING_VIEW_TOKEN,
services_constants.CONFIG_TRADING_VIEW_TOKEN]
def get_fields_description(self):
        return {
            services_constants.CONFIG_REQUIRE_TRADING_VIEW_TOKEN:
                "When enabled, the Trading View webhook will require your "
                "tradingview.com token to process any signal.",
            services_constants.CONFIG_TRADING_VIEW_TOKEN:
                "Your personal unique tradingview.com token. Can be used to ensure that only your "
                "Trading View signals trigger your OctoBot in case someone else gets "
                "your webhook link. You can change it at any moment but remember to change it "
                "on your tradingview.com signal account as well."
        }
def get_default_value(self):
return {
services_constants.CONFIG_REQUIRE_TRADING_VIEW_TOKEN: False,
services_constants.CONFIG_TRADING_VIEW_TOKEN: self.get_security_token(uuid.uuid4().hex)
}
def get_read_only_info(self):
return {
"Webhook url:": self._webhook_url
} if self._webhook_url else {}
@classmethod
def get_help_page(cls) -> str:
return f"{constants.OCTOBOT_DOCS_URL}/webhooks/tradingview-webhook"
def get_endpoint(self) -> None:
return None
def get_type(self) -> None:
return services_constants.CONFIG_TRADING_VIEW
def get_website_url(self):
return "https://www.tradingview.com/"
def METHOD_NAME(self):
return "https://in.tradingview.com/static/images/favicon.ico"
async def prepare(self) -> None:
try:
self.requires_token = \
self.config[services_constants.CONFIG_CATEGORY_SERVICES][services_constants.CONFIG_TRADING_VIEW][
services_constants.CONFIG_REQUIRE_TRADING_VIEW_TOKEN]
self.token = \
self.config[services_constants.CONFIG_CATEGORY_SERVICES][services_constants.CONFIG_TRADING_VIEW][
services_constants.CONFIG_TRADING_VIEW_TOKEN]
except KeyError:
if self.requires_token is None:
self.requires_token = self.get_default_value()[services_constants.CONFIG_REQUIRE_TRADING_VIEW_TOKEN]
if self.token is None:
self.token = self.get_default_value()[services_constants.CONFIG_TRADING_VIEW_TOKEN]
# save new values into config file
updated_config = {
services_constants.CONFIG_REQUIRE_TRADING_VIEW_TOKEN: self.requires_token,
services_constants.CONFIG_TRADING_VIEW_TOKEN: self.token
}
self.save_service_config(services_constants.CONFIG_TRADING_VIEW, updated_config)
@staticmethod
def get_security_token(pin_code):
"""
Generate unique token from pin. This adds a marginal amount of security.
:param pin_code: the pin code to use
:return: the generated token
"""
token = hashlib.sha224(pin_code.encode('utf-8'))
return token.hexdigest()
def register_webhook_url(self, webhook_url):
self._webhook_url = webhook_url
def get_successful_startup_message(self):
return "", True
| null |
966 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdts.endpoint import endpoint_data
class DescribeConnectionStatusRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Dts', '2020-01-01', 'DescribeConnectionStatus','dts')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_SourceEndpointRegion(self): # String
return self.get_query_params().get('SourceEndpointRegion')
def set_SourceEndpointRegion(self, SourceEndpointRegion): # String
self.add_query_param('SourceEndpointRegion', SourceEndpointRegion)
def get_SourceEndpointArchitecture(self): # String
return self.get_query_params().get('SourceEndpointArchitecture')
def set_SourceEndpointArchitecture(self, SourceEndpointArchitecture): # String
self.add_query_param('SourceEndpointArchitecture', SourceEndpointArchitecture)
def get_DestinationEndpointInstanceType(self): # String
return self.get_query_params().get('DestinationEndpointInstanceType')
def set_DestinationEndpointInstanceType(self, DestinationEndpointInstanceType): # String
self.add_query_param('DestinationEndpointInstanceType', DestinationEndpointInstanceType)
def get_SourceEndpointInstanceID(self): # String
return self.get_query_params().get('SourceEndpointInstanceID')
def set_SourceEndpointInstanceID(self, SourceEndpointInstanceID): # String
self.add_query_param('SourceEndpointInstanceID', SourceEndpointInstanceID)
def get_SourceEndpointUserName(self): # String
return self.get_query_params().get('SourceEndpointUserName')
def set_SourceEndpointUserName(self, SourceEndpointUserName): # String
self.add_query_param('SourceEndpointUserName', SourceEndpointUserName)
def get_SourceEndpointDatabaseName(self): # String
return self.get_query_params().get('SourceEndpointDatabaseName')
def set_SourceEndpointDatabaseName(self, SourceEndpointDatabaseName): # String
self.add_query_param('SourceEndpointDatabaseName', SourceEndpointDatabaseName)
def METHOD_NAME(self): # String
return self.get_query_params().get('DestinationEndpointRegion')
def set_DestinationEndpointRegion(self, DestinationEndpointRegion): # String
self.add_query_param('DestinationEndpointRegion', DestinationEndpointRegion)
def get_SourceEndpointIP(self): # String
return self.get_query_params().get('SourceEndpointIP')
def set_SourceEndpointIP(self, SourceEndpointIP): # String
self.add_query_param('SourceEndpointIP', SourceEndpointIP)
def get_DestinationEndpointUserName(self): # String
return self.get_query_params().get('DestinationEndpointUserName')
def set_DestinationEndpointUserName(self, DestinationEndpointUserName): # String
self.add_query_param('DestinationEndpointUserName', DestinationEndpointUserName)
def get_DestinationEndpointArchitecture(self): # String
return self.get_query_params().get('DestinationEndpointArchitecture')
def set_DestinationEndpointArchitecture(self, DestinationEndpointArchitecture): # String
self.add_query_param('DestinationEndpointArchitecture', DestinationEndpointArchitecture)
def get_DestinationEndpointOracleSID(self): # String
return self.get_query_params().get('DestinationEndpointOracleSID')
def set_DestinationEndpointOracleSID(self, DestinationEndpointOracleSID): # String
self.add_query_param('DestinationEndpointOracleSID', DestinationEndpointOracleSID)
def get_DestinationEndpointEngineName(self): # String
return self.get_query_params().get('DestinationEndpointEngineName')
def set_DestinationEndpointEngineName(self, DestinationEndpointEngineName): # String
self.add_query_param('DestinationEndpointEngineName', DestinationEndpointEngineName)
def get_DestinationEndpointInstanceID(self): # String
return self.get_query_params().get('DestinationEndpointInstanceID')
def set_DestinationEndpointInstanceID(self, DestinationEndpointInstanceID): # String
self.add_query_param('DestinationEndpointInstanceID', DestinationEndpointInstanceID)
def get_DestinationEndpointPort(self): # String
return self.get_query_params().get('DestinationEndpointPort')
def set_DestinationEndpointPort(self, DestinationEndpointPort): # String
self.add_query_param('DestinationEndpointPort', DestinationEndpointPort)
def get_SourceEndpointPassword(self): # String
return self.get_query_params().get('SourceEndpointPassword')
def set_SourceEndpointPassword(self, SourceEndpointPassword): # String
self.add_query_param('SourceEndpointPassword', SourceEndpointPassword)
def get_SourceEndpointPort(self): # String
return self.get_query_params().get('SourceEndpointPort')
def set_SourceEndpointPort(self, SourceEndpointPort): # String
self.add_query_param('SourceEndpointPort', SourceEndpointPort)
def get_DestinationEndpointIP(self): # String
return self.get_query_params().get('DestinationEndpointIP')
def set_DestinationEndpointIP(self, DestinationEndpointIP): # String
self.add_query_param('DestinationEndpointIP', DestinationEndpointIP)
def get_SourceEndpointInstanceType(self): # String
return self.get_query_params().get('SourceEndpointInstanceType')
def set_SourceEndpointInstanceType(self, SourceEndpointInstanceType): # String
self.add_query_param('SourceEndpointInstanceType', SourceEndpointInstanceType)
def get_SourceEndpointOracleSID(self): # String
return self.get_query_params().get('SourceEndpointOracleSID')
def set_SourceEndpointOracleSID(self, SourceEndpointOracleSID): # String
self.add_query_param('SourceEndpointOracleSID', SourceEndpointOracleSID)
def get_DestinationEndpointDatabaseName(self): # String
return self.get_query_params().get('DestinationEndpointDatabaseName')
def set_DestinationEndpointDatabaseName(self, DestinationEndpointDatabaseName): # String
self.add_query_param('DestinationEndpointDatabaseName', DestinationEndpointDatabaseName)
def get_DestinationEndpointPassword(self): # String
return self.get_query_params().get('DestinationEndpointPassword')
def set_DestinationEndpointPassword(self, DestinationEndpointPassword): # String
self.add_query_param('DestinationEndpointPassword', DestinationEndpointPassword)
def get_SourceEndpointEngineName(self): # String
return self.get_query_params().get('SourceEndpointEngineName')
def set_SourceEndpointEngineName(self, SourceEndpointEngineName): # String
self.add_query_param('SourceEndpointEngineName', SourceEndpointEngineName)
| null |
967 |
"""Tornado handlers for kernel specs."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import json
from typing import Dict, List, Optional
from jupyter_server.base.handlers import JupyterHandler
from jupyter_server.services.kernelspecs.handlers import is_kernelspec_model, kernelspec_model
from jupyter_server.utils import ensure_async, url_unescape
from tornado import web
from traitlets import Set
from ...base.handlers import APIHandler
from ...mixins import CORSMixin, JSONErrorsMixin, TokenAuthorizationMixin
from .kernelspec_cache import KernelSpecCache
def apply_user_filter(
kernelspec_model: Dict[str, object],
global_authorized_list: Set,
global_unauthorized_list: Set,
kernel_user: str = None,
) -> Optional[Dict[str, object]]:
"""
If authorization lists are configured - either within the kernelspec or globally, ensure
the user is authorized for the given kernelspec.
"""
if kernel_user:
# Check the unauthorized list of the kernelspec, then the globally-configured unauthorized list - the
# semantics of which are a union of the two lists.
try:
            # Pull the kernelspec-level unauthorized_users list, if one is configured
unauthorized_list = kernelspec_model["spec"]["metadata"]["process_proxy"]["config"][
"unauthorized_users"
]
except KeyError:
pass
else:
if kernel_user in unauthorized_list:
return None
if kernel_user in global_unauthorized_list:
return None
# Check the authorized list of the kernelspec, then the globally-configured authorized list -
# but only if the kernelspec list doesn't exist. This is because the kernelspec set of authorized
# users may be a subset of globally authorized users and is, essentially, used as a denial to those
# not defined in the kernelspec's list.
try:
authorized_list = kernelspec_model["spec"]["metadata"]["process_proxy"]["config"][
"authorized_users"
]
except KeyError:
if global_authorized_list and kernel_user not in global_authorized_list:
return None
else:
if authorized_list and kernel_user not in authorized_list:
return None
return kernelspec_model
class MainKernelSpecHandler(TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin, APIHandler):
"""The root kernel spec handler."""
@property
def kernel_spec_cache(self) -> KernelSpecCache:
return self.settings["kernel_spec_cache"]
@web.authenticated
async def get(self) -> None:
"""Get the kernel spec models."""
ksc = self.kernel_spec_cache
km = self.kernel_manager
model = {}
model["default"] = km.default_kernel_name
model["kernelspecs"] = specs = {}
kernel_user_filter = self.request.query_arguments.get("user")
kernel_user = None
if kernel_user_filter:
kernel_user = kernel_user_filter[0].decode("utf-8")
if kernel_user:
self.log.debug("Searching kernels for user '%s' " % kernel_user)
kspecs = await ensure_async(ksc.get_all_specs())
list_kernels_found = []
for kernel_name, kernel_info in kspecs.items():
try:
if is_kernelspec_model(kernel_info):
d = kernel_info
else:
d = kernelspec_model(
self, kernel_name, kernel_info["spec"], kernel_info["resource_dir"]
)
d = apply_user_filter(
d,
self.settings["eg_authorized_users"],
self.settings["eg_unauthorized_users"],
kernel_user,
)
if d is not None:
specs[kernel_name] = d
list_kernels_found.append(d["name"])
else:
self.log.debug(
f"User {kernel_user} is not authorized to use kernel spec {kernel_name}"
)
except Exception:
self.log.error("Failed to load kernel spec: '%s'", kernel_name)
continue
self.set_header("Content-Type", "application/json")
self.finish(json.dumps(model))
class KernelSpecHandler(TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin, APIHandler):
"""A handler for a specific kernel spec."""
@property
def kernel_spec_cache(self) -> KernelSpecCache:
return self.settings["kernel_spec_cache"]
@web.authenticated
async def get(self, kernel_name: str) -> None:
"""Get a kernel spec by name."""
ksc = self.kernel_spec_cache
kernel_name = url_unescape(kernel_name)
kernel_user_filter = self.request.query_arguments.get("user")
kernel_user = None
if kernel_user_filter:
kernel_user = kernel_user_filter[0].decode("utf-8")
try:
spec = await ensure_async(ksc.get_kernel_spec(kernel_name))
except KeyError:
raise web.HTTPError(404, "Kernel spec %s not found" % kernel_name) from None
if is_kernelspec_model(spec):
model = spec
else:
model = kernelspec_model(self, kernel_name, spec.to_dict(), spec.resource_dir)
d = apply_user_filter(
model,
self.settings["eg_authorized_users"],
self.settings["eg_unauthorized_users"],
kernel_user,
)
if d is None:
raise web.HTTPError(
403, f"User {kernel_user} is not authorized to use kernel spec {kernel_name}"
)
self.set_header("Content-Type", "application/json")
self.finish(json.dumps(model))
class KernelSpecResourceHandler(
TokenAuthorizationMixin, CORSMixin, JSONErrorsMixin, web.StaticFileHandler, JupyterHandler
):
"""A handler for kernel spec resources."""
SUPPORTED_METHODS = ("GET", "HEAD")
@property
def kernel_spec_cache(self) -> KernelSpecCache:
return self.settings["kernel_spec_cache"]
def initialize(self) -> None:
"""Initialize the handler."""
web.StaticFileHandler.initialize(self, path="")
@web.authenticated
async def get(self, kernel_name: str, path: str, include_body: bool = True) -> None:
"""Get a resource for a kernel."""
ksc = self.kernel_spec_cache
try:
kernelspec = await ensure_async(ksc.get_kernel_spec(kernel_name))
self.root = kernelspec.resource_dir
except KeyError as e:
raise web.HTTPError(404, "Kernel spec %s not found" % kernel_name) from e
self.log.debug("Serving kernel resource from: %s", self.root)
return await web.StaticFileHandler.get(self, path, include_body=include_body)
@web.authenticated
def METHOD_NAME(self, kernel_name: str, path: str) -> None:
"""Get the head for a kernel resource."""
return self.get(kernel_name, path, include_body=False)
kernel_name_regex: str = r"(?P<kernel_name>[\w\.\-%]+)"
# Extends the default handlers from the jupyter_server package with token auth, CORS
# and JSON errors.
default_handlers: List[tuple] = [
(r"/api/kernelspecs", MainKernelSpecHandler),
(r"/api/kernelspecs/%s" % kernel_name_regex, KernelSpecHandler),
(r"/kernelspecs/%s/(?P<path>.*)" % kernel_name_regex, KernelSpecResourceHandler),
]
| null |
968 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import List, Optional
import torch
from lm_eval.base import BaseLM
from lm_eval_harness.utils.multiple_token_stopping_criteria import (
MultipleTokenStoppingCriteria,
)
from lm_eval_harness.utils.request_factory import Request
from tqdm import tqdm
from transformers.generation.stopping_criteria import StoppingCriteriaList
from transformers.tokenization_utils import PreTrainedTokenizer
class HFEvalModel(BaseLM):
def __init__(
self,
model: torch.nn.Module,
tokenizer: PreTrainedTokenizer,
force_attention_mask: Optional[bool] = False,
max_generated_tokens: Optional[int] = 256,
) -> None:
super().__init__()
self._device = torch.device("cpu")
if torch.cuda.is_available():
self._device = torch.device("cuda")
self.model = model.to(self.device)
self.tokenizer = tokenizer
self.tokenizer.add_special_tokens({"pad_token": "[PAD]"})
self.force_attention_mask = force_attention_mask
self.max_generated_tokens = max_generated_tokens
@property
def eot_token_id(self) -> int:
return self.tokenizer.eos_token_id
@property
def max_length(self) -> int:
try:
return self.model.config.n_ctx
except AttributeError:
return self.model.config.max_position_embeddings
@property
def max_gen_toks(self) -> int:
return self.max_generated_tokens
@property
def METHOD_NAME(self) -> int:
return 1
@property
def device(self) -> torch.device:
return self._device
def tok_encode(self, string: str) -> List[int]:
return self.tokenizer.encode(string, add_special_tokens=False)
def tok_decode(self, tokens: List[int]) -> str:
return self.tokenizer.decode(tokens)
def _model_call(self, inps: torch.Tensor) -> torch.Tensor:
inps = inps.to(self.device)
kwargs = {}
if self.force_attention_mask:
kwargs["attention_mask"] = torch.zeros(inps.shape, dtype=torch.long, device=inps.device)
with torch.no_grad():
return self.model(inps, **kwargs)[0]
def _model_generate(self, context: str, max_length: int, eos_token_id: int) -> str:
return self.model.generate(context, max_length=max_length, eos_token_id=eos_token_id, do_sample=False)
def generate(self, requests: List[Request]) -> List[str]:
res = []
kwargs = {}
for context, stop_tokens, do_sample, temperature, top_p, max_new_tokens in tqdm(requests):
if not context:
context = self.eos_token
# Encodes the context and defines number of tokens to be
# removed when generation ends (default = 1)
input_ids = self.tokenizer(context, return_tensors="pt")["input_ids"].to(self.device)
n_removal_tokens = 1
if stop_tokens:
# Encodes the stop-tokens and defines the number of tokens to be
# removed as largest stop-token
encoded_stop_tokens = self.tokenizer(
stop_tokens,
padding="longest",
add_special_tokens=False,
return_attention_mask=False,
return_tensors="pt",
)["input_ids"].to(self.device)
n_removal_tokens = encoded_stop_tokens.shape[-1]
# Defines the stopping criteria
kwargs["stopping_criteria"] = StoppingCriteriaList([MultipleTokenStoppingCriteria(encoded_stop_tokens)])
# Generates the tokens and removes generated stop-tokens
generated_tokens = self.model.generate(
input_ids,
pad_token_id=self.eot_token_id,
do_sample=do_sample,
temperature=temperature,
top_p=top_p,
max_new_tokens=max_new_tokens,
**kwargs
).squeeze(0)
generated_tokens = generated_tokens[:-n_removal_tokens]
res.append(self.tok_decode(generated_tokens))
return res
| null |
969 |
from rest_framework import status as http_status
from flask import redirect, request
import markupsafe
from framework.auth.decorators import must_be_logged_in
from framework.exceptions import HTTPError, PermissionsError
from framework import status
from transitions import MachineError
from osf.exceptions import UnsupportedSanctionHandlerKind, TokenError
def registration_approval_handler(action, registration, registered_from):
# TODO: Unnecessary and duplicated dictionary.
status.push_status_message({
'approve': 'Your registration approval has been accepted.',
'reject': 'Your disapproval has been accepted and the registration has been cancelled.',
}[action], kind='success', trust=False)
# Allow decorated view function to return response
return None
def embargo_handler(action, registration, registered_from):
status.push_status_message({
'approve': 'Your embargo approval has been accepted.',
'reject': 'Your disapproval has been accepted and the embargo has been cancelled.',
}[action], kind='success', trust=False)
# Allow decorated view function to return response
return None
def embargo_termination_handler(action, registration, registered_from):
status.push_status_message({
'approve': 'Your approval to make this embargo public has been accepted.',
'reject': 'Your disapproval has been accepted and this embargo will not be made public.',
}[action], kind='success', trust=False)
# Allow decorated view function to return response
return None
def METHOD_NAME(action, registration, registered_from):
status.push_status_message({
'approve': 'Your withdrawal approval has been accepted.',
'reject': 'Your disapproval has been accepted and the withdrawal has been cancelled.'
}[action], kind='success', trust=False)
# Allow decorated view function to return response
return None
@must_be_logged_in
def sanction_handler(kind, action, payload, encoded_token, auth, **kwargs):
from osf.models import (
Embargo,
EmbargoTerminationApproval,
RegistrationApproval,
Retraction
)
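    # Map the sanction kind from the URL to its corresponding approval model class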
Model = {
'registration': RegistrationApproval,
'embargo': Embargo,
'embargo_termination_approval': EmbargoTerminationApproval,
'retraction': Retraction
}.get(kind, None)
if not Model:
raise UnsupportedSanctionHandlerKind
sanction_id = payload.get('sanction_id', None)
sanction = Model.load(sanction_id)
err_code = None
err_message = None
if not sanction:
err_code = http_status.HTTP_400_BAD_REQUEST
err_message = 'There is no {0} associated with this token.'.format(
markupsafe.escape(Model.DISPLAY_NAME))
elif sanction.is_approved:
# Simply strip query params and redirect if already approved
return redirect(request.base_url)
elif sanction.is_rejected:
err_code = http_status.HTTP_410_GONE if kind in ['registration', 'embargo'] else http_status.HTTP_400_BAD_REQUEST
err_message = 'This registration {0} has been rejected.'.format(
markupsafe.escape(sanction.DISPLAY_NAME))
if err_code:
raise HTTPError(err_code, data=dict(
message_long=err_message
))
do_action = getattr(sanction, action, None)
if do_action:
registration = sanction.registrations.get()
registered_from = registration.registered_from
try:
do_action(user=auth.user, token=encoded_token)
except TokenError as e:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST, data={
'message_short': e.message_short,
'message_long': str(e)
})
except PermissionsError as e:
raise HTTPError(http_status.HTTP_401_UNAUTHORIZED, data={
'message_short': 'Unauthorized access',
'message_long': str(e)
})
except MachineError as e:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST, data={
'message_short': 'Operation not allowed at this time',
'message_long': e.value
})
sanction.save()
return {
'registration': registration_approval_handler,
'embargo': embargo_handler,
'embargo_termination_approval': embargo_termination_handler,
'retraction': METHOD_NAME,
}[kind](action, registration, registered_from)
| null |
970 |
import logging
import torch
from pytorch_lightning.utilities.distributed import ReduceOp, sync_ddp_if_available
from reagent.core.types import CBInput
from reagent.evaluation.cb.base_evaluator import BaseOfflineEval
logger = logging.getLogger(__name__)
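# Small constant added to denominators to avoid division by zero when no observations have been accepted yet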
EPSILON = 1e-9
class PolicyEvaluator(BaseOfflineEval):
"""
An offline evaluator for Contextual Bandits, based on the paper https://arxiv.org/pdf/1003.0146.pdf (Algorithm 3)
"""
@torch.no_grad()
def _process_all_data(self, batch: CBInput) -> None:
assert batch.reward is not None
if batch.weight is not None:
weights = batch.weight
else:
weights = torch.ones_like(batch.reward)
self.sum_weight_all_data_local += weights.sum()
self.sum_reward_weighted_all_data_local += (weights * batch.reward).sum()
if batch.arm_presence is not None:
sizes = batch.arm_presence.sum(1)
else:
# assume that all arms are present
sizes = torch.ones_like(batch.reward) * batch.context_arm_features.shape[1]
self.sum_size_weighted_all_data_local += (weights.squeeze() * sizes).sum()
@torch.no_grad()
def METHOD_NAME(self, batch: CBInput) -> None:
"""
Process the observations for which the logged action matches the model action:
- Update the average reward
- Update the total weight counter
"""
assert batch.reward is not None
assert batch.importance_weight is not None
assert batch.importance_weight.shape == batch.reward.shape
if batch.weight is not None:
weights = batch.weight
else:
weights = torch.ones_like(batch.reward)
assert weights.shape == batch.importance_weight.shape
self.sum_reward_importance_weighted_accepted_local += (
batch.effective_weight * batch.reward
).sum()
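        # Observations with a non-zero importance weight are the ones whose logged
        # action matched the model's action (the "accepted" observations).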
accepted_indicators = (batch.importance_weight > 0).float()
self.sum_reward_weighted_accepted_local += (
weights * accepted_indicators * batch.reward
).sum()
self.sum_weight_accepted_local += (weights * accepted_indicators).sum()
self.sum_importance_weight_accepted_local += batch.effective_weight.sum()
if batch.arm_presence is not None:
sizes = batch.arm_presence.sum(1)
else:
# assume that all arms are present
sizes = torch.ones_like(batch.reward) * batch.context_arm_features.shape[1]
self.sum_size_weighted_accepted_local += (
(weights * accepted_indicators).squeeze() * sizes
).sum()
def _aggregate_across_instances(self) -> None:
# sum local values across all trainers, add to the global value
# clone the tensors to avoid modifying them inplace
sum_weight_accepted = sync_ddp_if_available(
self.sum_weight_accepted_local.clone(),
# pyre-fixme[6]: For 2nd argument expected `Union[None, str, ReduceOp]`
# but got `RedOpType`.
reduce_op=ReduceOp.SUM,
)
sum_importance_weight_accepted = sync_ddp_if_available(
self.sum_importance_weight_accepted_local.clone(),
# pyre-fixme[6]: For 2nd argument expected `Union[None, str, ReduceOp]`
# but got `RedOpType`.
reduce_op=ReduceOp.SUM,
)
sum_weight_all_data = sync_ddp_if_available(
self.sum_weight_all_data_local.clone(),
# pyre-fixme[6]: For 2nd argument expected `Union[None, str, ReduceOp]`
# but got `RedOpType`.
reduce_op=ReduceOp.SUM,
)
sum_weight_rejected = sum_weight_all_data - sum_weight_accepted
sum_reward_weighted_accepted = sync_ddp_if_available(
self.sum_reward_weighted_accepted_local.clone(),
# pyre-fixme[6]: For 2nd argument expected `Union[None, str, ReduceOp]`
# but got `RedOpType`.
reduce_op=ReduceOp.SUM,
)
sum_reward_importance_weighted_accepted = sync_ddp_if_available(
self.sum_reward_importance_weighted_accepted_local.clone(),
# pyre-fixme[6]: For 2nd argument expected `Union[None, str, ReduceOp]`
# but got `RedOpType`.
reduce_op=ReduceOp.SUM,
)
sum_reward_weighted_all_data = sync_ddp_if_available(
self.sum_reward_weighted_all_data_local.clone(),
# pyre-fixme[6]: For 2nd argument expected `Union[None, str, ReduceOp]`
# but got `RedOpType`.
reduce_op=ReduceOp.SUM,
)
sum_reward_weighted_rejected = (
sum_reward_weighted_all_data - sum_reward_weighted_accepted
)
sum_size_weighted_accepted = sync_ddp_if_available(
self.sum_size_weighted_accepted_local.clone(),
# pyre-fixme[6]: For 2nd argument expected `Union[None, str, ReduceOp]`
# but got `RedOpType`.
reduce_op=ReduceOp.SUM,
)
sum_size_weighted_all_data = sync_ddp_if_available(
self.sum_size_weighted_all_data_local.clone(),
# pyre-fixme[6]: For 2nd argument expected `Union[None, str, ReduceOp]`
# but got `RedOpType`.
reduce_op=ReduceOp.SUM,
)
sum_size_weighted_rejected = (
sum_size_weighted_all_data - sum_size_weighted_accepted
)
        # update the global cumulative sum buffers
self.sum_reward_weighted_accepted += sum_reward_weighted_accepted
self.sum_reward_importance_weighted_accepted += (
sum_reward_importance_weighted_accepted
)
self.sum_weight_accepted += sum_weight_accepted
self.sum_importance_weight_accepted += sum_importance_weight_accepted
self.sum_weight_all_data += sum_weight_all_data
        # calculate the metrics for the window (since the last aggregation across instances)
self.frac_accepted = sum_weight_accepted / sum_weight_all_data
self.avg_reward_accepted = sum_reward_weighted_accepted / sum_weight_accepted
self.avg_reward_rejected = sum_reward_weighted_rejected / sum_weight_rejected
self.avg_reward_all_data = sum_reward_weighted_all_data / sum_weight_all_data
self.accepted_rejected_reward_ratio = (
self.avg_reward_accepted / self.avg_reward_rejected
)
self.avg_size_accepted = sum_size_weighted_accepted / sum_weight_accepted
self.avg_size_rejected = sum_size_weighted_rejected / sum_weight_rejected
# reset local values to zero
self.sum_reward_importance_weighted_accepted_local.zero_()
self.sum_reward_weighted_accepted_local.zero_()
self.sum_reward_weighted_all_data_local.zero_()
self.sum_weight_accepted_local.zero_()
self.sum_importance_weight_accepted_local.zero_()
self.sum_weight_all_data_local.zero_()
self.sum_size_weighted_accepted_local.zero_()
self.sum_size_weighted_all_data_local.zero_()
def get_avg_reward(self) -> float:
        assert (
            self.sum_importance_weight_accepted_local.item() == 0.0
        ), f"Non-zero local weight {self.sum_importance_weight_accepted_local.item()} in the evaluator. _aggregate_across_instances() should have been called to aggregate across all instances and zero out the local values."
# return the average reward
return (
self.sum_reward_importance_weighted_accepted
/ (self.sum_importance_weight_accepted + EPSILON)
).item()
| null |
971 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkwaf_openapi.endpoint import endpoint_data
class CreateDomainRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'waf-openapi', '2019-09-10', 'CreateDomain','waf')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_IpFollowStatus(self): # Integer
return self.get_query_params().get('IpFollowStatus')
def set_IpFollowStatus(self, IpFollowStatus): # Integer
self.add_query_param('IpFollowStatus', IpFollowStatus)
def get_Keepalive(self): # Boolean
return self.get_query_params().get('Keepalive')
def set_Keepalive(self, Keepalive): # Boolean
self.add_query_param('Keepalive', Keepalive)
def get_SniHost(self): # String
return self.get_query_params().get('SniHost')
def set_SniHost(self, SniHost): # String
self.add_query_param('SniHost', SniHost)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_HttpPort(self): # String
return self.get_query_params().get('HttpPort')
def set_HttpPort(self, HttpPort): # String
self.add_query_param('HttpPort', HttpPort)
def get_Http2Port(self): # String
return self.get_query_params().get('Http2Port')
def set_Http2Port(self, Http2Port): # String
self.add_query_param('Http2Port', Http2Port)
def get_WriteTime(self): # Integer
return self.get_query_params().get('WriteTime')
def set_WriteTime(self, WriteTime): # Integer
self.add_query_param('WriteTime', WriteTime)
def get_AccessHeaderMode(self): # Integer
return self.get_query_params().get('AccessHeaderMode')
def set_AccessHeaderMode(self, AccessHeaderMode): # Integer
self.add_query_param('AccessHeaderMode', AccessHeaderMode)
def get_AccessHeaders(self): # String
return self.get_query_params().get('AccessHeaders')
def set_AccessHeaders(self, AccessHeaders): # String
self.add_query_param('AccessHeaders', AccessHeaders)
def get_KeepaliveTimeout(self): # Integer
return self.get_query_params().get('KeepaliveTimeout')
def set_KeepaliveTimeout(self, KeepaliveTimeout): # Integer
self.add_query_param('KeepaliveTimeout', KeepaliveTimeout)
def get_ClusterType(self): # Integer
return self.get_query_params().get('ClusterType')
def set_ClusterType(self, ClusterType): # Integer
self.add_query_param('ClusterType', ClusterType)
def get_HttpsRedirect(self): # Integer
return self.get_query_params().get('HttpsRedirect')
def set_HttpsRedirect(self, HttpsRedirect): # Integer
self.add_query_param('HttpsRedirect', HttpsRedirect)
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
def get_Domain(self): # String
return self.get_query_params().get('Domain')
def set_Domain(self, Domain): # String
self.add_query_param('Domain', Domain)
def get_ReadTime(self): # Integer
return self.get_query_params().get('ReadTime')
def set_ReadTime(self, ReadTime): # Integer
self.add_query_param('ReadTime', ReadTime)
def get_HttpsPort(self): # String
return self.get_query_params().get('HttpsPort')
def set_HttpsPort(self, HttpsPort): # String
self.add_query_param('HttpsPort', HttpsPort)
def get_SniStatus(self): # Integer
return self.get_query_params().get('SniStatus')
def set_SniStatus(self, SniStatus): # Integer
self.add_query_param('SniStatus', SniStatus)
def get_Retry(self): # Boolean
return self.get_query_params().get('Retry')
def set_Retry(self, Retry): # Boolean
self.add_query_param('Retry', Retry)
def get_KeepaliveRequests(self): # Integer
return self.get_query_params().get('KeepaliveRequests')
def METHOD_NAME(self, KeepaliveRequests): # Integer
self.add_query_param('KeepaliveRequests', KeepaliveRequests)
def get_AccessType(self): # String
return self.get_query_params().get('AccessType')
def set_AccessType(self, AccessType): # String
self.add_query_param('AccessType', AccessType)
def get_LogHeaders(self): # String
return self.get_query_params().get('LogHeaders')
def set_LogHeaders(self, LogHeaders): # String
self.add_query_param('LogHeaders', LogHeaders)
def get_ConnectionTime(self): # Integer
return self.get_query_params().get('ConnectionTime')
def set_ConnectionTime(self, ConnectionTime): # Integer
self.add_query_param('ConnectionTime', ConnectionTime)
def get_CloudNativeInstances(self): # String
return self.get_query_params().get('CloudNativeInstances')
def set_CloudNativeInstances(self, CloudNativeInstances): # String
self.add_query_param('CloudNativeInstances', CloudNativeInstances)
def get_SourceIps(self): # String
return self.get_query_params().get('SourceIps')
def set_SourceIps(self, SourceIps): # String
self.add_query_param('SourceIps', SourceIps)
def get_IsAccessProduct(self): # Integer
return self.get_query_params().get('IsAccessProduct')
def set_IsAccessProduct(self, IsAccessProduct): # Integer
self.add_query_param('IsAccessProduct', IsAccessProduct)
def get_LoadBalancing(self): # Integer
return self.get_query_params().get('LoadBalancing')
def set_LoadBalancing(self, LoadBalancing): # Integer
self.add_query_param('LoadBalancing', LoadBalancing)
def get_HttpToUserIp(self): # Integer
return self.get_query_params().get('HttpToUserIp')
def set_HttpToUserIp(self, HttpToUserIp): # Integer
self.add_query_param('HttpToUserIp', HttpToUserIp)
| null |
972 |
import json
import pytest
import falcon
from falcon import errors, media, testing
@pytest.fixture
def client():
return create_client()
def create_client(handlers=None):
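    # Build a falcon test app with a simple resource at '/'; extra response
    # media handlers can be passed in to extend the defaults.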
res = testing.SimpleTestResource()
app = falcon.App()
app.add_route('/', res)
if handlers:
app.resp_options.media_handlers.update(handlers)
client = testing.TestClient(app)
client.resource = res
return client
class SimpleMediaResource:
def __init__(self, document, media_type=falcon.MEDIA_JSON):
self._document = document
self._media_type = media_type
def METHOD_NAME(self, req, resp):
resp.content_type = self._media_type
resp.media = self._document
resp.status = falcon.HTTP_OK
@pytest.mark.parametrize(
'media_type',
[
('*/*'),
(falcon.MEDIA_JSON),
('application/json; charset=utf-8'),
],
)
def test_json(client, media_type):
client.simulate_get('/')
resp = client.resource.captured_resp
resp.content_type = media_type
resp.media = {'something': True}
assert json.loads(resp.render_body().decode('utf-8')) == {'something': True}
@pytest.mark.parametrize(
'document',
[
'',
'I am a \u1d0a\ua731\u1d0f\u0274 string.',
['\u2665', '\u2660', '\u2666', '\u2663'],
{'message': '\xa1Hello Unicode! \U0001F638'},
{
'description': 'A collection of primitive Python type examples.',
'bool': False is not True and True is not False,
'dict': {'example': 'mapping'},
'float': 1.0,
'int': 1337,
'list': ['a', 'sequence', 'of', 'items'],
'none': None,
'str': 'ASCII string',
'unicode': 'Hello Unicode! \U0001F638',
},
],
)
def test_non_ascii_json_serialization(document):
app = falcon.App()
app.add_route('/', SimpleMediaResource(document))
client = testing.TestClient(app)
resp = client.simulate_get('/')
assert resp.json == document
@pytest.mark.parametrize(
'media_type',
[
(falcon.MEDIA_MSGPACK),
('application/msgpack; charset=utf-8'),
('application/x-msgpack'),
],
)
def test_msgpack(media_type):
client = create_client(
{
'application/msgpack': media.MessagePackHandler(),
'application/x-msgpack': media.MessagePackHandler(),
}
)
client.simulate_get('/')
resp = client.resource.captured_resp
resp.content_type = media_type
# Bytes
resp.media = {b'something': True}
assert resp.render_body() == b'\x81\xc4\tsomething\xc3'
# Unicode
resp.media = {'something': True}
assert resp.render_body() == b'\x81\xa9something\xc3'
def test_unknown_media_type(client):
client.simulate_get('/')
resp = client.resource.captured_resp
with pytest.raises(errors.HTTPUnsupportedMediaType) as err:
resp.content_type = 'nope/json'
resp.media = {'something': True}
resp.render_body()
assert err.value.description == 'nope/json is an unsupported media type.'
def test_use_cached_media(client):
client.simulate_get('/')
resp = client.resource.captured_resp
expected = {'something': True}
resp._media = expected
assert resp.media == expected
def test_default_media_type(client):
client.simulate_get('/')
resp = client.resource.captured_resp
resp.content_type = ''
resp.media = {'something': True}
assert json.loads(resp.render_body().decode('utf-8')) == {'something': True}
assert resp.content_type == 'application/json'
def test_mimeparse_edgecases(client):
client.simulate_get('/')
resp = client.resource.captured_resp
resp.content_type = 'application/vnd.something'
with pytest.raises(errors.HTTPUnsupportedMediaType):
resp.media = {'something': True}
resp.render_body()
resp.content_type = 'invalid'
with pytest.raises(errors.HTTPUnsupportedMediaType):
resp.media = {'something': True}
resp.render_body()
# Clear the content type, shouldn't raise this time
resp.content_type = None
resp.media = {'something': True}
class TestRenderBodyPrecedence:
def test_text(self, client):
client.simulate_get('/')
resp = client.resource.captured_resp
resp.text = 'body'
resp.data = b'data'
resp.media = ['media']
assert resp.render_body() == b'body'
def test_data(self, client):
client.simulate_get('/')
resp = client.resource.captured_resp
resp.data = b'data'
resp.media = ['media']
assert resp.render_body() == b'data'
def test_media(self, client):
client.simulate_get('/')
resp = client.resource.captured_resp
resp.media = ['media']
assert json.loads(resp.render_body().decode('utf-8')) == ['media']
def test_media_rendered_cached(client):
client.simulate_get('/')
resp = client.resource.captured_resp
resp.media = {'foo': 'bar'}
first = resp.render_body()
assert first is resp.render_body()
assert first is resp._media_rendered
resp.media = 123
assert first is not resp.render_body()
| null |
973 |
import calendar
from flask import make_response, Response
from flask_appbuilder import expose, has_access, permission_name
from flask_appbuilder import ModelView
from flask_appbuilder.charts.views import GroupByChartView
from flask_appbuilder.models.group import aggregate_count
from flask_appbuilder.models.mongoengine.interface import MongoEngineInterface
from . import appbuilder
from .models import ContactGroup, Contact, Tags, Gender
def fill_gender():
try:
g1 = Gender(name="Male")
g1.save()
g2 = Gender(name="Female")
g2.save()
    except Exception:
        # Genders may already exist from a previous run; ignore duplicate inserts
        pass
class ContactModelView(ModelView):
datamodel = MongoEngineInterface(Contact)
label_columns = {"image_thumb_show": "Photo", "image_show": "Photo"}
list_columns = [
"image_thumb_show",
"name",
"personal_celphone",
"birthday",
"contact_group",
]
show_columns = [
"image_show",
"name",
"personal_celphone",
"birthday",
"contact_group",
]
@expose("/mongo_download/<pk>")
@has_access
def mongo_download(self, pk):
item = self.datamodel.get(pk)
file = item.file.read()
response = make_response(file)
response.headers["Content-Disposition"] = "attachment; filename={0}".format(
item.file.name
)
return response
@expose("/img/<pk>")
@has_access
@permission_name("show_img")
def METHOD_NAME(self, pk):
item = self.datamodel.get(pk)
mime_type = item.image.content_type
return Response(item.image.read(), mimetype=mime_type, direct_passthrough=True)
@expose("/img_thumb/<pk>")
@has_access
@permission_name("show_img")
def img_thumb(self, pk):
item = self.datamodel.get(pk)
mime_type = item.image.content_type
return Response(
item.image.thumbnail.read(), mimetype=mime_type, direct_passthrough=True
)
class GroupModelView(ModelView):
datamodel = MongoEngineInterface(ContactGroup)
related_views = [ContactModelView]
search_columns = ["name"]
class TagsModelView(ModelView):
datamodel = MongoEngineInterface(Tags)
class ContactChartView(GroupByChartView):
datamodel = MongoEngineInterface(Contact)
chart_title = "Grouped contacts"
label_columns = ContactModelView.label_columns
chart_type = "PieChart"
definitions = [
{"group": "contact_group", "series": [(aggregate_count, "contact_group")]},
{"group": "gender", "series": [(aggregate_count, "gender")]},
]
def pretty_month_year(value):
return calendar.month_name[value.month] + " " + str(value.year)
def pretty_year(value):
return str(value.year)
class ContactTimeChartView(GroupByChartView):
datamodel = MongoEngineInterface(Contact)
chart_title = "Grouped Birth contacts"
chart_type = "AreaChart"
label_columns = ContactModelView.label_columns
definitions = [
{
"group": "month_year",
"formatter": pretty_month_year,
"series": [(aggregate_count, "contact_group")],
},
{
"group": "year",
"formatter": pretty_year,
"series": [(aggregate_count, "contact_group")],
},
]
appbuilder.add_view(
GroupModelView,
"List Groups",
icon="fa-folder-open-o",
category="Contacts",
category_icon="fa-envelope",
)
appbuilder.add_view(
ContactModelView,
"List Contacts",
icon="fa-folder-open-o",
category="Contacts",
category_icon="fa-envelope",
)
appbuilder.add_view(
TagsModelView,
"List Tags",
icon="fa-folder-open-o",
category="Contacts",
category_icon="fa-envelope",
)
appbuilder.add_separator("Contacts")
appbuilder.add_view(
ContactChartView, "Contacts Chart", icon="fa-dashboard", category="Contacts"
)
appbuilder.add_view(
ContactTimeChartView,
"Contacts Birth Chart",
icon="fa-dashboard",
category="Contacts",
)
appbuilder.security_cleanup()
fill_gender()
| null |
974 |
import os
from pathlib import Path
import pytest
from envparse import env
ORIGIN_KMOD_LOCATION = Path("/lib/modules/$(uname -r)/kernel/drivers/net/bonding/bonding.ko.xz")
CUSTOM_KMOD_DIRECTORY = ORIGIN_KMOD_LOCATION.parent / "custom_module_location"
@pytest.fixture()
def custom_kmod(shell):
"""
Fixture to copy files needed to build custom kmod to the testing machine.
Clean up after.
"""
tmp_dir = "/tmp/my-test"
files = ["my_kmod.c", "Makefile"]
assert shell(f"mkdir {tmp_dir}").returncode == 0
for file in files:
assert shell(f"cp files/{file} /tmp/my-test").returncode == 0
shell("yum -y install gcc make kernel-headers kernel-devel-$(uname -r) elfutils-libelf-devel")
    # Build our own kmod from the source file that has been copied to the testing machine.
# This kmod marks the system with the P, O and E flags.
assert shell("make -C /tmp/my-test/").returncode == 0
assert shell("insmod /tmp/my-test/my_kmod.ko").returncode == 0
yield
# Clean up
assert shell("rmmod my_kmod").returncode == 0
assert shell("rm -rf /tmp/my-test/").returncode == 0
shell("yum -y remove gcc make kernel-headers kernel-devel-$(uname -r) elfutils-libelf-devel")
@pytest.fixture()
def kmod_in_different_directory(shell):
"""
This fixture moves an existing kmod to a custom location.
    Inserts the kmod from the custom location, thus mimicking a kmod that is unsupported in RHEL.
    At the end of the test it removes the loaded kernel module and moves it back to its original directory.
"""
shell(f"mkdir {CUSTOM_KMOD_DIRECTORY.as_posix()}")
shell(f"mv {ORIGIN_KMOD_LOCATION.as_posix()} {CUSTOM_KMOD_DIRECTORY.as_posix()}")
shell("depmod")
shell("modprobe bonding -v")
yield
assert shell("modprobe -r -v bonding").returncode == 0
shell(f"mv {CUSTOM_KMOD_DIRECTORY.as_posix()}/bonding.ko.xz {ORIGIN_KMOD_LOCATION.as_posix()}")
assert shell(f"rm -rf {CUSTOM_KMOD_DIRECTORY.as_posix()}").returncode == 0
shell("depmod")
@pytest.mark.test_custom_module_loaded
def test_inhibit_if_custom_module_loaded(kmod_in_different_directory, convert2rhel):
"""
This test verifies that rpmquery for detecting supported kernel modules in RHEL works correctly.
If custom module is loaded the conversion has to be inhibited.
"""
with convert2rhel(
"-y --no-rpm-va --serverurl {} --username {} --password {} --pool {} --debug".format(
env.str("RHSM_SERVER_URL"),
env.str("RHSM_USERNAME"),
env.str("RHSM_PASSWORD"),
env.str("RHSM_POOL"),
),
unregister=True,
) as c2r:
c2r.expect("ENSURE_KERNEL_MODULES_COMPATIBILITY::UNSUPPORTED_KERNEL_MODULES")
assert c2r.exitstatus != 0
@pytest.mark.test_custom_module_not_loaded
def METHOD_NAME(shell, convert2rhel):
"""
Load the kmod from custom location.
Verify that it is loaded.
    Remove the previously loaded 'custom' kmod and verify that the conversion is not inhibited.
The kmod compatibility check is right before the point of no return.
Abort the conversion right after the check.
"""
# Move the kmod to a custom location
shell(f"mkdir {CUSTOM_KMOD_DIRECTORY.as_posix()}")
shell(f"mv {ORIGIN_KMOD_LOCATION.as_posix()} {CUSTOM_KMOD_DIRECTORY.as_posix()}")
shell("depmod")
shell("modprobe bonding -v")
# Verify that it is loaded
assert "bonding" in shell("cat /proc/modules").output
# Remove the kmod and clean up
assert shell("modprobe -r -v bonding").returncode == 0
shell(f"mv {CUSTOM_KMOD_DIRECTORY.as_posix()}/bonding.ko.xz {ORIGIN_KMOD_LOCATION.as_posix()}")
assert shell(f"rm -rf {CUSTOM_KMOD_DIRECTORY.as_posix()}").returncode == 0
shell("depmod")
# If custom module is not loaded the conversion should not be inhibited.
with convert2rhel(
"--no-rpm-va --serverurl {} --username {} --password {} --pool {} --debug".format(
env.str("RHSM_SERVER_URL"),
env.str("RHSM_USERNAME"),
env.str("RHSM_PASSWORD"),
env.str("RHSM_POOL"),
),
unregister=True,
) as c2r:
c2r.expect("Continue with the system conversion?")
c2r.sendline("y")
# Stop conversion before the point of no return as we do not need to run the full conversion
assert c2r.expect("All loaded kernel modules are available in RHEL") == 0
c2r.sendcontrol("c")
assert c2r.exitstatus != 0
@pytest.mark.test_force_loaded_kmod
def test_inhibit_if_module_is_force_loaded(shell, convert2rhel):
"""
In this test case we force load kmod and verify that the convert2rhel run is inhibited.
    Force loaded kmods are denoted (FE), where F = module was force loaded and E = unsigned module was loaded.
Convert2RHEL sees force loaded kmod as tainted.
"""
# Force load the kernel module
assert shell("modprobe -f -v bonding").returncode == 0
# Check for force loaded modules being flagged FE in /proc/modules
assert "(FE)" in shell("cat /proc/modules").output
with convert2rhel("--no-rpm-va --debug") as c2r:
# We need to get past the data collection acknowledgement.
c2r.expect("Continue with the system conversion?")
c2r.sendline("y")
assert c2r.expect("TAINTED_KMODS::TAINTED_KMODS_DETECTED - Tainted kernel modules detected") == 0
c2r.sendcontrol("c")
assert c2r.exitstatus != 0
# Clean up - unload kmod and check for force loaded modules not being in /proc/modules
assert shell("modprobe -r -v bonding").returncode == 0
assert "(FE)" not in shell("cat /proc/modules").output
@pytest.mark.test_tainted_kernel
def test_tainted_kernel_inhibitor(custom_kmod, convert2rhel):
"""
    This test marks the kernel as tainted, which is not supported by convert2rhel.
    We need to install specific kernel packages to build our own custom kernel module.
"""
with convert2rhel(
"-y --no-rpm-va --serverurl {} --username {} --password {} --pool {} --debug".format(
env.str("RHSM_SERVER_URL"),
env.str("RHSM_USERNAME"),
env.str("RHSM_PASSWORD"),
env.str("RHSM_POOL"),
),
unregister=True,
) as c2r:
c2r.expect("Tainted kernel modules detected")
c2r.sendcontrol("c")
assert c2r.exitstatus != 0
@pytest.mark.test_unsupported_kmod_with_envar
def test_envar_overrides_unsupported_module_loaded(kmod_in_different_directory, convert2rhel):
"""
This test verifies that setting the environment variable "CONVERT2RHEL_ALLOW_UNAVAILABLE_KMODS"
    will override the inhibition when a kernel module unsupported in RHEL is detected.
The environment variable is set through the test metadata.
"""
with convert2rhel(
"--no-rpm-va --serverurl {} --username {} --password {} --pool {} --debug".format(
env.str("RHSM_SERVER_URL"),
env.str("RHSM_USERNAME"),
env.str("RHSM_PASSWORD"),
env.str("RHSM_POOL"),
)
) as c2r:
c2r.expect("Continue with the system conversion?")
c2r.sendline("y")
c2r.expect("Detected 'CONVERT2RHEL_ALLOW_UNAVAILABLE_KMODS' environment variable")
c2r.expect("We will continue the conversion with the following kernel modules")
c2r.sendcontrol("c")
assert c2r.exitstatus != 0
# Remove the set environment variable
del os.environ["CONVERT2RHEL_ALLOW_UNAVAILABLE_KMODS"]
| null |
975 |
from __future__ import annotations
import warnings
import pytest
from packaging.version import parse as parse_version
scipy = pytest.importorskip("scipy")
import numpy as np
import dask.array as da
import dask.array.stats
from dask.array.utils import allclose, assert_eq
from dask.delayed import Delayed
@pytest.mark.parametrize(
"kind, kwargs", [("skew", {}), ("kurtosis", {}), ("kurtosis", {"fisher": False})]
)
@pytest.mark.parametrize("single_dim", [True, False])
def test_measures(kind, kwargs, single_dim):
np.random.seed(seed=1337)
if single_dim:
x = np.random.random(size=(30,))
else:
x = np.random.random(size=(30, 2))
y = da.from_array(x, 3)
dfunc = getattr(dask.array.stats, kind)
sfunc = getattr(scipy.stats, kind)
expected = sfunc(x, **kwargs)
result = dfunc(y, **kwargs)
if np.isscalar(expected):
# make it an array to account for possible numeric errors
expected = np.array(expected)
assert_eq(result, expected)
assert isinstance(result, da.Array)
def METHOD_NAME():
x = np.random.random(size=(30, 2))
y = da.from_array(x, 3)
with pytest.raises(NotImplementedError):
dask.array.stats.skew(y, bias=False)
with pytest.raises(NotImplementedError):
dask.array.stats.kurtosis(y, bias=False)
@pytest.mark.parametrize(
"kind", ["chisquare", "power_divergence", "normaltest", "skewtest", "kurtosistest"]
)
def test_one(kind):
a = np.random.random(size=30)
a_ = da.from_array(a, 3)
dask_test = getattr(dask.array.stats, kind)
scipy_test = getattr(scipy.stats, kind)
result = dask_test(a_)
expected = scipy_test(a)
assert isinstance(result, Delayed)
assert allclose(result.compute(), expected)
@pytest.mark.parametrize(
"kind, kwargs",
[
("ttest_ind", {}),
("ttest_ind", {"equal_var": False}),
pytest.param(
"ttest_1samp",
{},
marks=pytest.mark.xfail(
                # NOTE: using nested `parse_version` calls here to handle nightly scipy releases
parse_version(parse_version(scipy.__version__).base_version)
>= parse_version("1.10.0"),
reason="https://github.com/dask/dask/issues/9499",
),
),
("ttest_rel", {}),
("chisquare", {}),
("power_divergence", {}),
("power_divergence", {"lambda_": 0}),
("power_divergence", {"lambda_": -1}),
("power_divergence", {"lambda_": "neyman"}),
],
)
def test_two(kind, kwargs):
# The sums of observed and expected frequencies must match
a = np.random.random(size=30)
b = a[::-1]
a_ = da.from_array(a, 3)
b_ = da.from_array(b, 3)
dask_test = getattr(dask.array.stats, kind)
scipy_test = getattr(scipy.stats, kind)
with warnings.catch_warnings(): # maybe overflow warning (power_divergence)
warnings.simplefilter("ignore", category=RuntimeWarning)
result = dask_test(a_, b_, **kwargs)
expected = scipy_test(a, b, **kwargs)
assert isinstance(result, Delayed)
assert allclose(result.compute(), expected)
# fails occasionally. shouldn't this be exact?
# assert dask.compute(*result) == expected
@pytest.mark.parametrize("k", range(5))
def test_moments(k):
x = np.random.random(size=(30, 2))
y = da.from_array(x, 3)
expected = scipy.stats.moment(x, k)
result = dask.array.stats.moment(y, k)
assert_eq(result, expected)
def test_anova():
np_args = [i * np.random.random(size=(30,)) for i in range(4)]
da_args = [da.from_array(x, chunks=10) for x in np_args]
result = dask.array.stats.f_oneway(*da_args)
expected = scipy.stats.f_oneway(*np_args)
assert allclose(result.compute(), expected)
@pytest.mark.parametrize(
"func, nargs",
[
(dask.array.stats.ttest_1samp, 2),
(dask.array.stats.ttest_rel, 2),
(dask.array.stats.skewtest, 1),
(dask.array.stats.kurtosis, 1),
(dask.array.stats.kurtosistest, 1),
(dask.array.stats.normaltest, 1),
(dask.array.stats.moment, 1),
],
)
@pytest.mark.parametrize("nan_policy", ["omit", "raise"])
def test_nan_raises(func, nargs, nan_policy):
with pytest.raises(NotImplementedError):
func(*(None,) * nargs, nan_policy=nan_policy)
def test_power_divergence_invalid():
a = np.random.random(size=30)
a_ = da.from_array(a, 3)
with pytest.raises(ValueError):
dask.array.stats.power_divergence(a_, lambda_="wrong")
def test_skew_raises():
a = da.ones((7,), chunks=(7,))
with pytest.raises(ValueError, match="7 samples"):
dask.array.stats.skewtest(a)
def test_skew_single_return_type():
"""This function tests the return type for the skew method for a 1d array."""
numpy_array = np.random.random(size=(30,))
dask_array = da.from_array(numpy_array, 3)
result = dask.array.stats.skew(dask_array).compute()
assert isinstance(result, np.float64)
def test_kurtosis_single_return_type():
"""This function tests the return type for the kurtosis method for a 1d array."""
numpy_array = np.random.random(size=(30,))
dask_array = da.from_array(numpy_array, 3)
result = dask.array.stats.kurtosis(dask_array).compute()
result_non_fisher = dask.array.stats.kurtosis(dask_array, fisher=False).compute()
assert isinstance(result, np.float64)
assert isinstance(result_non_fisher, np.float64)
| null |
976 |
# Use this extension for showing layer status with three leds
import pwmio
import time
from kmk.extensions import Extension, InvalidExtensionEnvironment
from kmk.keys import make_key
class statusLED(Extension):
def __init__(
self,
led_pins,
brightness=30,
brightness_step=5,
brightness_limit=100,
):
self._leds = []
for led in led_pins:
try:
self._leds.append(pwmio.PWMOut(led))
except Exception as e:
print(e)
raise InvalidExtensionEnvironment(
                    'Unable to create pwmio.PWMOut() instance with provided led_pin'
)
self._led_count = len(self._leds)
self.brightness = brightness
self._layer_last = -1
self.brightness_step = brightness_step
self.brightness_limit = brightness_limit
make_key(names=('SLED_INC',), on_press=self._key_led_inc)
make_key(names=('SLED_DEC',), on_press=self._key_led_dec)
def _layer_indicator(self, layer_active, *args, **kwargs):
'''
        Indicates the active layer with leds.
        For the time being this is just a simple consecutive single-led
        indicator; when there are more layers than leds it wraps around
        to the first led again.
(Also works for a single led, which just lights when any
layer is active)
'''
if self._layer_last != layer_active:
led_last = 0 if self._layer_last == 0 else 1 + (self._layer_last - 1) % 3
if layer_active > 0:
led_active = 0 if layer_active == 0 else 1 + (layer_active - 1) % 3
self.METHOD_NAME(self.brightness, led_active)
self.METHOD_NAME(0, led_last)
else:
self.METHOD_NAME(0, led_last)
self._layer_last = layer_active
def __repr__(self):
return f'SLED({self._to_dict()})'
def _to_dict(self):
return {
'_brightness': self.brightness,
'brightness_step': self.brightness_step,
'brightness_limit': self.brightness_limit,
}
def on_runtime_enable(self, sandbox):
return
def on_runtime_disable(self, sandbox):
return
def during_bootup(self, sandbox):
'''Light up every single led once for 200 ms'''
for i in range(self._led_count + 2):
if i < self._led_count:
self._leds[i].duty_cycle = int(self.brightness / 100 * 65535)
i_off = i - 2
if i_off >= 0 and i_off < self._led_count:
self._leds[i_off].duty_cycle = int(0)
time.sleep(0.1)
for led in self._leds:
led.duty_cycle = int(0)
return
def before_matrix_scan(self, sandbox):
return
def after_matrix_scan(self, sandbox):
self._layer_indicator(sandbox.active_layers[0])
return
def before_hid_send(self, sandbox):
return
def after_hid_send(self, sandbox):
return
def on_powersave_enable(self, sandbox):
self.METHOD_NAME(0)
return
def on_powersave_disable(self, sandbox):
        self.METHOD_NAME(self.brightness)
self._leds[2].duty_cycle = int(50 / 100 * 65535)
time.sleep(0.2)
self._leds[2].duty_cycle = int(0)
return
def METHOD_NAME(self, percent, layer_id=-1):
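        # PWM duty cycle is 16-bit (0-65535); scale the given percentage accordingly.
        # With the default layer_id of -1 all leds are set, otherwise only the led
        # at index layer_id - 1.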
if layer_id < 0:
for led in self._leds:
led.duty_cycle = int(percent / 100 * 65535)
else:
self._leds[layer_id - 1].duty_cycle = int(percent / 100 * 65535)
def increase_brightness(self, step=None):
        if not step:
            self.brightness += self.brightness_step
        else:
            self.brightness += step
        if self.brightness > 100:
            self.brightness = 100
        self.METHOD_NAME(self.brightness, self._layer_last)
def decrease_brightness(self, step=None):
        if not step:
            self.brightness -= self.brightness_step
        else:
            self.brightness -= step
        if self.brightness < 0:
            self.brightness = 0
        self.METHOD_NAME(self.brightness, self._layer_last)
def _key_led_inc(self, *args, **kwargs):
self.increase_brightness()
def _key_led_dec(self, *args, **kwargs):
self.decrease_brightness()
| null |
977 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdataworks_public.endpoint import endpoint_data
class CreateDataServiceApiRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'dataworks-public', '2020-05-18', 'CreateDataServiceApi')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ScriptDetails(self): # String
return self.get_body_params().get('ScriptDetails')
def METHOD_NAME(self, ScriptDetails): # String
self.add_body_params('ScriptDetails', ScriptDetails)
def get_RequestMethod(self): # Integer
return self.get_body_params().get('RequestMethod')
def set_RequestMethod(self, RequestMethod): # Integer
self.add_body_params('RequestMethod', RequestMethod)
def get_ApiDescription(self): # String
return self.get_body_params().get('ApiDescription')
def set_ApiDescription(self, ApiDescription): # String
self.add_body_params('ApiDescription', ApiDescription)
def get_Timeout(self): # Integer
return self.get_body_params().get('Timeout')
def set_Timeout(self, Timeout): # Integer
self.add_body_params('Timeout', Timeout)
def get_FolderId(self): # Long
return self.get_body_params().get('FolderId')
def set_FolderId(self, FolderId): # Long
self.add_body_params('FolderId', FolderId)
def get_ResourceGroupId(self): # Long
return self.get_body_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # Long
self.add_body_params('ResourceGroupId', ResourceGroupId)
def get_SqlMode(self): # Long
return self.get_body_params().get('SqlMode')
def set_SqlMode(self, SqlMode): # Long
self.add_body_params('SqlMode', SqlMode)
def get_TenantId(self): # Long
return self.get_body_params().get('TenantId')
def set_TenantId(self, TenantId): # Long
self.add_body_params('TenantId', TenantId)
def get_RequestContentType(self): # Integer
return self.get_body_params().get('RequestContentType')
def set_RequestContentType(self, RequestContentType): # Integer
self.add_body_params('RequestContentType', RequestContentType)
def get_Protocols(self): # String
return self.get_body_params().get('Protocols')
def set_Protocols(self, Protocols): # String
self.add_body_params('Protocols', Protocols)
def get_ProjectId(self): # Long
return self.get_body_params().get('ProjectId')
def set_ProjectId(self, ProjectId): # Long
self.add_body_params('ProjectId', ProjectId)
def get_ResponseContentType(self): # Integer
return self.get_body_params().get('ResponseContentType')
def set_ResponseContentType(self, ResponseContentType): # Integer
self.add_body_params('ResponseContentType', ResponseContentType)
def get_GroupId(self): # String
return self.get_body_params().get('GroupId')
def set_GroupId(self, GroupId): # String
self.add_body_params('GroupId', GroupId)
def get_ApiPath(self): # String
return self.get_body_params().get('ApiPath')
def set_ApiPath(self, ApiPath): # String
self.add_body_params('ApiPath', ApiPath)
def get_WizardDetails(self): # String
return self.get_body_params().get('WizardDetails')
def set_WizardDetails(self, WizardDetails): # String
self.add_body_params('WizardDetails', WizardDetails)
def get_ApiMode(self): # Integer
return self.get_body_params().get('ApiMode')
def set_ApiMode(self, ApiMode): # Integer
self.add_body_params('ApiMode', ApiMode)
def get_VisibleRange(self): # Integer
return self.get_body_params().get('VisibleRange')
def set_VisibleRange(self, VisibleRange): # Integer
self.add_body_params('VisibleRange', VisibleRange)
def get_RegistrationDetails(self): # String
return self.get_body_params().get('RegistrationDetails')
def set_RegistrationDetails(self, RegistrationDetails): # String
self.add_body_params('RegistrationDetails', RegistrationDetails)
def get_ApiName(self): # String
return self.get_body_params().get('ApiName')
def set_ApiName(self, ApiName): # String
self.add_body_params('ApiName', ApiName)
| null |
978 |
from __future__ import annotations
import os
import pytest
from libtbx.utils import Sorry
from xia2.cli import to_shelxcde
expected_sad_files = [
"test.hkl",
"test.sh",
]
expected_native_files = [
"test_nat.hkl",
]
expected_sad_script = """shelxc test << eof
cell 42.369499 42.369499 39.691502 90.000000 90.000000 90.000000
spag P41212
sad test.hkl
maxm 1
eof
"""
expected_sites_script = """shelxc test << eof
cell 42.369499 42.369499 39.691502 90.000000 90.000000 90.000000
spag P41212
sad test.hkl
find 10
maxm 1
eof
"""
expected_native_script = """shelxc test << eof
cell 42.369499 42.369499 39.691502 90.000000 90.000000 90.000000
spag P41212
sad test.hkl
nat test_nat.hkl
maxm 1
eof
"""
def check_output(expected_files, expected_sh_script):
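    # Every expected output file must exist; for the generated .sh script the
    # contents must match the expected shelxc command verbatim.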
for expected_file in expected_files:
assert os.path.exists(expected_file)
if ".sh" in expected_file:
with open(expected_file) as fp:
test_sh = fp.read()
assert test_sh == expected_sh_script
def test_to_shelxcde_sad(dials_data, run_in_tmp_path):
input_mtz = dials_data("x4wide_processed", pathlib=True) / (
"AUTOMATIC_DEFAULT_scaled_unmerged.mtz"
)
to_shelxcde.run(["--sad", str(input_mtz), "test"])
check_output(expected_sad_files, expected_sad_script)
def test_to_shelxcde_sad_sites(dials_data, run_in_tmp_path):
input_mtz = dials_data("x4wide_processed", pathlib=True) / (
"AUTOMATIC_DEFAULT_scaled_unmerged.mtz"
)
to_shelxcde.run(["--sad", str(input_mtz), "--sites", "10", "test"])
check_output(expected_sad_files, expected_sites_script)
def METHOD_NAME(dials_data, run_in_tmp_path):
input_mtz = dials_data("x4wide_processed", pathlib=True) / (
"AUTOMATIC_DEFAULT_scaled_unmerged.mtz"
)
to_shelxcde.run(["--sad", str(input_mtz), "--label", "SIGI", "test"])
check_output(expected_sad_files, expected_sad_script)
def test_to_shelxcde_sad_native(dials_data, run_in_tmp_path):
input_mtz = dials_data("x4wide_processed", pathlib=True) / (
"AUTOMATIC_DEFAULT_scaled_unmerged.mtz"
)
to_shelxcde.run(["--sad", str(input_mtz), "--nat", str(input_mtz), "test"])
check_output(expected_sad_files + expected_native_files, expected_native_script)
def test_to_shelxcde_missing_input_file(dials_data, run_in_tmp_path):
with pytest.raises(SystemExit):
to_shelxcde.run(["tmp"])
def test_to_shelxcde_missing_prefix(dials_data, run_in_tmp_path):
input_mtz = dials_data("x4wide_processed", pathlib=True) / (
"AUTOMATIC_DEFAULT_scaled_unmerged.mtz"
)
with pytest.raises(SystemExit):
to_shelxcde.run(["--sad", str(input_mtz)])
def test_to_shelxcde_invalid_args_sad_mad(dials_data, run_in_tmp_path):
input_mtz = dials_data("x4wide_processed", pathlib=True) / (
"AUTOMATIC_DEFAULT_scaled_unmerged.mtz"
)
with pytest.raises(SystemExit):
to_shelxcde.run(["--sad", str(input_mtz), "--mad", str(input_mtz), "tmp"])
def test_to_shelxcde_invalid_args_sad_peak(dials_data, run_in_tmp_path):
input_mtz = dials_data("x4wide_processed", pathlib=True) / (
"AUTOMATIC_DEFAULT_scaled_unmerged.mtz"
)
with pytest.raises(SystemExit):
to_shelxcde.run(["--sad", str(input_mtz), "--peak", str(input_mtz), "tmp"])
def test_to_shelxcde_invalid_args_mad_label(dials_data, run_in_tmp_path):
input_mtz = dials_data("x4wide_processed", pathlib=True) / (
"AUTOMATIC_DEFAULT_scaled_unmerged.mtz"
)
with pytest.raises(SystemExit):
to_shelxcde.run(["--mad", str(input_mtz), "--label", "invalid", "tmp"])
def test_to_shelxcde_invalid_input_file(dials_data, run_in_tmp_path):
with pytest.raises(Sorry):
to_shelxcde.run(["--sad", "invalid_file", "tmp"])
def test_to_shelxcde_invalid_label(dials_data, run_in_tmp_path):
input_mtz = dials_data("x4wide_processed", pathlib=True) / (
"AUTOMATIC_DEFAULT_scaled_unmerged.mtz"
)
with pytest.raises(ValueError):
to_shelxcde.run(["--sad", str(input_mtz), "--label", "invalid", "test"])
| null |
979 |
from unittest.mock import AsyncMock, MagicMock
from fastapi import HTTPException
from mock import patch
import pytest
import pytest_asyncio
from tests_ma.test_api.conftest import create_test_user
from models.schemas.airlock_request import AirlockRequestInCreate
from models.domain.airlock_request import AirlockRequest, AirlockRequestStatus, AirlockRequestType
from db.repositories.airlock_requests import AirlockRequestRepository
from db.errors import EntityDoesNotExist
from azure.cosmos.exceptions import CosmosResourceNotFoundError, CosmosAccessConditionFailedError
pytestmark = pytest.mark.asyncio
WORKSPACE_ID = "abc000d3-82da-4bfc-b6e9-9a7853ef753e"
AIRLOCK_REQUEST_ID = "ce45d43a-e734-469a-88a0-109faf4a611f"
DRAFT = AirlockRequestStatus.Draft
SUBMITTED = AirlockRequestStatus.Submitted
IN_REVIEW = AirlockRequestStatus.InReview
APPROVED_IN_PROGRESS = AirlockRequestStatus.ApprovalInProgress
APPROVED = AirlockRequestStatus.Approved
REJECTION_IN_PROGRESS = AirlockRequestStatus.RejectionInProgress
REJECTED = AirlockRequestStatus.Rejected
CANCELLED = AirlockRequestStatus.Cancelled
BLOCKING_IN_PROGRESS = AirlockRequestStatus.BlockingInProgress
BLOCKED = AirlockRequestStatus.Blocked
FAILED = AirlockRequestStatus.Failed
ALL_STATUSES = [enum.value for enum in AirlockRequestStatus]
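# Maps each current request status to the statuses it may legally transition to;
# used below to derive both the allowed and the forbidden transition test cases.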
ALLOWED_STATUS_CHANGES = {
DRAFT: [SUBMITTED, CANCELLED, FAILED],
SUBMITTED: [IN_REVIEW, BLOCKING_IN_PROGRESS, FAILED],
IN_REVIEW: [APPROVED_IN_PROGRESS, REJECTION_IN_PROGRESS, CANCELLED, FAILED],
APPROVED_IN_PROGRESS: [APPROVED, FAILED],
APPROVED: [],
REJECTION_IN_PROGRESS: [REJECTED, FAILED],
REJECTED: [],
CANCELLED: [],
BLOCKING_IN_PROGRESS: [BLOCKED, FAILED],
BLOCKED: [],
FAILED: [],
}
@pytest_asyncio.fixture
async def airlock_request_repo():
with patch('db.repositories.base.BaseRepository._get_container', return_value=AsyncMock()):
with patch('azure.cosmos.CosmosClient') as cosmos_client_mock:
airlock_request_repo_mock = await AirlockRequestRepository.create(cosmos_client_mock)
yield airlock_request_repo_mock
@pytest.fixture
def METHOD_NAME():
return AirlockRequestInCreate(type=AirlockRequestType.Import, businessJustification="Some business justification")
@pytest.fixture
def verify_dictionary_contains_all_enum_values():
for status in ALL_STATUSES:
if status not in ALLOWED_STATUS_CHANGES:
raise Exception(f"Status '{status}' was not added to the ALLOWED_STATUS_CHANGES dictionary")
def airlock_request_mock(status=AirlockRequestStatus.Draft):
airlock_request = AirlockRequest(
id=AIRLOCK_REQUEST_ID,
workspaceId=WORKSPACE_ID,
type=AirlockRequestType.Import,
files=[],
businessJustification="some test reason",
status=status,
reviews=[]
)
return airlock_request
def get_allowed_status_changes():
for current_status, allowed_new_statuses in ALLOWED_STATUS_CHANGES.items():
for new_status in allowed_new_statuses:
yield current_status, new_status
def get_forbidden_status_changes():
for current_status, allowed_new_statuses in ALLOWED_STATUS_CHANGES.items():
forbidden_new_statuses = list(set(ALL_STATUSES) - set(allowed_new_statuses))
for new_status in forbidden_new_statuses:
yield current_status, new_status
async def test_get_airlock_request_by_id(airlock_request_repo):
airlock_request = airlock_request_mock()
airlock_request_repo.read_item_by_id = AsyncMock(return_value=airlock_request)
actual_service = await airlock_request_repo.get_airlock_request_by_id(AIRLOCK_REQUEST_ID)
assert actual_service == airlock_request
async def test_get_airlock_request_by_id_raises_entity_does_not_exist_if_no_such_request_id(airlock_request_repo):
airlock_request_repo.read_item_by_id = AsyncMock()
airlock_request_repo.read_item_by_id.side_effect = CosmosResourceNotFoundError
with pytest.raises(EntityDoesNotExist):
await airlock_request_repo.get_airlock_request_by_id(AIRLOCK_REQUEST_ID)
async def test_create_airlock_request_item_creates_an_airlock_request_with_the_right_values(METHOD_NAME, airlock_request_repo):
airlock_request_item_to_create = METHOD_NAME
created_by_user = {'id': 'test_user_id'}
airlock_request = airlock_request_repo.create_airlock_request_item(airlock_request_item_to_create, WORKSPACE_ID, created_by_user)
assert airlock_request.workspaceId == WORKSPACE_ID
assert airlock_request.createdBy['id'] == 'test_user_id'
@pytest.mark.parametrize("current_status, new_status", get_allowed_status_changes())
async def test_update_airlock_request_with_allowed_new_status_should_update_request_status(airlock_request_repo, current_status, new_status, verify_dictionary_contains_all_enum_values):
user = create_test_user()
mock_existing_request = airlock_request_mock(status=current_status)
airlock_request = await airlock_request_repo.update_airlock_request(mock_existing_request, user, new_status)
assert airlock_request.status == new_status
@pytest.mark.parametrize("current_status, new_status", get_forbidden_status_changes())
async def test_update_airlock_request_with_forbidden_status_should_fail_on_validation(airlock_request_repo, current_status, new_status, verify_dictionary_contains_all_enum_values):
user = create_test_user()
mock_existing_request = airlock_request_mock(status=current_status)
with pytest.raises(HTTPException):
await airlock_request_repo.update_airlock_request(mock_existing_request, user, new_status)
@patch("db.repositories.airlock_requests.AirlockRequestRepository.update_airlock_request_item", side_effect=[CosmosAccessConditionFailedError, None])
@patch("db.repositories.airlock_requests.AirlockRequestRepository.get_airlock_request_by_id", return_value=airlock_request_mock(status=DRAFT))
async def test_update_airlock_request_should_retry_update_when_etag_is_not_up_to_date(_, update_airlock_request_item_mock, airlock_request_repo):
expected_update_attempts = 2
user = create_test_user()
mock_existing_request = airlock_request_mock(status=DRAFT)
await airlock_request_repo.update_airlock_request(original_request=mock_existing_request, updated_by=user, new_status=SUBMITTED)
assert update_airlock_request_item_mock.call_count == expected_update_attempts
async def test_get_airlock_requests_queries_db(airlock_request_repo):
airlock_request_repo.container.query_items = MagicMock()
expected_query = airlock_request_repo.airlock_requests_query() + f' WHERE c.workspaceId = "{WORKSPACE_ID}"'
expected_parameters = [
{"name": "@user_id", "value": None},
{"name": "@status", "value": None},
{"name": "@type", "value": None},
]
await airlock_request_repo.get_airlock_requests(WORKSPACE_ID)
airlock_request_repo.container.query_items.assert_called_once_with(query=expected_query, parameters=expected_parameters, enable_cross_partition_query=True)
| null |
980 |
# Copyright 2017-2022 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
from typing import NamedTuple
import logging
Persistence = Enum('Persistence', 'TRANSIENT, PERSISTENT')
Deployment = NamedTuple('Deployment', [('name', str),
('replicas', int),
('labels', dict)])
Pod = NamedTuple('Pod', [('name', str),
('namespace', str),
('node_name', str),
('reserved', bool)])
Instance = NamedTuple('Instance', [('name', str),
('persistence', Persistence)])
Node = NamedTuple('Node',
[('name', str),
('persistence', Persistence),
('reserved', bool),
('used', bool)])
Condition = Enum('Condition', 'MEMORY_PRESSURE, DISK_PRESSURE, PID_PRESSURE, READY')
class DeploymentsContainer:
def __init__(self, deployments: [Deployment], pods: [Pod]):
self._deployments: [Deployment] = deployments
self._replicas_number: int = 0
self._pods: [Pod] = pods
self._pod_names: [str] = []
self._reserved_pod_names: [str] = []
self._unreserved_pod_names: [str] = []
for deployment in self._deployments:
self._replicas_number += deployment.replicas
for pod in self._pods:
self._pod_names.append(pod.name)
if pod.reserved:
self._reserved_pod_names.append(pod.name)
else:
self._unreserved_pod_names.append(pod.name)
@property
def deployments(self):
return self._deployments
@property
def pods(self):
return self._pods
@property
def pod_names(self):
return self._pod_names
@property
def replicas_number(self):
return self._replicas_number
def log(self):
logging.info("""Deployments summary:
%s target replicas
%s target pods
%s reserved %s
%s unreserved %s""",
self._replicas_number,
len(self._pods),
len(self._reserved_pod_names), self._reserved_pod_names,
len(self._unreserved_pod_names), self._unreserved_pod_names)
class NodesContainer:
def __init__(self, cluster_nodes_number: int, nodes: [Node]):
self._cluster_nodes_number: int = cluster_nodes_number
self._target_nodes: [Node] = nodes
self._non_target_nodes_number: int = 0
self._transient_nodes: [Node] = []
self._transient_used_node_names: [str] = []
self._transient_unused_node_names: [str] = []
self._transient_reserved_node_names: [str] = []
self._static_nodes: [Node] = []
self._static_used_node_names: [str] = []
self._static_unused_node_names: [str] = []
self._manageable_nodes: [Node] = []
self._non_target_nodes_number = self._cluster_nodes_number - len(self._target_nodes)
for node in self._target_nodes:
if node.persistence == Persistence.TRANSIENT:
self._transient_nodes.append(node)
if node.used:
self._transient_used_node_names.append(node.name)
else:
self._transient_unused_node_names.append(node.name)
if node.reserved:
self._transient_reserved_node_names.append(node.name)
else:
self._manageable_nodes.append(node)
if node.persistence == Persistence.PERSISTENT:
self._static_nodes.append(node)
if node.used:
self._static_used_node_names.append(node.name)
else:
self._static_unused_node_names.append(node.name)
@property
def METHOD_NAME(self):
return self._target_nodes
@property
def non_target_nodes_number(self):
return self._non_target_nodes_number
@property
def nodes_number(self):
return len(self._target_nodes)
@property
def target_node_names(self):
return [node.name for node in self._target_nodes]
@property
def manageable_nodes(self):
return self._manageable_nodes
@property
def transient_nodes(self):
return self._transient_nodes
def log(self):
logging.info("""Nodes summary:
%s cluster nodes
%s target
%s transient
%s used %s
%s unused %s
%s reserved %s
%s static
%s used %s
%s unused %s
%s non target""",
self._cluster_nodes_number,
len(self._target_nodes),
len(self._transient_nodes),
len(self._transient_used_node_names), self._transient_used_node_names,
len(self._transient_unused_node_names), self._transient_unused_node_names,
len(self._transient_reserved_node_names), self._transient_reserved_node_names,
len(self._static_nodes),
len(self._static_used_node_names), self._static_used_node_names,
len(self._static_unused_node_names), self._static_unused_node_names,
self._non_target_nodes_number)
class InstancesContainer:
def __init__(self, instances: [Instance]):
self._instances: [Instance] = instances
self._transient_instances: [Instance] = []
self._transient_instance_names: [str] = []
self._persistent_instances: [Instance] = []
self._persistent_instance_names: [str] = []
for instance in self._instances:
if instance.persistence == Persistence.TRANSIENT:
self._transient_instances.append(instance)
self._transient_instance_names.append(instance.name)
if instance.persistence == Persistence.PERSISTENT:
self._persistent_instances.append(instance)
self._persistent_instance_names.append(instance.name)
@property
def transient_instances(self):
return self._transient_instances
@property
def transient_instance_names(self):
return [instance.name for instance in self._transient_instances]
def log(self):
logging.info("""Instances summary:
%s target instances
%s transient %s
%s static %s""",
len(self._instances),
len(self._transient_instance_names), self._transient_instance_names,
len(self._persistent_instance_names), self._persistent_instance_names)
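# --- Hedged illustration (not part of the original module) ---
# A minimal sketch, using only the NamedTuples and containers defined above, of
# how the three containers are assembled from cluster state and summarised via
# log(). All values are made up.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    deployments = DeploymentsContainer(
        deployments=[Deployment(name='api', replicas=2, labels={'app': 'api'})],
        pods=[Pod(name='api-0', namespace='default', node_name='node-a', reserved=False)])
    nodes = NodesContainer(
        cluster_nodes_number=3,
        nodes=[Node(name='node-a', persistence=Persistence.TRANSIENT, reserved=False, used=True)])
    instances = InstancesContainer(
        instances=[Instance(name='node-a', persistence=Persistence.TRANSIENT)])
    deployments.log()
    nodes.log()
    instances.log()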
| null |
981 |
##########################################################################
#
# Copyright (c) 2008-2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import imath
import IECore
import IECoreScene
class TestTransformOp( unittest.TestCase ) :
def METHOD_NAME( self ) :
o = IECoreScene.TransformOp()
self.assertEqual( o["primVarsToModify"].getValue(), IECore.StringVectorData( [ "P", "N" ] ) )
	def testTransformation( self ) :
m = IECoreScene.MeshPrimitive.createBox( imath.Box3f( imath.V3f( -1 ), imath.V3f( 1 ) ) )
IECoreScene.MeshNormalsOp()( input = m, copyInput = False )
m["vel"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.V3fVectorData( [ imath.V3f( 0.5 ) ] * 8, IECore.GeometricData.Interpretation.Vector ) )
m["notVel"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.V3fVectorData( [ imath.V3f( 0.5 ) ] * 8 ) )
mt = IECoreScene.TransformOp()( input=m, primVarsToModify = IECore.StringVectorData( ["N", "P", "vel", "notVel"] ), matrix = IECore.M44fData( imath.M44f().translate( imath.V3f( 1 ) ) ) )
self.assertEqual( mt.bound(), imath.Box3f( imath.V3f( 0 ), imath.V3f( 2 ) ) )
self.assertEqual( mt["P"].data, IECore.V3fVectorData( [ x + imath.V3f( 1 ) for x in m["P"].data ], IECore.GeometricData.Interpretation.Point ) )
self.assertEqual( mt["N"].data, m["N"].data )
self.assertEqual( mt["vel"].data, m["vel"].data )
self.assertEqual( mt["notVel"].data, m["notVel"].data )
ms = IECoreScene.TransformOp()( input=m, primVarsToModify = IECore.StringVectorData( ["N", "P", "vel", "notVel"] ), matrix = IECore.M44fData( imath.M44f().scale( imath.V3f( 1, 2, 3 ) ) ) )
self.assertEqual( ms.bound(), imath.Box3f( imath.V3f( -1, -2, -3 ), imath.V3f( 1, 2, 3 ) ) )
self.assertEqual( ms["P"].data, IECore.V3fVectorData( [ x * imath.V3f( 1, 2, 3 ) for x in m["P"].data ], IECore.GeometricData.Interpretation.Point ) )
self.assertNotEqual( ms["N"].data, m["N"].data )
self.assertNotEqual( ms["N"].data, IECore.V3fVectorData( [ x * imath.V3f( 1, 2, 3 ) for x in m["N"].data ], IECore.GeometricData.Interpretation.Normal ) )
self.assertEqual( ms["vel"].data, IECore.V3fVectorData( [ x * imath.V3f( 1, 2, 3 ) for x in m["vel"].data ], IECore.GeometricData.Interpretation.Vector ) )
self.assertEqual( ms["notVel"].data, m["notVel"].data )
self.assertEqual( ms["P"].data.getInterpretation(), IECore.GeometricData.Interpretation.Point )
self.assertEqual( ms["N"].data.getInterpretation(), IECore.GeometricData.Interpretation.Normal )
self.assertEqual( ms["vel"].data.getInterpretation(), IECore.GeometricData.Interpretation.Vector )
self.assertEqual( ms["notVel"].data.getInterpretation(), IECore.GeometricData.Interpretation.None_ )
def testPrimVarParameter( self ) :
m = IECoreScene.MeshPrimitive.createBox( imath.Box3f( imath.V3f( -1 ), imath.V3f( 1 ) ) )
IECoreScene.MeshNormalsOp()( input = m, copyInput = False )
m["vel"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.V3fVectorData( [ imath.V3f( 0.5 ) ] * 8, IECore.GeometricData.Interpretation.Vector ) )
m["notVel"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.V3fVectorData( [ imath.V3f( 0.5 ) ] * 8 ) )
ms = IECoreScene.TransformOp()( input=m, primVarsToModify = IECore.StringVectorData( [ "P", "vel" ] ), matrix = IECore.M44fData( imath.M44f().scale( imath.V3f( 1, 2, 3 ) ) ) )
self.assertEqual( ms.bound(), imath.Box3f( imath.V3f( -1, -2, -3 ), imath.V3f( 1, 2, 3 ) ) )
self.assertEqual( ms["P"].data, IECore.V3fVectorData( [ x * imath.V3f( 1, 2, 3 ) for x in m["P"].data ], IECore.GeometricData.Interpretation.Point ) )
self.assertEqual( ms["N"].data, m["N"].data )
self.assertEqual( ms["vel"].data, IECore.V3fVectorData( [ x * imath.V3f( 1, 2, 3 ) for x in m["vel"].data ], IECore.GeometricData.Interpretation.Vector ) )
self.assertEqual( ms["notVel"].data, m["notVel"].data )
ms = IECoreScene.TransformOp()( input=m, primVarsToModify = IECore.StringVectorData( [ "P" ] ), matrix = IECore.M44fData( imath.M44f().scale( imath.V3f( 1, 2, 3 ) ) ) )
self.assertEqual( ms.bound(), imath.Box3f( imath.V3f( -1, -2, -3 ), imath.V3f( 1, 2, 3 ) ) )
self.assertEqual( ms["P"].data, IECore.V3fVectorData( [ x * imath.V3f( 1, 2, 3 ) for x in m["P"].data ], IECore.GeometricData.Interpretation.Point ) )
self.assertEqual( ms["N"].data, m["N"].data )
self.assertEqual( ms["N"].data, m["N"].data )
self.assertEqual( ms["notVel"].data, m["notVel"].data )
def testSamePrimVars( self ) :
m = IECoreScene.MeshPrimitive.createBox( imath.Box3f( imath.V3f( -1 ), imath.V3f( 1 ) ) )
IECoreScene.MeshNormalsOp()( input = m, copyInput = False )
m["vel"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.V3fVectorData( [ imath.V3f( 0.5 ) ] * 8, IECore.GeometricData.Interpretation.Vector ) )
m["sameVel"] = m["vel"]
ms = IECoreScene.TransformOp()( input=m, primVarsToModify = IECore.StringVectorData( [ "vel", "sameVel" ] ), matrix = IECore.M44fData( imath.M44f().scale( imath.V3f( 1, 2, 3 ) ) ) )
self.assertEqual( ms["vel"].data, IECore.V3fVectorData( [ x * imath.V3f( 1, 2, 3 ) for x in m["vel"].data ], IECore.GeometricData.Interpretation.Vector ) )
self.assertEqual( ms["vel"].data, ms["sameVel"].data )
def testIdenticalPrimVarsCanBeExcluded( self ) :
m = IECoreScene.MeshPrimitive.createBox( imath.Box3f( imath.V3f( -1 ), imath.V3f( 1 ) ) )
IECoreScene.MeshNormalsOp()( input = m, copyInput = False )
m["vel"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.V3fVectorData( [ imath.V3f( 0.5 ) ] * 8, IECore.GeometricData.Interpretation.Vector ) )
m["otherVel"] = m["vel"]
ms = IECoreScene.TransformOp()( input=m, primVarsToModify = IECore.StringVectorData( [ "vel" ] ), matrix = IECore.M44fData( imath.M44f().scale( imath.V3f( 1, 2, 3 ) ) ) )
self.assertEqual( ms["vel"].data, IECore.V3fVectorData( [ x * imath.V3f( 1, 2, 3 ) for x in m["vel"].data ], IECore.GeometricData.Interpretation.Vector ) )
self.assertNotEqual( ms["vel"].data, ms["otherVel"].data )
self.assertEqual( ms["otherVel"].data, m["otherVel"].data )
if __name__ == "__main__":
unittest.main()
| null |
982 |
import urllib.parse
from codecs import StreamWriter
from typing import IO, Any, Dict, Iterator, Optional, TextIO, Union, cast
from rdflib import Graph
from rdflib.query import ResultRow
from ruamel.yaml.comments import CommentedMap
from schema_salad.jsonld_context import makerdf
from schema_salad.utils import ContextType
from .cwlviewer import CWLViewer
from .process import Process
def gather(tool: Process, ctx: ContextType) -> Graph:
g = Graph()
def visitor(t: CommentedMap) -> None:
makerdf(t["id"], t, ctx, graph=g)
tool.visit(visitor)
return g
def printrdf(wflow: Process, ctx: ContextType, style: str) -> str:
"""Serialize the CWL document into a string, ready for printing."""
rdf = gather(wflow, ctx).serialize(format=style, encoding="utf-8")
if not rdf:
return ""
return rdf.decode("utf-8")
def lastpart(uri: Any) -> str:
uri2 = str(uri)
if "/" in uri2:
return uri2[uri2.rindex("/") + 1 :]
return uri2
def dot_with_parameters(g: Graph, stdout: Union[TextIO, StreamWriter]) -> None:
qres = cast(
Iterator[ResultRow],
g.query(
"""SELECT ?step ?run ?runtype
WHERE {
?step cwl:run ?run .
?run rdf:type ?runtype .
}"""
),
) # ResultRow because the query is of type SELECT
for step, run, _ in qres:
stdout.write(
'"{}" [label="{}"]\n'.format(lastpart(step), f"{lastpart(step)} ({lastpart(run)})")
)
qres = cast(
Iterator[ResultRow],
g.query(
"""SELECT ?step ?inp ?source
WHERE {
?wf Workflow:steps ?step .
?step cwl:inputs ?inp .
?inp cwl:source ?source .
}"""
),
) # ResultRow because the query is of type SELECT
for step, inp, source in qres:
stdout.write('"%s" [shape=box]\n' % (lastpart(inp)))
stdout.write('"{}" -> "{}" [label="{}"]\n'.format(lastpart(source), lastpart(inp), ""))
stdout.write('"{}" -> "{}" [label="{}"]\n'.format(lastpart(inp), lastpart(step), ""))
qres = cast(
Iterator[ResultRow],
g.query(
"""SELECT ?step ?out
WHERE {
?wf Workflow:steps ?step .
?step cwl:outputs ?out .
}"""
),
) # ResultRow because the query is of type SELECT
for step, out in qres:
stdout.write('"%s" [shape=box]\n' % (lastpart(out)))
stdout.write('"{}" -> "{}" [label="{}"]\n'.format(lastpart(step), lastpart(out), ""))
qres = cast(
Iterator[ResultRow],
g.query(
"""SELECT ?out ?source
WHERE {
?wf cwl:outputs ?out .
?out cwl:source ?source .
}"""
),
) # ResultRow because the query is of type SELECT
for out, source in qres:
stdout.write('"%s" [shape=octagon]\n' % (lastpart(out)))
stdout.write('"{}" -> "{}" [label="{}"]\n'.format(lastpart(source), lastpart(out), ""))
qres = cast(
Iterator[ResultRow],
g.query(
"""SELECT ?inp
WHERE {
?wf rdf:type cwl:Workflow .
?wf cwl:inputs ?inp .
}"""
),
) # ResultRow because the query is of type SELECT
for (inp,) in qres:
stdout.write('"%s" [shape=octagon]\n' % (lastpart(inp)))
def METHOD_NAME(g: Graph, stdout: Union[TextIO, StreamWriter]) -> None:
dotname: Dict[str, str] = {}
clusternode = {}
stdout.write("compound=true\n")
subworkflows = set()
qres = cast(
Iterator[ResultRow],
g.query(
"""SELECT ?run
WHERE {
?wf rdf:type cwl:Workflow .
?wf Workflow:steps ?step .
?step cwl:run ?run .
?run rdf:type cwl:Workflow .
} ORDER BY ?wf"""
),
) # ResultRow because the query is of type SELECT
for (run,) in qres:
subworkflows.add(run)
qres = cast(
Iterator[ResultRow],
g.query(
"""SELECT ?wf ?step ?run ?runtype
WHERE {
?wf rdf:type cwl:Workflow .
?wf Workflow:steps ?step .
?step cwl:run ?run .
?run rdf:type ?runtype .
} ORDER BY ?wf"""
),
) # ResultRow because the query is of type SELECT
currentwf: Optional[str] = None
for wf, step, _run, runtype in qres:
if step not in dotname:
dotname[step] = lastpart(step)
if wf != currentwf:
if currentwf is not None:
stdout.write("}\n")
if wf in subworkflows:
if wf not in dotname:
dotname[wf] = "cluster_" + lastpart(wf)
stdout.write(f'subgraph "{dotname[wf]}" {{ label="{lastpart(wf)}"\n') # noqa: B907
currentwf = wf
clusternode[wf] = step
else:
currentwf = None
if str(runtype) != "https://w3id.org/cwl/cwl#Workflow":
stdout.write(
f'"{dotname[step]}" [label="{urllib.parse.urldefrag(str(step))[1]}"]\n' # noqa: B907
)
if currentwf is not None:
stdout.write("}\n")
qres = cast(
Iterator[ResultRow],
g.query(
"""SELECT DISTINCT ?src ?sink ?srcrun ?sinkrun
WHERE {
?wf1 Workflow:steps ?src .
?wf2 Workflow:steps ?sink .
?src cwl:out ?out .
?inp cwl:source ?out .
?sink cwl:in ?inp .
?src cwl:run ?srcrun .
?sink cwl:run ?sinkrun .
}"""
),
) # ResultRow because the query is of type SELECT
for src, sink, srcrun, sinkrun in qres:
attr = ""
if srcrun in clusternode:
attr += 'ltail="%s"' % dotname[srcrun]
src = clusternode[srcrun]
if sinkrun in clusternode:
attr += ' lhead="%s"' % dotname[sinkrun]
sink = clusternode[sinkrun]
stdout.write(f'"{dotname[src]}" -> "{dotname[sink]}" [{attr}]\n') # noqa: B907
def printdot(
wf: Process,
ctx: ContextType,
stdout: IO[str],
) -> None:
cwl_viewer: CWLViewer = CWLViewer(printrdf(wf, ctx, "n3"))
stdout.write(cwl_viewer.dot().replace(f"{wf.metadata['id']}#", ""))
| null |
983 |
# Copyright 2017-2023 Posit Software, PBC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from guild import __pkgdir__
from guild import entry_point_util
_plugins = entry_point_util.EntryPointResources("guild.plugins", "plugin")
class NotSupported(Exception):
pass
class ModelOpResolutionError(Exception):
pass
class Plugin:
"""Abstract interface for a Guild plugin."""
name = None
provides = []
resolve_model_op_priority = 100
sourcecode_select_rules_priority = 100
def __init__(self, ep):
self.name = ep.name
self.log = logging.getLogger("guild." + self.name)
def guildfile_data(self, data, src):
"""Called before data is used to initialize a Guildfile.
Plugins may use this callback to mutate data before it's used.
To modify a Guild file after it's been loaded, use
`guildfile_loaded`.
"""
def guildfile_loaded(self, gf):
"""Called immediately after a Guild file is loaded.
Plugins may use this callback to modify a Guild file after
it's been loaded. To modify the data before it's used to load
the Guild file, use `guildfile_data`.
"""
def enabled_for_op(self, opdef):
# pylint: disable=unused-argument
"""Returns a tuple of boolean and reason.
The boolean indicates whether or not the plugin is enabled for
`opdef`. The reason is used to provide additional information to the
user.
When a plugin is enabled for an operation, it participates in
the following:
- Listed in `GUILD_PLUGINS` run env
- Listed in `plugins` run attr
- Called for `default_sourcecode_select_rules_for_op()`
- Called for `apply_cmd_env()`
- Called for `run_starting()`
- Called for `run_stopping()`
If a plugin does not return True for an op def, it will not be
listed as a plugin for the run and will not be called for the
functions above.
"""
return False, "not applicable to operation"
def patch_env(self):
"""Called to let the plugin patch the Python environment."""
def resolve_model_op(self, opspec):
# pylint: disable=unused-argument
"""Return a tuple of model, op_name for opspec.
If opspec cannot be resolved to a model, the function should
return None.
"""
return None
def resource_source_for_data(self, data, resdef):
# pylint: disable=unused-argument
"""Return an instance of `guild.resourcedef.ResourceSource` for data.
Return None if data is not supported as a resource source.
"""
return None
def resolver_class_for_source(self, source):
# pylint: disable=unused-argument
"""Return a class (or factory) for a resolver suitable for `source`.
`source` is an instance of `guild.resourcedef.ResourceSource`.
Return None if resolution for the source is not supported by the plugin.
"""
return None
def run_starting(self, run, op, pidfile):
"""Called when a run is starting.
This called immediately before the run is marked as started.
`pidfile` is provided when the run is started in the background. The
plugin should poll the pidfile for creation and deletion to infer the
run process start and end. `run_stopped()` is not called when a pidfile
is provided.
If `pidfile` is None, the operation is run in the foreground and
`run_stopped()` is called, provided the parent process shuts down
cleanly.
"""
def run_stopped(self, run, op, exit_code):
"""Called when a run stops.
`exit_code` is the numeric process exit code. 0 indicates that the
process ended normally while a non-zero value indicates that an error
occurred. The exit code is determined by the run process.
This function is not called if the operation is started in the
background (see `run_starting()` for details).
"""
def apply_cmd_env(self, op, cmd_env):
"""Called in preparation of an operation command environment.
Plugins should implement this to provide operation specific environment
variables for use in a run.
"""
def default_sourcecode_select_rules_for_op(self, opdef):
# pylint: disable=unused-argument
"""Returns a default list of source code select rules for an operation.
This is called only when the plugin is enabled for the operation and Guild.
"""
return []
def iter_plugins():
return iter(_plugins)
def for_name(name):
return _plugins.one_for_name(name)
def METHOD_NAME():
_plugins.set_path([__pkgdir__])
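# --- Hedged illustration (not part of the original module) ---
# A minimal sketch of a concrete plugin built on the abstract interface above.
# The entry-point wiring is assumed rather than shown; `FakeEntryPoint` stands
# in for the object the plugin registry would normally provide.
class FakeEntryPoint:
    name = "example"
class ExamplePlugin(Plugin):
    def enabled_for_op(self, opdef):
        # Enable only for operations whose name starts with "train".
        if getattr(opdef, "name", "").startswith("train"):
            return True, "training operation"
        return False, "not a training operation"
    def apply_cmd_env(self, op, cmd_env):
        # Expose an operation-specific environment variable to the run.
        cmd_env["EXAMPLE_PLUGIN"] = "1"
_example_plugin = ExamplePlugin(FakeEntryPoint())
_example_plugin.log.debug("example plugin initialised")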
| null |
984 |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tools.android.emulator.xserver."""
import os
import subprocess
import tempfile
from tools.android.emulator import resources
from google.apputils import basetest as googletest
from tools.android.emulator import xserver
class X11ServerTest(googletest.TestCase):
def setUp(self):
self.x11 = None
def tearDown(self):
if self.x11 is not None:
self.x11.Kill()
def testXServerDisplayResolution(self):
self.x11 = xserver.X11Server(
resources.GetRunfilesDir(), tempfile.mkdtemp(), 225, 300)
self.x11.Start()
self.assertEquals('225x300', self._GetXRes(self.x11.environment))
def testXServerKill(self):
self.x11 = xserver.X11Server(
resources.GetRunfilesDir(), tempfile.mkdtemp(), 200, 400)
self.x11.Start()
# should not throw.
self._GetXRes(self.x11.environment)
env = self.x11.environment
self.x11.Kill()
try:
self._GetXRes(env)
self.fail('X not being killed')
except subprocess.CalledProcessError as unused_expected:
pass
def METHOD_NAME(self):
self.x11 = xserver.X11Server(
resources.GetRunfilesDir(), tempfile.mkdtemp(), 200, 400)
self.x11._xvfb_bin = '/bin/false'
try:
self.x11.Start()
self.fail('should crash')
except xserver.ProcessCrashedError as unused_expected:
pass
def testDoubleStartNoOp(self):
self.x11 = xserver.X11Server(
resources.GetRunfilesDir(), tempfile.mkdtemp(), 200, 400)
self.x11.Start()
env = self.x11.environment
self.x11.Start()
env_2 = self.x11.environment
self.assertEquals(env, env_2)
def testIsRunningBeforeStart(self):
self.x11 = xserver.X11Server(
resources.GetRunfilesDir(), tempfile.mkdtemp(), 225, 300)
self.assertFalse(self.x11.IsRunning())
self.assertFalse(self.x11.IsRunning()) # Doesn't change after invocation.
def testIsRunningAfterKill(self):
self.x11 = xserver.X11Server(
resources.GetRunfilesDir(), tempfile.mkdtemp(), 225, 300)
self.x11.Start()
self.x11.Kill()
self.assertFalse(self.x11.IsRunning())
self.assertFalse(self.x11.IsRunning()) # Doesn't change after invocation.
def testIsRunningWhenRunning(self):
self.x11 = xserver.X11Server(
resources.GetRunfilesDir(), tempfile.mkdtemp(), 225, 300)
self.x11.Start()
self.assertTrue(self.x11.IsRunning())
self.assertTrue(self.x11.IsRunning()) # Doesn't change after invocation.
def testStartWithZeroTimeoutTimesOut(self):
self.x11 = xserver.X11Server(
resources.GetRunfilesDir(), tempfile.mkdtemp(), 225, 300)
try:
self.x11.Start(wait_until_up_sec=0)
self.fail('should timeout')
except xserver.TimeoutError as unused_expected:
pass
# Uncomment this test to test performance of xvfb.
# The 100 cycles take 211 seconds as of cl/98458300, 2015/07/16.
# def testStart100times(self):
# for _ in range(0, 100):
# self.x11 = xserver.X11Server(
# resources.GetRunfilesDir(), tempfile.mkdtemp(), 225, 300)
# self.x11.Start()
# self.x11.Kill()
def _GetXRes(self, x11env):
env = dict(os.environ)
env.update(x11env)
return subprocess.check_output(
[
os.path.join(resources.GetRunfilesDir(),
'android_test_support/'
'tools/android/emulator/xres')
],
env=env).strip()
if __name__ == '__main__':
googletest.main()
| null |
985 |
from copy import deepcopy
from typing import Any, Dict
import pytest
import torch
import torch.nn as nn
from otx.algorithms.classification.adapters.mmcls import BYOL
from tests.test_suite.e2e_test_system import e2e_pytest_unit
@e2e_pytest_unit
class TestBYOL:
"""Test BYOL."""
@pytest.fixture(autouse=True)
def setup(self, monkeypatch, mocker) -> None:
class MockBackbone(nn.Module):
def __init__(self):
super().__init__()
self.pipeline = nn.Sequential(nn.Conv2d(3, 1, (1, 1), bias=False), nn.Conv2d(1, 1, (1, 1), bias=False))
def init_weights(self, pretrained=None):
pass
def forward(self, x):
return self.pipeline(x)
class MockNeck(nn.Sequential):
def __init__(self):
super().__init__(nn.Linear(2, 2, bias=False), nn.Linear(2, 2, bias=False))
def init_weights(self, init_linear=None):
pass
class MockHead(nn.Sequential):
def __init__(self):
super().__init__(nn.Linear(2, 2, bias=False))
def init_weights(self, init_linear=None):
pass
def forward(self, *args, **kwargs):
return {"loss": torch.Tensor(1)}
def build_mock_backbone(*args, **kwargs):
return MockBackbone()
def METHOD_NAME(*args, **kwargs):
return MockNeck()
def build_mock_head(*args, **kwargs):
return MockHead()
monkeypatch.setattr(
"otx.algorithms.classification.adapters.mmcls.models.classifiers.byol.build_backbone", build_mock_backbone
)
monkeypatch.setattr(
"otx.algorithms.classification.adapters.mmcls.models.classifiers.byol.build_neck", METHOD_NAME
)
monkeypatch.setattr(
"otx.algorithms.classification.adapters.mmcls.models.classifiers.byol.build_head", build_mock_head
)
self.byol = BYOL(backbone={}, neck={}, head={})
@e2e_pytest_unit
def test_init_weights(self) -> None:
"""Test init_weights function."""
for param_ol, param_tgt in zip(self.byol.online_backbone.parameters(), self.byol.target_backbone.parameters()):
assert torch.all(param_ol == param_tgt)
assert param_ol.requires_grad
assert not param_tgt.requires_grad
for param_ol, param_tgt in zip(
self.byol.online_projector.parameters(), self.byol.target_projector.parameters()
):
assert torch.all(param_ol == param_tgt)
assert param_ol.requires_grad
assert not param_tgt.requires_grad
@e2e_pytest_unit
def test_momentum_update(self) -> None:
"""Test _momentum_update function."""
original_params = {"backbone": [], "projector": []}
for param_tgt in self.byol.target_backbone.parameters():
param_tgt.data *= 2.0
original_params["backbone"].append(deepcopy(param_tgt))
for param_tgt in self.byol.target_projector.parameters():
param_tgt.data *= 2.0
original_params["projector"].append(deepcopy(param_tgt))
self.byol.momentum_update()
for param_ol, param_tgt, orig_tgt in zip(
self.byol.online_backbone.parameters(), self.byol.target_backbone.parameters(), original_params["backbone"]
):
assert torch.all(
param_tgt.data == orig_tgt * self.byol.momentum + param_ol.data * (1.0 - self.byol.momentum)
)
for param_ol, param_tgt, orig_tgt in zip(
self.byol.online_projector.parameters(),
self.byol.target_projector.parameters(),
original_params["projector"],
):
assert torch.all(
param_tgt.data == orig_tgt * self.byol.momentum + param_ol.data * (1.0 - self.byol.momentum)
)
@e2e_pytest_unit
def test_train_step(self) -> None:
"""Test train_step function wraps forward and _parse_losses."""
img1 = torch.randn((1, 3, 2, 2))
img2 = torch.randn((1, 3, 2, 2))
outputs = self.byol.train_step(data=dict(img1=img1, img2=img2), optimizer=None)
assert "loss" in outputs
assert "log_vars" in outputs
assert "num_samples" in outputs
@e2e_pytest_unit
@pytest.mark.parametrize(
"orig_state_dict,prefix,expected",
[
({"online_backbone.layer": 1}, "", {"layer": 1}),
({"backbone.layer": 1}, "", {"backbone.layer": 1}),
({"backbone.layer": 1}, "backbone.", {"backbone.layer": 1}),
],
)
def test_state_dict_hook(self, orig_state_dict: Dict[str, Any], prefix: str, expected: Dict[str, Any]) -> None:
"""Test state_dict_hook function."""
new_state_dict = BYOL.state_dict_hook(module=self.byol, state_dict=orig_state_dict, prefix=prefix)
assert new_state_dict == expected
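# --- Hedged illustration (not part of the original test module) ---
# A minimal sketch of the exponential-moving-average update that
# test_momentum_update verifies above: target = m * target + (1 - m) * online,
# applied parameter-wise with gradients disabled.
@torch.no_grad()
def ema_update(online: nn.Module, target: nn.Module, momentum: float = 0.996) -> None:
    for p_online, p_target in zip(online.parameters(), target.parameters()):
        p_target.data.mul_(momentum).add_(p_online.data, alpha=1.0 - momentum)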
| null |
986 |
from __future__ import print_function
import json
import string
import boto3
import time
import hashlib
import os
import collections
from collections import defaultdict
import botocore.response as br
import datetime
def METHOD_NAME(event, context):
#uncomment below if you want to see the JSON that is being passed to the Lambda Function
# jsondump = json.dumps(event)
# print(jsondump)
#the utterances to exit the bot broker
exitResponses={'quit','exit','return'}
currentUtterance = event["req"]["question"].lower()
print (currentUtterance)
if currentUtterance in exitResponses and "queryLambda" in event["res"]["session"]:
event["res"]["session"].pop("queryLambda",None)
event["res"]["session"].pop("botName",None)
event["res"]["session"].pop("botAlias",None)
event["res"]["session"].pop("brokerUID",None)
plaintextResp = 'Welcome back to QnABot!!!'
htmlResp = '<i> Welcome back to QnABot!!! </i>'
event["res"]["message"] = '{0}'.format(plaintextResp)
event["res"]["session"]["appContext"]={"altMessages":{"html":htmlResp}}
# return the default message telling the user that we are taking them to a partner bot
elif "queryLambda" not in event["res"]["session"]:
return middleman(event,True)
else:
return middleman(event,False)
return event
#handle the brokerage between Lex bots
def middleman(event, initialConnection):
lexClient = boto3.client('lex-runtime')
sessionAttrib = {}
#for Lex
if "sessionAttributes" in event["req"]["_event"]:
sessionAttrib = event["req"]["_event"].get("sessionAttributes",{})
#for Alexa
else:
sessionAttrib = event["req"]["_event"].get("session").get("attributes", {})
tempBotName = sessionAttrib.get("botName" , None)
tempBotAlias = sessionAttrib.get("botAlias", None)
tempBotUserID = sessionAttrib.get("brokerUID", None)
if tempBotName == None:
tempBotName = event["res"]["result"]["args"][0]
tempBotAlias = event["res"]["result"]["args"][1]
#userID location varies based on whether Lex or Alexa
tempBotUserID = event["req"]["_event"].get("userId") or event["req"]["_event"]["session"]["sessionId"]
if not(len(event["res"]["result"]["args"]) < 3 or event["res"]["result"]["args"][2].lower() == "remember"):
tempBotUserID ='{0}{1}'.format(tempBotUserID,int(round(time.time() * 1000)))
print (tempBotUserID)
if not initialConnection:
#if we don't unset the queryLambda here and we call another QnABot, we will run into a processing error and an infinite loop of Lambda calls
sessionAttrib.pop("queryLambda",None)
response = lexClient.post_text(
botName = tempBotName,
botAlias = tempBotAlias,
userId= tempBotUserID,
sessionAttributes= sessionAttrib,
inputText=event["req"]["question"]
)
print (json.dumps(response))
if "dialogState" in response:
event["res"]["type"] = response.get("messageFormat", "PlainText")
event["res"]["session"] = response["sessionAttributes"]
if "message" in response:
event["res"]["message"] = response["message"]
event["res"]["plainMessage"]=response["message"]
else:
tempMessage = "Intent {0} is {1}:".format(response["intentName"], response["dialogState"])
htmlMessage = tempMessage
for slot in response["slots"]:
tempMessage += " {0}:{1}".format(slot,response["slots"][slot])
htmlMessage += "<br> {0}:{1}".format(slot,response["slots"][slot])
event["res"]["message"] = tempMessage
event["res"]["plainMessage"]= tempMessage
event["res"]["session"]["appContext"]={"altMessages":{"html":htmlMessage}}
if "responseCard" in response:
card = response["responseCard"]["genericAttachments"][0]
event["res"]["card"]["send"] = True
for key,value in card.items():
event["res"]["card"][key] = value
if "botName" not in event["res"]["session"]:
event["res"]["session"]["botName"] = tempBotName
event["res"]["session"]["botAlias"] = tempBotAlias
event["res"]["session"]["brokerUID"] = tempBotUserID
event["res"]["session"]["queryLambda"] = os.environ['AWS_LAMBDA_FUNCTION_NAME']
return event
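# --- Hedged illustration (not part of the original handler) ---
# A minimal sketch of the event shape the handler above reads from; only keys
# that the code actually references are included, and every value is made up.
_example_event = {
    "req": {
        "question": "what is the weather",
        "_event": {
            "userId": "example-user",
            "sessionAttributes": {},
        },
    },
    "res": {
        "session": {},
        "result": {"args": ["PartnerBotName", "PartnerBotAlias"]},
        "card": {"send": False},
    },
}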
| null |
987 |
import os
import pytest
from ..utils import assert_content
from pyfs import head, cp
from pyio import write, write_regions
def test_cp_file_from_local_folder_to_mount_folder(size, local_file, mount_file, source_path):
"""TC-PIPE-FUSE-50"""
head(source_path, size=size, write_to=local_file)
cp(local_file, mount_file)
assert_content(local_file, mount_file)
def test_append_to_file_end(local_file, mount_file, source_path):
"""TC-PIPE-FUSE-51"""
head(source_path, append_to=local_file)
head(source_path, append_to=mount_file)
assert_content(local_file, mount_file)
def test_override_file_tail(size, local_file, mount_file):
"""TC-PIPE-FUSE-52"""
if size < 10:
pytest.skip()
actual_size = os.path.getsize(local_file)
write(local_file, offset=actual_size - 10, amount=10)
write(mount_file, offset=actual_size - 10, amount=10)
assert_content(local_file, mount_file)
def test_override_file_head(size, local_file, mount_file):
"""TC-PIPE-FUSE-53"""
if size < 10:
pytest.skip()
write(local_file, offset=0, amount=10)
write(mount_file, offset=0, amount=10)
assert_content(local_file, mount_file)
def test_write_to_position_that_is_bigger_than_file_length(local_file, mount_file):
"""TC-PIPE-FUSE-54"""
actual_size = os.path.getsize(local_file)
write(local_file, offset=actual_size + 10, amount=10)
write(mount_file, offset=actual_size + 10, amount=10)
assert_content(local_file, mount_file)
def test_write_region_that_exceeds_file_length(size, local_file, mount_file):
"""TC-PIPE-FUSE-55"""
if size < 5:
pytest.skip()
actual_size = os.path.getsize(local_file)
write(local_file, offset=actual_size - 5, amount=10)
write(mount_file, offset=actual_size - 5, amount=10)
assert_content(local_file, mount_file)
def METHOD_NAME(size, local_file, mount_file):
"""TC-PIPE-FUSE-56"""
if size < 20:
pytest.skip()
write(local_file, offset=10, amount=10)
write(mount_file, offset=10, amount=10)
assert_content(local_file, mount_file)
def test_write_region_in_single_chunk(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-57"""
if size < chunk_size + 20:
pytest.skip()
write(local_file, offset=chunk_size + 10, amount=10)
write(mount_file, offset=chunk_size + 10, amount=10)
assert_content(local_file, mount_file)
def test_write_region_matching_single_chunk(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-58"""
if size < chunk_size:
pytest.skip()
write(local_file, offset=0, amount=chunk_size)
write(mount_file, offset=0, amount=chunk_size)
assert_content(local_file, mount_file)
def test_write_region_between_two_chunks(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-59"""
if size < chunk_size + 5:
pytest.skip()
write(local_file, offset=chunk_size - 5, amount=10)
write(mount_file, offset=chunk_size - 5, amount=10)
assert_content(local_file, mount_file)
def test_write_two_regions_in_single_chunk(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-60"""
if size < chunk_size + 110:
pytest.skip()
write_regions(local_file, {'offset': chunk_size + 10, 'amount': 10}, {'offset': chunk_size + 100, 'amount': 10})
write_regions(mount_file, {'offset': chunk_size + 10, 'amount': 10}, {'offset': chunk_size + 100, 'amount': 10})
assert_content(local_file, mount_file)
def test_write_two_regions_in_two_adjacent_chunks(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-61"""
if size < chunk_size + 20:
pytest.skip()
write_regions(local_file, {'offset': 10, 'amount': 10}, {'offset': chunk_size + 10, 'amount': 10})
write_regions(mount_file, {'offset': 10, 'amount': 10}, {'offset': chunk_size + 10, 'amount': 10})
assert_content(local_file, mount_file)
def test_write_two_regions_in_two_non_adjacent_chunks(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-62"""
if size < chunk_size * 2 + 20:
pytest.skip()
write_regions(local_file, {'offset': 10, 'amount': 10}, {'offset': chunk_size * 2 + 10, 'amount': 10})
write_regions(mount_file, {'offset': 10, 'amount': 10}, {'offset': chunk_size * 2 + 10, 'amount': 10})
assert_content(local_file, mount_file)
def test_write_two_regions_between_three_chunks(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-63"""
if size < chunk_size * 2 + 5:
pytest.skip()
write_regions(local_file, {'offset': chunk_size - 5, 'amount': 10}, {'offset': chunk_size * 2 - 5, 'amount': 10})
write_regions(mount_file, {'offset': chunk_size - 5, 'amount': 10}, {'offset': chunk_size * 2 - 5, 'amount': 10})
assert_content(local_file, mount_file)
def test_write_two_regions_between_four_chunks(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-64"""
if size < chunk_size * 3 + 5:
pytest.skip()
write_regions(local_file, {'offset': chunk_size - 5, 'amount': 10}, {'offset': chunk_size * 3 - 5, 'amount': 10})
write_regions(mount_file, {'offset': chunk_size - 5, 'amount': 10}, {'offset': chunk_size * 3 - 5, 'amount': 10})
assert_content(local_file, mount_file)
def test_write_two_regions_with_one_of_them_exceeding_file_length(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-65"""
if size < 5:
pytest.skip()
actual_size = os.path.getsize(local_file)
write_regions(local_file, {'offset': 10, 'amount': 10}, {'offset': actual_size - 5, 'amount': 10})
write_regions(mount_file, {'offset': 10, 'amount': 10}, {'offset': actual_size - 5, 'amount': 10})
assert_content(local_file, mount_file)
def test_write_two_regions_with_one_of_them_starting_from_position_that_is_bigger_than_file_length(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-66"""
if size < 5:
pytest.skip()
actual_size = os.path.getsize(local_file)
write_regions(local_file, {'offset': 10, 'amount': 10}, {'offset': actual_size + 5, 'amount': 10})
write_regions(mount_file, {'offset': 10, 'amount': 10}, {'offset': actual_size + 5, 'amount': 10})
assert_content(local_file, mount_file)
def test_write_two_regions_starting_from_position_that_is_bigger_than_file_length(chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-67"""
actual_size = os.path.getsize(local_file)
write_regions(local_file, {'offset': actual_size + 5, 'amount': 10}, {'offset': actual_size + 20, 'amount': 10})
write_regions(mount_file, {'offset': actual_size + 5, 'amount': 10}, {'offset': actual_size + 20, 'amount': 10})
assert_content(local_file, mount_file)
def test_write_two_overlapping_regions(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-68"""
if size < 25:
pytest.skip()
write_regions(local_file, {'offset': 10, 'amount': 10}, {'offset': 15, 'amount': 10})
write_regions(mount_file, {'offset': 10, 'amount': 10}, {'offset': 15, 'amount': 10})
assert_content(local_file, mount_file)
def test_write_region_to_an_already_written_chunk(size, chunk_size, local_file, mount_file):
"""TC-PIPE-FUSE-69"""
if size < chunk_size + 10:
pytest.skip()
write_regions(local_file, {'offset': 0, 'amount': chunk_size}, {'offset': 10, 'amount': chunk_size})
write_regions(mount_file, {'offset': 0, 'amount': chunk_size}, {'offset': 10, 'amount': chunk_size})
assert_content(local_file, mount_file)
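# --- Hedged illustration (not part of the original test module) ---
# A minimal sketch of what an offset write like the ones exercised above boils
# down to with plain file I/O; the pyio `write`/`write_regions` helpers are
# assumed to behave roughly like this (seek to the offset, overwrite `amount`
# bytes, possibly past the current end of file).
def write_bytes_at(path, offset, amount, fill=b'x'):
    with open(path, 'r+b') as f:
        f.seek(offset)               # seeking past EOF is allowed; the gap reads back as zeros
        f.write(fill * amount)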
| null |
988 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkr_kvstore.endpoint import endpoint_data
class ModifyInstanceSpecRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'R-kvstore', '2015-01-01', 'ModifyInstanceSpec','redisa')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def METHOD_NAME(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_CouponNo(self): # String
return self.get_query_params().get('CouponNo')
def set_CouponNo(self, CouponNo): # String
self.add_query_param('CouponNo', CouponNo)
def get_InstanceClass(self): # String
return self.get_query_params().get('InstanceClass')
def set_InstanceClass(self, InstanceClass): # String
self.add_query_param('InstanceClass', InstanceClass)
def get_SecurityToken(self): # String
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self, SecurityToken): # String
self.add_query_param('SecurityToken', SecurityToken)
def get_EffectiveTime(self): # String
return self.get_query_params().get('EffectiveTime')
def set_EffectiveTime(self, EffectiveTime): # String
self.add_query_param('EffectiveTime', EffectiveTime)
def get_SourceBiz(self): # String
return self.get_query_params().get('SourceBiz')
def set_SourceBiz(self, SourceBiz): # String
self.add_query_param('SourceBiz', SourceBiz)
def get_BusinessInfo(self): # String
return self.get_query_params().get('BusinessInfo')
def set_BusinessInfo(self, BusinessInfo): # String
self.add_query_param('BusinessInfo', BusinessInfo)
def get_ShardCount(self): # Integer
return self.get_query_params().get('ShardCount')
def set_ShardCount(self, ShardCount): # Integer
self.add_query_param('ShardCount', ShardCount)
def get_AutoPay(self): # Boolean
return self.get_query_params().get('AutoPay')
def set_AutoPay(self, AutoPay): # Boolean
self.add_query_param('AutoPay', AutoPay)
def get_MajorVersion(self): # String
return self.get_query_params().get('MajorVersion')
def set_MajorVersion(self, MajorVersion): # String
self.add_query_param('MajorVersion', MajorVersion)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
def get_ReadOnlyCount(self): # Integer
return self.get_query_params().get('ReadOnlyCount')
def set_ReadOnlyCount(self, ReadOnlyCount): # Integer
self.add_query_param('ReadOnlyCount', ReadOnlyCount)
def get_ForceUpgrade(self): # Boolean
return self.get_query_params().get('ForceUpgrade')
def set_ForceUpgrade(self, ForceUpgrade): # Boolean
self.add_query_param('ForceUpgrade', ForceUpgrade)
def get_OrderType(self): # String
return self.get_query_params().get('OrderType')
def set_OrderType(self, OrderType): # String
self.add_query_param('OrderType', OrderType)
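# --- Hedged illustration (not part of the original module) ---
# A minimal sketch of how request classes like this one are typically sent with
# the core SDK client; the credentials, region and parameter values below are
# placeholders only.
if __name__ == '__main__':
	from aliyunsdkcore.client import AcsClient
	client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
	request = ModifyInstanceSpecRequest()
	request.set_InstanceId('r-example')
	request.set_InstanceClass('redis.master.small.default')
	request.set_AutoPay(True)
	# Sends the signed request and returns the raw response body.
	response = client.do_action_with_exception(request)
	print(response)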
| null |
989 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkga.endpoint import endpoint_data
class CreateAcceleratorRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ga', '2019-11-20', 'CreateAccelerator','gaplus')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_IpSetConfig(self): # Struct
return self.get_query_params().get('IpSetConfig')
def set_IpSetConfig(self, IpSetConfig): # Struct
if IpSetConfig.get('AccessMode') is not None:
self.add_query_param('IpSetConfig.AccessMode', IpSetConfig.get('AccessMode'))
def get_AutoUseCoupon(self): # String
return self.get_query_params().get('AutoUseCoupon')
def set_AutoUseCoupon(self, AutoUseCoupon): # String
self.add_query_param('AutoUseCoupon', AutoUseCoupon)
def get_AutoRenewDuration(self): # Integer
return self.get_query_params().get('AutoRenewDuration')
def set_AutoRenewDuration(self, AutoRenewDuration): # Integer
self.add_query_param('AutoRenewDuration', AutoRenewDuration)
def get_Spec(self): # String
return self.get_query_params().get('Spec')
def METHOD_NAME(self, Spec): # String
self.add_query_param('Spec', Spec)
def get_Duration(self): # Integer
return self.get_query_params().get('Duration')
def set_Duration(self, Duration): # Integer
self.add_query_param('Duration', Duration)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
def get_InstanceChargeType(self): # String
return self.get_query_params().get('InstanceChargeType')
def set_InstanceChargeType(self, InstanceChargeType): # String
self.add_query_param('InstanceChargeType', InstanceChargeType)
def get_AutoPay(self): # Boolean
return self.get_query_params().get('AutoPay')
def set_AutoPay(self, AutoPay): # Boolean
self.add_query_param('AutoPay', AutoPay)
def get_DryRun(self): # Boolean
return self.get_query_params().get('DryRun')
def set_DryRun(self, DryRun): # Boolean
self.add_query_param('DryRun', DryRun)
def get_PromotionOptionNo(self): # String
return self.get_query_params().get('PromotionOptionNo')
def set_PromotionOptionNo(self, PromotionOptionNo): # String
self.add_query_param('PromotionOptionNo', PromotionOptionNo)
def get_BandwidthBillingType(self): # String
return self.get_query_params().get('BandwidthBillingType')
def set_BandwidthBillingType(self, BandwidthBillingType): # String
self.add_query_param('BandwidthBillingType', BandwidthBillingType)
def get_AutoRenew(self): # Boolean
return self.get_query_params().get('AutoRenew')
def set_AutoRenew(self, AutoRenew): # Boolean
self.add_query_param('AutoRenew', AutoRenew)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
def get_PricingCycle(self): # String
return self.get_query_params().get('PricingCycle')
def set_PricingCycle(self, PricingCycle): # String
self.add_query_param('PricingCycle', PricingCycle)
| null |
990 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdknlb.endpoint import endpoint_data
class CreateServerGroupRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Nlb', '2022-04-30', 'CreateServerGroup','nlb')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ServerGroupName(self): # String
return self.get_body_params().get('ServerGroupName')
def set_ServerGroupName(self, ServerGroupName): # String
self.add_body_params('ServerGroupName', ServerGroupName)
def get_ClientToken(self): # String
return self.get_body_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_body_params('ClientToken', ClientToken)
def get_PreserveClientIpEnabled(self): # Boolean
return self.get_body_params().get('PreserveClientIpEnabled')
def set_PreserveClientIpEnabled(self, PreserveClientIpEnabled): # Boolean
self.add_body_params('PreserveClientIpEnabled', PreserveClientIpEnabled)
def get_HealthCheckConfig(self): # Struct
return self.get_body_params().get('HealthCheckConfig')
def set_HealthCheckConfig(self, HealthCheckConfig): # Struct
if HealthCheckConfig.get('HealthCheckEnabled') is not None:
self.add_body_params('HealthCheckConfig.HealthCheckEnabled', HealthCheckConfig.get('HealthCheckEnabled'))
if HealthCheckConfig.get('HealthCheckType') is not None:
self.add_body_params('HealthCheckConfig.HealthCheckType', HealthCheckConfig.get('HealthCheckType'))
if HealthCheckConfig.get('HealthCheckConnectPort') is not None:
self.add_body_params('HealthCheckConfig.HealthCheckConnectPort', HealthCheckConfig.get('HealthCheckConnectPort'))
if HealthCheckConfig.get('HealthyThreshold') is not None:
self.add_body_params('HealthCheckConfig.HealthyThreshold', HealthCheckConfig.get('HealthyThreshold'))
if HealthCheckConfig.get('UnhealthyThreshold') is not None:
self.add_body_params('HealthCheckConfig.UnhealthyThreshold', HealthCheckConfig.get('UnhealthyThreshold'))
if HealthCheckConfig.get('HealthCheckConnectTimeout') is not None:
self.add_body_params('HealthCheckConfig.HealthCheckConnectTimeout', HealthCheckConfig.get('HealthCheckConnectTimeout'))
if HealthCheckConfig.get('HealthCheckInterval') is not None:
self.add_body_params('HealthCheckConfig.HealthCheckInterval', HealthCheckConfig.get('HealthCheckInterval'))
if HealthCheckConfig.get('HealthCheckDomain') is not None:
self.add_body_params('HealthCheckConfig.HealthCheckDomain', HealthCheckConfig.get('HealthCheckDomain'))
if HealthCheckConfig.get('HealthCheckUrl') is not None:
self.add_body_params('HealthCheckConfig.HealthCheckUrl', HealthCheckConfig.get('HealthCheckUrl'))
if HealthCheckConfig.get('HealthCheckHttpCode') is not None:
for index1, value1 in enumerate(HealthCheckConfig.get('HealthCheckHttpCode')):
self.add_body_params('HealthCheckConfig.HealthCheckHttpCode.' + str(index1 + 1), value1)
if HealthCheckConfig.get('HttpCheckMethod') is not None:
self.add_body_params('HealthCheckConfig.HttpCheckMethod', HealthCheckConfig.get('HttpCheckMethod'))
def get_AddressIPVersion(self): # String
return self.get_body_params().get('AddressIPVersion')
def set_AddressIPVersion(self, AddressIPVersion): # String
self.add_body_params('AddressIPVersion', AddressIPVersion)
def get_Scheduler(self): # String
return self.get_body_params().get('Scheduler')
def set_Scheduler(self, Scheduler): # String
self.add_body_params('Scheduler', Scheduler)
def get_ResourceGroupId(self): # String
return self.get_body_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_body_params('ResourceGroupId', ResourceGroupId)
def get_Protocol(self): # String
return self.get_body_params().get('Protocol')
def set_Protocol(self, Protocol): # String
self.add_body_params('Protocol', Protocol)
def get_Tags(self): # RepeatList
return self.get_body_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Key') is not None:
self.add_body_params('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
if Tag[depth1].get('Value') is not None:
self.add_body_params('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
def get_DryRun(self): # Boolean
return self.get_body_params().get('DryRun')
def set_DryRun(self, DryRun): # Boolean
self.add_body_params('DryRun', DryRun)
def get_ConnectionDrainEnabled(self): # Boolean
return self.get_body_params().get('ConnectionDrainEnabled')
def set_ConnectionDrainEnabled(self, ConnectionDrainEnabled): # Boolean
self.add_body_params('ConnectionDrainEnabled', ConnectionDrainEnabled)
def get_ConnectionDrainTimeout(self): # Integer
return self.get_body_params().get('ConnectionDrainTimeout')
def set_ConnectionDrainTimeout(self, ConnectionDrainTimeout): # Integer
self.add_body_params('ConnectionDrainTimeout', ConnectionDrainTimeout)
def METHOD_NAME(self): # Boolean
return self.get_body_params().get('AnyPortEnabled')
def set_AnyPortEnabled(self, AnyPortEnabled): # Boolean
self.add_body_params('AnyPortEnabled', AnyPortEnabled)
def get_ServerGroupType(self): # String
return self.get_body_params().get('ServerGroupType')
def set_ServerGroupType(self, ServerGroupType): # String
self.add_body_params('ServerGroupType', ServerGroupType)
def get_VpcId(self): # String
return self.get_body_params().get('VpcId')
def set_VpcId(self, VpcId): # String
self.add_body_params('VpcId', VpcId)
| null |
991 |
import pickle
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
import numpy as np
import pytest
from torch.utils.data import DataLoader
from lhotse import CutSet
from lhotse.dataset import DynamicCutSampler, IterableDatasetWrapper
from lhotse.dataset.webdataset import export_to_webdataset
from lhotse.utils import fastcopy
webdataset = pytest.importorskip(
"webdataset", reason="These tests require webdataset package to run."
)
def test_export_to_webdataset():
cuts = CutSet.from_file("test/fixtures/libri/cuts.json")
cut = cuts[0]
cuts = []
for i in range(10):
cuts.append(fastcopy(cut, id=cut.id + "-" + str(i)))
cuts = CutSet.from_cuts(cuts)
with NamedTemporaryFile(suffix=".tar") as f:
export_to_webdataset(cuts, output_path=f.name)
f.flush()
ds = webdataset.WebDataset(f.name)
dicts = (pickle.loads(data["data"]) for data in ds)
cuts_ds = CutSet.from_dicts(dicts)
assert list(cuts.ids) == list(cuts_ds.ids)
def test_cutset_from_webdataset():
cuts = CutSet.from_file("test/fixtures/libri/cuts.json")
cut = cuts[0]
cuts = []
for i in range(10):
cuts.append(fastcopy(cut, id=cut.id + "-" + str(i)))
cuts = CutSet.from_cuts(cuts)
with NamedTemporaryFile(suffix=".tar") as f:
export_to_webdataset(cuts, output_path=f.name)
f.flush()
cuts_ds = CutSet.from_webdataset(f.name)
assert list(cuts.ids) == list(cuts_ds.ids)
for c, cds in zip(cuts, cuts_ds):
np.testing.assert_equal(c.load_audio(), cds.load_audio())
np.testing.assert_almost_equal(
c.load_features(), cds.load_features(), decimal=1
)
def test_cutset_from_webdataset_sharded():
cuts = CutSet.from_file("test/fixtures/libri/cuts.json")
cut = cuts[0]
cuts = []
for i in range(10):
cuts.append(fastcopy(cut, id=cut.id + "-" + str(i)))
cuts = CutSet.from_cuts(cuts)
with TemporaryDirectory() as dir_path:
tar_pattern = f"{dir_path}/shard-%06d.tar"
export_to_webdataset(cuts, output_path=tar_pattern, shard_size=2)
# disabling shard shuffling for testing purposes here
cuts_ds = CutSet.from_webdataset(
dir_path + "/shard-{000000..000004}.tar", shuffle_shards=False
)
assert list(cuts.ids) == list(cuts_ds.ids)
for c, cds in zip(cuts, cuts_ds):
np.testing.assert_equal(c.load_audio(), cds.load_audio())
np.testing.assert_almost_equal(
c.load_features(), cds.load_features(), decimal=1
)
def METHOD_NAME():
cuts = CutSet.from_file("test/fixtures/libri/cuts.json")
cut = cuts[0]
cuts = []
for i in range(10):
cuts.append(fastcopy(cut, id=cut.id + "-" + str(i)))
cuts = CutSet.from_cuts(cuts)
with TemporaryDirectory() as dir_path:
tar_pattern = f"pipe:gzip -c > {dir_path}/shard-%06d.tar.gz"
export_to_webdataset(cuts, output_path=tar_pattern, shard_size=2)
# disabling shard shuffling for testing purposes here
cuts_ds = CutSet.from_webdataset(
"pipe:gunzip -c " + dir_path + "/shard-{000000..000004}.tar.gz",
shuffle_shards=False,
)
assert list(cuts.ids) == list(cuts_ds.ids)
for c, cds in zip(cuts, cuts_ds):
np.testing.assert_equal(c.load_audio(), cds.load_audio())
np.testing.assert_almost_equal(
c.load_features(), cds.load_features(), decimal=1
)
class DummyDataset:
"""Dataset that returns input cuts."""
def __getitem__(self, item):
return item
def test_webdataset_sampler_epoch_increment():
cuts = CutSet.from_file("test/fixtures/libri/cuts.json").repeat(10)
with TemporaryDirectory() as dir_path:
tar_pattern = f"{dir_path}/shard-%06d.tar"
export_to_webdataset(cuts, output_path=tar_pattern, shard_size=1)
cuts_ds = CutSet.from_webdataset(
[str(p) for p in Path(dir_path).glob("*.tar")], shuffle_shards=True
)
sampler = DynamicCutSampler(cuts_ds, max_cuts=1)
dloader = DataLoader(
IterableDatasetWrapper(DummyDataset(), sampler, auto_increment_epoch=True),
batch_size=None,
num_workers=1,
persistent_workers=True,
)
epoch_batches = {}
for epoch in [0, 1]:
batches = []
for batch in dloader:
for cut in batch:
batches.append(cut)
epoch_batches[epoch] = CutSet.from_cuts(batches)
# Both epochs have the same cut IDs.
assert sorted(epoch_batches[0].ids) == sorted(epoch_batches[1].ids)
# Both epochs have different cut order (shards were re-shuffled).
assert list(epoch_batches[0].ids) != list(epoch_batches[1].ids)
| null |
992 |
import sys
import types
import py
from py.builtin import set, frozenset
def test_enumerate():
l = [0,1,2]
for i,x in enumerate(l):
assert i == x
def test_any():
assert not py.builtin.any([0,False, None])
assert py.builtin.any([0,False, None,1])
def test_all():
assert not py.builtin.all([True, 1, False])
assert py.builtin.all([True, 1, object])
def test_BaseException():
assert issubclass(IndexError, py.builtin.BaseException)
assert issubclass(Exception, py.builtin.BaseException)
assert issubclass(KeyboardInterrupt, py.builtin.BaseException)
class MyRandomClass(object):
pass
assert not issubclass(MyRandomClass, py.builtin.BaseException)
assert py.builtin.BaseException.__module__ in ('exceptions', 'builtins')
assert Exception.__name__ == 'Exception'
def test_GeneratorExit():
assert py.builtin.GeneratorExit.__module__ in ('exceptions', 'builtins')
assert issubclass(py.builtin.GeneratorExit, py.builtin.BaseException)
def test_reversed():
reversed = py.builtin.reversed
r = reversed("hello")
assert iter(r) is r
s = "".join(list(r))
assert s == "olleh"
assert list(reversed(list(reversed("hello")))) == ['h','e','l','l','o']
py.test.raises(TypeError, reversed, reversed("hello"))
def test_simple():
s = set([1, 2, 3, 4])
assert s == set([3, 4, 2, 1])
s1 = s.union(set([5, 6]))
assert 5 in s1
assert 1 in s1
def test_frozenset():
s = set([frozenset([0, 1]), frozenset([1, 0])])
assert len(s) == 1
def test_print_simple():
from py.builtin import print_
py.test.raises(TypeError, "print_(hello=3)")
f = py.io.TextIO()
print_("hello", "world", file=f)
s = f.getvalue()
assert s == "hello world\n"
f = py.io.TextIO()
print_("hello", end="", file=f)
s = f.getvalue()
assert s == "hello"
f = py.io.TextIO()
print_("xyz", "abc", sep="", end="", file=f)
s = f.getvalue()
assert s == "xyzabc"
class X:
def __repr__(self): return "rep"
f = py.io.TextIO()
print_(X(), file=f)
assert f.getvalue() == "rep\n"
def test_execfile(tmpdir):
test_file = tmpdir.join("test.py")
test_file.write("x = y\ndef f(): pass")
ns = {"y" : 42}
py.builtin.execfile(str(test_file), ns)
assert ns["x"] == 42
assert py.code.getrawcode(ns["f"]).co_filename == str(test_file)
class A:
y = 3
x = 4
py.builtin.execfile(str(test_file))
assert A.x == 3
def test_getfuncdict():
def f():
raise NotImplementedError
f.x = 4
assert py.builtin._getfuncdict(f)["x"] == 4
assert py.builtin._getfuncdict(2) is None
def test_callable():
class A: pass
assert py.builtin.callable(test_callable)
assert py.builtin.callable(A)
assert py.builtin.callable(list)
assert py.builtin.callable(id)
assert not py.builtin.callable(4)
assert not py.builtin.callable("hi")
def test_totext():
py.builtin._totext("hello", "UTF-8")
def test_bytes_text():
if sys.version_info[0] < 3:
assert py.builtin.text == unicode
assert py.builtin.bytes == str
else:
assert py.builtin.text == str
assert py.builtin.bytes == bytes
def test_totext_badutf8():
# this was in printouts within the pytest testsuite
# totext would fail
if sys.version_info >= (3,):
errors = 'surrogateescape'
else: # old python has crappy error handlers
errors = 'replace'
py.builtin._totext("\xa6", "UTF-8", errors)
def test_reraise():
from py.builtin import _reraise
try:
raise Exception()
except Exception:
cls, val, tb = sys.exc_info()
excinfo = py.test.raises(Exception, "_reraise(cls, val, tb)")
def test_exec():
l = []
py.builtin.exec_("l.append(1)")
assert l == [1]
d = {}
py.builtin.exec_("x=4", d)
assert d['x'] == 4
def METHOD_NAME():
py.test.raises(ImportError, py.builtin._tryimport, 'xqwe123')
x = py.builtin._tryimport('asldkajsdl', 'py')
assert x == py
x = py.builtin._tryimport('asldkajsdl', 'py.path')
assert x == py.path
def test_getcode():
code = py.builtin._getcode(test_getcode)
assert isinstance(code, types.CodeType)
assert py.builtin._getcode(4) is None
| null |
993 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkprivatelink.endpoint import endpoint_data
class CreateVpcEndpointServiceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Privatelink', '2020-04-15', 'CreateVpcEndpointService','privatelink')
self.set_protocol_type('https')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_AutoAcceptEnabled(self): # Boolean
return self.get_query_params().get('AutoAcceptEnabled')
def set_AutoAcceptEnabled(self, AutoAcceptEnabled): # Boolean
self.add_query_param('AutoAcceptEnabled', AutoAcceptEnabled)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_Payer(self): # String
return self.get_query_params().get('Payer')
def set_Payer(self, Payer): # String
self.add_query_param('Payer', Payer)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_ZoneAffinityEnabled(self): # Boolean
return self.get_query_params().get('ZoneAffinityEnabled')
def set_ZoneAffinityEnabled(self, ZoneAffinityEnabled): # Boolean
self.add_query_param('ZoneAffinityEnabled', ZoneAffinityEnabled)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
def get_DryRun(self): # Boolean
return self.get_query_params().get('DryRun')
def set_DryRun(self, DryRun): # Boolean
self.add_query_param('DryRun', DryRun)
def get_Resources(self): # RepeatList
return self.get_query_params().get('Resource')
def set_Resources(self, Resource): # RepeatList
for depth1 in range(len(Resource)):
if Resource[depth1].get('ResourceType') is not None:
self.add_query_param('Resource.' + str(depth1 + 1) + '.ResourceType', Resource[depth1].get('ResourceType'))
if Resource[depth1].get('ResourceId') is not None:
self.add_query_param('Resource.' + str(depth1 + 1) + '.ResourceId', Resource[depth1].get('ResourceId'))
if Resource[depth1].get('ZoneId') is not None:
self.add_query_param('Resource.' + str(depth1 + 1) + '.ZoneId', Resource[depth1].get('ZoneId'))
def get_ServiceResourceType(self): # String
return self.get_query_params().get('ServiceResourceType')
def set_ServiceResourceType(self, ServiceResourceType): # String
self.add_query_param('ServiceResourceType', ServiceResourceType)
def METHOD_NAME(self): # Boolean
return self.get_query_params().get('ServiceSupportIPv6')
def set_ServiceSupportIPv6(self, ServiceSupportIPv6): # Boolean
self.add_query_param('ServiceSupportIPv6', ServiceSupportIPv6)
def get_ServiceDescription(self): # String
return self.get_query_params().get('ServiceDescription')
def set_ServiceDescription(self, ServiceDescription): # String
self.add_query_param('ServiceDescription', ServiceDescription)
| null |
994 |
import os
from gettext import gettext as _
from django.core.files.uploadedfile import TemporaryUploadedFile
from django.core.files.uploadhandler import TemporaryFileUploadHandler
from pygtrie import StringTrie
from pulpcore.app import models
from pulpcore.app import pulp_hashlib
class PulpTemporaryUploadedFile(TemporaryUploadedFile):
"""
A file uploaded to a temporary location in Pulp.
"""
def __init__(self, name, content_type, size, charset, content_type_extra=None):
self.hashers = {}
for hasher in models.Artifact.DIGEST_FIELDS:
self.hashers[hasher] = pulp_hashlib.new(hasher)
super().__init__(name, content_type, size, charset, content_type_extra)
@classmethod
def from_file(cls, file):
"""
Create a PulpTemporaryUploadedFile from a file system file
Args:
file (File): a filesystem file
Returns:
PulpTemporaryUploadedFile: instantiated instance from file
"""
name = os.path.basename(file.name)
try:
size = file.size
except AttributeError:
size = os.path.getsize(file.name)
instance = cls(name, "", size, "", "")
instance.file = file
        # Read the file in 1 MiB chunks (the default) and update every hasher per chunk
while data := file.read(1048576):
for hasher in models.Artifact.DIGEST_FIELDS:
instance.hashers[hasher].update(data)
# calling the method read() moves the file's pointer to the end of the file object,
# thus, it is necessary to reset the file's pointer position back to 0 in case of
# calling the method read() again from another place
file.seek(0)
return instance
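# A minimal usage sketch, not part of the original module; the path argument is whatever on-disk
# file the caller wants hashed. It feeds the file through PulpTemporaryUploadedFile.from_file()
# and collects the hex digests accumulated while the stream was read.
def _example_digests_for(path):
    with open(path, "rb") as fp:
        uploaded = PulpTemporaryUploadedFile.from_file(fp)
        return {name: hasher.hexdigest() for name, hasher in uploaded.hashers.items()}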
class HashingFileUploadHandler(TemporaryFileUploadHandler):
"""
Upload handler that streams data into a temporary file.
"""
def new_file(
self,
field_name,
file_name,
content_type,
content_length,
charset=None,
content_type_extra=None,
):
"""
Signal that a new file has been started.
Args:
field_name (str): Name of the model field that this file is associated with. This
value is not used by this implementation of TemporaryFileUploadHandler.
file_name (str): Name of file being uploaded.
content_type (str): Type of file
content_length (int): Size of the file being stored. This value is not used by this
implementation of TemporaryFileUploadHandler.
charset (str):
"""
self.field_name = field_name
self.content_length = content_length
self.file = PulpTemporaryUploadedFile(
file_name, content_type, 0, charset, content_type_extra
)
def METHOD_NAME(self, raw_data, start):
self.file.write(raw_data)
for hasher in models.Artifact.DIGEST_FIELDS:
self.file.hashers[hasher].update(raw_data)
class TemporaryDownloadedFile(TemporaryUploadedFile):
"""
A temporary downloaded file.
The FileSystemStorage backend treats this object the same as a TemporaryUploadedFile. The
storage backend attempts to link the file to its final location. If the final location is on a
different physical drive, the file is copied to its final destination.
"""
def __init__(self, file, name=None):
"""
A constructor that does not create a blank temporary file.
The __init__ for TemporaryUploadedFile creates an empty temporary file. This constructor
is designed to handle files that have already been written to disk.
Args:
file (file): An open file
name (str): Name of the file
"""
self.file = file
if name is None:
name = getattr(file, "name", None)
self.name = name
def validate_file_paths(paths):
"""
    Check for valid POSIX paths (i.e. ones that aren't duplicated and don't overlap).
    Overlapping paths are where one path terminates inside another (e.g. a/b and a/b/c).
    This function collects every duplicate and overlap it finds and raises a single ValueError
    describing all of them. We use a trie (or prefix tree) to keep track of which paths we've
    already seen.
    Args:
        paths (iterable of str): An iterable of strings each representing a relative path
    Raises:
        ValueError: If any path is duplicated or overlaps another
"""
overlap_error = _("The path for file '{path}' overlaps: {conflicts}")
path_trie = StringTrie(separator="/")
dups = []
overlaps = []
for path in paths:
if path in path_trie:
# path duplicates a path already in the trie
dups.append(path)
elif path_trie.has_subtrie(path):
# overlap where path is 'a/b' and trie has 'a/b/c'
conflicts = [item[0] for item in path_trie.items(prefix=path)]
overlaps.append(overlap_error.format(path=path, conflicts=", ".join(conflicts)))
else:
prefixes = list(path_trie.prefixes(path))
if prefixes:
# overlap where path is 'a/b/c' and trie has 'a/b'
conflicts = [prefix.key for prefix in prefixes]
overlaps.append(overlap_error.format(path=path, conflicts=", ".join(conflicts)))
# if there are no overlaps, add it to our trie and continue
path_trie[path] = True
if dups or overlaps:
dups_msg = ""
overlaps_msg = ""
if dups:
dups_msg = _("Paths are duplicated: {paths}").format(paths=",".join(dups))
if overlaps:
overlaps_msg = "\n".join(overlaps)
raise ValueError(
_("Path errors found. {dups}\n{overlaps}").format(dups=dups_msg, overlaps=overlaps_msg)
)
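# A minimal usage sketch, not part of the original module; the paths are hypothetical. Disjoint
# relative paths validate silently, while a duplicate or an overlap such as "a/b" vs "a/b/c" makes
# validate_file_paths() raise a ValueError listing every conflict it found.
def _example_validate_file_paths():
    validate_file_paths(["docs/readme.txt", "pkg/module.py"])  # passes, returns None
    try:
        validate_file_paths(["a/b", "a/b/c", "a/b"])  # one overlap and one duplicate
    except ValueError as exc:
        return str(exc)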
| null |
995 |
# Drakkar-Software OctoBot-Interfaces
# Copyright (c) Drakkar-Software, All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
import asyncio
import octobot_services.interfaces.util as interfaces_util
import octobot.community as octobot_community
import octobot.commands as octobot_commands
import octobot.constants as octobot_constants
import octobot_commons.authentication as authentication
import octobot_trading.api as trading_api
def get_community_metrics_to_display():
return interfaces_util.run_in_bot_async_executor(octobot_community.get_community_metrics())
def can_get_community_metrics():
return octobot_community.can_read_metrics(interfaces_util.get_edited_config(dict_only=False))
def get_account_tentacles_packages(authenticator):
packages = authenticator.get_packages()
return [octobot_community.CommunityTentaclesPackage.from_community_dict(data) for data in packages]
def get_preview_tentacles_packages(url_for):
c1 = octobot_community.CommunityTentaclesPackage(
"AI candles analyser",
"Tentacles packages offering artificial intelligence analysis tools based on candles shapes.",
None, True,
[url_for("static", filename="img/community/tentacles_packages_previews/octobot.png")], None, None, None)
c1.uninstalled = False
c2 = octobot_community.CommunityTentaclesPackage(
"Telegram portfolio management",
"Manage your portfolio directly from the telegram interface.",
None, False,
[url_for("static", filename="img/community/tentacles_packages_previews/telegram.png")], None, None, None)
c2.uninstalled = False
c3 = octobot_community.CommunityTentaclesPackage(
"Mobile first web interface",
"Use a mobile oriented interface for your OctoBot.",
None, True,
[url_for("static", filename="img/community/tentacles_packages_previews/mobile.png")], None, None, None)
c3.uninstalled = True
return [c1, c2, c3]
def get_current_octobots_stats():
return interfaces_util.run_in_bot_async_executor(octobot_community.get_current_octobots_stats())
def _format_bot(bot):
return {
"name": octobot_community.CommunityUserAccount.get_bot_name_or_id(bot) if bot else None,
"id": octobot_community.CommunityUserAccount.get_bot_id(bot) if bot else None,
}
def get_all_user_bots():
# reload user bots to make sure the list is up to date
interfaces_util.run_in_bot_main_loop(authentication.Authenticator.instance().load_user_bots())
return sorted([
_format_bot(bot)
for bot in authentication.Authenticator.instance().user_account.get_all_user_bots_raw_data()
], key=lambda d: d["name"])
def get_selected_user_bot():
return _format_bot(authentication.Authenticator.instance().user_account.get_selected_bot_raw_data())
def select_bot(bot_id):
interfaces_util.run_in_bot_main_loop(authentication.Authenticator.instance().select_bot(bot_id))
def create_new_bot():
return interfaces_util.run_in_bot_main_loop(authentication.Authenticator.instance().create_new_bot())
def can_select_bot():
return not octobot_constants.COMMUNITY_BOT_ID
def can_logout():
return not authentication.Authenticator.instance().must_be_authenticated_through_authenticator()
def get_user_account_id():
return authentication.Authenticator.instance().get_user_id()
def has_filled_form(form_id):
return authentication.Authenticator.instance().has_filled_form(form_id)
def register_user_submitted_form(user_id, form_id):
try:
if get_user_account_id() != user_id:
return False, "Invalid user id"
interfaces_util.run_in_bot_main_loop(
authentication.Authenticator.instance().register_filled_form(form_id)
)
except Exception as e:
return False, f"Error when registering filled form {e}"
return True, "Thank you for your feedback !"
def METHOD_NAME():
trading_mode = interfaces_util.get_bot_api().get_trading_mode()
if trading_mode is None:
return None
identifier = trading_api.get_trading_mode_followed_strategy_signals_identifier(trading_mode)
if identifier is None:
return None
return authentication.Authenticator.instance().get_signal_community_url(
identifier
)
def is_community_feed_connected():
return authentication.Authenticator.instance().is_feed_connected()
def get_last_signal_time():
return authentication.Authenticator.instance().get_feed_last_message_time()
async def _sync_community_account():
profile_urls = await authentication.Authenticator.instance().get_subscribed_profile_urls()
return octobot_commands.download_missing_profiles(interfaces_util.get_edited_config(dict_only=False), profile_urls)
def sync_community_account():
return interfaces_util.run_in_bot_main_loop(_sync_community_account())
def wait_for_login_if_processing():
try:
interfaces_util.run_in_bot_main_loop(authentication.Authenticator.instance().wait_for_login_if_processing())
except asyncio.TimeoutError:
pass
| null |
996 |
# Copyright 2018-2021 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bpy
from .gltf2_blender_node import BlenderNode
from .gltf2_blender_animation import BlenderAnimation
from .gltf2_blender_vnode import VNode, compute_vnodes
from ..com.gltf2_blender_extras import set_extras
from ...io.imp.gltf2_io_user_extensions import import_user_extensions
class BlenderScene():
"""Blender Scene."""
def __new__(cls, *args, **kwargs):
raise RuntimeError("%s should not be instantiated" % cls)
@staticmethod
def METHOD_NAME(gltf):
"""Scene creation."""
scene = bpy.context.scene
gltf.blender_scene = scene.name
if bpy.context.collection.name in bpy.data.collections: # avoid master collection
gltf.blender_active_collection = bpy.context.collection.name
if scene.render.engine not in ['CYCLES', 'BLENDER_EEVEE']:
scene.render.engine = "BLENDER_EEVEE"
if gltf.data.scene is not None:
import_user_extensions('gather_import_scene_before_hook', gltf, gltf.data.scenes[gltf.data.scene], scene)
pyscene = gltf.data.scenes[gltf.data.scene]
set_extras(scene, pyscene.extras)
compute_vnodes(gltf)
gltf.display_current_node = 0 # for debugging
BlenderNode.create_vnode(gltf, 'root')
        # User extensions after node creation
gltf_scene = None
if gltf.data.scene is not None:
gltf_scene = gltf.data.scenes[gltf.data.scene]
import_user_extensions('gather_import_scene_after_nodes_hook', gltf, gltf_scene, scene)
BlenderScene.create_animations(gltf)
        # User extensions after animation creation
gltf_scene = None
if gltf.data.scene is not None:
gltf_scene = gltf.data.scenes[gltf.data.scene]
import_user_extensions('gather_import_scene_after_animation_hook', gltf, gltf_scene, scene)
if bpy.context.mode != 'OBJECT':
bpy.ops.object.mode_set(mode='OBJECT')
BlenderScene.select_imported_objects(gltf)
BlenderScene.set_active_object(gltf)
@staticmethod
def create_animations(gltf):
"""Create animations."""
# Use a class here, to be able to pass data by reference to hook (to be able to change them inside hook)
class IMPORT_animation_options:
def __init__(self, restore_first_anim: bool = True):
self.restore_first_anim = restore_first_anim
animation_options = IMPORT_animation_options()
import_user_extensions('gather_import_animations', gltf, gltf.data.animations, animation_options)
if gltf.data.animations:
# NLA tracks are added bottom to top, so create animations in
# reverse so the first winds up on top
for anim_idx in reversed(range(len(gltf.data.animations))):
BlenderAnimation.anim(gltf, anim_idx)
# Restore first animation
if animation_options.restore_first_anim:
anim_name = gltf.data.animations[0].track_name
BlenderAnimation.restore_animation(gltf, anim_name)
if hasattr(bpy.data.scenes[0], "gltf2_animation_applied"):
bpy.data.scenes[0].gltf2_animation_applied = bpy.data.scenes[0].gltf2_animation_tracks.find(gltf.data.animations[0].track_name)
@staticmethod
def select_imported_objects(gltf):
"""Select all (and only) the imported objects."""
if bpy.ops.object.select_all.poll():
bpy.ops.object.select_all(action='DESELECT')
for vnode in gltf.vnodes.values():
if vnode.type == VNode.Object:
vnode.blender_object.select_set(state=True)
@staticmethod
def set_active_object(gltf):
"""Make the first root object from the default glTF scene active.
If no default scene, use the first scene, or just any root object.
"""
vnode = None
if gltf.data.scene is not None:
pyscene = gltf.data.scenes[gltf.data.scene]
if pyscene.nodes:
vnode = gltf.vnodes[pyscene.nodes[0]]
if not vnode:
for pyscene in gltf.data.scenes or []:
if pyscene.nodes:
vnode = gltf.vnodes[pyscene.nodes[0]]
break
if not vnode:
vnode = gltf.vnodes['root']
if vnode.type == VNode.DummyRoot:
if not vnode.children:
return # no nodes
vnode = gltf.vnodes[vnode.children[0]]
if vnode.type == VNode.Bone:
vnode = gltf.vnodes[vnode.bone_arma]
bpy.context.view_layer.objects.active = vnode.blender_object
| null |
997 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkehpc.endpoint import endpoint_data
class CreateJobTemplateRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'EHPC', '2018-04-12', 'CreateJobTemplate')
self.set_method('GET')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_StderrRedirectPath(self): # String
return self.get_query_params().get('StderrRedirectPath')
def set_StderrRedirectPath(self, StderrRedirectPath): # String
self.add_query_param('StderrRedirectPath', StderrRedirectPath)
def get_ClockTime(self): # String
return self.get_query_params().get('ClockTime')
def set_ClockTime(self, ClockTime): # String
self.add_query_param('ClockTime', ClockTime)
def get_CommandLine(self): # String
return self.get_query_params().get('CommandLine')
def set_CommandLine(self, CommandLine): # String
self.add_query_param('CommandLine', CommandLine)
def get_ArrayRequest(self): # String
return self.get_query_params().get('ArrayRequest')
def set_ArrayRequest(self, ArrayRequest): # String
self.add_query_param('ArrayRequest', ArrayRequest)
def get_UnzipCmd(self): # String
return self.get_query_params().get('UnzipCmd')
def set_UnzipCmd(self, UnzipCmd): # String
self.add_query_param('UnzipCmd', UnzipCmd)
def get_PackagePath(self): # String
return self.get_query_params().get('PackagePath')
def set_PackagePath(self, PackagePath): # String
self.add_query_param('PackagePath', PackagePath)
def get_Mem(self): # String
return self.get_query_params().get('Mem')
def set_Mem(self, Mem): # String
self.add_query_param('Mem', Mem)
def get_StdoutRedirectPath(self): # String
return self.get_query_params().get('StdoutRedirectPath')
def set_StdoutRedirectPath(self, StdoutRedirectPath): # String
self.add_query_param('StdoutRedirectPath', StdoutRedirectPath)
def get_Variables(self): # String
return self.get_query_params().get('Variables')
def set_Variables(self, Variables): # String
self.add_query_param('Variables', Variables)
def get_RunasUser(self): # String
return self.get_query_params().get('RunasUser')
def set_RunasUser(self, RunasUser): # String
self.add_query_param('RunasUser', RunasUser)
def get_ReRunable(self): # Boolean
return self.get_query_params().get('ReRunable')
def set_ReRunable(self, ReRunable): # Boolean
self.add_query_param('ReRunable', ReRunable)
def get_Thread(self): # Integer
return self.get_query_params().get('Thread')
def set_Thread(self, Thread): # Integer
self.add_query_param('Thread', Thread)
def get_Priority(self): # Integer
return self.get_query_params().get('Priority')
def set_Priority(self, Priority): # Integer
self.add_query_param('Priority', Priority)
def get_Gpu(self): # Integer
return self.get_query_params().get('Gpu')
def set_Gpu(self, Gpu): # Integer
self.add_query_param('Gpu', Gpu)
def METHOD_NAME(self): # Boolean
return self.get_query_params().get('WithUnzipCmd')
def set_WithUnzipCmd(self, WithUnzipCmd): # Boolean
self.add_query_param('WithUnzipCmd', WithUnzipCmd)
def get_Node(self): # Integer
return self.get_query_params().get('Node')
def set_Node(self, Node): # Integer
self.add_query_param('Node', Node)
def get_Task(self): # Integer
return self.get_query_params().get('Task')
def set_Task(self, Task): # Integer
self.add_query_param('Task', Task)
def get_InputFileUrl(self): # String
return self.get_query_params().get('InputFileUrl')
def set_InputFileUrl(self, InputFileUrl): # String
self.add_query_param('InputFileUrl', InputFileUrl)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
def get_Queue(self): # String
return self.get_query_params().get('Queue')
def set_Queue(self, Queue): # String
self.add_query_param('Queue', Queue)
| null |
998 |
##########################################################################
#
# Copyright (c) 2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import re
import fnmatch
import IECore
## This file implements functions which are useful but don't make sense
# as methods of any particular Parameter class.
## Recurses down from rootParameter, searching for classes held by
# ClassParameter and ClassVectorParameter instances. Returns a list
# of dictionaries of the following form :
#
# {
# "parent" : classParameter | classVectorParameter
# "parameterPath" : [ "path", "to", "parameter" ],
# "uiPath" : [ "path", "using", "labels" ],
# "classInstance" : instance
# }
#
# The classTypeFilter parameter can specify an optional tuple of class types
# which are used to return only a subset of types - this will passed
# to isinstance() calls.
#
# The classNameFilter parameter specified an optional string which will be
# used with fnmatch to filter based on the class path.
def findClasses( rootParameter, classTypeFilter=(IECore.Parameterised,), classNameFilter="*" ) :
result = []
__findClassesWalk( rootParameter, [], [], classTypeFilter, re.compile( fnmatch.translate( classNameFilter ) ), result )
return result
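## A hedged usage sketch, not part of the original module : given any root parameter, return a
## "path/to/parameter -> class path" string for every class found beneath it, using only the
## dictionary keys documented above.
def __exampleFindClassesUsage( rootParameter ) :
	return [
		"/".join( found["parameterPath"] ) + " -> " + found["classInstance"].path
		for found in findClasses( rootParameter )
	]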
def __findClassesWalk( parameter, parameterPath, uiPath, classTypeFilter, classNameFilter, result ) :
if isinstance( parameter, IECore.ClassParameter ) :
cl = parameter.getClass()
if cl and isinstance( cl, classTypeFilter ) and classNameFilter.match( cl.path ) :
result.append(
{
"parent" : parameter,
"parameterPath" : parameterPath,
"uiPath" : uiPath,
"classInstance" : cl
}
)
elif isinstance( parameter, IECore.ClassVectorParameter ) :
cls = parameter.getClasses( True )
for cl in cls :
if isinstance( cl[0], classTypeFilter ) and classNameFilter.match( cl[0].path ) :
label = cl[1]
if cl[0].parameters().has_key( "label" ) :
label = cl[0]["label"].getTypedValue()
result.append(
{
"parent" : parameter,
"parameterPath" : parameterPath + [ cl[1] ],
"uiPath" : uiPath + [ label ],
"classInstance" : cl[0]
}
)
if isinstance( parameter, IECore.CompoundParameter ) :
for n, p in parameter.items() :
newParameterPath = parameterPath[:] + [ n ]
			newUIPath = uiPath[:] + [ n ]
__findClassesWalk( p, newParameterPath, newUIPath, classTypeFilter, classNameFilter, result )
## Recurses down from srcParameter and dstParameter simultaneously, syncing the dstParameter tree to
# srcParameter by making sure the ClassParameters and ClassVectorParameters there hold instances of the same classes
# that are held on the srcParameter side.
def METHOD_NAME( srcParameter, dstParameter ) :
if isinstance( srcParameter, IECore.ClassParameter ) and isinstance( dstParameter, IECore.ClassParameter ) :
c = srcParameter.getClass( True )
dstParameter.setClass( *c[1:] )
if isinstance( srcParameter, IECore.ClassVectorParameter ) and isinstance( dstParameter, IECore.ClassVectorParameter ) :
c = srcParameter.getClasses( True )
dstParameter.setClasses( [ cc[1:] for cc in c ] )
if isinstance( srcParameter, IECore.CompoundParameter ) and isinstance( dstParameter, IECore.CompoundParameter ) :
for n, p in srcParameter.items() :
if dstParameter.has_key( n ) :
METHOD_NAME( p, dstParameter[n] )
| null |
999 |
import math
from pathlib import Path
from brownian_motion import brownian_data, brownian_widget
from mediapipe import hand_to_camera_eye, xyz_mean
from shinymediapipe import input_hand
from shinywidgets import output_widget, register_widget
from shiny import App, reactive, render, req, ui
# Check that JS prerequisites are installed
if not (Path(__file__).parent / "shinymediapipe" / "node_modules").is_dir():
raise RuntimeError(
"Mediapipe dependencies are not installed. "
"Please run `npm install` in the 'shinymediapipe' subdirectory."
)
# Set to True to see underlying XYZ values and canvas
debug = True
app_ui = ui.page_fluid(
ui.input_action_button("data_btn", "New Data"),
output_widget("plot"),
input_hand("hand", debug=debug, throttle_delay_secs=0.05),
(
ui.panel_fixed(
ui.div(ui.tags.strong("x:"), ui.output_text("x_debug", inline=True)),
ui.div(ui.tags.strong("y:"), ui.output_text("y_debug", inline=True)),
ui.div(ui.tags.strong("z:"), ui.output_text("z_debug", inline=True)),
ui.div(ui.tags.strong("mag:"), ui.output_text("mag_debug", inline=True)),
left="12px",
bottom="12px",
width="200px",
height="auto",
class_="d-flex flex-column justify-content-end",
)
if debug
else None
),
class_="p-3",
)
def server(input, output, session):
# BROWNIAN MOTION ====
@reactive.Calc
def METHOD_NAME():
"""Generates brownian data whenever 'New Data' is clicked"""
input.data_btn()
return brownian_data(n=200)
# Create Plotly 3D widget and bind it to output_widget("plot")
widget = brownian_widget(600, 600)
register_widget("plot", widget)
@reactive.Effect
def update_plotly_data():
walk = METHOD_NAME()
layer = widget.data[0]
layer.x = walk["x"]
layer.y = walk["y"]
layer.z = walk["z"]
layer.marker.color = walk["z"]
# HAND TRACKING ====
@reactive.Calc
def camera_eye():
"""The eye position, as reflected by the hand input"""
hand_val = input.hand()
req(hand_val)
res = hand_to_camera_eye(hand_val, detect_ok=True)
req(res)
return res
# The raw data is a little jittery. Smooth it out by averaging a few samples
smooth_camera_eye = reactive_smooth(n_samples=5, smoother=xyz_mean)(camera_eye)
@reactive.Effect
def update_plotly_camera():
"""Update Plotly camera using the hand tracking"""
widget.layout.scene.camera.eye = smooth_camera_eye()
# DEBUGGING ====
@output
@render.text
def x_debug():
return camera_eye()["x"]
@output
@render.text
def y_debug():
return camera_eye()["y"]
@output
@render.text
def z_debug():
return camera_eye()["z"]
@output
@render.text
def mag_debug():
eye = camera_eye()
return f"{round(math.sqrt(eye['x']**2 + eye['y']**2 + eye['z']**2), 2)}"
app = App(app_ui, server)
def reactive_smooth(n_samples, smoother, *, filter_none=True):
"""Decorator for smoothing out reactive calculations over multiple samples"""
def wrapper(calc):
buffer = [] # Ring buffer of capacity `n_samples`
result = reactive.Value(None) # Holds the most recent smoothed result
@reactive.Effect
def _():
# Get latest value. Because this is happening in a reactive Effect, we'll
# automatically take a reactive dependency on whatever is happening in the
# calc().
new_value = calc()
buffer.append(new_value)
while len(buffer) > n_samples:
buffer.pop(0)
if not filter_none:
result.set(smoother(buffer))
else:
# The filter cannot handle None values; remove any in the buffer
filt_samples = [s for s in buffer if s is not None]
if len(filt_samples) == 0:
result.set(None)
else:
result.set(smoother(filt_samples))
# The return value for the wrapper
return result.get
return wrapper
| null |