content (stringlengths 27-928k) | path (stringlengths 4-230) | size (int64 27-928k) | nl_text (stringlengths 21-396k) | nl_size (int64 21-396k) | nl_language (stringlengths 2-3) | nl_language_score (float64 0.04-1)
---|---|---|---|---|---|---|
# File: gsgmail_process_email.py
# Copyright (c) 2017-2021 Splunk Inc.
#
# Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
import email
import tempfile
from collections import OrderedDict
import os
import re
from bs4 import BeautifulSoup, UnicodeDammit
import phantom.app as phantom
import phantom.utils as ph_utils
import mimetypes
import socket
from email.header import decode_header, make_header
import shutil
import hashlib
import json
import magic
import random
import string
import phantom.rules as phantom_rules
from gsgmail_consts import *
import sys
from requests.structures import CaseInsensitiveDict
_container_common = {
"run_automation": False # Don't run any playbooks, when this artifact is added
}
_artifact_common = {
"run_automation": False # Don't run any playbooks, when this artifact is added
}
FILE_EXTENSIONS = {
'.vmsn': ['os memory dump', 'vm snapshot file'],
'.vmss': ['os memory dump', 'vm suspend file'],
'.js': ['javascript'],
'.doc': ['doc'],
'.docx': ['doc'],
'.xls': ['xls'],
'.xlsx': ['xls'],
}
MAGIC_FORMATS = [
(re.compile('^PE.* Windows'), ['pe file', 'hash']),
(re.compile('^MS-DOS executable'), ['pe file', 'hash']),
(re.compile('^PDF '), ['pdf']),
(re.compile('^MDMP crash'), ['process dump']),
(re.compile('^Macromedia Flash'), ['flash']),
]
EWS_DEFAULT_ARTIFACT_COUNT = 100
EWS_DEFAULT_CONTAINER_COUNT = 100
HASH_FIXED_PHANTOM_VERSION = "2.0.201"
OFFICE365_APP_ID = "a73f6d32-c9d5-4fec-b024-43876700daa6"
EXCHANGE_ONPREM_APP_ID = "badc5252-4a82-4a6d-bc53-d1e503857124"
IMAP_APP_ID = "9f2e9f72-b0e5-45d6-92a7-09ef820476c1"
uri_regexc = re.compile(URI_REGEX)
email_regexc = re.compile(EMAIL_REGEX, re.IGNORECASE)
email_regexc2 = re.compile(EMAIL_REGEX2, re.IGNORECASE)
hash_regexc = re.compile(HASH_REGEX)
ip_regexc = re.compile(IP_REGEX)
ipv6_regexc = re.compile(IPV6_REGEX)
class ProcessMail:
def __init__(self, base_connector, config):
self._base_connector = base_connector
self._config = config
self._email_id_contains = list()
self._container = dict()
self._artifacts = list()
self._attachments = list()
self._python_version = None
try:
self._python_version = int(sys.version_info[0])
except Exception:
raise Exception("Error occurred while getting the Phantom server's Python major version.")
def _get_file_contains(self, file_path):
contains = []
ext = os.path.splitext(file_path)[1]
contains.extend(FILE_EXTENSIONS.get(ext, []))
magic_str = magic.from_file(file_path)
for regex, cur_contains in MAGIC_FORMATS:
if regex.match(magic_str):
contains.extend(cur_contains)
return contains
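# Illustrative note (not part of the original module): the returned "contains" list is the union of
# the FILE_EXTENSIONS lookup and every MAGIC_FORMATS label whose regex matches the libmagic string.
# For a hypothetical file "report.pdf" whose magic description is "PDF document, version 1.7",
# the extension lookup adds nothing and the '^PDF ' regex matches, so the result is ['pdf'].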
def _is_ip(self, input_ip):
if ph_utils.is_ip(input_ip):
return True
if self.is_ipv6(input_ip):
return True
return False
def is_ipv6(self, input_ip):
try:
socket.inet_pton(socket.AF_INET6, input_ip)
except Exception:
return False
return True
def _clean_url(self, url):
url = url.strip('>),.]\r\n')
# Check before slicing: find() returns -1 when the character is not found,
# and slicing on -1 would cut the URL in the wrong place
if '<' in url:
url = url[:url.find('<')]
elif '>' in url:
url = url[:url.find('>')]
return url
def _extract_urls_domains(self, file_data, urls, domains):
if not self._config[PROC_EMAIL_JSON_EXTRACT_DOMAINS] and not self._config[PROC_EMAIL_JSON_EXTRACT_URLS]:
return
# try to load the email
try:
soup = BeautifulSoup(file_data, "html.parser")
except Exception as e:
self._base_connector.debug_print(e)
return
uris = []
# get all tags that have hrefs
links = soup.find_all(href=True)
if links:
# it's html, so get all the urls
uris = [x['href'] for x in links if (not x['href'].startswith('mailto:'))]
# also check the text part of the links; it may contain http URLs that differ from the href
# and were either missed by uri_regexc while parsing the text or had no text counterpart
# in the email
uri_text = [self._clean_url(x.get_text()) for x in links]
if uri_text:
uri_text = [x for x in uri_text if x.startswith('http')]
if uri_text:
uris.extend(uri_text)
else:
# Parse it as a text file
uris = re.findall(uri_regexc, file_data)
if uris:
uris = [self._clean_url(x) for x in uris]
if self._config[PROC_EMAIL_JSON_EXTRACT_URLS]:
# add the uris to the urls
urls |= set(uris)
if self._config[PROC_EMAIL_JSON_EXTRACT_DOMAINS]:
for uri in uris:
domain = phantom.get_host_from_url(uri)
if domain and not self._is_ip(domain):
domains.add(domain)
# work on any mailto urls if present
if links:
mailtos = [x['href'] for x in links if (x['href'].startswith('mailto:'))]
for curr_email in mailtos:
domain = curr_email[curr_email.find('@') + 1:]
if domain and not self._is_ip(domain):
domains.add(domain)
return
def _get_ips(self, file_data, ips):
# First extract what looks like an IP from the file, this is a faster operation
ips_in_mail = re.findall(ip_regexc, file_data)
ip6_in_mail = re.findall(ipv6_regexc, file_data)
if ip6_in_mail:
for ip6_tuple in ip6_in_mail:
ip6s = [x for x in ip6_tuple if x]
ips_in_mail.extend(ip6s)
# Now validate them
if ips_in_mail:
ips_in_mail = set(ips_in_mail)
ips_in_mail = [x for x in ips_in_mail if self._is_ip(x)]
if ips_in_mail:
ips |= set(ips_in_mail)
def _handle_body(self, body, parsed_mail, email_id):
local_file_path = body['file_path']
ips = parsed_mail[PROC_EMAIL_JSON_IPS]
hashes = parsed_mail[PROC_EMAIL_JSON_HASHES]
urls = parsed_mail[PROC_EMAIL_JSON_URLS]
domains = parsed_mail[PROC_EMAIL_JSON_DOMAINS]
file_data = None
try:
with open(local_file_path, 'r') as f:
file_data = f.read()
except Exception:
with open(local_file_path, 'rb') as f:
file_data = f.read()
self._base_connector.debug_print("Reading file data using binary mode")
if (file_data is None) or (len(file_data) == 0):
return phantom.APP_ERROR
file_data = UnicodeDammit(file_data).unicode_markup.encode('utf-8').decode('utf-8')
self._parse_email_headers_as_inline(file_data, parsed_mail, email_id)
if self._config[PROC_EMAIL_JSON_EXTRACT_DOMAINS]:
emails = []
emails.extend(re.findall(email_regexc, file_data))
emails.extend(re.findall(email_regexc2, file_data))
for curr_email in emails:
domain = curr_email[curr_email.rfind('@') + 1:]
if domain and (not ph_utils.is_ip(domain)):
domains.add(domain)
self._extract_urls_domains(file_data, urls, domains)
if self._config[PROC_EMAIL_JSON_EXTRACT_IPS]:
self._get_ips(file_data, ips)
if self._config[PROC_EMAIL_JSON_EXTRACT_HASHES]:
hashs_in_mail = re.findall(hash_regexc, file_data)
if hashs_in_mail:
hashes |= set(hashs_in_mail)
return phantom.APP_SUCCESS
def _add_artifacts(self, cef_key, input_set, artifact_name, start_index, artifacts):
added_artifacts = 0
for entry in input_set:
# ignore empty entries
if not entry:
continue
artifact = {}
artifact.update(_artifact_common)
artifact['source_data_identifier'] = start_index + added_artifacts
artifact['cef'] = {cef_key: entry}
artifact['name'] = artifact_name
self._base_connector.debug_print('Artifact:', artifact)
artifacts.append(artifact)
added_artifacts += 1
return added_artifacts
def _parse_email_headers_as_inline(self, file_data, parsed_mail, email_id):
# remove the 'Forwarded Message' from the email text and parse it
p = re.compile(r'(?<=\r\n).*Forwarded Message.*\r\n', re.IGNORECASE)
email_text = p.sub('', file_data.strip())
mail = email.message_from_string(email_text)
self._parse_email_headers(parsed_mail, mail, add_email_id=email_id)
return phantom.APP_SUCCESS
def _add_email_header_artifacts(self, email_header_artifacts, start_index, artifacts):
added_artifacts = 0
for artifact in email_header_artifacts:
artifact['source_data_identifier'] = start_index + added_artifacts
artifacts.append(artifact)
added_artifacts += 1
return added_artifacts
def _create_artifacts(self, parsed_mail):
# get all the artifact data in their own list objects
ips = parsed_mail[PROC_EMAIL_JSON_IPS]
hashes = parsed_mail[PROC_EMAIL_JSON_HASHES]
urls = parsed_mail[PROC_EMAIL_JSON_URLS]
domains = parsed_mail[PROC_EMAIL_JSON_DOMAINS]
email_headers = parsed_mail[PROC_EMAIL_JSON_EMAIL_HEADERS]
# set the default artifact dict
artifact_id = 0
# add artifacts
added_artifacts = self._add_artifacts('sourceAddress', ips, 'IP Artifact', artifact_id, self._artifacts)
artifact_id += added_artifacts
added_artifacts = self._add_artifacts('fileHash', hashes, 'Hash Artifact', artifact_id, self._artifacts)
artifact_id += added_artifacts
added_artifacts = self._add_artifacts('requestURL', urls, 'URL Artifact', artifact_id, self._artifacts)
artifact_id += added_artifacts
added_artifacts = self._add_artifacts('destinationDnsDomain', domains, 'Domain Artifact', artifact_id, self._artifacts)
artifact_id += added_artifacts
added_artifacts = self._add_email_header_artifacts(email_headers, artifact_id, self._artifacts)
artifact_id += added_artifacts
return phantom.APP_SUCCESS
def _decode_uni_string(self, input_str, def_name):
# try to find all the decoded strings, we could have multiple decoded strings
# or a single decoded string between two normal strings separated by \r\n
# YEAH...it could get that messy
encoded_strings = re.findall(r'=\?.*?\?=', input_str, re.I)
# return input_str as is, no need to do any conversion
if not encoded_strings:
return input_str
# get the decoded strings
try:
decoded_strings = [decode_header(x)[0] for x in encoded_strings]
decoded_strings = [{'value': x[0], 'encoding': x[1]} for x in decoded_strings]
except Exception as e:
error_code, error_msg = self._get_error_message_from_exception(e)
self._base_connector.debug_print("Decoding: {0}. Error code: {1}. Error message: {2}".format(encoded_strings, error_code, error_msg))
return def_name
# convert to dict for safe access, if it's an empty list, the dict will be empty
decoded_strings = dict(enumerate(decoded_strings))
new_str = ''
new_str_create_count = 0
for i, encoded_string in enumerate(encoded_strings):
decoded_string = decoded_strings.get(i)
if not decoded_string:
# nothing to replace with
continue
value = decoded_string.get('value')
encoding = decoded_string.get('encoding')
if not encoding or not value:
# nothing to replace with
continue
try:
if encoding != 'utf-8':
value = str(value, encoding)
except Exception:
pass
try:
# commenting the existing approach due to a new approach being deployed below
# substitute the encoded string with the decoded one
# input_str = input_str.replace(encoded_string, value)
# make a new string instead of replacing within the input string, because of an issue found in PAPP-9531
if value:
new_str += UnicodeDammit(value).unicode_markup
new_str_create_count += 1
except Exception:
pass
# replace the input string with the new string because of an issue found in PAPP-9531
if new_str and new_str_create_count == len(encoded_strings):
self._base_connector.debug_print("Creating a new string entirely from the encoded_strings and assiging into input_str")
input_str = new_str
return input_str
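# Illustrative sketch (hypothetical input, not from the original code): for a subject such as
#   '=?utf-8?B?SGVsbG8=?= world'
# the regex finds the encoded word '=?utf-8?B?SGVsbG8=?=', decode_header() yields (b'Hello', 'utf-8'),
# and because every encoded word decoded successfully the method returns the rebuilt string 'Hello'
# (plain-text remainders are intentionally dropped by the PAPP-9531 workaround above).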
def _get_container_name(self, parsed_mail, email_id):
# Create the default name
def_cont_name = "Email ID: {0}".format(email_id)
# get the subject from the parsed mail
subject = parsed_mail.get(PROC_EMAIL_JSON_SUBJECT)
# if no subject then return the default
if not subject:
return def_cont_name
try:
return str(make_header(decode_header(subject)))
except Exception:
return self._decode_uni_string(subject, def_cont_name)
def _handle_if_body(self, content_disp, content_type, part, bodies, file_path, parsed_mail):
process_as_body = False
# if content disposition is None then assume that it is the body
if content_disp is None:
process_as_body = True
# if content disposition is inline
elif content_disp.lower().strip() == 'inline':
if ('text/html' in content_type) or ('text/plain' in content_type):
process_as_body = True
if not process_as_body:
return phantom.APP_SUCCESS, True
part_payload = part.get_payload(decode=True)
if not part_payload:
return phantom.APP_SUCCESS, False
charset = part.get_content_charset()
with open(file_path, 'wb') as f: # noqa
f.write(part_payload)
bodies.append({'file_path': file_path, 'charset': charset})
self._add_body_in_email_headers(parsed_mail, file_path, charset, content_type)
return phantom.APP_SUCCESS, False
def _handle_part(self, part, part_index, tmp_dir, extract_attach, parsed_mail):
bodies = parsed_mail[PROC_EMAIL_JSON_BODIES]
files = parsed_mail[PROC_EMAIL_JSON_FILES]
# get the file_name
file_name = part.get_filename()
content_disp = part.get('Content-Disposition')
content_type = part.get('Content-Type')
content_id = part.get('Content-ID')
if file_name is None:
# init name and extension to default values
name = "part_{0}".format(part_index)
extension = ".{0}".format(part_index)
# Try to create an extension from the content type if possible
if content_type is not None:
extension = mimetypes.guess_extension(re.sub(';.*', '', content_type))
# Try to create a name from the content id if possible
if content_id is not None:
name = content_id
file_name = "{0}{1}".format(name, extension)
else:
try:
file_name = str(make_header(decode_header(file_name)))
except Exception:
file_name = self._decode_uni_string(file_name, file_name)
# Remove any chars that we don't want in the name
file_path = "{0}/{1}_{2}".format(tmp_dir, part_index,
file_name.translate(str.maketrans("", "", ''.join(['<', '>', ' ']))))
self._base_connector.debug_print("file_path: {0}".format(file_path))
# is the part representing the body of the email
status, process_further = self._handle_if_body(content_disp, content_type, part, bodies, file_path, parsed_mail)
if not process_further:
return phantom.APP_SUCCESS
# is this another email as an attachment
if (content_type is not None) and (content_type.find(PROC_EMAIL_CONTENT_TYPE_MESSAGE) != -1):
return phantom.APP_SUCCESS
# This is an attachment, first check if it is another email or not
if extract_attach:
_, file_extension = os.path.splitext(file_name)
part_payload = part.get_payload(decode=True)
if not part_payload:
return phantom.APP_SUCCESS
try:
with open(file_path, 'wb') as f: # noqa
f.write(part_payload)
files.append({'file_name': file_name, 'file_path': file_path})
except IOError as e:
error_msg = str(e)
if "File name too long" in error_msg:
self.write_with_new_filename(tmp_dir, part_payload, file_extension, files, as_byte=False)
else:
self._base_connector.debug_print('Failed to write file: {}'.format(e))
return phantom.APP_SUCCESS
def _get_file_name(self, input_str):
try:
return str(make_header(decode_header(input_str)))
except Exception:
return self._decode_uni_string(input_str, input_str)
def _parse_email_headers(self, parsed_mail, part, charset=None, add_email_id=None):
email_header_artifacts = parsed_mail[PROC_EMAIL_JSON_EMAIL_HEADERS]
email_headers = part.items()
if not email_headers:
return 0
# Parse email keys first
headers = self._get_email_headers_from_part(part, charset)
cef_artifact = {}
cef_types = {}
if headers.get('From'):
emails = headers['From']
if emails:
cef_artifact.update({'fromEmail': emails})
if headers.get('To'):
emails = headers['To']
if emails:
cef_artifact.update({'toEmail': emails})
message_id = headers.get('Message-ID')
# if the header did not contain any email addresses and message ID then ignore this artifact
if not cef_artifact and not message_id:
return 0
cef_types.update({'fromEmail': ['email'], 'toEmail': ['email']})
if headers:
cef_artifact['emailHeaders'] = headers
# Adding the email id as a cef artifact crashes the UI when trying to show the action dialog box
# so it is not added right now. All the other code to process the emailId is present; we are just
# refraining from adding the emailId to the artifact
# add_email_id = False
if add_email_id:
cef_artifact['emailId'] = add_email_id
if self._email_id_contains:
cef_types.update({'emailId': self._email_id_contains})
artifact = {}
artifact.update(_artifact_common)
artifact['name'] = 'Email Artifact'
artifact['cef'] = cef_artifact
artifact['cef_types'] = cef_types
email_header_artifacts.append(artifact)
return len(email_header_artifacts)
def _get_email_headers_from_part(self, part, charset=None):
email_headers = list(part.items())
# TODO: the next 2 ifs can be condensed to use 'or'
if charset is None:
charset = part.get_content_charset()
if charset is None:
charset = 'utf8'
if not email_headers:
return {}
# Convert the header tuple into a dictionary
headers = CaseInsensitiveDict()
try:
[headers.update({x[0]: self._get_string(x[1], charset)}) for x in email_headers]
except Exception as e:
error_code, error_msg = self._get_error_message_from_exception(e)
err = "Error occurred while converting the header tuple into a dictionary"
self._base_connector.debug_print("{}. {}. {}".format(err, error_code, error_msg))
# Handle the 'Received' headers separately
received_headers = []
try:
received_headers = [self._get_string(x[1], charset) for x in email_headers if x[0].lower() == 'received']
except Exception as e:
error_code, error_msg = self._get_error_message_from_exception(e)
err = "Error occurred while handling the received header tuple separately"
self._base_connector.debug_print("{}. {}. {}".format(err, error_code, error_msg))
if received_headers:
headers['Received'] = received_headers
# handle the subject string, if required add a new key
subject = headers.get('Subject')
if subject:
try:
headers['decodedSubject'] = str(make_header(decode_header(subject)))
except Exception:
headers['decodedSubject'] = self._decode_uni_string(subject, subject)
return dict(headers)
def _get_error_message_from_exception(self, e):
""" This method is used to get appropriate error message from the exception.
:param e: Exception object
:return: error code and error message
"""
try:
if e.args:
if len(e.args) > 1:
error_code = e.args[0]
error_msg = e.args[1]
elif len(e.args) == 1:
error_code = "Error code unavailable"
error_msg = e.args[0]
else:
error_code = "Error code unavailable"
error_msg = "Error message unavailable. Please check the asset configuration and|or action parameters."
except Exception:
error_code = "Error code unavailable"
error_msg = "Error message unavailable. Please check the asset configuration and|or action parameters."
return error_code, error_msg
def _handle_mail_object(self, mail, email_id, rfc822_email, tmp_dir, start_time_epoch):
parsed_mail = OrderedDict()
# Create a tmp directory for this email; all files will be extracted here
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
extract_attach = self._config[PROC_EMAIL_JSON_EXTRACT_ATTACHMENTS]
charset = mail.get_content_charset()
if charset is None:
charset = 'utf-8'
# Extract fields and place it in a dictionary
parsed_mail[PROC_EMAIL_JSON_SUBJECT] = mail.get('Subject', '')
parsed_mail[PROC_EMAIL_JSON_FROM] = mail.get('From', '')
parsed_mail[PROC_EMAIL_JSON_TO] = mail.get('To', '')
parsed_mail[PROC_EMAIL_JSON_DATE] = mail.get('Date', '')
parsed_mail[PROC_EMAIL_JSON_MSG_ID] = mail.get('Message-ID', '')
parsed_mail[PROC_EMAIL_JSON_FILES] = files = []
parsed_mail[PROC_EMAIL_JSON_BODIES] = bodies = []
parsed_mail[PROC_EMAIL_JSON_START_TIME] = start_time_epoch
parsed_mail[PROC_EMAIL_JSON_EMAIL_HEADERS] = []
# parse the parts of the email
if mail.is_multipart():
for i, part in enumerate(mail.walk()):
add_email_id = None
if i == 0:
add_email_id = email_id
self._parse_email_headers(parsed_mail, part, add_email_id=add_email_id)
self._base_connector.debug_print("part: {0}".format(part.__dict__))
self._base_connector.debug_print("part type", type(part))
if part.is_multipart():
self.check_and_update_eml(part)
continue
try:
ret_val = self._handle_part(part, i, tmp_dir, extract_attach, parsed_mail)
except Exception as e:
self._base_connector.debug_print("ErrorExp in _handle_part # {0}".format(i), e)
continue
if phantom.is_fail(ret_val):
continue
else:
self._parse_email_headers(parsed_mail, mail, add_email_id=email_id)
# parsed_mail[PROC_EMAIL_JSON_EMAIL_HEADERS].append(mail.items())
file_path = "{0}/part_1.text".format(tmp_dir)
with open(file_path, 'wb') as f: # noqa
f.write(mail.get_payload(decode=True))
bodies.append({'file_path': file_path, 'charset': charset})
self._add_body_in_email_headers(parsed_mail, file_path, mail.get_content_charset(), 'text/plain')
# get the container name
container_name = self._get_container_name(parsed_mail, email_id)
if container_name is None:
return phantom.APP_ERROR
# Add the container
# first save the container, to do that copy things from parsed_mail to a new object
container = {}
container_data = dict(parsed_mail)
# delete the header info, we don't make it part of the container json
del (container_data[PROC_EMAIL_JSON_EMAIL_HEADERS])
container.update(_container_common)
self._container['source_data_identifier'] = email_id
self._container['name'] = container_name
self._container['data'] = {'raw_email': rfc822_email}
# Create the sets before handling the bodies. If both bodies add the same IP,
# only one artifact should be created
parsed_mail[PROC_EMAIL_JSON_IPS] = set()
parsed_mail[PROC_EMAIL_JSON_HASHES] = set()
parsed_mail[PROC_EMAIL_JSON_URLS] = set()
parsed_mail[PROC_EMAIL_JSON_DOMAINS] = set()
# For bodies
for i, body in enumerate(bodies):
if not body:
continue
try:
self._handle_body(body, parsed_mail, email_id)
except Exception as e:
self._base_connector.debug_print("ErrorExp in _handle_body # {0}: {1}".format(i, str(e)))
continue
# Files
self._attachments.extend(files)
self._create_artifacts(parsed_mail)
return phantom.APP_SUCCESS
def _add_body_in_email_headers(self, parsed_mail, file_path, charset, content_type):
# Add email_bodies to email_headers
email_headers = parsed_mail[PROC_EMAIL_JSON_EMAIL_HEADERS]
try:
with open(file_path, 'r') as f:
body_content = f.read()
except Exception:
with open(file_path, 'rb') as f:
body_content = f.read()
self._base_connector.debug_print("Reading file data using binary mode")
# Add body to the last added Email artifact
body_content = UnicodeDammit(body_content).unicode_markup.encode('utf-8').decode('utf-8')
if 'text/plain' in content_type:
try:
email_headers[-1]['cef']['bodyText'] = self._get_string(
body_content, charset)
except Exception as e:
try:
email_headers[-1]['cef']['bodyText'] = str(make_header(decode_header(body_content)))
except Exception:
email_headers[-1]['cef']['bodyText'] = self._decode_uni_string(body_content, body_content)
error_code, error_msg = self._get_error_message_from_exception(e)
err = "Error occurred while parsing text/plain body content for creating artifacts"
self._base_connector.debug_print("{}. {}. {}".format(err, error_code, error_msg))
elif 'text/html' in content_type:
try:
email_headers[-1]['cef']['bodyHtml'] = self._get_string(
body_content, charset)
except Exception as e:
try:
email_headers[-1]['cef']['bodyHtml'] = str(make_header(decode_header(body_content)))
except Exception:
email_headers[-1]['cef']['bodyHtml'] = self._decode_uni_string(body_content, body_content)
error_code, error_msg = self._get_error_message_from_exception(e)
err = "Error occurred while parsing text/html body content for creating artifacts"
self._base_connector.debug_print("{}. {}. {}".format(err, error_code, error_msg))
else:
if not email_headers[-1]['cef'].get('bodyOther'):
email_headers[-1]['cef']['bodyOther'] = {}
try:
email_headers[-1]['cef']['bodyOther'][content_type] = self._get_string(
body_content, charset)
except Exception as e:
try:
email_headers[-1]['cef']['bodyOther'][content_type] = str(make_header(decode_header(body_content)))
except Exception:
email_headers[-1]['cef']['bodyOther'][content_type] = self._decode_uni_string(body_content, body_content)
error_code, error_msg = self._get_error_message_from_exception(e)
err = "Error occurred while parsing bodyOther content for creating artifacts"
self._base_connector.debug_print("{}. {}. {}".format(err, error_code, error_msg))
def _get_string(self, input_str, charset):
try:
if input_str:
if self._python_version == 2:
input_str = UnicodeDammit(input_str).unicode_markup.encode(charset)
else:
input_str = UnicodeDammit(input_str).unicode_markup.encode(charset).decode(charset)
except Exception:
try:
input_str = str(make_header(decode_header(input_str)))
except Exception:
input_str = self._decode_uni_string(input_str, input_str)
self._base_connector.debug_print(
"Error occurred while converting to string with specific encoding {}".format(input_str))
return input_str
def _set_email_id_contains(self, email_id):
if not self._base_connector:
return
try:
email_id = self._get_string(email_id, 'utf-8')
except Exception:
email_id = str(email_id)
if self._base_connector.get_app_id() == EXCHANGE_ONPREM_APP_ID and email_id.endswith('='):
self._email_id_contains = ["exchange email id"]
elif self._base_connector.get_app_id() == OFFICE365_APP_ID and email_id.endswith('='):
self._email_id_contains = ["office 365 email id"]
elif self._base_connector.get_app_id() == IMAP_APP_ID and email_id.isdigit():
self._email_id_contains = ["imap email id"]
elif ph_utils.is_sha1(email_id):
self._email_id_contains = ["vault id"]
return
def _int_process_email(self, rfc822_email, email_id, start_time_epoch):
mail = email.message_from_string(rfc822_email)
tmp_dir = tempfile.mkdtemp(prefix='ph_email')
try:
ret_val = self._handle_mail_object(mail, email_id, rfc822_email, tmp_dir, start_time_epoch)
except Exception as e:
message = "ErrorExp in _handle_mail_object: {0}".format(e)
self._base_connector.debug_print(message)
return phantom.APP_ERROR, message, []
results = [{'container': self._container, 'artifacts': self._artifacts, 'files': self._attachments, 'temp_directory': tmp_dir}]
return ret_val, PROC_EMAIL_PARSED, results
def check_and_update_eml(self, part):
if self._config[PROC_EMAIL_JSON_EXTRACT_EMAIL_ATTACHMENTS]:
tmp_dir = None
msg = None
file_extension = ''
try:
tmp_dir = tempfile.mkdtemp(prefix='ph_email')
filename = self._get_file_name(part.get_filename())
_, file_extension = os.path.splitext(filename)
if filename.endswith('.eml'):
file_path = os.path.join(tmp_dir, filename)
msg = part.get_payload()[0]
with open(file_path, 'wb') as f: # noqa
f.write(msg.as_bytes())
self._attachments.append({'file_name': filename, 'file_path': file_path})
except IOError as e:
error_msg = str(e)
if "File name too long" in error_msg:
self.write_with_new_filename(tmp_dir, msg, file_extension, self._attachments, as_byte=True)
else:
self._base_connector.debug_print('Failed to write file: {}'.format(e))
except Exception as e:
self._base_connector.debug_print("Exception occurred: {}".format(e))
def write_with_new_filename(self, tmp_dir, data, file_extension, dict_to_fill, as_byte=False):
try:
random_suffix = '_' + ''.join(random.SystemRandom().choice(string.ascii_lowercase) for _ in range(16))
new_file_name = "ph_long_file_name_{0}{1}".format(random_suffix, file_extension)
file_path = os.path.join(tmp_dir, new_file_name)
with open(file_path, 'wb') as f:
if as_byte:
f.write(data.as_bytes())
else:
f.write(data)
dict_to_fill.append({'file_name': new_file_name, 'file_path': file_path})
except Exception as e:
self._base_connector.debug_print('Exception while writing file: {}'.format(e))
def process_email(self, rfc822_email, email_id, epoch):
try:
self._set_email_id_contains(email_id)
except Exception:
pass
ret_val, message, results = self._int_process_email(rfc822_email, email_id, epoch)
if not ret_val:
return phantom.APP_ERROR, message
self._parse_results(results)
return phantom.APP_SUCCESS, PROC_EMAIL_PROCESSED
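# Minimal usage sketch (assumed caller context; base_connector and config are supplied by the host app,
# and the email id shown is hypothetical):
#   processor = ProcessMail(base_connector, config)
#   status, message = processor.process_email(rfc822_text, "message-id-123", epoch=time.time())
# On success the container and its artifacts have already been saved through base_connector, and the
# temporary extraction directories have been cleaned up by _parse_results().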
def _parse_results(self, results):
param = self._base_connector.get_current_param()
container_count = EWS_DEFAULT_CONTAINER_COUNT
artifact_count = EWS_DEFAULT_ARTIFACT_COUNT
if param:
container_count = param.get(phantom.APP_JSON_CONTAINER_COUNT, EWS_DEFAULT_CONTAINER_COUNT)
artifact_count = param.get(phantom.APP_JSON_ARTIFACT_COUNT, EWS_DEFAULT_ARTIFACT_COUNT)
results = results[:container_count]
for result in results:
container = result.get('container')
if not container:
continue
container.update(_container_common)
try:
ret_val, message, container_id = self._base_connector.save_container(container)
except Exception as e:
self._base_connector.debug_print("Exception: ", e)
continue
self._base_connector.debug_print(PROC_EMAIL_SAVE_CONTAINER.format(ret_val, message, container_id))
if phantom.is_fail(ret_val):
message = PROC_EMAIL_FAILED_CONTAINER.format(container['source_data_identifier'], message)
self._base_connector.debug_print(message)
continue
if not container_id:
message = PROC_EMAIL_SAVE_CONTAINER_FAILED
self._base_connector.debug_print(message)
continue
files = result.get('files')
vault_artifacts_added = 0
for curr_file in files:
ret_val, added_to_vault = self._handle_file(curr_file, container_id)
if added_to_vault:
vault_artifacts_added += 1
artifacts = result.get('artifacts')
if not artifacts:
continue
if not self._base_connector.is_poll_now():
artifacts = artifacts[:artifact_count]
len_artifacts = len(artifacts)
for j, artifact in enumerate(artifacts):
if not artifact:
continue
# add the container id to the artifact
artifact['container_id'] = container_id
self._set_sdi(artifact)
# if it is the last artifact of the last container
if (j + 1) == len_artifacts:
# mark it such that active playbooks get executed
artifact['run_automation'] = True
ret_val, status_string, artifact_id = self._base_connector.save_artifact(artifact)
self._base_connector.debug_print(PROC_EMAIL_SAVE_CONT_PASSED.format(ret_val, status_string, artifact_id))
# delete any temp directories that were created by the email parsing function
[shutil.rmtree(x['temp_directory'], ignore_errors=True) for x in results if x.get('temp_directory')]
return self._base_connector.set_status(phantom.APP_SUCCESS)
def _add_vault_hashes_to_dictionary(self, cef_artifact, vault_id):
success, message, vault_info = phantom_rules.vault_info(vault_id=vault_id)
if not vault_info:
return phantom.APP_ERROR, "Vault ID not found"
# The return value is a list, each item represents an item in the vault
# matching the vault id, the info that we are looking for (the hashes)
# will be the same for every entry, so just access the first one
try:
metadata = vault_info[0].get('metadata')
except Exception:
return phantom.APP_ERROR, PROC_EMAIL_FAILED_VAULT_CONT_DATA
try:
cef_artifact['fileHashSha256'] = metadata['sha256']
except Exception:
pass
try:
cef_artifact['fileHashMd5'] = metadata['md5']
except Exception:
pass
try:
cef_artifact['fileHashSha1'] = metadata['sha1']
except Exception:
pass
return phantom.APP_SUCCESS, PROC_EMAIL_MAPPED_HASH_VAL
def _handle_file(self, curr_file, container_id):
file_name = curr_file.get('file_name')
local_file_path = curr_file['file_path']
contains = self._get_file_contains(local_file_path)
# lets move the data into the vault
vault_attach_dict = {}
if not file_name:
file_name = os.path.basename(local_file_path)
self._base_connector.debug_print("Vault file name: {0}".format(file_name))
vault_attach_dict[phantom.APP_JSON_ACTION_NAME] = self._base_connector.get_action_name()
vault_attach_dict[phantom.APP_JSON_APP_RUN_ID] = self._base_connector.get_app_run_id()
file_name = self._decode_uni_string(file_name, file_name)
# success, message, vault_id = phantom_rules.vault_add(container_id, local_file_path, file_name)
try:
success, message, vault_id = phantom_rules.vault_add(file_location=local_file_path, container=container_id, file_name=file_name, metadata=vault_attach_dict)
except Exception as e:
self._base_connector.debug_print(phantom.APP_ERR_FILE_ADD_TO_VAULT.format(e))
return phantom.APP_ERROR, phantom.APP_ERROR
if not success:
self._base_connector.debug_print(PROC_EMAIL_FAILED_VAULT_ADD_FILE.format(message))
return phantom.APP_ERROR, phantom.APP_ERROR
# add the vault id artifact to the container
cef_artifact = {}
if file_name:
cef_artifact.update({'fileName': file_name})
if vault_id:
cef_artifact.update({'vaultId': vault_id,
'cs6': vault_id,
'cs6Label': 'Vault ID'})
# now get the rest of the hashes and add them to the cef artifact
self._add_vault_hashes_to_dictionary(cef_artifact, vault_id)
if not cef_artifact:
return phantom.APP_SUCCESS, phantom.APP_ERROR
artifact = {}
artifact.update(_artifact_common)
artifact['container_id'] = container_id
artifact['name'] = 'Vault Artifact'
artifact['cef'] = cef_artifact
if contains:
artifact['cef_types'] = {'vaultId': contains, 'cs6': contains}
self._set_sdi(artifact)
ret_val, status_string, artifact_id = self._base_connector.save_artifact(artifact)
self._base_connector.debug_print(PROC_EMAIL_SAVE_CONT_PASSED.format(ret_val, status_string, artifact_id))
return phantom.APP_SUCCESS, ret_val
def cmp2(self, a, b):
return (a > b) - (a < b)
def _set_sdi(self, input_dict):
if 'source_data_identifier' in input_dict:
del input_dict['source_data_identifier']
dict_hash = None
# first get the phantom version
phantom_version = self._base_connector.get_product_version()
if not phantom_version:
dict_hash = self._create_dict_hash(input_dict)
else:
ver_cmp = self.cmp2(phantom_version, HASH_FIXED_PHANTOM_VERSION)
if ver_cmp == -1:
dict_hash = self._create_dict_hash(input_dict)
if dict_hash:
input_dict['source_data_identifier'] = dict_hash
else:
# Remove this code once the backend has fixed PS-4216 _and_ it has been
# merged into next so that 2.0 and 2.1 has the code
input_dict['source_data_identifier'] = self._create_dict_hash(input_dict)
return phantom.APP_SUCCESS
def _create_dict_hash(self, input_dict):
try:
input_dict_str = json.dumps(input_dict, sort_keys=True)
except Exception as e:
self._base_connector.debug_print('Exception: ', e)
return None
return hashlib.md5(input_dict_str.encode('utf-8')).hexdigest()
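# Illustrative note (hypothetical values): json.dumps(..., sort_keys=True) canonicalizes the dict, so
# two dicts with the same keys and values always produce the same source_data_identifier:
#   _create_dict_hash({'b': 1, 'a': 2}) == _create_dict_hash({'a': 2, 'b': 1})
# Both serialize to '{"a": 2, "b": 1}' before the MD5 digest is taken.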
|
Apps/phgsgmail/gsgmail_process_email.py
| 42,076 |
This method is used to get appropriate error message from the exception.
:param e: Exception object
:return: error message
File: gsgmail_process_email.py Copyright (c) 2017-2021 Splunk Inc. Licensed under Apache 2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt) Don't run any playbooks, when this artifact is added Don't run any playbooks, when this artifact is added Check before splicing, find returns -1 if not found _and_ you will end up splicing on -1 (incorrectly) try to load the email get all tags that have hrefs it's html, so get all the urls work on the text part of the link, they might be http links different from the href and were either missed by the uri_regexc while parsing text or there was no text counterpart in the email Parse it as a text file add the uris to the urls work on any mailto urls if present First extract what looks like an IP from the file, this is a faster operation Now validate them ignore empty entries remove the 'Forwarded Message' from the email text and parse it get all the artifact data in their own list objects set the default artifact dict add artifacts try to find all the decoded strings, we could have multiple decoded strings or a single decoded string between two normal strings separated by \r\n YEAH...it could get that messy return input_str as is, no need to do any conversion get the decoded strings convert to dict for safe access, if it's an empty list, the dict will be empty nothing to replace with nothing to replace with commenting the existing approach due to a new approach being deployed below substitute the encoded string with the decoded one input_str = input_str.replace(encoded_string, value) make new string insted of replacing in the input string because issue find in PAPP-9531 replace input string with new string because issue find in PAPP-9531 Create the default name get the subject from the parsed mail if no subject then return the default if content disposition is None then assume that it is if content disposition is inline noqa get the file_name init name and extension to default values Try to create an extension from the content type if possible Try to create a name from the content id if possible Remove any chars that we don't want in the name is the part representing the body of the email is this another email as an attachment This is an attachment, first check if it is another email or not noqa Parse email keys first if the header did not contain any email addresses and message ID then ignore this artifact Adding the email id as a cef artifact crashes the UI when trying to show the action dialog box so not adding this right now. 
All the other code to process the emailId is there, but the refraining from adding the emailId add_email_id = False TODO: the next 2 ifs can be condensed to use 'or' Convert the header tuple into a dictionary Handle received separately handle the subject string, if required add a new key Create a tmp directory for this email, will extract all files here Extract fields and place it in a dictionary parse the parts of the email parsed_mail[PROC_EMAIL_JSON_EMAIL_HEADERS].append(mail.items()) noqa get the container name Add the container first save the container, to do that copy things from parsed_mail to a new object delete the header info, we dont make it a part of the container json Create the sets before handling the bodies If both the bodies add the same ip only one artifact should be created For bodies Files Add email_bodies to email_headers Add body to the last added Email artifact noqa add the container id to the artifact if it is the last artifact of the last container mark it such that active playbooks get executed delete any temp directories that were created by the email parsing function The return value is a list, each item represents an item in the vault matching the vault id, the info that we are looking for (the hashes) will be the same for every entry, so just access the first one lets move the data into the vault success, message, vault_id = phantom_rules.vault_add(container_id, local_file_path, file_name) add the vault id artifact to the container now get the rest of the hashes and add them to the cef artifact first get the phantom version Remove this code once the backend has fixed PS-4216 _and_ it has been merged into next so that 2.0 and 2.1 has the code
| 4,337 |
en
| 0.844701 |
# Copyright 2021 ETH Zurich, Media Technology Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import pandas as pd
"""
This module is mainly used to transform the data from the partners into our desired format.
In the end, only load_data and get_metadata are used in the algorithms.
"""
def load_data(folder, input_path='user_item', cut=40, high_cut=1000000, seed=None):
"""
loads the training, validation and test sets from the folder, restricts to users with at least "cut" read articles and
returns the sets. The Format of the sets is pd.Series with index the UserID and value a list of ArticleIDs
:param folder/input_path: {folder}/{input_path} is the path to look for the *_train.pkl files
:param cut: value to cut off users with less than "cut" read articles
:return: three pd.Series. Index of each series is the UserID. The value is a list of ArticleIDs.
(look in create_split to see how the split is defined)
"""
# cut cuts off users that read less than cut articles
user_item_train, user_item_test, user_item_validation = pd.read_pickle(
f'{folder}/{input_path}_train.pkl'), pd.read_pickle(f'{folder}/{input_path}_test.pkl'), pd.read_pickle(
f'{folder}/{input_path}_validation.pkl')
user_item_train = user_item_train[user_item_train.str.len() > cut * 0.7]
user_item_train = user_item_train[user_item_train.str.len() < high_cut * 0.7]
user_item_test = user_item_test.loc[user_item_train.index]
user_item_validation = user_item_validation.loc[user_item_train.index]
return user_item_train, user_item_test, user_item_validation
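# Usage sketch (assumes {folder}/user_item_train.pkl, _test.pkl and _validation.pkl already exist;
# the folder name 'processed' is hypothetical):
#   train, test, validation = load_data('processed', cut=40)
#   # each is a pd.Series: index = UserID, value = list of ArticleIDs read by that user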
def load_data_vertical(folder, input_path='user_item_vertical', cut=40):
"""
loads the training, validation and test sets from the folder, restricts to users with at least "cut" read articles and
returns the sets. The Format of the sets is pd.Series with index the UserID and value a list of ArticleIDs
:param folder/input_path: {folder}/{input_path} is the path to look for the *_train.pkl files
:param cut: value to cut off users with less than "cut" read articles
:return: three pd.Series. Index of each series is the UserID. The value is a list of ArticleIDs.
(look in create_split to see how the split is defined)
"""
# cut cuts off users that read less than cut articles
user_item_train, user_item_test, user_item_validation = pd.read_parquet(
f'{folder}/{input_path}_train.pq'), pd.read_parquet(f'{folder}/{input_path}_test.pq'), pd.read_parquet(
f'{folder}/{input_path}_validation.pq')
user_item_train = user_item_train[user_item_train['count'] >cut]
user_item_test =user_item_test[user_item_test['count'] >cut]
user_item_validation = user_item_validation[user_item_validation['count'] >cut]
user_item_train['resource_id']=user_item_train['article_id']
user_item_test['resource_id']=user_item_test['article_id']
user_item_validation['resource_id']=user_item_validation['article_id']
return user_item_train, user_item_test, user_item_validation
def load_data_cv(folder, input_path='user_item', cut=40, high_cut=100000,seed=1):
"""
Same as load_data but returns only a random 80% of the training set
"""
# cut cuts off users that read less than cut articles
user_item_train, user_item_test, user_item_validation = load_data(folder, input_path=input_path, cut=cut,high_cut=high_cut)
user_item_train = user_item_train.sample(frac=0.8,random_state=seed)
user_item_test = user_item_test.sample(frac=1, random_state=seed)
return user_item_train, user_item_test, user_item_validation
def load_data_vertical_cv(folder, input_path='user_item_vertical', cut=40, high_cut=100000,seed=1):
"""
Same as load_data_vertical but returns only a random 80% of the training set
"""
# cut cuts off users that read less than cut articles
user_item_train, user_item_test, user_item_validation = load_data_vertical(folder, input_path=input_path, cut=cut)
user_item_train = user_item_train.sample(frac=0.8,random_state=seed)
user_item_test = user_item_test.sample(frac=1, random_state=seed)
return user_item_train, user_item_test, user_item_validation
def get_metadata(folder, usecols=[]):
"""
Loads and returns the article metadata.
The algorithms expect the format to be a Dataframe with two columns:
- "resource_id": unique id for the article
- "text": full text of the article (without html tags)
"""
if not usecols:
usecols = ['text', 'resource_id']
metadata = pd.read_csv(f"{folder}/meta.csv", usecols=usecols)
return metadata.dropna(subset=['text'])
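# Usage sketch (assumes {folder}/meta.csv exists with at least 'resource_id' and 'text' columns):
#   articles = get_metadata('processed')
#   # -> DataFrame with one row per article; rows with a missing 'text' value are dropped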
def transform_item_matrix_to_horizontal_format(folder, output_path='user_item_matrix.pkl',
input_path='user_item_matrix_vertical.pq', sortby='ts'):
"""
Transforms the vertical User-Item matrix, where each row is one click, into a horizontal User-Item matrix where we have
one row for each user and each row contains a (sorted) list of articles she/he clicked on.
:param folder: Input folder
:param output_path: Filename/path for outputfile
:param input_path: Filename/path for inputfile. This pickled file contains a DataFrame with three columns:
"user_ix": the UserID and "article_id" the ArticleID and "<sortby>" which should be timestamp
to sort by. Each UserID ArticleID pair indicates a click of the user on the article at a time.
:param sortby: Columnname of the timestamp column to sort by
:return: returns a Series where the index is the UserID and values is the by timestamp
sorted list of clicked ArticleIDs
"""
now = datetime.datetime.now()
matrices = pd.read_parquet(f"{folder}/{input_path}")
grouped = matrices.sort_values(sortby).groupby(['user_ix']).apply(lambda x: list(x['article_id']))
grouped.to_pickle(f"{folder}/{output_path}")
print(f"Data transformed {datetime.datetime.now() - now}")
def create_split(folder, input_path='user_item_matrix.pkl', ouput_path='user_item', cut_dump=10):
"""
Loads the horizontal user item data from folder and creates a user-wise 70% train, 20% test, 10% validation split.
This means for each user the first 70% of read articles go into the train set, the next 20% into the test set
and the last 10% into the validation set. We remove users with 10 or fewer clicked articles.
This is the data that is loaded to train/test the models in the end.
"""
now = datetime.datetime.now()
user_item = pd.read_pickle(f"{folder}/{input_path}")
user_item = user_item[user_item.str.len() > (cut_dump)]
user_item_train = user_item.apply(lambda x: x[:int(len(x) * 0.7)])
user_item_test = user_item.apply(lambda x: x[int(len(x) * 0.7):int(len(x) * 0.9)])
user_item_validation = user_item.apply(lambda x: x[int(len(x) * 0.9):])
user_item_train.name = 'article_id'
user_item_test.name = 'article_id'
user_item_validation.name = 'article_id'
user_item_train.to_pickle(f'{folder}/{ouput_path}_train.pkl')
user_item_test.to_pickle(f'{folder}/{ouput_path}_test.pkl')
user_item_validation.to_pickle(f'{folder}/{ouput_path}_validation.pkl')
print(f"Split created {datetime.datetime.now() - now}")
def create_split_vertical(folder, input_path='user_item_matrix_vertical.pq', ouput_path='user_item_vertical', cut_dump=10,time_column='ts'):
"""
Loads the vertical user item data from folder and creates a user-wise 70% train, 20% test, 10% validation split.
This means for each user the first 70% of read articles go into the train set, the next 20% into the test set
and the last 10% into the validation set. We remove users with 10 or fewer clicked articles.
This is the data that is loaded to train/test the models in the end.
"""
now = datetime.datetime.now()
user_item = pd.read_parquet(f"{folder}/{input_path}").sort_values(time_column)
user_item['count']=user_item.groupby(['user_ix']).article_id.transform('count')
user_item = user_item[user_item['count']>cut_dump]
grouped = user_item.groupby(['user_ix'])
user_item['percentile'] = (grouped.article_id.cumcount() + 1) / grouped.article_id.transform('count')
user_item_train = user_item[user_item['percentile']<=0.7]
user_item_test = user_item[(user_item['percentile']>0.7) & (user_item['percentile']<0.9)]
user_item_validation = user_item[user_item['percentile']>0.9]
user_item_train.to_parquet(f'{folder}/{ouput_path}_train.pq')
user_item_test.to_parquet(f'{folder}/{ouput_path}_test.pq')
user_item_validation.to_parquet(f'{folder}/{ouput_path}_validation.pq')
print(f"Split created {datetime.datetime.now() - now}")
def transform_horizontal_to_vertical(df):
"""
Transforms the horizontal format into vertical format
:param df:
:return:
"""
return df.explode().reset_index()
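# Illustrative sketch: explode() undoes the horizontal grouping, turning a Series entry
#   u1 -> [a1, a2]
# into one row per click, and reset_index() moves the UserID index back into a column:
#   user_ix  article_id
#   u1       a1
#   u1       a2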
if __name__ == "__main__":
import pandas as pd
folder = os.getenv('DATA_FOLDER','processed')
# Transforms the user-item-matrix into a user-series. For each user we store the articles read as one sorted list.
# Save the new format.
# This format is more convenient for creating the split and for training some of the algorithms.
transform_item_matrix_to_horizontal_format(folder=folder)
# Create a train/test/validation split (70%/20%/10%) and save it
create_split(folder=folder, cut_dump=10)
create_split_vertical(folder=folder, cut_dump=10)
# loads the saved train,validation,test split
train, test, validation = load_data(folder=folder, cut=40)
# # if you wish to transform into normal user-item-format
# train_vertical = transform_horizontal_to_vertical(train)
|
preprocessing.py
| 10,315 |
Loads the horizontal user item data from folder and creates a user-wise a 70% train, 20% validation, 10% test split.
This means for each user the first 70% read articles are in the train the next 20% in validation and the last 10%
read articles in the test set. We remove users with less than 10 clicked articles.
This is the data that is loaded to train/test the models in the end.
Loads the horizontal user item data from folder and creates a user-wise a 70% train, 20% validation, 10% test split.
This means for each user the first 70% read articles are in the train the next 20% in validation and the last 10%
read articles in the test set. We remove users with less than 10 clicked articles.
This is the data that is loaded to train/test the models in the end.
Loads and returns the article metadata.
The algorithms expect the format to be a Dataframe with two columns:
- "resource_id": unique id for the article
- "text": full text of the article (without html tags)
loads the training,validation,test set from the folder, restricts the users with at least "cut" read articles and
returns the sets. The Format of the sets is pd.Series with index the UserID and value a list of ArticleIDs
:param folder/input_path: {folder}/{input_path} is the path to look for the *_train.pkl files
:param cut: value to cut off users with less than "cut" read articles
:return: three pd.Series. Index of each series is the UserID. The value is a list of ArticleIDs.
(look in create_split to see how the split is defines)
Same as load_data but only returns random 80% of the training set
loads the training,validation,test set from the folder, restricts the users with at least "cut" read articles and
returns the sets. The Format of the sets is pd.Series with index the UserID and value a list of ArticleIDs
:param folder/input_path: {folder}/{input_path} is the path to look for the *_train.pkl files
:param cut: value to cut off users with less than "cut" read articles
:return: three pd.Series. Index of each series is the UserID. The value is a list of ArticleIDs.
(look in create_split to see how the split is defines)
Same as load_data but only returns random 80% of the training set
Transforms the horizontal format into vertical format
:param df:
:return:
Transforms vertical User-Item matrix where ich row is one click into a horizontal User-item matrix where we have
one row for each user and each row contains a (sorted) list of articles she/he clicked on.
:param folder: Input folder
:param output_path: Filename/path for outputfile
:param input_path: Filename/path for inputfile. This pickled file contains a DataFrame with three columns:
"user_ix": the UserID and "article_id" the ArticleID and "<sortby>" which should be timestamp
to sort by. Each UserID ArticleID pair indicates a click of the user on the article at a time.
:param sortby: Columnname of the timestamp column to sort by
:return: returns a Series where the index is the UserID and values is the by timestamp
sorted list of clicked ArticleIDs
Copyright 2021 ETH Zurich, Media Technology Center Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. cut cuts off users that read less than cut articles cut cuts off users that read less than cut articles cut cuts off users that read less than cut articles cut cuts off users that read less than cut articles Transforms the user-item-matrix into a user-series. For each user we store the articles read as one sorted list. Save the new format. This format is more convenient for creating the split and for training some of the algorithms. Create a train,test,validation split. 70%,10%,20% and save it loads the saved train,validation,test split if you wish to transform into normal user-item-format train_vertical = transform_horizontal_to_vertical(train)
| 4,319 |
en
| 0.861619 |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from foundations.step import Step
from training.metric_logger import MetricLogger
from testing import test_case
class TestMetricLogger(test_case.TestCase):
def test_create(self):
MetricLogger()
@staticmethod
def create_logger():
logger = MetricLogger()
logger.add('train_accuracy', Step.from_iteration(0, 400), 0.5)
logger.add('train_accuracy', Step.from_iteration(1, 400), 0.6)
logger.add('test_accuracy', Step.from_iteration(0, 400), 0.4)
return logger
def test_add_get(self):
logger = TestMetricLogger.create_logger()
self.assertEqual(logger.get_data('train_accuracy'), [(0, 0.5), (1, 0.6)])
self.assertEqual(logger.get_data('test_accuracy'), [(0, 0.4)])
self.assertEqual(logger.get_data('test_loss'), [])
def test_overwrite(self):
logger = TestMetricLogger.create_logger()
logger.add('train_accuracy', Step.from_iteration(0, 400), 1.0)
self.assertEqual(logger.get_data('train_accuracy'), [(0, 1.0), (1, 0.6)])
def test_sorting(self):
logger = TestMetricLogger.create_logger()
logger.add('train_accuracy', Step.from_iteration(5, 400), 0.9)
logger.add('train_accuracy', Step.from_iteration(3, 400), 0.7)
logger.add('train_accuracy', Step.from_iteration(4, 400), 0.8)
self.assertEqual(logger.get_data('train_accuracy'),
[(0, 0.5), (1, 0.6), (3, 0.7), (4, 0.8), (5, 0.9)])
def test_str(self):
logger = TestMetricLogger.create_logger()
expected = ['train_accuracy,0,0.5', 'train_accuracy,1,0.6', 'test_accuracy,0,0.4']
self.assertEqual(str(logger), '\n'.join(expected))
def test_create_from_string(self):
logger = TestMetricLogger.create_logger()
logger2 = MetricLogger.create_from_string(str(logger))
self.assertEqual(logger.get_data('train_accuracy'), logger2.get_data('train_accuracy'))
self.assertEqual(logger.get_data('test_accuracy'), logger2.get_data('test_accuracy'))
self.assertEqual(str(logger), str(logger2))
def test_file_operations(self):
logger = TestMetricLogger.create_logger()
save_loc = os.path.join(self.root, 'temp_logger')
logger.save(save_loc)
logger2 = MetricLogger.create_from_file(save_loc)
self.assertEqual(logger.get_data('train_accuracy'), logger2.get_data('train_accuracy'))
self.assertEqual(logger.get_data('test_accuracy'), logger2.get_data('test_accuracy'))
self.assertEqual(str(logger), str(logger2))
test_case.main()
|
training/test/test_metric_logger.py
| 2,845 |
Copyright (c) Facebook, Inc. and its affiliates. This source code is licensed under the MIT license found in the LICENSE file in the root directory of this source tree.
| 168 |
en
| 0.897895 |
import logging
import os
from enum import Enum
from functools import lru_cache
from typing import Optional
from pydantic import BaseSettings, PostgresDsn
logger = logging.getLogger(__name__)
class EnvironmentEnum(str, Enum):
PRODUCTION = "production"
LOCAL = "local"
class GlobalConfig(BaseSettings):
TITLE: str = "Endorser"
DESCRIPTION: str = "An endorser service for aca-py wallets"
ENVIRONMENT: EnvironmentEnum
DEBUG: bool = False
TESTING: bool = False
TIMEZONE: str = "UTC"
# the following defaults match up with default values in scripts/.env.example
# these MUST all be set in non-local environments.
PSQL_HOST: str = os.environ.get("ENDORSER_POSTGRESQL_HOST", "localhost")
PSQL_PORT: int = os.environ.get("ENDORSER_POSTGRESQL_PORT", 5432)
PSQL_DB: str = os.environ.get("ENDORSER_POSTGRESQL_DB", "traction")
PSQL_USER: str = os.environ.get("ENDORSER_DB_USER", "tractionuser")
PSQL_PASS: str = os.environ.get("ENDORSER_DB_USER_PWD", "tractionPass")
PSQL_ADMIN_USER: str = os.environ.get("ENDORSER_DB_ADMIN", "tractionadminuser")
PSQL_ADMIN_PASS: str = os.environ.get("ENDORSER_DB_ADMIN_PWD", "tractionadminPass")
# application connection is async
# fmt: off
SQLALCHEMY_DATABASE_URI: PostgresDsn = (
f"postgresql+asyncpg://{PSQL_USER}:{PSQL_PASS}@{PSQL_HOST}:{PSQL_PORT}/{PSQL_DB}" # noqa: E501
)
# migrations connection uses owner role and is synchronous
SQLALCHEMY_DATABASE_ADMIN_URI: PostgresDsn = (
f"postgresql://{PSQL_ADMIN_USER}:{PSQL_ADMIN_PASS}@{PSQL_HOST}:{PSQL_PORT}/{PSQL_DB}" # noqa: E501
)
# fmt: on
ACAPY_ADMIN_URL: str = os.environ.get(
"ENDORSER_ACAPY_ADMIN_URL", "http://localhost:9031"
)
ACAPY_ADMIN_URL_API_KEY: str = os.environ.get(
"ENDORSER_ACAPY_ADMIN_URL_API_KEY", "change-me"
)
ENDORSER_API_ADMIN_USER: str = os.environ.get("ENDORSER_API_ADMIN_USER", "endorser")
ENDORSER_API_ADMIN_KEY: str = os.environ.get("ENDORSER_API_ADMIN_KEY", "change-me")
ENDORSER_WEBHOOK_URL: str = os.environ.get(
"ENDORSER_WEBHOOK_URL", "http://endorser-api:5000/webhook"
)
ACAPY_WEBHOOK_URL_API_KEY_NAME = "x-api-key"
ACAPY_WEBHOOK_URL_API_KEY: str = os.environ.get("ACAPY_WEBHOOK_URL_API_KEY", "")
DB_ECHO_LOG: bool = False
# Api V1 prefix
API_V1_STR = "/v1"
# openssl rand -hex 32
JWT_SECRET_KEY = "09d25e094faa6ca2556c818166b7a9563b93f7099f6f0f4caa6cf63b88e8d3e7"
JWT_ALGORITHM = "HS256"
JWT_ACCESS_TOKEN_EXPIRE_MINUTES = 300
class Config:
case_sensitive = True
class LocalConfig(GlobalConfig):
"""Local configurations."""
DEBUG: bool = True
ENVIRONMENT: EnvironmentEnum = EnvironmentEnum.LOCAL
class ProdConfig(GlobalConfig):
"""Production configurations."""
DEBUG: bool = False
ENVIRONMENT: EnvironmentEnum = EnvironmentEnum.PRODUCTION
class FactoryConfig:
def __init__(self, environment: Optional[str]):
self.environment = environment
def __call__(self) -> GlobalConfig:
if self.environment == EnvironmentEnum.LOCAL.value:
return LocalConfig()
return ProdConfig()
@lru_cache()
def get_configuration() -> GlobalConfig:
return FactoryConfig(os.environ.get("ENVIRONMENT"))()
settings = get_configuration()
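if __name__ == "__main__":
    # Editor's sketch, not part of the original service: the factory returns
    # LocalConfig only when the environment string is "local", and
    # get_configuration() is cached by lru_cache, so repeated calls hand back
    # the same settings object.
    assert isinstance(FactoryConfig(EnvironmentEnum.LOCAL.value)(), LocalConfig)
    assert isinstance(FactoryConfig("production")(), ProdConfig)
    assert get_configuration() is get_configuration()
    print(settings.ENVIRONMENT, settings.SQLALCHEMY_DATABASE_URI)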
|
services/endorser/api/core/config.py
| 3,343 |
Local configurations.
Production configurations.
the following defaults match up with default values in scripts/.env.example these MUST be all set in non-local environments. application connection is async fmt: off noqa: E501 migrations connection uses owner role and is synchronous noqa: E501 fmt: on Api V1 prefix openssl rand -hex 32
| 338 |
en
| 0.768724 |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet accounts properly when there are cloned transactions with malleated scriptsigs."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class TxnMallTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
def add_options(self, parser):
parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
# Start with split network:
super(TxnMallTest, self).setup_network()
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
def run_test(self):
# All nodes should start with 12,500 WEI:
starting_balance = 12500
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress() # bug workaround, coins generated assigned to first getnewaddress!
self.nodes[0].settxfee(.001)
node0_address1 = self.nodes[0].getnewaddress()
node0_txid1 = self.nodes[0].sendtoaddress(node0_address1, 12190)
node0_tx1 = self.nodes[0].gettransaction(node0_txid1)
node0_address2 = self.nodes[0].getnewaddress()
node0_txid2 = self.nodes[0].sendtoaddress(node0_address2, 290)
node0_tx2 = self.nodes[0].gettransaction(node0_txid2)
assert_equal(self.nodes[0].getbalance(),
starting_balance + node0_tx1["fee"] + node0_tx2["fee"])
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress()
# Send tx1, and another transaction tx2 that won't be cloned
txid1 = self.nodes[0].sendtoaddress(node1_address, 400)
txid2 = self.nodes[0].sendtoaddress(node1_address, 200)
# Construct a clone of tx1, to be malleated
rawtx1 = self.nodes[0].getrawtransaction(txid1,1)
clone_inputs = [{"txid":rawtx1["vin"][0]["txid"],"vout":rawtx1["vin"][0]["vout"]}]
clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][0]["value"],
rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][1]["value"]}
clone_locktime = rawtx1["locktime"]
clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs, clone_locktime)
# createrawtransaction randomizes the order of its outputs, so swap them if necessary.
# output 0 is at version+#inputs+input+sigstub+sequence+#outputs
# 400 WEI serialized is 00902f5009000000
pos0 = 2*(4+1+36+1+4+1)
hex400 = "00902f5009000000"
output_len = 16 + 2 + 2 * int("0x" + clone_raw[pos0 + 16 : pos0 + 16 + 2], 0)
if (rawtx1["vout"][0]["value"] == 400 and clone_raw[pos0 : pos0 + 16] != hex400 or rawtx1["vout"][0]["value"] != 400 and clone_raw[pos0 : pos0 + 16] == hex400):
output0 = clone_raw[pos0 : pos0 + output_len]
output1 = clone_raw[pos0 + output_len : pos0 + 2 * output_len]
clone_raw = clone_raw[:pos0] + output1 + output0 + clone_raw[pos0 + 2 * output_len:]
# Use a different signature hash type to sign. This creates an equivalent but malleated clone.
# Don't send the clone anywhere yet
tx1_clone = self.nodes[0].signrawtransactionwithwallet(clone_raw, None, "ALL|ANYONECANPAY")
assert_equal(tx1_clone["complete"], True)
# Have node0 mine a block, if requested:
if (self.options.mine_block):
self.nodes[0].generate(1)
self.sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Node0's balance should be starting balance, plus 500WEI for another
# matured block, minus tx1 and tx2 amounts, and minus transaction fees:
expected = starting_balance + node0_tx1["fee"] + node0_tx2["fee"]
if self.options.mine_block:
expected += 500
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Send clone and its parent to miner
self.nodes[2].sendrawtransaction(node0_tx1["hex"])
txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].sendrawtransaction(node0_tx2["hex"])
self.nodes[2].sendrawtransaction(tx2["hex"])
self.nodes[2].generate(1) # Mine another block to make sure we sync
self.sync_blocks()
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx1_clone = self.nodes[0].gettransaction(txid1_clone)
tx2 = self.nodes[0].gettransaction(txid2)
# Verify expected confirmations
assert_equal(tx1["confirmations"], -2)
assert_equal(tx1_clone["confirmations"], 2)
assert_equal(tx2["confirmations"], 1)
# Check node0's total balance; should be same as before the clone, + 1000 WEI for 2 matured,
# less possible orphaned matured subsidy
expected += 1000
if (self.options.mine_block):
expected -= 500
assert_equal(self.nodes[0].getbalance(), expected)
if __name__ == '__main__':
TxnMallTest().main()
|
test/functional/wallet_txn_clone.py
| 5,972 |
Test the wallet accounts properly when there are cloned transactions with malleated scriptsigs.
!/usr/bin/env python3 Copyright (c) 2014-2016 The Bitcoin Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. Start with split network: All nodes should start with 12,500 WEI: bug workaround, coins generated assigned to first getnewaddress! Coins are sent to node1_address Send tx1, and another transaction tx2 that won't be cloned Construct a clone of tx1, to be malleated createrawtransaction randomizes the order of its outputs, so swap them if necessary. output 0 is at version+inputs+input+sigstub+sequence+outputs 400 WEI serialized is 00902f5009000000 Use a different signature hash type to sign. This creates an equivalent but malleated clone. Don't send the clone anywhere yet Have node0 mine a block, if requested: Node0's balance should be starting balance, plus 500WEI for another matured block, minus tx1 and tx2 amounts, and minus transaction fees: Send clone and its parent to miner ... mine a block... Reconnect the split network, and sync chain: Mine another block to make sure we sync Re-fetch transaction info: Verify expected confirmations Check node0's total balance; should be same as before the clone, + 1000 WEI for 2 matured, less possible orphaned matured subsidy
| 1,384 |
en
| 0.850748 |
import collections
from collections import defaultdict
import sys
import json
import random
from jsmin import jsmin
from io import StringIO
import numpy as np
import copy
import os
script_n = os.path.basename(__file__).split('.')[0]
script_n = script_n.split('_', 1)[1]
def to_ng(loc):
return (int(loc[0]/4), int(loc[1]/4), int(loc[2]/40))
'''Load data'''
import compress_pickle
fname = 'gen_210518_setup01_v2_syndb_threshold_20_coalesced.gz'
grc_mfs_locs = compress_pickle.load(fname)
mfs_locs = defaultdict(list)
for grc in grc_mfs_locs:
for mf in grc_mfs_locs[grc]:
for syn in grc_mfs_locs[grc][mf]:
mfs_locs[mf].append(syn['syn_loc0'])
# print(mfs_locs[mf]); asdf
asdff = (172644, 113468, 89)
asdfff = (137580, 101824, 369)
# white list for big boutons
whitelist = set([
(172644, 113468, 89),
(163520, 98364, 83),
(113008, 109372, 1154),
(70424, 116512, 71),
(186536, 100020, 130),
(86780, 110184, 81),
(177992, 108528, 1164),
(127368, 101716, 1143),
(155036, 103252, 71),
(97884, 104152, 1160),
(109476, 104808, 76),
(82936, 122484, 76),
(113532, 104660, 1150),
(78904, 115540, 1158),
(190684, 91276, 1015),
(160500, 99828, 1165),
(109020, 115476, 74),
(93516, 101476, 858),
(126728, 104988, 86),
(173456, 106376, 71),
(197436, 95688, 898),
(122752, 110608, 85),
(122192, 119344, 70),
(122396, 118840, 83),
(204868, 103452, 145),
(94212, 107860, 1137),
(92360, 105844, 1162),
(84704, 115452, 119),
(54036, 105484, 394),
(110624, 105800, 70),
(170512, 99132, 107),
(71200, 114308, 1123),
(106588, 98692, 1160),
(70164, 107908, 1015),
(144772, 106812, 105),
(asdff),
(asdff),
(asdff),
])
blacklist = set([
(137580, 101824, 369),
(127384, 115252, 746),
(155268, 99276, 918),
(182000, 91966, 716),
(119828, 107400, 312),
(171384, 94244, 573),
(asdfff),
(asdfff),
(asdfff),
(asdfff),
(asdfff),
(asdfff),
])
'''Cluster and extract locations of MF boutons'''
from sklearn.cluster import DBSCAN
mfs_bouton_locs = {}
'''if a bouton location has less than this many synapses then it won't be considered in order to reduce false positives'''
# bouton_synapse_threshold = 6 # safe for determining big bouton locations
bouton_synapse_threshold = 2
bouton_synapse_threshold = 3
bouton_synapse_threshold = 4 # 4 is a bit iffy, since it has some semi big boutons
bouton_synapse_threshold = 5
# bouton_synapse_threshold = 6 # this threshold has quite a bit of FPs
for mf in mfs_locs:
dbscan = DBSCAN(eps=8000, min_samples=2) # max dist set to 8um
# dbscan = DBSCAN(eps=10000, min_samples=2) # max dist set to 8um
dbscan.fit(mfs_locs[mf])
loc_by_label = defaultdict(list)
for loc, label in zip(mfs_locs[mf], dbscan.labels_):
loc_by_label[label].append(loc)
mf_bouton_locs = []
for label in loc_by_label:
if len(loc_by_label[label]) <= bouton_synapse_threshold:
whitelisted = False
for loc in loc_by_label[label]:
if to_ng(loc) in whitelist:
whitelisted = True
if not whitelisted:
if len(loc_by_label[label]) >= 2:
print(f'Ignoring {mf} due to insufficient synapses')
for loc in loc_by_label[label]:
print(to_ng(loc))
continue
sum = [0, 0, 0]
for loc in loc_by_label[label]:
sum = [sum[0]+loc[0], sum[1]+loc[1], sum[2]+loc[2]]
center = [
int(sum[0]/len(loc_by_label[label])),
int(sum[1]/len(loc_by_label[label])),
int(sum[2]/len(loc_by_label[label])),
]
mf_bouton_locs.append(center)
mfs_bouton_locs[mf] = mf_bouton_locs
# print(mf_bouton_locs)
# for loc in mf_bouton_locs:
# print([int(loc[0]/4), int(loc[1]/4), int(loc[2]/40)])
mfs_bouton_count = defaultdict(list)
for mf in mfs_bouton_locs:
mfs_bouton_count[len(mfs_bouton_locs[mf])].append(mf)
for count in sorted(mfs_bouton_count.keys()):
print(f'{count}: {mfs_bouton_count[count]}')
'''save mfs_bouton_locs'''
import compress_pickle
compress_pickle.dump((
mfs_bouton_locs
), f"{script_n}.gz")
asdf  # intentional NameError: stop the script here; the lines below are ad-hoc inspection snippets
for loc in mfs_bouton_locs['mf_431']:
print([int(loc[0]/4), int(loc[1]/4), int(loc[2]/40)])
for loc in mfs_locs['mf_41']:
print([int(loc[0]/4), int(loc[1]/4), int(loc[2]/40)])
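if __name__ == '__main__':
    # Editor's sketch of the clustering step above in miniature; it sits below
    # the intentional stop, so it never runs as part of the pipeline. Two tight
    # groups of toy synapse locations (nm) collapse into two clusters with the
    # same eps=8000, min_samples=2 settings, and averaging each cluster's
    # members would give the bouton centers.
    toy_locs = [[0, 0, 0], [1000, 0, 0], [0, 1000, 0],
                [50000, 0, 0], [51000, 500, 0]]
    toy_labels = DBSCAN(eps=8000, min_samples=2).fit(toy_locs).labels_
    print(list(toy_labels))  # expected: [0, 0, 0, 1, 1]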
|
analysis/gen_db/mf_grc/gen_mf_locs_210518.py
| 4,520 |
print(mfs_locs[mf]); asdf white list for big boutons bouton_synapse_threshold = 6 safe for determining big bouton locations 4 is a bit iffy, since it has some semi big boutons bouton_synapse_threshold = 6 this threshold has quite a bit of FPs max dist set to 8um dbscan = DBSCAN(eps=10000, min_samples=2) max dist set to 8um print(mf_bouton_locs) for loc in mf_bouton_locs: print([int(loc[0]/4), int(loc[1]/4), int(loc[2]/40)])
| 433 |
en
| 0.750874 |
# third-party
from flask import render_template, url_for, request, jsonify
# locals
from . import warehouse
@warehouse.route('/element_types', methods=['GET'])
def index():
return render_template("warehouse/element_types.html")
@warehouse.route('/element_type', methods=['POST'])
def create_new_element_type():
print(request.__dict__)
print(request.data)
print(request.get_json())
return jsonify({
"success": True
})
# @warehouse.route('/element_type', methods=['GET'])
# @warehouse.route('/element_type/<element_type_id>', methods=['GET'])
# def element_type(element_type_id=None):
# pass
# @warehouse.route('/element_type', methods=['POST'])
# def new_element_type()
|
warehouse/views.py
| 710 |
third-party locals @warehouse.route('/element_type', methods=['GET']) @warehouse.route('/element_type/<element_type_id>', methods=['GET']) def element_type(element_type_id=None): pass @warehouse.route('/element_type', methods=['POST']) def new_element_type()
| 262 |
en
| 0.114816 |
# copyright (c) 2018 paddlepaddle authors. all rights reserved.
#
# licensed under the apache license, version 2.0 (the "license");
# you may not use this file except in compliance with the license.
# you may obtain a copy of the license at
#
# http://www.apache.org/licenses/license-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the license is distributed on an "as is" basis,
# without warranties or conditions of any kind, either express or implied.
# see the license for the specific language governing permissions and
# limitations under the license.
import os
import unittest
import random
import numpy as np
import paddle.fluid as fluid
import six
import paddle
from paddle.fluid.framework import IrGraph
from paddle.fluid.contrib.slim.quantization import QuantizationTransformPass
from paddle.fluid.contrib.slim.quantization import QuantizationFreezePass
from paddle.fluid.contrib.slim.quantization import ConvertToInt8Pass
from paddle.fluid.contrib.slim.quantization import TransformForMobilePass
from paddle.fluid.contrib.slim.quantization import AddQuantDequantPass
from paddle.fluid import core
paddle.enable_static()
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
os.environ["CPU_NUM"] = "1"
def linear_fc(num):
data = fluid.layers.data(name='image', shape=[1, 32, 32], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
hidden = data
for _ in six.moves.xrange(num):
hidden = fluid.layers.fc(hidden, size=128, act='relu')
loss = fluid.layers.cross_entropy(input=hidden, label=label)
loss = fluid.layers.mean(loss)
return loss
def residual_block(num, quant_skip_pattern=None):
def conv_bn_layer(input,
ch_out,
filter_size,
stride,
padding,
act='relu',
bias_attr=False):
tmp = fluid.layers.conv2d(
input=input,
filter_size=filter_size,
num_filters=ch_out,
stride=stride,
padding=padding,
act=None,
bias_attr=bias_attr)
return fluid.layers.batch_norm(input=tmp, act=act)
data = fluid.layers.data(
name='image',
shape=[1, 1, 32, 32],
dtype='float32',
append_batch_size=False)
label = fluid.layers.data(
name='label', shape=[1, 1], dtype='int64', append_batch_size=False)
hidden = data
for _ in six.moves.xrange(num):
conv = conv_bn_layer(hidden, 16, 3, 1, 1, act=None, bias_attr=True)
short = conv_bn_layer(hidden, 16, 1, 1, 0, act=None)
hidden = fluid.layers.elementwise_add(x=conv, y=short, act='relu')
matmul_weight = fluid.layers.create_parameter(
shape=[1, 16, 32, 32], dtype='float32')
hidden = fluid.layers.matmul(hidden, matmul_weight, True, True)
if quant_skip_pattern:
with fluid.name_scope(quant_skip_pattern):
pool = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='avg', pool_stride=2)
else:
pool = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='avg', pool_stride=2)
fc = fluid.layers.fc(input=pool, size=10)
loss = fluid.layers.cross_entropy(input=fc, label=label)
loss = fluid.layers.mean(loss)
return loss
def conv_net(img, label, quant_skip_pattern):
conv_pool_1 = fluid.nets.simple_img_conv_pool(
input=img,
filter_size=5,
num_filters=20,
pool_size=2,
pool_stride=2,
pool_type='max',
act="relu")
conv_pool_1 = fluid.layers.batch_norm(conv_pool_1)
conv_pool_2 = fluid.nets.simple_img_conv_pool(
input=conv_pool_1,
filter_size=5,
num_filters=50,
pool_size=2,
pool_stride=2,
pool_type='avg',
act="relu")
hidden = fluid.layers.fc(input=conv_pool_2, size=100, act='relu')
with fluid.name_scope(quant_skip_pattern):
prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
loss = fluid.layers.cross_entropy(input=prediction, label=label)
avg_loss = fluid.layers.mean(loss)
return avg_loss
class TestQuantizationTransformPass(unittest.TestCase):
def setUp(self):
self.quantizable_op_and_inputs = {
'conv2d': ['Input', 'Filter'],
'depthwise_conv2d': ['Input', 'Filter'],
'mul': ['X', 'Y']
}
self.quantizable_grad_op_inputs = {
'conv2d_grad': ['Input', 'Filter'],
'depthwise_conv2d_grad': ['Input', 'Filter'],
'mul_grad': ['X', 'Y']
}
def check_program(self, program):
quantized_ops = set()
for block in program.blocks:
for op in block.ops:
# check forward
if op.type in self.quantizable_op_and_inputs:
for arg_name in op.input_arg_names:
self.assertTrue(
arg_name.endswith('.quantized.dequantized'))
quantized_ops.add(arg_name)
for op in block.ops:
# check backward
if op.type in self.quantizable_grad_op_inputs:
for pname in self.quantizable_grad_op_inputs[op.type]:
arg_name = op.input(pname)[0]
self.assertTrue(
arg_name.endswith('.quantized.dequantized'))
self.assertTrue(arg_name in quantized_ops)
def linear_fc_quant(self,
activation_quant_type,
weight_quantize_type,
for_ci=True):
main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
loss = linear_fc(3)
opt = fluid.optimizer.Adam(learning_rate=0.001)
opt.minimize(loss)
place = fluid.CPUPlace()
graph = IrGraph(core.Graph(main.desc), for_test=False)
transform_pass = QuantizationTransformPass(
scope=fluid.global_scope(),
place=place,
activation_quantize_type=activation_quant_type,
weight_quantize_type=weight_quantize_type)
transform_pass.apply(graph)
if not for_ci:
marked_nodes = set()
for op in graph.all_op_nodes():
if op.name().find('quantize') > -1:
marked_nodes.add(op)
graph.draw('.', 'quantize_fc_' + activation_quant_type,
marked_nodes)
program = graph.to_program()
self.check_program(program)
val_graph = IrGraph(core.Graph(program.desc), for_test=False)
if not for_ci:
val_marked_nodes = set()
for op in val_graph.all_op_nodes():
if op.name().find('quantize') > -1:
val_marked_nodes.add(op)
val_graph.draw('.', 'val_fc_' + activation_quant_type,
val_marked_nodes)
def test_linear_fc_quant_abs_max(self):
self.linear_fc_quant('abs_max', 'abs_max', for_ci=True)
def test_linear_fc_quant_range_abs_max(self):
self.linear_fc_quant('range_abs_max', 'abs_max', for_ci=True)
def test_linear_fc_quant_moving_average_abs_max(self):
self.linear_fc_quant(
'moving_average_abs_max', 'channel_wise_abs_max', for_ci=True)
def residual_block_quant(self,
activation_quant_type,
weight_quantize_type,
quantizable_op_type,
for_ci=True):
main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
loss = residual_block(2)
opt = fluid.optimizer.Adam(learning_rate=0.001)
opt.minimize(loss)
place = fluid.CPUPlace()
graph = IrGraph(core.Graph(main.desc), for_test=False)
transform_pass = QuantizationTransformPass(
scope=fluid.global_scope(),
place=place,
activation_quantize_type=activation_quant_type,
weight_quantize_type=weight_quantize_type,
quantizable_op_type=quantizable_op_type)
transform_pass.apply(graph)
if not for_ci:
marked_nodes = set()
for op in graph.all_op_nodes():
if op.name().find('quantize') > -1:
marked_nodes.add(op)
graph.draw('.', 'quantize_residual_' + activation_quant_type,
marked_nodes)
program = graph.to_program()
self.check_program(program)
val_graph = IrGraph(core.Graph(program.desc), for_test=False)
if not for_ci:
val_marked_nodes = set()
for op in val_graph.all_op_nodes():
if op.name().find('quantize') > -1:
val_marked_nodes.add(op)
val_graph.draw('.', 'val_residual_' + activation_quant_type,
val_marked_nodes)
def test_residual_block_abs_max(self):
quantizable_op_type = ['conv2d', 'depthwise_conv2d', 'mul', 'matmul']
self.residual_block_quant(
'abs_max', 'abs_max', quantizable_op_type, for_ci=True)
def test_residual_block_range_abs_max(self):
quantizable_op_type = ['conv2d', 'depthwise_conv2d', 'mul', 'matmul']
self.residual_block_quant(
'range_abs_max', 'abs_max', quantizable_op_type, for_ci=True)
def test_residual_block_moving_average_abs_max(self):
quantizable_op_type = ['conv2d', 'depthwise_conv2d', 'mul', 'matmul']
self.residual_block_quant(
'moving_average_abs_max',
'channel_wise_abs_max',
quantizable_op_type,
for_ci=True)
class TestQuantizationFreezePass(unittest.TestCase):
def freeze_graph(self,
use_cuda,
seed,
activation_quant_type,
bias_correction=False,
weight_quant_type='abs_max',
for_ci=True,
quant_skip_pattern='skip_quant'):
def build_program(main, startup, is_test):
main.random_seed = seed
startup.random_seed = seed
with fluid.unique_name.guard():
with fluid.program_guard(main, startup):
img = fluid.layers.data(
name='image', shape=[1, 28, 28], dtype='float32')
label = fluid.layers.data(
name='label', shape=[1], dtype='int64')
loss = conv_net(img, label, quant_skip_pattern)
if not is_test:
opt = fluid.optimizer.Adam(learning_rate=0.001)
opt.minimize(loss)
return [img, label], loss
random.seed(0)
np.random.seed(0)
main = fluid.Program()
startup = fluid.Program()
test_program = fluid.Program()
feeds, loss = build_program(main, startup, False)
build_program(test_program, startup, True)
test_program = test_program.clone(for_test=True)
main_graph = IrGraph(core.Graph(main.desc), for_test=False)
test_graph = IrGraph(core.Graph(test_program.desc), for_test=True)
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
scope = fluid.Scope()
with fluid.scope_guard(scope):
exe.run(startup)
transform_pass = QuantizationTransformPass(
scope=scope,
place=place,
activation_quantize_type=activation_quant_type,
weight_quantize_type=weight_quant_type,
skip_pattern=quant_skip_pattern)
transform_pass.apply(main_graph)
transform_pass.apply(test_graph)
dev_name = '_gpu_' if use_cuda else '_cpu_'
if not for_ci:
marked_nodes = set()
for op in main_graph.all_op_nodes():
if op.name().find('quantize') > -1:
marked_nodes.add(op)
main_graph.draw('.', 'main' + dev_name + activation_quant_type + '_'
+ weight_quant_type, marked_nodes)
marked_nodes = set()
for op in test_graph.all_op_nodes():
if op.name().find('quantize') > -1:
marked_nodes.add(op)
test_graph.draw('.', 'test' + dev_name + activation_quant_type + '_'
+ weight_quant_type, marked_nodes)
build_strategy = fluid.BuildStrategy()
build_strategy.memory_optimize = False
build_strategy.enable_inplace = False
build_strategy.fuse_all_reduce_ops = False
binary = fluid.CompiledProgram(main_graph.graph).with_data_parallel(
loss_name=loss.name, build_strategy=build_strategy)
quantized_test_program = test_graph.to_program()
iters = 5
batch_size = 8
train_reader = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.mnist.train(), buf_size=500),
batch_size=batch_size)
test_reader = paddle.batch(
paddle.dataset.mnist.test(), batch_size=batch_size)
feeder = fluid.DataFeeder(feed_list=feeds, place=place)
with fluid.scope_guard(scope):
for _ in range(iters):
data = next(train_reader())
loss_v = exe.run(binary,
feed=feeder.feed(data),
fetch_list=[loss])
if not for_ci:
print('{}: {}'.format('loss' + dev_name +
activation_quant_type + '_' +
weight_quant_type, loss_v))
test_data = next(test_reader())
with fluid.program_guard(quantized_test_program):
w_var = fluid.framework._get_var('conv2d_1.w_0.quantized',
quantized_test_program)
# Testing
with fluid.scope_guard(scope):
test_loss1, w_quant = exe.run(program=quantized_test_program,
feed=feeder.feed(test_data),
fetch_list=[loss, w_var])
# Freeze graph for inference, but the weight of fc/conv is still float type.
freeze_pass = QuantizationFreezePass(
scope=scope, place=place, bias_correction=bias_correction, \
weight_quantize_type=weight_quant_type)
freeze_pass.apply(test_graph)
if not for_ci:
marked_nodes = set()
for op in test_graph.all_op_nodes():
if op.name().find('quantize') > -1:
marked_nodes.add(op)
test_graph.draw('.', 'test_freeze' + dev_name +
activation_quant_type + '_' + weight_quant_type,
marked_nodes)
server_program = test_graph.to_program()
with fluid.scope_guard(scope):
test_loss2, = exe.run(program=server_program,
feed=feeder.feed(test_data),
fetch_list=[loss])
self.assertAlmostEqual(test_loss1, test_loss2, delta=5e-3)
if not for_ci:
print(
'{}: {}'.format('test_loss1' + dev_name + activation_quant_type
+ '_' + weight_quant_type, test_loss1))
print(
'{}: {}'.format('test_loss2' + dev_name + activation_quant_type
+ '_' + weight_quant_type, test_loss2))
w_freeze = np.array(scope.find_var('conv2d_1.w_0').get_tensor())
# Maybe failed, this is due to the calculation precision
# self.assertAlmostEqual(np.sum(w_freeze), np.sum(w_quant))
if not for_ci:
print('{}: {}'.format('w_freeze' + dev_name + activation_quant_type
+ '_' + weight_quant_type, np.sum(w_freeze)))
print('{}: {}'.format('w_quant' + dev_name + activation_quant_type +
'_' + weight_quant_type, np.sum(w_quant)))
# Convert parameter to 8-bit.
convert_int8_pass = ConvertToInt8Pass(scope=scope, place=place)
convert_int8_pass.apply(test_graph)
if not for_ci:
marked_nodes = set()
for op in test_graph.all_op_nodes():
if op.name().find('quantize') > -1:
marked_nodes.add(op)
test_graph.draw('.', 'test_int8' + dev_name + activation_quant_type
+ '_' + weight_quant_type, marked_nodes)
server_program_int8 = test_graph.to_program()
# Save the 8-bit parameter and model file.
with fluid.scope_guard(scope):
fluid.io.save_inference_model(
'server_int8' + dev_name + activation_quant_type + '_' +
weight_quant_type, ['image', 'label'], [loss], exe,
server_program_int8)
# Test whether the 8-bit parameter and model file can be loaded successfully.
[infer, feed, fetch] = fluid.io.load_inference_model(
'server_int8' + dev_name + activation_quant_type + '_' +
weight_quant_type, exe)
# Check the loaded 8-bit weight.
w_8bit = np.array(scope.find_var('conv2d_1.w_0.int8').get_tensor())
self.assertEqual(w_8bit.dtype, np.int8)
self.assertEqual(np.sum(w_8bit), np.sum(w_freeze))
if not for_ci:
print('{}: {}'.format('w_8bit' + dev_name + activation_quant_type +
'_' + weight_quant_type, np.sum(w_8bit)))
print('{}: {}'.format('w_freeze' + dev_name + activation_quant_type
+ '_' + weight_quant_type, np.sum(w_freeze)))
mobile_pass = TransformForMobilePass()
mobile_pass.apply(test_graph)
if not for_ci:
marked_nodes = set()
for op in test_graph.all_op_nodes():
if op.name().find('quantize') > -1:
marked_nodes.add(op)
test_graph.draw('.', 'test_mobile' + dev_name +
activation_quant_type + '_' + weight_quant_type,
marked_nodes)
mobile_program = test_graph.to_program()
with fluid.scope_guard(scope):
fluid.io.save_inference_model(
'mobile_int8' + dev_name + activation_quant_type + '_' +
weight_quant_type, ['image', 'label'], [loss], exe,
mobile_program)
def test_freeze_graph_cuda_dynamic(self):
if fluid.core.is_compiled_with_cuda():
with fluid.unique_name.guard():
self.freeze_graph(
True,
seed=1,
activation_quant_type='abs_max',
weight_quant_type='abs_max',
for_ci=True)
with fluid.unique_name.guard():
self.freeze_graph(
True,
seed=1,
activation_quant_type='abs_max',
weight_quant_type='channel_wise_abs_max',
for_ci=True)
def test_freeze_graph_cpu_dynamic(self):
with fluid.unique_name.guard():
self.freeze_graph(
False,
seed=2,
activation_quant_type='abs_max',
weight_quant_type='abs_max',
for_ci=True)
self.freeze_graph(
False,
seed=2,
activation_quant_type='abs_max',
weight_quant_type='channel_wise_abs_max',
for_ci=True)
def test_freeze_graph_cuda_static(self):
if fluid.core.is_compiled_with_cuda():
with fluid.unique_name.guard():
self.freeze_graph(
True,
seed=1,
activation_quant_type='range_abs_max',
bias_correction=True,
weight_quant_type='abs_max',
for_ci=True)
self.freeze_graph(
True,
seed=1,
activation_quant_type='range_abs_max',
weight_quant_type='abs_max',
for_ci=True)
self.freeze_graph(
True,
seed=1,
activation_quant_type='moving_average_abs_max',
weight_quant_type='abs_max',
for_ci=True)
self.freeze_graph(
True,
seed=1,
activation_quant_type='range_abs_max',
weight_quant_type='channel_wise_abs_max',
for_ci=True)
self.freeze_graph(
True,
seed=1,
activation_quant_type='moving_average_abs_max',
weight_quant_type='channel_wise_abs_max',
for_ci=True)
self.freeze_graph(
True,
seed=1,
activation_quant_type='moving_average_abs_max',
bias_correction=True,
weight_quant_type='channel_wise_abs_max',
for_ci=True)
def test_freeze_graph_cpu_static(self):
with fluid.unique_name.guard():
self.freeze_graph(
False,
seed=2,
activation_quant_type='range_abs_max',
weight_quant_type='abs_max',
for_ci=True)
self.freeze_graph(
False,
seed=2,
activation_quant_type='moving_average_abs_max',
weight_quant_type='abs_max',
for_ci=True)
self.freeze_graph(
False,
seed=2,
activation_quant_type='range_abs_max',
weight_quant_type='channel_wise_abs_max',
for_ci=True)
self.freeze_graph(
False,
seed=2,
activation_quant_type='moving_average_abs_max',
weight_quant_type='channel_wise_abs_max',
for_ci=True)
def quant_dequant_residual_block(num, quant_skip_pattern=None):
def conv_bn_layer(input,
ch_out,
filter_size,
stride,
padding,
act='relu',
bias_attr=False):
tmp = fluid.layers.conv2d(
input=input,
filter_size=filter_size,
num_filters=ch_out,
stride=stride,
padding=padding,
act=None,
bias_attr=bias_attr)
return fluid.layers.batch_norm(input=tmp, act=act)
data1 = fluid.layers.data(name='image', shape=[1, 32, 32], dtype='float32')
data2 = fluid.layers.data(
name='matmul_input', shape=[16, 32, 32], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
hidden = data1
for _ in six.moves.xrange(num):
conv = conv_bn_layer(hidden, 16, 3, 1, 1, act=None, bias_attr=True)
short = conv_bn_layer(hidden, 16, 1, 1, 0, act=None)
hidden = fluid.layers.elementwise_add(x=conv, y=short, act='relu')
hidden = fluid.layers.matmul(hidden, data2, True, True)
if isinstance(quant_skip_pattern, str):
with fluid.name_scope(quant_skip_pattern):
pool1 = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='avg', pool_stride=2)
pool2 = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='max', pool_stride=2)
pool_add = fluid.layers.elementwise_add(
x=pool1, y=pool2, act='relu')
elif isinstance(quant_skip_pattern, list):
assert len(
quant_skip_pattern
) > 1, 'test config error: the len of quant_skip_pattern list should be greater than 1.'
with fluid.name_scope(quant_skip_pattern[0]):
pool1 = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='avg', pool_stride=2)
pool2 = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='max', pool_stride=2)
with fluid.name_scope(quant_skip_pattern[1]):
pool_add = fluid.layers.elementwise_add(
x=pool1, y=pool2, act='relu')
else:
pool1 = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='avg', pool_stride=2)
pool2 = fluid.layers.pool2d(
input=hidden, pool_size=2, pool_type='max', pool_stride=2)
pool_add = fluid.layers.elementwise_add(x=pool1, y=pool2, act='relu')
fc = fluid.layers.fc(input=pool_add, size=10)
loss = fluid.layers.cross_entropy(input=fc, label=label)
loss = fluid.layers.mean(loss)
return loss
class TestAddQuantDequantPass(unittest.TestCase):
def setUp(self):
self._target_ops = {'elementwise_add', 'pool2d'}
self._target_grad_ops = {'elementwise_add_grad', 'pool2d_grad'}
def check_graph(self, graph, skip_pattern=None):
ops = graph.all_op_nodes()
for op_node in ops:
if op_node.name() in self._target_ops:
user_skipped = False
if isinstance(skip_pattern, list):
user_skipped = op_node.op().has_attr("op_namescope") and \
any(pattern in op_node.op().attr("op_namescope") for pattern in skip_pattern)
elif isinstance(skip_pattern, str):
user_skipped = op_node.op().has_attr("op_namescope") and \
op_node.op().attr("op_namescope").find(skip_pattern) != -1
if user_skipped:
continue
in_nodes_all_not_persistable = True
for input_name in op_node.input_arg_names():
in_node = graph._find_node_by_name(op_node.inputs,
input_name)
in_nodes_all_not_persistable = (
in_nodes_all_not_persistable and
not in_node.persistable())
if not in_nodes_all_not_persistable:
continue
input_names = op_node.input_arg_names()
for input_name in input_names:
self.assertTrue(input_name.endswith('.quant_dequant'))
def residual_block_quant(self,
quantizable_op_type,
skip_pattern=None,
for_ci=True):
main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
loss = quant_dequant_residual_block(2, skip_pattern)
opt = fluid.optimizer.Adam(learning_rate=0.001)
opt.minimize(loss)
place = fluid.CPUPlace()
graph = IrGraph(core.Graph(main.desc), for_test=False)
add_quant_dequant_pass = AddQuantDequantPass(
scope=fluid.global_scope(),
place=place,
skip_pattern=skip_pattern,
quantizable_op_type=quantizable_op_type)
add_quant_dequant_pass.apply(graph)
if not for_ci:
marked_nodes = set()
for op in graph.all_op_nodes():
if op.name().find('quant') > -1:
marked_nodes.add(op)
graph.draw('.', 'add_quant_dequant_graph', marked_nodes)
self.check_graph(graph, skip_pattern)
program = graph.to_program()
val_graph = IrGraph(core.Graph(program.desc), for_test=False)
if not for_ci:
val_marked_nodes = set()
for op in val_graph.all_op_nodes():
if op.name().find('quant') > -1:
val_marked_nodes.add(op)
val_graph.draw('.', 'val_add_quant_dequant_graph', val_marked_nodes)
def test_residual_block(self):
quantizable_op_type = ['elementwise_add', 'pool2d', 'mul', 'matmul']
self.residual_block_quant(
quantizable_op_type, skip_pattern=None, for_ci=True)
def test_residual_block_skip_pattern(self):
quantizable_op_type = ['elementwise_add', 'pool2d', 'mul', 'matmul']
self.residual_block_quant(
quantizable_op_type, skip_pattern='skip_quant', for_ci=True)
    def test_residual_block_skip_pattern_list(self):
quantizable_op_type = ['elementwise_add', 'pool2d', 'mul', 'matmul']
self.residual_block_quant(
quantizable_op_type,
skip_pattern=['skip_quant1', 'skip_quant2'],
for_ci=True)
if __name__ == '__main__':
unittest.main()
|
python/paddle/fluid/contrib/slim/tests/test_quantization_pass.py
| 29,224 |
copyright (c) 2018 paddlepaddle authors. all rights reserved. licensed under the apache license, version 2.0 (the "license"); you may not use this file except in compliance with the license. you may obtain a copy of the license at http://www.apache.org/licenses/license-2.0 unless required by applicable law or agreed to in writing, software distributed under the license is distributed on an "as is" basis, without warranties or conditions of any kind, either express or implied. see the license for the specific language governing permissions and limitations under the license. check forward check backward Testing Freeze graph for inference, but the weight of fc/conv is still float type. Maybe failed, this is due to the calculation precision self.assertAlmostEqual(np.sum(w_freeze), np.sum(w_quant)) Convert parameter to 8-bit. Save the 8-bit parameter and model file. Test whether the 8-bit parameter and model file can be loaded successfully. Check the loaded 8-bit weight.
| 984 |
en
| 0.808731 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def black(string):
    return '\033[30m' + string + '\033[0m'
def blue(string):
    return '\033[94m' + string + '\033[0m'
def gray(string):
    return '\033[1;30m' + string + '\033[0m'
def green(string):
    return '\033[92m' + string + '\033[0m'
def cyan(string):
    return '\033[96m' + string + '\033[0m'
def lightPurple(string):
    return '\033[94m' + string + '\033[0m'
def purple(string):
    return '\033[95m' + string + '\033[0m'
def red(string):
    return '\033[91m' + string + '\033[0m'
def underline(string):
    return '\033[4m' + string + '\033[0m'
def white(string):
    return '\033[0m' + string + '\033[0m'
def white_2(string):
    return '\033[1m' + string + '\033[0m'
def yellow(string):
    return '\033[93m' + string + '\033[0m'
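if __name__ == "__main__":
    # Editor's sketch: each helper wraps its argument in an ANSI escape code and
    # resets with \033[0m, so calls can be nested freely.
    print(green("ok"), yellow("warning"), red(underline("error")))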
|
utils/color.py
| 749 |
!/usr/bin/envpython -*- coding: utf-8 -*-
| 41 |
en
| 0.402683 |
import cProfile
import json
import logging
import os
import pstats
import signal
import tempfile
import time
import traceback
from django.conf import settings
from django.utils.timezone import now as tz_now
from django.db import DatabaseError, OperationalError, connection as django_connection
from django.db.utils import InterfaceError, InternalError
import psutil
import redis
from awx.main.consumers import emit_channel_notification
from awx.main.models import (JobEvent, AdHocCommandEvent, ProjectUpdateEvent,
InventoryUpdateEvent, SystemJobEvent, UnifiedJob,
Job)
from awx.main.tasks import handle_success_and_failure_notifications
from awx.main.models.events import emit_event_detail
from .base import BaseWorker
logger = logging.getLogger('awx.main.commands.run_callback_receiver')
class CallbackBrokerWorker(BaseWorker):
'''
A worker implementation that deserializes callback event data and persists
it into the database.
The code that *generates* these types of messages is found in the
ansible-runner display callback plugin.
'''
MAX_RETRIES = 2
last_stats = time.time()
total = 0
last_event = ''
prof = None
def __init__(self):
self.buff = {}
self.pid = os.getpid()
self.redis = redis.Redis.from_url(settings.BROKER_URL)
for key in self.redis.keys('awx_callback_receiver_statistics_*'):
self.redis.delete(key)
def read(self, queue):
try:
res = self.redis.blpop(settings.CALLBACK_QUEUE, timeout=settings.JOB_EVENT_BUFFER_SECONDS)
if res is None:
return {'event': 'FLUSH'}
self.total += 1
return json.loads(res[1])
except redis.exceptions.RedisError:
logger.exception("encountered an error communicating with redis")
time.sleep(1)
except (json.JSONDecodeError, KeyError):
logger.exception("failed to decode JSON message from redis")
finally:
self.record_statistics()
return {'event': 'FLUSH'}
def record_statistics(self):
# buffer stat recording to once per (by default) 5s
if time.time() - self.last_stats > settings.JOB_EVENT_STATISTICS_INTERVAL:
try:
self.redis.set(f'awx_callback_receiver_statistics_{self.pid}', self.debug())
self.last_stats = time.time()
except Exception:
logger.exception("encountered an error communicating with redis")
self.last_stats = time.time()
def debug(self):
return f'. worker[pid:{self.pid}] sent={self.total} rss={self.mb}MB {self.last_event}'
@property
def mb(self):
return '{:0.3f}'.format(
psutil.Process(self.pid).memory_info().rss / 1024.0 / 1024.0
)
def toggle_profiling(self, *args):
if self.prof:
self.prof.disable()
filename = f'callback-{self.pid}.pstats'
filepath = os.path.join(tempfile.gettempdir(), filename)
with open(filepath, 'w') as f:
pstats.Stats(self.prof, stream=f).sort_stats('cumulative').print_stats()
pstats.Stats(self.prof).dump_stats(filepath + '.raw')
self.prof = False
logger.error(f'profiling is disabled, wrote {filepath}')
else:
self.prof = cProfile.Profile()
self.prof.enable()
logger.error('profiling is enabled')
def work_loop(self, *args, **kw):
if settings.AWX_CALLBACK_PROFILE:
signal.signal(signal.SIGUSR1, self.toggle_profiling)
return super(CallbackBrokerWorker, self).work_loop(*args, **kw)
def flush(self, force=False):
now = tz_now()
if (
force or
any([len(events) >= 1000 for events in self.buff.values()])
):
for cls, events in self.buff.items():
logger.debug(f'{cls.__name__}.objects.bulk_create({len(events)})')
for e in events:
if not e.created:
e.created = now
e.modified = now
try:
cls.objects.bulk_create(events)
except Exception:
# if an exception occurs, we should re-attempt to save the
# events one-by-one, because something in the list is
# broken/stale
for e in events:
try:
e.save()
except Exception:
logger.exception('Database Error Saving Job Event')
for e in events:
emit_event_detail(e)
self.buff = {}
def perform_work(self, body):
try:
flush = body.get('event') == 'FLUSH'
if flush:
self.last_event = ''
if not flush:
event_map = {
'job_id': JobEvent,
'ad_hoc_command_id': AdHocCommandEvent,
'project_update_id': ProjectUpdateEvent,
'inventory_update_id': InventoryUpdateEvent,
'system_job_id': SystemJobEvent,
}
job_identifier = 'unknown job'
for key, cls in event_map.items():
if key in body:
job_identifier = body[key]
break
self.last_event = f'\n\t- {cls.__name__} for #{job_identifier} ({body.get("event", "")} {body.get("uuid", "")})' # noqa
if body.get('event') == 'EOF':
try:
final_counter = body.get('final_counter', 0)
logger.info('Event processing is finished for Job {}, sending notifications'.format(job_identifier))
# EOF events are sent when stdout for the running task is
# closed. don't actually persist them to the database; we
# just use them to report `summary` websocket events as an
# approximation for when a job is "done"
emit_channel_notification(
'jobs-summary',
dict(group_name='jobs', unified_job_id=job_identifier, final_counter=final_counter)
)
# Additionally, when we've processed all events, we should
# have all the data we need to send out success/failure
# notification templates
uj = UnifiedJob.objects.get(pk=job_identifier)
if isinstance(uj, Job):
# *actual playbooks* send their success/failure
# notifications in response to the playbook_on_stats
# event handling code in main.models.events
pass
elif hasattr(uj, 'send_notification_templates'):
handle_success_and_failure_notifications.apply_async([uj.id])
except Exception:
logger.exception('Worker failed to emit notifications: Job {}'.format(job_identifier))
return
event = cls.create_from_data(**body)
self.buff.setdefault(cls, []).append(event)
retries = 0
while retries <= self.MAX_RETRIES:
try:
self.flush(force=flush)
break
except (OperationalError, InterfaceError, InternalError):
if retries >= self.MAX_RETRIES:
logger.exception('Worker could not re-establish database connectivity, giving up on one or more events.')
return
delay = 60 * retries
logger.exception('Database Error Saving Job Event, retry #{i} in {delay} seconds:'.format(
i=retries + 1,
delay=delay
))
django_connection.close()
time.sleep(delay)
retries += 1
except DatabaseError:
logger.exception('Database Error Saving Job Event')
break
except Exception as exc:
tb = traceback.format_exc()
logger.error('Callback Task Processor Raised Exception: %r', exc)
logger.error('Detail: {}'.format(tb))
|
awx/main/dispatch/worker/callback.py
| 8,699 |
A worker implementation that deserializes callback event data and persists
it into the database.
The code that *generates* these types of messages is found in the
ansible-runner display callback plugin.
buffer stat recording to once per (by default) 5s if an exception occurs, we should re-attempt to save the events one-by-one, because something in the list is broken/stale noqa EOF events are sent when stdout for the running task is closed. don't actually persist them to the database; we just use them to report `summary` websocket events as an approximation for when a job is "done" Additionally, when we've processed all events, we should have all the data we need to send out success/failure notification templates *actual playbooks* send their success/failure notifications in response to the playbook_on_stats event handling code in main.models.events
| 863 |
en
| 0.92048 |
from flask import render_template, flash, redirect, url_for, request
from flask.views import MethodView
from app.middleware import auth
from app.models.user import User
from app.validators.register_form import RegisterForm
from app.services import avatar_service
class RegisterController(MethodView):
@auth.optional
def get(self):
"""
Show register form
Returns:
Register template with form
"""
return render_template('auth/register.html', form=RegisterForm())
@auth.optional
def post(self):
"""
Handle the POST request and sign up the user if form validation passes
Returns:
A redirect or a template with the validation errors
"""
form = RegisterForm()
if form.validate_on_submit():
form.validate_username(form.username)
avatar = 'no-image.png'
if 'avatar' in request.files and request.files['avatar']:
avatar = avatar_service.save(form.avatar.data)
User.create(form.username.data, form.password.data, avatar)
flash('Your account has been created. You may now login.', 'info')
return redirect(url_for('login'))
return render_template('auth/register.html', form=form)
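if __name__ == "__main__":
    # Editor's sketch: a MethodView is bound to a URL with as_view(). The
    # blueprint and endpoint names below are illustrative placeholders, not this
    # project's actual routing.
    from flask import Blueprint
    demo_auth = Blueprint("auth_demo", __name__)
    demo_auth.add_url_rule(
        "/register", view_func=RegisterController.as_view("register")
    )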
|
app/controllers/auth/register.py
| 1,193 |
Show register form
Returns:
Register template with form
Handle the POST request and sign up the user if form validation passes
Returns:
A redirect or a template with the validation errors
| 193 |
en
| 0.679072 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020- IBM Inc. All rights reserved
# SPDX-License-Identifier: Apache2.0
#
"""
"""
from abc import ABC, abstractproperty, abstractmethod
class AbstractType(ABC):
@abstractproperty
def length(self):
pass
@abstractmethod
def __call__(self):
pass
def _get_chunk(self):
return self.locator.content(self.length)
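# Editor's sketch: a minimal concrete subclass showing the contract the abstract
# base enforces. The class name and the fixed 4-byte length are illustrative;
# concrete types are expected to provide a `locator` exposing content(length).
class FixedChunkType(AbstractType):
    @property
    def length(self):
        return 4
    def __call__(self):
        return self._get_chunk()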
|
cbexplorer/types/AbstractType.py
| 417 |
!/usr/bin/env python3 -*- coding: utf-8 -*- Copyright 2020- IBM Inc. All rights reserved SPDX-License-Identifier: Apache2.0
| 123 |
en
| 0.32005 |
"""
Copyright (C) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import openvino.inference_engine as ie
from ..infer_raw_results import InferRawResults
from ..aggregated_statistics import AggregatedStatistics
class CollectResultsCallback:
def __init__(
self,
network: ie.IENetwork,
exec_network: ie.ExecutableNetwork,
collect_resuls: bool = True,
collect_layers: set = None,
collect_aggregated_statistics: bool = True,
iterations_count: int = 1,
dataset_size: int = 1):
if not network:
raise ValueError("network is not specified")
if not exec_network:
raise ValueError("exec_network is not specified")
self._network = network
self._exec_network = exec_network
self._aggregated_statistics = None
self._iterations_count = iterations_count
self._dataset_size = dataset_size
self._collect_results = collect_resuls
self._collect_layers = collect_layers
self._collect_aggregated_statistics = collect_aggregated_statistics
self._infer_raw_results = InferRawResults() if collect_resuls else None
self._latencies = list()
def callback(self, value, latency = None):
if self._collect_aggregated_statistics:
if not self._aggregated_statistics:
self._aggregated_statistics = AggregatedStatistics(
iterations_count = self._iterations_count,
dataset_size = self._dataset_size)
self._aggregated_statistics.add(self._network, self._exec_network, value)
if self._collect_results:
if self._collect_layers:
collect_value = dict()
for layer_name in value:
if layer_name in self._collect_layers:
collect_value[layer_name] = value[layer_name]
self._infer_raw_results.add(collect_value)
else:
self._infer_raw_results.add(value)
if latency:
self._latencies.append(latency)
@property
def aggregated_statistics(self) -> AggregatedStatistics:
return self._aggregated_statistics
@property
def infer_raw_result(self) -> InferRawResults:
return self._infer_raw_results
@property
def latencies(self) -> list:
return self._latencies
def release(self):
if self._aggregated_statistics:
self._aggregated_statistics.release()
if self._infer_raw_results:
self._infer_raw_results.release()
def get_accuracy_drop(self):
return None
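if __name__ == "__main__":
    # Editor's sketch: exercise the callback without a real model. The two
    # object() placeholders stand in for ie.IENetwork / ie.ExecutableNetwork,
    # and aggregated statistics are disabled so the placeholders are never
    # introspected; only the raw-result buffer and latency list are touched.
    cb = CollectResultsCallback(object(), object(),
                                collect_aggregated_statistics=False)
    cb.callback({"prob": [0.1, 0.9]}, latency=3.5)
    print(cb.latencies, cb.infer_raw_result is not None)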
|
tools/calibration/process_dataset_callbacks/collect_results_callback.py
| 3,160 |
Copyright (C) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 568 |
en
| 0.857326 |
from django.shortcuts import render
from wiki.models import Page
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.shortcuts import get_object_or_404
class PageList(ListView):
"""
    This view grabs all the pages out of the database and
    returns a list of each unique wiki page for the
    user to access on the website through 'list.html'.
"""
model = Page
def get(self, request):
""" Returns a list of wiki pages. """
pages = Page.objects.all()
context = {'pages': pages}
return render(request, 'list.html', context=context)
class PageDetailView(DetailView):
"""
    This view returns a page for a unique wiki using its slug as an identifier
or a 404 message if the page does not exist
"""
model = Page
def get(self, request, slug):
wiki = get_object_or_404(Page, slug=slug)
return render(request, 'page.html', {'wiki': wiki})
def post(self, request, slug):
pass
|
wiki/views.py
| 1,034 |
This view returns a page for a unique wiki using it's slug as an identifier
or a 404 message if the page does not exist
This view grabs all the pages out of the database
returns a list of each unique wiki page for the
user to access on the website through 'list.html'
Returns a list of wiki pages.
| 297 |
en
| 0.68909 |
"""Polynomial model class used by agents for building stuff.
"""
from torch import nn, optim
import torch
import torch.nn.functional as F
from stock_trading_backend.agent.model import Model
class NNModel(nn.Module):
"""Torch neural network model.
"""
def __init__(self, num_inputs, num_hidden_layers, num_inner_features):
"""Initializer for linear model.
Args:
num_inputs: the dimension of input data.
num_hidden_layers: the number of hidden layers.
num_inner_features: the number of features in the hidden layers
"""
super(NNModel, self).__init__()
self.input_layer = nn.Linear(num_inputs, num_inner_features)
hidden_layers = []
for _ in range(num_hidden_layers):
hidden_layers.append(nn.Linear(num_inner_features, num_inner_features))
hidden_layers.append(nn.ReLU())
self.hidden_layers = nn.Sequential(*hidden_layers)
self.output_layer = nn.Linear(num_inner_features, 1)
def forward(self, input_tensor):
"""Forward pass on the neural network model.
Args:
input_tensor: the input tensor.
Returns:
Tensor with model results.
"""
output = F.relu(self.input_layer(input_tensor))
output = self.hidden_layers(output)
output = self.output_layer(output)
return output
class NeuralNetworkModel(Model):
"""Neural netowrk model class.
"""
name = "neural_network_model"
def __init__(self, learning_rate=1e-3, num_hidden_layers=1, num_inner_features=100):
"""Initializer for model class.
Args:
learning_rate: the learning rate of the model.
num_hidden_layers: number of hidden layers in the network.
num_inner_features: number of features in the hidden layers.
"""
super(NeuralNetworkModel, self).__init__()
self.model = None
self.optimizer = None
self.criterion = nn.MSELoss()
self.learning_rate = learning_rate
self.num_hidden_layers = num_hidden_layers
self.num_inner_features = num_inner_features
self.id_str = "{}_{}_{}_{}".format(self.name, learning_rate, num_hidden_layers,
num_inner_features)
def _init_model(self, num_inputs):
"""Initializes internal linear model.
Args:
num_inputs: number of inputs that model will have.
"""
self.model = NNModel(num_inputs, self.num_hidden_layers, self.num_inner_features)
self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)
def _predict(self, state_action_tensor):
"""Use provided information to make a prediction.
Args:
state_action_tensor: pytorch tensor with state-action values.
Returns:
Predicted values for observation-action tensors.
"""
if self.model is None:
self._init_model(state_action_tensor.shape[1])
return self.model(state_action_tensor).detach().reshape(-1)
def _train(self, state_action_tensor, expected_values_tensor):
"""Train the model for 1 epoch.
Args:
            state_action_tensor: pytorch tensor with state-action values.
            expected_values_tensor: pytorch tensor with expected values for each state-action.
        Returns:
            The loss before training.
"""
if self.model is None:
self._init_model(state_action_tensor.shape[1])
self.optimizer.zero_grad()
output = self.model(state_action_tensor)
loss = self.criterion(output, expected_values_tensor)
loss_value = loss.data.item()
loss.backward()
self.optimizer.step()
return loss_value
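if __name__ == "__main__":
    # Editor's sketch: drive the bare torch module defined above on random data.
    # NeuralNetworkModel funnels state-action tensors through the same network
    # via its _train/_predict hooks, but its Model base class is not shown here,
    # so only NNModel is exercised directly.
    net = NNModel(num_inputs=4, num_hidden_layers=2, num_inner_features=16)
    states = torch.randn(8, 4)
    targets = torch.randn(8, 1)
    loss = nn.MSELoss()(net(states), targets)
    loss.backward()
    print("demo loss:", float(loss))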
|
stock_trading_backend/agent/neural_network_model.py
| 3,829 |
Torch neural network model.
Neural netowrk model class.
Initializer for linear model.
Args:
num_inputs: the dimension of input data.
num_hidden_layers: the number of hidden layers.
num_inner_features: the number of features in the hidden layers
Initializer for model class.
Args:
learning_rate: the learning rate of the model.
num_hidden_layers: number of hidden layers in the network.
num_inner_features: number of features in the hidden layers.
Initializes internal linear model.
Args:
num_inputs: number of inputs that model will have.
Use provided information to make a prediction.
Args:
state_action_tensor: pytorch tensor with state-action values.
Returns:
Predicted values for observation-action tensors.
Train the model for 1 epoch.
Args:
state_action_tensor: pytorch tensor with state-action expected_values.
expected_values: pytorch tensor with expected values for each state-action.
Returns:
The loss before trainig.
Forward pass on the neural network model.
Args:
input_tensor: the input tensor.
Returns:
Tensor with model results.
Polynomial model class used by agents for building stuff.
| 1,176 |
en
| 0.735824 |
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_waas_access_rules_facts
short_description: Fetches details about one or multiple AccessRules resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple AccessRules resources in Oracle Cloud Infrastructure
- Gets the currently configured access rules for the Web Application Firewall configuration of a specified WAAS policy.
The order of the access rules is important. The rules will be checked in the order they are specified and the first matching rule will be used.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
waas_policy_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the WAAS policy.
type: str
required: true
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_name_option ]
"""
EXAMPLES = """
- name: List access_rules
oci_waas_access_rules_facts:
# required
waas_policy_id: "ocid1.waaspolicy.oc1..xxxxxxEXAMPLExxxxxx"
"""
RETURN = """
access_rules:
description:
- List of AccessRules resources
returned: on success
type: complex
contains:
name:
description:
- The unique name of the access rule.
returned: on success
type: str
sample: name_example
criteria:
description:
- The list of access rule criteria. The rule would be applied only for the requests that matched all the listed conditions.
returned: on success
type: complex
contains:
condition:
description:
- "The criteria the access rule and JavaScript Challenge uses to determine if action should be taken on a request.
- **URL_IS:** Matches if the concatenation of request URL path and query is identical to the contents of the `value` field. URL must
start with a `/`.
- **URL_IS_NOT:** Matches if the concatenation of request URL path and query is not identical to the contents of the `value` field.
URL must start with a `/`.
- **URL_STARTS_WITH:** Matches if the concatenation of request URL path and query starts with the contents of the `value` field. URL
must start with a `/`.
- **URL_PART_ENDS_WITH:** Matches if the concatenation of request URL path and query ends with the contents of the `value` field.
- **URL_PART_CONTAINS:** Matches if the concatenation of request URL path and query contains the contents of the `value` field.
- **URL_REGEX:** Matches if the concatenation of request URL path and query is described by the regular expression in the value field.
The value must be a valid regular expression recognized by the PCRE library in Nginx (https://www.pcre.org).
- **URL_DOES_NOT_MATCH_REGEX:** Matches if the concatenation of request URL path and query is not described by the regular expression
in the `value` field. The value must be a valid regular expression recognized by the PCRE library in Nginx (https://www.pcre.org).
- **URL_DOES_NOT_START_WITH:** Matches if the concatenation of request URL path and query does not start with the contents of the
`value` field.
- **URL_PART_DOES_NOT_CONTAIN:** Matches if the concatenation of request URL path and query does not contain the contents of the
`value` field.
- **URL_PART_DOES_NOT_END_WITH:** Matches if the concatenation of request URL path and query does not end with the contents of the
`value` field.
- **IP_IS:** Matches if the request originates from one of the IP addresses contained in the defined address list. The `value` in this
case is string with one or multiple IPs or CIDR notations separated by new line symbol \\\\n
*Example:* \\"1.1.1.1\\\\n1.1.1.2\\\\n1.2.2.1/30\\"
- **IP_IS_NOT:** Matches if the request does not originate from any of the IP addresses contained in the defined address list. The
`value` in this case is string with one or multiple IPs or CIDR notations separated by new line symbol \\\\n
*Example:* \\"1.1.1.1\\\\n1.1.1.2\\\\n1.2.2.1/30\\"
- **IP_IN_LIST:** Matches if the request originates from one of the IP addresses contained in the referenced address list. The `value`
in this case is OCID of the address list.
- **IP_NOT_IN_LIST:** Matches if the request does not originate from any IP address contained in the referenced address list. The
`value` field in this case is OCID of the address list.
- **HTTP_HEADER_CONTAINS:** The HTTP_HEADER_CONTAINS criteria is defined using a compound value separated by a colon: a header field
name and a header field value. `host:test.example.com` is an example of a criteria value where `host` is the header field name and
`test.example.com` is the header field value. A request matches when the header field name is a case insensitive match and the
header field value is a case insensitive, substring match.
*Example:* With a criteria value of `host:test.example.com`, where `host` is the name of the field and `test.example.com` is the value
of the host field, a request with the header values, `Host: www.test.example.com` will match, whereas a request with header values of
`host: www.example.com` or `host: test.sub.example.com` will not match.
- **HTTP_METHOD_IS:** Matches if the request method is identical to one of the values listed in field. The `value` in this case is
string with one or multiple HTTP methods separated by new line symbol \\\\n The list of available methods: `GET`, `HEAD`, `POST`,
`PUT`, `DELETE`, `CONNECT`, `OPTIONS`, `TRACE`, `PATCH`"
- "*Example:* \\"GET\\\\nPOST\\""
- "- **HTTP_METHOD_IS_NOT:** Matches if the request is not identical to any of the contents of the `value` field. The `value` in this
case is string with one or multiple HTTP methods separated by new line symbol \\\\n The list of available methods: `GET`, `HEAD`,
`POST`, `PUT`, `DELETE`, `CONNECT`, `OPTIONS`, `TRACE`, `PATCH`"
- "*Example:* \\"GET\\\\nPOST\\""
- "- **COUNTRY_IS:** Matches if the request originates from one of countries in the `value` field. The `value` in this case is string
with one or multiple countries separated by new line symbol \\\\n Country codes are in ISO 3166-1 alpha-2 format. For a list of codes,
see L(ISO's website,https://www.iso.org/obp/ui/#search/code/).
*Example:* \\"AL\\\\nDZ\\\\nAM\\"
- **COUNTRY_IS_NOT:** Matches if the request does not originate from any of countries in the `value` field. The `value` in this case
is string with one or multiple countries separated by new line symbol \\\\n Country codes are in ISO 3166-1 alpha-2 format. For a
list of codes, see L(ISO's website,https://www.iso.org/obp/ui/#search/code/).
*Example:* \\"AL\\\\nDZ\\\\nAM\\"
- **USER_AGENT_IS:** Matches if the requesting user agent is identical to the contents of the `value` field.
*Example:* `Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:35.0) Gecko/20100101 Firefox/35.0`
- **USER_AGENT_IS_NOT:** Matches if the requesting user agent is not identical to the contents of the `value` field.
*Example:* `Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:35.0) Gecko/20100101 Firefox/35.0`"
returned: on success
type: str
sample: URL_IS
value:
description:
- The criteria value.
returned: on success
type: str
sample: value_example
is_case_sensitive:
description:
- When enabled, the condition will be matched with case-sensitive rules.
returned: on success
type: bool
sample: true
action:
description:
- The action to take when the access criteria are met for a rule. If unspecified, defaults to `ALLOW`.
- "- **ALLOW:** Takes no action, just logs the request."
- "- **DETECT:** Takes no action, but creates an alert for the request."
- "- **BLOCK:** Blocks the request by returning specified response code or showing error page."
- "- **BYPASS:** Bypasses some or all challenges."
- "- **REDIRECT:** Redirects the request to the specified URL. These fields are required when `REDIRECT` is selected: `redirectUrl`,
`redirectResponseCode`."
- "- **SHOW_CAPTCHA:** Show a CAPTCHA Challenge page instead of the requested page."
- Regardless of action, no further rules are processed once a rule is matched.
returned: on success
type: str
sample: ALLOW
block_action:
description:
- The method used to block requests if `action` is set to `BLOCK` and the access criteria are met. If unspecified, defaults to
`SET_RESPONSE_CODE`.
returned: on success
type: str
sample: SET_RESPONSE_CODE
block_response_code:
description:
- "The response status code to return when `action` is set to `BLOCK`, `blockAction` is set to `SET_RESPONSE_CODE`, and the access criteria are
met. If unspecified, defaults to `403`. The list of available response codes: `200`, `201`, `202`, `204`, `206`, `300`, `301`, `302`, `303`,
`304`, `307`, `400`, `401`, `403`, `404`, `405`, `408`, `409`, `411`, `412`, `413`, `414`, `415`, `416`, `422`, `444`, `494`, `495`, `496`,
`497`, `499`, `500`, `501`, `502`, `503`, `504`, `507`."
returned: on success
type: int
sample: 56
block_error_page_message:
description:
- The message to show on the error page when `action` is set to `BLOCK`, `blockAction` is set to `SHOW_ERROR_PAGE`, and the access criteria are
met. If unspecified, defaults to 'Access to the website is blocked.'
returned: on success
type: str
sample: block_error_page_message_example
block_error_page_code:
description:
- The error code to show on the error page when `action` is set to `BLOCK`, `blockAction` is set to `SHOW_ERROR_PAGE`, and the access criteria
are met. If unspecified, defaults to 'Access rules'.
returned: on success
type: str
sample: block_error_page_code_example
block_error_page_description:
description:
- The description text to show on the error page when `action` is set to `BLOCK`, `blockAction` is set to `SHOW_ERROR_PAGE`, and the access
criteria are met. If unspecified, defaults to 'Access blocked by website owner. Please contact support.'
returned: on success
type: str
sample: block_error_page_description_example
bypass_challenges:
description:
- The list of challenges to bypass when `action` is set to `BYPASS`. If unspecified or empty, all challenges are bypassed.
- "- **JS_CHALLENGE:** Bypasses JavaScript Challenge."
- "- **DEVICE_FINGERPRINT_CHALLENGE:** Bypasses Device Fingerprint Challenge."
- "- **HUMAN_INTERACTION_CHALLENGE:** Bypasses Human Interaction Challenge."
- "- **CAPTCHA:** Bypasses CAPTCHA Challenge."
returned: on success
type: list
sample: []
redirect_url:
description:
- The target to which the request should be redirected, represented as a URI reference. Required when `action` is `REDIRECT`.
returned: on success
type: str
sample: redirect_url_example
redirect_response_code:
description:
- The response status code to return when `action` is set to `REDIRECT`.
- "- **MOVED_PERMANENTLY:** Used for designating the permanent movement of a page (numerical code - 301)."
- "- **FOUND:** Used for designating the temporary movement of a page (numerical code - 302)."
returned: on success
type: str
sample: MOVED_PERMANENTLY
captcha_title:
description:
- The title used when showing a CAPTCHA challenge when `action` is set to `SHOW_CAPTCHA` and the request is challenged.
returned: on success
type: str
sample: captcha_title_example
captcha_header:
description:
- The text to show in the header when showing a CAPTCHA challenge when `action` is set to `SHOW_CAPTCHA` and the request is challenged.
returned: on success
type: str
sample: captcha_header_example
captcha_footer:
description:
- The text to show in the footer when showing a CAPTCHA challenge when `action` is set to `SHOW_CAPTCHA` and the request is challenged.
returned: on success
type: str
sample: captcha_footer_example
captcha_submit_label:
description:
- The text to show on the label of the CAPTCHA challenge submit button when `action` is set to `SHOW_CAPTCHA` and the request is challenged.
returned: on success
type: str
sample: captcha_submit_label_example
response_header_manipulation:
description:
- An object that represents an action to apply to HTTP response headers if all rule criteria are matched, regardless of the `action` value.
returned: on success
type: complex
contains:
action:
description:
- ""
returned: on success
type: str
sample: EXTEND_HTTP_RESPONSE_HEADER
header:
description:
- A header field name that conforms to RFC 7230.
- "Example: `example_header_name`"
returned: on success
type: str
sample: header_example
value:
description:
- A header field value that conforms to RFC 7230.
- "Example: `example_value`"
returned: on success
type: str
sample: value_example
sample: [{
"name": "name_example",
"criteria": [{
"condition": "URL_IS",
"value": "value_example",
"is_case_sensitive": true
}],
"action": "ALLOW",
"block_action": "SET_RESPONSE_CODE",
"block_response_code": 56,
"block_error_page_message": "block_error_page_message_example",
"block_error_page_code": "block_error_page_code_example",
"block_error_page_description": "block_error_page_description_example",
"bypass_challenges": [],
"redirect_url": "redirect_url_example",
"redirect_response_code": "MOVED_PERMANENTLY",
"captcha_title": "captcha_title_example",
"captcha_header": "captcha_header_example",
"captcha_footer": "captcha_footer_example",
"captcha_submit_label": "captcha_submit_label_example",
"response_header_manipulation": [{
"action": "EXTEND_HTTP_RESPONSE_HEADER",
"header": "header_example",
"value": "value_example"
}]
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.waas import WaasClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class AccessRulesFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: list"""
def get_required_params_for_list(self):
return [
"waas_policy_id",
]
def list_resources(self):
optional_list_method_params = [
"name",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.list_all_resources(
self.client.list_access_rules,
waas_policy_id=self.module.params.get("waas_policy_id"),
**optional_kwargs
)
AccessRulesFactsHelperCustom = get_custom_class("AccessRulesFactsHelperCustom")
class ResourceFactsHelper(AccessRulesFactsHelperCustom, AccessRulesFactsHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(waas_policy_id=dict(type="str", required=True), name=dict(type="str"),)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="access_rules",
service_client_class=WaasClient,
namespace="waas",
)
result = []
if resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(access_rules=result)
if __name__ == "__main__":
main()
|
plugins/modules/oci_waas_access_rules_facts.py
| 19,748 |
Supported operations: list
!/usr/bin/python Copyright (c) 2020, 2021 Oracle and/or its affiliates. This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license. GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) Apache License v2.0 See LICENSE.TXT for details. GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
| 410 |
en
| 0.747432 |
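The module above ultimately delegates to the OCI Python SDK's WaasClient. As a hedged sketch (the config profile and policy OCID are placeholders, and the pagination helper mirrors what oci_common_utils.list_all_resources does), the same listing can be done directly with the SDK:

```python
# Minimal sketch of the underlying SDK call; OCID and config are placeholders.
import oci

config = oci.config.from_file()            # reads ~/.oci/config by default
waas_client = oci.waas.WaasClient(config)

policy_id = "ocid1.waaspolicy.oc1..xxxxxxEXAMPLExxxxxx"

# Fetch every page of results, as the module's list_resources() does.
rules = oci.pagination.list_call_get_all_results(
    waas_client.list_access_rules,
    waas_policy_id=policy_id,
).data

for rule in rules:
    print(rule.name, rule.action)
```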
import enum
import warnings
from optuna import exceptions
from optuna import logging
from optuna import type_checking
if type_checking.TYPE_CHECKING:
from datetime import datetime # NOQA
from typing import Any # NOQA
from typing import Dict # NOQA
from typing import Optional # NOQA
from optuna.distributions import BaseDistribution # NOQA
class TrialState(enum.Enum):
"""State of a :class:`~optuna.trial.Trial`.
Attributes:
RUNNING:
The :class:`~optuna.trial.Trial` is running.
COMPLETE:
The :class:`~optuna.trial.Trial` has been finished without any error.
PRUNED:
The :class:`~optuna.trial.Trial` has been pruned with
:class:`~optuna.exceptions.TrialPruned`.
FAIL:
The :class:`~optuna.trial.Trial` has failed due to an uncaught error.
"""
RUNNING = 0
COMPLETE = 1
PRUNED = 2
FAIL = 3
WAITING = 4
def __repr__(self):
# type: () -> str
return str(self)
def is_finished(self):
# type: () -> bool
return self != TrialState.RUNNING and self != TrialState.WAITING
class StudyDirection(enum.Enum):
"""Direction of a :class:`~optuna.study.Study`.
Attributes:
NOT_SET:
Direction has not been set.
MINIMIZE:
:class:`~optuna.study.Study` minimizes the objective function.
MAXIMIZE:
:class:`~optuna.study.Study` maximizes the objective function.
"""
NOT_SET = 0
MINIMIZE = 1
MAXIMIZE = 2
class FrozenTrial(object):
"""Status and results of a :class:`~optuna.trial.Trial`.
Attributes:
number:
Unique and consecutive number of :class:`~optuna.trial.Trial` for each
:class:`~optuna.study.Study`. Note that this field uses zero-based numbering.
state:
:class:`TrialState` of the :class:`~optuna.trial.Trial`.
value:
Objective value of the :class:`~optuna.trial.Trial`.
datetime_start:
Datetime where the :class:`~optuna.trial.Trial` started.
datetime_complete:
Datetime where the :class:`~optuna.trial.Trial` finished.
params:
Dictionary that contains suggested parameters.
distributions:
Dictionary that contains the distributions of :attr:`params`.
user_attrs:
Dictionary that contains the attributes of the :class:`~optuna.trial.Trial` set with
:func:`optuna.trial.Trial.set_user_attr`.
intermediate_values:
Intermediate objective values set with :func:`optuna.trial.Trial.report`.
"""
def __init__(
self,
number, # type: int
state, # type: TrialState
value, # type: Optional[float]
datetime_start, # type: Optional[datetime]
datetime_complete, # type: Optional[datetime]
params, # type: Dict[str, Any]
distributions, # type: Dict[str, BaseDistribution]
user_attrs, # type: Dict[str, Any]
system_attrs, # type: Dict[str, Any]
intermediate_values, # type: Dict[int, float]
trial_id, # type: int
):
# type: (...) -> None
self.number = number
self.state = state
self.value = value
self.datetime_start = datetime_start
self.datetime_complete = datetime_complete
self.params = params
self.user_attrs = user_attrs
self.system_attrs = system_attrs
self.intermediate_values = intermediate_values
self._distributions = distributions
self._trial_id = trial_id
# Ordered list of fields required for `__repr__`, `__hash__` and dataframe creation.
# TODO(hvy): Remove this list in Python 3.6 as the order of `self.__dict__` is preserved.
_ordered_fields = [
'number', 'value', 'datetime_start', 'datetime_complete', 'params', '_distributions',
'user_attrs', 'system_attrs', 'intermediate_values', '_trial_id', 'state', ]
def __eq__(self, other):
# type: (Any) -> bool
if not isinstance(other, FrozenTrial):
return NotImplemented
return other.__dict__ == self.__dict__
def __lt__(self, other):
# type: (Any) -> bool
if not isinstance(other, FrozenTrial):
return NotImplemented
return self.number < other.number
def __le__(self, other):
# type: (Any) -> bool
if not isinstance(other, FrozenTrial):
return NotImplemented
return self.number <= other.number
def __hash__(self):
# type: () -> int
return hash(tuple(getattr(self, field) for field in self._ordered_fields))
def __repr__(self):
# type: () -> str
return ('{cls}({kwargs})'.format(
cls=self.__class__.__name__,
kwargs=', '.join('{field}={value}'.format(
field=field if not field.startswith('_') else field[1:],
value=repr(getattr(self, field))) for field in self._ordered_fields)))
def _validate(self):
# type: () -> None
if self.datetime_start is None:
raise ValueError('`datetime_start` is supposed to be set.')
if self.state.is_finished():
if self.datetime_complete is None:
raise ValueError('`datetime_complete` is supposed to be set for a finished trial.')
else:
if self.datetime_complete is not None:
raise ValueError(
'`datetime_complete` is not supposed to be set for a trial that is not finished.')
if self.state == TrialState.COMPLETE and self.value is None:
raise ValueError('`value` is supposed to be set for a complete trial.')
if set(self.params.keys()) != set(self.distributions.keys()):
raise ValueError('Inconsistent parameters {} and distributions {}.'.format(
set(self.params.keys()), set(self.distributions.keys())))
for param_name, param_value in self.params.items():
distribution = self.distributions[param_name]
param_value_in_internal_repr = distribution.to_internal_repr(param_value)
if not distribution._contains(param_value_in_internal_repr):
raise ValueError(
"The value {} of parameter '{}' isn't contained in the distribution {}.".
format(param_value, param_name, distribution))
@property
def distributions(self):
# type: () -> Dict[str, BaseDistribution]
"""Return the distributions for this trial.
Returns:
The distributions.
"""
return self._distributions
@distributions.setter
def distributions(self, value):
# type: (Dict[str, BaseDistribution]) -> None
"""Set the distributions for this trial.
Args:
value: The distributions.
"""
self._distributions = value
@property
def trial_id(self):
# type: () -> int
"""Return the trial ID.
.. deprecated:: 0.19.0
The direct use of this attribute is deprecated and it is recommended that you use
:attr:`~optuna.trial.FrozenTrial.number` instead.
Returns:
The trial ID.
"""
warnings.warn(
'The use of `FrozenTrial.trial_id` is deprecated. '
'Please use `FrozenTrial.number` instead.', DeprecationWarning)
logger = logging.get_logger(__name__)
logger.warning(
'The use of `FrozenTrial.trial_id` is deprecated. '
'Please use `FrozenTrial.number` instead.')
return self._trial_id
@property
def last_step(self):
# type: () -> Optional[int]
if len(self.intermediate_values) == 0:
return None
else:
return max(self.intermediate_values.keys())
class StudySummary(object):
"""Basic attributes and aggregated results of a :class:`~optuna.study.Study`.
See also :func:`optuna.study.get_all_study_summaries`.
Attributes:
study_name:
Name of the :class:`~optuna.study.Study`.
direction:
:class:`StudyDirection` of the :class:`~optuna.study.Study`.
best_trial:
:class:`FrozenTrial` with best objective value in the :class:`~optuna.study.Study`.
user_attrs:
Dictionary that contains the attributes of the :class:`~optuna.study.Study` set with
:func:`optuna.study.Study.set_user_attr`.
system_attrs:
Dictionary that contains the attributes of the :class:`~optuna.study.Study` internally
set by Optuna.
n_trials:
The number of trials run in the :class:`~optuna.study.Study`.
datetime_start:
Datetime where the :class:`~optuna.study.Study` started.
"""
def __init__(
self,
study_name, # type: str
direction, # type: StudyDirection
best_trial, # type: Optional[FrozenTrial]
user_attrs, # type: Dict[str, Any]
system_attrs, # type: Dict[str, Any]
n_trials, # type: int
datetime_start, # type: Optional[datetime]
study_id, # type: int
):
# type: (...) -> None
self.study_name = study_name
self.direction = direction
self.best_trial = best_trial
self.user_attrs = user_attrs
self.system_attrs = system_attrs
self.n_trials = n_trials
self.datetime_start = datetime_start
self._study_id = study_id
def __eq__(self, other):
# type: (Any) -> bool
if not isinstance(other, StudySummary):
return NotImplemented
return other.__dict__ == self.__dict__
def __lt__(self, other):
# type: (Any) -> bool
if not isinstance(other, StudySummary):
return NotImplemented
return self._study_id < other._study_id
def __le__(self, other):
# type: (Any) -> bool
if not isinstance(other, StudySummary):
return NotImplemented
return self._study_id <= other._study_id
@property
def study_id(self):
# type: () -> int
"""Return the study ID.
.. deprecated:: 0.20.0
The direct use of this attribute is deprecated and it is recommended that you use
:attr:`~optuna.structs.StudySummary.study_name` instead.
Returns:
The study ID.
"""
message = 'The use of `StudySummary.study_id` is deprecated. ' \
'Please use `StudySummary.study_name` instead.'
warnings.warn(message, DeprecationWarning)
logger = logging.get_logger(__name__)
logger.warning(message)
return self._study_id
class TrialPruned(exceptions.TrialPruned):
"""Exception for pruned trials.
.. deprecated:: 0.19.0
This class was moved to :mod:`~optuna.exceptions`. Please use
:class:`~optuna.exceptions.TrialPruned` instead.
"""
def __init__(self, *args, **kwargs):
# type: (Any, Any) -> None
message = 'The use of `optuna.structs.TrialPruned` is deprecated. ' \
'Please use `optuna.exceptions.TrialPruned` instead.'
warnings.warn(message, DeprecationWarning)
logger = logging.get_logger(__name__)
logger.warning(message)
|
optuna/structs.py
| 11,497 |
Status and results of a :class:`~optuna.trial.Trial`.
Attributes:
number:
Unique and consecutive number of :class:`~optuna.trial.Trial` for each
:class:`~optuna.study.Study`. Note that this field uses zero-based numbering.
state:
:class:`TrialState` of the :class:`~optuna.trial.Trial`.
value:
Objective value of the :class:`~optuna.trial.Trial`.
datetime_start:
Datetime where the :class:`~optuna.trial.Trial` started.
datetime_complete:
Datetime where the :class:`~optuna.trial.Trial` finished.
params:
Dictionary that contains suggested parameters.
distributions:
Dictionary that contains the distributions of :attr:`params`.
user_attrs:
Dictionary that contains the attributes of the :class:`~optuna.trial.Trial` set with
:func:`optuna.trial.Trial.set_user_attr`.
intermediate_values:
Intermediate objective values set with :func:`optuna.trial.Trial.report`.
Direction of a :class:`~optuna.study.Study`.
Attributes:
NOT_SET:
Direction has not been set.
MINIMIZE:
:class:`~optuna.study.Study` minimizes the objective function.
MAXIMIZE:
:class:`~optuna.study.Study` maximizes the objective function.
Basic attributes and aggregated results of a :class:`~optuna.study.Study`.
See also :func:`optuna.study.get_all_study_summaries`.
Attributes:
study_name:
Name of the :class:`~optuna.study.Study`.
direction:
:class:`StudyDirection` of the :class:`~optuna.study.Study`.
best_trial:
:class:`FrozenTrial` with best objective value in the :class:`~optuna.study.Study`.
user_attrs:
Dictionary that contains the attributes of the :class:`~optuna.study.Study` set with
:func:`optuna.study.Study.set_user_attr`.
system_attrs:
Dictionary that contains the attributes of the :class:`~optuna.study.Study` internally
set by Optuna.
n_trials:
The number of trials run in the :class:`~optuna.study.Study`.
datetime_start:
Datetime where the :class:`~optuna.study.Study` started.
Exception for pruned trials.
.. deprecated:: 0.19.0
This class was moved to :mod:`~optuna.exceptions`. Please use
:class:`~optuna.exceptions.TrialPruned` instead.
State of a :class:`~optuna.trial.Trial`.
Attributes:
RUNNING:
The :class:`~optuna.trial.Trial` is running.
COMPLETE:
The :class:`~optuna.trial.Trial` has been finished without any error.
PRUNED:
The :class:`~optuna.trial.Trial` has been pruned with
:class:`~optuna.exceptions.TrialPruned`.
FAIL:
The :class:`~optuna.trial.Trial` has failed due to an uncaught error.
Return the distributions for this trial.
Returns:
The distributions.
Set the distributions for this trial.
Args:
value: The distributions.
Return the study ID.
.. deprecated:: 0.20.0
The direct use of this attribute is deprecated and it is recommended that you use
:attr:`~optuna.structs.StudySummary.study_name` instead.
Returns:
The study ID.
Return the trial ID.
.. deprecated:: 0.19.0
The direct use of this attribute is deprecated and it is recommended that you use
:attr:`~optuna.trial.FrozenTrial.number` instead.
Returns:
The trial ID.
NOQA NOQA NOQA NOQA NOQA type: () -> str type: () -> bool type: int type: TrialState type: Optional[float] type: Optional[datetime] type: Optional[datetime] type: Dict[str, Any] type: Dict[str, BaseDistribution] type: Dict[str, Any] type: Dict[str, Any] type: Dict[int, float] type: int type: (...) -> None Ordered list of fields required for `__repr__`, `__hash__` and dataframe creation. TODO(hvy): Remove this list in Python 3.6 as the order of `self.__dict__` is preserved. type: (Any) -> bool type: (Any) -> bool type: (Any) -> bool type: () -> int type: () -> str type: () -> None type: () -> Dict[str, BaseDistribution] type: (Dict[str, BaseDistribution]) -> None type: () -> int type: () -> Optional[int] type: str type: StudyDirection type: Optional[FrozenTrial] type: Dict[str, Any] type: Dict[str, Any] type: int type: Optional[datetime] type: int type: (...) -> None type: (Any) -> bool type: (Any) -> bool type: (Any) -> bool type: () -> int type: (Any, Any) -> None
| 4,290 |
en
| 0.609432 |
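To make the FrozenTrial/TrialState machinery above concrete, here is a small usage sketch. It assumes an Optuna release from the same era as this file (one where these classes are still exposed via optuna.structs and suggest_uniform exists); the objective function is invented for illustration.

```python
# Hedged sketch: exercises TrialState.is_finished() on real trials.
import optuna

def objective(trial):
    x = trial.suggest_uniform("x", -10, 10)
    return (x - 2) ** 2

study = optuna.create_study()         # StudyDirection.MINIMIZE by default
study.optimize(objective, n_trials=5)

# study.trials is a list of FrozenTrial objects.
for t in study.trials:
    if t.state.is_finished():         # filters out RUNNING/WAITING trials
        print(t.number, t.state, t.value, t.params)
```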
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class UsagePlan(pulumi.CustomResource):
api_stages: pulumi.Output[list]
"""
The associated API stages of the usage plan.
* `api_id` (`str`) - API Id of the associated API stage in a usage plan.
* `stage` (`str`) - API stage name of the associated API stage in a usage plan.
"""
arn: pulumi.Output[str]
"""
Amazon Resource Name (ARN)
"""
description: pulumi.Output[str]
"""
The description of a usage plan.
"""
name: pulumi.Output[str]
"""
The name of the usage plan.
"""
product_code: pulumi.Output[str]
"""
The AWS Marketplace product identifier to associate with the usage plan as a SaaS product on AWS Marketplace.
"""
quota_settings: pulumi.Output[dict]
"""
The quota settings of the usage plan.
* `limit` (`float`) - The maximum number of requests that can be made in a given time period.
* `offset` (`float`) - The number of requests subtracted from the given limit in the initial time period.
* `period` (`str`) - The time period in which the limit applies. Valid values are "DAY", "WEEK" or "MONTH".
"""
tags: pulumi.Output[dict]
"""
Key-value map of resource tags
"""
throttle_settings: pulumi.Output[dict]
"""
The throttling limits of the usage plan.
* `burstLimit` (`float`) - The API request burst limit, the maximum rate limit over a time ranging from one to a few seconds, depending upon whether the underlying token bucket is at its full capacity.
* `rate_limit` (`float`) - The API request steady-state rate limit.
"""
def __init__(__self__, resource_name, opts=None, api_stages=None, description=None, name=None, product_code=None, quota_settings=None, tags=None, throttle_settings=None, __props__=None, __name__=None, __opts__=None):
"""
Provides an API Gateway Usage Plan.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
myapi = aws.apigateway.RestApi("myapi")
dev = aws.apigateway.Deployment("dev",
rest_api=myapi.id,
stage_name="dev")
prod = aws.apigateway.Deployment("prod",
rest_api=myapi.id,
stage_name="prod")
my_usage_plan = aws.apigateway.UsagePlan("myUsagePlan",
api_stages=[
{
"api_id": myapi.id,
"stage": dev.stage_name,
},
{
"api_id": myapi.id,
"stage": prod.stage_name,
},
],
description="my description",
product_code="MYCODE",
quota_settings={
"limit": 20,
"offset": 2,
"period": "WEEK",
},
throttle_settings={
"burstLimit": 5,
"rate_limit": 10,
})
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[list] api_stages: The associated API stages of the usage plan.
:param pulumi.Input[str] description: The description of a usage plan.
:param pulumi.Input[str] name: The name of the usage plan.
:param pulumi.Input[str] product_code: The AWS Marketplace product identifier to associate with the usage plan as a SaaS product on AWS Marketplace.
:param pulumi.Input[dict] quota_settings: The quota settings of the usage plan.
:param pulumi.Input[dict] tags: Key-value map of resource tags
:param pulumi.Input[dict] throttle_settings: The throttling limits of the usage plan.
The **api_stages** object supports the following:
* `api_id` (`pulumi.Input[str]`) - API Id of the associated API stage in a usage plan.
* `stage` (`pulumi.Input[str]`) - API stage name of the associated API stage in a usage plan.
The **quota_settings** object supports the following:
* `limit` (`pulumi.Input[float]`) - The maximum number of requests that can be made in a given time period.
* `offset` (`pulumi.Input[float]`) - The number of requests subtracted from the given limit in the initial time period.
* `period` (`pulumi.Input[str]`) - The time period in which the limit applies. Valid values are "DAY", "WEEK" or "MONTH".
The **throttle_settings** object supports the following:
* `burstLimit` (`pulumi.Input[float]`) - The API request burst limit, the maximum rate limit over a time ranging from one to a few seconds, depending upon whether the underlying token bucket is at its full capacity.
* `rate_limit` (`pulumi.Input[float]`) - The API request steady-state rate limit.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['api_stages'] = api_stages
__props__['description'] = description
__props__['name'] = name
__props__['product_code'] = product_code
__props__['quota_settings'] = quota_settings
__props__['tags'] = tags
__props__['throttle_settings'] = throttle_settings
__props__['arn'] = None
super(UsagePlan, __self__).__init__(
'aws:apigateway/usagePlan:UsagePlan',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, api_stages=None, arn=None, description=None, name=None, product_code=None, quota_settings=None, tags=None, throttle_settings=None):
"""
Get an existing UsagePlan resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[list] api_stages: The associated API stages of the usage plan.
:param pulumi.Input[str] arn: Amazon Resource Name (ARN)
:param pulumi.Input[str] description: The description of a usage plan.
:param pulumi.Input[str] name: The name of the usage plan.
:param pulumi.Input[str] product_code: The AWS Marketplace product identifier to associate with the usage plan as a SaaS product on AWS Marketplace.
:param pulumi.Input[dict] quota_settings: The quota settings of the usage plan.
:param pulumi.Input[dict] tags: Key-value map of resource tags
:param pulumi.Input[dict] throttle_settings: The throttling limits of the usage plan.
The **api_stages** object supports the following:
* `api_id` (`pulumi.Input[str]`) - API Id of the associated API stage in a usage plan.
* `stage` (`pulumi.Input[str]`) - API stage name of the associated API stage in a usage plan.
The **quota_settings** object supports the following:
* `limit` (`pulumi.Input[float]`) - The maximum number of requests that can be made in a given time period.
* `offset` (`pulumi.Input[float]`) - The number of requests subtracted from the given limit in the initial time period.
* `period` (`pulumi.Input[str]`) - The time period in which the limit applies. Valid values are "DAY", "WEEK" or "MONTH".
The **throttle_settings** object supports the following:
* `burstLimit` (`pulumi.Input[float]`) - The API request burst limit, the maximum rate limit over a time ranging from one to a few seconds, depending upon whether the underlying token bucket is at its full capacity.
* `rate_limit` (`pulumi.Input[float]`) - The API request steady-state rate limit.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["api_stages"] = api_stages
__props__["arn"] = arn
__props__["description"] = description
__props__["name"] = name
__props__["product_code"] = product_code
__props__["quota_settings"] = quota_settings
__props__["tags"] = tags
__props__["throttle_settings"] = throttle_settings
return UsagePlan(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
sdk/python/pulumi_aws/apigateway/usage_plan.py
| 9,677 |
Provides an API Gateway Usage Plan.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
myapi = aws.apigateway.RestApi("myapi")
dev = aws.apigateway.Deployment("dev",
rest_api=myapi.id,
stage_name="dev")
prod = aws.apigateway.Deployment("prod",
rest_api=myapi.id,
stage_name="prod")
my_usage_plan = aws.apigateway.UsagePlan("myUsagePlan",
api_stages=[
{
"api_id": myapi.id,
"stage": dev.stage_name,
},
{
"api_id": myapi.id,
"stage": prod.stage_name,
},
],
description="my description",
product_code="MYCODE",
quota_settings={
"limit": 20,
"offset": 2,
"period": "WEEK",
},
throttle_settings={
"burstLimit": 5,
"rate_limit": 10,
})
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[list] api_stages: The associated API stages of the usage plan.
:param pulumi.Input[str] description: The description of a usage plan.
:param pulumi.Input[str] name: The name of the usage plan.
:param pulumi.Input[str] product_code: The AWS Marketplace product identifier to associate with the usage plan as a SaaS product on AWS Marketplace.
:param pulumi.Input[dict] quota_settings: The quota settings of the usage plan.
:param pulumi.Input[dict] tags: Key-value map of resource tags
:param pulumi.Input[dict] throttle_settings: The throttling limits of the usage plan.
The **api_stages** object supports the following:
* `api_id` (`pulumi.Input[str]`) - API Id of the associated API stage in a usage plan.
* `stage` (`pulumi.Input[str]`) - API stage name of the associated API stage in a usage plan.
The **quota_settings** object supports the following:
* `limit` (`pulumi.Input[float]`) - The maximum number of requests that can be made in a given time period.
* `offset` (`pulumi.Input[float]`) - The number of requests subtracted from the given limit in the initial time period.
* `period` (`pulumi.Input[str]`) - The time period in which the limit applies. Valid values are "DAY", "WEEK" or "MONTH".
The **throttle_settings** object supports the following:
* `burstLimit` (`pulumi.Input[float]`) - The API request burst limit, the maximum rate limit over a time ranging from one to a few seconds, depending upon whether the underlying token bucket is at its full capacity.
* `rate_limit` (`pulumi.Input[float]`) - The API request steady-state rate limit.
Get an existing UsagePlan resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[list] api_stages: The associated API stages of the usage plan.
:param pulumi.Input[str] arn: Amazon Resource Name (ARN)
:param pulumi.Input[str] description: The description of a usage plan.
:param pulumi.Input[str] name: The name of the usage plan.
:param pulumi.Input[str] product_code: The AWS Marketplace product identifier to associate with the usage plan as a SaaS product on AWS Marketplace.
:param pulumi.Input[dict] quota_settings: The quota settings of the usage plan.
:param pulumi.Input[dict] tags: Key-value map of resource tags
:param pulumi.Input[dict] throttle_settings: The throttling limits of the usage plan.
The **api_stages** object supports the following:
* `api_id` (`pulumi.Input[str]`) - API Id of the associated API stage in a usage plan.
* `stage` (`pulumi.Input[str]`) - API stage name of the associated API stage in a usage plan.
The **quota_settings** object supports the following:
* `limit` (`pulumi.Input[float]`) - The maximum number of requests that can be made in a given time period.
* `offset` (`pulumi.Input[float]`) - The number of requests subtracted from the given limit in the initial time period.
* `period` (`pulumi.Input[str]`) - The time period in which the limit applies. Valid values are "DAY", "WEEK" or "MONTH".
The **throttle_settings** object supports the following:
* `burstLimit` (`pulumi.Input[float]`) - The API request burst limit, the maximum rate limit over a time ranging from one to a few seconds, depending upon whether the underlying token bucket is at its full capacity.
* `rate_limit` (`pulumi.Input[float]`) - The API request steady-state rate limit.
coding=utf-8 *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** *** Do not edit by hand unless you're certain you know what you are doing! ***
| 4,696 |
en
| 0.58603 |
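Besides creating a plan (shown in the docstring above), the generated get classmethod can adopt an existing usage plan by ID. A hedged sketch, meant to live inside a Pulumi project's __main__.py; the resource ID is a placeholder:

```python
# Sketch only: adopt an existing API Gateway usage plan into a Pulumi program.
import pulumi
import pulumi_aws as aws

existing = aws.apigateway.UsagePlan.get(
    "importedUsagePlan",           # logical name in this Pulumi program
    id="usage-plan-id-example",    # placeholder provider ID
)

pulumi.export("usage_plan_arn", existing.arn)
```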
# -*- coding: utf-8 -*-
"""Tests for NullTask plugin."""
import unittest
from pomito.plugins.task import nulltask, TaskPlugin
class NullTaskTests(unittest.TestCase):
"""Tests for NullTask."""
def setUp(self):
self.task = nulltask.NullTask(None)
def test_nulltask_is_a_task_plugin(self):
assert issubclass(nulltask.NullTask, TaskPlugin)
def test_nulltask_initialize_should_not_throw(self):
self.task.initialize()
def test_nulltask_get_tasks_returns_empty_list(self):
assert len(self.task.get_tasks()) == 0
def test_nulltask_get_tasks_by_filter_returns_empty_list(self):
assert len(self.task.get_tasks_by_filter("")) == 0
def test_nulltask_get_task_by_id_returns_none(self):
assert self.task.get_task_by_id(1) is None
|
tests/plugins/task/test_nulltask.py
| 802 |
Tests for NullTask.
Tests for NullTask plugin.
-*- coding: utf-8 -*-
| 70 |
en
| 0.492654 |
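The tests above fully describe NullTask's observable behaviour. A hedged usage sketch based only on what those tests assert (the pomito package layout and the None plugin-host argument come from the test file; nothing else is assumed):

```python
# Sketch: NullTask behaves as a no-op TaskPlugin.
from pomito.plugins.task import nulltask

task = nulltask.NullTask(None)      # the tests pass None for the host argument
task.initialize()                   # must not raise

print(len(task.get_tasks()))              # 0
print(len(task.get_tasks_by_filter("")))  # 0
print(task.get_task_by_id(1))             # None
```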
#!/usr/bin/python
# Copyright (c) 2017, 2020 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_network_ip_sec_connection_device_status_facts
short_description: Fetches details about a IpSecConnectionDeviceStatus resource in Oracle Cloud Infrastructure
description:
- Fetches details about a IpSecConnectionDeviceStatus resource in Oracle Cloud Infrastructure
- Deprecated. To get the tunnel status, instead use
L(GetIPSecConnectionTunnel,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/20160918/IPSecConnectionTunnel/GetIPSecConnectionTunnel).
version_added: "2.9"
author: Oracle (@oracle)
options:
ipsc_id:
description:
- The OCID of the IPSec connection.
type: str
aliases: ["id"]
required: true
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Get a specific ip_sec_connection_device_status
oci_network_ip_sec_connection_device_status_facts:
ipsc_id: ocid1.ipsc.oc1..xxxxxxEXAMPLExxxxxx
"""
RETURN = """
ip_sec_connection_device_status:
description:
- IpSecConnectionDeviceStatus resource
returned: on success
type: complex
contains:
compartment_id:
description:
- The OCID of the compartment containing the IPSec connection.
returned: on success
type: string
sample: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
id:
description:
- The IPSec connection's Oracle ID (OCID).
returned: on success
type: string
sample: ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx
time_created:
description:
- The date and time the IPSec connection was created, in the format defined by L(RFC3339,https://tools.ietf.org/html/rfc3339).
- "Example: `2016-08-25T21:10:29.600Z`"
returned: on success
type: string
sample: 2016-08-25T21:10:29.600Z
tunnels:
description:
- Two L(TunnelStatus,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/20160918/TunnelStatus/) objects.
returned: on success
type: complex
contains:
ip_address:
description:
- The IP address of Oracle's VPN headend.
- "Example: `203.0.113.50`"
returned: on success
type: string
sample: 203.0.113.50
lifecycle_state:
description:
- The tunnel's current state.
returned: on success
type: string
sample: UP
time_created:
description:
- The date and time the IPSec connection was created, in the format defined by L(RFC3339,https://tools.ietf.org/html/rfc3339).
- "Example: `2016-08-25T21:10:29.600Z`"
returned: on success
type: string
sample: 2016-08-25T21:10:29.600Z
time_state_modified:
description:
- When the state of the tunnel last changed, in the format defined by L(RFC3339,https://tools.ietf.org/html/rfc3339).
- "Example: `2016-08-25T21:10:29.600Z`"
returned: on success
type: string
sample: 2016-08-25T21:10:29.600Z
sample: {
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"time_created": "2016-08-25T21:10:29.600Z",
"tunnels": [{
"ip_address": "203.0.113.50",
"lifecycle_state": "UP",
"time_created": "2016-08-25T21:10:29.600Z",
"time_state_modified": "2016-08-25T21:10:29.600Z"
}]
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.core import VirtualNetworkClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class IpSecConnectionDeviceStatusFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get"""
def get_required_params_for_get(self):
return [
"ipsc_id",
]
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_ip_sec_connection_device_status,
ipsc_id=self.module.params.get("ipsc_id"),
)
IpSecConnectionDeviceStatusFactsHelperCustom = get_custom_class(
"IpSecConnectionDeviceStatusFactsHelperCustom"
)
class ResourceFactsHelper(
IpSecConnectionDeviceStatusFactsHelperCustom,
IpSecConnectionDeviceStatusFactsHelperGen,
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(dict(ipsc_id=dict(aliases=["id"], type="str", required=True),))
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="ip_sec_connection_device_status",
service_client_class=VirtualNetworkClient,
namespace="core",
)
result = []
if resource_facts_helper.is_get():
result = resource_facts_helper.get()
elif resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(ip_sec_connection_device_status=result)
if __name__ == "__main__":
main()
|
plugins/modules/oci_network_ip_sec_connection_device_status_facts.py
| 6,476 |
Supported operations: get
!/usr/bin/python Copyright (c) 2017, 2020 Oracle and/or its affiliates. This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license. GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) Apache License v2.0 See LICENSE.TXT for details. GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
| 409 |
en
| 0.758759 |
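As with the other generated facts modules, this one wraps a single SDK call. A hedged sketch of the equivalent direct call (the connection OCID is a placeholder, and the underlying operation is itself deprecated in favour of GetIPSecConnectionTunnel):

```python
# Minimal sketch of the wrapped SDK call; the OCID is a placeholder.
import oci

config = oci.config.from_file()
vcn_client = oci.core.VirtualNetworkClient(config)

status = vcn_client.get_ip_sec_connection_device_status(
    ipsc_id="ocid1.ipsc.oc1..xxxxxxEXAMPLExxxxxx"
).data

print(status.compartment_id, status.time_created)
for tunnel in status.tunnels:
    print(tunnel.ip_address, tunnel.lifecycle_state)
```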
from plotly_study.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
"Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on plot.ly for family .
The 'familysrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on plot.ly for size .
The 'sizesrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "streamtube.hoverlabel"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Font object
Sets the font used in hover labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
plotly_study.graph_objs.streamtube.hoverlabel.Font
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
Returns
-------
Font
"""
super(Font, self).__init__("font")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly_study.graph_objs.streamtube.hoverlabel.Font
constructor must be a dict or
an instance of plotly_study.graph_objs.streamtube.hoverlabel.Font"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly_study.validators.streamtube.hoverlabel import font as v_font
# Initialize validators
# ---------------------
self._validators["color"] = v_font.ColorValidator()
self._validators["colorsrc"] = v_font.ColorsrcValidator()
self._validators["family"] = v_font.FamilyValidator()
self._validators["familysrc"] = v_font.FamilysrcValidator()
self._validators["size"] = v_font.SizeValidator()
self._validators["sizesrc"] = v_font.SizesrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("colorsrc", None)
self["colorsrc"] = colorsrc if colorsrc is not None else _v
_v = arg.pop("family", None)
self["family"] = family if family is not None else _v
_v = arg.pop("familysrc", None)
self["familysrc"] = familysrc if familysrc is not None else _v
_v = arg.pop("size", None)
self["size"] = size if size is not None else _v
_v = arg.pop("sizesrc", None)
self["sizesrc"] = sizesrc if sizesrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
__all__ = ["Font"]
|
plotly_study/graph_objs/streamtube/hoverlabel/__init__.py
| 11,103 |
Construct a new Font object
Sets the font used in hover labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
plotly_study.graph_objs.streamtube.hoverlabel.Font
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
Returns
-------
Font
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system on which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
"Arial", "Balto", "Courier New", "Droid Sans", "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
Sets the source reference on plot.ly for family .
The 'familysrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
Sets the source reference on plot.ly for size .
The 'sizesrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str
Property sections: color, colorsrc, family, familysrc, size, sizesrc, property parent name, Self properties description. Constructor steps: Validate arg, Handle skip_invalid, Import validators, Initialize validators, Populate data dict with properties, Process unknown kwargs, Reset skip_invalid.
| 5,471 |
en
| 0.570346 |
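A minimal construction sketch for the Font record above. It assumes the standard plotly package layout (plotly.graph_objects); the plotly_study fork named in the path mirrors this API, so the import path is an assumption and the property values are placeholders.

import plotly.graph_objects as go

# Sketch: build a streamtube hover-label font from the documented properties.
font = go.streamtube.hoverlabel.Font(
    color="crimson",              # hex, rgb()/hsl()/hsv() string, or named CSS color
    family="Courier New, Arial",  # comma-separated fallbacks, applied in order
    size=14,                      # int/float >= 1
)
fig = go.Figure(go.Streamtube(hoverlabel={"font": font}))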
class MGDHCPSettings(object):
def __init__(self, session):
super(MGDHCPSettings, self).__init__()
self._session = session
def getNetworkCellularGatewaySettingsDhcp(self, networkId: str):
"""
**List common DHCP settings of MGs**
https://developer.cisco.com/meraki/api/#!get-network-cellular-gateway-settings-dhcp
- networkId (string)
"""
metadata = {
'tags': ['MG DHCP settings'],
'operation': 'getNetworkCellularGatewaySettingsDhcp',
}
resource = f'/networks/{networkId}/cellularGateway/settings/dhcp'
return self._session.get(metadata, resource)
def updateNetworkCellularGatewaySettingsDhcp(self, networkId: str, **kwargs):
"""
**Update common DHCP settings of MGs**
https://developer.cisco.com/meraki/api/#!update-network-cellular-gateway-settings-dhcp
- networkId (string)
- dhcpLeaseTime (string): DHCP Lease time for all MG of the network. It can be '30 minutes', '1 hour', '4 hours', '12 hours', '1 day' or '1 week'.
- dnsNameservers (string): DNS name servers mode for all MG of the network. It can take 4 different values: 'upstream_dns', 'google_dns', 'opendns', 'custom'.
- dnsCustomNameservers (array): list of fixed IPs representing the DNS name servers when the mode is 'custom'
"""
kwargs.update(locals())
metadata = {
'tags': ['MG DHCP settings'],
'operation': 'updateNetworkCellularGatewaySettingsDhcp',
}
resource = f'/networks/{networkId}/cellularGateway/settings/dhcp'
body_params = ['dhcpLeaseTime', 'dnsNameservers', 'dnsCustomNameservers']
payload = {k: v for (k, v) in kwargs.items() if k in body_params}
return self._session.put(metadata, resource, payload)
|
meraki/api/mg_dhcp_settings.py
| 1,884 |
**List common DHCP settings of MGs**
https://developer.cisco.com/meraki/api/#!get-network-cellular-gateway-settings-dhcp
- networkId (string)
**Update common DHCP settings of MGs**
https://developer.cisco.com/meraki/api/#!update-network-cellular-gateway-settings-dhcp
- networkId (string)
- dhcpLeaseTime (string): DHCP Lease time for all MG of the network. It can be '30 minutes', '1 hour', '4 hours', '12 hours', '1 day' or '1 week'.
- dnsNameservers (string): DNS name servers mode for all MG of the network. It can take 4 different values: 'upstream_dns', 'google_dns', 'opendns', 'custom'.
- dnsCustomNameservers (array): list of fixed IPs representing the DNS name servers when the mode is 'custom'
| 709 |
en
| 0.636551 |
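A usage sketch for the MGDHCPSettings class above. The `session` object and the network ID are assumptions: `session` stands for the SDK's RestSession-style object (anything exposing get(metadata, resource) and put(metadata, resource, payload)), and 'N_123456' is a hypothetical network ID.

# Sketch only; `session` and the network ID are placeholders, not from the source.
dhcp_api = MGDHCPSettings(session)

# Read the current MG DHCP settings of the network.
current = dhcp_api.getNetworkCellularGatewaySettingsDhcp('N_123456')

# Switch the network to custom DNS with a one-day lease.
dhcp_api.updateNetworkCellularGatewaySettingsDhcp(
    'N_123456',
    dhcpLeaseTime='1 day',
    dnsNameservers='custom',
    dnsCustomNameservers=['10.0.0.2', '10.0.0.3'],
)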
"""Config Port Stats message tests."""
from pyof.v0x04.controller2switch.common import PortStats
from tests.test_struct import TestStruct
class TestPortStats(TestStruct):
"""Config Port Stats message tests."""
@classmethod
def setUpClass(cls):
"""Configure raw file and its object in parent class (TestDump)."""
super().setUpClass()
super().set_raw_dump_file('v0x04', 'ofpt_port_stats')
super().set_raw_dump_object(PortStats)
super().set_minimum_size(112)
|
build/lib/tests/v0x04/test_controller2switch/test_port_stats.py
| 511 |
Config Port Stats message tests.
Configure raw file and its object in parent class (TestDump).
Config Port Stats message tests.
| 127 |
en
| 0.555224 |
"""Report routes."""
import os
from urllib import parse
import bottle
import requests
from pymongo.database import Database
from database import sessions
from database.datamodels import latest_datamodel
from database.measurements import recent_measurements_by_metric_uuid
from database.reports import insert_new_report, latest_reports
from initialization.report import import_json_report
from model.actions import copy_report
from model.data import ReportData
from model.transformations import hide_credentials, summarize_report
from server_utilities.functions import report_date_time, uuid
from server_utilities.type import ReportId
@bottle.post("/api/v3/report/import")
def post_report_import(database: Database):
"""Import a preconfigured report into the database."""
report = dict(bottle.request.json)
result = import_json_report(database, report)
result["new_report_uuid"] = report["report_uuid"]
return result
@bottle.post("/api/v3/report/new")
def post_report_new(database: Database):
"""Add a new report."""
report_uuid = uuid()
user = sessions.user(database)
report = dict(
report_uuid=report_uuid, title="New report", subjects={},
delta=dict(uuids=[report_uuid], email=user["email"], description=f"{user['user']} created a new report."))
result = insert_new_report(database, report)
result["new_report_uuid"] = report_uuid
return result
@bottle.post("/api/v3/report/<report_uuid>/copy")
def post_report_copy(report_uuid: ReportId, database: Database):
"""Copy a report."""
data_model = latest_datamodel(database)
reports = latest_reports(database)
data = ReportData(data_model, reports, report_uuid)
report_copy = copy_report(data.report, data.datamodel)
user = sessions.user(database)
report_copy["delta"] = dict(
uuids=[report_uuid, report_copy["report_uuid"]], email=user["email"],
description=f"{user['user']} copied the report '{data.report_name}'.")
result = insert_new_report(database, report_copy)
result["new_report_uuid"] = report_copy["report_uuid"]
return result
@bottle.get("/api/v3/report/<report_uuid>/pdf")
def export_report_as_pdf(report_uuid: ReportId):
"""Download the report as pdf."""
renderer_host = os.environ.get("RENDERER_HOST", "renderer")
renderer_port = os.environ.get("RENDERER_PORT", "9000")
render_url = f"http://{renderer_host}:{renderer_port}/api/render"
proxy_host = os.environ.get("PROXY_HOST", "www")
proxy_port = os.environ.get("PROXY_PORT", "80")
query_string = f"?{bottle.request.query_string}" if bottle.request.query_string else ""
report_url = parse.quote(f"http://{proxy_host}:{proxy_port}/{report_uuid}{query_string}")
margins = "&".join([f"pdf.margin.{side}=25" for side in ("top", "bottom", "left", "right")])
# Set pdf scale to 70% or otherwise the dashboard falls off the page
options = f"emulateScreenMedia=false&goto.timeout=60000&pdf.scale=0.7&{margins}"
response = requests.get(f"{render_url}?url={report_url}&{options}")
response.raise_for_status()
bottle.response.content_type = "application/pdf"
return response.content
@bottle.delete("/api/v3/report/<report_uuid>")
def delete_report(report_uuid: ReportId, database: Database):
"""Delete a report."""
data_model = latest_datamodel(database)
reports = latest_reports(database)
data = ReportData(data_model, reports, report_uuid)
data.report["deleted"] = "true"
user = sessions.user(database)
data.report["delta"] = dict(
uuids=[report_uuid], email=user["email"],
description=f"{user['user']} deleted the report '{data.report_name}'.")
return insert_new_report(database, data.report)
@bottle.post("/api/v3/report/<report_uuid>/attribute/<report_attribute>")
def post_report_attribute(report_uuid: ReportId, report_attribute: str, database: Database):
"""Set a report attribute."""
data_model = latest_datamodel(database)
reports = latest_reports(database)
data = ReportData(data_model, reports, report_uuid)
value = dict(bottle.request.json)[report_attribute]
old_value = data.report.get(report_attribute) or ""
data.report[report_attribute] = value
value_change_description = "" if report_attribute == "layout" else f" from '{old_value}' to '{value}'"
user = sessions.user(database)
data.report["delta"] = dict(
uuids=[report_uuid], email=user["email"],
description=f"{user['user']} changed the {report_attribute} of report '{data.report_name}'"
f"{value_change_description}.")
return insert_new_report(database, data.report)
@bottle.get("/api/v3/tagreport/<tag>")
def get_tag_report(tag: str, database: Database):
"""Get a report with all metrics that have the specified tag."""
date_time = report_date_time()
reports = latest_reports(database, date_time)
data_model = latest_datamodel(database, date_time)
subjects = _get_subjects_and_metrics_by_tag(data_model, reports, tag)
tag_report = dict(
title=f'Report for tag "{tag}"', subtitle="Note: tag reports are read-only", report_uuid=f"tag-{tag}",
timestamp=date_time, subjects=subjects)
hide_credentials(data_model, tag_report)
summarize_report(tag_report, recent_measurements_by_metric_uuid(database, date_time), data_model)
return tag_report
def _get_subjects_and_metrics_by_tag(data_model, reports, tag: str):
"""Return all subjects and metrics that have the tag."""
subjects = {}
for report in reports:
for subject_uuid, subject in list(report.get("subjects", {}).items()):
for metric_uuid, metric in list(subject.get("metrics", {}).items()):
if tag not in metric.get("tags", []):
del subject["metrics"][metric_uuid]
if subject.get("metrics", {}):
subject_name = subject.get("name") or data_model["subjects"][subject["type"]]["name"]
subject["name"] = report["title"] + " / " + subject_name
subjects[subject_uuid] = subject
return subjects
|
components/server/src/routes/report.py
| 6,123 |
Return all subjects and metrics that have the tag.
Delete a report.
Download the report as pdf.
Get a report with all metrics that have the specified tag.
Set a report attribute.
Copy a report.
Import a preconfigured report into the database.
Add a new report.
Report routes.
Set pdf scale to 70% or otherwise the dashboard falls off the page
| 344 |
en
| 0.846093 |
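To make the PDF export route above concrete, this sketch rebuilds the renderer request URL the same way export_report_as_pdf does, using only the standard library. The hosts and ports are the route's documented defaults; the report UUID is a hypothetical placeholder.

from urllib import parse

report_uuid = "report_uuid_1"  # hypothetical
report_url = parse.quote(f"http://www:80/{report_uuid}")
margins = "&".join(f"pdf.margin.{side}=25" for side in ("top", "bottom", "left", "right"))
options = f"emulateScreenMedia=false&goto.timeout=60000&pdf.scale=0.7&{margins}"
print(f"http://renderer:9000/api/render?url={report_url}&{options}")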
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various ops for augmentation."""
import math
import tensorflow as tf
from tensorflow_addons import image as tfa_image
# Default replace value
REPLACE_VALUE = 128
def blend(image1, image2, factor):
"""Blend image1 and image2 using 'factor'.
A value of factor 0.0 means only image1 is used.
A value of 1.0 means only image2 is used. A value between 0.0 and
1.0 means we linearly interpolate the pixel values between the two
images. A value greater than 1.0 "extrapolates" the difference
between the two pixel values, and we clip the results to values
between 0 and 255.
Args:
image1: An image Tensor.
image2: An image Tensor.
factor: A floating point value above 0.0.
Returns:
A blended image Tensor.
"""
image1 = tf.cast(image1, tf.float32)
image2 = tf.cast(image2, tf.float32)
return tf.saturate_cast(image1 + factor * (image2 - image1), tf.uint8)
def wrap(image):
"""Returns 'image' with an extra channel set to all 1s."""
shape = tf.shape(image)
extended_channel = tf.ones([shape[0], shape[1], 1], image.dtype)
extended = tf.concat([image, extended_channel], 2)
return extended
def unwrap(image):
"""Unwraps an image produced by wrap.
Where there is a 0 in the last channel for every spatial position,
the rest of the three channels in that spatial dimension are grayed
(set to 128). Operations like translate and shear on a wrapped
Tensor will leave 0s in empty locations. Some transformations look
at the intensity of values to do preprocessing, and we want these
empty pixels to assume the 'average' value, rather than pure black.
Args:
image: A 3D Image Tensor with 4 channels.
Returns:
image: A 3D image Tensor with 3 channels.
"""
image_shape = tf.shape(image)
# Flatten the spatial dimensions.
flattened_image = tf.reshape(image, [-1, image_shape[2]])
# Find all pixels where the last channel is zero.
alpha_channel = tf.expand_dims(flattened_image[:, image_shape[2] - 1], 1)
replace = tf.constant([REPLACE_VALUE, REPLACE_VALUE, REPLACE_VALUE, 1],
image.dtype)
# Where they are zero, fill them in with 'replace'.
flattened_image = tf.where(
tf.equal(alpha_channel, 0),
tf.ones_like(flattened_image, dtype=image.dtype) * replace,
flattened_image)
image = tf.reshape(flattened_image, image_shape)
image = tf.slice(image, [0, 0, 0],
[image_shape[0], image_shape[1], image_shape[2] - 1])
return image
def solarize(image, threshold=128):
# For each pixel in the image, select the pixel
# if the value is less than the threshold.
# Otherwise, subtract 255 from the pixel.
threshold = tf.saturate_cast(threshold, image.dtype)
return tf.where(image < threshold, image, 255 - image)
def solarize_add(image, addition=0, threshold=128):
# For each pixel in the image less than threshold
# we add 'addition' amount to it and then clip the
# pixel value to be between 0 and 255. The value
# of 'addition' is between -128 and 128
threshold = tf.saturate_cast(threshold, image.dtype)
added_im = tf.cast(image, tf.int32) + tf.cast(addition, tf.int32)
added_im = tf.saturate_cast(added_im, tf.uint8)
return tf.where(image < threshold, added_im, image)
def invert(image):
"""Inverts the image pixels."""
return 255 - tf.convert_to_tensor(image)
def invert_blend(image, factor):
"""Implements blend of invert with original image."""
return blend(invert(image), image, factor)
def color(image, factor):
"""Equivalent of PIL Color."""
degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))
return blend(degenerate, image, factor)
def contrast(image, factor):
"""Equivalent of PIL Contrast."""
grayscale_im = tf.image.rgb_to_grayscale(image)
mean = tf.reduce_mean(tf.cast(grayscale_im, tf.float32))
mean = tf.saturate_cast(mean + 0.5, tf.uint8)
degenerate = tf.ones_like(grayscale_im, dtype=tf.uint8) * mean
degenerate = tf.image.grayscale_to_rgb(degenerate)
return blend(degenerate, image, factor)
def brightness(image, factor):
"""Equivalent of PIL Brightness."""
degenerate = tf.zeros_like(image)
return blend(degenerate, image, factor)
def posterize(image, bits):
"""Equivalent of PIL Posterize."""
shift = tf.cast(8 - bits, image.dtype)
return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift)
def rotate(image, degrees):
"""Equivalent of PIL Rotation."""
# Convert from degrees to radians
degrees_to_radians = math.pi / 180.0
radians = degrees * degrees_to_radians
# In practice, we should randomize the rotation degrees by flipping
# it negatively half the time, but that's done on 'degrees' outside
# of the function.
image = tfa_image.transform_ops.rotate(wrap(image), radians)
return unwrap(image)
def translate_x(image, pixels):
"""Equivalent of PIL Translate in X dimension."""
image = tfa_image.translate_ops.translate(wrap(image), [-pixels, 0])
return unwrap(image)
def translate_y(image, pixels):
"""Equivalent of PIL Translate in Y dimension."""
image = tfa_image.translate_ops.translate(wrap(image), [0, -pixels])
return unwrap(image)
def shear_x(image, level):
"""Equivalent of PIL Shearing in X dimension."""
# Shear parallel to x axis is a projective transform
# with a matrix form of:
# [1 level
# 0 1]
image = tfa_image.transform_ops.transform(
wrap(image), [1., level, 0., 0., 1., 0., 0., 0.])
return unwrap(image)
def shear_y(image, level):
"""Equivalent of PIL Shearing in Y dimension."""
# Shear parallel to y axis is a projective transform
# with a matrix form of:
# [1 0
# level 1]
image = tfa_image.transform_ops.transform(
wrap(image), [1., 0., 0., level, 1., 0., 0., 0.])
return unwrap(image)
def autocontrast(image):
"""Implements Autocontrast function from PIL using TF ops."""
def scale_channel(channel):
"""Scale the 2D image using the autocontrast rule."""
# A possibly cheaper version can be done using cumsum/unique_with_counts
# over the histogram values, rather than iterating over the entire image,
# to compute mins and maxes.
lo = tf.cast(tf.reduce_min(channel), tf.float32)
hi = tf.cast(tf.reduce_max(channel), tf.float32)
# Scale the image, making the lowest value 0 and the highest value 255.
def scale_values(im):
scale = 255.0 / (hi - lo)
offset = -lo * scale
im = tf.cast(im, tf.float32) * scale + offset
return tf.saturate_cast(im, tf.uint8)
result = tf.cond(hi > lo, lambda: scale_values(channel), lambda: channel)
return result
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image[:, :, 0])
s2 = scale_channel(image[:, :, 1])
s3 = scale_channel(image[:, :, 2])
image = tf.stack([s1, s2, s3], 2)
return image
def autocontrast_blend(image, factor):
"""Implements blend of autocontrast with original image."""
return blend(autocontrast(image), image, factor)
def sharpness(image, factor):
"""Implements Sharpness function from PIL using TF ops."""
orig_im = image
image = tf.cast(image, tf.float32)
# Make image 4D for conv operation
image = tf.expand_dims(image, 0)
# SMOOTH PIL Kernel
kernel = tf.constant([[1, 1, 1], [1, 5, 1], [1, 1, 1]],
dtype=tf.float32,
shape=[3, 3, 1, 1]) / 13.
# Tile across channel dimension
kernel = tf.tile(kernel, [1, 1, 3, 1])
strides = [1, 1, 1, 1]
degenerate = tf.nn.depthwise_conv2d(
image, kernel, strides, padding='VALID', dilations=[1, 1])
degenerate = tf.squeeze(tf.saturate_cast(degenerate, tf.uint8), [0])
# For the borders of the resulting image, fill in the values of the
# original image.
mask = tf.ones_like(degenerate)
padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]])
padded_degenerate = tf.pad(degenerate, [[1, 1], [1, 1], [0, 0]])
result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_im)
# Blend the final result
return blend(result, orig_im, factor)
def equalize(image):
"""Implements Equalize function from PIL using TF ops."""
def scale_channel(im, c):
"""Scale the data in the channel to implement equalize."""
im = tf.cast(im[:, :, c], tf.int32)
# Compute the histogram of the image channel.
histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)
# For the purposes of computing the step, filter out the nonzeros.
nonzero = tf.where(tf.not_equal(histo, 0))
nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])
step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255
def build_lut(histo, step):
# Compute the cumulative sum, shifting by step // 2
# and then normalization by step.
lut = (tf.cumsum(histo) + (step // 2)) // step
# Shift lut, prepending with 0.
lut = tf.concat([[0], lut[:-1]], 0)
# Clip the counts to be in range. This is done
# in the C code for image.point.
return tf.clip_by_value(lut, 0, 255)
# If step is zero, return the original image. Otherwise, build
# lut from the full histogram and step and then index from it.
result = tf.cond(
tf.equal(step, 0), lambda: im,
lambda: tf.gather(build_lut(histo, step), im))
return tf.cast(result, tf.uint8)
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image, 0)
s2 = scale_channel(image, 1)
s3 = scale_channel(image, 2)
image = tf.stack([s1, s2, s3], 2)
return image
def equalize_blend(image, factor):
"""Implements blend of equalize with original image."""
return blend(equalize(image), image, factor)
def _convolve_image_with_kernel(image, kernel):
num_channels = tf.shape(image)[-1]
kernel = tf.tile(kernel, [1, 1, num_channels, 1])
image = tf.expand_dims(image, axis=0)
convolved_im = tf.nn.depthwise_conv2d(
tf.cast(image, tf.float32), kernel, strides=[1, 1, 1, 1], padding='SAME')
# adding 0.5 for future rounding, same as in PIL:
# https://github.com/python-pillow/Pillow/blob/555e305a60d7fcefd1ad4aa6c8fd879e2f474192/src/libImaging/Filter.c#L101 # pylint: disable=line-too-long
convolved_im = convolved_im + 0.5
return tf.squeeze(convolved_im, axis=0)
def blur(image, factor):
"""Blur with the same kernel as ImageFilter.BLUR."""
# See https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageFilter.py # pylint: disable=line-too-long
# class BLUR(BuiltinFilter):
# name = "Blur"
# # fmt: off
# filterargs = (5, 5), 16, 0, (
# 1, 1, 1, 1, 1,
# 1, 0, 0, 0, 1,
# 1, 0, 0, 0, 1,
# 1, 0, 0, 0, 1,
# 1, 1, 1, 1, 1,
# )
# # fmt: on
#
# filterargs are following:
# (kernel_size_x, kernel_size_y), divisor, offset, kernel
#
blur_kernel = tf.constant(
[[1., 1., 1., 1., 1.], [1., 0., 0., 0., 1.], [1., 0., 0., 0., 1.],
[1., 0., 0., 0., 1.], [1., 1., 1., 1., 1.]],
dtype=tf.float32,
shape=[5, 5, 1, 1]) / 16.0
blurred_im = _convolve_image_with_kernel(image, blur_kernel)
return blend(image, blurred_im, factor)
def smooth(image, factor):
"""Smooth with the same kernel as ImageFilter.SMOOTH."""
# See https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageFilter.py # pylint: disable=line-too-long
# class SMOOTH(BuiltinFilter):
# name = "Smooth"
# # fmt: off
# filterargs = (3, 3), 13, 0, (
# 1, 1, 1,
# 1, 5, 1,
# 1, 1, 1,
# )
# # fmt: on
#
# filterargs are following:
# (kernel_size_x, kernel_size_y), divisor, offset, kernel
#
smooth_kernel = tf.constant([[1., 1., 1.], [1., 5., 1.], [1., 1., 1.]],
dtype=tf.float32,
shape=[3, 3, 1, 1]) / 13.0
smoothed_im = _convolve_image_with_kernel(image, smooth_kernel)
return blend(image, smoothed_im, factor)
def rescale(image, level):
"""Rescales image and enlarged cornet."""
# See tf.image.ResizeMethod for full list
size = image.shape[:2]
scale = level * 0.25
scale_height = tf.cast(scale * size[0], tf.int32)
scale_width = tf.cast(scale * size[1], tf.int32)
cropped_image = tf.image.crop_to_bounding_box(
image,
offset_height=scale_height,
offset_width=scale_width,
target_height=size[0] - scale_height,
target_width=size[1] - scale_width)
rescaled = tf.image.resize(cropped_image, size, tf.image.ResizeMethod.BICUBIC)
return tf.saturate_cast(rescaled, tf.uint8)
NAME_TO_FUNC = {
'Identity': tf.identity,
'AutoContrast': autocontrast,
'AutoContrastBlend': autocontrast_blend,
'Equalize': equalize,
'EqualizeBlend': equalize_blend,
'Invert': invert,
'InvertBlend': invert_blend,
'Rotate': rotate,
'Posterize': posterize,
'Solarize': solarize,
'SolarizeAdd': solarize_add,
'Color': color,
'Contrast': contrast,
'Brightness': brightness,
'Sharpness': sharpness,
'ShearX': shear_x,
'ShearY': shear_y,
'TranslateX': translate_x,
'TranslateY': translate_y,
'Blur': blur,
'Smooth': smooth,
'Rescale': rescale,
}
|
third_party/augment_ops.py
| 13,825 |
Implements Autocontrast function from PIL using TF ops.
Implements blend of autocontrast with original image.
Blend image1 and image2 using 'factor'.
A value of factor 0.0 means only image1 is used.
A value of 1.0 means only image2 is used. A value between 0.0 and
1.0 means we linearly interpolate the pixel values between the two
images. A value greater than 1.0 "extrapolates" the difference
between the two pixel values, and we clip the results to values
between 0 and 255.
Args:
image1: An image Tensor.
image2: An image Tensor.
factor: A floating point value above 0.0.
Returns:
A blended image Tensor.
Blur with the same kernel as ImageFilter.BLUR.
Equivalent of PIL Brightness.
Equivalent of PIL Color.
Equivalent of PIL Contrast.
Implements Equalize function from PIL using TF ops.
Implements blend of equalize with original image.
Inverts the image pixels.
Implements blend of invert with original image.
Equivalent of PIL Posterize.
Crops the bottom-right region of the image and rescales it back to full size.
Equivalent of PIL Rotation.
Scale the 2D image using the autocontrast rule.
Scale the data in the channel to implement equalize.
Implements Sharpness function from PIL using TF ops.
Equivalent of PIL Shearing in X dimension.
Equivalent of PIL Shearing in Y dimension.
Smooth with the same kernel as ImageFilter.SMOOTH.
Equivalent of PIL Translate in X dimension.
Equivalent of PIL Translate in Y dimension.
Unwraps an image produced by wrap.
Where there is a 0 in the last channel for every spatial position,
the rest of the three channels in that spatial dimension are grayed
(set to 128). Operations like translate and shear on a wrapped
Tensor will leave 0s in empty locations. Some transformations look
at the intensity of values to do preprocessing, and we want these
empty pixels to assume the 'average' value, rather than pure black.
Args:
image: A 3D Image Tensor with 4 channels.
Returns:
image: A 3D image Tensor with 3 channels.
Returns 'image' with an extra channel set to all 1s.
Various ops for augmentation.
Copyright 2019 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Default replace value Flatten the spatial dimensions. Find all pixels where the last channel is zero. Where they are zero, fill them in with 'replace'. For each pixel in the image, select the pixel if the value is less than the threshold. Otherwise, subtract 255 from the pixel. For each pixel in the image less than threshold we add 'addition' amount to it and then clip the pixel value to be between 0 and 255. The value of 'addition' is between -128 and 128 Convert from degrees to radians In practice, we should randomize the rotation degrees by flipping it negatively half the time, but that's done on 'degrees' outside of the function. Shear parallel to x axis is a projective transform with a matrix form of: [1 level 0 1] Shear parallel to y axis is a projective transform with a matrix form of: [1 0 level 1] A possibly cheaper version can be done using cumsum/unique_with_counts over the histogram values, rather than iterating over the entire image. to compute mins and maxes. Scale the image, making the lowest value 0 and the highest value 255. Assumes RGB for now. Scales each channel independently and then stacks the result. Make image 4D for conv operation SMOOTH PIL Kernel Tile across channel dimension For the borders of the resulting image, fill in the values of the original image. Blend the final result Compute the histogram of the image channel. For the purposes of computing the step, filter out the nonzeros. Compute the cumulative sum, shifting by step // 2 and then normalization by step. Shift lut, prepending with 0. Clip the counts to be in range. This is done in the C code for image.point. If step is zero, return the original image. Otherwise, build lut from the full histogram and step and then index from it. Assumes RGB for now. Scales each channel independently and then stacks the result. adding 0.5 for future rounding, same as in PIL: https://github.com/python-pillow/Pillow/blob/555e305a60d7fcefd1ad4aa6c8fd879e2f474192/src/libImaging/Filter.cL101 pylint: disable=line-too-long See https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageFilter.py pylint: disable=line-too-long class BLUR(BuiltinFilter): name = "Blur" fmt: off filterargs = (5, 5), 16, 0, ( 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, ) fmt: on filterargs are following: (kernel_size_x, kernel_size_y), divisor, offset, kernel See https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageFilter.py pylint: disable=line-too-long class SMOOTH(BuiltinFilter): name = "Smooth" fmt: off filterargs = (3, 3), 13, 0, ( 1, 1, 1, 1, 5, 1, 1, 1, 1, ) fmt: on filterargs are following: (kernel_size_x, kernel_size_y), divisor, offset, kernel See tf.image.ResizeMethod for full list
| 5,429 |
en
| 0.812088 |
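A short usage sketch for the NAME_TO_FUNC table above; it assumes TensorFlow 2.x eager execution with tensorflow_addons installed, and uses a random uint8 image as stand-in data.

import tensorflow as tf

# Random stand-in image; any HxWx3 uint8 tensor works.
image = tf.cast(tf.random.uniform([32, 32, 3], maxval=256, dtype=tf.int32), tf.uint8)

posterized = NAME_TO_FUNC['Posterize'](image, 4)   # keep the 4 high bits per channel
rotated = NAME_TO_FUNC['Rotate'](image, 15.0)      # degrees; uses tfa transform ops
dimmed = NAME_TO_FUNC['Brightness'](image, 0.6)    # scale brightness to 60%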
"""
Scrape quotes, books and authors from ``Good Reads`` website.
"""
import bs4
from .utils import *
def get_author_name(soup):
"""Get the author's name from its main page.
Args:
soup (bs4.element.Tag): connection to the author page.
Returns:
string: name of the author.
Examples::
>>> from scrapereads import connect
>>> url = 'https://www.goodreads.com/author/show/1077326'
>>> soup = connect(url)
>>> get_author_name(soup)
J.K. Rowling
"""
author_h1 = soup.find('h1', attrs={'class': 'authorName'})
return author_h1.find('span').text
def get_author_desc(soup):
"""Get the author description / biography.
Args:
soup (bs4.element.Tag): connection to the author page.
Returns:
str: long description of the author.
Examples::
>>> from scrapereads import connect
>>> url = 'https://www.goodreads.com/author/show/1077326'
>>> soup = connect(url)
>>> get_author_desc(soup)
See also: Robert Galbraith
Although she writes under the pen name J.K. Rowling, pronounced like rolling,
her name when her first Harry Potter book was published was simply Joanne Rowling.
...
"""
author_info_desc = soup.find('div', attrs={'class': 'aboutAuthorInfo'})
author_info_long = author_info_desc.findAll('span')[-1]
long_desc = ""
for sentence in author_info_long.children:
if isinstance(sentence, bs4.element.Tag):
if sentence.name == 'br':
long_desc += '\n'
else:
long_desc += sentence.text
else:
long_desc += sentence
long_desc = long_desc.replace('’', "'")
return long_desc
def get_author_info(soup):
"""Get all information from an author (genres, influences, website etc.).
Args:
soup (bs4.element.Tag): author page connection.
Returns:
dict
"""
container = soup.find('div', attrs={'class': 'rightContainer'})
author_info = {}
data_div = container.find('br', attrs={'class': 'clear'})
while data_div:
if data_div.name:
data_class = data_div.get('class')[0]
# Information section is finished
if data_class == 'aboutAuthorInfo':
break
# Key elements
elif data_class == 'dataTitle':
key = data_div.text.strip()
author_info[key] = []
# Born section
if data_div.text == 'Born':
data_div = data_div.next_sibling
author_info[key].append(data_div.strip())
# Influences section
elif data_div.text == 'Influences':
data_div = data_div.next_sibling.next_sibling
data_items = data_div.findAll('span')[-1].findAll('a')
for data_a in data_items:
author_info[key].append(data_a.text.strip())
# Member since section
elif data_div.text == 'Member Since':
data_div = data_div.next_sibling.next_sibling
author_info[key].append(data_div.text.strip())
# Genre, website and other sections
else:
data_items = data_div.findAll('a')
for data_a in data_items:
author_info[key].append(data_a.text.strip())
data_div = data_div.next_sibling
author_info.update({'Description': get_author_desc(soup)})
return author_info
def scrape_quotes_container(soup):
"""Get the quote container from a quote page.
Args:
soup (bs4.element.Tag): connection to the quote page.
Returns:
bs4.element.Tag
"""
return soup.findAll('div', attrs={'class': 'quotes'})
def scrape_quotes(soup):
"""Retrieve all ``<div>`` quote element from a quote page.
Args:
soup (bs4.element.Tag): connection to the quote page.
Returns:
yield bs4.element.Tag
"""
for container_div in scrape_quotes_container(soup):
quote_div = container_div.find('div', attrs={'class': 'quote'})
while quote_div:
if quote_div.name == 'div' and quote_div.get('class') and 'quote' in quote_div.get('class'):
yield quote_div
quote_div = quote_div.next_sibling
def get_quote_text(quote_div):
"""Get the text from a ``<div>`` quote element.
Args:
quote_div (bs4.element.Tag): ``<div>`` quote element to extract the text.
Returns:
string
"""
quote_text = ''
text_iterator = quote_div.find('div', attrs={'class': 'quoteText'}).children
for text in text_iterator:
if text.name == 'br':
quote_text += '\n'
elif not text.name:
quote_text += text.strip()
quote_text = process_quote_text(quote_text)
return quote_text
def scrape_quote_tags(quote_div):
"""Scrape tags from a ``<div>`` quote element.
Args:
quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page.
Returns:
yield ``<a>`` tags
"""
tags_container = quote_div.find('div', attrs={'class': 'greyText smallText left'})
if tags_container:
for tag in tags_container.children:
if tag.name == 'a':
yield tag
return None
def get_quote_book(quote_div):
"""Get the reference (book) from a ``<div>`` quote element.
Args:
quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page.
Returns:
bs4.element.Tag
"""
quote_details = quote_div.find('div', attrs={'class': 'quoteText'})
return quote_details.find('a', attrs={'class': 'authorOrTitle'})
def get_quote_author_name(quote_div):
"""Get the author's name from a ``<div>`` quote element.
Args:
quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page.
Returns:
string
"""
quote_text = quote_div.find('div', attrs={'class': 'quoteText '})
author_name = quote_text.find('span', attrs={'class': 'authorOrTitle'}).text
return remove_punctuation(author_name).title()
def get_quote_likes(quote_div):
"""Get the likes ``<a>`` tag from a ``<div>`` quote element.
Args:
quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page.
Returns:
bs4.element.Tag: ``<a>`` tag for likes.
"""
quote_footer = quote_div.find('div', attrs={'class': 'quoteFooter'})
return quote_footer.find('a', attrs={'class': 'smallText'})
# TODO: deprecate this
def get_quote_name_id(quote_div):
"""Get the name and id of a ``<div>`` quote element.
Args:
quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page.
Returns:
tuple: id and name.
"""
quote_href = get_quote_likes(quote_div).get('href')
quote_id = quote_href.split('/')[-1].split('-')[0]
quote_name = '-'.join(quote_href.split('/')[-1].split('-')[1:])
return quote_id, quote_name
def scrape_author_books(soup):
"""Retrieve books from an author's page.
Args:
soup (bs4.element.Tag): connection to an author books page.
Returns:
yield bs4.element.Tag: ``<tr>`` element.
"""
table_tr = soup.find('tr')
while table_tr:
if table_tr.name == 'tr':
yield table_tr
table_tr = table_tr.next_sibling
def get_author_book_title(book_tr):
"""Get the book title ``<a>`` element from a table ``<tr>`` element from an author page.
Args:
book_tr (bs4.element.Tag): ``<tr>`` book element.
Returns:
bs4.element.Tag: book title ``<a>`` element.
Examples::
>>> for book_tr in scrape_author_books(soup):
... book_title = get_author_book_title(book_tr)
... print(book_title.text.strip(), book_title.get('href'))
The Bell Jar /book/show/6514.The_Bell_Jar
Ariel /book/show/395090.Ariel
The Collected Poems /book/show/31426.The_Collected_Poems
The Unabridged Journals of Sylvia Plath /book/show/11623.The_Unabridged_Journals_of_Sylvia_Plath
"""
return book_tr.find('a', attrs={'class': 'bookTitle'})
def get_author_book_author(book_tr):
"""Get the author ``<a>`` element from a table ``<tr>`` element.
Args:
book_tr (bs4.element.Tag): ``<tr>`` book element.
Returns:
bs4.element.Tag: author name ``<a>`` element.
Examples::
>>> for book_tr in scrape_author_books(soup):
... book_author = get_author_book_author(book_tr)
... print(book_author.text, book_author.get('href'))
Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath
Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath
Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath
Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath
Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath
"""
return book_tr.find('a', attrs={'class': 'authorName'})
def get_author_book_ratings(book_tr):
"""Get the ratings ``<span>`` element from a table ``<tr>`` element from an author page.
Args:
book_tr (bs4.element.Tag): ``<tr>`` book element.
Returns:
bs4.element.Tag: ratings ``<span>`` element.
Examples::
>>> for book_tr in scrape_author_books(soup):
... ratings_span = get_author_book_ratings(book_tr)
... print(ratings_span.contents[-1])
4.55 avg rating — 2,414 ratings
3.77 avg rating — 1,689 ratings
4.28 avg rating — 892 ratings
4.54 avg rating — 490 ratings
...
"""
return book_tr.find('span', attrs={'class': 'minirating'})
def get_author_book_edition(book_tr):
"""Get the edition ``<a>`` element from a table ``<tr>`` element from an author page.
Args:
book_tr (bs4.element.Tag): ``<tr>`` book element.
Returns:
bs4.element.Tag: book edition ``<a>`` element.
Examples::
>>> for book_tr in scrape_author_books(soup):
... book_edition = get_author_book_edition(book_tr)
... if book_edition:
... print(book_edition.text, book_edition.get('href'))
... print()
493 editions /work/editions/1385044-the-bell-jar
80 editions /work/editions/1185316-ariel
30 editions /work/editions/1003095-the-collected-poems
45 editions /work/editions/3094683-the-unabridged-journals-of-sylvia-plath
...
"""
book_details = book_tr.find('span', attrs={'class': 'greyText smallText uitext'})
return book_details.find('a', attrs={'class': 'greyText'})
def get_author_book_date(book_tr):
"""Get the published date from a table ``<tr>`` element from an author page.
Args:
book_tr (bs4.element.Tag): ``<tr>`` book element.
Returns:
int: date of publication
Examples::
>>> for book_tr in scrape_author_books(soup):
... book_date = get_author_book_date(book_tr)
... print(book_date)
None
None
1958
2009
...
"""
book_details = book_tr.find('span', attrs={'class': 'greyText smallText uitext'})
book_publish = book_details.contents[-1].replace('—', '').replace('\n', '')
book_date = book_publish.replace('published', '').strip()
book_date = int(book_date) if book_date != '' else None  # avoid eval() on scraped text
return book_date
def get_book_quote_page(soup):
"""Find the ``<a>`` element pointing to the quote page of a book.
Args:
soup (bs4.element.Tag):
Returns:
"""
quote_div = soup.findAll('div', attrs={'class': ' clearFloats bigBox'})
if quote_div:
return quote_div[-1].find('a')
return None
|
scrapereads/scrape.py
| 11,987 |
Get the author ``<a>`` element from a table ``<tr>`` element.
Args:
book_tr (bs4.element.Tag): ``<tr>`` book element.
Returns:
bs4.element.Tag: author name ``<a>`` element.
Examples::
>>> for book_tr in scrape_author_books(soup):
... book_author = get_author_book_author(book_tr)
... print(book_author.text, book_author.get('href'))
Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath
Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath
Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath
Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath
Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath
Get the published date from a table ``<tr>`` element from an author page.
Args:
book_tr (bs4.element.Tag): ``<tr>`` book element.
Returns:
int: date of publication
Examples::
>>> for book_tr in scrape_author_books(soup):
... book_date = get_author_book_date(book_tr)
... print(book_date)
None
None
1958
2009
...
Get the edition ``<a>`` element from a table ``<tr>`` element from an author page.
Args:
book_tr (bs4.element.Tag): ``<tr>`` book element.
Returns:
bs4.element.Tag: book edition ``<a>`` element.
Examples::
>>> for book_tr in scrape_author_books(soup):
... book_edition = get_author_book_edition(book_tr)
... if book_edition:
... print(book_edition.text, book_edition.get('href'))
... print()
493 editions /work/editions/1385044-the-bell-jar
80 editions /work/editions/1185316-ariel
30 editions /work/editions/1003095-the-collected-poems
45 editions /work/editions/3094683-the-unabridged-journals-of-sylvia-plath
...
Get the ratings ``<span>`` element from a table ``<tr>`` element from an author page.
Args:
book_tr (bs4.element.Tag): ``<tr>`` book element.
Returns:
bs4.element.Tag: ratings ``<span>`` element.
Examples::
>>> for book_tr in scrape_author_books(soup):
... ratings_span = get_author_book_ratings(book_tr)
... print(ratings_span.contents[-1])
4.55 avg rating — 2,414 ratings
3.77 avg rating — 1,689 ratings
4.28 avg rating — 892 ratings
4.54 avg rating — 490 ratings
...
Get the book title ``<a>`` element from a table ``<tr>`` element from an author page.
Args:
book_tr (bs4.element.Tag): ``<tr>`` book element.
Returns:
bs4.element.Tag: book title ``<a>`` element.
Examples::
>>> for book_tr in scrape_author_books(soup):
... book_title = get_author_book_title(book_tr)
... print(book_title.text.strip(), book_title.get('href'))
The Bell Jar /book/show/6514.The_Bell_Jar
Ariel /book/show/395090.Ariel
The Collected Poems /book/show/31426.The_Collected_Poems
The Unabridged Journals of Sylvia Plath /book/show/11623.The_Unabridged_Journals_of_Sylvia_Plath
Get the author description / biography.
Args:
soup (bs4.element.Tag): connection to the author page.
Returns:
str: long description of the author.
Examples::
>>> from scrapereads import connect
>>> url = 'https://www.goodreads.com/author/show/1077326'
>>> soup = connect(url)
>>> get_author_desc(soup)
See also: Robert Galbraith
Although she writes under the pen name J.K. Rowling, pronounced like rolling,
her name when her first Harry Potter book was published was simply Joanne Rowling.
...
Get all information from an author (genres, influences, website etc.).
Args:
soup (bs4.element.Tag): author page connection.
Returns:
dict
Get the author's name from its main page.
Args:
soup (bs4.element.Tag): connection to the author page.
Returns:
string: name of the author.
Examples::
>>> from scrapereads import connect
>>> url = 'https://www.goodreads.com/author/show/1077326'
>>> soup = connect(url)
>>> get_author_name(soup)
J.K. Rowling
Find the ``<a>`` element pointing to the quote page of a book.
Args:
soup (bs4.element.Tag):
Returns:
Get the author's name from a ``<div>`` quote element.
Args:
quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page.
Returns:
string
Get the reference (book) from a ``<div>`` quote element.
Args:
quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page.
Returns:
bs4.element.Tag
Get the likes ``<a>`` tag from a ``<div>`` quote element.
Args:
quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page.
Returns:
bs4.element.Tag: ``<a>`` tag for likes.
Get the name and id of a ``<div>`` quote element.
Args:
quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page.
Returns:
tuple: id and name.
Get the text from a ``<div>`` quote element.
Args:
quote_div (bs4.element.Tag): ``<div>`` quote element to extract the text.
Returns:
string
Retrieve books from an author's page.
Args:
soup (bs4.element.Tag): connection to an author books page.
Returns:
yield bs4.element.Tag: ``<tr>`` element.
Scrape tags from a ``<div>`` quote element.
Args:
quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page.
Returns:
yield ``<a>`` tags
Retrieve all ``<div>`` quote element from a quote page.
Args:
soup (bs4.element.Tag): connection to the quote page.
Returns:
yield bs4.element.Tag
Get the quote container from a quote page.
Args:
soup (bs4.element.Tag): connection to the quote page.
Returns:
bs4.element.Tag
Scrape quotes, books and authors from ``Good Reads`` website.
Information section is finished Key elements Born section Influences section Member since section Genre, website and other sections TODO: deprecate this
| 5,867 |
en
| 0.657749 |
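A usage sketch tying the scraper helpers above together, following the docstring examples (the author URL comes from those examples; network access is required and Goodreads markup may change).

from scrapereads import connect

url = 'https://www.goodreads.com/author/show/1077326'
soup = connect(url)

print(get_author_name(soup))        # e.g. "J.K. Rowling"
info = get_author_info(soup)        # dict with 'Born', 'Influences', 'Description', ...
for key, value in info.items():
    print(key, value if isinstance(value, list) else str(value)[:60])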
from pyspark.sql import Column, DataFrame, SparkSession, functions
from pyspark.sql.functions import *
from py4j.java_collections import MapConverter
from delta.tables import *
import shutil
import threading
tableName = "tbltestpython"
# Enable SQL/DML commands and Metastore tables for the current spark session.
# We need to set the following configs
spark = SparkSession.builder \
.appName("quickstart_sql") \
.master("local[*]") \
.config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") \
.config("spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog") \
.getOrCreate()
# Clear any previous runs
spark.sql("DROP TABLE IF EXISTS " + tableName)
spark.sql("DROP TABLE IF EXISTS newData")
try:
# Create a table
print("############# Creating a table ###############")
spark.sql("CREATE TABLE %s(id LONG) USING delta" % tableName)
spark.sql("INSERT INTO %s VALUES 0, 1, 2, 3, 4" % tableName)
# Read the table
print("############ Reading the table ###############")
spark.sql("SELECT * FROM %s" % tableName).show()
# Upsert (merge) new data
print("########### Upsert new data #############")
spark.sql("CREATE TABLE newData(id LONG) USING parquet")
spark.sql("INSERT INTO newData VALUES 3, 4, 5, 6")
spark.sql('''MERGE INTO {0} USING newData
ON {0}.id = newData.id
WHEN MATCHED THEN
UPDATE SET {0}.id = newData.id
WHEN NOT MATCHED THEN INSERT *
'''.format(tableName))
spark.sql("SELECT * FROM %s" % tableName).show()
# Update table data
print("########## Overwrite the table ###########")
spark.sql("INSERT OVERWRITE %s select * FROM (VALUES 5, 6, 7, 8, 9) x (id)" % tableName)
spark.sql("SELECT * FROM %s" % tableName).show()
# Update every even value by adding 100 to it
print("########### Update to the table(add 100 to every even value) ##############")
spark.sql("UPDATE {0} SET id = (id + 100) WHERE (id % 2 == 0)".format(tableName))
spark.sql("SELECT * FROM %s" % tableName).show()
# Delete every even value
print("######### Delete every even value ##############")
spark.sql("DELETE FROM {0} WHERE (id % 2 == 0)".format(tableName))
spark.sql("SELECT * FROM %s" % tableName).show()
# Read old version of data using time travel
print("######## Read old data using time travel ############")
df = spark.read.format("delta").option("versionAsOf", 0).table(tableName)
df.show()
finally:
# cleanup
spark.sql("DROP TABLE " + tableName)
spark.sql("DROP TABLE IF EXISTS newData")
spark.stop()
|
examples/python/quickstart_sql.py
| 2,663 |
Enable SQL/DML commands and Metastore tables for the current spark session. We need to set the following configs Clear any previous runs Create a table Read the table Upsert (merge) new data Update table data Update every even value by adding 100 to it Delete every even value Read old version of data using time travel cleanup
| 327 |
en
| 0.781274 |
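The MERGE statement in the quickstart above can also be written with the DeltaTable Python API that the script already imports. This is a sketch assuming Delta Lake 0.7+ (where DeltaTable.forName is available), the same SparkSession, and that the table has not yet been dropped.

# Sketch: the SQL MERGE expressed with the DeltaTable API.
delta_table = DeltaTable.forName(spark, tableName)
new_data = spark.range(3, 7).toDF("id")

delta_table.alias("t").merge(new_data.alias("s"), "t.id = s.id") \
    .whenMatchedUpdate(set={"id": "s.id"}) \
    .whenNotMatchedInsertAll() \
    .execute()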
"""Welcome to MLToolset, a package to simplify machine learning research!
Author: Ryan Eloff
Contact: [email protected]
Date: May 2018
"""
from . import data
from . import nearest_neighbour
from . import neural_blocks
from . import siamese
from . import training
from . import utils
from ._globals import TF_FLOAT
from ._globals import TF_INT
from ._globals import NP_FLOAT
from ._globals import NP_INT
|
src/mltoolset/__init__.py
| 417 |
Welcome to MLToolset, a package to simplify machine learning research!
Author: Ryan Eloff
Contact: [email protected]
Date: May 2018
| 141 |
en
| 0.679663 |
"""
This file implements the signature scheme from "Unique Ring Signatures: A Practical
Construction" by Matthew Franklin and Haibin Zhang
"""
import sys
import math
from random import randint
import hashlib
from libsig.AbstractRingSignatureScheme import AbstractRingSignatureScheme
#from AbstractRingSignatureScheme import AbstractRingSignatureScheme
#from libsig import primes
# ----------- HELPER FUNCTIONS -----------
# function to find divisors in order to find generators
def find_divisors(x):
"""
This is the "function to find divisors in order to find generators" module.
This DocTest verifies that the module is correctly calculating all divisors
of a number x.
>>> find_divisors(10)
[1, 2, 5, 10]
>>> find_divisors(112)
[1, 2, 4, 7, 8, 14, 16, 28, 56, 112]
"""
divisors = [ i for i in range(1,x+1) if x % i == 0]
return divisors
# function to find random generator of G
def find_generator(p):
'''
The order of any element in the group divides p-1.
Step 1: Calculate all divisors of p-1.
Step 2: For a random element e of G, test whether e to the power of each divisor is 1.
If no proper divisor yields 1 but e to the power of p-1 does, a generator is found.
'''
# Init
# Generate element which is tested for generator characteristics.
# Saved in list to prevent checking the same element twice.
testGen = randint(1,p)
listTested = []
listTested.append(testGen)
# Step 1.
divisors = find_divisors(p)
# try for all random numbers
# Caution: this leads to a truly random generator but is not very efficient.
while len(listTested) < p-1:
# only test each possible generator once
if testGen in listTested:
# Step 2.
for div in divisors:
testPotency = math.pow(testGen,div) % (p+1)
if testPotency == 1.0 and div != divisors[-1]:
# element does not have the same order like the group,
# therefore try next element
break
elif testPotency == 1.0 and div == divisors[-1]:
# generator is found
return testGen
# try new element
testGen = randint(1,p)
listTested.append(testGen)
def list_to_string(input_list):
'''
convert a list into a concatenated string of all its elements
'''
result = ''.join(map(str,input_list))
return result
# ----------- HELPER FUNCTIONS END -----------
class UniqueRingSignature(AbstractRingSignatureScheme):
'''
| output: pp = (lambda, q, G, H1, H2) with,
| q is prime,
| g is generator of G,
| G is multiplicative Group with prime order q,
| H1 and H2 are two Hash functions H1: {0,1}* -> G,
| (as well as H2: {0,1}* -> Zq which is the same).
'''
# set prime q (a safe prime derived from a Sophie Germain prime)
#q = 53
q = 59
# find random generator of G
g = find_generator(q-1)
# hash functions with desired range and the usage of secure hashes
h1 = lambda x: int(hashlib.sha256(str(x).encode()).hexdigest(),16)%(UniqueRingSignature.q)
# this way to share the information should be improved
h2 = lambda x: int(hashlib.sha512(str(x).encode()).hexdigest(),16)%(UniqueRingSignature.q)
# list of public keys
Rp = list()
@staticmethod
def keygen(verbose=False):
#print("---- KeyGen Started ---- \n")
r = randint(1,UniqueRingSignature.q)
# x = g**r % q
x = pow(UniqueRingSignature.g, r,UniqueRingSignature.q)
# y = g**x
y = pow(UniqueRingSignature.g, x, UniqueRingSignature.q)
if verbose == True:
print("KeyGen Config: public key y=" + str(y) + ", private key x=" + str(x) + "\n")
print("---- KeyGen Completed ---- \n")
# Caution! I know, keygen should NOT return the private key, but this is needed to "play" through a whole signature - validation process
return x,y
@staticmethod
def ringsign(x, pubkey, message,verbose=False):
'''
input: x is the privkey from user i,
| all public keys: pubkeys,
| the message
output: (R,m, (H(mR)^xi), c1,t1,...,cn,tn),
| R: all the pubkeys concatenated,
| cj,tj: random number within Zq
'''
# calculate R = pk1,pk2,..,pkn
R = list_to_string(pubkey)
g = UniqueRingSignature.g
q = UniqueRingSignature.q
h1 = UniqueRingSignature.h1
h2 = UniqueRingSignature.h2
# message + pubkeys concatenated
mR = message + str(R)
C = list()
T = list()
A = list()
B = list()
ri = -1
# simulation step
#
for i in pubkey:
# Step 1:
#
a = 0
b = 0
c = 0
t = 0
if pow(g,x,q) != i:
c, t = randint(1,q), randint(1,q)
a = (pow(g, t) * pow(int(i), c)) % q
b = (pow(h1(mR), t) * pow(pow(h1(mR),x),c)) % q
else:
# Step 2:
#
ri = randint(1, q)
a = pow(g, ri, q)
b = pow(h1(mR), ri, q)
# insert to allocate place
c = -1
t = -1
A.append(a)
B.append(b)
C.append(c)
T.append(t)
# for end
# Step 3:
#
cj = 0
# list count from 0
ab = ''.join('{}{}'.format(*t) for t in zip(A,B))
usernr = 0
for i in range(len(pubkey)):
if pubkey[i] != (pow(g,x,q)):
cj = (cj + C[i]) % q
else:
usernr = i
ci = h2(message + R + ab) - (cj % (q-1))
# update ci, this was initialized with -1
C[usernr] = ci
ti = ((ri - (C[usernr]*x)) % (q-1))
if ti < 0:
ti = (q-1) + ti
# update ti, this was initialized with -1
T[usernr] = ti
# Step 4:
#
# concatenate ct: c1,t1,c2,t2,...,cn,tn
ct = ','.join('{},{}'.format(*t) for t in zip(C,T))
# returning result
result = R + ","+message+","+str(pow(h1(mR),x, q))+"," + ct
if verbose == True:
print("RingSign Result: "+ result)
print("---- RingSign Completed ---- \n")
return result
@staticmethod
def verify(R, message, signature,verbose=False):
'''
Input: the public keys R
| the message
| the signature computed with ringsign
Output: whether the message was signed by R or not
'''
g = UniqueRingSignature.g
q = UniqueRingSignature.q
h1 = UniqueRingSignature.h1
h2 = UniqueRingSignature.h2
# parse the signature
parsed = signature.split(",")
tt = int(parsed[2])
cjs = list()
tjs = list()
for i in range(0,int(((len(parsed))/2)-1)):
cjs.append(int(parsed[3+2*i]))
tjs.append(int(parsed[4+2*i]))
#print(str(cjs)+" "+str(tjs) + " "+ str(tt))
# check signature
# sum of all cjs
# =?
# self.pp['h2'](message + R + gyh1)
mR = list_to_string(R)
val1 = sum(cjs) % q
# for all users in R:
# g**tj * yj ** cj , h1(m||R)**tj * tt**cj
gyh1 = ""
for i in range(len(tjs)):
if tjs[i] < 0:
tjs[i] = (q-1) + tjs[i]
if cjs[i] < 0:
cjs[i] = (q-1) + cjs[i]
gy = (pow(g,(tjs[i]),q) * (pow((R[i]),(cjs[i]),q))) % q
h = (pow(int(h1(message + mR)), int(tjs[i])) * pow(tt,int(cjs[i]))) % q
gyh1 = gyh1 + str( gy) + str( h)
val2 = str(h2(message + list_to_string(R) + gyh1))
if int(val1) == int(val2):
if verbose == True:
print("Signature is valid!\n")
print("Common Result: " + str(val1))
print("---- Validation Completed ---- \n")
return True
else:
if verbose == True:
print("Signature is not valid!\n")
print(str(val1) + " != " + str(val2))
print("---- Validation Completed ---- \n")
return False
def local_test(verbose=True):
# verbose output
print(verbose)
# user 1 will sign and validate later,
# therefore his private key is saved for test purposes
privKey1,pubkey = UniqueRingSignature.keygen(verbose)
UniqueRingSignature.Rp.append(pubkey)
a,pubkey = UniqueRingSignature.keygen(verbose)
UniqueRingSignature.Rp.append(pubkey)
# usernr start from 0
# ringsign(self, privkey, usernr, pubkeys, message)
ring = UniqueRingSignature.ringsign(privKey1, UniqueRingSignature.Rp, "asdf", verbose)
if verbose:
print("Result of Signature Validation:")
# verify(pubkeys, message, signature):
UniqueRingSignature.verify(UniqueRingSignature.Rp, "asdf", ring, verbose)
if __name__ == '__main__':
# doctest start
import doctest
doctest.testmod()
if len(sys.argv) > 1:
verbose = False
if sys.argv[1] == "True":
verbose = True
# run a local test
local_test(verbose)
|
libsig/FZZ_unique_ring_signature.py
| 9,481 |
| output: pp = (lambda, q, G, H1, H2) with,
| q is prime,
| g is generator of G,
| G is multiplicative Group with prime order q,
| H1 and H2 are two Hash functions H1: {0,1}* -> G,
| (as well as H2: {0,1}* -> Zq which is the same).
This is the "function to find divisors in order to find generators" module.
This DocTest verifies that the module is correctly calculating all divisors
of a number x.
>>> find_divisors(10)
[1, 2, 5, 10]
>>> find_divisors(112)
[1, 2, 4, 7, 8, 14, 16, 28, 56, 112]
The order of any element in the group divides p-1.
Step 1: Calculate all divisors of p-1.
Step 2: For a random element e of G, test whether e to the power of each divisor is 1.
If no proper divisor yields 1 but e to the power of p-1 does, a generator is found.
convert a list into a concatenated string of all its elements
input: x is the privkey from user i,
| all public keys: pubkeys,
| the message
output: (R,m, (H(mR)^xi), c1,t1,...,cn,tn),
| R: all the pubkeys concatenated,
| cj,tj: random number within Zq
Input: the public keys R
| the message
| the signature computed with ringsign
Output: whether the message was signed by R or not
This file implements the signature scheme from "Unique Ring Signatures: A Practical
Construction" by Matthew Franklin and Haibin Zhang
from AbstractRingSignatureScheme import AbstractRingSignatureSchemefrom libsig import primes ----------- HELPER FUNCTIONS ----------- function to find divisors in order to find generators function to find random generator of G Init Generate element which is tested for generator characteristics. Saved in list to prevent checking the same element twice. Step 1. try for all random numbers Caution: this leads to a truly random generator but is not very efficient. only test each possible generator once Step 2. element does not have the same order like the group, therefore try next element generator is found try new element ----------- HELPER FUNCTIONS END ----------- set prime p (Sophie-Germain and therefore save)q = 53 find random generator of G hash functions with desired range and the usage of secure hashes this way to share the information should be improved list of public keysprint("---- KeyGen Started ---- \n") x = g**r % q y = g**x Caution! I know, keygen should NOT return the private key, but this is needed to "play" through a whole signature - validation process calculate R = pk1,pk2,..,pkn message + pubkeys concatenated simulation step Step 1: Step 2: insert to allocate place for end Step 3: list count from 0 update ci, this was initialized with -1 update ti, this was initialized with -1 Step 4: concatenate ct: c1,t1,c2,t2,...,cn,tn returning result parse the signatureprint(str(cjs)+" "+str(tjs) + " "+ str(tt)) check signature sum of all cjs =? self.pp['h2'](message + R + gyh1) for all users in R: g**tj * yj ** cj , h1(m||R)**tj * tt**cj verbose output user 1 will signate and validate later, therefore his private key is saved for test purposes usernr start from 0 ringsign(self, privkey, usernr, pubkeys, message) verify(pubkeys, message, signature): doctest start run a local test
| 3,126 |
en
| 0.789918 |
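# Editor's note: the doctest above describes find_divisors and the divisor-based
# generator search (Step 1 / Step 2). The following is a minimal illustrative
# sketch of those two helpers; names mirror the description but the
# implementation is not the library's own.
import random

def find_divisors(x):
    """Return all divisors of x in ascending order.

    >>> find_divisors(10)
    [1, 2, 5, 10]
    >>> find_divisors(112)
    [1, 2, 4, 7, 8, 14, 16, 28, 56, 112]
    """
    return [d for d in range(1, x + 1) if x % d == 0]

def find_generator(q):
    """Pick a random generator of Z_q^* for a prime q.

    g generates the group iff g**d != 1 (mod q) for every proper divisor d
    of the group order q - 1, which is exactly the procedure sketched above.
    """
    order = q - 1
    proper_divisors = [d for d in find_divisors(order) if d != order]
    while True:
        g = random.randrange(2, q)
        if all(pow(g, d, q) != 1 for d in proper_divisors):
            return g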
import crcmod
from selfdrive.car.hyundai.values import CAR, CHECKSUM
hyundai_checksum = crcmod.mkCrcFun(0x11D, initCrc=0xFD, rev=False, xorOut=0xdf)
def create_lkas11(packer, car_fingerprint, bus, apply_steer, steer_req, cnt, enabled, lkas11, hud_alert,
lane_visible, left_lane_depart, right_lane_depart, keep_stock=False):
values = {
"CF_Lkas_Bca_R": lkas11["CF_Lkas_Bca_R"] if keep_stock else 3,
#"CF_Lkas_LdwsSysState": 3 if steer_req else lane_visible,
"CF_Lkas_LdwsSysState": 3 if enabled else 1,
"CF_Lkas_SysWarning": hud_alert,
#"CF_Lkas_LdwsLHWarning": lkas11["CF_Lkas_LdwsLHWarning"],
#"CF_Lkas_LdwsRHWarning": lkas11["CF_Lkas_LdwsRHWarning"],
"CF_Lkas_LdwsLHWarning": left_lane_depart,
"CF_Lkas_LdwsRHWarning": right_lane_depart,
"CF_Lkas_HbaLamp": lkas11["CF_Lkas_HbaLamp"] if keep_stock else 0,
"CF_Lkas_FcwBasReq": lkas11["CF_Lkas_FcwBasReq"] if keep_stock else 0,
"CR_Lkas_StrToqReq": apply_steer,
"CF_Lkas_ActToi": steer_req,
"CF_Lkas_ToiFlt": 0,
"CF_Lkas_HbaSysState": lkas11["CF_Lkas_HbaSysState"] if keep_stock else 1,
"CF_Lkas_FcwOpt": lkas11["CF_Lkas_FcwOpt"] if keep_stock else 0,
"CF_Lkas_HbaOpt": lkas11["CF_Lkas_HbaOpt"] if keep_stock else 3,
"CF_Lkas_MsgCount": cnt,
"CF_Lkas_FcwSysState": lkas11["CF_Lkas_FcwSysState"] if keep_stock else 0,
"CF_Lkas_FcwCollisionWarning": lkas11["CF_Lkas_FcwCollisionWarning"] if keep_stock else 0,
"CF_Lkas_FusionState": lkas11["CF_Lkas_FusionState"] if keep_stock else 0,
"CF_Lkas_Chksum": 0,
"CF_Lkas_FcwOpt_USM": lkas11["CF_Lkas_FcwOpt_USM"] if keep_stock else 2,
"CF_Lkas_LdwsOpt_USM": lkas11["CF_Lkas_LdwsOpt_USM"] if keep_stock else 3,
}
if car_fingerprint == CAR.GENESIS:
values["CF_Lkas_Bca_R"] = 2
values["CF_Lkas_HbaSysState"] = lkas11["CF_Lkas_HbaSysState"] if keep_stock else 0
values["CF_Lkas_HbaOpt"] = lkas11["CF_Lkas_HbaOpt"] if keep_stock else 1
values["CF_Lkas_FcwOpt_USM"] = lkas11["CF_Lkas_FcwOpt_USM"] if keep_stock else 2
values["CF_Lkas_LdwsOpt_USM"] = lkas11["CF_Lkas_LdwsOpt_USM"] if keep_stock else 0
if car_fingerprint == CAR.KIA_OPTIMA:
values["CF_Lkas_Bca_R"] = 0
values["CF_Lkas_HbaOpt"] = lkas11["CF_Lkas_HbaOpt"] if keep_stock else 1
values["CF_Lkas_FcwOpt_USM"] = lkas11["CF_Lkas_FcwOpt_USM"] if keep_stock else 0
if car_fingerprint == CAR.KIA_CARDENZA:
########################################################
#values["CF_Lkas_Bca_R"] = int(left_lane) + (int(right_lane) << 1)
#values["CF_Lkas_FcwOpt_USM"] = 2 if enabled else 1
# FcwOpt_USM 5 = Orange blinking car + lanes
# FcwOpt_USM 4 = Orange car + lanes
# FcwOpt_USM 3 = Green blinking car + lanes
# FcwOpt_USM 2 = Green car + lanes
# FcwOpt_USM 1 = White car + lanes
# FcwOpt_USM 0 = No car + lanes
#values["CF_Lkas_SysWarning"] = 4 if sys_warning else 0
# SysWarning 4 = keep hands on wheel
# SysWarning 5 = keep hands on wheel (red)
# SysWarning 6 = keep hands on wheel (red) + beep
# Note: the warning is hidden while the blinkers are on
#values["CF_Lkas_LdwsOpt_USM"] = 2
########################################################
values["CF_Lkas_Bca_R"] = 0
values["CF_Lkas_FcwOpt_USM"] = 1
values["CF_Lkas_LdwsOpt_USM"] = 3
dat = packer.make_can_msg("LKAS11", 0, values)[2]
if car_fingerprint in CHECKSUM["crc8"]:
# CRC Checksum as seen on 2019 Hyundai Santa Fe
dat = dat[:6] + dat[7:8]
checksum = hyundai_checksum(dat)
elif car_fingerprint in CHECKSUM["6B"]:
# Checksum of first 6 Bytes, as seen on 2018 Kia Sorento
checksum = sum(dat[:6]) % 256
else:
# Checksum of first 6 Bytes and last Byte as seen on 2018 Kia Stinger
checksum = (sum(dat[:6]) + dat[7]) % 256
values["CF_Lkas_Chksum"] = checksum
return packer.make_can_msg("LKAS11", bus, values)
def create_clu11(packer, bus, clu11, button, speed, cnt):
values = {
"CF_Clu_CruiseSwState": button,
"CF_Clu_CruiseSwMain": clu11["CF_Clu_CruiseSwMain"],
"CF_Clu_SldMainSW": clu11["CF_Clu_SldMainSW"],
"CF_Clu_ParityBit1": clu11["CF_Clu_ParityBit1"],
"CF_Clu_VanzDecimal": clu11["CF_Clu_VanzDecimal"],
"CF_Clu_Vanz": speed,
"CF_Clu_SPEED_UNIT": clu11["CF_Clu_SPEED_UNIT"],
"CF_Clu_DetentOut": clu11["CF_Clu_DetentOut"],
"CF_Clu_RheostatLevel": clu11["CF_Clu_RheostatLevel"],
"CF_Clu_CluInfo": clu11["CF_Clu_CluInfo"],
"CF_Clu_AmpInfo": clu11["CF_Clu_AmpInfo"],
"CF_Clu_AliveCnt1": cnt,
}
return packer.make_can_msg("CLU11", bus, values)
def create_scc12(packer, apply_accel, enabled, cnt, scc12):
values = {
"CF_VSM_Prefill": scc12["CF_VSM_Prefill"],
"CF_VSM_DecCmdAct": scc12["CF_VSM_DecCmdAct"],
"CF_VSM_HBACmd": scc12["CF_VSM_HBACmd"],
"CF_VSM_Warn": scc12["CF_VSM_Warn"],
"CF_VSM_Stat": scc12["CF_VSM_Stat"],
"CF_VSM_BeltCmd": scc12["CF_VSM_BeltCmd"],
"ACCFailInfo": scc12["ACCFailInfo"],
"ACCMode": scc12["ACCMode"],
"StopReq": scc12["StopReq"],
"CR_VSM_DecCmd": scc12["CR_VSM_DecCmd"],
"aReqMax": apply_accel if enabled and scc12["ACCMode"] == 1 else scc12["aReqMax"],
"TakeOverReq": scc12["TakeOverReq"],
"PreFill": scc12["PreFill"],
"aReqMin": apply_accel if enabled and scc12["ACCMode"] == 1 else scc12["aReqMin"],
"CF_VSM_ConfMode": scc12["CF_VSM_ConfMode"],
"AEB_Failinfo": scc12["AEB_Failinfo"],
"AEB_Status": scc12["AEB_Status"],
"AEB_CmdAct": scc12["AEB_CmdAct"],
"AEB_StopReq": scc12["AEB_StopReq"],
"CR_VSM_Alive": cnt,
"CR_VSM_ChkSum": 0,
}
dat = packer.make_can_msg("SCC12", 0, values)[2]
values["CR_VSM_ChkSum"] = 16 - sum([sum(divmod(i, 16)) for i in dat]) % 16
return packer.make_can_msg("SCC12", 0, values)
def create_mdps12(packer, car_fingerprint, cnt, mdps12):
values = {
"CR_Mdps_StrColTq": mdps12["CR_Mdps_StrColTq"],
"CF_Mdps_Def": mdps12["CF_Mdps_Def"],
"CF_Mdps_ToiActive": 0,
"CF_Mdps_ToiUnavail": 1,
"CF_Mdps_MsgCount2": cnt,
"CF_Mdps_Chksum2": 0,
"CF_Mdps_ToiFlt": mdps12["CF_Mdps_ToiFlt"],
"CF_Mdps_SErr": mdps12["CF_Mdps_SErr"],
"CR_Mdps_StrTq": mdps12["CR_Mdps_StrTq"],
"CF_Mdps_FailStat": mdps12["CF_Mdps_FailStat"],
"CR_Mdps_OutTq": mdps12["CR_Mdps_OutTq"],
}
dat = packer.make_can_msg("MDPS12", 2, values)[2]
checksum = sum(dat) % 256
values["CF_Mdps_Chksum2"] = checksum
return packer.make_can_msg("MDPS12", 2, values)
|
selfdrive/car/hyundai/hyundaican.py
| 6,466 |
"CF_Lkas_LdwsSysState": 3 if steer_req else lane_visible,"CF_Lkas_LdwsLHWarning": lkas11["CF_Lkas_LdwsLHWarning"],"CF_Lkas_LdwsRHWarning": lkas11["CF_Lkas_LdwsRHWarning"],values["CF_Lkas_Bca_R"] = int(left_lane) + (int(right_lane) << 1)values["CF_Lkas_FcwOpt_USM"] = 2 if enabled else 1 FcwOpt_USM 5 = Orange blinking car + lanes FcwOpt_USM 4 = Orange car + lanes FcwOpt_USM 3 = Green blinking car + lanes FcwOpt_USM 2 = Green car + lanes FcwOpt_USM 1 = White car + lanes FcwOpt_USM 0 = No car + lanesvalues["CF_Lkas_SysWarning"] = 4 if sys_warning else 0 SysWarning 4 = keep hands on wheel SysWarning 5 = keep hands on wheel (red) SysWarning 6 = keep hands on wheel (red) + beep Note: the warning is hidden while the blinkers are onvalues["CF_Lkas_LdwsOpt_USM"] = 2 CRC Checksum as seen on 2019 Hyundai Santa Fe Checksum of first 6 Bytes, as seen on 2018 Kia Sorento Checksum of first 6 Bytes and last Byte as seen on 2018 Kia Stinger
| 935 |
en
| 0.583977 |
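# Editor's note: a stand-alone sketch of the SCC12 checksum arithmetic used in
# create_scc12 above. sum(divmod(b, 16)) adds a byte's high and low nibble, and
# the stored value is whatever brings the low nibble of the grand total to zero.
# The trailing "% 16" wrap for the sum-divisible-by-16 edge case is an assumption.
def scc12_checksum(payload: bytes) -> int:
    nibble_sum = sum(sum(divmod(b, 16)) for b in payload)
    return (16 - nibble_sum % 16) % 16

print(scc12_checksum(bytes([0x12, 0x34])))  # nibbles 1+2+3+4 = 10 -> checksum 6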
"""
Current-flow betweenness centrality measures for subsets of nodes.
"""
# Copyright (C) 2010-2011 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
__author__ = """Aric Hagberg ([email protected])"""
__all__ = ['current_flow_betweenness_centrality_subset',
'edge_current_flow_betweenness_centrality_subset']
import itertools
import networkx as nx
from networkx.algorithms.centrality.flow_matrix import *
def current_flow_betweenness_centrality_subset(G,sources,targets,
normalized=True,
weight='weight',
dtype=float, solver='lu'):
r"""Compute current-flow betweenness centrality for subsets of nodes.
Current-flow betweenness centrality uses an electrical current
model for information spreading in contrast to betweenness
centrality which uses shortest paths.
Current-flow betweenness centrality is also known as
random-walk betweenness centrality [2]_.
Parameters
----------
G : graph
A NetworkX graph
sources: list of nodes
Nodes to use as sources for current
targets: list of nodes
Nodes to use as sinks for current
normalized : bool, optional (default=True)
If True the betweenness values are normalized by b=b/(n-1)(n-2) where
n is the number of nodes in G.
weight : string or None, optional (default='weight')
Key for edge data used as the edge weight.
If None, then use 1 as each edge weight.
dtype: data type (float)
Default data type for internal matrices.
Set to np.float32 for lower memory consumption.
solver: string (default='lu')
Type of linear solver to use for computing the flow matrix.
Options are "full" (uses most memory), "lu" (recommended), and
"cg" (uses least memory).
Returns
-------
nodes : dictionary
Dictionary of nodes with betweenness centrality as the value.
See Also
--------
approximate_current_flow_betweenness_centrality
betweenness_centrality
edge_betweenness_centrality
edge_current_flow_betweenness_centrality
Notes
-----
Current-flow betweenness can be computed in `O(I(n-1)+mn \log n)`
time [1]_, where `I(n-1)` is the time needed to compute the
inverse Laplacian. For a full matrix this is `O(n^3)` but using
sparse methods you can achieve `O(nm{\sqrt k})` where `k` is the
Laplacian matrix condition number.
    The space required is `O(nw)` where `w` is the width of the sparse
    Laplacian matrix. Worst case is `w=n` for `O(n^2)`.
If the edges have a 'weight' attribute they will be used as
weights in this algorithm. Unspecified weights are set to 1.
References
----------
.. [1] Centrality Measures Based on Current Flow.
Ulrik Brandes and Daniel Fleischer,
Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf
.. [2] A measure of betweenness centrality based on random walks,
M. E. J. Newman, Social Networks 27, 39-54 (2005).
"""
from networkx.utils import reverse_cuthill_mckee_ordering
try:
import numpy as np
except ImportError:
        raise ImportError('current_flow_betweenness_centrality requires NumPy '
                          'http://scipy.org/')
try:
import scipy
except ImportError:
        raise ImportError('current_flow_betweenness_centrality requires SciPy '
                          'http://scipy.org/')
if G.is_directed():
        raise nx.NetworkXError('current_flow_betweenness_centrality() '
                               'not defined for digraphs.')
if not nx.is_connected(G):
raise nx.NetworkXError("Graph not connected.")
n = G.number_of_nodes()
ordering = list(reverse_cuthill_mckee_ordering(G))
# make a copy with integer labels according to rcm ordering
# this could be done without a copy if we really wanted to
mapping=dict(zip(ordering,range(n)))
H = nx.relabel_nodes(G,mapping)
betweenness = dict.fromkeys(H,0.0) # b[v]=0 for v in H
for row,(s,t) in flow_matrix_row(H, weight=weight, dtype=dtype,
solver=solver):
for ss in sources:
i=mapping[ss]
for tt in targets:
j=mapping[tt]
betweenness[s]+=0.5*np.abs(row[i]-row[j])
betweenness[t]+=0.5*np.abs(row[i]-row[j])
if normalized:
nb=(n-1.0)*(n-2.0) # normalization factor
else:
nb=2.0
for v in H:
betweenness[v]=betweenness[v]/nb+1.0/(2-n)
return dict((ordering[k],v) for k,v in betweenness.items())
def edge_current_flow_betweenness_centrality_subset(G, sources, targets,
normalized=True,
weight='weight',
dtype=float, solver='lu'):
"""Compute current-flow betweenness centrality for edges using subsets
of nodes.
Current-flow betweenness centrality uses an electrical current
model for information spreading in contrast to betweenness
centrality which uses shortest paths.
Current-flow betweenness centrality is also known as
random-walk betweenness centrality [2]_.
Parameters
----------
G : graph
A NetworkX graph
sources: list of nodes
Nodes to use as sources for current
targets: list of nodes
Nodes to use as sinks for current
normalized : bool, optional (default=True)
If True the betweenness values are normalized by b=b/(n-1)(n-2) where
n is the number of nodes in G.
weight : string or None, optional (default='weight')
Key for edge data used as the edge weight.
If None, then use 1 as each edge weight.
dtype: data type (float)
Default data type for internal matrices.
Set to np.float32 for lower memory consumption.
solver: string (default='lu')
Type of linear solver to use for computing the flow matrix.
Options are "full" (uses most memory), "lu" (recommended), and
"cg" (uses least memory).
Returns
-------
nodes : dictionary
Dictionary of edge tuples with betweenness centrality as the value.
See Also
--------
betweenness_centrality
edge_betweenness_centrality
current_flow_betweenness_centrality
Notes
-----
Current-flow betweenness can be computed in `O(I(n-1)+mn \log n)`
time [1]_, where `I(n-1)` is the time needed to compute the
inverse Laplacian. For a full matrix this is `O(n^3)` but using
sparse methods you can achieve `O(nm{\sqrt k})` where `k` is the
Laplacian matrix condition number.
    The space required is `O(nw)` where `w` is the width of the sparse
    Laplacian matrix. Worst case is `w=n` for `O(n^2)`.
If the edges have a 'weight' attribute they will be used as
weights in this algorithm. Unspecified weights are set to 1.
References
----------
.. [1] Centrality Measures Based on Current Flow.
Ulrik Brandes and Daniel Fleischer,
Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf
.. [2] A measure of betweenness centrality based on random walks,
M. E. J. Newman, Social Networks 27, 39-54 (2005).
"""
from networkx.utils import reverse_cuthill_mckee_ordering
try:
import numpy as np
except ImportError:
        raise ImportError('current_flow_betweenness_centrality requires NumPy '
                          'http://scipy.org/')
try:
import scipy
except ImportError:
        raise ImportError('current_flow_betweenness_centrality requires SciPy '
                          'http://scipy.org/')
if G.is_directed():
        raise nx.NetworkXError('edge_current_flow_betweenness_centrality '
                               'not defined for digraphs.')
if not nx.is_connected(G):
raise nx.NetworkXError("Graph not connected.")
n = G.number_of_nodes()
ordering = list(reverse_cuthill_mckee_ordering(G))
# make a copy with integer labels according to rcm ordering
# this could be done without a copy if we really wanted to
mapping=dict(zip(ordering,range(n)))
H = nx.relabel_nodes(G,mapping)
betweenness=(dict.fromkeys(H.edges(),0.0))
if normalized:
nb=(n-1.0)*(n-2.0) # normalization factor
else:
nb=2.0
for row,(e) in flow_matrix_row(H, weight=weight, dtype=dtype,
solver=solver):
for ss in sources:
i=mapping[ss]
for tt in targets:
j=mapping[tt]
betweenness[e]+=0.5*np.abs(row[i]-row[j])
betweenness[e]/=nb
return dict(((ordering[s],ordering[t]),v)
for (s,t),v in betweenness.items())
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import numpy
import scipy
except:
raise SkipTest("NumPy not available")
|
networkx/algorithms/centrality/current_flow_betweenness_subset.py
| 9,545 |
Compute current-flow betweenness centrality for subsets of nodes.
Current-flow betweenness centrality uses an electrical current
model for information spreading in contrast to betweenness
centrality which uses shortest paths.
Current-flow betweenness centrality is also known as
random-walk betweenness centrality [2]_.
Parameters
----------
G : graph
A NetworkX graph
sources: list of nodes
Nodes to use as sources for current
targets: list of nodes
Nodes to use as sinks for current
normalized : bool, optional (default=True)
If True the betweenness values are normalized by b=b/(n-1)(n-2) where
n is the number of nodes in G.
weight : string or None, optional (default='weight')
Key for edge data used as the edge weight.
If None, then use 1 as each edge weight.
dtype: data type (float)
Default data type for internal matrices.
Set to np.float32 for lower memory consumption.
solver: string (default='lu')
Type of linear solver to use for computing the flow matrix.
Options are "full" (uses most memory), "lu" (recommended), and
"cg" (uses least memory).
Returns
-------
nodes : dictionary
Dictionary of nodes with betweenness centrality as the value.
See Also
--------
approximate_current_flow_betweenness_centrality
betweenness_centrality
edge_betweenness_centrality
edge_current_flow_betweenness_centrality
Notes
-----
Current-flow betweenness can be computed in `O(I(n-1)+mn \log n)`
time [1]_, where `I(n-1)` is the time needed to compute the
inverse Laplacian. For a full matrix this is `O(n^3)` but using
sparse methods you can achieve `O(nm{\sqrt k})` where `k` is the
Laplacian matrix condition number.
The space required is `O(nw) where `w` is the width of the sparse
Laplacian matrix. Worse case is `w=n` for `O(n^2)`.
If the edges have a 'weight' attribute they will be used as
weights in this algorithm. Unspecified weights are set to 1.
References
----------
.. [1] Centrality Measures Based on Current Flow.
Ulrik Brandes and Daniel Fleischer,
Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf
.. [2] A measure of betweenness centrality based on random walks,
M. E. J. Newman, Social Networks 27, 39-54 (2005).
Compute current-flow betweenness centrality for edges using subsets
of nodes.
Current-flow betweenness centrality uses an electrical current
model for information spreading in contrast to betweenness
centrality which uses shortest paths.
Current-flow betweenness centrality is also known as
random-walk betweenness centrality [2]_.
Parameters
----------
G : graph
A NetworkX graph
sources: list of nodes
Nodes to use as sources for current
targets: list of nodes
Nodes to use as sinks for current
normalized : bool, optional (default=True)
If True the betweenness values are normalized by b=b/(n-1)(n-2) where
n is the number of nodes in G.
weight : string or None, optional (default='weight')
Key for edge data used as the edge weight.
If None, then use 1 as each edge weight.
dtype: data type (float)
Default data type for internal matrices.
Set to np.float32 for lower memory consumption.
solver: string (default='lu')
Type of linear solver to use for computing the flow matrix.
Options are "full" (uses most memory), "lu" (recommended), and
"cg" (uses least memory).
Returns
-------
nodes : dictionary
Dictionary of edge tuples with betweenness centrality as the value.
See Also
--------
betweenness_centrality
edge_betweenness_centrality
current_flow_betweenness_centrality
Notes
-----
Current-flow betweenness can be computed in `O(I(n-1)+mn \log n)`
time [1]_, where `I(n-1)` is the time needed to compute the
inverse Laplacian. For a full matrix this is `O(n^3)` but using
sparse methods you can achieve `O(nm{\sqrt k})` where `k` is the
Laplacian matrix condition number.
The space required is `O(nw) where `w` is the width of the sparse
Laplacian matrix. Worse case is `w=n` for `O(n^2)`.
If the edges have a 'weight' attribute they will be used as
weights in this algorithm. Unspecified weights are set to 1.
References
----------
.. [1] Centrality Measures Based on Current Flow.
Ulrik Brandes and Daniel Fleischer,
Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf
.. [2] A measure of betweenness centrality based on random walks,
M. E. J. Newman, Social Networks 27, 39-54 (2005).
Current-flow betweenness centrality measures for subsets of nodes.
Copyright (C) 2010-2011 by Aric Hagberg <[email protected]> Dan Schult <[email protected]> Pieter Swart <[email protected]> All rights reserved. BSD license. make a copy with integer labels according to rcm ordering this could be done without a copy if we really wanted to b[v]=0 for v in H normalization factor make a copy with integer labels according to rcm ordering this could be done without a copy if we really wanted to normalization factor fixture for nose tests
| 5,201 |
en
| 0.800813 |
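# Editor's note: a small usage sketch for the two subset centrality functions
# above (assumes networkx with NumPy/SciPy installed; the graph and node choices
# are made up for illustration).
import networkx as nx

G = nx.grid_2d_graph(3, 3)           # undirected, connected 3x3 grid
sources, targets = [(0, 0)], [(2, 2)]
node_bc = nx.current_flow_betweenness_centrality_subset(
    G, sources, targets, normalized=True, weight=None, solver='lu')
edge_bc = nx.edge_current_flow_betweenness_centrality_subset(
    G, sources, targets, normalized=True, weight=None, solver='lu')
print(max(node_bc, key=node_bc.get))   # node carrying the most current
print(max(edge_bc, key=edge_bc.get))   # edge carrying the most current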
# Generated by Django 3.1.4 on 2021-09-28 13:49
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('store', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='payment',
name='paystack_response',
),
]
|
apps/store/migrations/0002_remove_payment_paystack_response.py
| 327 |
Generated by Django 3.1.4 on 2021-09-28 13:49
| 45 |
en
| 0.687631 |
#!/usr/bin/python3
import functools
from copy import deepcopy
from .grammar import BASE_NODE_TYPES
class NodeBase:
"""Represents a node within the solidity AST.
Attributes:
depth: Number of nodes between this node and the SourceUnit
offset: Absolute source offsets as a (start, stop) tuple
contract_id: Contract ID as given by the standard compiler JSON
fields: List of attributes for this node
"""
def __init__(self, ast, parent):
self.depth = parent.depth + 1 if parent is not None else 0
self._parent = parent
self._children = set()
src = [int(i) for i in ast["src"].split(":")]
self.offset = (src[0], src[0] + src[1])
self.contract_id = src[2]
self.fields = sorted(ast.keys())
for key, value in ast.items():
if isinstance(value, dict) and value.get("nodeType") == "Block":
value = value["statements"]
elif key == "body" and not value:
value = []
if isinstance(value, dict):
item = node_class_factory(value, self)
if isinstance(item, NodeBase):
self._children.add(item)
setattr(self, key, item)
elif isinstance(value, list):
items = [node_class_factory(i, self) for i in value]
setattr(self, key, items)
self._children.update(i for i in items if isinstance(i, NodeBase))
else:
setattr(self, key, value)
def __hash__(self):
return hash(f"{self.nodeType}{self.depth}{self.offset}")
def __repr__(self):
repr_str = f"<{self.nodeType}"
if hasattr(self, "nodes"):
repr_str += " iterable"
if hasattr(self, "type"):
if isinstance(self.type, str):
repr_str += f" {self.type}"
else:
repr_str += f" {self.type._display()}"
if self._display():
repr_str += f" '{self._display()}'"
else:
repr_str += " object"
return f"{repr_str}>"
def _display(self):
if hasattr(self, "name") and hasattr(self, "value"):
return f"{self.name} = {self.value}"
for attr in ("name", "value", "absolutePath"):
if hasattr(self, attr):
return f"{getattr(self, attr)}"
return ""
def children(
self,
depth=None,
include_self=False,
include_parents=True,
include_children=True,
required_offset=None,
offset_limits=None,
filters=None,
exclude_filter=None,
):
"""Get childen nodes of this node.
Arguments:
depth: Number of levels of children to traverse. 0 returns only this node.
include_self: Includes this node in the results.
include_parents: Includes nodes that match in the results, when they also have
child nodes that match.
            include_children: If False, the children of a matching node are not
                              searched once that match is found.
required_offset: Only match nodes with a source offset that contains this offset.
offset_limits: Only match nodes when their source offset is contained inside
this source offset.
filters: Dictionary of {attribute: value} that children must match. Can also
be given as a list of dicts, children that match one of the dicts
will be returned.
exclude_filter: Dictionary of {attribute:value} that children cannot match.
Returns:
List of node objects."""
if filters is None:
filters = {}
if exclude_filter is None:
exclude_filter = {}
if isinstance(filters, dict):
filters = [filters]
filter_fn = functools.partial(
_check_filters, required_offset, offset_limits, filters, exclude_filter
)
find_fn = functools.partial(_find_children, filter_fn, include_parents, include_children)
result = find_fn(find_fn, depth, self)
if include_self or not result or result[0] != self:
return result
return result[1:]
def parents(self, depth=-1, filters=None):
"""Get parent nodes of this node.
Arguments:
depth: Depth limit. If given as a negative value, it will be subtracted
from this object's depth.
filters: Dictionary of {attribute: value} that parents must match.
Returns: list of nodes"""
if filters and not isinstance(filters, dict):
raise TypeError("Filters must be a dict")
if depth < 0:
depth = self.depth + depth
if depth >= self.depth or depth < 0:
raise IndexError("Given depth exceeds node depth")
node_list = []
parent = self
while True:
parent = parent._parent
if not filters or _check_filter(parent, filters, {}):
node_list.append(parent)
if parent.depth == depth:
return node_list
def parent(self, depth=-1, filters=None):
"""Get a parent node of this node.
Arguments:
depth: Depth limit. If given as a negative value, it will be subtracted
from this object's depth. The parent at this exact depth is returned.
filters: Dictionary of {attribute: value} that the parent must match.
If a filter value is given, will return the first parent that meets the filters
up to the given depth. If none is found, returns None.
If no filter is given, returns the parent at the given depth."""
if filters and not isinstance(filters, dict):
raise TypeError("Filters must be a dict")
if depth < 0:
depth = self.depth + depth
if depth >= self.depth or depth < 0:
raise IndexError("Given depth exceeds node depth")
parent = self
while parent.depth > depth:
parent = parent._parent
if parent.depth == depth and not filters:
return parent
if filters and _check_filter(parent, filters, {}):
return parent
return None
def is_child_of(self, node):
"""Checks if this object is a child of the given node object."""
if node.depth >= self.depth:
return False
return self.parent(node.depth) == node
def is_parent_of(self, node):
"""Checks if this object is a parent of the given node object."""
if node.depth <= self.depth:
return False
return node.parent(self.depth) == self
def get(self, key, default=None):
"""
Gets an attribute from this node, if that attribute exists.
Arguments:
key: Field name to return. May contain decimals to return a value
from a child node.
default: Default value to return.
Returns: Field value if it exists. Default value if not.
"""
if key is None:
raise TypeError("Cannot match against None")
obj = self
for k in key.split("."):
if isinstance(obj, dict):
obj = obj.get(k)
else:
obj = getattr(obj, k, None)
return obj or default
class IterableNodeBase(NodeBase):
def __getitem__(self, key):
if isinstance(key, str):
try:
return next(i for i in self.nodes if getattr(i, "name", None) == key)
except StopIteration:
raise KeyError(key)
return self.nodes[key]
def __iter__(self):
return iter(self.nodes)
def __len__(self):
return len(self.nodes)
def __contains__(self, obj):
return obj in self.nodes
def node_class_factory(ast, parent):
ast = deepcopy(ast)
if not isinstance(ast, dict) or "nodeType" not in ast:
return ast
if "body" in ast:
ast["nodes"] = ast.pop("body")
base_class = IterableNodeBase if "nodes" in ast else NodeBase
base_type = next((k for k, v in BASE_NODE_TYPES.items() if ast["nodeType"] in v), None)
if base_type:
ast["baseNodeType"] = base_type
return type(ast["nodeType"], (base_class,), {})(ast, parent)
def _check_filters(required_offset, offset_limits, filters, exclude, node):
if required_offset and not is_inside_offset(required_offset, node.offset):
return False
if offset_limits and not is_inside_offset(node.offset, offset_limits):
return False
for f in filters:
if _check_filter(node, f, exclude):
return True
return False
def _check_filter(node, filters, exclude):
for key, value in filters.items():
if node.get(key) != value:
return False
for key, value in exclude.items():
if node.get(key) == value:
return False
return True
def _find_children(filter_fn, include_parents, include_children, find_fn, depth, node):
if depth is not None:
depth -= 1
if depth < 0:
return [node] if filter_fn(node) else []
if not include_children and filter_fn(node):
return [node]
node_list = []
for child in node._children:
node_list.extend(find_fn(find_fn, depth, child))
if (include_parents or not node_list) and filter_fn(node):
node_list.insert(0, node)
return node_list
def is_inside_offset(inner, outer):
"""Checks if the first offset is contained in the second offset
Args:
inner: inner offset tuple
outer: outer offset tuple
Returns: bool"""
return outer[0] <= inner[0] <= inner[1] <= outer[1]
|
solcast/nodes.py
| 9,868 |
Represents a node within the solidity AST.
Attributes:
depth: Number of nodes between this node and the SourceUnit
offset: Absolute source offsets as a (start, stop) tuple
contract_id: Contract ID as given by the standard compiler JSON
fields: List of attributes for this node
Get childen nodes of this node.
Arguments:
depth: Number of levels of children to traverse. 0 returns only this node.
include_self: Includes this node in the results.
include_parents: Includes nodes that match in the results, when they also have
child nodes that match.
include_children: If True, as soon as a match is found it's children will not
be included in the search.
required_offset: Only match nodes with a source offset that contains this offset.
offset_limits: Only match nodes when their source offset is contained inside
this source offset.
filters: Dictionary of {attribute: value} that children must match. Can also
be given as a list of dicts, children that match one of the dicts
will be returned.
exclude_filter: Dictionary of {attribute:value} that children cannot match.
Returns:
List of node objects.
Gets an attribute from this node, if that attribute exists.
Arguments:
key: Field name to return. May contain decimals to return a value
from a child node.
default: Default value to return.
Returns: Field value if it exists. Default value if not.
Checks if this object is a child of the given node object.
Checks if the first offset is contained in the second offset
Args:
inner: inner offset tuple
outer: outer offset tuple
Returns: bool
Checks if this object is a parent of the given node object.
Get a parent node of this node.
Arguments:
depth: Depth limit. If given as a negative value, it will be subtracted
from this object's depth. The parent at this exact depth is returned.
filters: Dictionary of {attribute: value} that the parent must match.
If a filter value is given, will return the first parent that meets the filters
up to the given depth. If none is found, returns None.
If no filter is given, returns the parent at the given depth.
Get parent nodes of this node.
Arguments:
depth: Depth limit. If given as a negative value, it will be subtracted
from this object's depth.
filters: Dictionary of {attribute: value} that parents must match.
Returns: list of nodes
!/usr/bin/python3
| 2,486 |
en
| 0.836349 |
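# Editor's note: a tiny usage sketch of node_class_factory / children() above.
# The AST fragment is hand-written and only loosely shaped like solc's standard
# JSON output (field values are made up), and the import path assumes the module
# above is importable as solcast.nodes.
from solcast.nodes import node_class_factory

ast = {
    "nodeType": "SourceUnit",
    "src": "0:100:0",
    "body": [
        {"nodeType": "ContractDefinition", "name": "Foo", "src": "0:60:0", "body": []},
        {"nodeType": "PragmaDirective", "src": "61:20:0"},
    ],
}
root = node_class_factory(ast, None)
contracts = root.children(filters={"nodeType": "ContractDefinition"})
print([node._display() for node in contracts])  # ['Foo']
print(contracts[0].is_child_of(root))           # True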
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from multiprocessing import Pool
import requests
PROCESS_POOL_SIZE = 10
REQUESTS = 10000
BASE_URL = "http://localhost:8888"
RESOURCE_NAME = "resource"
def f(process_number):
resource_name = RESOURCE_NAME
raw_body = '{"title": "%i", "lifetime": 300, "wait": 20}' % process_number
r = requests.post("%s/locks/%s" % (BASE_URL, resource_name), data=raw_body)
if r.status_code != 201:
raise Exception("bad status code %i from post request" % r.status_code)
lock_url = r.headers['Location']
r = requests.delete(lock_url)
if r.status_code != 204:
raise Exception("bad status code %i from delete request" % r.status_code)
if __name__ == '__main__':
pool = Pool(processes=PROCESS_POOL_SIZE)
pool.map(f, range(0, REQUESTS))
|
tests/bomb1.py
| 817 |
!/usr/bin/env python -*- coding: utf-8 -*-
| 42 |
en
| 0.34282 |
# Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Splits the gencost variable into two pieces if costs are given for Qg.
"""
from sys import stderr
from numpy import array, arange
def pqcost(gencost, ng, on=None):
"""Splits the gencost variable into two pieces if costs are given for Qg.
Checks whether C{gencost} has cost information for reactive power
generation (rows C{ng+1} to C{2*ng}). If so, it returns the first C{ng}
rows in C{pcost} and the last C{ng} rows in C{qcost}. Otherwise, leaves
C{qcost} empty. Also does some error checking.
If C{on} is specified (list of indices of generators which are on line)
it only returns the rows corresponding to these generators.
@author: Ray Zimmerman (PSERC Cornell)
"""
if on is None:
on = arange(ng)
if gencost.shape[0] == ng:
pcost = gencost[on, :]
qcost = array([])
elif gencost.shape[0] == 2 * ng:
pcost = gencost[on, :]
qcost = gencost[on + ng, :]
else:
stderr.write('pqcost: gencost has wrong number of rows\n')
return pcost, qcost
|
pandapower/pypower/pqcost.py
| 1,210 |
Splits the gencost variable into two pieces if costs are given for Qg.
Checks whether C{gencost} has cost information for reactive power
generation (rows C{ng+1} to C{2*ng}). If so, it returns the first C{ng}
rows in C{pcost} and the last C{ng} rows in C{qcost}. Otherwise, leaves
C{qcost} empty. Also does some error checking.
If C{on} is specified (list of indices of generators which are on line)
it only returns the rows corresponding to these generators.
@author: Ray Zimmerman (PSERC Cornell)
Splits the gencost variable into two pieces if costs are given for Qg.
Copyright (c) 1996-2015 PSERC. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
| 723 |
en
| 0.829125 |
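# Editor's note: usage sketch for pqcost above with a made-up 2-generator cost
# table that also carries reactive-power rows (so gencost has 2 * ng rows; the
# column layout is only illustrative).
import numpy as np
from pandapower.pypower.pqcost import pqcost

ng = 2
gencost = np.array([
    [2, 0, 0, 3, 0.01, 40.0, 0],    # active-power cost rows (first ng)
    [2, 0, 0, 3, 0.02, 30.0, 0],
    [2, 0, 0, 3, 0.001, 4.0, 0],    # reactive-power cost rows (last ng)
    [2, 0, 0, 3, 0.002, 3.0, 0],
])
pcost, qcost = pqcost(gencost, ng)
print(pcost.shape, qcost.shape)     # (2, 7) (2, 7)
pcost_on, qcost_on = pqcost(gencost, ng, on=np.array([1]))  # only generator 1 online
print(pcost_on.shape)               # (1, 7)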
""" Full assembly of the parts to form the complete network """
import torch.nn.functional as F
from .unet_parts import *
from .channels import C
class UNet3D(nn.Module):
def __init__(self, n_channels, n_classes, bilinear=True, apply_sigmoid_to_output=False):
super(UNet3D, self).__init__()
self.n_channels = n_channels
self.n_classes = n_classes
self.bilinear = bilinear
self.inc = DoubleConv3D(n_channels, C[0])
self.down1 = Down(C[0], C[1])
self.down2 = Down(C[1], C[2])
self.down3 = Down(C[2], C[3])
factor = 2 if bilinear else 1
        self.down4 = Down(C[3], C[4] // factor)  # switch to DoubleConv if sticking to 8x spatial downsampling
self.up1 = Up(C[4], C[3] // factor, bilinear)
self.up2 = Up(C[3], C[2] // factor, bilinear)
self.up3 = Up(C[2], C[1] // factor, bilinear)
self.up4 = Up(C[1], C[0], bilinear)
self.outc = OutConv(C[0], n_classes) if apply_sigmoid_to_output is False else OutConv(C[0], n_classes, sigmoid=True)
def forward(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
logits = self.outc(x)
return logits
|
pytorch/unet_3d/unet_model.py
| 1,376 |
Full assembly of the parts to form the complete network
switch do Double CONV if stick do 8x spatial down
| 108 |
en
| 0.696753 |
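# Editor's note: a shape-check sketch for UNet3D above (assumes the package's
# unet_parts/channels modules are importable and that C holds at least five
# channel widths). Spatial dims must be divisible by 16 because of the four
# downsampling stages.
import torch

model = UNet3D(n_channels=1, n_classes=2, bilinear=True)
x = torch.randn(1, 1, 32, 64, 64)   # (batch, channels, depth, height, width)
with torch.no_grad():
    logits = model(x)
# expected torch.Size([1, 2, 32, 64, 64]) if the parts preserve spatial size
# in the usual U-Net way
print(logits.shape)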
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import versionutils
from oslo_policy import policy
from neutron.conf.policies import base
DEPRECATED_REASON = (
"The security group API now supports system scope and default roles.")
SG_COLLECTION_PATH = '/security-groups'
SG_RESOURCE_PATH = '/security-groups/{id}'
RULE_COLLECTION_PATH = '/security-group-rules'
RULE_RESOURCE_PATH = '/security-group-rules/{id}'
RULE_ADMIN_OR_SG_OWNER = 'rule:admin_or_sg_owner'
RULE_ADMIN_OWNER_OR_SG_OWNER = 'rule:admin_owner_or_sg_owner'
rules = [
policy.RuleDefault(
name='admin_or_sg_owner',
check_str=base.policy_or(
'rule:context_is_admin',
'tenant_id:%(security_group:tenant_id)s'),
description='Rule for admin or security group owner access'),
policy.RuleDefault(
name='admin_owner_or_sg_owner',
check_str=base.policy_or(
'rule:owner',
RULE_ADMIN_OR_SG_OWNER),
description=('Rule for resource owner, '
'admin or security group owner access')),
# TODO(amotoki): admin_or_owner is the right rule?
# Does an empty string make more sense for create_security_group?
policy.DocumentedRuleDefault(
name='create_security_group',
check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
scope_types=['system', 'project'],
description='Create a security group',
operations=[
{
'method': 'POST',
'path': SG_COLLECTION_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='create_security_group',
check_str=base.RULE_ADMIN_OR_OWNER,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
policy.DocumentedRuleDefault(
name='get_security_group',
check_str=base.SYSTEM_OR_PROJECT_READER,
scope_types=['system', 'project'],
description='Get a security group',
operations=[
{
'method': 'GET',
'path': SG_COLLECTION_PATH,
},
{
'method': 'GET',
'path': SG_RESOURCE_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='get_security_group',
check_str=base.RULE_ANY,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
policy.DocumentedRuleDefault(
name='update_security_group',
check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
scope_types=['system', 'project'],
description='Update a security group',
operations=[
{
'method': 'PUT',
'path': SG_RESOURCE_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='update_security_group',
check_str=base.RULE_ADMIN_OR_OWNER,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
policy.DocumentedRuleDefault(
name='delete_security_group',
check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
scope_types=['system', 'project'],
description='Delete a security group',
operations=[
{
'method': 'DELETE',
'path': SG_RESOURCE_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='delete_security_group',
check_str=base.RULE_ADMIN_OR_OWNER,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
# TODO(amotoki): admin_or_owner is the right rule?
# Does an empty string make more sense for create_security_group_rule?
policy.DocumentedRuleDefault(
name='create_security_group_rule',
check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
scope_types=['system', 'project'],
description='Create a security group rule',
operations=[
{
'method': 'POST',
'path': RULE_COLLECTION_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='create_security_group_rule',
check_str=base.RULE_ADMIN_OR_OWNER,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
policy.DocumentedRuleDefault(
name='get_security_group_rule',
check_str=base.policy_or(
base.SYSTEM_OR_PROJECT_READER,
base.RULE_SG_OWNER),
scope_types=['system', 'project'],
description='Get a security group rule',
operations=[
{
'method': 'GET',
'path': RULE_COLLECTION_PATH,
},
{
'method': 'GET',
'path': RULE_RESOURCE_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='get_security_group_rule',
check_str=RULE_ADMIN_OWNER_OR_SG_OWNER,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
policy.DocumentedRuleDefault(
name='delete_security_group_rule',
check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
scope_types=['system', 'project'],
description='Delete a security group rule',
operations=[
{
'method': 'DELETE',
'path': RULE_RESOURCE_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='delete_security_group_rule',
check_str=base.RULE_ADMIN_OR_OWNER,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
]
def list_rules():
return rules
|
neutron/conf/policies/security_group.py
| 6,444 |
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. TODO(amotoki): admin_or_owner is the right rule? Does an empty string make more sense for create_security_group? TODO(amotoki): admin_or_owner is the right rule? Does an empty string make more sense for create_security_group_rule?
| 761 |
en
| 0.830217 |
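# Editor's note: a quick introspection sketch; list_rules() above returns plain
# oslo.policy RuleDefault / DocumentedRuleDefault objects, whose name and check
# string can be printed directly (no running Neutron server required).
for rule in list_rules():
    print(f"{rule.name}: {rule.check_str}")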
import re
find_image_scheme = re.compile(r'(?P<image_construction><img\b[^>]*src="(?P<image_url>[^"]+?)"[^>]*?\/>)')
# find_link_around_image_scheme = re.compile(r"<a\b[^>]*>(.*?)<img\b(.*?)<\/a>")
def move_image_to_attachment(content, attachment_object):
# collect images from the post body
intext_image_list = re.findall(find_image_scheme, content)
if intext_image_list:
        # delete images from the text
content = re.sub(find_image_scheme, r"", content)
# insert link to image into attachments
attachment_object += [{
"type": "Document",
"mediaType": "image/jpeg",
"url": image[1],
"name": "null"
} for image in intext_image_list]
return content
|
rssbot/utils.py
| 758 |
find_link_around_image_scheme = re.compile(r"<a\b[^>]*>(.*?)<img\b(.*?)<\/a>") collect images from the post body delete images form text insert link to image into attachments
| 174 |
en
| 0.760214 |
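# Editor's note: usage sketch for move_image_to_attachment above; the post body
# and URL are made up. Note that the attachments list is extended in place.
from rssbot.utils import move_image_to_attachment

attachments = []
body = '<p>hello</p><img alt="pic" src="https://example.com/a.jpg" />'
cleaned = move_image_to_attachment(body, attachments)
print(cleaned)      # '<p>hello</p>'
print(attachments)  # [{'type': 'Document', 'mediaType': 'image/jpeg',
                    #   'url': 'https://example.com/a.jpg', 'name': 'null'}]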
import numpy as np
import matplotlib.pyplot as plt
import gym
import random
# hyper parameters
# test 1
# alpha = 0.5
# gamma = 0.95
# epsilon = 0.1
epsilon = 0.1
alpha = 0.1
gamma = 0.1
def update_sarsa_table(sarsa, state, action, reward, next_state, next_action, alpha, gamma):
    '''
    SARSA update for a state-action pair. Unlike Q-learning, it bootstraps from the
    action actually chosen by the behaviour policy rather than the greedy maximum.
    '''
next_max = sarsa[next_state,next_action] # corresponding action-state value to current action
# print(f'current status is: {type(q[pre_state,action])},{type(alpha)},{type(reward)},{type(gamma)},{type(next_max)}')
sarsa[state,action] = sarsa[state,action] + alpha * (reward + gamma * next_max - sarsa[state,action])
def epsilon_greedy_policy_sarsa(env, state, sarsa, epsilon):
    '''
    epsilon greedy policy over the SARSA table to generate actions
    '''
if random.uniform(0,1) < epsilon:
return env.action_space.sample()
else:
return np.argmax(sarsa[state])
def epsilon_greedy_policy(env, state, q, epsilon):
'''
epsilon greedy policy for q learning to generate actions
'''
if random.uniform(0,1) < epsilon:
return env.action_space.sample()
else:
return np.argmax(q[state])
def update_q_table(q, pre_state, action, reward, next_state, alpha, gamma):
    '''
    Q-learning update: Q(s,a) <- Q(s,a) + alpha * (reward + gamma * max_a' Q(s',a') - Q(s,a))
    '''
next_max = np.max(q[next_state]) # max state-action value for next state
# print(f'current status is: {type(q[pre_state,action])},{type(alpha)},{type(reward)},{type(gamma)},{type(next_max)}')
q[pre_state,action] = q[pre_state,action] + alpha * (reward + gamma * next_max - q[pre_state,action])
#-----------------------q learning-------------------------------------------
env = gym.make("Taxi-v3")
# initialize q table
q = np.zeros((env.observation_space.n, env.action_space.n))
q_pre = np.zeros((env.observation_space.n, env.action_space.n)) # to check convergence when training
reward_record = []
error_record = []
# loop for each episode:
for episode in range(5000):
r = 0
state = env.reset()
while True:# loop for each step of episode
# choose A from S using policy derived from Q(e.g, epsilon greedy policy)
action = epsilon_greedy_policy(env,state,q,epsilon)
# take action A, observe R, S'
next_state, reward, done, _ = env.step(action)
# update Q(S,A)
update_q_table(q,state,action,reward,next_state,alpha,gamma)
# S<--S'
state = next_state
r += reward
if done:
break
reward_record.append(r)
error = 0
for i in range(q.shape[0]):
error = error + np.sum(np.abs(q[i]-q_pre[i]))
# print(f'{np.abs(q[i]-q_pre[i])},{np.sum(np.abs(q[i]-q_pre[i]))}')
error_record.append(error)
q_pre = np.copy(q)
if episode%100 == 0:
print(f'{episode}th episode: {r}, {error}')
#close game env
env.close()
#plot diagram
# plt.plot(list(range(5000)),reward_record)
# plt.show()
# plt.plot(list(range(5000)),error_record)
# plt.show()
#double q learning
env = gym.make("Taxi-v3")
# initialize q table
q1 = np.zeros((env.observation_space.n, env.action_space.n))
q2 = np.zeros((env.observation_space.n, env.action_space.n))
q1_pre = np.zeros((env.observation_space.n, env.action_space.n)) # to check convergence when training
q2_pre = np.zeros((env.observation_space.n, env.action_space.n)) # to check convergence when training
# reward and error record
d_reward_record = []
d_error_record = []
# loop for each episode:
for episode in range(5000):
r = 0
state = env.reset()
while True:# loop for each step of episode
# choose A from S using policy derived from Q1+Q2(e.g, epsilon greedy policy)
action = epsilon_greedy_policy(env,state,q1+q2,epsilon)
# take action A, observe R, S'
next_state, reward, done, _ = env.step(action)
# with 0.5 probability:
if random.uniform(0,1) < 0.5:
update_q_table(q1,state,action,reward,next_state,alpha,gamma)
else:
update_q_table(q2,state,action,reward,next_state,alpha,gamma)
# S<--S'
state = next_state
r += reward
if done:
break
d_reward_record.append(r)
error = 0
    for i in range(q1.shape[0]):
error = error + 0.5 * np.sum(np.abs(q1[i]-q1_pre[i])) + 0.5 * np.sum(np.abs(q2[i]-q2_pre[i]))
# print(f'{np.abs(q[i]-q_pre[i])},{np.sum(np.abs(q[i]-q_pre[i]))}')
d_error_record.append(error)
q1_pre = np.copy(q1)
q2_pre = np.copy(q2)
if episode%100 == 0:
print(f'{episode}th episode: {r}, {error}')
#close game env
env.close()
#plot diagram
plt.plot(list(range(5000)),reward_record,label='q learning')
plt.plot(list(range(5000)),d_reward_record,label='double q learning')
plt.legend()
plt.show()
plt.plot(list(range(5000)),error_record,label='q learning')
plt.plot(list(range(5000)),d_error_record, label='double q learning')
plt.legend()
plt.show()
|
TD/double_q_learning.py
| 5,010 |
epsilon greedy policy for q learning to generate actions
epsilon greedy policy for q learning to generate actions
update sarsa state-action pair value, main difference from q learning is that it uses epsilon greedy policy
return action
hyper parameters test 1 alpha = 0.5 gamma = 0.95 epsilon = 0.1 corresponding action-state value to current action print(f'current status is: {type(q[pre_state,action])},{type(alpha)},{type(reward)},{type(gamma)},{type(next_max)}') max state-action value for next state print(f'current status is: {type(q[pre_state,action])},{type(alpha)},{type(reward)},{type(gamma)},{type(next_max)}')-----------------------q learning------------------------------------------- initialize q table to check convergence when training loop for each episode: loop for each step of episode choose A from S using policy derived from Q(e.g, epsilon greedy policy) take action A, observe R, S' update Q(S,A) S<--S' print(f'{np.abs(q[i]-q_pre[i])},{np.sum(np.abs(q[i]-q_pre[i]))}')close game envplot diagram plt.plot(list(range(5000)),reward_record) plt.show() plt.plot(list(range(5000)),error_record) plt.show()double q learning initialize q table to check convergence when training to check convergence when training reward and error record loop for each episode: loop for each step of episode choose A from S using policy derived from Q1+Q2(e.g, epsilon greedy policy) take action A, observe R, S' with 0.5 probability: S<--S' print(f'{np.abs(q[i]-q_pre[i])},{np.sum(np.abs(q[i]-q_pre[i]))}')close game envplot diagram
| 1,539 |
en
| 0.561277 |
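# Editor's note: for comparison, the canonical double Q-learning update
# (van Hasselt, 2010) selects the greedy next action with one table and
# evaluates it with the other, whereas update_q_table above always bootstraps
# from the same table it updates. A minimal sketch of the decoupled variant:
import numpy as np

def update_double_q(q_a, q_b, state, action, reward, next_state, alpha, gamma):
    best_next = np.argmax(q_a[next_state])                 # select with table A
    target = reward + gamma * q_b[next_state, best_next]   # evaluate with table B
    q_a[state, action] += alpha * (target - q_a[state, action])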
"""
Meta Data Extension for Python-Markdown
=======================================
This extension adds Meta Data handling to markdown.
See <https://Python-Markdown.github.io/extensions/meta_data>
for documentation.
Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com).
All changes Copyright 2008-2014 The Python Markdown Project
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..preprocessors import Preprocessor
import re
import logging
log = logging.getLogger('MARKDOWN')
# Global Vars
META_RE = re.compile(r'^[ ]{0,3}(?P<key>[A-Za-z0-9_-]+):\s*(?P<value>.*)')
META_MORE_RE = re.compile(r'^[ ]{4,}(?P<value>.*)')
BEGIN_RE = re.compile(r'^-{3}(\s.*)?')
END_RE = re.compile(r'^(-{3}|\.{3})(\s.*)?')
class MetaExtension (Extension):
""" Meta-Data extension for Python-Markdown. """
def extendMarkdown(self, md, md_globals):
""" Add MetaPreprocessor to Markdown instance. """
md.preprocessors.add("meta",
MetaPreprocessor(md),
">normalize_whitespace")
class MetaPreprocessor(Preprocessor):
""" Get Meta-Data. """
def run(self, lines):
""" Parse Meta-Data and store in Markdown.Meta. """
meta = {}
key = None
if lines and BEGIN_RE.match(lines[0]):
lines.pop(0)
while lines:
line = lines.pop(0)
m1 = META_RE.match(line)
if line.strip() == '' or END_RE.match(line):
break # blank line or end of YAML header - done
if m1:
key = m1.group('key').lower().strip()
value = m1.group('value').strip()
try:
meta[key].append(value)
except KeyError:
meta[key] = [value]
else:
m2 = META_MORE_RE.match(line)
if m2 and key:
# Add another line to existing key
meta[key].append(m2.group('value').strip())
else:
lines.insert(0, line)
break # no meta data - done
self.markdown.Meta = meta
return lines
def makeExtension(*args, **kwargs):
return MetaExtension(*args, **kwargs)
|
venv/lib/python3.6/site-packages/markdown/extensions/meta.py
| 2,395 |
Meta-Data extension for Python-Markdown.
Get Meta-Data.
Add MetaPreprocessor to Markdown instance.
Parse Meta-Data and store in Markdown.Meta.
Meta Data Extension for Python-Markdown
=======================================
This extension adds Meta Data handling to markdown.
See <https://Python-Markdown.github.io/extensions/meta_data>
for documentation.
Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com).
All changes Copyright 2008-2014 The Python Markdown Project
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
Global Vars blank line or end of YAML header - done Add another line to existing key no meta data - done
| 672 |
en
| 0.442522 |
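# Editor's note: usage sketch for the Meta extension above (the extension name
# may need to be given as 'markdown.extensions.meta' on some Python-Markdown
# versions).
import markdown

md = markdown.Markdown(extensions=['meta'])
html = md.convert("Title: Hello\nAuthor: Jane Doe\n    and John Doe\n\nBody text.")
print(md.Meta)  # {'title': ['Hello'], 'author': ['Jane Doe', 'and John Doe']}
print(html)     # '<p>Body text.</p>'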
from datetime import datetime, timedelta
from typing import List, Optional
from django.conf import settings
from django.core.cache import cache
from django.utils.translation import ugettext as _
from celery.schedules import crontab
from celery.task import periodic_task, task
from celery.utils.log import get_task_logger
from dimagi.utils.couch import CriticalSection
from corehq.apps.domain.models import Domain
from corehq.apps.domain_migration_flags.api import any_migrations_in_progress
from corehq.form_processor.interfaces.dbaccessors import FormAccessors
from corehq.motech.repeaters.dbaccessors import (
get_couch_repeat_record_ids_by_payload_id,
get_sql_repeat_records_by_payload_id,
iter_repeat_record_ids_by_repeater,
)
from corehq.motech.repeaters.models import SQLRepeatRecord
from corehq.sql_db.util import get_db_aliases_for_partitioned_query
from corehq.toggles import CASE_DEDUPE, DISABLE_CASE_UPDATE_RULE_SCHEDULED_TASK
from corehq.util.celery_utils import no_result_task
from corehq.util.decorators import serial_task
from .deduplication import reset_deduplicate_rule, backfill_deduplicate_rule
from .interfaces import FormManagementMode
from .models import (
AUTO_UPDATE_XMLNS,
AutomaticUpdateRule,
CaseDuplicate,
CaseRuleSubmission,
DomainCaseRuleRun,
)
from .utils import (
add_cases_to_case_group,
archive_or_restore_forms,
iter_cases_and_run_rules,
operate_on_payloads,
run_rules_for_case,
)
logger = get_task_logger('data_interfaces')
ONE_HOUR = 60 * 60
def _get_upload_progress_tracker(upload_id):
def _progress_tracker(current, total):
cache.set(upload_id, {
'inProgress': True,
'current': current,
'total': total,
}, ONE_HOUR)
return _progress_tracker
@no_result_task(queue='case_rule_queue', acks_late=True,
soft_time_limit=15 * settings.CELERY_TASK_SOFT_TIME_LIMIT)
def reset_and_backfill_deduplicate_rule_task(domain, rule_id):
if not CASE_DEDUPE.enabled(domain):
return
try:
rule = AutomaticUpdateRule.objects.get(
id=rule_id,
domain=domain,
workflow=AutomaticUpdateRule.WORKFLOW_DEDUPLICATE,
active=True,
deleted=False,
)
except AutomaticUpdateRule.DoesNotExist:
return
AutomaticUpdateRule.clear_caches(rule.domain, AutomaticUpdateRule.WORKFLOW_DEDUPLICATE)
reset_deduplicate_rule(rule)
backfill_deduplicate_rule(domain, rule)
@task(queue='background_queue')
def delete_duplicates_for_cases(case_ids):
CaseDuplicate.bulk_remove_unique_cases(case_ids)
CaseDuplicate.remove_duplicates_for_case_ids(case_ids)
@task(serializer='pickle', ignore_result=True)
def bulk_upload_cases_to_group(upload_id, domain, case_group_id, cases):
results = add_cases_to_case_group(
domain,
case_group_id,
cases,
progress_tracker=_get_upload_progress_tracker(upload_id)
)
cache.set(upload_id, results, ONE_HOUR)
@task(serializer='pickle')
def bulk_form_management_async(archive_or_restore, domain, couch_user, form_ids):
task = bulk_form_management_async
mode = FormManagementMode(archive_or_restore, validate=True)
if not form_ids:
return {'messages': {'errors': [_('No Forms are supplied')]}}
response = archive_or_restore_forms(domain, couch_user.user_id, couch_user.username, form_ids, mode, task)
return response
@periodic_task(serializer='pickle',
run_every=crontab(hour='*', minute=0),
queue=settings.CELERY_PERIODIC_QUEUE,
ignore_result=True
)
def run_case_update_rules(now=None):
domains = (AutomaticUpdateRule
.objects
.filter(active=True, deleted=False, workflow=AutomaticUpdateRule.WORKFLOW_CASE_UPDATE)
.values_list('domain', flat=True)
.distinct()
.order_by('domain'))
hour_to_run = now.hour if now else datetime.utcnow().hour
for domain in domains:
if not any_migrations_in_progress(domain) and not DISABLE_CASE_UPDATE_RULE_SCHEDULED_TASK.enabled(domain):
domain_obj = Domain.get_by_name(domain)
if domain_obj.auto_case_update_hour is None:
domain_hour = settings.RULE_UPDATE_HOUR
else:
domain_hour = domain_obj.auto_case_update_hour
if hour_to_run == domain_hour:
run_case_update_rules_for_domain.delay(domain, now)
@task(serializer='pickle', queue='case_rule_queue')
def run_case_update_rules_for_domain(domain, now=None):
now = now or datetime.utcnow()
domain_rules = AutomaticUpdateRule.by_domain(domain, AutomaticUpdateRule.WORKFLOW_CASE_UPDATE)
all_rule_case_types = set(domain_rules.values_list('case_type', flat=True))
for case_type in all_rule_case_types:
run_record = DomainCaseRuleRun.objects.create(
domain=domain,
started_on=datetime.utcnow(),
status=DomainCaseRuleRun.STATUS_RUNNING,
case_type=case_type
)
for db in get_db_aliases_for_partitioned_query():
run_case_update_rules_for_domain_and_db.delay(domain, now, run_record.pk, case_type, db=db)
@serial_task(
'{domain}-{case_type}-{db}',
timeout=36 * 60 * 60,
max_retries=0,
queue='case_rule_queue',
)
def run_case_update_rules_for_domain_and_db(domain, now, run_id, case_type, db=None):
all_rules = AutomaticUpdateRule.by_domain(domain, AutomaticUpdateRule.WORKFLOW_CASE_UPDATE)
rules = list(all_rules.filter(case_type=case_type))
boundary_date = AutomaticUpdateRule.get_boundary_date(rules, now)
iterator = AutomaticUpdateRule.iter_cases(domain, case_type, boundary_date, db=db)
run = iter_cases_and_run_rules(domain, iterator, rules, now, run_id, case_type, db)
if run.status == DomainCaseRuleRun.STATUS_FINISHED:
for rule in rules:
AutomaticUpdateRule.objects.filter(pk=rule.pk).update(last_run=now)
@task(serializer='pickle', queue='background_queue', acks_late=True, ignore_result=True)
def run_case_update_rules_on_save(case):
key = 'case-update-on-save-case-{case}'.format(case=case.case_id)
with CriticalSection([key]):
update_case = True
if case.xform_ids:
last_form = FormAccessors(case.domain).get_form(case.xform_ids[-1])
update_case = last_form.xmlns != AUTO_UPDATE_XMLNS
if update_case:
rules = AutomaticUpdateRule.by_domain(case.domain,
AutomaticUpdateRule.WORKFLOW_CASE_UPDATE).filter(case_type=case.type)
now = datetime.utcnow()
run_rules_for_case(case, rules, now)
@periodic_task(run_every=crontab(hour=0, minute=0), queue='case_rule_queue', ignore_result=True)
def delete_old_rule_submission_logs():
start = datetime.utcnow()
max_age = start - timedelta(days=90)
CaseRuleSubmission.objects.filter(created_on__lt=max_age).delete()
@task(serializer='pickle')
def task_operate_on_payloads(
record_ids: List[str],
domain: str,
action, # type: Literal['resend', 'cancel', 'requeue'] # 3.8+
use_sql: bool,
):
return operate_on_payloads(record_ids, domain, action, use_sql,
task=task_operate_on_payloads)
@task(serializer='pickle')
def task_generate_ids_and_operate_on_payloads(
payload_id: Optional[str],
repeater_id: Optional[str],
domain: str,
action, # type: Literal['resend', 'cancel', 'requeue'] # 3.8+
use_sql: bool,
) -> dict:
repeat_record_ids = _get_repeat_record_ids(payload_id, repeater_id, domain,
use_sql)
return operate_on_payloads(repeat_record_ids, domain, action, use_sql,
task=task_generate_ids_and_operate_on_payloads)
def _get_repeat_record_ids(
payload_id: Optional[str],
repeater_id: Optional[str],
domain: str,
use_sql: bool,
) -> List[str]:
if not payload_id and not repeater_id:
return []
if payload_id:
if use_sql:
records = get_sql_repeat_records_by_payload_id(domain, payload_id)
return [r.id for r in records]
else:
return get_couch_repeat_record_ids_by_payload_id(domain, payload_id)
else:
if use_sql:
queryset = SQLRepeatRecord.objects.filter(
domain=domain,
repeater__repeater_id=repeater_id,
)
return [r['id'] for r in queryset.values('id')]
else:
return list(iter_repeat_record_ids_by_repeater(domain, repeater_id))
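# Hypothetical usage sketch (not part of this module): how the payload tasks
# above might be enqueued from a view or management command. The helper name
# and arguments here are illustrative assumptions, not existing HQ code.
def _example_requeue_for_repeater(domain, repeater_id):
    record_ids = _get_repeat_record_ids(
        payload_id=None,
        repeater_id=repeater_id,
        domain=domain,
        use_sql=True,
    )
    # .delay() hands the work to Celery asynchronously, matching how the
    # other tasks in this module are dispatched.
    return task_operate_on_payloads.delay(record_ids, domain, 'requeue', True)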
|
corehq/apps/data_interfaces/tasks.py
| 8,651 |
type: Literal['resend', 'cancel', 'requeue'] 3.8+ type: Literal['resend', 'cancel', 'requeue'] 3.8+
| 103 |
en
| 0.134853 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from hermes_python.hermes import Hermes
INTENT_HOW_ARE_YOU = "mikpan:how_are_you"
INTENT_GOOD = "bezzam:feeling_good"
INTENT_BAD = "bezzam:feeling_bad"
INTENT_ALRIGHT = "bezzam:feeling_alright"
INTENT_FILTER_FEELING = [INTENT_GOOD, INTENT_BAD, INTENT_ALRIGHT]
def main():
with Hermes("localhost:1883") as h:
h.subscribe_intent(INTENT_HOW_ARE_YOU, how_are_you_callback) \
.subscribe_intent(INTENT_GOOD, feeling_good_callback) \
.subscribe_intent(INTENT_BAD, feeling_bad_callback) \
.subscribe_intent(INTENT_ALRIGHT, feeling_alright_callback) \
.start()
def how_are_you_callback(hermes, intent_message):
session_id = intent_message.session_id
response = "I'm doing great. How about you?"
hermes.publish_continue_session(session_id, response, INTENT_FILTER_FEELING)
def feeling_good_callback(hermes, intent_message):
session_id = intent_message.session_id
response = "That's awesome! I'm happy to hear that."
hermes.publish_end_session(session_id, response)
def feeling_bad_callback(hermes, intent_message):
session_id = intent_message.session_id
response = "Sorry to hear that. I hope you feel better soon."
hermes.publish_end_session(session_id, response)
def feeling_alright_callback(hermes, intent_message):
session_id = intent_message.session_id
response = "That's cool."
hermes.publish_end_session(session_id, response)
if __name__ == "__main__":
main()
|
V2_action-how-are-you.py
| 1,518 |
!/usr/bin/env python2 -*- coding: utf-8 -*-
| 43 |
en
| 0.380934 |
"""Python 3.9.5"""
import cv2
import HandTrackingModule as htm
def thumbIncrementCheck(lmList: list[list[int]]) -> int:
"""Checks whether your thumb is up or not.
No matter what hand you use.
returns 1 if thumb is up else 0"""
count = 0
t_x = lmList[4][1]
p_x = lmList[17][1]
if t_x > p_x: # If true: RIGHT hand
if lmList[4][1] >= lmList[2][1]:
count += 1
    else: # Else: LEFT hand
if lmList[4][1] <= lmList[2][1]:
count += 1
return count
def textOutput(count, cc) -> str:
"""Returns an appropriate text output depending on
`count` and `cc`."""
text = "NOTHING"
if (count, cc) == (2, 2):
text = "SCISSOR"
elif count == 0:
text = "ROCK"
elif count == 5:
text = "PAPER"
else:
pass
return text
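# Illustrative check (an added sketch, never called by main() below): the
# gesture labels textOutput() is expected to produce for a few hand states.
def _demo_text_output():
    assert textOutput(0, 0) == "ROCK"     # no fingers up
    assert textOutput(5, 2) == "PAPER"    # all five fingers up
    assert textOutput(2, 2) == "SCISSOR"  # exactly index + middle up
    assert textOutput(3, 1) == "NOTHING"  # anything else is unrecognised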
def main():
# cap = cv2.VideoCapture(0) # opens the camera
detector = htm.HandDetector()
while True:
        img = cv2.imread("/home/laughinglouds/Pictures/Webcam/2021-04-13-133250.jpg")  # cv2.imread returns the image directly, not a (success, image) pair
img = detector.findHands(img)
lmlist = detector.findPosition(img, draw=True)
# If a hand is not detected value will be 0
# else non-zero (21)
hand_exists = len(lmlist)
tipIDs = [4, 8, 12, 16, 20] # Represents fingertips
dipIDs = [2, 7, 11, 15, 19] # Represents landmarks below the tips
count = 0 # keeps count of how many fingers are up
cc = 0 # for later checking if `Scissor` or not
if hand_exists:
# Looping for the five fingers
for i in range(0, 5):
if i == 0:
count += thumbIncrementCheck(lmlist)
else:
# 8: Index finger
# 12: Middle finger
if (lmlist[tipIDs[i]][2] < lmlist[dipIDs[i]][2]) and (
tipIDs[i] in (8, 12) # if either index or middle
):
count += 1
cc += 1
elif lmlist[tipIDs[i]][2] < lmlist[dipIDs[i]][2]:
count += 1
# print(cc)
else:
count = -1
txt = textOutput(count, cc)
# (10, 140) is coordinate of txt on the screen
cv2.putText(img, str(txt), (10, 140), cv2.FONT_HERSHEY_PLAIN, 3, (0, 0, 255), 3)
cv2.imshow("Image", img)
# close key isn't working for me
# os: linux mint 20.1
if cv2.waitKey(1) & 0xFF == ord("q"):
break
if __name__ == "__main__":
main()
|
forOutput.py
| 2,606 |
Returns an appropriate text output depending on
`count` and `cc`.
Checks whether your thumb is up or not.
No matter what hand you use.
returns 1 if thumb is up else 0
Python 3.9.5
If true: RIGHT hand ELse: LEFT hand cap = cv2.VideoCapture(0) opens the camera If a hand is not detected value will be 0 else non-zero (21) Represents fingertips Represents landmarks below the tips keeps count of how many fingers are up for later checking if `Scissor` or not Looping for the five fingers 8: Index finger 12: Middle finger if either index or middle print(cc) (10, 140) is coordinate of txt on the screen close key isn't working for me os: linux mint 20.1
| 655 |
en
| 0.688529 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable= arguments-differ
"""Basic neural network layers."""
from ..block import Block, HybridBlock
from ..utils import _indent
class Sequential(Block):
"""Stacks `Block`s sequentially.
Example::
net = nn.Sequential()
# use net's name_scope to give child Blocks appropriate names.
with net.name_scope():
net.add(nn.Dense(10, activation='relu'))
net.add(nn.Dense(20))
"""
def __init__(self, prefix=None, params=None):
super(Sequential, self).__init__(prefix=prefix, params=params)
def add(self, block):
"""Adds block on top of the stack."""
self.register_child(block)
def forward(self, x):
for block in self._children:
x = block(x)
return x
def __repr__(self):
s = '{name}(\n{modstr}\n)'
modstr = '\n'.join([' ({key}): {block}'.format(key=key,
block=_indent(block.__repr__(), 2))
for key, block in enumerate(self._children)
if isinstance(block, Block)])
return s.format(name=self.__class__.__name__,
modstr=modstr)
def __getitem__(self, i):
return self._children[i]
def __len__(self):
return len(self._children)
class HybridSequential(HybridBlock):
"""Stacks `HybridBlock`s sequentially.
Example::
        net = nn.HybridSequential()
# use net's name_scope to give child Blocks appropriate names.
with net.name_scope():
net.add(nn.Dense(10, activation='relu'))
net.add(nn.Dense(20))
"""
def __init__(self, prefix=None, params=None):
super(HybridSequential, self).__init__(prefix=prefix, params=params)
def add(self, block):
"""Adds block on top of the stack."""
self.register_child(block)
def hybrid_forward(self, F, x):
for block in self._children:
x = block(x)
return x
def __repr__(self):
s = '{name}(\n{modstr}\n)'
modstr = '\n'.join([' ({key}): {block}'.format(key=key,
block=_indent(block.__repr__(), 2))
for key, block in enumerate(self._children)
if isinstance(block, Block)])
return s.format(name=self.__class__.__name__,
modstr=modstr)
def __getitem__(self, i):
return self._children[i]
def __len__(self):
return len(self._children)
class Dense(HybridBlock):
"""Just your regular densely-connected NN layer.
`Dense` implements the operation:
`output = activation(dot(input, weight) + bias)`
where `activation` is the element-wise activation function
passed as the `activation` argument, `weight` is a weights matrix
created by the layer, and `bias` is a bias vector created by the layer
(only applicable if `use_bias` is `True`).
Note: the input must be a tensor with rank 2. Use `flatten` to convert it
to rank 2 manually if necessary.
Parameters
----------
units : int
Dimensionality of the output space.
activation : str
Activation function to use. See help on `Activation` layer.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias : bool
Whether the layer uses a bias vector.
weight_initializer : str or `Initializer`
Initializer for the `kernel` weights matrix.
bias_initializer: str or `Initializer`
Initializer for the bias vector.
in_units : int, optional
Size of the input data. If not specified, initialization will be
deferred to the first time `forward` is called and `in_units`
will be inferred from the shape of input data.
prefix : str or None
See document of `Block`.
params : ParameterDict or None
See document of `Block`.
Input shape:
A 2D input with shape `(batch_size, in_units)`.
Output shape:
The output would have shape `(batch_size, units)`.
"""
def __init__(self, units, activation=None, use_bias=True,
weight_initializer=None, bias_initializer='zeros',
in_units=0, **kwargs):
super(Dense, self).__init__(**kwargs)
with self.name_scope():
self._units = units
self._in_units = in_units
self.weight = self.params.get('weight', shape=(units, in_units),
init=weight_initializer,
allow_deferred_init=True)
if use_bias:
self.bias = self.params.get('bias', shape=(units,),
init=bias_initializer,
allow_deferred_init=True)
else:
self.bias = None
if activation is not None:
self.act = Activation(activation, prefix=activation+'_')
else:
self.act = None
def hybrid_forward(self, F, x, weight, bias=None):
if bias is None:
act = F.FullyConnected(x, weight, no_bias=True, num_hidden=self._units,
name='fwd')
else:
act = F.FullyConnected(x, weight, bias, num_hidden=self._units,
name='fwd')
if self.act is not None:
act = self.act(act)
return act
def __repr__(self):
s = '{name}({layout}, {act})'
return s.format(name=self.__class__.__name__,
act=self.act if self.act else 'linear',
layout='{0} -> {1}'.format(self._in_units, self._units) if self._in_units
else self._units)
class Activation(HybridBlock):
"""Applies an activation function to input.
Parameters
----------
activation : str
Name of activation function to use.
See :func:`~mxnet.ndarray.Activation` for available choices.
Input shape:
Arbitrary.
Output shape:
Same shape as input.
"""
def __init__(self, activation, **kwargs):
self._act_type = activation
super(Activation, self).__init__(**kwargs)
def _alias(self):
return self._act_type
def hybrid_forward(self, F, x):
return F.Activation(x, act_type=self._act_type, name='fwd')
def __repr__(self):
s = '{name}({_act_type})'
return s.format(name=self.__class__.__name__,
**self.__dict__)
class Dropout(HybridBlock):
"""Applies Dropout to the input.
Dropout consists in randomly setting a fraction `rate` of input units
to 0 at each update during training time, which helps prevent overfitting.
Parameters
----------
rate : float
Fraction of the input units to drop. Must be a number between 0 and 1.
Input shape:
Arbitrary.
Output shape:
Same shape as input.
References
----------
`Dropout: A Simple Way to Prevent Neural Networks from Overfitting
<http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf>`_
"""
def __init__(self, rate, **kwargs):
super(Dropout, self).__init__(**kwargs)
self._rate = rate
def hybrid_forward(self, F, x):
return F.Dropout(x, p=self._rate, name='fwd')
def __repr__(self):
s = '{name}(p = {_rate})'
return s.format(name=self.__class__.__name__,
**self.__dict__)
class BatchNorm(HybridBlock):
"""Batch normalization layer (Ioffe and Szegedy, 2014).
Normalizes the input at each batch, i.e. applies a transformation
that maintains the mean activation close to 0 and the activation
standard deviation close to 1.
Parameters
----------
axis : int, default 1
The axis that should be normalized. This is typically the channels
(C) axis. For instance, after a `Conv2D` layer with `layout='NCHW'`,
set `axis=1` in `BatchNorm`. If `layout='NHWC'`, then set `axis=3`.
momentum: float, default 0.9
Momentum for the moving average.
epsilon: float, default 1e-5
Small float added to variance to avoid dividing by zero.
center: bool, default True
If True, add offset of `beta` to normalized tensor.
If False, `beta` is ignored.
scale: bool, default True
If True, multiply by `gamma`. If False, `gamma` is not used.
When the next layer is linear (also e.g. `nn.relu`),
this can be disabled since the scaling
will be done by the next layer.
beta_initializer: str or `Initializer`, default 'zeros'
Initializer for the beta weight.
gamma_initializer: str or `Initializer`, default 'ones'
Initializer for the gamma weight.
moving_mean_initializer: str or `Initializer`, default 'zeros'
Initializer for the moving mean.
moving_variance_initializer: str or `Initializer`, default 'ones'
Initializer for the moving variance.
in_channels : int, default 0
Number of channels (feature maps) in input data. If not specified,
initialization will be deferred to the first time `forward` is called
and `in_channels` will be inferred from the shape of input data.
Input shape:
Arbitrary.
Output shape:
Same shape as input.
"""
def __init__(self, axis=1, momentum=0.9, epsilon=1e-5, center=True, scale=True,
beta_initializer='zeros', gamma_initializer='ones',
running_mean_initializer='zeros', running_variance_initializer='ones',
in_channels=0, **kwargs):
super(BatchNorm, self).__init__(**kwargs)
self._kwargs = {'axis': axis, 'eps': epsilon, 'momentum': momentum,
'fix_gamma': not scale}
if in_channels != 0:
self.in_channels = in_channels
self.gamma = self.params.get('gamma', grad_req='write' if scale else 'null',
shape=(in_channels,), init=gamma_initializer,
allow_deferred_init=True,
differentiable=scale)
self.beta = self.params.get('beta', grad_req='write' if center else 'null',
shape=(in_channels,), init=beta_initializer,
allow_deferred_init=True,
differentiable=center)
self.running_mean = self.params.get('running_mean', grad_req='null',
shape=(in_channels,),
init=running_mean_initializer,
allow_deferred_init=True,
differentiable=False)
self.running_var = self.params.get('running_var', grad_req='null',
shape=(in_channels,),
init=running_variance_initializer,
allow_deferred_init=True,
differentiable=False)
def hybrid_forward(self, F, x, gamma, beta, running_mean, running_var):
return F.BatchNorm(x, gamma, beta, running_mean, running_var,
name='fwd', **self._kwargs)
def __repr__(self):
s = '{name}({content}'
if hasattr(self, 'in_channels'):
s += ', in_channels={0}'.format(self.in_channels)
s += ')'
return s.format(name=self.__class__.__name__,
content=', '.join(['='.join([k, v.__repr__()])
for k, v in self._kwargs.items()]))
class LeakyReLU(HybridBlock):
"""Leaky version of a Rectified Linear Unit.
It allows a small gradient when the unit is not active::
`f(x) = alpha * x for x < 0`,
`f(x) = x for x >= 0`.
Parameters
----------
alpha : float
slope coefficient for the negative half axis. Must be >= 0.
Input shape:
Arbitrary.
Output shape:
Same shape as input.
"""
def __init__(self, alpha, **kwargs):
super(LeakyReLU, self).__init__(**kwargs)
self._alpha = alpha
def hybrid_forward(self, F, x):
return F.LeakyReLU(x, act_type='leaky', slope=self._alpha, name='fwd')
def __repr__(self):
s = '{name}({alpha})'
return s.format(name=self.__class__.__name__,
alpha=self._alpha)
class Embedding(HybridBlock):
"""Turns non-negative integers (indexes/tokens) into dense vectors
of fixed size. eg. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]
Parameters
----------
input_dim : int
Size of the vocabulary, i.e. maximum integer index + 1.
output_dim : int
Dimension of the dense embedding.
dtype : str or np.dtype, default 'float32'
Data type of output embeddings.
weight_initializer : Initializer
Initializer for the `embeddings` matrix.
Input shape:
2D tensor with shape: `(N, M)`.
Output shape:
3D tensor with shape: `(N, M, output_dim)`.
"""
def __init__(self, input_dim, output_dim, dtype='float32',
weight_initializer=None, **kwargs):
super(Embedding, self).__init__(**kwargs)
self._kwargs = {'input_dim': input_dim, 'output_dim': output_dim,
'dtype': dtype}
self.weight = self.params.get('weight', shape=(input_dim, output_dim),
init=weight_initializer,
allow_deferred_init=True)
def hybrid_forward(self, F, x, weight):
return F.Embedding(x, weight, name='fwd', **self._kwargs)
def __repr__(self):
s = '{block_name}({input_dim} -> {output_dim}, {dtype})'
return s.format(block_name=self.__class__.__name__,
**self._kwargs)
class Flatten(HybridBlock):
"""Flattens the input to two dimensional.
Input shape:
Arbitrary shape `(N, a, b, c, ...)`
Output shape:
2D tensor with shape: `(N, a*b*c...)`
"""
def __init__(self, **kwargs):
super(Flatten, self).__init__(**kwargs)
def hybrid_forward(self, F, x):
return x.reshape((0, -1))
def __repr__(self):
return self.__class__.__name__
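# Usage sketch (an added illustration, not part of the original module; layer
# sizes are arbitrary placeholders):
#
#     from mxnet import nd
#     from mxnet.gluon import nn
#
#     net = nn.HybridSequential()
#     with net.name_scope():
#         net.add(nn.Dense(64, activation='relu'))
#         net.add(nn.BatchNorm())
#         net.add(nn.Dropout(0.5))
#         net.add(nn.Dense(10))
#     net.initialize()
#     out = net(nd.random_normal(shape=(32, 128)))  # -> shape (32, 10)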
|
python/mxnet/gluon/nn/basic_layers.py
| 15,431 |
Applies an activation function to input.
Parameters
----------
activation : str
Name of activation function to use.
See :func:`~mxnet.ndarray.Activation` for available choices.
Input shape:
Arbitrary.
Output shape:
Same shape as input.
Batch normalization layer (Ioffe and Szegedy, 2014).
Normalizes the input at each batch, i.e. applies a transformation
that maintains the mean activation close to 0 and the activation
standard deviation close to 1.
Parameters
----------
axis : int, default 1
The axis that should be normalized. This is typically the channels
(C) axis. For instance, after a `Conv2D` layer with `layout='NCHW'`,
set `axis=1` in `BatchNorm`. If `layout='NHWC'`, then set `axis=3`.
momentum: float, default 0.9
Momentum for the moving average.
epsilon: float, default 1e-5
Small float added to variance to avoid dividing by zero.
center: bool, default True
If True, add offset of `beta` to normalized tensor.
If False, `beta` is ignored.
scale: bool, default True
If True, multiply by `gamma`. If False, `gamma` is not used.
When the next layer is linear (also e.g. `nn.relu`),
this can be disabled since the scaling
will be done by the next layer.
beta_initializer: str or `Initializer`, default 'zeros'
Initializer for the beta weight.
gamma_initializer: str or `Initializer`, default 'ones'
Initializer for the gamma weight.
moving_mean_initializer: str or `Initializer`, default 'zeros'
Initializer for the moving mean.
moving_variance_initializer: str or `Initializer`, default 'ones'
Initializer for the moving variance.
in_channels : int, default 0
Number of channels (feature maps) in input data. If not specified,
initialization will be deferred to the first time `forward` is called
and `in_channels` will be inferred from the shape of input data.
Input shape:
Arbitrary.
Output shape:
Same shape as input.
Just your regular densely-connected NN layer.
`Dense` implements the operation:
`output = activation(dot(input, weight) + bias)`
where `activation` is the element-wise activation function
passed as the `activation` argument, `weight` is a weights matrix
created by the layer, and `bias` is a bias vector created by the layer
(only applicable if `use_bias` is `True`).
Note: the input must be a tensor with rank 2. Use `flatten` to convert it
to rank 2 manually if necessary.
Parameters
----------
units : int
Dimensionality of the output space.
activation : str
Activation function to use. See help on `Activation` layer.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias : bool
Whether the layer uses a bias vector.
weight_initializer : str or `Initializer`
Initializer for the `kernel` weights matrix.
bias_initializer: str or `Initializer`
Initializer for the bias vector.
in_units : int, optional
Size of the input data. If not specified, initialization will be
deferred to the first time `forward` is called and `in_units`
will be inferred from the shape of input data.
prefix : str or None
See document of `Block`.
params : ParameterDict or None
See document of `Block`.
Input shape:
A 2D input with shape `(batch_size, in_units)`.
Output shape:
The output would have shape `(batch_size, units)`.
Applies Dropout to the input.
Dropout consists in randomly setting a fraction `rate` of input units
to 0 at each update during training time, which helps prevent overfitting.
Parameters
----------
rate : float
Fraction of the input units to drop. Must be a number between 0 and 1.
Input shape:
Arbitrary.
Output shape:
Same shape as input.
References
----------
`Dropout: A Simple Way to Prevent Neural Networks from Overfitting
<http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf>`_
Turns non-negative integers (indexes/tokens) into dense vectors
of fixed size. eg. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]
Parameters
----------
input_dim : int
Size of the vocabulary, i.e. maximum integer index + 1.
output_dim : int
Dimension of the dense embedding.
dtype : str or np.dtype, default 'float32'
Data type of output embeddings.
weight_initializer : Initializer
Initializer for the `embeddings` matrix.
Input shape:
2D tensor with shape: `(N, M)`.
Output shape:
3D tensor with shape: `(N, M, output_dim)`.
Flattens the input to two dimensional.
Input shape:
Arbitrary shape `(N, a, b, c, ...)`
Output shape:
2D tensor with shape: `(N, a*b*c...)`
Stacks `HybridBlock`s sequentially.
Example::
net = nn.Sequential()
# use net's name_scope to give child Blocks appropriate names.
with net.name_scope():
net.add(nn.Dense(10, activation='relu'))
net.add(nn.Dense(20))
Leaky version of a Rectified Linear Unit.
It allows a small gradient when the unit is not active::
`f(x) = alpha * x for x < 0`,
`f(x) = x for x >= 0`.
Parameters
----------
alpha : float
slope coefficient for the negative half axis. Must be >= 0.
Input shape:
Arbitrary.
Output shape:
Same shape as input.
Stacks `Block`s sequentially.
Example::
net = nn.Sequential()
# use net's name_scope to give child Blocks appropriate names.
with net.name_scope():
net.add(nn.Dense(10, activation='relu'))
net.add(nn.Dense(20))
Adds block on top of the stack.
Adds block on top of the stack.
Basic neural network layers.
Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. coding: utf-8 pylint: disable= arguments-differ
| 6,294 |
en
| 0.713549 |
import numpy as np
np.random.seed(0)
from bokeh.io import curdoc
from bokeh.layouts import widgetbox, row, column
from bokeh.models import ColumnDataSource, Select, Slider
from bokeh.plotting import figure
from bokeh.palettes import Spectral6
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
# define some helper functions
def clustering(X, algorithm, n_clusters):
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# Generate the new colors:
if algorithm=='MiniBatchKMeans':
model = cluster.MiniBatchKMeans(n_clusters=n_clusters)
elif algorithm=='Birch':
model = cluster.Birch(n_clusters=n_clusters)
elif algorithm=='DBSCAN':
model = cluster.DBSCAN(eps=.2)
elif algorithm=='AffinityPropagation':
model = cluster.AffinityPropagation(damping=.9,
preference=-200)
elif algorithm=='MeanShift':
model = cluster.MeanShift(bandwidth=bandwidth,
bin_seeding=True)
elif algorithm=='SpectralClustering':
model = cluster.SpectralClustering(n_clusters=n_clusters,
eigen_solver='arpack',
affinity="nearest_neighbors")
elif algorithm=='Ward':
model = cluster.AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward',
connectivity=connectivity)
elif algorithm=='AgglomerativeClustering':
model = cluster.AgglomerativeClustering(linkage="average",
affinity="cityblock",
n_clusters=n_clusters,
connectivity=connectivity)
model.fit(X)
if hasattr(model, 'labels_'):
y_pred = model.labels_.astype(np.int)
else:
y_pred = model.predict(X)
return X, y_pred
def get_dataset(dataset, n_samples):
if dataset == 'Noisy Circles':
return datasets.make_circles(n_samples=n_samples,
factor=0.5,
noise=0.05)
elif dataset == 'Noisy Moons':
return datasets.make_moons(n_samples=n_samples,
noise=0.05)
elif dataset == 'Blobs':
return datasets.make_blobs(n_samples=n_samples,
random_state=8)
elif dataset == "No Structure":
return np.random.rand(n_samples, 2), None
# set up initial data
n_samples = 1500
n_clusters = 2
algorithm = 'MiniBatchKMeans'
dataset = 'Noisy Circles'
X, y = get_dataset(dataset, n_samples)
X, y_pred = clustering(X, algorithm, n_clusters)
spectral = np.hstack([Spectral6] * 20)
colors = [spectral[i] for i in y]
# set up plot (styling in theme.yaml)
plot = figure(toolbar_location=None, title=algorithm)
source = ColumnDataSource(data=dict(x=X[:, 0], y=X[:, 1], colors=colors))
plot.circle('x', 'y', fill_color='colors', line_color=None, source=source)
# set up widgets
clustering_algorithms= [
'MiniBatchKMeans',
'AffinityPropagation',
'MeanShift',
'SpectralClustering',
'Ward',
'AgglomerativeClustering',
'DBSCAN',
'Birch'
]
datasets_names = [
'Noisy Circles',
'Noisy Moons',
'Blobs',
'No Structure'
]
algorithm_select = Select(value='MiniBatchKMeans',
title='Select algorithm:',
width=200,
options=clustering_algorithms)
dataset_select = Select(value='Noisy Circles',
title='Select dataset:',
width=200,
options=datasets_names)
samples_slider = Slider(title="Number of samples",
value=1500.0,
start=1000.0,
end=3000.0,
step=100,
width=400)
clusters_slider = Slider(title="Number of clusters",
value=2.0,
start=2.0,
end=10.0,
step=1,
width=400)
# set up callbacks
def update_algorithm_or_clusters(attrname, old, new):
global X
algorithm = algorithm_select.value
n_clusters = int(clusters_slider.value)
X, y_pred = clustering(X, algorithm, n_clusters)
colors = [spectral[i] for i in y_pred]
source.data['colors'] = colors
source.data['x'] = X[:, 0]
source.data['y'] = X[:, 1]
plot.title.text = algorithm
def update_samples_or_dataset(attrname, old, new):
global X, y
dataset = dataset_select.value
algorithm = algorithm_select.value
n_clusters = int(clusters_slider.value)
n_samples = int(samples_slider.value)
X, y = get_dataset(dataset, n_samples)
X, y_pred = clustering(X, algorithm, n_clusters)
colors = [spectral[i] for i in y_pred]
source.data['x'] = X[:, 0]
source.data['y'] = X[:, 1]
source.data['colors'] = colors
algorithm_select.on_change('value', update_algorithm_or_clusters)
clusters_slider.on_change('value', update_algorithm_or_clusters)
dataset_select.on_change('value', update_samples_or_dataset)
samples_slider.on_change('value', update_samples_or_dataset)
# set up layout
selects = row(dataset_select, algorithm_select, width=420)
inputs = column(selects, widgetbox(samples_slider, clusters_slider))
# add to document
curdoc().add_root(row(inputs, plot))
curdoc().title = "Clustering"
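# Standalone sketch (added illustration, not used by the app itself): the
# helpers above can be exercised without the Bokeh server, e.g. for a quick
# check of an algorithm/dataset pairing.
#
#     X_demo, _ = get_dataset('Noisy Moons', 500)
#     X_demo, labels = clustering(X_demo, 'DBSCAN', n_clusters=2)
#     print(len(set(labels)))  # cluster count found by DBSCAN (noise is labelled -1)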
|
examples/app/clustering/main.py
| 6,043 |
define some helper functions normalize dataset for easier parameter selection estimate bandwidth for mean shift connectivity matrix for structured Ward make connectivity symmetric Generate the new colors: set up initial data set up plot (styling in theme.yaml) set up widgets set up callbacks set up layout add to document
| 322 |
en
| 0.546946 |
import csv
source_file = "Resources/budget_data.csv"
output_file = "Resources/budget_data_analysis.txt"
#initialize months counter, total income, decrease and increase in revenue amounts
number_of_months = 0 # to track the total number of months
income_total = 0 #variable to hold total income as we iterate through the csv
previous_income = 0 #variable to hold previously evaluated value from csv
greatest_profit_increase = ["",0] #list to hold the greatest profit increase, initialized to lowest value 0
greatest_loss_decrease = ["",1000000000000] #list to hold the greatest loss decrease, initialized to highest value
change_in_pl = [] #list to hold change in profit/loss as we iterate through the csv
change_in_income = 0
#print (revenue_decrease)
with open(source_file) as budget_data:
csv_reader = csv.DictReader(budget_data)
for row in csv_reader:
number_of_months = number_of_months + 1
#print(row["Profit/Losses"])
income_total = income_total + int(row["Profit/Losses"])
#print(row)
#trace the changes in amount
change_in_income = int(row["Profit/Losses"]) - previous_income
#print(change_in_income)
        #reset previous_income to the value we just evaluated
previous_income = int(row["Profit/Losses"])
#print(previous_income)
        #greatest increase (skip the first month, which has no prior month to compare against)
        if(number_of_months > 1 and change_in_income > greatest_profit_increase[1]):
            greatest_profit_increase[0] = row["Date"]
            greatest_profit_increase[1] = change_in_income
        #greatest decrease (also skipped for the first month)
        if(number_of_months > 1 and change_in_income < greatest_loss_decrease[1]):
            greatest_loss_decrease[0] = row["Date"]
            greatest_loss_decrease[1] = change_in_income
#append to the change_in_pl for sum calculations
#print(int(row['Profit/Losses']))
change_in_pl.append(int(row['Profit/Losses']))
#calculate net profit or loss
net_profit = sum(change_in_pl)
#print(net_profit)
print()
print('Financial Analysis')
print('--------------------------')
print("Total Months: " + str(number_of_months))
print("Total Income: " + "$" + str(net_profit))
print("Greatest Increase in Profits: " + str(greatest_profit_increase[0]) + " $" + str(greatest_profit_increase[1]))
print("Greatest Decrease in Profits: " + str(greatest_loss_decrease[0]) + " $" + str(greatest_loss_decrease[1]))
#write output to text file
with open(output_file,"w") as results:
results.write("Total Months: " + str(number_of_months))
results.write("\n")
results.write("Total Income: " + "$" + str(net_profit))
results.write("\n")
results.write("Greatest Increase in Profits: " + str(greatest_profit_increase[0]) + " $" + str(greatest_profit_increase[1]))
results.write("\n")
results.write("Greatest Decrease in Profits: " + str(greatest_loss_decrease[0]) + " $" + str(greatest_loss_decrease[1]))
|
PyBank/main.py
| 2,924 |
initialize months counter, total income, decrease and increase in revenue amounts to track the total number of monthsvariable to hold total income as we iterate through the csvvariable to hold previously eveluated value from csv list to hold the greatest profit increase, inaitialized to lowest value 0list to hold the greatest loss decrease, inaitialized to highest valuelist to hold change in profit/loss as we iterate through the csvprint (revenue_decrease)print(row["Profit/Losses"])print(row)trace the changes in amountprint(change_in_income)reinitiate the value to the record we completed evaluatingprint(previous_income)greatest increasegreatest decreaseappend to the change_in_pl for sum calculationsprint(int(row['Profit/Losses']))calculate net profit or lossprint(net_profit)write outup to text file
| 809 |
en
| 0.852688 |
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import subprocess
import sys
import time
import urllib2
import xml.dom.minidom
from webkitpy.common.net.file_uploader import FileUploader
try:
import json
except ImportError:
# python 2.5 compatibility
import webkitpy.thirdparty.simplejson as json
# A JSON results generator for generic tests.
# FIXME: move this code out of the layout_package directory.
_log = logging.getLogger(__name__)
_JSON_PREFIX = "ADD_RESULTS("
_JSON_SUFFIX = ");"
def has_json_wrapper(string):
return string.startswith(_JSON_PREFIX) and string.endswith(_JSON_SUFFIX)
def strip_json_wrapper(json_content):
# FIXME: Kill this code once the server returns json instead of jsonp.
if has_json_wrapper(json_content):
return json_content[len(_JSON_PREFIX):len(json_content) - len(_JSON_SUFFIX)]
return json_content
def load_json(filesystem, file_path):
content = filesystem.read_text_file(file_path)
content = strip_json_wrapper(content)
return json.loads(content)
def write_json(filesystem, json_object, file_path, callback=None):
# Specify separators in order to get compact encoding.
json_string = json.dumps(json_object, separators=(',', ':'))
if callback:
json_string = callback + "(" + json_string + ");"
filesystem.write_text_file(file_path, json_string)
def convert_trie_to_flat_paths(trie, prefix=None):
"""Converts the directory structure in the given trie to flat paths, prepending a prefix to each."""
result = {}
for name, data in trie.iteritems():
if prefix:
name = prefix + "/" + name
if len(data) and not "results" in data:
result.update(convert_trie_to_flat_paths(data, name))
else:
result[name] = data
return result
def add_path_to_trie(path, value, trie):
"""Inserts a single flat directory path and associated value into a directory trie structure."""
if not "/" in path:
trie[path] = value
return
directory, slash, rest = path.partition("/")
if not directory in trie:
trie[directory] = {}
add_path_to_trie(rest, value, trie[directory])
def test_timings_trie(port, individual_test_timings):
"""Breaks a test name into chunks by directory and puts the test time as a value in the lowest part, e.g.
foo/bar/baz.html: 1ms
foo/bar/baz1.html: 3ms
becomes
foo: {
bar: {
baz.html: 1,
baz1.html: 3
}
}
"""
trie = {}
for test_result in individual_test_timings:
test = test_result.test_name
add_path_to_trie(test, int(1000 * test_result.test_run_time), trie)
return trie
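# Added illustration (not part of the original file): how the two trie
# helpers above round-trip a pair of test paths whose leaf values are the
# usual results/times dictionaries.
#
#     trie = {}
#     add_path_to_trie("foo/bar/baz.html", {"results": [[1, "P"]], "times": [[1, 0]]}, trie)
#     add_path_to_trie("foo/qux.html", {"results": [[1, "F"]], "times": [[1, 2]]}, trie)
#     # trie == {"foo": {"bar": {"baz.html": {...}}, "qux.html": {...}}}
#     convert_trie_to_flat_paths(trie)
#     # == {"foo/bar/baz.html": {...}, "foo/qux.html": {...}}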
# FIXME: We already have a TestResult class in test_results.py
class TestResult(object):
"""A simple class that represents a single test result."""
# Test modifier constants.
(NONE, FAILS, FLAKY, DISABLED) = range(4)
def __init__(self, test, failed=False, elapsed_time=0):
self.test_name = test
self.failed = failed
self.test_run_time = elapsed_time
test_name = test
try:
test_name = test.split('.')[1]
except IndexError:
_log.warn("Invalid test name: %s.", test)
pass
if test_name.startswith('FAILS_'):
self.modifier = self.FAILS
elif test_name.startswith('FLAKY_'):
self.modifier = self.FLAKY
elif test_name.startswith('DISABLED_'):
self.modifier = self.DISABLED
else:
self.modifier = self.NONE
def fixable(self):
return self.failed or self.modifier == self.DISABLED
class JSONResultsGeneratorBase(object):
"""A JSON results generator for generic tests."""
MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG = 750
# Min time (seconds) that will be added to the JSON.
MIN_TIME = 1
# Note that in non-chromium tests those chars are used to indicate
# test modifiers (FAILS, FLAKY, etc) but not actual test results.
PASS_RESULT = "P"
SKIP_RESULT = "X"
FAIL_RESULT = "F"
FLAKY_RESULT = "L"
NO_DATA_RESULT = "N"
MODIFIER_TO_CHAR = {TestResult.NONE: PASS_RESULT,
TestResult.DISABLED: SKIP_RESULT,
TestResult.FAILS: FAIL_RESULT,
TestResult.FLAKY: FLAKY_RESULT}
VERSION = 4
VERSION_KEY = "version"
RESULTS = "results"
TIMES = "times"
BUILD_NUMBERS = "buildNumbers"
TIME = "secondsSinceEpoch"
TESTS = "tests"
FIXABLE_COUNT = "fixableCount"
FIXABLE = "fixableCounts"
ALL_FIXABLE_COUNT = "allFixableCount"
RESULTS_FILENAME = "results.json"
TIMES_MS_FILENAME = "times_ms.json"
INCREMENTAL_RESULTS_FILENAME = "incremental_results.json"
URL_FOR_TEST_LIST_JSON = "http://%s/testfile?builder=%s&name=%s&testlistjson=1&testtype=%s&master=%s"
# FIXME: Remove generate_incremental_results once the reference to it in
# http://src.chromium.org/viewvc/chrome/trunk/tools/build/scripts/slave/gtest_slave_utils.py
# has been removed.
def __init__(self, port, builder_name, build_name, build_number,
results_file_base_path, builder_base_url,
test_results_map, svn_repositories=None,
test_results_server=None,
test_type="",
master_name="",
generate_incremental_results=None):
"""Modifies the results.json file. Grabs it off the archive directory
if it is not found locally.
Args
port: port-specific wrapper
builder_name: the builder name (e.g. Webkit).
build_name: the build name (e.g. webkit-rel).
build_number: the build number.
results_file_base_path: Absolute path to the directory containing the
results json file.
builder_base_url: the URL where we have the archived test results.
If this is None no archived results will be retrieved.
test_results_map: A dictionary that maps test_name to TestResult.
svn_repositories: A (json_field_name, svn_path) pair for SVN
repositories that tests rely on. The SVN revision will be
included in the JSON with the given json_field_name.
test_results_server: server that hosts test results json.
test_type: test type string (e.g. 'layout-tests').
master_name: the name of the buildbot master.
"""
self._port = port
self._filesystem = port._filesystem
self._builder_name = builder_name
self._build_name = build_name
self._build_number = build_number
self._builder_base_url = builder_base_url
self._results_directory = results_file_base_path
self._test_results_map = test_results_map
self._test_results = test_results_map.values()
self._svn_repositories = svn_repositories
if not self._svn_repositories:
self._svn_repositories = {}
self._test_results_server = test_results_server
self._test_type = test_type
self._master_name = master_name
self._archived_results = None
def generate_json_output(self):
json_object = self.get_json()
if json_object:
file_path = self._filesystem.join(self._results_directory, self.INCREMENTAL_RESULTS_FILENAME)
write_json(self._filesystem, json_object, file_path)
def generate_times_ms_file(self):
# FIXME: rename to generate_times_ms_file. This needs to be coordinated with
# changing the calls to this on the chromium build slaves.
times = test_timings_trie(self._port, self._test_results_map.values())
file_path = self._filesystem.join(self._results_directory, self.TIMES_MS_FILENAME)
write_json(self._filesystem, times, file_path)
def get_json(self):
"""Gets the results for the results.json file."""
results_json = {}
if not results_json:
results_json, error = self._get_archived_json_results()
if error:
# If there was an error don't write a results.json
# file at all as it would lose all the information on the
# bot.
_log.error("Archive directory is inaccessible. Not "
"modifying or clobbering the results.json "
"file: " + str(error))
return None
builder_name = self._builder_name
if results_json and builder_name not in results_json:
_log.debug("Builder name (%s) is not in the results.json file."
% builder_name)
self._convert_json_to_current_version(results_json)
if builder_name not in results_json:
results_json[builder_name] = (
self._create_results_for_builder_json())
results_for_builder = results_json[builder_name]
self._insert_generic_metadata(results_for_builder)
self._insert_failure_summaries(results_for_builder)
# Update the all failing tests with result type and time.
tests = results_for_builder[self.TESTS]
all_failing_tests = self._get_failed_test_names()
all_failing_tests.update(convert_trie_to_flat_paths(tests))
for test in all_failing_tests:
self._insert_test_time_and_result(test, tests)
return results_json
def set_archived_results(self, archived_results):
self._archived_results = archived_results
def upload_json_files(self, json_files):
"""Uploads the given json_files to the test_results_server (if the
test_results_server is given)."""
if not self._test_results_server:
return
if not self._master_name:
_log.error("--test-results-server was set, but --master-name was not. Not uploading JSON files.")
return
_log.info("Uploading JSON files for builder: %s", self._builder_name)
attrs = [("builder", self._builder_name),
("testtype", self._test_type),
("master", self._master_name)]
files = [(file, self._filesystem.join(self._results_directory, file))
for file in json_files]
url = "http://%s/testfile/upload" % self._test_results_server
uploader = FileUploader(url)
try:
# Set uploading timeout in case appengine server is having problem.
# 120 seconds are more than enough to upload test results.
uploader.upload(attrs, files, 120)
except Exception, err:
_log.error("Upload failed: %s" % err)
return
_log.info("JSON files uploaded.")
def _get_test_timing(self, test_name):
"""Returns test timing data (elapsed time) in second
for the given test_name."""
if test_name in self._test_results_map:
# Floor for now to get time in seconds.
return int(self._test_results_map[test_name].test_run_time)
return 0
def _get_failed_test_names(self):
"""Returns a set of failed test names."""
return set([r.test_name for r in self._test_results if r.failed])
def _get_modifier_char(self, test_name):
"""Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT,
PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test modifier
for the given test_name.
"""
if test_name not in self._test_results_map:
return self.__class__.NO_DATA_RESULT
test_result = self._test_results_map[test_name]
if test_result.modifier in self.MODIFIER_TO_CHAR.keys():
return self.MODIFIER_TO_CHAR[test_result.modifier]
return self.__class__.PASS_RESULT
def _get_result_char(self, test_name):
"""Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT,
PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test result
for the given test_name.
"""
if test_name not in self._test_results_map:
return self.__class__.NO_DATA_RESULT
test_result = self._test_results_map[test_name]
if test_result.modifier == TestResult.DISABLED:
return self.__class__.SKIP_RESULT
if test_result.failed:
return self.__class__.FAIL_RESULT
return self.__class__.PASS_RESULT
# FIXME: Callers should use scm.py instead.
# FIXME: Identify and fix the run-time errors that were observed on Windows
# chromium buildbot when we had updated this code to use scm.py once before.
def _get_svn_revision(self, in_directory):
"""Returns the svn revision for the given directory.
Args:
in_directory: The directory where svn is to be run.
"""
if self._filesystem.exists(self._filesystem.join(in_directory, '.svn')):
# Note: Not thread safe: http://bugs.python.org/issue2320
output = subprocess.Popen(["svn", "info", "--xml"],
cwd=in_directory,
shell=(sys.platform == 'win32'),
stdout=subprocess.PIPE).communicate()[0]
try:
dom = xml.dom.minidom.parseString(output)
return dom.getElementsByTagName('entry')[0].getAttribute(
'revision')
except xml.parsers.expat.ExpatError:
return ""
return ""
def _get_archived_json_results(self):
"""Download JSON file that only contains test
name list from test-results server. This is for generating incremental
JSON so the file generated has info for tests that failed before but
pass or are skipped from current run.
Returns (archived_results, error) tuple where error is None if results
were successfully read.
"""
results_json = {}
old_results = None
error = None
if not self._test_results_server:
return {}, None
results_file_url = (self.URL_FOR_TEST_LIST_JSON %
(urllib2.quote(self._test_results_server),
urllib2.quote(self._builder_name),
self.RESULTS_FILENAME,
urllib2.quote(self._test_type),
urllib2.quote(self._master_name)))
try:
# FIXME: We should talk to the network via a Host object.
results_file = urllib2.urlopen(results_file_url)
info = results_file.info()
old_results = results_file.read()
except urllib2.HTTPError, http_error:
# A non-4xx status code means the bot is hosed for some reason
# and we can't grab the results.json file off of it.
            if (http_error.code < 400 or http_error.code >= 500):
error = http_error
except urllib2.URLError, url_error:
error = url_error
if old_results:
# Strip the prefix and suffix so we can get the actual JSON object.
old_results = strip_json_wrapper(old_results)
try:
results_json = json.loads(old_results)
except:
_log.debug("results.json was not valid JSON. Clobbering.")
# The JSON file is not valid JSON. Just clobber the results.
results_json = {}
else:
_log.debug('Old JSON results do not exist. Starting fresh.')
results_json = {}
return results_json, error
def _insert_failure_summaries(self, results_for_builder):
"""Inserts aggregate pass/failure statistics into the JSON.
This method reads self._test_results and generates
FIXABLE, FIXABLE_COUNT and ALL_FIXABLE_COUNT entries.
Args:
results_for_builder: Dictionary containing the test results for a
single builder.
"""
# Insert the number of tests that failed or skipped.
fixable_count = len([r for r in self._test_results if r.fixable()])
self._insert_item_into_raw_list(results_for_builder,
fixable_count, self.FIXABLE_COUNT)
# Create a test modifiers (FAILS, FLAKY etc) summary dictionary.
entry = {}
for test_name in self._test_results_map.iterkeys():
result_char = self._get_modifier_char(test_name)
entry[result_char] = entry.get(result_char, 0) + 1
# Insert the pass/skip/failure summary dictionary.
self._insert_item_into_raw_list(results_for_builder, entry,
self.FIXABLE)
# Insert the number of all the tests that are supposed to pass.
all_test_count = len(self._test_results)
self._insert_item_into_raw_list(results_for_builder,
all_test_count, self.ALL_FIXABLE_COUNT)
def _insert_item_into_raw_list(self, results_for_builder, item, key):
"""Inserts the item into the list with the given key in the results for
this builder. Creates the list if no such list exists.
Args:
results_for_builder: Dictionary containing the test results for a
single builder.
item: Number or string to insert into the list.
key: Key in results_for_builder for the list to insert into.
"""
if key in results_for_builder:
raw_list = results_for_builder[key]
else:
raw_list = []
raw_list.insert(0, item)
raw_list = raw_list[:self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG]
results_for_builder[key] = raw_list
def _insert_item_run_length_encoded(self, item, encoded_results):
"""Inserts the item into the run-length encoded results.
Args:
item: String or number to insert.
encoded_results: run-length encoded results. An array of arrays, e.g.
[[3,'A'],[1,'Q']] encodes AAAQ.
"""
if len(encoded_results) and item == encoded_results[0][1]:
num_results = encoded_results[0][0]
if num_results <= self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG:
encoded_results[0][0] = num_results + 1
else:
# Use a list instead of a class for the run-length encoding since
# we want the serialized form to be concise.
encoded_results.insert(0, [1, item])
def _insert_generic_metadata(self, results_for_builder):
""" Inserts generic metadata (such as version number, current time etc)
into the JSON.
Args:
results_for_builder: Dictionary containing the test results for
a single builder.
"""
self._insert_item_into_raw_list(results_for_builder,
self._build_number, self.BUILD_NUMBERS)
# Include SVN revisions for the given repositories.
for (name, path) in self._svn_repositories:
self._insert_item_into_raw_list(results_for_builder,
self._get_svn_revision(path),
name + 'Revision')
self._insert_item_into_raw_list(results_for_builder,
int(time.time()),
self.TIME)
def _insert_test_time_and_result(self, test_name, tests):
""" Insert a test item with its results to the given tests dictionary.
Args:
tests: Dictionary containing test result entries.
"""
result = self._get_result_char(test_name)
time = self._get_test_timing(test_name)
this_test = tests
for segment in test_name.split("/"):
if segment not in this_test:
this_test[segment] = {}
this_test = this_test[segment]
if not len(this_test):
self._populate_results_and_times_json(this_test)
if self.RESULTS in this_test:
self._insert_item_run_length_encoded(result, this_test[self.RESULTS])
else:
this_test[self.RESULTS] = [[1, result]]
if self.TIMES in this_test:
self._insert_item_run_length_encoded(time, this_test[self.TIMES])
else:
this_test[self.TIMES] = [[1, time]]
def _convert_json_to_current_version(self, results_json):
"""If the JSON does not match the current version, converts it to the
current version and adds in the new version number.
"""
if self.VERSION_KEY in results_json:
archive_version = results_json[self.VERSION_KEY]
if archive_version == self.VERSION:
return
else:
archive_version = 3
# version 3->4
if archive_version == 3:
num_results = len(results_json.values())
for builder, results in results_json.iteritems():
self._convert_tests_to_trie(results)
results_json[self.VERSION_KEY] = self.VERSION
def _convert_tests_to_trie(self, results):
if not self.TESTS in results:
return
test_results = results[self.TESTS]
test_results_trie = {}
for test in test_results.iterkeys():
single_test_result = test_results[test]
add_path_to_trie(test, single_test_result, test_results_trie)
results[self.TESTS] = test_results_trie
def _populate_results_and_times_json(self, results_and_times):
results_and_times[self.RESULTS] = []
results_and_times[self.TIMES] = []
return results_and_times
def _create_results_for_builder_json(self):
results_for_builder = {}
results_for_builder[self.TESTS] = {}
return results_for_builder
def _remove_items_over_max_number_of_builds(self, encoded_list):
"""Removes items from the run-length encoded list after the final
item that exceeds the max number of builds to track.
Args:
encoded_results: run-length encoded results. An array of arrays, e.g.
[[3,'A'],[1,'Q']] encodes AAAQ.
"""
num_builds = 0
index = 0
for result in encoded_list:
num_builds = num_builds + result[0]
index = index + 1
if num_builds > self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG:
return encoded_list[:index]
return encoded_list
def _normalize_results_json(self, test, test_name, tests):
""" Prune tests where all runs pass or tests that no longer exist and
truncate all results to maxNumberOfBuilds.
Args:
test: ResultsAndTimes object for this test.
test_name: Name of the test.
tests: The JSON object with all the test results for this builder.
"""
test[self.RESULTS] = self._remove_items_over_max_number_of_builds(
test[self.RESULTS])
test[self.TIMES] = self._remove_items_over_max_number_of_builds(
test[self.TIMES])
is_all_pass = self._is_results_all_of_type(test[self.RESULTS],
self.PASS_RESULT)
is_all_no_data = self._is_results_all_of_type(test[self.RESULTS],
self.NO_DATA_RESULT)
max_time = max([time[1] for time in test[self.TIMES]])
# Remove all passes/no-data from the results to reduce noise and
# filesize. If a test passes every run, but takes > MIN_TIME to run,
# don't throw away the data.
if is_all_no_data or (is_all_pass and max_time <= self.MIN_TIME):
del tests[test_name]
def _is_results_all_of_type(self, results, type):
"""Returns whether all the results are of the given type
(e.g. all passes)."""
return len(results) == 1 and results[0][1] == type
# Left here not to break anything.
class JSONResultsGenerator(JSONResultsGeneratorBase):
pass
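# Added illustration (not in the original file): how
# JSONResultsGeneratorBase._insert_item_run_length_encoded() evolves an
# encoded list, newest result first, for some generator instance.
#
#     encoded = [[3, 'A'], [1, 'Q']]                     # encodes AAAQ
#     generator._insert_item_run_length_encoded('A', encoded)
#     # encoded is now [[4, 'A'], [1, 'Q']]              # AAAAQ
#     generator._insert_item_run_length_encoded('F', encoded)
#     # encoded is now [[1, 'F'], [4, 'A'], [1, 'Q']]    # FAAAAQ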
|
WebKit/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
| 25,303 |
Copyright (C) 2010 Google Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. python 2.5 compatibility A JSON results generator for generic tests. FIXME: move this code out of the layout_package directory. FIXME: Kill this code once the server returns json instead of jsonp. Specify separators in order to get compact encoding. FIXME: We already have a TestResult class in test_results.py Test modifier constants. Min time (seconds) that will be added to the JSON. Note that in non-chromium tests those chars are used to indicate test modifiers (FAILS, FLAKY, etc) but not actual test results. FIXME: Remove generate_incremental_results once the reference to it in http://src.chromium.org/viewvc/chrome/trunk/tools/build/scripts/slave/gtest_slave_utils.py has been removed. FIXME: rename to generate_times_ms_file. This needs to be coordinated with changing the calls to this on the chromium build slaves. If there was an error don't write a results.json file at all as it would lose all the information on the bot. Update the all failing tests with result type and time. Set uploading timeout in case appengine server is having problem. 120 seconds are more than enough to upload test results. Floor for now to get time in seconds. FIXME: Callers should use scm.py instead. FIXME: Identify and fix the run-time errors that were observed on Windows chromium buildbot when we had updated this code to use scm.py once before. Note: Not thread safe: http://bugs.python.org/issue2320 FIXME: We should talk to the network via a Host object. A non-4xx status code means the bot is hosed for some reason and we can't grab the results.json file off of it. Strip the prefix and suffix so we can get the actual JSON object. The JSON file is not valid JSON. Just clobber the results. Insert the number of tests that failed or skipped. Create a test modifiers (FAILS, FLAKY etc) summary dictionary. Insert the pass/skip/failure summary dictionary. Insert the number of all the tests that are supposed to pass. Use a list instead of a class for the run-length encoding since we want the serialized form to be concise. Include SVN revisions for the given repositories. 
version 3->4 Remove all passes/no-data from the results to reduce noise and filesize. If a test passes every run, but takes > MIN_TIME to run, don't throw away the data. Left here not to break anything.
| 3,754 |
en
| 0.870698 |
################################################################################
#
# MIT License
#
# Copyright (c) 2020 Advanced Micro Devices, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
################################################################################
from __future__ import print_function
import argparse
import sys
import os
import shutil
from python import *
OUT_DIR='out'
def igemm_flatten(args, config_content):
asm_target = os.path.join(args.dir, os.path.splitext(os.path.basename(args.config_file))[0] + '.s')
emitter = mc_emit_to_file_t(asm_target)
sec_root = config_content.get_section('codegen')[0]
arch = amdgpu_arch_config_t({
'arch' : amdgpu_string_to_arch( sec_root['arch'] ),
'data_type' : AMDGPU_PRECISION_FP32,
'code_object' : amdgpu_string_to_codeobj( sec_root['code_object']) })
# create mc
mc = mc_asm_printer_t(emitter, arch)
mc_set_current(mc)
tunable_dicts = [sec.to_dict() for sec in config_content if sec.get_name().startswith('igemm_')]
for td in tunable_dicts:
td['arch'] = sec_root['arch'] # append arch to each section
codegen_driver_t(mc, tunable_dicts)(split_kernel = args.split_kernel)
# os.chmod(asm_target, 0x777)
def igemm_out_tunable_param(output_file, config_content):
sec_root = config_content.get_section('codegen')[0]
list_emitter = mc_emit_to_file_t(output_file)
list_emitter.open()
tunable_dicts = [sec.to_dict() for sec in config_content if sec.get_name().startswith('igemm_')]
for td in tunable_dicts:
td['arch'] = sec_root['arch'] # append arch to each section
td_item = igemm_gtc_tunable_parameter_t(td)
list_emitter.emit(td_item.output())
list_emitter.close()
def igemm_check_fp16_configs(config_content):
tunable_dicts = [sec.to_dict() for sec in config_content if sec.get_name().startswith('igemm_')]
for td in tunable_dicts:
if "fp16" in td['precision']:
return True
return False
def igemm_check_int8_configs(config_content):
tunable_dicts = [sec.to_dict() for sec in config_content if sec.get_name().startswith('igemm_')]
for td in tunable_dicts:
if "int8" in td['precision']:
return True
return False
def igemm_check_bf16_configs(config_content):
tunable_dicts = [sec.to_dict() for sec in config_content if sec.get_name().startswith('igemm_')]
for td in tunable_dicts:
if "bf16" in td['precision']:
return True
return False
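# Illustrative consolidation (not part of the original file): the three
# igemm_check_*_configs helpers above differ only in the precision string they
# search for, so one parameterized helper can cover all of them. It relies only
# on what the code above already uses: config_content is iterable and every
# section exposes get_name() and to_dict().
def igemm_check_precision_configs(config_content, precision):
    tunable_dicts = [sec.to_dict() for sec in config_content if sec.get_name().startswith('igemm_')]
    return any(precision in td['precision'] for td in tunable_dicts)
# e.g. igemm_check_precision_configs(config_content, "fp16") gives the same
# result as igemm_check_fp16_configs(config_content).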
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("config_file", help="config file as input")
parser.add_argument("-d", "--dir", help="directory of output files", default = OUT_DIR)
parser.add_argument("-output", nargs='?', const='tunable_parameter_list.txt', help="output tunable parameter list")
parser.add_argument("-s", "--split_kernel", action="store_true")
args = parser.parse_args()
config_parser = config_parser_t(args.config_file)
#print(os.getcwd())
config_content = config_parser()
#config_content.dump()
#print(args.output)
if args.output:
igemm_out_tunable_param(args.output, config_content)
arch = config_content.get_section('codegen')[0]['arch']
code_object = config_content.get_section('codegen')[0]['code_object']
has_fp16_config = igemm_check_fp16_configs(config_content)
has_int8_config = igemm_check_int8_configs(config_content)
has_bf16_config = igemm_check_bf16_configs(config_content)
if config_content.get_section('codegen')[0]['mode'] in ('flat', 'flatten'):
if os.path.exists(args.dir):
shutil.rmtree(args.dir)
os.mkdir(args.dir)
cxxflags = []
if args.split_kernel:
cxxflags += ["-DIGEMM_SPLIT_KERNEL"]
        host_driver(cxxflags=cxxflags, arch=arch, config_file=args.config_file,
                    out_dir=args.dir, has_fp16_config=has_fp16_config,
                    has_int8_config=has_int8_config, has_bf16_config=has_bf16_config)
igemm_flatten(args, config_content)
if config_content.get_section('codegen')[0]['mode'] in ('seq', 'sequencer'):
# config_content.dump()
# igemm_sequence(args, config_content)
if os.path.exists(args.dir):
shutil.rmtree(args.dir)
os.mkdir(args.dir)
sequence_driver(arch=arch, code_object=code_object,
config_content=config_content, out_dir=args.dir )
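# Typical invocation (illustrative; the config file name below is hypothetical):
#   python igemm_codegen.py my_config.config -d out
# With mode set to flat/flatten the output directory is recreated, the host
# driver is built and <config basename>.s is emitted; with mode set to
# seq/sequencer the sequence_driver branch above runs instead.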
|
igemm_codegen.py
| 5,531 |
MIT License Copyright (c) 2020 Advanced Micro Devices, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. create mc append arch to each section os.chmod(asm_target, 0x777) append arch to each sectionprint(os.getcwd())config_content.dump()print(args.output) config_content.dump() igemm_sequence(args, config_content)
| 1,311 |
en
| 0.816655 |
from __future__ import unicode_literals
from boto.ec2.instancetype import InstanceType
from moto.core.responses import BaseResponse
from moto.core.utils import camelcase_to_underscores
from moto.ec2.utils import instance_ids_from_querystring, filters_from_querystring, \
dict_from_querystring, optional_from_querystring
class InstanceResponse(BaseResponse):
def describe_instances(self):
filter_dict = filters_from_querystring(self.querystring)
instance_ids = instance_ids_from_querystring(self.querystring)
if instance_ids:
reservations = self.ec2_backend.get_reservations_by_instance_ids(
instance_ids, filters=filter_dict)
else:
reservations = self.ec2_backend.all_reservations(
make_copy=True, filters=filter_dict)
template = self.response_template(EC2_DESCRIBE_INSTANCES)
return template.render(reservations=reservations)
def run_instances(self):
min_count = int(self.querystring.get('MinCount', ['1'])[0])
image_id = self.querystring.get('ImageId')[0]
user_data = self.querystring.get('UserData')
security_group_names = self._get_multi_param('SecurityGroup')
security_group_ids = self._get_multi_param('SecurityGroupId')
nics = dict_from_querystring("NetworkInterface", self.querystring)
instance_type = self.querystring.get("InstanceType", ["m1.small"])[0]
placement = self.querystring.get(
"Placement.AvailabilityZone", [None])[0]
subnet_id = self.querystring.get("SubnetId", [None])[0]
private_ip = self.querystring.get("PrivateIpAddress", [None])[0]
associate_public_ip = self.querystring.get(
"AssociatePublicIpAddress", [None])[0]
key_name = self.querystring.get("KeyName", [None])[0]
if self.is_not_dryrun('RunInstance'):
new_reservation = self.ec2_backend.add_instances(
image_id, min_count, user_data, security_group_names,
instance_type=instance_type, placement=placement, subnet_id=subnet_id,
key_name=key_name, security_group_ids=security_group_ids,
nics=nics, private_ip=private_ip, associate_public_ip=associate_public_ip)
template = self.response_template(EC2_RUN_INSTANCES)
return template.render(reservation=new_reservation)
def terminate_instances(self):
instance_ids = instance_ids_from_querystring(self.querystring)
if self.is_not_dryrun('TerminateInstance'):
instances = self.ec2_backend.terminate_instances(instance_ids)
template = self.response_template(EC2_TERMINATE_INSTANCES)
return template.render(instances=instances)
def reboot_instances(self):
instance_ids = instance_ids_from_querystring(self.querystring)
if self.is_not_dryrun('RebootInstance'):
instances = self.ec2_backend.reboot_instances(instance_ids)
template = self.response_template(EC2_REBOOT_INSTANCES)
return template.render(instances=instances)
def stop_instances(self):
instance_ids = instance_ids_from_querystring(self.querystring)
if self.is_not_dryrun('StopInstance'):
instances = self.ec2_backend.stop_instances(instance_ids)
template = self.response_template(EC2_STOP_INSTANCES)
return template.render(instances=instances)
def start_instances(self):
instance_ids = instance_ids_from_querystring(self.querystring)
if self.is_not_dryrun('StartInstance'):
instances = self.ec2_backend.start_instances(instance_ids)
template = self.response_template(EC2_START_INSTANCES)
return template.render(instances=instances)
def describe_instance_status(self):
instance_ids = instance_ids_from_querystring(self.querystring)
include_all_instances = optional_from_querystring('IncludeAllInstances',
self.querystring) == 'true'
if instance_ids:
instances = self.ec2_backend.get_multi_instances_by_id(
instance_ids)
elif include_all_instances:
instances = self.ec2_backend.all_instances()
else:
instances = self.ec2_backend.all_running_instances()
template = self.response_template(EC2_INSTANCE_STATUS)
return template.render(instances=instances)
def describe_instance_types(self):
instance_types = [InstanceType(
name='t1.micro', cores=1, memory=644874240, disk=0)]
template = self.response_template(EC2_DESCRIBE_INSTANCE_TYPES)
return template.render(instance_types=instance_types)
def describe_instance_attribute(self):
# TODO this and modify below should raise IncorrectInstanceState if
# instance not in stopped state
attribute = self.querystring.get("Attribute")[0]
key = camelcase_to_underscores(attribute)
instance_ids = instance_ids_from_querystring(self.querystring)
instance_id = instance_ids[0]
instance, value = self.ec2_backend.describe_instance_attribute(
instance_id, key)
if key == "group_set":
template = self.response_template(
EC2_DESCRIBE_INSTANCE_GROUPSET_ATTRIBUTE)
else:
template = self.response_template(EC2_DESCRIBE_INSTANCE_ATTRIBUTE)
return template.render(instance=instance, attribute=attribute, value=value)
def modify_instance_attribute(self):
handlers = [self._dot_value_instance_attribute_handler,
self._block_device_mapping_handler,
self._security_grp_instance_attribute_handler]
for handler in handlers:
success = handler()
if success:
return success
msg = "This specific call to ModifyInstanceAttribute has not been" \
" implemented in Moto yet. Feel free to open an issue at" \
" https://github.com/spulec/moto/issues"
raise NotImplementedError(msg)
def _block_device_mapping_handler(self):
"""
Handles requests which are generated by code similar to:
instance.modify_attribute('blockDeviceMapping', {'/dev/sda1': True})
The querystring contains information similar to:
BlockDeviceMapping.1.Ebs.DeleteOnTermination : ['true']
BlockDeviceMapping.1.DeviceName : ['/dev/sda1']
For now we only support the "BlockDeviceMapping.1.Ebs.DeleteOnTermination"
configuration, but it should be trivial to add anything else.
"""
mapping_counter = 1
mapping_device_name_fmt = 'BlockDeviceMapping.%s.DeviceName'
mapping_del_on_term_fmt = 'BlockDeviceMapping.%s.Ebs.DeleteOnTermination'
while True:
mapping_device_name = mapping_device_name_fmt % mapping_counter
if mapping_device_name not in self.querystring.keys():
break
mapping_del_on_term = mapping_del_on_term_fmt % mapping_counter
del_on_term_value_str = self.querystring[mapping_del_on_term][0]
del_on_term_value = True if 'true' == del_on_term_value_str else False
device_name_value = self.querystring[mapping_device_name][0]
instance_ids = instance_ids_from_querystring(self.querystring)
instance_id = instance_ids[0]
instance = self.ec2_backend.get_instance(instance_id)
if self.is_not_dryrun('ModifyInstanceAttribute'):
block_device_type = instance.block_device_mapping[
device_name_value]
block_device_type.delete_on_termination = del_on_term_value
# +1 for the next device
mapping_counter += 1
if mapping_counter > 1:
return EC2_MODIFY_INSTANCE_ATTRIBUTE
def _dot_value_instance_attribute_handler(self):
attribute_key = None
for key, value in self.querystring.items():
if '.Value' in key:
attribute_key = key
break
if not attribute_key:
return
if self.is_not_dryrun('Modify' + attribute_key.split(".")[0]):
value = self.querystring.get(attribute_key)[0]
normalized_attribute = camelcase_to_underscores(
attribute_key.split(".")[0])
instance_ids = instance_ids_from_querystring(self.querystring)
instance_id = instance_ids[0]
self.ec2_backend.modify_instance_attribute(
instance_id, normalized_attribute, value)
return EC2_MODIFY_INSTANCE_ATTRIBUTE
def _security_grp_instance_attribute_handler(self):
new_security_grp_list = []
for key, value in self.querystring.items():
if 'GroupId.' in key:
new_security_grp_list.append(self.querystring.get(key)[0])
instance_ids = instance_ids_from_querystring(self.querystring)
instance_id = instance_ids[0]
if self.is_not_dryrun('ModifyInstanceSecurityGroups'):
self.ec2_backend.modify_instance_security_groups(
instance_id, new_security_grp_list)
return EC2_MODIFY_INSTANCE_ATTRIBUTE
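# Illustrative sketch (not part of moto): the BlockDeviceMapping querystring
# layout documented in _block_device_mapping_handler can be decoded with a
# small standalone generator; the helper name here is hypothetical.
def _iter_block_device_mappings(querystring):
    """Yield (device_name, delete_on_termination) pairs from a parsed querystring."""
    counter = 1
    while 'BlockDeviceMapping.%s.DeviceName' % counter in querystring:
        device_name = querystring['BlockDeviceMapping.%s.DeviceName' % counter][0]
        del_on_term_key = 'BlockDeviceMapping.%s.Ebs.DeleteOnTermination' % counter
        del_on_term = querystring.get(del_on_term_key, ['false'])[0] == 'true'
        yield device_name, del_on_term
        counter += 1
# Example:
#   list(_iter_block_device_mappings({
#       'BlockDeviceMapping.1.DeviceName': ['/dev/sda1'],
#       'BlockDeviceMapping.1.Ebs.DeleteOnTermination': ['true']}))
#   -> [('/dev/sda1', True)]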
EC2_RUN_INSTANCES = """<RunInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<reservationId>{{ reservation.id }}</reservationId>
<ownerId>123456789012</ownerId>
<groupSet>
<item>
<groupId>sg-245f6a01</groupId>
<groupName>default</groupName>
</item>
</groupSet>
<instancesSet>
{% for instance in reservation.instances %}
<item>
<instanceId>{{ instance.id }}</instanceId>
<imageId>{{ instance.image_id }}</imageId>
<instanceState>
<code>0</code>
<name>pending</name>
</instanceState>
<privateDnsName>{{ instance.private_dns }}</privateDnsName>
<publicDnsName>{{ instance.public_dns }}</publicDnsName>
<dnsName>{{ instance.public_dns }}</dnsName>
<reason/>
<keyName>{{ instance.key_name }}</keyName>
<amiLaunchIndex>0</amiLaunchIndex>
<instanceType>{{ instance.instance_type }}</instanceType>
<launchTime>{{ instance.launch_time }}</launchTime>
<placement>
<availabilityZone>{{ instance.placement}}</availabilityZone>
<groupName/>
<tenancy>default</tenancy>
</placement>
<monitoring>
<state>enabled</state>
</monitoring>
{% if instance.nics %}
{% if instance.nics[0].subnet %}
<subnetId>{{ instance.nics[0].subnet.id }}</subnetId>
<vpcId>{{ instance.nics[0].subnet.vpc_id }}</vpcId>
{% endif %}
<privateIpAddress>{{ instance.private_ip }}</privateIpAddress>
{% if instance.public_ip %}
<ipAddress>{{ instance.public_ip }}</ipAddress>
{% endif %}
{% else %}
<subnetId>{{ instance.subnet_id }}</subnetId>
{% endif %}
<sourceDestCheck>{{ instance.source_dest_check }}</sourceDestCheck>
<groupSet>
{% for group in instance.dynamic_group_list %}
<item>
<groupId>{{ group.id }}</groupId>
<groupName>{{ group.name }}</groupName>
</item>
{% endfor %}
</groupSet>
{% if instance.platform %}
<platform>{{ instance.platform }}</platform>
{% endif %}
<virtualizationType>{{ instance.virtualization_type }}</virtualizationType>
<architecture>{{ instance.architecture }}</architecture>
<kernelId>{{ instance.kernel }}</kernelId>
<clientToken/>
<hypervisor>xen</hypervisor>
<ebsOptimized>false</ebsOptimized>
<networkInterfaceSet>
{% for nic in instance.nics.values() %}
<item>
<networkInterfaceId>{{ nic.id }}</networkInterfaceId>
{% if nic.subnet %}
<subnetId>{{ nic.subnet.id }}</subnetId>
<vpcId>{{ nic.subnet.vpc_id }}</vpcId>
{% endif %}
<description>Primary network interface</description>
<ownerId>123456789012</ownerId>
<status>in-use</status>
<macAddress>1b:2b:3c:4d:5e:6f</macAddress>
<privateIpAddress>{{ nic.private_ip_address }}</privateIpAddress>
<sourceDestCheck>{{ instance.source_dest_check }}</sourceDestCheck>
<groupSet>
{% for group in nic.group_set %}
<item>
<groupId>{{ group.id }}</groupId>
<groupName>{{ group.name }}</groupName>
</item>
{% endfor %}
</groupSet>
<attachment>
<attachmentId>{{ nic.attachment_id }}</attachmentId>
<deviceIndex>{{ nic.device_index }}</deviceIndex>
<status>attached</status>
<attachTime>2015-01-01T00:00:00Z</attachTime>
<deleteOnTermination>true</deleteOnTermination>
</attachment>
{% if nic.public_ip %}
<association>
<publicIp>{{ nic.public_ip }}</publicIp>
<ipOwnerId>123456789012</ipOwnerId>
</association>
{% endif %}
<privateIpAddressesSet>
<item>
<privateIpAddress>{{ nic.private_ip_address }}</privateIpAddress>
<primary>true</primary>
{% if nic.public_ip %}
<association>
<publicIp>{{ nic.public_ip }}</publicIp>
<ipOwnerId>123456789012</ipOwnerId>
</association>
{% endif %}
</item>
</privateIpAddressesSet>
</item>
{% endfor %}
</networkInterfaceSet>
</item>
{% endfor %}
</instancesSet>
</RunInstancesResponse>"""
EC2_DESCRIBE_INSTANCES = """<DescribeInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>fdcdcab1-ae5c-489e-9c33-4637c5dda355</requestId>
<reservationSet>
{% for reservation in reservations %}
<item>
<reservationId>{{ reservation.id }}</reservationId>
<ownerId>123456789012</ownerId>
<groupSet>
{% for group in reservation.dynamic_group_list %}
<item>
{% if group.id %}
<groupId>{{ group.id }}</groupId>
<groupName>{{ group.name }}</groupName>
{% else %}
<groupId>{{ group }}</groupId>
{% endif %}
</item>
{% endfor %}
</groupSet>
<instancesSet>
{% for instance in reservation.instances %}
<item>
<instanceId>{{ instance.id }}</instanceId>
<imageId>{{ instance.image_id }}</imageId>
<instanceState>
<code>{{ instance._state.code }}</code>
<name>{{ instance._state.name }}</name>
</instanceState>
<privateDnsName>{{ instance.private_dns }}</privateDnsName>
<publicDnsName>{{ instance.public_dns }}</publicDnsName>
<dnsName>{{ instance.public_dns }}</dnsName>
<reason>{{ instance._reason }}</reason>
<keyName>{{ instance.key_name }}</keyName>
<amiLaunchIndex>0</amiLaunchIndex>
<productCodes/>
<instanceType>{{ instance.instance_type }}</instanceType>
<launchTime>{{ instance.launch_time }}</launchTime>
<placement>
<availabilityZone>{{ instance.placement }}</availabilityZone>
<groupName/>
<tenancy>default</tenancy>
</placement>
{% if instance.platform %}
<platform>{{ instance.platform }}</platform>
{% endif %}
<monitoring>
<state>disabled</state>
</monitoring>
{% if instance.nics %}
{% if instance.nics[0].subnet %}
<subnetId>{{ instance.nics[0].subnet.id }}</subnetId>
<vpcId>{{ instance.nics[0].subnet.vpc_id }}</vpcId>
{% endif %}
<privateIpAddress>{{ instance.private_ip }}</privateIpAddress>
{% if instance.nics[0].public_ip %}
<ipAddress>{{ instance.nics[0].public_ip }}</ipAddress>
{% endif %}
{% endif %}
<sourceDestCheck>{{ instance.source_dest_check }}</sourceDestCheck>
<groupSet>
{% for group in instance.dynamic_group_list %}
<item>
{% if group.id %}
<groupId>{{ group.id }}</groupId>
<groupName>{{ group.name }}</groupName>
{% else %}
<groupId>{{ group }}</groupId>
{% endif %}
</item>
{% endfor %}
</groupSet>
<stateReason>
<code>{{ instance._state_reason.code }}</code>
<message>{{ instance._state_reason.message }}</message>
</stateReason>
<architecture>{{ instance.architecture }}</architecture>
<kernelId>{{ instance.kernel }}</kernelId>
<rootDeviceType>ebs</rootDeviceType>
<rootDeviceName>/dev/sda1</rootDeviceName>
<blockDeviceMapping>
{% for device_name,deviceobject in instance.get_block_device_mapping %}
<item>
<deviceName>{{ device_name }}</deviceName>
<ebs>
<volumeId>{{ deviceobject.volume_id }}</volumeId>
<status>{{ deviceobject.status }}</status>
<attachTime>{{ deviceobject.attach_time }}</attachTime>
<deleteOnTermination>{{ deviceobject.delete_on_termination }}</deleteOnTermination>
<size>{{deviceobject.size}}</size>
</ebs>
</item>
{% endfor %}
</blockDeviceMapping>
<virtualizationType>{{ instance.virtualization_type }}</virtualizationType>
<clientToken>ABCDE1234567890123</clientToken>
<tagSet>
{% for tag in instance.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
<hypervisor>xen</hypervisor>
<networkInterfaceSet>
{% for nic in instance.nics.values() %}
<item>
<networkInterfaceId>{{ nic.id }}</networkInterfaceId>
{% if nic.subnet %}
<subnetId>{{ nic.subnet.id }}</subnetId>
<vpcId>{{ nic.subnet.vpc_id }}</vpcId>
{% endif %}
<description>Primary network interface</description>
<ownerId>123456789012</ownerId>
<status>in-use</status>
<macAddress>1b:2b:3c:4d:5e:6f</macAddress>
<privateIpAddress>{{ nic.private_ip_address }}</privateIpAddress>
<sourceDestCheck>{{ instance.source_dest_check }}</sourceDestCheck>
<groupSet>
{% for group in nic.group_set %}
<item>
{% if group.id %}
<groupId>{{ group.id }}</groupId>
<groupName>{{ group.name }}</groupName>
{% else %}
<groupId>{{ group }}</groupId>
{% endif %}
</item>
{% endfor %}
</groupSet>
<attachment>
<attachmentId>{{ nic.attachment_id }}</attachmentId>
<deviceIndex>{{ nic.device_index }}</deviceIndex>
<status>attached</status>
<attachTime>2015-01-01T00:00:00Z</attachTime>
<deleteOnTermination>true</deleteOnTermination>
</attachment>
{% if nic.public_ip %}
<association>
<publicIp>{{ nic.public_ip }}</publicIp>
<ipOwnerId>123456789012</ipOwnerId>
</association>
{% endif %}
<privateIpAddressesSet>
<item>
<privateIpAddress>{{ nic.private_ip_address }}</privateIpAddress>
<primary>true</primary>
{% if nic.public_ip %}
<association>
<publicIp>{{ nic.public_ip }}</publicIp>
<ipOwnerId>123456789012</ipOwnerId>
</association>
{% endif %}
</item>
</privateIpAddressesSet>
</item>
{% endfor %}
</networkInterfaceSet>
</item>
{% endfor %}
</instancesSet>
</item>
{% endfor %}
</reservationSet>
</DescribeInstancesResponse>"""
EC2_TERMINATE_INSTANCES = """
<TerminateInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<instancesSet>
{% for instance in instances %}
<item>
<instanceId>{{ instance.id }}</instanceId>
<previousState>
<code>16</code>
<name>running</name>
</previousState>
<currentState>
<code>32</code>
<name>shutting-down</name>
</currentState>
</item>
{% endfor %}
</instancesSet>
</TerminateInstancesResponse>"""
EC2_STOP_INSTANCES = """
<StopInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<instancesSet>
{% for instance in instances %}
<item>
<instanceId>{{ instance.id }}</instanceId>
<previousState>
<code>16</code>
<name>running</name>
</previousState>
<currentState>
<code>64</code>
<name>stopping</name>
</currentState>
</item>
{% endfor %}
</instancesSet>
</StopInstancesResponse>"""
EC2_START_INSTANCES = """
<StartInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<instancesSet>
{% for instance in instances %}
<item>
<instanceId>{{ instance.id }}</instanceId>
<previousState>
<code>16</code>
<name>running</name>
</previousState>
<currentState>
<code>0</code>
<name>pending</name>
</currentState>
</item>
{% endfor %}
</instancesSet>
</StartInstancesResponse>"""
EC2_REBOOT_INSTANCES = """<RebootInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</RebootInstancesResponse>"""
EC2_DESCRIBE_INSTANCE_ATTRIBUTE = """<DescribeInstanceAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<instanceId>{{ instance.id }}</instanceId>
<{{ attribute }}>
<value>{{ value }}</value>
</{{ attribute }}>
</DescribeInstanceAttributeResponse>"""
EC2_DESCRIBE_INSTANCE_GROUPSET_ATTRIBUTE = """<DescribeInstanceAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<instanceId>{{ instance.id }}</instanceId>
<{{ attribute }}>
{% for sg_id in value %}
<item>
<groupId>{{ sg_id }}</groupId>
</item>
{% endfor %}
</{{ attribute }}>
</DescribeInstanceAttributeResponse>"""
EC2_MODIFY_INSTANCE_ATTRIBUTE = """<ModifyInstanceAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</ModifyInstanceAttributeResponse>"""
EC2_INSTANCE_STATUS = """<?xml version="1.0" encoding="UTF-8"?>
<DescribeInstanceStatusResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<instanceStatusSet>
{% for instance in instances %}
<item>
<instanceId>{{ instance.id }}</instanceId>
<availabilityZone>{{ instance.placement }}</availabilityZone>
<instanceState>
<code>{{ instance.state_code }}</code>
<name>{{ instance.state }}</name>
</instanceState>
{% if instance.state_code == 16 %}
<systemStatus>
<status>ok</status>
<details>
<item>
<name>reachability</name>
<status>passed</status>
</item>
</details>
</systemStatus>
<instanceStatus>
<status>ok</status>
<details>
<item>
<name>reachability</name>
<status>passed</status>
</item>
</details>
</instanceStatus>
{% else %}
<systemStatus>
<status>not-applicable</status>
</systemStatus>
<instanceStatus>
<status>not-applicable</status>
</instanceStatus>
{% endif %}
</item>
{% endfor %}
</instanceStatusSet>
</DescribeInstanceStatusResponse>"""
EC2_DESCRIBE_INSTANCE_TYPES = """<?xml version="1.0" encoding="UTF-8"?>
<DescribeInstanceTypesResponse xmlns="http://api.outscale.com/wsdl/fcuext/2014-04-15/">
<requestId>f8b86168-d034-4e65-b48d-3b84c78e64af</requestId>
<instanceTypeSet>
{% for instance_type in instance_types %}
<item>
<name>{{ instance_type.name }}</name>
<vcpu>{{ instance_type.cores }}</vcpu>
<memory>{{ instance_type.memory }}</memory>
<storageSize>{{ instance_type.disk }}</storageSize>
<storageCount>{{ instance_type.storageCount }}</storageCount>
<maxIpAddresses>{{ instance_type.maxIpAddresses }}</maxIpAddresses>
<ebsOptimizedAvailable>{{ instance_type.ebsOptimizedAvailable }}</ebsOptimizedAvailable>
</item>
{% endfor %}
</instanceTypeSet>
</DescribeInstanceTypesResponse>"""
|
moto/ec2/responses/instances.py
| 28,296 |
Handles requests which are generated by code similar to:
instance.modify_attribute('blockDeviceMapping', {'/dev/sda1': True})
The querystring contains information similar to:
BlockDeviceMapping.1.Ebs.DeleteOnTermination : ['true']
BlockDeviceMapping.1.DeviceName : ['/dev/sda1']
For now we only support the "BlockDeviceMapping.1.Ebs.DeleteOnTermination"
configuration, but it should be trivial to add anything else.
TODO this and modify below should raise IncorrectInstanceState if instance not in stopped state +1 for the next device
| 552 |
en
| 0.859155 |
import urllib.parse
from functools import partial, wraps
from pathlib import Path
from drfs import config
from drfs.util import prepend_scheme, remove_scheme
def get_fs(path, opts=None, rtype="instance"):
"""Helper to infer filesystem correctly.
Gets filesystem options from settings and updates them with given `opts`.
Parameters
----------
path: str
Path for which we want to infer filesystem.
opts: dict
Kwargs that will be passed to inferred filesystem instance.
rtype: str
Either 'instance' (default) or 'class'.
"""
from drfs.filesystems import FILESYSTEMS
try:
protocol = path.scheme
except AttributeError:
protocol = _get_protocol(path)
try:
cls = FILESYSTEMS[protocol]
if rtype == "class":
return cls
except KeyError:
raise KeyError(
f"No filesystem for protocol {protocol}. Try "
f"installing it. Available protocols are: "
f"{set(FILESYSTEMS.keys())}"
)
config_scheme_key = protocol if protocol else "file"
opts_ = config["fs_opts"][config_scheme_key].get(dict).copy() # type: dict
if opts is not None:
opts_.update(opts)
opts_ = _fix_opts_abfs(cls, path, opts_)
return cls(**opts_)
def _get_protocol(path):
if "://" in str(path):
protocol = urllib.parse.urlparse(str(path)).scheme
else:
# most likely a windows path, basically if in doubt assume local
protocol = ""
return protocol
def _fix_opts_abfs(cls, path, opts: dict):
try:
from drfs.filesystems.azure_blob import AzureBlobFileSystem, extract_abfs_parts
except ImportError:
AzureBlobFileSystem = extract_abfs_parts = None
if (
AzureBlobFileSystem is not None
and cls is AzureBlobFileSystem
and "account_name" not in opts
):
opts = opts.copy()
opts["account_name"] = extract_abfs_parts(path)[0]
return opts
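# Illustrative usage of get_fs (the paths and options below are hypothetical):
#
#     fs_cls = get_fs("s3://bucket/key", rtype="class")    # registered class for "s3"
#     fs = get_fs("s3://bucket/key", opts={"anon": True})  # configured instance
#
# A plain local path such as "/tmp/data.csv" contains no "://", so
# _get_protocol returns "" and the options come from config["fs_opts"]["file"].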
def allow_pathlib(func):
"""Allow methods to receive pathlib.Path objects.
Parameters
----------
func: callable
function to decorate must have the following signature
self, path, *args, **kwargs
Returns
-------
wrapper: callable
"""
@wraps(func)
def wrapper(self, path, *args, **kwargs):
# Can only be used if path is passed as first argument right
# after self
from drfs.path import asstr
p = asstr(path)
return func(self, p, *args, **kwargs)
return wrapper
def return_pathlib(func):
@wraps(func)
def wrapper(self, path, *args, **kwargs):
from drfs.path import aspath
res = func(self, path, *args, **kwargs)
as_path = aspath(res)
return as_path
return wrapper
def return_schemes(func):
"""Make sure method returns full path with scheme."""
@wraps(func)
def wrapper(self, path, *args, **kwargs):
res = func(self, path, *args, **kwargs)
try:
res = list(map(partial(prepend_scheme, self.scheme), res))
except TypeError:
res = prepend_scheme(self.scheme, res)
return res
return wrapper
def maybe_remove_scheme(func):
"""Remove scheme from args and kwargs in case underlying fs does not support it."""
@wraps(func)
def wrapper(self, path, *args, **kwargs):
if not self.supports_scheme:
path = remove_scheme(path, raise_=False)
args = [remove_scheme(a, raise_=False) for a in args]
kwargs = {
k: remove_scheme(v, raise_=False) if isinstance(v, (Path, str)) else v
for k, v in kwargs.items()
}
return func(self, path, *args, **kwargs)
return wrapper
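# Minimal sketch of how these decorators compose on a filesystem method
# (FakeFileSystem, its scheme value and ls() are hypothetical; the stacking
# order used by the real drfs filesystem classes may differ):
class FakeFileSystem:
    scheme = "s3://"
    supports_scheme = False

    @return_schemes       # re-attach the scheme to every returned path
    @allow_pathlib        # accept str or pathlib.Path input
    @maybe_remove_scheme  # strip the scheme before the method body sees the path
    def ls(self, path):
        return [path + "/a.txt", path + "/b.txt"]
# A call like FakeFileSystem().ls("s3://bucket/dir") converts the input to str,
# strips the scheme for the body, and prepends it again to the returned paths.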
|
drfs/filesystems/util.py
| 3,791 |
Allow methods to receive pathlib.Path objects.
Parameters
----------
func: callable
function to decorate must have the following signature
self, path, *args, **kwargs
Returns
-------
wrapper: callable
Helper to infer filesystem correctly.
Gets filesystem options from settings and updates them with given `opts`.
Parameters
----------
path: str
Path for which we want to infer filesystem.
opts: dict
Kwargs that will be passed to inferred filesystem instance.
rtype: str
Either 'instance' (default) or 'class'.
Remove scheme from args and kwargs in case underlying fs does not support it.
Make sure method returns full path with scheme.
type: dict most likely a windows path, basically if in doubt assume local Can only be used if path is passed as first argument right after self
| 806 |
en
| 0.735791 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import json
import textwrap
import unittest
from contextlib import redirect_stdout
from airflow.cli import cli_parser
from airflow.cli.commands import plugins_command
from airflow.hooks.base import BaseHook
from airflow.listeners.listener import get_listener_manager
from airflow.plugins_manager import AirflowPlugin
from tests.plugins.test_plugin import AirflowTestPlugin as ComplexAirflowPlugin
from tests.test_utils.mock_plugins import mock_plugin_manager
class PluginHook(BaseHook):
pass
class TestPlugin(AirflowPlugin):
name = "test-plugin-cli"
hooks = [PluginHook]
class TestPluginsCommand(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.parser = cli_parser.get_parser()
@mock_plugin_manager(plugins=[])
def test_should_display_no_plugins(self):
with redirect_stdout(io.StringIO()) as temp_stdout:
plugins_command.dump_plugins(self.parser.parse_args(['plugins', '--output=json']))
stdout = temp_stdout.getvalue()
assert 'No plugins loaded' in stdout
@mock_plugin_manager(plugins=[ComplexAirflowPlugin])
def test_should_display_one_plugins(self):
with redirect_stdout(io.StringIO()) as temp_stdout:
plugins_command.dump_plugins(self.parser.parse_args(['plugins', '--output=json']))
stdout = temp_stdout.getvalue()
print(stdout)
info = json.loads(stdout)
assert info == [
{
'name': 'test_plugin',
'macros': ['tests.plugins.test_plugin.plugin_macro'],
'executors': ['tests.plugins.test_plugin.PluginExecutor'],
'flask_blueprints': [
"<flask.blueprints.Blueprint: name='test_plugin' import_name='tests.plugins.test_plugin'>"
],
'appbuilder_views': [
{
'name': 'Test View',
'category': 'Test Plugin',
'view': 'tests.plugins.test_plugin.PluginTestAppBuilderBaseView',
}
],
'global_operator_extra_links': [
'<tests.test_utils.mock_operators.AirflowLink object>',
'<tests.test_utils.mock_operators.GithubLink object>',
],
'timetables': ['tests.plugins.test_plugin.CustomCronDataIntervalTimetable'],
'operator_extra_links': [
'<tests.test_utils.mock_operators.GoogleLink object>',
'<tests.test_utils.mock_operators.AirflowLink2 object>',
'<tests.test_utils.mock_operators.CustomOpLink object>',
'<tests.test_utils.mock_operators.CustomBaseIndexOpLink object>',
],
'hooks': ['tests.plugins.test_plugin.PluginHook'],
'listeners': ['tests.listeners.empty_listener'],
'source': None,
'appbuilder_menu_items': [
{'name': 'Google', 'href': 'https://www.google.com', 'category': 'Search'},
{
'name': 'apache',
'href': 'https://www.apache.org/',
'label': 'The Apache Software Foundation',
},
],
'ti_deps': ['<TIDep(CustomTestTriggerRule)>'],
}
]
get_listener_manager().clear()
@mock_plugin_manager(plugins=[TestPlugin])
def test_should_display_one_plugins_as_table(self):
with redirect_stdout(io.StringIO()) as temp_stdout:
plugins_command.dump_plugins(self.parser.parse_args(['plugins', '--output=table']))
stdout = temp_stdout.getvalue()
# Remove leading spaces
stdout = "\n".join(line.rstrip(" ") for line in stdout.splitlines())
# Assert that only columns with values are displayed
expected_output = textwrap.dedent(
"""\
name | hooks
================+===================================================
test-plugin-cli | tests.cli.commands.test_plugins_command.PluginHook
"""
)
self.assertEqual(stdout, expected_output)
|
tests/cli/commands/test_plugins_command.py
| 5,043 |
Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Remove leading spaces Assert that only columns with values are displayed
| 825 |
en
| 0.868997 |
"""Conversion tool from SQD to FIF.
RawKIT class is adapted from Denis Engemann et al.'s mne_bti2fiff.py.
"""
# Authors: Teon Brooks <[email protected]>
# Joan Massich <[email protected]>
# Christian Brodbeck <[email protected]>
#
# License: BSD (3-clause)
from collections import defaultdict, OrderedDict
from math import sin, cos
from os import SEEK_CUR, path as op
from struct import unpack
import numpy as np
from scipy import linalg
from ..pick import pick_types
from ...utils import (verbose, logger, warn, fill_doc, _check_option,
_stamp_to_dt)
from ...transforms import apply_trans, als_ras_trans
from ..base import BaseRaw
from ..utils import _mult_cal_one
from ...epochs import BaseEpochs
from ..constants import FIFF
from ..meas_info import _empty_info
from .constants import KIT, LEGACY_AMP_PARAMS
from .coreg import read_mrk
from ...event import read_events
from .._digitization import _set_dig_kit
def _call_digitization(info, mrk, elp, hsp, kit_info):
# Use values from kit_info only if all others are None
if mrk is None and elp is None and hsp is None:
mrk = kit_info.get('mrk', None)
elp = kit_info.get('elp', None)
hsp = kit_info.get('hsp', None)
# prepare mrk
if isinstance(mrk, list):
mrk = [read_mrk(marker) if isinstance(marker, str)
else marker for marker in mrk]
mrk = np.mean(mrk, axis=0)
# setup digitization
if mrk is not None and elp is not None and hsp is not None:
dig_points, dev_head_t = _set_dig_kit(
mrk, elp, hsp, kit_info['eeg_dig'])
info['dig'] = dig_points
info['dev_head_t'] = dev_head_t
elif mrk is not None or elp is not None or hsp is not None:
raise ValueError("mrk, elp and hsp need to be provided as a group "
"(all or none)")
return info
class UnsupportedKITFormat(ValueError):
"""Our reader is not guaranteed to work with old files."""
def __init__(self, sqd_version, *args, **kwargs): # noqa: D102
self.sqd_version = sqd_version
ValueError.__init__(self, *args, **kwargs)
@fill_doc
class RawKIT(BaseRaw):
"""Raw object from KIT SQD file.
Parameters
----------
input_fname : str
Path to the sqd file.
mrk : None | str | array_like, shape (5, 3) | list of str or array_like
Marker points representing the location of the marker coils with
respect to the MEG Sensors, or path to a marker file.
If list, all of the markers will be averaged together.
elp : None | str | array_like, shape (8, 3)
Digitizer points representing the location of the fiducials and the
marker coils with respect to the digitized head shape, or path to a
file containing these points.
hsp : None | str | array, shape (n_points, 3)
Digitizer head shape points, or path to head shape file. If more than
10,000 points are in the head shape, they are automatically decimated.
stim : list of int | '<' | '>' | None
Channel-value correspondence when converting KIT trigger channels to a
Neuromag-style stim channel. For '<', the largest values are assigned
to the first channel (default). For '>', the largest values are
assigned to the last channel. Can also be specified as a list of
trigger channel indexes. If None, no synthesized channel is generated.
slope : '+' | '-'
How to interpret values on KIT trigger channels when synthesizing a
Neuromag-style stim channel. With '+', a positive slope (low-to-high)
is interpreted as an event. With '-', a negative slope (high-to-low)
is interpreted as an event.
stimthresh : float
The threshold level for accepting voltage changes in KIT trigger
channels as a trigger event. If None, stim must also be set to None.
%(preload)s
stim_code : 'binary' | 'channel'
How to decode trigger values from stim channels. 'binary' read stim
channel events as binary code, 'channel' encodes channel number.
allow_unknown_format : bool
Force reading old data that is not officially supported. Alternatively,
read and re-save the data with the KIT MEG Laboratory application.
%(standardize_names)s
%(verbose)s
Notes
-----
``elp`` and ``hsp`` are usually the exported text files (*.txt) from the
Polhemus FastScan system. hsp refers to the headshape surface points. elp
    refers to the points in head-space that correspond to the HPI points.
Currently, '*.elp' and '*.hsp' files are NOT supported.
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
@verbose
def __init__(self, input_fname, mrk=None, elp=None, hsp=None, stim='>',
slope='-', stimthresh=1, preload=False, stim_code='binary',
allow_unknown_format=False, standardize_names=None,
verbose=None): # noqa: D102
logger.info('Extracting SQD Parameters from %s...' % input_fname)
input_fname = op.abspath(input_fname)
self.preload = False
logger.info('Creating Raw.info structure...')
info, kit_info = get_kit_info(
input_fname, allow_unknown_format, standardize_names)
kit_info['slope'] = slope
kit_info['stimthresh'] = stimthresh
if kit_info['acq_type'] != KIT.CONTINUOUS:
raise TypeError('SQD file contains epochs, not raw data. Wrong '
'reader.')
logger.info('Creating Info structure...')
last_samps = [kit_info['n_samples'] - 1]
self._raw_extras = [kit_info]
self._set_stimchannels(info, stim, stim_code)
super(RawKIT, self).__init__(
info, preload, last_samps=last_samps, filenames=[input_fname],
raw_extras=self._raw_extras, verbose=verbose)
self.info = _call_digitization(
info=self.info, mrk=mrk, elp=elp, hsp=hsp, kit_info=kit_info)
logger.info('Ready.')
def read_stim_ch(self, buffer_size=1e5):
"""Read events from data.
        Parameters
        ----------
buffer_size : int
            The size of the chunk by which the data are scanned.
Returns
-------
events : array, [samples]
The event vector (1 x samples).
"""
buffer_size = int(buffer_size)
start = int(self.first_samp)
stop = int(self.last_samp + 1)
pick = pick_types(self.info, meg=False, ref_meg=False,
stim=True, exclude=[])
stim_ch = np.empty((1, stop), dtype=np.int64)
for b_start in range(start, stop, buffer_size):
b_stop = b_start + buffer_size
x = self[pick, b_start:b_stop][0]
stim_ch[:, b_start:b_start + x.shape[1]] = x
return stim_ch
def _set_stimchannels(self, info, stim, stim_code):
"""Specify how the trigger channel is synthesized from analog channels.
Has to be done before loading data. For a RawKIT instance that has been
created with preload=True, this method will raise a
NotImplementedError.
Parameters
----------
info : instance of MeasInfo
The measurement info.
stim : list of int | '<' | '>'
Can be submitted as list of trigger channels.
If a list is not specified, the default triggers extracted from
misc channels will be used with specified directionality.
            '<' means that the largest values are assigned to the first channel
in sequence.
            '>' means that the largest values are assigned to the last channel
in sequence.
stim_code : 'binary' | 'channel'
How to decode trigger values from stim channels. 'binary' read stim
channel events as binary code, 'channel' encodes channel number.
"""
if self.preload:
raise NotImplementedError("Can't change stim channel after "
"loading data")
_check_option('stim_code', stim_code, ['binary', 'channel'])
if stim is not None:
if isinstance(stim, str):
picks = _default_stim_chs(info)
if stim == '<':
stim = picks[::-1]
elif stim == '>':
stim = picks
else:
raise ValueError("stim needs to be list of int, '>' or "
"'<', not %r" % str(stim))
else:
stim = np.asarray(stim, int)
if stim.max() >= self._raw_extras[0]['nchan']:
raise ValueError(
'Got stim=%s, but sqd file only has %i channels' %
(stim, self._raw_extras[0]['nchan']))
# modify info
nchan = self._raw_extras[0]['nchan'] + 1
info['chs'].append(dict(
cal=KIT.CALIB_FACTOR, logno=nchan, scanno=nchan, range=1.0,
unit=FIFF.FIFF_UNIT_NONE, unit_mul=FIFF.FIFF_UNITM_NONE,
ch_name='STI 014',
coil_type=FIFF.FIFFV_COIL_NONE, loc=np.full(12, np.nan),
kind=FIFF.FIFFV_STIM_CH, coord_frame=FIFF.FIFFV_COORD_UNKNOWN))
info._update_redundant()
self._raw_extras[0]['stim'] = stim
self._raw_extras[0]['stim_code'] = stim_code
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
"""Read a chunk of raw data."""
sqd = self._raw_extras[fi]
nchan = sqd['nchan']
data_left = (stop - start) * nchan
conv_factor = sqd['conv_factor']
n_bytes = sqd['dtype'].itemsize
assert n_bytes in (2, 4)
# Read up to 100 MB of data at a time.
blk_size = min(data_left, (100000000 // n_bytes // nchan) * nchan)
with open(self._filenames[fi], 'rb', buffering=0) as fid:
# extract data
pointer = start * nchan * n_bytes
fid.seek(sqd['dirs'][KIT.DIR_INDEX_RAW_DATA]['offset'] + pointer)
stim = sqd['stim']
for blk_start in np.arange(0, data_left, blk_size) // nchan:
blk_size = min(blk_size, data_left - blk_start * nchan)
block = np.fromfile(fid, dtype=sqd['dtype'], count=blk_size)
block = block.reshape(nchan, -1, order='F').astype(float)
blk_stop = blk_start + block.shape[1]
data_view = data[:, blk_start:blk_stop]
block *= conv_factor
# Create a synthetic stim channel
if stim is not None:
stim_ch = _make_stim_channel(
block[stim, :], sqd['slope'], sqd['stimthresh'],
sqd['stim_code'], stim)
block = np.vstack((block, stim_ch))
_mult_cal_one(data_view, block, idx, cals, mult)
# cals are all unity, so can be ignored
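# Illustrative construction of RawKIT following the docstring above (the file
# names are hypothetical):
#
#     raw = RawKIT('recording.sqd', mrk='marker.sqd', elp='elp.txt',
#                  hsp='hsp.txt', stim='>', slope='-', preload=True)
#
# mrk, elp and hsp must be provided together or all left as None (see
# _call_digitization), and with stim='>' the largest trigger values are
# assigned to the last channel when synthesizing STI 014.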
def _default_stim_chs(info):
"""Return default stim channels for SQD files."""
return pick_types(info, meg=False, ref_meg=False, misc=True,
exclude=[])[:8]
def _make_stim_channel(trigger_chs, slope, threshold, stim_code,
trigger_values):
"""Create synthetic stim channel from multiple trigger channels."""
if slope == '+':
trig_chs_bin = trigger_chs > threshold
elif slope == '-':
trig_chs_bin = trigger_chs < threshold
else:
raise ValueError("slope needs to be '+' or '-'")
# trigger value
if stim_code == 'binary':
trigger_values = 2 ** np.arange(len(trigger_chs))
elif stim_code != 'channel':
raise ValueError("stim_code must be 'binary' or 'channel', got %s" %
repr(stim_code))
trig_chs = trig_chs_bin * trigger_values[:, np.newaxis]
return np.array(trig_chs.sum(axis=0), ndmin=2)
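# Worked example for _make_stim_channel (illustrative, not from the original
# source): two samples on three trigger channels with slope='+' and
# threshold=1. Only channels 0 and 2 cross the threshold in the second sample,
# so with stim_code='binary' their weights are 2**0=1 and 2**2=4 and the
# synthesized value is 5:
#
#     >>> chs = np.array([[0., 5.], [0., 0.], [0., 5.]])
#     >>> _make_stim_channel(chs, '+', 1, 'binary', None)
#     array([[0, 5]])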
class EpochsKIT(BaseEpochs):
"""Epochs Array object from KIT SQD file.
Parameters
----------
input_fname : str
Path to the sqd file.
events : str | array, shape (n_events, 3)
Path to events file. If array, it is the events typically returned
by the read_events function. If some events don't match the events
        of interest as specified by event_id, they will be marked as 'IGNORED'
in the drop log.
event_id : int | list of int | dict | None
The id of the event to consider. If dict,
the keys can later be used to access associated events. Example:
dict(auditory=1, visual=3). If int, a dict will be created with
the id as string. If a list, all events with the IDs specified
in the list are used. If None, all events will be used with
and a dict is created with string integer names corresponding
to the event id integers.
tmin : float
Start time before event.
baseline : None or tuple of length 2 (default (None, 0))
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
The baseline (a, b) includes both endpoints, i.e. all
timepoints t such that a <= t <= b.
reject : dict | None
Rejection parameters based on peak-to-peak amplitude.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
If reject is None then no rejection is done. Example::
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=40e-6, # V (EEG channels)
eog=250e-6 # V (EOG channels)
)
flat : dict | None
Rejection parameters based on flatness of signal.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
are floats that set the minimum acceptable peak-to-peak amplitude.
If flat is None then no rejection is done.
reject_tmin : scalar | None
Start of the time window used to reject epochs (with the default None,
the window will start with tmin).
reject_tmax : scalar | None
End of the time window used to reject epochs (with the default None,
the window will end with tmax).
mrk : None | str | array_like, shape = (5, 3) | list of str or array_like
Marker points representing the location of the marker coils with
respect to the MEG Sensors, or path to a marker file.
If list, all of the markers will be averaged together.
elp : None | str | array_like, shape = (8, 3)
Digitizer points representing the location of the fiducials and the
marker coils with respect to the digitized head shape, or path to a
file containing these points.
hsp : None | str | array, shape = (n_points, 3)
Digitizer head shape points, or path to head shape file. If more than
        10,000 points are in the head shape, they are automatically decimated.
allow_unknown_format : bool
Force reading old data that is not officially supported. Alternatively,
read and re-save the data with the KIT MEG Laboratory application.
%(standardize_names)s
%(verbose)s
Notes
-----
``elp`` and ``hsp`` are usually the exported text files (*.txt) from the
Polhemus FastScan system. hsp refers to the headshape surface points. elp
    refers to the points in head-space that correspond to the HPI points.
Currently, '*.elp' and '*.hsp' files are NOT supported.
See Also
--------
mne.Epochs : Documentation of attribute and methods.
"""
@verbose
def __init__(self, input_fname, events, event_id=None, tmin=0,
baseline=None, reject=None, flat=None, reject_tmin=None,
reject_tmax=None, mrk=None, elp=None, hsp=None,
allow_unknown_format=False, standardize_names=None,
verbose=None): # noqa: D102
if isinstance(events, str):
events = read_events(events)
logger.info('Extracting KIT Parameters from %s...' % input_fname)
input_fname = op.abspath(input_fname)
self.info, kit_info = get_kit_info(
input_fname, allow_unknown_format, standardize_names)
kit_info.update(filename=input_fname)
self._raw_extras = [kit_info]
self._filenames = []
if len(events) != self._raw_extras[0]['n_epochs']:
raise ValueError('Event list does not match number of epochs.')
if self._raw_extras[0]['acq_type'] == KIT.EPOCHS:
self._raw_extras[0]['data_length'] = KIT.INT
else:
raise TypeError('SQD file contains raw data, not epochs or '
'average. Wrong reader.')
if event_id is None: # convert to int to make typing-checks happy
event_id = {str(e): int(e) for e in np.unique(events[:, 2])}
for key, val in event_id.items():
if val not in events[:, 2]:
raise ValueError('No matching events found for %s '
'(event id %i)' % (key, val))
data = self._read_kit_data()
assert data.shape == (self._raw_extras[0]['n_epochs'],
self.info['nchan'],
self._raw_extras[0]['frame_length'])
tmax = ((data.shape[2] - 1) / self.info['sfreq']) + tmin
super(EpochsKIT, self).__init__(
self.info, data, events, event_id, tmin, tmax, baseline,
reject=reject, flat=flat, reject_tmin=reject_tmin,
reject_tmax=reject_tmax, filename=input_fname, verbose=verbose)
self.info = _call_digitization(
info=self.info, mrk=mrk, elp=elp, hsp=hsp, kit_info=kit_info)
logger.info('Ready.')
def _read_kit_data(self):
"""Read epochs data.
Returns
-------
data : array, [channels x samples]
the data matrix (channels x samples).
times : array, [samples]
returns the time values corresponding to the samples.
"""
info = self._raw_extras[0]
epoch_length = info['frame_length']
n_epochs = info['n_epochs']
n_samples = info['n_samples']
filename = info['filename']
dtype = info['dtype']
nchan = info['nchan']
with open(filename, 'rb', buffering=0) as fid:
fid.seek(info['dirs'][KIT.DIR_INDEX_RAW_DATA]['offset'])
count = n_samples * nchan
data = np.fromfile(fid, dtype=dtype, count=count)
data = data.reshape((n_samples, nchan)).T
data = data * info['conv_factor']
data = data.reshape((nchan, n_epochs, epoch_length))
data = data.transpose((1, 0, 2))
return data
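# Illustrative construction of EpochsKIT following the docstring above (file
# names are hypothetical):
#
#     epochs = EpochsKIT('epochs.sqd', events='events.eve',
#                        event_id=dict(auditory=1, visual=3))
#
# events may be a path readable by read_events or an (n_events, 3) array, and
# its length must match the number of epochs stored in the SQD file.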
def _read_dir(fid):
return dict(offset=np.fromfile(fid, np.uint32, 1)[0],
size=np.fromfile(fid, np.int32, 1)[0],
max_count=np.fromfile(fid, np.int32, 1)[0],
count=np.fromfile(fid, np.int32, 1)[0])
@verbose
def get_kit_info(rawfile, allow_unknown_format, standardize_names=None,
verbose=None):
"""Extract all the information from the sqd/con file.
Parameters
----------
rawfile : str
KIT file to be read.
allow_unknown_format : bool
Force reading old data that is not officially supported. Alternatively,
read and re-save the data with the KIT MEG Laboratory application.
%(standardize_names)s
%(verbose)s
Returns
-------
info : instance of Info
An Info for the instance.
sqd : dict
A dict containing all the sqd parameter settings.
"""
sqd = dict()
sqd['rawfile'] = rawfile
unsupported_format = False
sqd['dirs'] = dirs = list()
with open(rawfile, 'rb', buffering=0) as fid: # buffering=0 for np bug
#
# directories (0)
#
dirs.append(_read_dir(fid))
dirs.extend(_read_dir(fid) for _ in range(dirs[0]['count'] - 1))
assert len(dirs) == dirs[KIT.DIR_INDEX_DIR]['count']
#
# system (1)
#
fid.seek(dirs[KIT.DIR_INDEX_SYSTEM]['offset'])
# check file format version
version, revision = unpack('2i', fid.read(2 * KIT.INT))
if version < 2 or (version == 2 and revision < 3):
version_string = "V%iR%03i" % (version, revision)
if allow_unknown_format:
unsupported_format = True
logger.warning("Force loading KIT format %s", version_string)
else:
raise UnsupportedKITFormat(
version_string,
"SQD file format %s is not officially supported. "
"Set allow_unknown_format=True to load it anyways." %
(version_string,))
sysid = unpack('i', fid.read(KIT.INT))[0]
# basic info
system_name = unpack('128s', fid.read(128))[0].decode()
# model name
model_name = unpack('128s', fid.read(128))[0].decode()
# channels
sqd['nchan'] = channel_count = unpack('i', fid.read(KIT.INT))[0]
comment = unpack('256s', fid.read(256))[0].decode()
create_time, last_modified_time = unpack('2i', fid.read(2 * KIT.INT))
fid.seek(KIT.INT * 3, SEEK_CUR) # reserved
dewar_style = unpack('i', fid.read(KIT.INT))[0]
fid.seek(KIT.INT * 3, SEEK_CUR) # spare
fll_type = unpack('i', fid.read(KIT.INT))[0]
fid.seek(KIT.INT * 3, SEEK_CUR) # spare
trigger_type = unpack('i', fid.read(KIT.INT))[0]
fid.seek(KIT.INT * 3, SEEK_CUR) # spare
adboard_type = unpack('i', fid.read(KIT.INT))[0]
fid.seek(KIT.INT * 29, SEEK_CUR) # reserved
if version < 2 or (version == 2 and revision <= 3):
adc_range = float(unpack('i', fid.read(KIT.INT))[0])
else:
adc_range = unpack('d', fid.read(KIT.DOUBLE))[0]
adc_polarity, adc_allocated, adc_stored = unpack('3i',
fid.read(3 * KIT.INT))
system_name = system_name.replace('\x00', '')
system_name = system_name.strip().replace('\n', '/')
model_name = model_name.replace('\x00', '')
model_name = model_name.strip().replace('\n', '/')
full_version = f'V{version:d}R{revision:03d}'
logger.debug("SQD file basic information:")
logger.debug("Meg160 version = %s", full_version)
logger.debug("System ID = %i", sysid)
logger.debug("System name = %s", system_name)
logger.debug("Model name = %s", model_name)
logger.debug("Channel count = %i", channel_count)
logger.debug("Comment = %s", comment)
logger.debug("Dewar style = %i", dewar_style)
logger.debug("FLL type = %i", fll_type)
logger.debug("Trigger type = %i", trigger_type)
logger.debug("A/D board type = %i", adboard_type)
logger.debug("ADC range = +/-%s[V]", adc_range / 2.)
logger.debug("ADC allocate = %i[bit]", adc_allocated)
logger.debug("ADC bit = %i[bit]", adc_stored)
# MGH description: 'acquisition (megacq) VectorView system at NMR-MGH'
description = \
f'{system_name} ({sysid}) {full_version} {model_name}'
sqd['dtype'] = np.dtype(getattr(np, f'int{adc_allocated}'))
# check that we can read this file
if fll_type not in KIT.FLL_SETTINGS:
fll_types = sorted(KIT.FLL_SETTINGS.keys())
use_fll_type = fll_types[
np.searchsorted(fll_types, fll_type) - 1]
warn('Unknown site filter settings (FLL) for system '
'"%s" model "%s" (ID %s), will assume FLL %d->%d, check '
'your data for correctness, including channel scales and '
'filter settings!'
% (system_name, model_name, sysid, fll_type, use_fll_type))
fll_type = use_fll_type
#
# channel information (4)
#
chan_dir = dirs[KIT.DIR_INDEX_CHANNELS]
chan_offset, chan_size = chan_dir['offset'], chan_dir['size']
sqd['channels'] = channels = []
exg_gains = list()
for i in range(channel_count):
fid.seek(chan_offset + chan_size * i)
channel_type, = unpack('i', fid.read(KIT.INT))
# System 52 mislabeled reference channels as NULL. This was fixed
# in system 53; not sure about 51...
if sysid == 52 and i < 160 and channel_type == KIT.CHANNEL_NULL:
channel_type = KIT.CHANNEL_MAGNETOMETER_REFERENCE
if channel_type in KIT.CHANNELS_MEG:
if channel_type not in KIT.CH_TO_FIFF_COIL:
raise NotImplementedError(
"KIT channel type %i can not be read. Please contact "
"the mne-python developers." % channel_type)
channels.append({
'type': channel_type,
# (x, y, z, theta, phi) for all MEG channels. Some channel
# types have additional information which we're not using.
'loc': np.fromfile(fid, dtype='d', count=5),
})
if channel_type in KIT.CHANNEL_NAME_NCHAR:
fid.seek(16, SEEK_CUR) # misc fields
channels[-1]['name'] = _read_name(fid, channel_type)
elif channel_type in KIT.CHANNELS_MISC:
channel_no, = unpack('i', fid.read(KIT.INT))
fid.seek(4, SEEK_CUR)
name = _read_name(fid, channel_type)
channels.append({
'type': channel_type,
'no': channel_no,
'name': name,
})
if channel_type in (KIT.CHANNEL_EEG, KIT.CHANNEL_ECG):
offset = 6 if channel_type == KIT.CHANNEL_EEG else 8
fid.seek(offset, SEEK_CUR)
exg_gains.append(np.fromfile(fid, 'd', 1)[0])
elif channel_type == KIT.CHANNEL_NULL:
channels.append({'type': channel_type})
else:
raise IOError("Unknown KIT channel type: %i" % channel_type)
exg_gains = np.array(exg_gains)
#
# Channel sensitivity information: (5)
#
    # Only sensor channels require gain; the additional misc channels
    # (trigger, audio, and voice channels) are passed through unaffected
fid.seek(dirs[KIT.DIR_INDEX_CALIBRATION]['offset'])
# (offset [Volt], gain [Tesla/Volt]) for each channel
sensitivity = np.fromfile(fid, dtype='d', count=channel_count * 2)
sensitivity.shape = (channel_count, 2)
channel_offset, channel_gain = sensitivity.T
assert (channel_offset == 0).all() # otherwise we have a problem
#
# amplifier gain (7)
#
fid.seek(dirs[KIT.DIR_INDEX_AMP_FILTER]['offset'])
amp_data = unpack('i', fid.read(KIT.INT))[0]
if fll_type >= 100: # Kapper Type
# gain: mask bit
gain1 = (amp_data & 0x00007000) >> 12
gain2 = (amp_data & 0x70000000) >> 28
gain3 = (amp_data & 0x07000000) >> 24
amp_gain = (KIT.GAINS[gain1] * KIT.GAINS[gain2] * KIT.GAINS[gain3])
# filter settings
hpf = (amp_data & 0x00000700) >> 8
lpf = (amp_data & 0x00070000) >> 16
bef = (amp_data & 0x00000003) >> 0
else: # Hanger Type
# gain
input_gain = (amp_data & 0x1800) >> 11
output_gain = (amp_data & 0x0007) >> 0
amp_gain = KIT.GAINS[input_gain] * KIT.GAINS[output_gain]
# filter settings
hpf = (amp_data & 0x007) >> 4
lpf = (amp_data & 0x0700) >> 8
bef = (amp_data & 0xc000) >> 14
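        # Note (added for clarity): the hpf/lpf/bef values decoded above are
        # table indices, not frequencies; they are mapped to the actual cutoff
        # settings through the per-FLL lookup tables selected just below.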
hpf_options, lpf_options, bef_options = KIT.FLL_SETTINGS[fll_type]
sqd['highpass'] = KIT.HPFS[hpf_options][hpf]
sqd['lowpass'] = KIT.LPFS[lpf_options][lpf]
sqd['notch'] = KIT.BEFS[bef_options][bef]
#
# Acquisition Parameters (8)
#
fid.seek(dirs[KIT.DIR_INDEX_ACQ_COND]['offset'])
sqd['acq_type'], = acq_type, = unpack('i', fid.read(KIT.INT))
sqd['sfreq'], = unpack('d', fid.read(KIT.DOUBLE))
if acq_type == KIT.CONTINUOUS:
# samples_count, = unpack('i', fid.read(KIT.INT))
fid.seek(KIT.INT, SEEK_CUR)
sqd['n_samples'], = unpack('i', fid.read(KIT.INT))
elif acq_type == KIT.EVOKED or acq_type == KIT.EPOCHS:
sqd['frame_length'], = unpack('i', fid.read(KIT.INT))
sqd['pretrigger_length'], = unpack('i', fid.read(KIT.INT))
sqd['average_count'], = unpack('i', fid.read(KIT.INT))
sqd['n_epochs'], = unpack('i', fid.read(KIT.INT))
if acq_type == KIT.EVOKED:
sqd['n_samples'] = sqd['frame_length']
else:
sqd['n_samples'] = sqd['frame_length'] * sqd['n_epochs']
else:
raise IOError("Invalid acquisition type: %i. Your file is neither "
"continuous nor epoched data." % (acq_type,))
#
# digitization information (12 and 26)
#
dig_dir = dirs[KIT.DIR_INDEX_DIG_POINTS]
cor_dir = dirs[KIT.DIR_INDEX_COREG]
dig = dict()
hsp = list()
if dig_dir['count'] > 0 and cor_dir['count'] > 0:
# directories (0)
fid.seek(dig_dir['offset'])
for _ in range(dig_dir['count']):
name = _read_name(fid, n=8).strip()
            # Sometimes there are mismatches (e.g., AFz vs AFZ) between
            # the channel name and its digitized name, so be case-insensitive.
            # This also prevents collisions with HSP points.
name = name.lower()
rr = np.fromfile(fid, 'd', 3)
if name:
assert name not in dig
dig[name] = rr
else:
hsp.append(rr)
# nasion, lpa, rpa, HPI in native space
elp = [dig.pop(key) for key in (
'fidnz', 'fidt9', 'fidt10',
'hpi_1', 'hpi_2', 'hpi_3', 'hpi_4')]
if 'hpi_5' in dig and dig['hpi_5'].any():
elp.append(dig.pop('hpi_5'))
elp = np.array(elp)
hsp = np.array(hsp, float).reshape(-1, 3)
assert elp.shape in ((7, 3), (8, 3))
# coregistration
fid.seek(cor_dir['offset'])
mrk = np.zeros((elp.shape[0] - 3, 3))
for _ in range(cor_dir['count']):
done = np.fromfile(fid, np.int32, 1)[0]
fid.seek(16 * KIT.DOUBLE + # meg_to_mri
16 * KIT.DOUBLE, # mri_to_meg
SEEK_CUR)
marker_count = np.fromfile(fid, np.int32, 1)[0]
if not done:
continue
assert marker_count >= len(mrk)
for mi in range(len(mrk)):
mri_type, meg_type, mri_done, meg_done = \
np.fromfile(fid, np.int32, 4)
assert meg_done
fid.seek(3 * KIT.DOUBLE, SEEK_CUR) # mri_pos
mrk[mi] = np.fromfile(fid, 'd', 3)
fid.seek(256, SEEK_CUR) # marker_file (char)
sqd.update(hsp=hsp, elp=elp, mrk=mrk)
all_names = set(ch.get('name', '') for ch in channels)
if standardize_names is None and all_names.difference({'', 'EEG'}):
standardize_names = True
warn('standardize_names defaults to True in 0.21 but will change '
'to False in 0.22', DeprecationWarning)
# precompute conversion factor for reading data
if unsupported_format:
if sysid not in LEGACY_AMP_PARAMS:
raise IOError("Legacy parameters for system ID %i unavailable" %
(sysid,))
adc_range, adc_stored = LEGACY_AMP_PARAMS[sysid]
is_meg = np.array([ch['type'] in KIT.CHANNELS_MEG for ch in channels])
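    # Note (added for clarity): raw ADC counts are converted to volts via
    # adc_range / 2 ** adc_stored; MEG channels are additionally converted to
    # tesla by dividing by the amplifier gain and applying the per-channel
    # calibration gain read from the sensitivity block above.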
ad_to_volt = adc_range / (2 ** adc_stored)
ad_to_tesla = ad_to_volt / amp_gain * channel_gain
conv_factor = np.where(is_meg, ad_to_tesla, ad_to_volt)
    # XXX this is a bit of a hack. Should probably do this more cleanly at
    # some point... the 2 ** (adc_stored - 14) was empirically determined using
    # the test files with known amplitudes. The conv_factors need to be
    # replaced by these values, otherwise we're off by a factor of 5000.0
    # for the EEG data.
is_exg = [ch['type'] in (KIT.CHANNEL_EEG, KIT.CHANNEL_ECG)
for ch in channels]
exg_gains /= 2 ** (adc_stored - 14)
conv_factor[is_exg] = exg_gains
sqd['conv_factor'] = conv_factor[:, np.newaxis]
# Create raw.info dict for raw fif object with SQD data
info = _empty_info(float(sqd['sfreq']))
info.update(meas_date=_stamp_to_dt((create_time, 0)),
lowpass=sqd['lowpass'],
highpass=sqd['highpass'], kit_system_id=sysid,
description=description)
# Creates a list of dicts of meg channels for raw.info
logger.info('Setting channel info structure...')
info['chs'] = fiff_channels = []
channel_index = defaultdict(lambda: 0)
sqd['eeg_dig'] = OrderedDict()
for idx, ch in enumerate(channels, 1):
if ch['type'] in KIT.CHANNELS_MEG:
ch_name = ch.get('name', '')
if ch_name == '' or standardize_names:
ch_name = 'MEG %03d' % idx
            # create three orthogonal vectors
# ch_angles[0]: theta, ch_angles[1]: phi
theta, phi = np.radians(ch['loc'][3:])
x = sin(theta) * cos(phi)
y = sin(theta) * sin(phi)
z = cos(theta)
vec_z = np.array([x, y, z])
vec_z /= linalg.norm(vec_z)
vec_x = np.zeros(vec_z.size, dtype=np.float64)
if vec_z[1] < vec_z[2]:
if vec_z[0] < vec_z[1]:
vec_x[0] = 1.0
else:
vec_x[1] = 1.0
elif vec_z[0] < vec_z[2]:
vec_x[0] = 1.0
else:
vec_x[2] = 1.0
vec_x -= np.sum(vec_x * vec_z) * vec_z
vec_x /= linalg.norm(vec_x)
vec_y = np.cross(vec_z, vec_x)
# transform to Neuromag like coordinate space
vecs = np.vstack((ch['loc'][:3], vec_x, vec_y, vec_z))
vecs = apply_trans(als_ras_trans, vecs)
unit = FIFF.FIFF_UNIT_T
loc = vecs.ravel()
else:
ch_type_label = KIT.CH_LABEL[ch['type']]
channel_index[ch_type_label] += 1
ch_type_index = channel_index[ch_type_label]
ch_name = ch.get('name', '')
eeg_name = ch_name.lower()
# some files have all EEG labeled as EEG
if ch_name in ('', 'EEG') or standardize_names:
ch_name = '%s %03i' % (ch_type_label, ch_type_index)
unit = FIFF.FIFF_UNIT_V
loc = np.zeros(12)
if eeg_name and eeg_name in dig:
loc[:3] = sqd['eeg_dig'][eeg_name] = dig[eeg_name]
fiff_channels.append(dict(
cal=KIT.CALIB_FACTOR, logno=idx, scanno=idx, range=KIT.RANGE,
unit=unit, unit_mul=KIT.UNIT_MUL, ch_name=ch_name,
coord_frame=FIFF.FIFFV_COORD_DEVICE,
coil_type=KIT.CH_TO_FIFF_COIL[ch['type']],
kind=KIT.CH_TO_FIFF_KIND[ch['type']], loc=loc))
info._update_redundant()
return info, sqd
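# Illustrative sketch (added, not part of the original module): the tuple
# returned above is what the RawKIT/EpochsKIT constructors consume. The file
# name below is a placeholder and the call signature is inferred from the
# docstring, so treat it as an assumption rather than shipped example data.
#
#     >>> info, sqd = get_kit_info('file.sqd', allow_unknown_format=False)  # doctest: +SKIP
#     >>> info['sfreq'], sqd['n_samples'], sqd['conv_factor'].shape         # doctest: +SKIP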
def _read_name(fid, ch_type=None, n=None):
n = n if ch_type is None else KIT.CHANNEL_NAME_NCHAR[ch_type]
return fid.read(n).split(b'\x00')[0].decode('utf-8')
@fill_doc
def read_raw_kit(input_fname, mrk=None, elp=None, hsp=None, stim='>',
slope='-', stimthresh=1, preload=False, stim_code='binary',
allow_unknown_format=False, standardize_names=None,
verbose=None):
"""Reader function for Ricoh/KIT conversion to FIF.
Parameters
----------
input_fname : str
Path to the sqd file.
mrk : None | str | array_like, shape (5, 3) | list of str or array_like
Marker points representing the location of the marker coils with
respect to the MEG Sensors, or path to a marker file.
If list, all of the markers will be averaged together.
elp : None | str | array_like, shape (8, 3)
Digitizer points representing the location of the fiducials and the
marker coils with respect to the digitized head shape, or path to a
file containing these points.
hsp : None | str | array, shape (n_points, 3)
Digitizer head shape points, or path to head shape file. If more than
10,000 points are in the head shape, they are automatically decimated.
stim : list of int | '<' | '>'
Channel-value correspondence when converting KIT trigger channels to a
        Neuromag-style stim channel. For '<', the largest values are assigned
        to the first channel. For '>', the largest values are assigned to the
        last channel (the default). Can also be specified as a list of
trigger channel indexes.
slope : '+' | '-'
How to interpret values on KIT trigger channels when synthesizing a
Neuromag-style stim channel. With '+', a positive slope (low-to-high)
is interpreted as an event. With '-', a negative slope (high-to-low)
is interpreted as an event.
stimthresh : float
The threshold level for accepting voltage changes in KIT trigger
channels as a trigger event.
%(preload)s
stim_code : 'binary' | 'channel'
        How to decode trigger values from stim channels. 'binary' reads stim
        channel events as binary code, 'channel' encodes the channel number.
allow_unknown_format : bool
Force reading old data that is not officially supported. Alternatively,
read and re-save the data with the KIT MEG Laboratory application.
%(standardize_names)s
%(verbose)s
Returns
-------
raw : instance of RawKIT
A Raw object containing KIT data.
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
Notes
-----
If mrk, hsp or elp are array_like inputs, then the numbers in xyz
coordinates should be in units of meters.
"""
return RawKIT(input_fname=input_fname, mrk=mrk, elp=elp, hsp=hsp,
stim=stim, slope=slope, stimthresh=stimthresh,
preload=preload, stim_code=stim_code,
allow_unknown_format=allow_unknown_format,
standardize_names=standardize_names, verbose=verbose)
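# Usage sketch (added for illustration; 'data.sqd' is a placeholder path, not
# real test data). It shows a typical call with an explicit stim configuration
# and a quick look at the resulting Raw object.
#
#     >>> raw = read_raw_kit('data.sqd', stim='>', slope='-',
#     ...                    stim_code='binary', preload=True)  # doctest: +SKIP
#     >>> raw.info['sfreq'], len(raw.ch_names)                  # doctest: +SKIP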
@fill_doc
def read_epochs_kit(input_fname, events, event_id=None, mrk=None, elp=None,
hsp=None, allow_unknown_format=False,
standardize_names=None, verbose=None):
"""Reader function for Ricoh/KIT epochs files.
Parameters
----------
input_fname : str
Path to the sqd file.
events : array, shape (n_events, 3)
The events typically returned by the read_events function.
If some events don't match the events of interest as specified
by event_id, they will be marked as 'IGNORED' in the drop log.
event_id : int | list of int | dict | None
The id of the event to consider. If dict,
the keys can later be used to access associated events. Example:
dict(auditory=1, visual=3). If int, a dict will be created with
the id as string. If a list, all events with the IDs specified
        in the list are used. If None, all events will be used, and a dict
        is created with string integer names corresponding to the event id
        integers.
mrk : None | str | array_like, shape (5, 3) | list of str or array_like
Marker points representing the location of the marker coils with
respect to the MEG Sensors, or path to a marker file.
If list, all of the markers will be averaged together.
elp : None | str | array_like, shape (8, 3)
Digitizer points representing the location of the fiducials and the
marker coils with respect to the digitized head shape, or path to a
file containing these points.
hsp : None | str | array, shape (n_points, 3)
Digitizer head shape points, or path to head shape file. If more than
10,000 points are in the head shape, they are automatically decimated.
allow_unknown_format : bool
Force reading old data that is not officially supported. Alternatively,
read and re-save the data with the KIT MEG Laboratory application.
%(standardize_names)s
%(verbose)s
Returns
-------
epochs : instance of Epochs
The epochs.
Notes
-----
.. versionadded:: 0.9.0
"""
epochs = EpochsKIT(input_fname=input_fname, events=events,
event_id=event_id, mrk=mrk, elp=elp, hsp=hsp,
allow_unknown_format=allow_unknown_format,
standardize_names=standardize_names,
verbose=verbose)
return epochs
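# Usage sketch (added for illustration; the path and the events array are
# placeholders, so the event samples and IDs are assumptions):
#
#     >>> import numpy as np
#     >>> events = np.array([[0, 0, 1], [200, 0, 1]])  # (sample, prev, event_id)
#     >>> epochs = read_epochs_kit('data.sqd', events=events,
#     ...                          event_id={'aud': 1})  # doctest: +SKIP
#     >>> epochs.get_data().shape                        # doctest: +SKIP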
|
venv/lib/python3.8/site-packages/mne/io/kit/kit.py
| 41,874 |
Epochs Array object from KIT SQD file.
Parameters
----------
input_fname : str
Path to the sqd file.
events : str | array, shape (n_events, 3)
Path to events file. If array, it is the events typically returned
by the read_events function. If some events don't match the events
of interest as specified by event_id, they will be marked as 'IGNORED'
in the drop log.
event_id : int | list of int | dict | None
The id of the event to consider. If dict,
the keys can later be used to access associated events. Example:
dict(auditory=1, visual=3). If int, a dict will be created with
the id as string. If a list, all events with the IDs specified
in the list are used. If None, all events will be used, and a dict is created
with string integer names corresponding to the event id integers.
tmin : float
Start time before event.
baseline : None or tuple of length 2 (default (None, 0))
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
The baseline (a, b) includes both endpoints, i.e. all
timepoints t such that a <= t <= b.
reject : dict | None
Rejection parameters based on peak-to-peak amplitude.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
If reject is None then no rejection is done. Example::
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=40e-6, # V (EEG channels)
eog=250e-6 # V (EOG channels)
)
flat : dict | None
Rejection parameters based on flatness of signal.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
are floats that set the minimum acceptable peak-to-peak amplitude.
If flat is None then no rejection is done.
reject_tmin : scalar | None
Start of the time window used to reject epochs (with the default None,
the window will start with tmin).
reject_tmax : scalar | None
End of the time window used to reject epochs (with the default None,
the window will end with tmax).
mrk : None | str | array_like, shape = (5, 3) | list of str or array_like
Marker points representing the location of the marker coils with
respect to the MEG Sensors, or path to a marker file.
If list, all of the markers will be averaged together.
elp : None | str | array_like, shape = (8, 3)
Digitizer points representing the location of the fiducials and the
marker coils with respect to the digitized head shape, or path to a
file containing these points.
hsp : None | str | array, shape = (n_points, 3)
Digitizer head shape points, or path to head shape file. If more than
10,000 points are in the head shape, they are automatically decimated.
allow_unknown_format : bool
Force reading old data that is not officially supported. Alternatively,
read and re-save the data with the KIT MEG Laboratory application.
%(standardize_names)s
%(verbose)s
Notes
-----
``elp`` and ``hsp`` are usually the exported text files (*.txt) from the
Polhemus FastScan system. hsp refers to the headshape surface points. elp
refers to the points in head-space that correspond to the HPI points.
Currently, '*.elp' and '*.hsp' files are NOT supported.
See Also
--------
mne.Epochs : Documentation of attribute and methods.
Raw object from KIT SQD file.
Parameters
----------
input_fname : str
Path to the sqd file.
mrk : None | str | array_like, shape (5, 3) | list of str or array_like
Marker points representing the location of the marker coils with
respect to the MEG Sensors, or path to a marker file.
If list, all of the markers will be averaged together.
elp : None | str | array_like, shape (8, 3)
Digitizer points representing the location of the fiducials and the
marker coils with respect to the digitized head shape, or path to a
file containing these points.
hsp : None | str | array, shape (n_points, 3)
Digitizer head shape points, or path to head shape file. If more than
10,000 points are in the head shape, they are automatically decimated.
stim : list of int | '<' | '>' | None
Channel-value correspondence when converting KIT trigger channels to a
Neuromag-style stim channel. For '<', the largest values are assigned
to the first channel (default). For '>', the largest values are
assigned to the last channel. Can also be specified as a list of
trigger channel indexes. If None, no synthesized channel is generated.
slope : '+' | '-'
How to interpret values on KIT trigger channels when synthesizing a
Neuromag-style stim channel. With '+', a positive slope (low-to-high)
is interpreted as an event. With '-', a negative slope (high-to-low)
is interpreted as an event.
stimthresh : float
The threshold level for accepting voltage changes in KIT trigger
channels as a trigger event. If None, stim must also be set to None.
%(preload)s
stim_code : 'binary' | 'channel'
How to decode trigger values from stim channels. 'binary' reads stim
channel events as binary code, 'channel' encodes the channel number.
allow_unknown_format : bool
Force reading old data that is not officially supported. Alternatively,
read and re-save the data with the KIT MEG Laboratory application.
%(standardize_names)s
%(verbose)s
Notes
-----
``elp`` and ``hsp`` are usually the exported text files (*.txt) from the
Polhemus FastScan system. hsp refers to the headshape surface points. elp
refers to the points in head-space that correspond to the HPI points.
Currently, '*.elp' and '*.hsp' files are NOT supported.
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
Our reader is not guaranteed to work with old files.
Return default stim channels for SQD files.
Create synthetic stim channel from multiple trigger channels.
Read epochs data.
Returns
-------
data : array, [channels x samples]
the data matrix (channels x samples).
times : array, [samples]
returns the time values corresponding to the samples.
Read a chunk of raw data.
Specify how the trigger channel is synthesized from analog channels.
Has to be done before loading data. For a RawKIT instance that has been
created with preload=True, this method will raise a
NotImplementedError.
Parameters
----------
info : instance of MeasInfo
The measurement info.
stim : list of int | '<' | '>'
Can be submitted as list of trigger channels.
If a list is not specified, the default triggers extracted from
misc channels will be used with specified directionality.
'<' means that largest values assigned to the first channel
in sequence.
'>' means the largest trigger assigned to the last channel
in sequence.
stim_code : 'binary' | 'channel'
How to decode trigger values from stim channels. 'binary' reads stim
channel events as binary code, 'channel' encodes the channel number.
Extract all the information from the sqd/con file.
Parameters
----------
rawfile : str
KIT file to be read.
allow_unknown_format : bool
Force reading old data that is not officially supported. Alternatively,
read and re-save the data with the KIT MEG Laboratory application.
%(standardize_names)s
%(verbose)s
Returns
-------
info : instance of Info
An Info for the instance.
sqd : dict
A dict containing all the sqd parameter settings.
Reader function for Ricoh/KIT epochs files.
Parameters
----------
input_fname : str
Path to the sqd file.
events : array, shape (n_events, 3)
The events typically returned by the read_events function.
If some events don't match the events of interest as specified
by event_id, they will be marked as 'IGNORED' in the drop log.
event_id : int | list of int | dict | None
The id of the event to consider. If dict,
the keys can later be used to access associated events. Example:
dict(auditory=1, visual=3). If int, a dict will be created with
the id as string. If a list, all events with the IDs specified
in the list are used. If None, all events will be used, and a dict is created
with string integer names corresponding to the event id integers.
mrk : None | str | array_like, shape (5, 3) | list of str or array_like
Marker points representing the location of the marker coils with
respect to the MEG Sensors, or path to a marker file.
If list, all of the markers will be averaged together.
elp : None | str | array_like, shape (8, 3)
Digitizer points representing the location of the fiducials and the
marker coils with respect to the digitized head shape, or path to a
file containing these points.
hsp : None | str | array, shape (n_points, 3)
Digitizer head shape points, or path to head shape file. If more than
10,000 points are in the head shape, they are automatically decimated.
allow_unknown_format : bool
Force reading old data that is not officially supported. Alternatively,
read and re-save the data with the KIT MEG Laboratory application.
%(standardize_names)s
%(verbose)s
Returns
-------
epochs : instance of Epochs
The epochs.
Notes
-----
.. versionadded:: 0.9.0
Reader function for Ricoh/KIT conversion to FIF.
Parameters
----------
input_fname : str
Path to the sqd file.
mrk : None | str | array_like, shape (5, 3) | list of str or array_like
Marker points representing the location of the marker coils with
respect to the MEG Sensors, or path to a marker file.
If list, all of the markers will be averaged together.
elp : None | str | array_like, shape (8, 3)
Digitizer points representing the location of the fiducials and the
marker coils with respect to the digitized head shape, or path to a
file containing these points.
hsp : None | str | array, shape (n_points, 3)
Digitizer head shape points, or path to head shape file. If more than
10,000 points are in the head shape, they are automatically decimated.
stim : list of int | '<' | '>'
Channel-value correspondence when converting KIT trigger channels to a
Neuromag-style stim channel. For '<', the largest values are assigned
to the first channel (default). For '>', the largest values are
assigned to the last channel. Can also be specified as a list of
trigger channel indexes.
slope : '+' | '-'
How to interpret values on KIT trigger channels when synthesizing a
Neuromag-style stim channel. With '+', a positive slope (low-to-high)
is interpreted as an event. With '-', a negative slope (high-to-low)
is interpreted as an event.
stimthresh : float
The threshold level for accepting voltage changes in KIT trigger
channels as a trigger event.
%(preload)s
stim_code : 'binary' | 'channel'
How to decode trigger values from stim channels. 'binary' reads stim
channel events as binary code, 'channel' encodes the channel number.
allow_unknown_format : bool
Force reading old data that is not officially supported. Alternatively,
read and re-save the data with the KIT MEG Laboratory application.
%(standardize_names)s
%(verbose)s
Returns
-------
raw : instance of RawKIT
A Raw object containing KIT data.
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
Notes
-----
If mrk, hsp or elp are array_like inputs, then the numbers in xyz
coordinates should be in units of meters.
Read events from data.
Parameters
----------
buffer_size : int
The size of chunk by which the data are scanned.
Returns
-------
events : array, [samples]
The event vector (1 x samples).
Conversion tool from SQD to FIF.
RawKIT class is adapted from Denis Engemann et al.'s mne_bti2fiff.py.
Authors: Teon Brooks <[email protected]> Joan Massich <[email protected]> Christian Brodbeck <[email protected]> License: BSD (3-clause) Use values from kit_info only if all others are None prepare mrk setup digitization noqa: D102 noqa: D102 modify info Read up to 100 MB of data at a time. extract data Create a synthetic stim channel cals are all unity, so can be ignored trigger value noqa: D102 convert to int to make typing-checks happy buffering=0 for np bug directories (0) system (1) check file format version basic info model name channels reserved spare spare spare reserved MGH description: 'acquisition (megacq) VectorView system at NMR-MGH' check that we can read this file channel information (4) System 52 mislabeled reference channels as NULL. This was fixed in system 53; not sure about 51... (x, y, z, theta, phi) for all MEG channels. Some channel types have additional information which we're not using. misc fields Channel sensitivity information: (5) only sensor channels requires gain. the additional misc channels (trigger channels, audio and voice channels) are passed through unaffected (offset [Volt], gain [Tesla/Volt]) for each channel otherwise we have a problem amplifier gain (7) Kapper Type gain: mask bit filter settings Hanger Type gain filter settings Acquisition Parameters (8) samples_count, = unpack('i', fid.read(KIT.INT)) digitization information (12 and 26) directories (0) Sometimes there are mismatches (e.g., AFz vs AFZ) between the channel name and its digitized, name, so let's be case insensitive. It will also prevent collisions with HSP nasion, lpa, rpa, HPI in native space coregistration meg_to_mri mri_to_meg mri_pos marker_file (char) precompute conversion factor for reading data XXX this is a bit of a hack. Should probably do this more cleanly at some point... the 2 ** (adc_stored - 14) was emperically determined using the test files with known amplitudes. The conv_factors need to be replaced by these values otherwise we're off by a factor off 5000.0 for the EEG data. Create raw.info dict for raw fif object with SQD data Creates a list of dicts of meg channels for raw.info create three orthogonal vector ch_angles[0]: theta, ch_angles[1]: phi transform to Neuromag like coordinate space some files have all EEG labeled as EEG
| 14,252 |
en
| 0.814151 |
#!/usr/bin/python3
# --- 001 > U5W2P1_Task6_w1
def solution(n):
    # True exactly when n is strictly between 2 and 7
    return 2 < n < 7
if __name__ == "__main__":
print('----------start------------')
n = 10
print(solution( n ))
print('------------end------------')
|
src/CodeLearn/plaintextCode/BloomTech/BTU5W1/U5W1P2_Task6_w1.py
| 292 |
!/usr/bin/python3 --- 001 > U5W2P1_Task6_w1
| 43 |
en
| 0.218291 |
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\objects\gardening\gardening_commands.py
# Compiled at: 2017-11-18 00:09:10
# Size of source mod 2**32: 1465 bytes
from objects.components import types
from objects.components.types import GARDENING_COMPONENT
from objects.gardening.gardening_component_fruit import GardeningFruitComponent
import services, sims4.commands
@sims4.commands.Command('gardening.cleanup_gardening_objects')
def cleanup_gardening_objects(_connection=None):
for obj in services.object_manager().get_all_objects_with_component_gen(GARDENING_COMPONENT):
gardening_component = obj.get_component(types.GARDENING_COMPONENT)
if not isinstance(gardening_component, GardeningFruitComponent):
continue
        # Reconstructed from the decompiler's flattened or-chain: only orphaned
        # fruit that is neither in an inventory nor on the active lot should be
        # reported and destroyed (assumed intent of the original bytecode).
        if obj.parent is None and not obj.is_in_inventory() and not obj.is_on_active_lot():
            sims4.commands.output('Destroyed object {} on open street was found without a parent at position {}, parent_type {}.'.format(obj, obj.position, obj.parent_type), _connection)
            obj.destroy(source=obj, cause='Fruit/Flower with no parent on open street')
sims4.commands.output('Gardening cleanup complete', _connection)
return True
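# Note (added): the decorator registers this as a console cheat command; in
# game it would be invoked as "gardening.cleanup_gardening_objects" (the exact
# console syntax is an assumption based on the command name).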
|
Scripts/simulation/objects/gardening/gardening_commands.py
| 1,362 |
uncompyle6 version 3.7.4 Python bytecode 3.7 (3394) Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)] Embedded file name: T:\InGame\Gameplay\Scripts\Server\objects\gardening\gardening_commands.py Compiled at: 2017-11-18 00:09:10 Size of source mod 2**32: 1465 bytes
| 321 |
en
| 0.460865 |
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2015-2016 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
from __future__ import absolute_import
__authors__ = ["D. Naudet"]
__license__ = "MIT"
__date__ = "15/09/2016"
from ...io.XsocsH5 import ScanPositions
from .ProjectItem import ProjectItem
from .ProjectDef import ItemClassDef
@ItemClassDef('ScanPositionsItem')
class ScanPositionsItem(ProjectItem):
def _createItem(self):
with self.xsocsH5 as h5f:
entries = h5f.entries()
entry = entries[0]
scan_positions = h5f.scan_positions(entry)
pathTpl = self.path + '/' + '{0}'
with self:
itemPath = pathTpl.format('pos_0')
self._set_array_data(itemPath, scan_positions.pos_0)
itemPath = pathTpl.format('pos_1')
self._set_array_data(itemPath, scan_positions.pos_1)
itemPath = pathTpl.format('motor_0')
self._set_scalar_data(itemPath, scan_positions.motor_0)
itemPath = pathTpl.format('motor_1')
self._set_scalar_data(itemPath, scan_positions.motor_1)
itemPath = pathTpl.format('n_0')
self._set_scalar_data(itemPath, scan_positions.shape[0])
itemPath = pathTpl.format('n_1')
self._set_scalar_data(itemPath, scan_positions.shape[1])
def positions(self):
pathTpl = self.path + '/' + '{0}'
with self:
itemPath = pathTpl.format('pos_0')
pos_0 = self._get_array_data(itemPath)
itemPath = pathTpl.format('pos_1')
pos_1 = self._get_array_data(itemPath)
itemPath = pathTpl.format('motor_0')
motor_0 = self._get_scalar_data(itemPath)
itemPath = pathTpl.format('motor_1')
motor_1 = self._get_scalar_data(itemPath)
itemPath = pathTpl.format('n_0')
n_0 = self._get_scalar_data(itemPath)
itemPath = pathTpl.format('n_1')
n_1 = self._get_scalar_data(itemPath)
return ScanPositions(motor_0=motor_0,
pos_0=pos_0,
motor_1=motor_1,
pos_1=pos_1,
shape=(n_0, n_1))
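    # Note (added): positions() rebuilds the ScanPositions namedtuple from the
    # datasets written by _createItem(), so the two methods must stay in sync
    # on the pos_0/pos_1/motor_0/motor_1/n_0/n_1 item names.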
|
xsocs/gui/project/ScanPositionsItem.py
| 3,495 |
coding: utf-8 /* Copyright (c) 2015-2016 European Synchrotron Radiation Facility Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
| 1,104 |
en
| 0.870883 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2022/2/9 12:09 PM
# @Author: zhoumengjie
# @File : tabledrawer.py
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib.font_manager import FontProperties
def draw_table(columns_head:[], cell_vals=[]):
    # Configure the font and minus-sign rendering
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
    # Figure / canvas
fig, ax = plt.subplots(figsize=(10, 4), dpi=100)
    # Data
data = [
[100, 200, 300, -100, 350],
[-120, 290, -90, 450, 150]
]
    # Columns and rows
columns = ('一', '二', '三', '四', '五')
rows = ['A', 'B']
    # Plotting parameters
index = np.arange(len(columns)) - 0.1
bar_width = 0.4
    # Colors
colors = ['turquoise', 'coral']
    # Bar charts
bar1 = plt.bar(index, data[0], bar_width, color=colors[0], edgecolor='grey')
bar2 = plt.bar(index + bar_width, data[1], bar_width, color=colors[1], edgecolor='grey')
    # Set the title
ax.set_title('收益情况', fontsize=16, y=1.1, x=0.44)
ax.set_ylabel('元', fontsize=12, color='black', alpha=0.7, rotation=360)
ax.set_ylim(-150, 500)
    # Show data labels (disabled below)
# ax.bar_label(bar1, label_type='edge')
# ax.bar_label(bar2, label_type='edge')
    # Hide the x/y tick marks
ax.tick_params(axis=u'both', which=u'both', length=0)
plt.xticks([])
table = plt.table(cellText=data, rowLabels=rows,
rowColours=colors,
colLabels=columns, cellLoc='center', loc='bottom',
bbox=[0, -0.4, 1, 0.24])
cellDict = table.get_celld()
for i in range(0, len(columns)):
cellDict[(0, i)].set_height(0.6)
for j in range(1, len(rows) + 1):
cellDict[(j, i)].set_height(0.4)
cellDict[(1, -1)].set_height(0.4)
cellDict[(2, -1)].set_height(0.4)
table.auto_set_font_size(False)
table.set_fontsize(10)
for key, cell in table.get_celld().items():
cell.set_linewidth(0.6)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
name = ['', '']
ax.legend(name, handlelength=0.7, labelspacing=0.6,
bbox_to_anchor=(-0.1, -0.23), loc='upper left', frameon=False)
plt.show()
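# Note (added): as written, draw_table() ignores its columns_head/cell_vals
# arguments and always plots the hard-coded `data`, `columns` and `rows`
# defined inside the function; callers wanting custom content would need to
# thread those parameters through to plt.bar() and plt.table().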
if __name__ == '__main__':
# draw_table(['A', 'B'], [['中国', '必胜'], ['你好', '谢谢']])
# print(4800 / 1100 / 1000)
data = {
'linux': [1.2, 2.2, 3.1, '中国', 2.0, 1.0, 2.1, 3.5, 4.0, 2.0, ],
'linuxmi': [5.2, 6.7, 7.9, 8.3, 1.2, 5.7, 6.1, 7.2, 8.3, '-', ],
}
df = pd.DataFrame(data)
fig, ax = plt.subplots(figsize=(3, 3))
ax.axis('off')
ax.axis('tight')
ax.table(cellText=df.values,
colLabels=df.columns,
bbox=[0, 0, 1, 1],
)
# plt.savefig('xx.png')
plt.show()
|
wxcloudrun/common/tabledrawer.py
| 2,969 |
!/usr/bin/env python -*- coding:utf-8 -*- @Time : 2022/2/9 12:09 下午 @Author: zhoumengjie @File : tabledrawer.py 设置字体及负数 画布 数据 列与行 作图参数 设置颜色 柱状图 设置标题 显示数据标签 ax.bar_label(bar1, label_type='edge') ax.bar_label(bar2, label_type='edge') x,y刻度不显示 draw_table(['A', 'B'], [['中国', '必胜'], ['你好', '谢谢']]) print(4800 / 1100 / 1000) plt.savefig('xx.png')
| 343 |
zh
| 0.48027 |
from .TapChanger import TapChanger
class RatioTapChanger(TapChanger):
'''
A tap changer that changes the voltage ratio impacting the voltage magnitude but not the phase angle across the transformer.
:tculControlMode: Specifies the regulation control mode (voltage or reactive) of the RatioTapChanger. Default: None
:stepVoltageIncrement: Tap step increment, in per cent of nominal voltage, per step position. Default: 0.0
:RatioTapChangerTable: The ratio tap changer of this tap ratio table. Default: None
:TransformerEnd: Ratio tap changer associated with this transformer end. Default: None
'''
cgmesProfile = TapChanger.cgmesProfile
possibleProfileList = {'class': [cgmesProfile.EQ.value, cgmesProfile.SSH.value, ],
'tculControlMode': [cgmesProfile.EQ.value, ],
'stepVoltageIncrement': [cgmesProfile.EQ.value, ],
'RatioTapChangerTable': [cgmesProfile.EQ.value, ],
'TransformerEnd': [cgmesProfile.EQ.value, ],
}
serializationProfile = {}
__doc__ += '\n Documentation of parent class TapChanger: \n' + TapChanger.__doc__
def __init__(self, tculControlMode = None, stepVoltageIncrement = 0.0, RatioTapChangerTable = None, TransformerEnd = None, *args, **kw_args):
super().__init__(*args, **kw_args)
self.tculControlMode = tculControlMode
self.stepVoltageIncrement = stepVoltageIncrement
self.RatioTapChangerTable = RatioTapChangerTable
self.TransformerEnd = TransformerEnd
def __str__(self):
str = 'class=RatioTapChanger\n'
attributes = self.__dict__
for key in attributes.keys():
str = str + key + '={}\n'.format(attributes[key])
return str
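# Usage sketch (added for illustration; the attribute values are invented and
# the parent TapChanger is assumed to require no mandatory arguments):
#
#     >>> rtc = RatioTapChanger(tculControlMode='volt', stepVoltageIncrement=0.625)
#     >>> print(rtc)  # doctest: +SKIP
#     class=RatioTapChanger
#     tculControlMode=volt
#     stepVoltageIncrement=0.625
#     ...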
|
cimpy/cgmes_v2_4_15/RatioTapChanger.py
| 1,623 |
A tap changer that changes the voltage ratio impacting the voltage magnitude but not the phase angle across the transformer.
:tculControlMode: Specifies the regulation control mode (voltage or reactive) of the RatioTapChanger. Default: None
:stepVoltageIncrement: Tap step increment, in per cent of nominal voltage, per step position. Default: 0.0
:RatioTapChangerTable: The ratio tap changer of this tap ratio table. Default: None
:TransformerEnd: Ratio tap changer associated with this transformer end. Default: None
| 519 |
en
| 0.663309 |
import torch
import math
from torch import nn, Tensor
from torch.nn import functional as F
from semseg.models.backbones import *
from semseg.models.modules.common import ConvModule
class SpatialPath(nn.Module):
def __init__(self, c1, c2) -> None:
super().__init__()
ch = 64
self.conv_7x7 = ConvModule(c1, ch, 7, 2, 3)
self.conv_3x3_1 = ConvModule(ch, ch, 3, 2, 1)
self.conv_3x3_2 = ConvModule(ch, ch, 3, 2, 1)
self.conv_1x1 = ConvModule(ch, c2, 1, 1, 0)
def forward(self, x):
x = self.conv_7x7(x)
x = self.conv_3x3_1(x)
x = self.conv_3x3_2(x)
return self.conv_1x1(x)
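    # Note (added): the three stride-2 convolutions above give an overall 8x
    # spatial downsampling, so the spatial-path output lines up with the
    # 1/8-resolution context features fused later in FeatureFusionModule.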
class ContextPath(nn.Module):
def __init__(self, backbone: nn.Module) -> None:
super().__init__()
self.backbone = backbone
c3, c4 = self.backbone.channels[-2:]
self.arm16 = AttentionRefinmentModule(c3, 128)
self.arm32 = AttentionRefinmentModule(c4, 128)
self.global_context = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
ConvModule(c4, 128, 1, 1, 0)
)
self.up16 = nn.Upsample(scale_factor=2.0, mode='bilinear', align_corners=True)
self.up32 = nn.Upsample(scale_factor=2.0, mode='bilinear', align_corners=True)
self.refine16 = ConvModule(128, 128, 3, 1, 1)
self.refine32 = ConvModule(128, 128, 3, 1, 1)
def forward(self, x):
_, _, down16, down32 = self.backbone(x) # 4x256x64x128, 4x512x32x64
arm_down16 = self.arm16(down16) # 4x128x64x128
arm_down32 = self.arm32(down32) # 4x128x32x64
global_down32 = self.global_context(down32) # 4x128x1x1
global_down32 = F.interpolate(global_down32, size=down32.size()[2:], mode='bilinear', align_corners=True) # 4x128x32x64
arm_down32 = arm_down32 + global_down32 # 4x128x32x64
arm_down32 = self.up32(arm_down32) # 4x128x64x128
arm_down32 = self.refine32(arm_down32) # 4x128x64x128
arm_down16 = arm_down16 + arm_down32 # 4x128x64x128
arm_down16 = self.up16(arm_down16) # 4x128x128x256
arm_down16 = self.refine16(arm_down16) # 4x128x128x256
return arm_down16, arm_down32
class AttentionRefinmentModule(nn.Module):
def __init__(self, c1, c2) -> None:
super().__init__()
self.conv_3x3 = ConvModule(c1, c2, 3, 1, 1)
self.attention = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(c2, c2, 1, bias=False),
nn.BatchNorm2d(c2),
nn.Sigmoid()
)
def forward(self, x):
fm = self.conv_3x3(x)
fm_se = self.attention(fm)
return fm * fm_se
class FeatureFusionModule(nn.Module):
def __init__(self, c1, c2, reduction=1) -> None:
super().__init__()
self.conv_1x1 = ConvModule(c1, c2, 1, 1, 0)
self.attention = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(c2, c2 // reduction, 1, bias=False),
nn.ReLU(True),
nn.Conv2d(c2 // reduction, c2, 1, bias=False),
nn.Sigmoid()
)
def forward(self, x1, x2):
fm = torch.cat([x1, x2], dim=1)
fm = self.conv_1x1(fm)
fm_se = self.attention(fm)
return fm + fm * fm_se
class Head(nn.Module):
def __init__(self, c1, n_classes, upscale_factor, is_aux=False) -> None:
super().__init__()
ch = 256 if is_aux else 64
c2 = n_classes * upscale_factor * upscale_factor
self.conv_3x3 = ConvModule(c1, ch, 3, 1, 1)
self.conv_1x1 = nn.Conv2d(ch, c2, 1, 1, 0)
self.upscale = nn.PixelShuffle(upscale_factor)
def forward(self, x):
x = self.conv_1x1(self.conv_3x3(x))
return self.upscale(x)
class BiSeNetv1(nn.Module):
def __init__(self, backbone: str = 'ResNet-18', num_classes: int = 19) -> None:
super().__init__()
backbone, variant = backbone.split('-')
self.context_path = ContextPath(eval(backbone)(variant))
self.spatial_path = SpatialPath(3, 128)
self.ffm = FeatureFusionModule(256, 256)
self.output_head = Head(256, num_classes, upscale_factor=8, is_aux=False)
self.context16_head = Head(128, num_classes, upscale_factor=8, is_aux=True)
self.context32_head = Head(128, num_classes, upscale_factor=16, is_aux=True)
self.apply(self._init_weights)
def _init_weights(self, m: nn.Module) -> None:
if isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            fan_out //= m.groups  # divide in place; the original bare expression discarded the result
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, (nn.LayerNorm, nn.BatchNorm2d)):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
def init_pretrained(self, pretrained: str = None) -> None:
if pretrained:
self.context_path.backbone.load_state_dict(torch.load(pretrained, map_location='cpu'), strict=False)
def forward(self, x): # 4x3x1024x2048
spatial_out = self.spatial_path(x) # 4x128x128x256
context16, context32 = self.context_path(x) # 4x128x128x256, 4x128x64x128
fm_fuse = self.ffm(spatial_out, context16) # 4x256x128x256
output = self.output_head(fm_fuse) # 4xn_classesx1024x2048
if self.training:
context_out16 = self.context16_head(context16) # 4xn_classesx1024x2048
context_out32 = self.context32_head(context32) # 4xn_classesx1024x2048
return output, context_out16, context_out32
return output
if __name__ == '__main__':
model = BiSeNetv1('MobileNetV2-1.0', 19)
# model.init_pretrained('checkpoints/backbones/resnet/resnet18.pth')
model.eval()
image = torch.randn(1, 3, 224, 224)
output = model(image)
print(output.shape)
|
semseg/models/bisenetv1.py
| 6,206 |
4x256x64x128, 4x512x32x64 4x128x64x128 4x128x32x64 4x128x1x1 4x128x32x64 4x128x32x64 4x128x64x128 4x128x64x128 4x128x64x128 4x128x128x256 4x128x128x256 4x3x1024x2048 4x128x128x256 4x128x128x256, 4x128x64x128 4x256x128x256 4xn_classesx1024x2048 4xn_classesx1024x2048 4xn_classesx1024x2048 model.init_pretrained('checkpoints/backbones/resnet/resnet18.pth')
| 371 |
en
| 0.25245 |
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import collections
from collections.abc import Callable
import re
import builtins
import time
import logging
import shutil
import os
import sys
import tempfile
from tempfile import NamedTemporaryFile
import platform
import gc
import glob
import gzip
import stat
import string
import warnings
import os.path as op
from copy import copy as shallow_copy
from contextlib import contextmanager
from functools import (
lru_cache,
wraps,
)
from time import sleep
import inspect
from itertools import tee
# this import is required because other modules import opj from here.
from os.path import join as opj
from os.path import (
abspath,
basename,
commonprefix,
curdir,
dirname,
exists,
expanduser,
expandvars,
isabs,
isdir,
islink,
lexists,
normpath,
pardir,
relpath,
sep,
split,
splitdrive
)
import posixpath
from shlex import (
quote as shlex_quote,
split as shlex_split,
)
# from datalad.dochelpers import get_docstring_split
from datalad.consts import TIMESTAMP_FMT
from datalad.support.exceptions import CapturedException
unicode_srctypes = str, bytes
lgr = logging.getLogger("datalad.utils")
lgr.log(5, "Importing datalad.utils")
#
# Some useful variables
#
platform_system = platform.system().lower()
on_windows = platform_system == 'windows'
on_osx = platform_system == 'darwin'
on_linux = platform_system == 'linux'
on_msys_tainted_paths = on_windows \
and 'MSYS_NO_PATHCONV' not in os.environ \
and os.environ.get('MSYSTEM', '')[:4] in ('MSYS', 'MING')
# Takes ~200msec, so should not be called at import time
@lru_cache() # output should not change through life time of datalad process
def get_linux_distribution():
"""Compatibility wrapper for {platform,distro}.linux_distribution().
"""
if hasattr(platform, "linux_distribution"):
# Use deprecated (but faster) method if it's available.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
result = platform.linux_distribution()
else:
import distro # We require this for Python 3.8 and above.
result = distro.linux_distribution(full_distribution_name=False)
return result
# Those weren't used for any critical decision making, thus we just set them to None
# Use get_linux_distribution() directly where needed
linux_distribution_name = linux_distribution_release = None
# Maximal length of cmdline string
# Query the system and use hardcoded "knowledge" if None
# probably getconf ARG_MAX might not be available
# The last one would be the most conservative/Windows
CMD_MAX_ARG_HARDCODED = 2097152 if on_linux else 262144 if on_osx else 32767
try:
CMD_MAX_ARG = os.sysconf('SC_ARG_MAX')
assert CMD_MAX_ARG > 0
if CMD_MAX_ARG > CMD_MAX_ARG_HARDCODED * 1e6:
# workaround for some kind of a bug which comes up with python 3.4
# see https://github.com/datalad/datalad/issues/3150
# or on older CentOS with conda and python as new as 3.9
# see https://github.com/datalad/datalad/issues/5943
# TODO: let Yarik know that the world is a paradise now whenever 1e6
# is not large enough
CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED)
except Exception as exc:
# ATM (20181005) SC_ARG_MAX available only on POSIX systems
# so exception would be thrown e.g. on Windows, or
# somehow during Debian build for nd14.04 it is coming up with -1:
# https://github.com/datalad/datalad/issues/3015
CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED
lgr.debug(
"Failed to query or got useless SC_ARG_MAX sysconf, "
"will use hardcoded value: %s", exc)
# Even with all careful computations we do, due to necessity to account for
# environment and what not, we still could not figure out "exact" way to
# estimate it, but it was shown that 300k safety margin on linux was sufficient.
# https://github.com/datalad/datalad/pull/2977#issuecomment-436264710
# 300k is ~15%, so to be safe, and for paranoid us we will just use up to 50%
# of the length for "safety margin". We might probably still blow due to
# env vars, unicode, etc... so any hard limit imho is not a proper solution
CMD_MAX_ARG = int(0.5 * CMD_MAX_ARG)
lgr.debug(
"Maximal length of cmdline string (adjusted for safety margin): %d",
CMD_MAX_ARG)
#
# Little helpers
#
# `getargspec` has been deprecated in Python 3.
ArgSpecFake = collections.namedtuple(
"ArgSpecFake", ["args", "varargs", "keywords", "defaults"])
def getargspec(func, *, include_kwonlyargs=False):
"""Compat shim for getargspec deprecated in python 3.
The main difference from inspect.getargspec (and inspect.getfullargspec
for that matter) is that by using inspect.signature we are providing
correct args/defaults for functools.wraps'ed functions.
`include_kwonlyargs` option was added to centralize getting all args,
even the ones which are kwonly (follow the ``*,``).
For internal use and not advised for use in 3rd party code.
Please use inspect.signature directly.
"""
# We use signature, and not getfullargspec, because only signature properly
# "passes" args from a functools.wraps decorated function.
# Note: getfullargspec works Ok on wrapt-decorated functions
f_sign = inspect.signature(func)
# Loop through parameters and compose argspec
args4 = [[], None, None, {}]
# Collect all kwonlyargs into a dedicated dict - name: default
kwonlyargs = {}
# shortcuts
args, defaults = args4[0], args4[3]
P = inspect.Parameter
for p_name, p in f_sign.parameters.items():
if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD):
assert not kwonlyargs # yoh: must not come after kwonlyarg
args.append(p_name)
if p.default is not P.empty:
defaults[p_name] = p.default
elif p.kind == P.VAR_POSITIONAL:
args4[1] = p_name
elif p.kind == P.VAR_KEYWORD:
args4[2] = p_name
elif p.kind == P.KEYWORD_ONLY:
assert p.default is not P.empty
kwonlyargs[p_name] = p.default
if kwonlyargs:
if not include_kwonlyargs:
raise ValueError(
'Function has keyword-only parameters or annotations, either use '
'inspect.signature() API which can support them, or provide include_kwonlyargs=True '
'to this function'
)
else:
args.extend(list(kwonlyargs))
defaults.update(kwonlyargs)
# harmonize defaults to how original getargspec returned them -- just a tuple
args4[3] = None if not defaults else tuple(defaults.values())
return ArgSpecFake(*args4)
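# Illustrative example (added): because the implementation goes through
# inspect.signature, it sees through functools.wraps-decorated wrappers and
# reports the wrapped function's arguments. The expected output below follows
# from the ArgSpecFake namedtuple defined above.
#
#     >>> from functools import wraps
#     >>> def deco(f):
#     ...     @wraps(f)
#     ...     def wrapper(*args, **kwargs):
#     ...         return f(*args, **kwargs)
#     ...     return wrapper
#     >>> @deco
#     ... def fn(a, b=1, *rest, **kw):
#     ...     pass
#     >>> getargspec(fn)
#     ArgSpecFake(args=['a', 'b'], varargs='rest', keywords='kw', defaults=(1,))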
def any_re_search(regexes, value):
"""Return if any of regexes (list or str) searches successfully for value"""
for regex in ensure_tuple_or_list(regexes):
if re.search(regex, value):
return True
return False
def not_supported_on_windows(msg=None):
"""A little helper to be invoked to consistently fail whenever functionality is
not supported (yet) on Windows
"""
if on_windows:
raise NotImplementedError("This functionality is not yet implemented for Windows OS"
+ (": %s" % msg if msg else ""))
def get_home_envvars(new_home):
"""Return dict with env variables to be adjusted for a new HOME
Only variables found in current os.environ are adjusted.
Parameters
----------
new_home: str or Path
New home path, in native to OS "schema"
"""
new_home = str(new_home)
out = {'HOME': new_home}
if on_windows:
# requires special handling, since it has a number of relevant variables
# and also Python changed its behavior and started to respect USERPROFILE only
# since python 3.8: https://bugs.python.org/issue36264
out['USERPROFILE'] = new_home
out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home)
return {v: val for v, val in out.items() if v in os.environ}
def shortened_repr(value, l=30):
try:
if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__):
value_repr = repr(value)
if not value_repr.startswith('<') and len(value_repr) > l:
value_repr = "<<%s++%d chars++%s>>" % (
value_repr[:l - 16],
len(value_repr) - (l - 16 + 4),
value_repr[-4:]
)
            elif value_repr.startswith('<') and value_repr.endswith('>') and ' object at 0x' in value_repr:
raise ValueError("I hate those useless long reprs")
else:
raise ValueError("gimme class")
except Exception as e:
value_repr = "<%s>" % value.__class__.__name__.split('.')[-1]
return value_repr
def __auto_repr__(obj):
attr_names = tuple()
if hasattr(obj, '__dict__'):
attr_names += tuple(obj.__dict__.keys())
if hasattr(obj, '__slots__'):
attr_names += tuple(obj.__slots__)
items = []
for attr in sorted(set(attr_names)):
if attr.startswith('_'):
continue
value = getattr(obj, attr)
# TODO: should we add this feature to minimize some talktative reprs
# such as of URL?
#if value is None:
# continue
items.append("%s=%s" % (attr, shortened_repr(value)))
return "%s(%s)" % (obj.__class__.__name__, ', '.join(items))
def auto_repr(cls):
"""Decorator for a class to assign it an automagic quick and dirty __repr__
It uses public class attributes to prepare repr of a class
Original idea: http://stackoverflow.com/a/27799004/1265472
"""
cls.__repr__ = __auto_repr__
return cls
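# Illustrative example (added): public instance attributes end up in the repr,
# underscore-prefixed ones are skipped.
#
#     >>> @auto_repr
#     ... class Buckets:
#     ...     def __init__(self):
#     ...         self.one = 1
#     ...         self._hidden = 'skipped'
#     >>> Buckets()
#     Buckets(one=1)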
def _is_stream_tty(stream):
try:
# TODO: check on windows if hasattr check would work correctly and
# add value:
return stream.isatty()
except ValueError as exc:
# Who knows why it is a ValueError, but let's try to be specific
# If there is a problem with I/O - non-interactive, otherwise reraise
if "I/O" in str(exc):
return False
raise
def is_interactive():
"""Return True if all in/outs are open and tty.
Note that in a somewhat abnormal case where e.g. stdin is explicitly
closed, and any operation on it would raise a
`ValueError("I/O operation on closed file")` exception, this function
would just return False, since the session cannot be used interactively.
"""
return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr))
def get_ipython_shell():
"""Detect if running within IPython and returns its `ip` (shell) object
Returns None if not under ipython (no `get_ipython` function)
"""
try:
return get_ipython()
except NameError:
return None
def md5sum(filename):
"""Compute an MD5 sum for the given file
"""
from datalad.support.digests import Digester
return Digester(digests=['md5'])(filename)['md5']
# unused in -core
def sorted_files(path):
"""Return a (sorted) list of files under path
"""
return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files]
for r, d, files in os.walk(path)
if not '.git' in r], []))
_encoded_dirsep = r'\\' if on_windows else r'/'
_VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
_DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False):
"""Generator to find files matching regex
Parameters
----------
regex: basestring
exclude: basestring, optional
Matches to exclude
exclude_vcs:
If True, excludes commonly known VCS subdirectories. If string, used
as regex to exclude those files (regex: `%r`)
exclude_datalad:
If True, excludes files known to be datalad meta-data files (e.g. under
.datalad/ subdirectory) (regex: `%r`)
topdir: basestring, optional
Directory where to search
dirs: bool, optional
Whether to match directories as well as files
"""
for dirpath, dirnames, filenames in os.walk(topdir):
names = (dirnames + filenames) if dirs else filenames
# TODO: might want to uniformize on windows to use '/'
paths = (op.join(dirpath, name) for name in names)
for path in filter(re.compile(regex).search, paths):
path = path.rstrip(sep)
if exclude and re.search(exclude, path):
continue
if exclude_vcs and re.search(_VCS_REGEX, path):
continue
if exclude_datalad and re.search(_DATALAD_REGEX, path):
continue
yield path
find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX)
def expandpath(path, force_absolute=True):
"""Expand all variables and user handles in a path.
By default return an absolute path
"""
path = expandvars(expanduser(path))
if force_absolute:
path = abspath(path)
return path
def posix_relpath(path, start=None):
"""Behave like os.path.relpath, but always return POSIX paths...
on any platform."""
# join POSIX style
return posixpath.join(
# split and relpath native style
# python2.7 ntpath implementation of relpath cannot handle start=None
*split(
relpath(path, start=start if start is not None else '')))
def is_explicit_path(path):
"""Return whether a path explicitly points to a location
Any absolute path, or relative path starting with either '../' or
'./' is assumed to indicate a location on the filesystem. Any other
path format is not considered explicit."""
path = expandpath(path, force_absolute=False)
return isabs(path) \
or path.startswith(os.curdir + os.sep) \
or path.startswith(os.pardir + os.sep)
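# Illustrative example (added; POSIX path separators assumed):
#
#     >>> is_explicit_path('/tmp/ds')
#     True
#     >>> is_explicit_path('./ds')
#     True
#     >>> is_explicit_path('ds')
#     False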
# handle this dance once, and import pathlib from here
# in all other places
from pathlib import (
Path,
PurePath,
PurePosixPath,
)
def rotree(path, ro=True, chmod_files=True):
"""To make tree read-only or writable
Parameters
----------
path : string
Path to the tree/directory to chmod
ro : bool, optional
Whether to make it R/O (default) or RW
chmod_files : bool, optional
Whether to operate also on files (not just directories)
"""
if ro:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE)
else:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD)
for root, dirs, files in os.walk(path, followlinks=False):
if chmod_files:
for f in files:
fullf = op.join(root, f)
# might be the "broken" symlink which would fail to stat etc
if exists(fullf):
chmod(fullf)
chmod(root)
def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs):
"""To remove git-annex .git it is needed to make all files and directories writable again first
Parameters
----------
path: Path or str
Path to remove
chmod_files : string or bool, optional
Whether to make files writable also before removal. Usually it is just
a matter of directories to have write permissions.
If 'auto' it would chmod files on windows by default
children_only : bool, optional
If set, all files and subdirectories would be removed while the path
itself (must be a directory) would be preserved
`*args` :
`**kwargs` :
Passed into shutil.rmtree call
"""
# Give W permissions back only to directories, no need to bother with files
if chmod_files == 'auto':
chmod_files = on_windows
# TODO: yoh thinks that if we could quickly check our Flyweight for
# repos if any of them is under the path, and could call .precommit
# on those to possibly stop batched processes etc, we did not have
# to do it on case by case
# Check for open files
assert_no_open_files(path)
# TODO the whole thing should be reimplemented with pathlib, but for now
# at least accept Path
path = str(path)
if children_only:
if not isdir(path):
raise ValueError("Can remove children only of directories")
for p in os.listdir(path):
rmtree(op.join(path, p))
return
if not (islink(path) or not isdir(path)):
rotree(path, ro=False, chmod_files=chmod_files)
if on_windows:
# shutil fails to remove paths that exceed 260 characters on Windows machines
# that did not enable long path support. A workaround to remove long paths
# anyway is to prepend \\?\ to the path.
# https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
path = r'\\?\ '.strip() + path
_rmtree(path, *args, **kwargs)
else:
# just remove the symlink
unlink(path)
def rmdir(path, *args, **kwargs):
"""os.rmdir with our optional checking for open files"""
assert_no_open_files(path)
os.rmdir(path)
def get_open_files(path, log_open=False):
"""Get open files under a path
Note: This function is very slow on Windows.
Parameters
----------
path : str
File or directory to check for open files under
log_open : bool or int
If set - logger level to use
Returns
-------
dict
path : pid
"""
# Original idea: https://stackoverflow.com/a/11115521/1265472
import psutil
files = {}
# since the ones returned by psutil would not be aware of symlinks in the
# path we should also get realpath for path
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
path = str(Path(path).resolve().absolute())
for proc in psutil.process_iter():
try:
open_paths = [p.path for p in proc.open_files()] + [proc.cwd()]
for p in open_paths:
# note: could be done more efficiently so we do not
# renormalize path over and over again etc
if path_startswith(p, path):
files[p] = proc
# Catch a race condition where a process ends
# before we can examine its files
except psutil.NoSuchProcess:
pass
except psutil.AccessDenied:
pass
if files and log_open:
lgr.log(log_open, "Open files under %s: %s", path, files)
return files
_assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES')
if _assert_no_open_files_cfg:
def assert_no_open_files(path):
files = get_open_files(path, log_open=40)
if _assert_no_open_files_cfg == 'assert':
assert not files, "Got following files still open: %s" % ','.join(files)
elif files:
if _assert_no_open_files_cfg == 'pdb':
import pdb
pdb.set_trace()
elif _assert_no_open_files_cfg == 'epdb':
import epdb
epdb.serve()
pass
# otherwise we would just issue that error message in the log
else:
def assert_no_open_files(*args, **kwargs):
pass
def rmtemp(f, *args, **kwargs):
"""Wrapper to centralize removing of temp files so we could keep them around
It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP
environment variable is defined
"""
if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'):
if not os.path.lexists(f):
lgr.debug("Path %s does not exist, so can't be removed", f)
return
lgr.log(5, "Removing temp file: %s", f)
# Can also be a directory
if isdir(f):
rmtree(f, *args, **kwargs)
else:
unlink(f)
else:
lgr.info("Keeping temp file: %s", f)
def file_basename(name, return_ext=False):
"""
Strips up to 2 extensions, each of length up to 4 characters and starting with a letter
(not a digit), so we can get rid of .tar.gz etc
"""
bname = basename(name)
fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname)
if return_ext:
return fbname, bname[len(fbname) + 1:]
else:
return fbname
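# Illustrative sketch of the double-extension stripping:
#   >>> file_basename('/path/to/archive.tar.gz')
#   'archive'
#   >>> file_basename('archive.tar.gz', return_ext=True)
#   ('archive', 'tar.gz')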
# unused in -core
def escape_filename(filename):
"""Surround filename in "" and escape " in the filename
"""
filename = filename.replace('"', r'\"').replace('`', r'\`')
filename = '"%s"' % filename
return filename
# unused in -core
def encode_filename(filename):
"""Encode unicode filename
"""
if isinstance(filename, str):
return filename.encode(sys.getfilesystemencoding())
else:
return filename
# unused in -core
def decode_input(s):
"""Given input string/bytes, decode according to stdin codepage (or UTF-8)
if not defined
If fails -- issue warning and decode allowing for errors
being replaced
"""
if isinstance(s, str):
return s
else:
encoding = sys.stdin.encoding or 'UTF-8'
try:
return s.decode(encoding)
except UnicodeDecodeError as exc:
lgr.warning(
"Failed to decode input string using %s encoding. "
"Decoding allowing for errors", encoding)
return s.decode(encoding, errors='replace')
# unused in -core
if on_windows:
def lmtime(filepath, mtime):
"""Set mtime for files. On Windows a merely adapter to os.utime
"""
os.utime(filepath, (time.time(), mtime))
else:
def lmtime(filepath, mtime):
"""Set mtime for files, while not de-referencing symlinks.
To overcome absence of os.lutime
Works only on linux and OSX ATM
"""
from .cmd import WitlessRunner
# convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS]
smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime))
lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime)
WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath])
filepath = Path(filepath)
rfilepath = filepath.resolve()
if filepath.is_symlink() and rfilepath.exists():
# trust no one - also adjust the mtime of the target file
# since it seemed like downloading under OSX (was it using curl?)
# didn't bother with timestamps
lgr.log(3, "File is a symlink to %s Setting mtime for it to %s",
rfilepath, mtime)
os.utime(str(rfilepath), (time.time(), mtime))
# doesn't work on OSX
# Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath])
def ensure_tuple_or_list(obj):
"""Given an object, wrap into a tuple if not list or tuple
"""
if isinstance(obj, (list, tuple)):
return obj
return (obj,)
def ensure_iter(s, cls, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
cls: class
Which iterable class to ensure
copy: bool, optional
If correct iterable is passed, it would generate its shallow copy
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
if isinstance(s, cls):
return s if not copy else shallow_copy(s)
elif isinstance(s, str):
return cls((s,))
elif iterate and hasattr(s, '__iter__'):
return cls(s)
elif s is None:
return cls()
else:
return cls((s,))
def ensure_list(s, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
copy: bool, optional
If list is passed, it would generate a shallow copy of the list
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
return ensure_iter(s, list, copy=copy, iterate=iterate)
def ensure_list_from_str(s, sep='\n'):
"""Given a multiline string convert it to a list of return None if empty
Parameters
----------
s: str or list
"""
if not s:
return None
if isinstance(s, list):
return s
return s.split(sep)
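# Illustrative sketch:
#   >>> ensure_list_from_str('a\nb\nc')
#   ['a', 'b', 'c']
#   >>> ensure_list_from_str('')   # empty input -> None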
def ensure_dict_from_str(s, **kwargs):
"""Given a multiline string with key=value items convert it to a dictionary
Parameters
----------
s: str or dict
Returns None if input s is empty
"""
if not s:
return None
if isinstance(s, dict):
return s
out = {}
for value_str in ensure_list_from_str(s, **kwargs):
if '=' not in value_str:
raise ValueError("{} is not in key=value format".format(repr(value_str)))
k, v = value_str.split('=', 1)
if k in out:
err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v)
raise ValueError(err)
out[k] = v
return out
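# Illustrative sketch (values stay strings, no type coercion):
#   >>> ensure_dict_from_str('a=1\nb=2')
#   {'a': '1', 'b': '2'}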
def ensure_bytes(s, encoding='utf-8'):
"""Convert/encode unicode string to bytes.
If `s` isn't a string, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. "utf-8" is the default
"""
if not isinstance(s, str):
return s
return s.encode(encoding)
def ensure_unicode(s, encoding=None, confidence=None):
"""Convert/decode bytestring to unicode.
If `s` isn't a bytestring, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. If None, "utf-8" is tried, and then if not a valid
UTF-8, encoding will be guessed
confidence: float, optional
A value between 0 and 1, so if guessing of encoding is of lower than
specified confidence, ValueError is raised
"""
if not isinstance(s, bytes):
return s
if encoding is None:
# Figure out encoding, defaulting to 'utf-8' which is our common
# target in contemporary digital society
try:
return s.decode('utf-8')
except UnicodeDecodeError as exc:
lgr.debug("Failed to decode a string as utf-8: %s",
CapturedException(exc))
# And now we could try to guess
from chardet import detect
enc = detect(s)
denc = enc.get('encoding', None)
if denc:
denc_confidence = enc.get('confidence', 0)
if confidence is not None and denc_confidence < confidence:
raise ValueError(
"Failed to auto-detect encoding with high enough "
"confidence. Highest confidence was %s for %s"
% (denc_confidence, denc)
)
lgr.log(5, "Auto-detected encoding to be %s", denc)
return s.decode(denc)
else:
raise ValueError(
"Could not decode value as utf-8, or to guess its encoding: %s"
% repr(s)
)
else:
return s.decode(encoding)
def ensure_bool(s):
"""Convert value into boolean following convention for strings
to recognize on,True,yes as True, off,False,no as False
"""
if isinstance(s, str):
if s.isdigit():
return bool(int(s))
sl = s.lower()
if sl in {'y', 'yes', 'true', 'on'}:
return True
elif sl in {'n', 'no', 'false', 'off'}:
return False
else:
raise ValueError("Do not know how to treat %r as a boolean" % s)
return bool(s)
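# Illustrative sketch:
#   >>> ensure_bool('yes'), ensure_bool('off'), ensure_bool('1')
#   (True, False, True)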
def as_unicode(val, cast_types=object):
"""Given an arbitrary value, would try to obtain unicode value of it
For unicode it would return original value, for python2 str or python3
bytes it would use ensure_unicode, for None - an empty (unicode) string,
and for any other type (see `cast_types`) - would apply the unicode
constructor. If value is not an instance of `cast_types`, TypeError
is thrown
Parameters
----------
cast_types: type
Which types to cast to unicode by providing to constructor
"""
if val is None:
return u''
elif isinstance(val, str):
return val
elif isinstance(val, unicode_srctypes):
return ensure_unicode(val)
elif isinstance(val, cast_types):
return str(val)
else:
raise TypeError(
"Value %r is not of any of known or provided %s types"
% (val, cast_types))
def unique(seq, key=None, reverse=False):
"""Given a sequence return a list only with unique elements while maintaining order
This is the fastest solution. See
https://www.peterbe.com/plog/uniqifiers-benchmark
and
http://stackoverflow.com/a/480227/1265472
for more information.
Enhancement -- added ability to compare for uniqueness using a key function
Parameters
----------
seq:
Sequence to analyze
key: callable, optional
Function to call on each element so we could decide not on a full
element, but on its member etc
reverse: bool, optional
If True, uniqueness is checked in reverse order, so that later duplicates
take precedence over earlier ones
"""
seen = set()
seen_add = seen.add
trans = reversed if reverse else lambda x: x
if not key:
out = [x for x in trans(seq) if not (x in seen or seen_add(x))]
else:
# OPT: could be optimized, since key is called twice, but for our cases
# should be just as fine
out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))]
return out[::-1] if reverse else out
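# Illustrative sketch of the key= and reverse= behavior:
#   >>> unique([1, 2, 1, 3])
#   [1, 2, 3]
#   >>> unique([(1, 'a'), (2, 'b'), (1, 'c')], key=lambda x: x[0])
#   [(1, 'a'), (2, 'b')]
#   >>> unique([(1, 'a'), (2, 'b'), (1, 'c')], key=lambda x: x[0], reverse=True)
#   [(2, 'b'), (1, 'c')]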
def all_same(items):
"""Quick check if all items are the same.
Identical to a check like len(set(items)) == 1 but
should be more efficient while working on generators, since it would
return False as soon as any difference is detected, thus possibly avoiding
unnecessary evaluations
"""
first = True
first_item = None
for item in items:
if first:
first = False
first_item = item
else:
if item != first_item:
return False
# So we return False if was empty
return not first
def map_items(func, v):
"""A helper to apply `func` to all elements (keys and values) within dict
No type checking of values passed to func is done, so `func`
should be resilient to values which it should not handle
Initial usecase - apply_recursive(url_fragment, ensure_unicode)
"""
# map all elements within item
return v.__class__(
item.__class__(map(func, item))
for item in v.items()
)
def partition(items, predicate=bool):
"""Partition `items` by `predicate`.
Parameters
----------
items : iterable
predicate : callable
A function that will be mapped over each element in `items`. The
elements will be partitioned based on whether the return value is false or
true.
Returns
-------
A tuple with two generators, the first for 'false' items and the second for
'true' ones.
Notes
-----
Taken from Peter Otten's snippet posted at
https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html
"""
a, b = tee((predicate(item), item) for item in items)
return ((item for pred, item in a if not pred),
(item for pred, item in b if pred))
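# Illustrative sketch ('false' items come first, 'true' items second):
#   >>> odds, evens = partition(range(5), lambda x: x % 2 == 0)
#   >>> list(odds), list(evens)
#   ([1, 3], [0, 2, 4])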
def generate_chunks(container, size):
"""Given a container, generate chunks from it with size up to `size`
"""
# There could be a "smarter" solution but I think this would suffice
assert size > 0, "Size should be non-0 positive"
while container:
yield container[:size]
container = container[size:]
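# Illustrative sketch:
#   >>> list(generate_chunks([1, 2, 3, 4, 5], 2))
#   [[1, 2], [3, 4], [5]]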
def generate_file_chunks(files, cmd=None):
"""Given a list of files, generate chunks of them to avoid exceeding cmdline length
Parameters
----------
files: list of str
cmd: str or list of str, optional
Command to account for as well
"""
files = ensure_list(files)
cmd = ensure_list(cmd)
maxl = max(map(len, files)) if files else 0
chunk_size = max(
1, # should at least be 1. If blows then - not our fault
(CMD_MAX_ARG
- sum((len(x) + 3) for x in cmd)
- 4 # for '--' below
) // (maxl + 3) # +3 for possible quotes and a space
)
# TODO: additional treatment for "too many arguments"? although
# as https://github.com/datalad/datalad/issues/1883#issuecomment
# -436272758
# shows there seems to be no hardcoded limit on # of arguments,
# but may be we decide to go for smth like follow to be on safe side
# chunk_size = min(10240 - len(cmd), chunk_size)
file_chunks = generate_chunks(files, chunk_size)
return file_chunks
#
# Generators helpers
#
def saved_generator(gen):
"""Given a generator returns two generators, where 2nd one just replays
So the first one would be going through the generated items and 2nd one
would be yielding saved items
"""
saved = []
def gen1():
for x in gen: # iterating over original generator
saved.append(x)
yield x
def gen2():
for x in saved: # yielding saved entries
yield x
return gen1(), gen2()
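# Illustrative sketch (the second generator replays whatever the first produced):
#   >>> first, second = saved_generator(iter(range(3)))
#   >>> list(first), list(second)
#   ([0, 1, 2], [0, 1, 2])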
#
# Decorators
#
# Originally better_wraps was created to provide `wrapt`-based, instead of
# `functools.wraps` implementation to preserve the correct signature of the
# decorated function. By using inspect.signature in our getargspec, which
# works fine on `functools.wraps`ed functions, we removed this necessity.
better_wraps = wraps
# Borrowed from pandas
# Copyright: 2011-2014, Lambda Foundry, Inc. and PyData Development Team
# License: BSD-3
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, `*args`, `**kwargs`)"""
@better_wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable)
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
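# Illustrative sketch: a decorator wrapped with optional_args can be used bare or
# with arguments (the `tagged`, `a` and `b` names below are hypothetical):
#   @optional_args
#   def tagged(f, tag='x'):
#       f.tag = tag
#       return f
#
#   @tagged              # no arguments
#   def a(): pass
#
#   @tagged(tag='y')     # with a keyword argument
#   def b(): pass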
# TODO: just provide decorators for tempfile.mk* functions. This is ugly!
def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None):
"""Updates kwargs to be passed to tempfile. calls depending on env vars
"""
if tkwargs is None:
tkwargs_ = {}
else:
# operate on a copy of tkwargs to avoid any side-effects
tkwargs_ = tkwargs.copy()
# TODO: don't remember why I had this one originally
# if len(targs)<2 and \
if 'prefix' not in tkwargs_:
tkwargs_['prefix'] = '_'.join(
['datalad_temp'] +
([prefix] if prefix else []) +
([''] if (on_windows or not wrapped) else [wrapped.__name__]))
directory = os.environ.get('TMPDIR')
if directory and 'dir' not in tkwargs_:
tkwargs_['dir'] = directory
return tkwargs_
@optional_args
def line_profile(func):
"""Q&D helper to line profile the function and spit out stats
"""
import line_profiler
prof = line_profiler.LineProfiler()
@wraps(func)
def _wrap_line_profile(*args, **kwargs):
try:
pfunc = prof(func)
return pfunc(*args, **kwargs)
finally:
prof.print_stats()
return _wrap_line_profile
# unused in -core
@optional_args
def collect_method_callstats(func):
"""Figure out methods which call the method repeatedly on the same instance
Use case(s):
- .repo is expensive since it does all kinds of checks.
- .config is expensive transitively since it calls .repo each time
TODO:
- fancy one could look through the stack for the same id(self) to see if
that location is already in memo. That would hint to the cases where object
is not passed into underlying functions, causing them to redo the same work
over and over again
- ATM might flood with all "1 lines" calls which are not that informative.
The underlying possibly suboptimal use might be coming from their callers.
It might or not relate to the previous TODO
"""
from collections import defaultdict
import traceback
from time import time
memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count
# gross timing
times = []
toppath = dirname(__file__) + sep
@wraps(func)
def _wrap_collect_method_callstats(*args, **kwargs):
try:
self = args[0]
stack = traceback.extract_stack()
caller = stack[-2]
stack_sig = \
"{relpath}:{s.name}".format(
s=caller, relpath=relpath(caller.filename, toppath))
sig = (id(self), stack_sig)
# we will count based on id(self) + wherefrom
memo[sig][caller.lineno] += 1
t0 = time()
return func(*args, **kwargs)
finally:
times.append(time() - t0)
pass
def print_stats():
print("The cost of property {}:".format(func.__name__))
if not memo:
print("None since no calls")
return
# total count
counts = {k: sum(v.values()) for k,v in memo.items()}
total = sum(counts.values())
ids = {self_id for (self_id, _) in memo}
print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec"
.format(total, len(ids), len(memo), sum(times)))
# now we need to sort by value
for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True):
print(" {} {}: {} from {} lines"
.format(self_id, caller, count, len(memo[(self_id, caller)])))
# Upon total exit we print the stats
import atexit
atexit.register(print_stats)
return _wrap_collect_method_callstats
# Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe
def never_fail(f):
"""Assure that function never fails -- all exceptions are caught
Returns `None` if function fails internally.
"""
@wraps(f)
def wrapped_func(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
lgr.warning(
"DataLad internal failure while running %s: %r. "
"Please report at https://github.com/datalad/datalad/issues"
% (f, e)
)
if os.environ.get('DATALAD_ALLOW_FAIL', False):
return f
else:
return wrapped_func
#
# Context Managers
#
# unused in -core
@contextmanager
def nothing_cm():
"""Just a dummy cm to programmically switch context managers"""
yield
@contextmanager
def swallow_outputs():
"""Context manager to help consuming both stdout and stderr, and print()
stdout is available as cm.out and stderr as cm.err whenever cm is the
yielded context manager.
Internally uses temporary files to avoid the side-effects of swallowing
into a StringIO, which lacks .fileno.
Mocking print is necessary for some uses where sys.stdout was already bound
to the original sys.stdout, so mocking it later had no effect. Overriding
the print function had the desired effect
"""
class StringIOAdapter(object):
"""Little adapter to help getting out/err values
"""
def __init__(self):
kw = get_tempfile_kwargs({}, prefix="outputs")
self._out = NamedTemporaryFile(delete=False, mode='w', **kw)
self._err = NamedTemporaryFile(delete=False, mode='w', **kw)
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if not self._out.closed:
self._out.flush()
return self._read(self._out)
@property
def err(self):
if not self._err.closed:
self._err.flush()
return self._read(self._err)
@property
def handles(self):
return self._out, self._err
def cleanup(self):
self._out.close()
self._err.close()
out_name = self._out.name
err_name = self._err.name
from datalad import cfg
if cfg.getbool('datalad.log', 'outputs', default=False) \
and lgr.getEffectiveLevel() <= logging.DEBUG:
for s, sname in ((self.out, 'stdout'),
(self.err, 'stderr')):
if s:
pref = os.linesep + "| "
lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref))
else:
lgr.debug("Nothing was swallowed for %s", sname)
del self._out
del self._err
gc.collect()
rmtemp(out_name)
rmtemp(err_name)
def fake_print(*args, **kwargs):
sep = kwargs.pop('sep', ' ')
end = kwargs.pop('end', '\n')
file = kwargs.pop('file', sys.stdout)
if file in (oldout, olderr, sys.stdout, sys.stderr):
# we mock
try:
sys.stdout.write(sep.join(args) + end)
except UnicodeEncodeError as exc:
lgr.error(
"Failed to write to mocked stdout, got %s, continue as it "
"didn't happen", exc)
else:
# must be some other file -- leave it alone
oldprint(*args, sep=sep, end=end, file=file)
from .ui import ui
# preserve -- they could have been mocked already
oldprint = getattr(builtins, 'print')
oldout, olderr = sys.stdout, sys.stderr
olduiout = ui.out
adapter = StringIOAdapter()
try:
sys.stdout, sys.stderr = adapter.handles
ui.out = adapter.handles[0]
setattr(builtins, 'print', fake_print)
yield adapter
finally:
sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout
setattr(builtins, 'print', oldprint)
adapter.cleanup()
@contextmanager
def swallow_logs(new_level=None, file_=None, name='datalad'):
"""Context manager to consume all logs.
"""
lgr = logging.getLogger(name)
# Keep old settings
old_level = lgr.level
old_handlers = lgr.handlers
# Let's log everything into a string
# TODO: generalize with the one for swallow_outputs
class StringIOAdapter(object):
"""Little adapter to help getting out values
And to stay consistent with how swallow_outputs behaves
"""
def __init__(self):
if file_ is None:
kw = get_tempfile_kwargs({}, prefix="logs")
self._out = NamedTemporaryFile(mode='a', delete=False, **kw)
else:
out_file = file_
# PY3 requires clearly one or another. race condition possible
self._out = open(out_file, 'a')
self._final_out = None
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if self._final_out is not None:
# we closed and cleaned up already
return self._final_out
else:
self._out.flush()
return self._read(self._out)
@property
def lines(self):
return self.out.split('\n')
@property
def handle(self):
return self._out
def cleanup(self):
# store for access while object exists
self._final_out = self.out
self._out.close()
out_name = self._out.name
del self._out
gc.collect()
if not file_:
rmtemp(out_name)
def assert_logged(self, msg=None, level=None, regex=True, **kwargs):
"""Provide assertion on whether a msg was logged at a given level
If neither `msg` nor `level` provided, checks if anything was logged
at all.
Parameters
----------
msg: str, optional
Message (as a regular expression, if `regex`) to be searched.
If no msg provided, checks if anything was logged at a given level.
level: str, optional
String representing the level to be logged
regex: bool, optional
If False, regular `assert_in` is used
**kwargs: str, optional
Passed to `assert_re_in` or `assert_in`
"""
from datalad.tests.utils import assert_re_in
from datalad.tests.utils import assert_in
if regex:
match = r'\[%s\] ' % level if level else r"\[\S+\] "
else:
match = '[%s] ' % level if level else ''
if msg:
match += msg
if match:
(assert_re_in if regex else assert_in)(match, self.out, **kwargs)
else:
assert not kwargs, "no kwargs to be passed anywhere"
assert self.out, "Nothing was logged!?"
adapter = StringIOAdapter()
# TODO: it does store messages but without any formatting, i.e. even without
# date/time prefix etc. IMHO it should preserve formatting in case file_ is
# set
swallow_handler = logging.StreamHandler(adapter.handle)
# we want to log levelname so we could test against it
swallow_handler.setFormatter(
logging.Formatter('[%(levelname)s] %(message)s'))
swallow_handler.filters = sum([h.filters for h in old_handlers],
[])
lgr.handlers = [swallow_handler]
if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them!
lgr.handlers += old_handlers
if isinstance(new_level, str):
new_level = getattr(logging, new_level)
if new_level is not None:
lgr.setLevel(new_level)
try:
yield adapter
# TODO: if file_ and there was an exception -- most probably worth logging it?
# although ideally it should be the next log outside added to that file_ ... oh well
finally:
lgr.handlers = old_handlers
lgr.setLevel(old_level)
adapter.cleanup()
# TODO: May be melt in with swallow_logs at some point:
@contextmanager
def disable_logger(logger=None):
"""context manager to temporarily disable logging
This is to provide one of swallow_logs' purposes without unnecessarily
creating temp files (see gh-1865)
Parameters
----------
logger: Logger
Logger whose handlers will be ordered to not log anything.
Default: datalad's topmost Logger ('datalad')
"""
class NullFilter(logging.Filter):
"""Filter class to reject all records
"""
def filter(self, record):
return 0
if logger is None:
# default: all of datalad's logging:
logger = logging.getLogger('datalad')
filter_ = NullFilter(logger.name)
[h.addFilter(filter_) for h in logger.handlers]
try:
yield logger
finally:
[h.removeFilter(filter_) for h in logger.handlers]
#
# Additional handlers
#
_sys_excepthook = sys.excepthook # Just in case we ever need original one
def setup_exceptionhook(ipython=False):
"""Overloads default sys.excepthook with our exceptionhook handler.
If interactive, our exceptionhook handler will invoke
pdb.post_mortem; if not interactive, then invokes default handler.
"""
def _datalad_pdb_excepthook(type, value, tb):
import traceback
traceback.print_exception(type, value, tb)
print()
if is_interactive():
import pdb
pdb.post_mortem(tb)
if ipython:
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
# color_scheme='Linux',
call_pdb=is_interactive())
else:
sys.excepthook = _datalad_pdb_excepthook
def ensure_dir(*args):
"""Make sure directory exists.
Joins the list of arguments to an os-specific path to the desired
directory and creates it, if it not exists yet.
"""
dirname = op.join(*args)
if not exists(dirname):
os.makedirs(dirname)
return dirname
def updated(d, update):
"""Return a copy of the input with the 'update'
Primarily for updating dictionaries
"""
d = d.copy()
d.update(update)
return d
_pwd_mode = None
def _switch_to_getcwd(msg, *args):
global _pwd_mode
_pwd_mode = 'cwd'
lgr.debug(
msg + ". From now on will be returning os.getcwd(). Directory"
" symlinks in the paths will be resolved",
*args
)
# TODO: we might want to mitigate by going through all flyweighted
# repos and tuning up their .paths to be resolved?
def getpwd():
"""Try to return a CWD without dereferencing possible symlinks
This function will try to use PWD environment variable to provide a current
working directory, possibly with some directories along the path being
symlinks to other directories. Unfortunately, PWD is used/set only by the
shell, and functions such as `os.chdir` and `os.getcwd` do not use or modify
it, thus `os.getcwd()` returns the path with links dereferenced.
While returning current working directory based on PWD env variable we
verify that the directory is the same as `os.getcwd()` after resolving all
symlinks. If that verification fails, we fall back to always use
`os.getcwd()`.
Initial decision to either use PWD env variable or os.getcwd() is done upon
the first call of this function.
"""
global _pwd_mode
if _pwd_mode is None:
# we need to decide!
try:
pwd = os.environ['PWD']
if on_windows and pwd and pwd.startswith('/'):
# It should be a path from MSYS.
# - it might start with a drive letter or not
# - it seems to be "illegal" to have a single letter directories
# under / path, i.e. if created - they aren't found
# - 'ln -s' does not fail to create a "symlink" but it just
# copies!
# so we are not likely to need original PWD purpose on
# those systems
# Verdict:
_pwd_mode = 'cwd'
else:
_pwd_mode = 'PWD'
except KeyError:
_pwd_mode = 'cwd'
if _pwd_mode == 'cwd':
return os.getcwd()
elif _pwd_mode == 'PWD':
try:
cwd = os.getcwd()
except OSError as exc:
if "o such file" in str(exc):
# directory was removed but we promised to be robust and
# still report the path we might know since we are still in PWD
# mode
cwd = None
else:
raise
try:
pwd = os.environ['PWD']
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
pwd_real = str(Path(pwd).resolve().absolute())
# This logic would fail to catch the case where chdir did happen
# to the directory where current PWD is pointing to, e.g.
# $> ls -ld $PWD
# lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp//
# hopa:~/.tmp/tmp
# $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())'
# ('/home/yoh/.tmp/tmp', '/tmp')
# but I guess that should not be too harmful
if cwd is not None and pwd_real != cwd:
_switch_to_getcwd(
"realpath of PWD=%s is %s whenever os.getcwd()=%s",
pwd, pwd_real, cwd
)
return cwd
return pwd
except KeyError:
_switch_to_getcwd("PWD env variable is no longer available")
return cwd # Must not happen, but may be someone
# evil purges PWD from environ?
else:
raise RuntimeError(
"Must have not got here. "
"pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,)
)
class chpwd(object):
"""Wrapper around os.chdir which also adjusts environ['PWD']
The reason is that otherwise PWD is simply inherited from the shell
and we have no ability to assess directory path without dereferencing
symlinks.
If used as a context manager it allows temporarily changing the directory
to the given path
"""
def __init__(self, path, mkdir=False, logsuffix=''):
if path:
pwd = getpwd()
self._prev_pwd = pwd
else:
self._prev_pwd = None
return
if not isabs(path):
path = normpath(op.join(pwd, path))
if not os.path.exists(path) and mkdir:
self._mkdir = True
os.mkdir(path)
else:
self._mkdir = False
lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix)
os.chdir(path) # for grep people -- ok, to chdir here!
os.environ['PWD'] = str(path)
def __enter__(self):
# nothing more to do really, chdir was in the constructor
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if self._prev_pwd:
# Need to use self.__class__ so this instance, if the entire
# thing mocked during the test, still would use correct chpwd
self.__class__(self._prev_pwd, logsuffix="(coming back)")
def dlabspath(path, norm=False):
"""Symlinks-in-the-cwd aware abspath
os.path.abspath relies on os.getcwd() which would not know about symlinks
in the path
TODO: we might want to norm=True by default to match behavior of
os.path.abspath?
"""
if not isabs(path):
# if not absolute -- relative to pwd
path = op.join(getpwd(), path)
return normpath(path) if norm else path
def with_pathsep(path):
"""Little helper to guarantee that path ends with /"""
return path + sep if not path.endswith(sep) else path
def get_path_prefix(path, pwd=None):
"""Get path prefix (for current directory)
Returns `path` relative to `pwd` if one is underneath the other,
and otherwise the absolute path to `path`. If `pwd` is not specified - the
current directory is assumed
"""
pwd = pwd or getpwd()
path = dlabspath(path)
path_ = with_pathsep(path)
pwd_ = with_pathsep(pwd)
common = commonprefix((path_, pwd_))
if common.endswith(sep) and common in {path_, pwd_}:
# we are in subdir or above the path = use relative path
location_prefix = relpath(path, pwd)
# if benign "here" - cut off
if location_prefix in (curdir, curdir + sep):
location_prefix = ''
return location_prefix
else:
# just return absolute path
return path
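# Illustrative sketch (POSIX paths assumed):
#   >>> get_path_prefix('/a/b/c', '/a/b')   # path is under pwd
#   'c'
#   >>> get_path_prefix('/a/b', '/a/b/c')   # pwd is under path
#   '..'
#   >>> get_path_prefix('/d', '/a/b')       # unrelated -> absolute path
#   '/d'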
def _get_normalized_paths(path, prefix):
if isabs(path) != isabs(prefix):
raise ValueError("Both paths must either be absolute or relative. "
"Got %r and %r" % (path, prefix))
path = with_pathsep(path)
prefix = with_pathsep(prefix)
return path, prefix
def path_startswith(path, prefix):
"""Return True if path starts with prefix path
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return path.startswith(prefix)
def path_is_subpath(path, prefix):
"""Return True if path is a subpath of prefix
It will return False if path == prefix.
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return (len(prefix) < len(path)) and path.startswith(prefix)
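# Illustrative sketch:
#   >>> path_is_subpath('/a/b/c', '/a/b')
#   True
#   >>> path_is_subpath('/a/b', '/a/b')    # equal paths are not subpaths
#   False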
def knows_annex(path):
"""Returns whether at a given path there is information about an annex
It is just a thin wrapper around GitRepo.is_with_annex() classmethod
which also checks for `path` to exist first.
This includes actually present annexes, but also uninitialized ones, or
even the presence of a remote annex branch.
"""
from os.path import exists
if not exists(path):
lgr.debug("No annex: test path {0} doesn't exist".format(path))
return False
from datalad.support.gitrepo import GitRepo
return GitRepo(path, init=False, create=False).is_with_annex()
@contextmanager
def make_tempfile(content=None, wrapped=None, **tkwargs):
"""Helper class to provide a temporary file name and remove it at the end (context manager)
Parameters
----------
mkdir : bool, optional (default: False)
If True, temporary directory created using tempfile.mkdtemp()
content : str or bytes, optional
Content to be stored in the file created
wrapped : function, optional
If set, function name used to prefix temporary file name
`**tkwargs`:
All other arguments are passed into the call to tempfile.mk{,d}temp(),
and the resultant temporary filename is yielded by this context manager.
If no 'prefix' argument is provided, it will be
constructed using module and function names ('.' replaced with
'_').
To change the used directory without providing keyword argument 'dir' set
DATALAD_TESTS_TEMP_DIR.
Examples
--------
>>> from os.path import exists
>>> from datalad.utils import make_tempfile
>>> with make_tempfile() as fname:
... k = open(fname, 'w').write('silly test')
>>> assert not exists(fname) # was removed
>>> with make_tempfile(content="blah") as fname:
... assert open(fname).read() == "blah"
"""
if tkwargs.get('mkdir', None) and content is not None:
raise ValueError("mkdir=True while providing content makes no sense")
tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped)
# if DATALAD_TESTS_TEMP_DIR is set, use that as directory,
# let mktemp handle it otherwise. However, an explicitly provided
# dir=... will override this.
mkdir = tkwargs_.pop('mkdir', False)
filename = {False: tempfile.mktemp,
True: tempfile.mkdtemp}[mkdir](**tkwargs_)
# MIH: not clear to me why we need to perform this (possibly expensive)
# resolve. It was already part of the original implementation
# 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f
filename = Path(filename).resolve()
if content:
(filename.write_bytes
if isinstance(content, bytes)
else filename.write_text)(content)
# TODO globbing below can also be done with pathlib
filename = str(filename)
if __debug__:
lgr.debug(
'Created temporary %s named %s',
'directory' if mkdir else 'file',
filename)
try:
yield filename
finally:
# glob here for all files with the same name (-suffix)
# would be useful whenever we requested .img filename,
# and function creates .hdr as well
# MIH: this is undocumented behavior, and undesired in the general
# case. it should be made conditional and explicit
lsuffix = len(tkwargs_.get('suffix', ''))
filename_ = lsuffix and filename[:-lsuffix] or filename
filenames = glob.glob(filename_ + '*')
if len(filename_) < 3 or len(filenames) > 5:
# For paranoid yoh who stepped into this already once ;-)
lgr.warning("It is unlikely that it was intended to remove all"
" files matching %r. Skipping" % filename_)
return
for f in filenames:
try:
rmtemp(f)
except OSError: # pragma: no cover
pass
def _path_(*p):
"""Given a path in POSIX" notation, regenerate one in native to the env one"""
if on_windows:
return op.join(*map(lambda x: op.join(*x.split('/')), p))
else:
# Assume that all others as POSIX compliant so nothing to be done
return op.join(*p)
def get_timestamp_suffix(time_=None, prefix='-'):
"""Return a time stamp (full date and time up to second)
primarily to be used for generation of log files names
"""
args = []
if time_ is not None:
if isinstance(time_, int):
time_ = time.gmtime(time_)
args.append(time_)
return time.strftime(prefix + TIMESTAMP_FMT, *args)
# unused in -core
def get_logfilename(dspath, cmd='datalad'):
"""Return a filename to use for logging under a dataset/repository
The directory would be created if it doesn't exist, but dspath must exist
and be a directory
"""
assert(exists(dspath))
assert(isdir(dspath))
ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged
return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix())
def get_trace(edges, start, end, trace=None):
"""Return the trace/path to reach a node in a tree.
Parameters
----------
edges : sequence(2-tuple)
The tree given by a sequence of edges (parent, child) tuples. The
nodes can be identified by any value and data type that supports
the '==' operation.
start :
Identifier of the start node. Must be present as a value in the parent
location of an edge tuple in order to be found.
end :
Identifier of the target/end node. Must be present as a value in the child
location of an edge tuple in order to be found.
trace : list
Mostly useful for recursive calls, and used internally.
Returns
-------
None or list
Returns a list with the trace to the target (the start and the target
are not included in the trace, hence if start and end are directly connected
an empty list is returned), or None when no trace to the target can be found,
or start and end are identical.
"""
# the term trace is used to avoid confusion with a path in the sense
# of a filesystem path, but the analogy fits and nodes can be paths
if trace is None:
trace = []
if not edges:
raise ValueError("no edges given")
for cand in edges:
cand_super, cand_sub = cand
if cand_sub in trace:
# only DAGs, skip any cyclic traces
continue
if trace and cand_super != trace[-1]:
# only consider edges that lead off the end of the trace
continue
if not trace and cand_super != start:
# we got nothing yet, and this edge is not matching the start
continue
if cand_sub == end:
return trace
# dive into potential subnodes
cand_trace = get_trace(
edges,
start,
end,
trace + [cand_sub])
if cand_trace:
return cand_trace
return None
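# Illustrative sketch (intermediate nodes only; direct connections yield []):
#   >>> edges = [('a', 'b'), ('b', 'c'), ('c', 'd')]
#   >>> get_trace(edges, 'a', 'd')
#   ['b', 'c']
#   >>> get_trace(edges, 'a', 'b')
#   []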
def get_dataset_root(path):
"""Return the root of an existent dataset containing a given path
The root path is returned in the same absolute or relative form
as the input argument. If no associated dataset exists, or the
input path doesn't exist, None is returned.
If `path` is a symlink or something other than a directory,
the root dataset containing its parent directory will be reported.
If none can be found, and a symlink at `path` is pointing to a
dataset, `path` itself will be reported as the root.
Parameters
----------
path : Path-like
Returns
-------
str or None
"""
path = str(path)
suffix = '.git'
altered = None
if islink(path) or not isdir(path):
altered = path
path = dirname(path)
apath = abspath(path)
# while we can still go up
while split(apath)[1]:
if exists(op.join(path, suffix)):
return path
# new test path in the format we got it
path = normpath(op.join(path, os.pardir))
# no luck, next round
apath = abspath(path)
# if we applied dirname() at the top, we give it another go with
# the actual path, if it was itself a symlink, it could be the
# top-level dataset itself
if altered and exists(op.join(altered, suffix)):
return altered
return None
# ATM used in datalad_crawler extension, so do not remove yet
def try_multiple(ntrials, exception, base, f, *args, **kwargs):
"""Call f multiple times making exponentially growing delay between the calls"""
for trial in range(1, ntrials+1):
try:
return f(*args, **kwargs)
except exception as exc:
if trial == ntrials:
raise # just reraise on the last trial
t = base ** trial
lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
@optional_args
def try_multiple_dec(
f, ntrials=None, duration=0.1, exceptions=None, increment_type=None,
exceptions_filter=None,
logger=None,
):
"""Decorator to try function multiple times.
Main purpose is to decorate functions dealing with removal of files/directories
and which might need a few seconds to work correctly on Windows which takes
its time to release files/directories.
Parameters
----------
ntrials: int, optional
duration: float, optional
Seconds to sleep before retrying.
increment_type: {None, 'exponential'}
Note that if it is exponential, duration should typically be > 1.0
so it grows with higher power
exceptions: Exception or tuple of Exceptions, optional
Exception or a tuple of multiple exceptions, on which to retry
exceptions_filter: callable, optional
If provided, this function will be called with a caught exception
instance. If function returns True - we will re-try, if False - exception
will be re-raised without retrying.
logger: callable, optional
Logger to log upon failure. If not provided, will use stock logger
at the level of 5 (heavy debug).
"""
if not exceptions:
exceptions = (OSError, WindowsError, PermissionError) \
if on_windows else OSError
if not ntrials:
# Life goes fast on proper systems, no need to delay it much
ntrials = 100 if on_windows else 10
if logger is None:
def logger(*args, **kwargs):
return lgr.log(5, *args, **kwargs)
assert increment_type in {None, 'exponential'}
@wraps(f)
def _wrap_try_multiple_dec(*args, **kwargs):
t = duration
for trial in range(ntrials):
try:
return f(*args, **kwargs)
except exceptions as exc:
if exceptions_filter and not exceptions_filter(exc):
raise
if trial < ntrials - 1:
if increment_type == 'exponential':
t = duration ** (trial + 1)
logger(
"Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
else:
raise
return _wrap_try_multiple_dec
@try_multiple_dec
def unlink(f):
"""'Robust' unlink. Would try multiple times
On windows boxes there is evidence for a latency of more than a second
until a file is considered no longer "in-use".
WindowsError is not known on Linux, so if IOError or any other
exception is thrown and the except statement has WindowsError in it -- NameError;
also see gh-2533
"""
# Check for open files
assert_no_open_files(f)
return os.unlink(f)
@try_multiple_dec
def _rmtree(*args, **kwargs):
"""Just a helper to decorate shutil.rmtree.
The rmtree defined above does more and ideally should not itself be decorated,
since it has a recursive definition and does checks for open files inside etc --
that might be too expensive at runtime
"""
return shutil.rmtree(*args, **kwargs)
def slash_join(base, extension):
"""Join two strings with a '/', avoiding duplicate slashes
If any of the strings is None the other is returned as is.
"""
if extension is None:
return base
if base is None:
return extension
return '/'.join(
(base.rstrip('/'),
extension.lstrip('/')))
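# Illustrative sketch:
#   >>> slash_join('http://example.com/', '/api/v1')
#   'http://example.com/api/v1'
#   >>> slash_join(None, 'api')
#   'api'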
#
# IO Helpers
#
# unused in -core
def open_r_encdetect(fname, readahead=1000):
"""Return a file object in read mode with auto-detected encoding
This is helpful when dealing with files of unknown encoding.
Parameters
----------
readahead: int, optional
How many bytes to read for guessing the encoding type. If
negative - full file will be read
"""
from chardet import detect
import io
# read some bytes from the file
with open(fname, 'rb') as f:
head = f.read(readahead)
enc = detect(head)
denc = enc.get('encoding', None)
lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)",
denc,
fname,
enc.get('confidence', 'unknown'))
return io.open(fname, encoding=denc)
def read_file(fname, decode=True):
"""A helper to read file passing content via ensure_unicode
Parameters
----------
decode: bool, optional
if False, no ensure_unicode and file content returned as bytes
"""
with open(fname, 'rb') as f:
content = f.read()
return ensure_unicode(content) if decode else content
def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs):
"""A generator of dict records from a CSV/TSV
Automatically guesses the encoding for each record to convert to UTF-8
Parameters
----------
fname: str
Filename
dialect: str, optional
Dialect to specify to csv.reader. If not specified -- guessed from
the file, if fails to guess, "excel-tab" is assumed
readahead: int, optional
How many bytes to read from the file to guess the type
**kwargs
Passed to `csv.reader`
"""
import csv
if dialect is None:
with open(fname) as tsvfile:
# add robustness, use a sniffer
try:
dialect = csv.Sniffer().sniff(tsvfile.read(readahead))
except Exception as exc:
lgr.warning(
'Could not determine file-format, assuming TSV: %s',
CapturedException(exc)
)
dialect = 'excel-tab'
kw = dict(encoding='utf-8')
with open(fname, 'r', **kw) as tsvfile:
# csv.py doesn't do Unicode; encode temporarily as UTF-8:
csv_reader = csv.reader(
tsvfile,
dialect=dialect,
**kwargs
)
header = None
for row in csv_reader:
# decode UTF-8 back to Unicode, cell by cell:
row_unicode = map(ensure_unicode, row)
if header is None:
header = list(row_unicode)
else:
yield dict(zip(header, row_unicode))
def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug):
"""Helper to import a list of modules without failing if N/A
Parameters
----------
modnames: list of str
List of module names to import
pkg: str
Package under which to import
msg: str, optional
Message template for .format() to log at DEBUG level if import fails.
Keys {module} and {package} will be provided and ': {exception}' appended
log: callable, optional
Logger call to use for logging messages
"""
from importlib import import_module
_globals = globals()
mods_loaded = []
if pkg and not pkg in sys.modules:
# with python 3.5.1 (ok with 3.5.5) somehow kept running into
# Failed to import dlsub1: Parent module 'dltestm1' not loaded
# while running the test. Preloading pkg resolved the issue
import_module(pkg)
for modname in modnames:
try:
_globals[modname] = mod = import_module(
'.{}'.format(modname),
pkg)
mods_loaded.append(mod)
except Exception as exc:
from datalad.support.exceptions import CapturedException
ce = CapturedException(exc)
log((msg + ': {exception}').format(
module=modname, package=pkg, exception=ce.message))
return mods_loaded
def import_module_from_file(modpath, pkg=None, log=lgr.debug):
"""Import provided module given a path
TODO:
- RF/make use of it in pipeline.py which has similar logic
- join with import_modules above?
Parameters
----------
pkg: module, optional
If provided, and modpath is under pkg.__path__, relative import will be
used
"""
assert(modpath.endswith('.py')) # for now just for .py files
log("Importing %s" % modpath)
modname = basename(modpath)[:-3]
relmodpath = None
if pkg:
for pkgpath in pkg.__path__:
if path_is_subpath(modpath, pkgpath):
# for now relying on having .py extension -- assertion above
relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.')
break
try:
if relmodpath:
from importlib import import_module
mod = import_module(relmodpath, pkg.__name__)
else:
dirname_ = dirname(modpath)
try:
sys.path.insert(0, dirname_)
mod = __import__(modname, level=0)
finally:
if dirname_ in sys.path:
sys.path.pop(sys.path.index(dirname_))
else:
log("Expected path %s to be within sys.path, but it was gone!" % dirname_)
except Exception as e:
raise RuntimeError(
"Failed to import module from %s" % modpath) from e
return mod
def get_encoding_info():
"""Return a dictionary with various encoding/locale information"""
import sys, locale
from collections import OrderedDict
return OrderedDict([
('default', sys.getdefaultencoding()),
('filesystem', sys.getfilesystemencoding()),
('locale.prefered', locale.getpreferredencoding()),
])
def get_envvars_info():
from collections import OrderedDict
envs = []
for var, val in os.environ.items():
if (
var.startswith('PYTHON') or
var.startswith('LC_') or
var.startswith('GIT_') or
var in ('LANG', 'LANGUAGE', 'PATH')
):
envs.append((var, val))
return OrderedDict(envs)
# This class is modified from Snakemake (v5.1.4)
class SequenceFormatter(string.Formatter):
"""string.Formatter subclass with special behavior for sequences.
This class delegates formatting of individual elements to another
formatter object. Non-list objects are formatted by calling the
delegate formatter's "format_field" method. List-like objects
(list, tuple, set, frozenset) are formatted by formatting each
element of the list according to the specified format spec using
the delegate formatter and then joining the resulting strings with
a separator (space by default).
"""
def __init__(self, separator=" ", element_formatter=string.Formatter(),
*args, **kwargs):
self.separator = separator
self.element_formatter = element_formatter
def format_element(self, elem, format_spec):
"""Format a single element
For sequences, this is called once for each element in a
sequence. For anything else, it is called on the entire
object. It is intended to be overridden in subclasses.
"""
return self.element_formatter.format_field(elem, format_spec)
def format_field(self, value, format_spec):
if isinstance(value, (list, tuple, set, frozenset)):
return self.separator.join(self.format_element(v, format_spec)
for v in value)
else:
return self.format_element(value, format_spec)
# TODO: eventually we might want to make use of attr module
class File(object):
"""Helper for a file entry in the create_tree/@with_tree
It allows defining additional settings for entries
"""
def __init__(self, name, executable=False):
"""
Parameters
----------
name : str
Name of the file
executable: bool, optional
Make it executable
"""
self.name = name
self.executable = executable
def __str__(self):
return self.name
def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True):
"""Given an archive `name`, create under `path` with specified `load` tree
"""
from datalad.support.archives import compress_files
dirname = file_basename(name)
full_dirname = op.join(path, dirname)
os.makedirs(full_dirname)
create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir)
# create archive
if archives_leading_dir:
compress_files([dirname], name, path=path, overwrite=overwrite)
else:
compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))),
op.join(pardir, name),
path=op.join(path, dirname),
overwrite=overwrite)
# remove original tree
rmtree(full_dirname)
def create_tree(path, tree, archives_leading_dir=True, remove_existing=False):
"""Given a list of tuples (name, load) create such a tree
if load is a tuple itself -- that would create either a subtree or an archive
with that content and place it into the tree if name ends with .tar.gz
"""
lgr.log(5, "Creating a tree under %s", path)
if not exists(path):
os.makedirs(path)
if isinstance(tree, dict):
tree = tree.items()
for file_, load in tree:
if isinstance(file_, File):
executable = file_.executable
name = file_.name
else:
executable = False
name = file_
full_name = op.join(path, name)
if remove_existing and lexists(full_name):
rmtree(full_name, chmod_files=True)
if isinstance(load, (tuple, list, dict)):
if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'):
create_tree_archive(
path, name, load,
archives_leading_dir=archives_leading_dir)
else:
create_tree(
full_name, load,
archives_leading_dir=archives_leading_dir,
remove_existing=remove_existing)
else:
open_func = open
if full_name.endswith('.gz'):
open_func = gzip.open
elif full_name.split('.')[-1] in ('xz', 'lzma'):
import lzma
open_func = lzma.open
with open_func(full_name, "wb") as f:
f.write(ensure_bytes(load, 'utf-8'))
if executable:
os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC)
def get_suggestions_msg(values, known, sep="\n "):
"""Return a formatted string with suggestions for values given the known ones
"""
import difflib
suggestions = []
for value in ensure_list(values): # might not want to do it if we change presentation below
suggestions += difflib.get_close_matches(value, known)
suggestions = unique(suggestions)
msg = "Did you mean any of these?"
if suggestions:
if '\n' in sep:
# if separator includes new line - we add entire separator right away
msg += sep
else:
msg += ' '
return msg + "%s\n" % sep.join(suggestions)
return ''
def bytes2human(n, format='%(value).1f %(symbol)sB'):
"""
Convert n bytes into a human readable string based on format.
symbols can be either "customary", "customary_ext", "iec" or "iec_ext",
see: http://goo.gl/kTQMs
>>> from datalad.utils import bytes2human
>>> bytes2human(1)
'1.0 B'
>>> bytes2human(1024)
'1.0 KB'
>>> bytes2human(1048576)
'1.0 MB'
>>> bytes2human(1099511627776127398123789121)
'909.5 YB'
>>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
'9.8 K/sec'
>>> # precision can be adjusted by playing with %f operator
>>> bytes2human(10000, format="%(value).5f %(symbol)s")
'9.76562 K'
Taken from: http://goo.gl/kTQMs and subsequently simplified
Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
License: MIT
"""
n = int(n)
if n < 0:
raise ValueError("n < 0")
symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i + 1) * 10
for symbol in reversed(symbols[1:]):
if n >= prefix[symbol]:
value = float(n) / prefix[symbol]
return format % locals()
return format % dict(symbol=symbols[0], value=n)
def quote_cmdlinearg(arg):
"""Perform platform-appropriate argument quoting"""
# https://stackoverflow.com/a/15262019
return '"{}"'.format(
arg.replace('"', '""')
) if on_windows else shlex_quote(arg)
def guard_for_format(arg):
"""Replace { and } with {{ and }}
To be used in cases where arg is not expected to contain user-provided
.format() placeholders, but 'arg' might become a part
of a composite passed to .format(), e.g. via 'Run'
"""
return arg.replace('{', '{{').replace('}', '}}')
def join_cmdline(args):
"""Join command line args into a string using quote_cmdlinearg
"""
return ' '.join(map(quote_cmdlinearg, args))
def split_cmdline(s):
"""Perform platform-appropriate command line splitting.
Identical to `shlex.split()` on non-windows platforms.
Modified from https://stackoverflow.com/a/35900070
"""
if not on_windows:
return shlex_split(s)
# the rest is for windows
RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)'''
args = []
accu = None # collects pieces of one arg
for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s):
if word:
pass # most frequent
elif esc:
word = esc[1]
elif white or pipe:
if accu is not None:
args.append(accu)
if pipe:
args.append(pipe)
accu = None
continue
elif fail:
raise ValueError("invalid or incomplete shell string")
elif qs:
word = qs.replace('\\"', '"').replace('\\\\', '\\')
if platform == 0:
word = word.replace('""', '"')
else:
word = qss # may be even empty; must be last
accu = (accu or '') + word
if accu is not None:
args.append(accu)
return args
def get_wrapped_class(wrapped):
"""Determine the command class a wrapped __call__ belongs to"""
mod = sys.modules[wrapped.__module__]
command_class_name = wrapped.__qualname__.split('.')[-2]
_func_class = mod.__dict__[command_class_name]
lgr.debug("Determined class of decorated function: %s", _func_class)
return _func_class
def _make_assure_kludge(fn):
old_name = fn.__name__.replace("ensure", "assure")
@wraps(fn)
def compat_fn(*args, **kwargs):
warnings.warn(
"{} is deprecated and will be removed in a future release. "
"Use {} instead."
.format(old_name, fn.__name__),
DeprecationWarning)
return fn(*args, **kwargs)
compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead."
.format(fn.__name__))
return compat_fn
assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list)
assure_iter = _make_assure_kludge(ensure_iter)
assure_list = _make_assure_kludge(ensure_list)
assure_list_from_str = _make_assure_kludge(ensure_list_from_str)
assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str)
assure_bytes = _make_assure_kludge(ensure_bytes)
assure_unicode = _make_assure_kludge(ensure_unicode)
assure_bool = _make_assure_kludge(ensure_bool)
assure_dir = _make_assure_kludge(ensure_dir)
lgr.log(5, "Done importing datalad.utils")
def check_symlink_capability(path, target):
"""helper similar to datalad.tests.utils.has_symlink_capability
However, for use in a datalad command context, we shouldn't
assume to be able to write to tmpfile and also not import a whole lot from
datalad's test machinery. Finally, we want to know, whether we can create a
symlink at a specific location, not just somewhere. Therefore use
arbitrary path to test-build a symlink and delete afterwards. Suitable
location can therefore be determined by high lever code.
Parameters
----------
path: Path
target: Path
Returns
-------
bool
"""
try:
target.touch()
path.symlink_to(target)
return True
except Exception:
return False
finally:
if path.exists():
path.unlink()
if target.exists():
target.unlink()
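# Illustration (not part of datalad): probe a specific location with two
# scratch paths that do not exist yet (the directory below is hypothetical)
#   from pathlib import Path
#   workdir = Path('/tmp/somejob')
#   ok = check_symlink_capability(workdir / '.probe_link',
#                                 workdir / '.probe_target')
# Both probe paths are cleaned up again in the `finally` block above.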
path: datalad/utils.py | size: 87,210
nl_text:
Helper for a file entry in the create_tree/@with_tree
It allows to define additional settings for entries
Filter class to reject all records
string.Formatter subclass with special behavior for sequences.
This class delegates formatting of individual elements to another
formatter object. Non-list objects are formatted by calling the
delegate formatter's "format_field" method. List-like objects
(list, tuple, set, frozenset) are formatted by formatting each
element of the list according to the specified format spec using
the delegate formatter and then joining the resulting strings with
a separator (space by default).
Little adapter to help getting out/err values
Little adapter to help getting out values
And to stay consistent with how swallow_outputs behaves
Wrapper around os.chdir which also adjusts environ['PWD']
The reason is that otherwise PWD is simply inherited from the shell
and we have no ability to assess directory path without dereferencing
symlinks.
If used as a context manager it allows to temporarily change directory
to the given path
Parameters
----------
name : str
Name of the file
executable: bool, optional
Make it executable
Given a path in "POSIX" notation, regenerate one native to the env
Just a helper to decorate shutil.rmtree.
rmtree defined above does more and ideally should not itself be decorated,
since it has a recursive definition and checks for open files inside, etc. --
that might be too expensive at runtime
Quick check if all items are the same.
Identical to a check like len(set(items)) == 1 but
should be more efficient when working on generators, since it would
return False as soon as any difference is detected, thus possibly avoiding
unnecessary evaluations
Return if any of regexes (list or str) searches successfully for value
Given an arbitrary value, would try to obtain unicode value of it
For unicode it would return original value, for python2 str or python3
bytes it would use ensure_unicode, for None - an empty (unicode) string,
and for any other type (see `cast_types`) - would apply the unicode
constructor. If value is not an instance of `cast_types`, TypeError
is thrown
Parameters
----------
cast_types: type
Which types to cast to unicode by providing to constructor
Provide assertion on whether a msg was logged at a given level
If neither `msg` nor `level` provided, checks if anything was logged
at all.
Parameters
----------
msg: str, optional
Message (as a regular expression, if `regex`) to be searched.
If no msg provided, checks if anything was logged at a given level.
level: str, optional
String representing the level to be logged
regex: bool, optional
If False, regular `assert_in` is used
**kwargs: str, optional
Passed to `assert_re_in` or `assert_in`
Decorator for a class to assign it an automagic quick and dirty __repr__
It uses public class attributes to prepare repr of a class
Original idea: http://stackoverflow.com/a/27799004/1265472
Convert n bytes into a human readable string based on format.
symbols can be either "customary", "customary_ext", "iec" or "iec_ext",
see: http://goo.gl/kTQMs
>>> from datalad.utils import bytes2human
>>> bytes2human(1)
'1.0 B'
>>> bytes2human(1024)
'1.0 KB'
>>> bytes2human(1048576)
'1.0 MB'
>>> bytes2human(1099511627776127398123789121)
'909.5 YB'
>>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
'9.8 K/sec'
>>> # precision can be adjusted by playing with %f operator
>>> bytes2human(10000, format="%(value).5f %(symbol)s")
'9.76562 K'
Taken from: http://goo.gl/kTQMs and subsequently simplified
Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
License: MIT
helper similar to datalad.tests.utils.has_symlink_capability
However, for use in a datalad command context, we shouldn't
assume to be able to write to tmpfile and also not import a whole lot from
datalad's test machinery. Finally, we want to know, whether we can create a
symlink at a specific location, not just somewhere. Therefore use
arbitrary path to test-build a symlink and delete afterwards. Suitable
location can therefore be determined by higher-level code.
Parameters
----------
path: Path
target: Path
Returns
-------
bool
Figure out methods which call the method repeatedly on the same instance
Use case(s):
- .repo is expensive since does all kinds of checks.
- .config is expensive transitively since it calls .repo each time
TODO:
- fancy one could look through the stack for the same id(self) to see if
that location is already in memo. That would hint to the cases where object
is not passed into underlying functions, causing them to redo the same work
over and over again
- ATM might flood with all "1 lines" calls which are not that informative.
The underlying possibly suboptimal use might be coming from their callers.
It might or not relate to the previous TODO
Given a list of tuples (name, load) create such a tree
if load is a tuple itself -- that would create either a subtree or an archive
with that content and place it into the tree if name ends with .tar.gz
Given an archive `name`, create under `path` with specified `load` tree
Given input string/bytes, decode according to the stdin codepage (or UTF-8
if not defined).
If that fails -- issue a warning and decode allowing for errors to be
replaced.
context manager to temporarily disable logging
This is to provide one of swallow_logs' purposes without unnecessarily
creating temp files (see gh-1865)
Parameters
----------
logger: Logger
Logger whose handlers will be ordered to not log anything.
Default: datalad's topmost Logger ('datalad')
Symlinks-in-the-cwd aware abspath
os.path.abspath relies on os.getcwd() which would not know about symlinks
in the path
TODO: we might want to norm=True by default to match behavior of
os.path.abspath?
Encode unicode filename
Convert value into boolean following convention for strings
to recognize on,True,yes as True, off,False,no as False
Convert/encode unicode string to bytes.
If `s` isn't a string, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. "utf-8" is the default
Given a multiline string with key=value items convert it to a dictionary
Parameters
----------
s: str or dict
Returns None if input s is empty
Make sure directory exists.
Joins the list of arguments into an os-specific path to the desired
directory and creates it if it does not exist yet.
Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
cls: class
Which iterable class to ensure
copy: bool, optional
If correct iterable is passed, it would generate its shallow copy
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
copy: bool, optional
If list is passed, it would generate a shallow copy of the list
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
Given a multiline string, convert it to a list (of lines); return None if empty
Parameters
----------
s: str or list
Given an object, wrap into a tuple if not list or tuple
Convert/decode bytestring to unicode.
If `s` isn't a bytestring, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. If None, "utf-8" is tried, and then if not a valid
UTF-8, encoding will be guessed
confidence: float, optional
A value between 0 and 1, so if guessing of encoding is of lower than
specified confidence, ValueError is raised
Surround filename in "" and escape " in the filename
Expand all variables and user handles in a path.
By default return an absolute path
Strips up to 2 extensions, each up to 4 characters long and starting with a
letter (not a digit), so we could get rid of .tar.gz etc
Generator to find files matching regex
Parameters
----------
regex: basestring
exclude: basestring, optional
Matches to exclude
exclude_vcs:
If True, excludes commonly known VCS subdirectories. If string, used
as regex to exclude those files (regex: `%r`)
exclude_datalad:
If True, excludes files known to be datalad meta-data files (e.g. under
.datalad/ subdirectory) (regex: `%r`)
topdir: basestring, optional
Directory where to search
dirs: bool, optional
Whether to match directories as well as files
Format a single element
For sequences, this is called once for each element in a
sequence. For anything else, it is called on the entire
object. It is intended to be overridden in subclasses.
Given a container, generate chunks from it with size up to `size`
Given a list of files, generate chunks of them to avoid exceeding cmdline length
Parameters
----------
files: list of str
cmd: str or list of str, optional
Command to account for as well
Return the root of an existent dataset containing a given path
The root path is returned in the same absolute or relative form
as the input argument. If no associated dataset exists, or the
input path doesn't exist, None is returned.
If `path` is a symlink or something other than a directory, the root
dataset containing its parent directory will be reported. If none can be
found, and a symlink at `path` is pointing to a dataset, `path` itself
will be reported as the root.
Parameters
----------
path : Path-like
Returns
-------
str or None
Return a dictionary with various encoding/locale information
Return dict with env variables to be adjusted for a new HOME
Only variables found in current os.environ are adjusted.
Parameters
----------
new_home: str or Path
New home path, in native to OS "schema"
Detect if running within IPython and returns its `ip` (shell) object
Returns None if not under ipython (no `get_ipython` function)
Compatibility wrapper for {platform,distro}.linux_distribution().
Return a filename to use for logging under a dataset/repository
The directory will be created if it doesn't exist, but dspath must exist
and be a directory.
Get open files under a path
Note: This function is very slow on Windows.
Parameters
----------
path : str
File or directory to check for open files under
log_open : bool or int
If set - logger level to use
Returns
-------
dict
path : pid
Get path prefix (for current directory)
Returns the relative path to topdir if we are under topdir, otherwise the
absolute path to topdir. If `pwd` is not specified, the current directory
is assumed.
Return a formatted string with suggestions for values given the known ones
Updates kwargs to be passed to tempfile. calls depending on env vars
Return a time stamp (full date and time up to second)
primarily to be used for generation of log files names
Return the trace/path to reach a node in a tree.
Parameters
----------
edges : sequence(2-tuple)
The tree given by a sequence of edges (parent, child) tuples. The
nodes can be identified by any value and data type that supports
the '==' operation.
start :
Identifier of the start node. Must be present as a value in the parent
location of an edge tuple in order to be found.
end :
Identifier of the target/end node. Must be present as a value in the child
location of an edge tuple in order to be found.
trace : list
Mostly useful for recursive calls, and used internally.
Returns
-------
None or list
Returns a list with the trace to the target (the starts and the target
are not included in the trace, hence if start and end are directly connected
an empty list is returned), or None when no trace to the target can be found,
or start and end are identical.
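A minimal sketch of the contract described above (the name `find_trace` and
the depth-first strategy are illustrative, not datalad's implementation):
def find_trace(edges, start, end, trace=None):
    # walk (parent, child) edges; return intermediate nodes between start
    # and end, [] if directly connected, None if unreachable or identical
    trace = trace or []
    if start == end and not trace:
        return None
    for parent, child in edges:
        if parent != start or child == start or child in trace:
            continue
        if child == end:
            return trace
        sub = find_trace(edges, child, end, trace + [child])
        if sub is not None:
            return sub
    return None
# find_trace([('a', 'b'), ('b', 'c')], 'a', 'c') -> ['b']
# find_trace([('a', 'b')], 'a', 'b')             -> []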
Determine the command class a wrapped __call__ belongs to
Compat shim for getargspec deprecated in python 3.
The main difference from inspect.getargspec (and inspect.getfullargspec
for that matter) is that by using inspect.signature we are providing
correct args/defaults for functools.wraps'ed functions.
`include_kwonlyargs` option was added to centralize getting all args,
even the ones which are kwonly (follow the ``*,``).
For internal use and not advised for use in 3rd party code.
Please use inspect.signature directly.
Try to return a CWD without dereferencing possible symlinks
This function will try to use PWD environment variable to provide a current
working directory, possibly with some directories along the path being
symlinks to other directories. Unfortunately, PWD is used/set only by the
shell, and functions such as `os.chdir` and `os.getcwd` neither use nor
modify it; thus `os.getcwd()` returns the path with links dereferenced.
While returning current working directory based on PWD env variable we
verify that the directory is the same as `os.getcwd()` after resolving all
symlinks. If that verification fails, we fall back to always use
`os.getcwd()`.
Initial decision to either use PWD env variable or os.getcwd() is done upon
the first call of this function.
Replace { and } with {{ and }}
To be used in cases if arg is not expected to have provided
by user .format() placeholders, but 'arg' might become a part
of a composite passed to .format(), e.g. via 'Run'
Import provided module given a path
TODO:
- RF/make use of it in pipeline.py which has similar logic
- join with import_modules above?
Parameters
----------
pkg: module, optional
If provided, and modpath is under pkg.__path__, relative import will be
used
Helper to import a list of modules without failing if N/A
Parameters
----------
modnames: list of str
List of module names to import
pkg: str
Package under which to import
msg: str, optional
Message template for .format() to log at DEBUG level if import fails.
Keys {module} and {package} will be provided and ': {exception}' appended
log: callable, optional
Logger call to use for logging messages
Return whether a path explicitly points to a location
Any absolute path, or relative path starting with either '../' or
'./' is assumed to indicate a location on the filesystem. Any other
path format is not considered explicit.
Return True if all in/outs are open and tty.
Note that in a somewhat abnormal case where e.g. stdin is explicitly
closed, and any operation on it would raise a
`ValueError("I/O operation on closed file")` exception, this function
would just return False, since the session cannot be used interactively.
Join command line args into a string using quote_cmdlinearg
Returns whether at a given path there is information about an annex
It is just a thin wrapper around GitRepo.is_with_annex() classmethod
which also checks for `path` to exist first.
This includes actually present annexes, but also uninitialized ones, or
even the presence of a remote annex branch.
Q&D helper to line profile the function and spit out stats
Set mtime for files. On Windows this is merely an adapter to os.utime
Set mtime for files, while not de-referencing symlinks.
To overcome absence of os.lutime
Works only on linux and OSX ATM
Helper class to provide a temporary file name and remove it at the end (context manager)
Parameters
----------
mkdir : bool, optional (default: False)
If True, temporary directory created using tempfile.mkdtemp()
content : str or bytes, optional
Content to be stored in the file created
wrapped : function, optional
If set, function name used to prefix temporary file name
`**tkwargs`:
All other arguments are passed into the call to tempfile.mk{,d}temp(),
and resultant temporary filename is passed as the first argument into
the function t. If no 'prefix' argument is provided, it will be
constructed using module and function names ('.' replaced with
'_').
To change the used directory without providing keyword argument 'dir' set
DATALAD_TESTS_TEMP_DIR.
Examples
--------
>>> from os.path import exists
>>> from datalad.utils import make_tempfile
>>> with make_tempfile() as fname:
... k = open(fname, 'w').write('silly test')
>>> assert not exists(fname) # was removed
>>> with make_tempfile(content="blah") as fname:
... assert open(fname).read() == "blah"
A helper to apply `func` to all elements (keys and values) within dict
No type checking of values passed to func is done, so `func`
should be resilient to values which it should not handle
Initial usecase - apply_recursive(url_fragment, ensure_unicode)
Compute an MD5 sum for the given file
Assure that function never fails -- all exceptions are caught
Returns `None` if function fails internally.
A little helper to be invoked to consistently fail whenever functionality is
not supported (yet) on Windows
Just a dummy cm to programmically switch context managers
Return a file object in read mode with auto-detected encoding
This is helpful when dealing with files of unknown encoding.
Parameters
----------
readahead: int, optional
How many bytes to read for guessing the encoding type. If
negative - full file will be read
allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, `*args`, `**kwargs`)
Partition `items` by `predicate`.
Parameters
----------
items : iterable
predicate : callable
A function that will be mapped over each element in `items`. The
elements will be partitioned based on whether the return value is false or
true.
Returns
-------
A tuple with two generators, the first for 'false' items and the second for
'true' ones.
Notes
-----
Taken from Peter Otten's snippet posted at
https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html
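A minimal sketch matching the contract described above (two lazily evaluated
generators, 'false' items first), in the spirit of the referenced snippet:
from itertools import tee

def partition(items, predicate=bool):
    # pair each item with its predicate value once, then fan out into two
    # independent iterators so each output generator can filter lazily
    a, b = tee((predicate(item), item) for item in items)
    return ((item for pred, item in a if not pred),
            (item for pred, item in b if pred))
# falses, trues = partition(range(5), lambda x: x % 2)
# list(falses) -> [0, 2, 4]; list(trues) -> [1, 3]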
Return True if path is a subpath of prefix
It will return False if path == prefix.
Parameters
----------
path: str
prefix: str
Return True if path starts with prefix path
Parameters
----------
path: str
prefix: str
Behave like os.path.relpath, but always return POSIX paths...
on any platform.
Perform platform-appropriate argument quoting
A generator of dict records from a CSV/TSV
Automatically guesses the encoding for each record to convert to UTF-8
Parameters
----------
fname: str
Filename
dialect: str, optional
Dialect to specify to csv.reader. If not specified -- guessed from
the file, if fails to guess, "excel-tab" is assumed
readahead: int, optional
How many bytes to read from the file to guess the type
**kwargs
Passed to `csv.reader`
A helper to read file passing content via ensure_unicode
Parameters
----------
decode: bool, optional
if False, no ensure_unicode and file content returned as bytes
os.rmdir with our optional checking for open files
Wrapper to centralize removing of temp files so we could keep them around
It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP
environment variable is defined
To remove git-annex .git it is needed to make all files and directories writable again first
Parameters
----------
path: Path or str
Path to remove
chmod_files : string or bool, optional
Whether to make files writable also before removal. Usually it is just
a matter of directories to have write permissions.
If 'auto' it would chmod files on windows by default
children_only : bool, optional
If set, all files and subdirectories would be removed while the path
itself (must be a directory) would be preserved
`*args` :
`**kwargs` :
Passed into shutil.rmtree call
To make tree read-only or writable
Parameters
----------
path : string
Path to the tree/directory to chmod
ro : bool, optional
Whether to make it R/O (default) or RW
chmod_files : bool, optional
Whether to operate also on files (not just directories)
Given a generator, return two generators, where the 2nd one just replays.
So the first one would be going through the generated items and the 2nd one
would be yielding the saved items.
Overloads default sys.excepthook with our exceptionhook handler.
If interactive, our exceptionhook handler will invoke
pdb.post_mortem; if not interactive, then invokes default handler.
Join two strings with a '/', avoiding duplicate slashes
If any of the strings is None the other is returned as is.
Return a (sorted) list of files under path
Perform platform-appropriate command line splitting.
Identical to `shlex.split()` on non-windows platforms.
Modified from https://stackoverflow.com/a/35900070
Context manager to consume all logs.
Context manager to help consuming both stdout and stderr, and print()
stdout is available as cm.out and stderr as cm.err whenever cm is the
yielded context manager.
Internally uses temporary files to guarantee absent side-effects of swallowing
into StringIO which lacks .fileno.
print mocking is necessary for some uses where sys.stdout was already bound
to original sys.stdout, thus mocking it later had no effect. Overriding
print function had desired effect
Call f multiple times making exponentially growing delay between the calls
Decorator to try function multiple times.
Main purpose is to decorate functions dealing with removal of files/directories
and which might need a few seconds to work correctly on Windows which takes
its time to release files/directories.
Parameters
----------
ntrials: int, optional
duration: float, optional
Seconds to sleep before retrying.
increment_type: {None, 'exponential'}
Note that if it is exponential, duration should typically be > 1.0
so it grows with higher power
exceptions: Exception or tuple of Exceptions, optional
Exception or a tuple of multiple exceptions, on which to retry
exceptions_filter: callable, optional
If provided, this function will be called with a caught exception
instance. If function returns True - we will re-try, if False - exception
will be re-raised without retrying.
logger: callable, optional
Logger to log upon failure. If not provided, will use stock logger
at the level of 5 (heavy debug).
Given a sequence return a list only with unique elements while maintaining order
This is the fastest solution. See
https://www.peterbe.com/plog/uniqifiers-benchmark
and
http://stackoverflow.com/a/480227/1265472
for more information.
Enhancement -- added ability to compare for uniqueness using a key function
Parameters
----------
seq:
Sequence to analyze
key: callable, optional
Function to call on each element so we could decide not on a full
element, but on its member etc
reverse: bool, optional
If True, uniqueness checked in the reverse order, so that the later ones
will take the order
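A minimal sketch of the behaviour described above (the name `unique_ordered`
is illustrative; datalad's own `unique` may differ in details):
def unique_ordered(seq, key=None, reverse=False):
    items = list(seq)
    if reverse:
        items = items[::-1]          # later duplicates win
    seen = set()
    out = []
    for item in items:
        k = item if key is None else key(item)
        if k not in seen:            # O(1) membership test
            seen.add(k)
            out.append(item)
    return out[::-1] if reverse else out
# unique_ordered([1, 2, 1, 3])                   -> [1, 2, 3]
# unique_ordered(['a', 'B', 'A'], key=str.lower) -> ['a', 'B']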
'Robust' unlink. Would try multiple times.
On Windows boxes there is evidence for a latency of more than a second
until a file is considered no longer "in-use".
WindowsError is not known on Linux, so if an except statement referencing
WindowsError is hit by an IOError or any other exception, a NameError
results instead; also see gh-2533.
Return a copy of the input with the 'update'
Primarily for updating dictionaries
Little helper to guarantee that path ends with /
(Remaining extracted text: inline comments from datalad/utils.py run together
by the extraction; they cover module-level constants and command-line length
limits, getargspec/decorator/context-manager helpers, temp-file utilities,
Windows path and PWD handling, and retry/IO helpers.)
nl_size: 34,521 | nl_language: en | nl_language_score: 0.81922
"""
ORY Keto
A cloud native access control server providing best-practice patterns (RBAC, ABAC, ACL, AWS IAM Policies, Kubernetes Roles, ...) via REST APIs. # noqa: E501
The version of the OpenAPI document: v0.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from ory_keto_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from ory_keto_client.model.delete_ory_access_control_policy_internal_server_error_body import DeleteOryAccessControlPolicyInternalServerErrorBody
globals()['DeleteOryAccessControlPolicyInternalServerErrorBody'] = DeleteOryAccessControlPolicyInternalServerErrorBody
class DeleteOryAccessControlPolicyInternalServerError(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'payload': (DeleteOryAccessControlPolicyInternalServerErrorBody,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'payload': 'Payload', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""DeleteOryAccessControlPolicyInternalServerError - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
payload (DeleteOryAccessControlPolicyInternalServerErrorBody): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
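# Hypothetical usage sketch (not generated code): construct the model and
# inspect its wire mapping; the optional `payload` kwarg must be a
# DeleteOryAccessControlPolicyInternalServerErrorBody instance.
#   from ory_keto_client.model.delete_ory_access_control_policy_internal_server_error import (
#       DeleteOryAccessControlPolicyInternalServerError)
#   err = DeleteOryAccessControlPolicyInternalServerError()
#   print(err.attribute_map)   # {'payload': 'Payload'}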
path: clients/keto/python/ory_keto_client/model/delete_ory_access_control_policy_internal_server_error.py | size: 7,071
nl_text: (auto-extracted docstrings; duplicates of the module and class docstrings in the code above)
nl_size: 3,783 | nl_language: en | nl_language_score: 0.771459
from io import BytesIO
from uniborg import util
from telethon import types
from telethon.errors import PhotoInvalidDimensionsError
from telethon.tl.functions.messages import SendMediaRequest
@borg.on(util.admin_cmd(r"^\.i$"))
async def on_file_to_photo(event):
await event.delete()
target = await event.get_reply_message()
try:
image = target.media.document
except AttributeError:
return
if not image.mime_type.startswith('image/'):
return # This isn't an image
if image.mime_type == 'image/webp':
return # Telegram doesn't let you directly send stickers as photos
if image.size > 10 * 1024 * 1024:
return # We'd get PhotoSaveFileInvalidError otherwise
file = await borg.download_media(target, file=BytesIO())
file.seek(0)
img = await borg.upload_file(file)
img.name = 'image.png'
try:
await borg(SendMediaRequest(
peer=await event.get_input_chat(),
media=types.InputMediaUploadedPhoto(img),
message=target.message,
entities=target.entities,
reply_to_msg_id=target.id
))
except PhotoInvalidDimensionsError:
return
path: stdplugins/file to img.py | size: 1,199
nl_text: (auto-extracted inline comments; duplicates of the comments in the code above)
nl_size: 122 | nl_language: en | nl_language_score: 0.927613
#!/bin/python
import json
import re
import sys
from datetime import datetime
import dateutil.parser
from dateutil.tz import tzutc
from six.moves import range
from mtools.util.pattern import json2pattern
class DateTimeEncoder(json.JSONEncoder):
"""Custom datetime encoder for json output."""
def default(self, obj):
if isinstance(obj, datetime):
return obj.isoformat()
return json.JSONEncoder.default(self, obj)
class LogEvent(object):
"""
Extract information from log line and store properties/variables.
line_str: the original line string
split_tokens: a list of string tokens after splitting line_str using
whitespace as split points
datetime: a datetime object for the logevent. For logfiles created with
version 2.4+, it also contains micro-seconds
duration: the duration of a timed operation in ms
thread: the thread name (e.g. "conn1234") as string
operation: insert, update, remove, query, command, getmore, None
namespace: the namespace of the operation, or None
command: the type of command, if the operation was a "command"
pattern: the query pattern for queries, updates, counts, etc
...
Certain operations also add the number of affected/scanned documents.
If applicable, the following variables are also set, otherwise the
default is None: nscanned, ntoreturn, nreturned, ninserted, nupdated
For performance reason, all fields are evaluated lazily upon first
request.
"""
# datetime handler for json encoding
dthandler = lambda obj: obj.isoformat() if isinstance(obj,
datetime) else None
weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
'Oct', 'Nov', 'Dec']
log_operations = ['query', 'insert', 'update', 'remove', 'getmore',
'command']
log_levels = ['D', 'F', 'E', 'W', 'I', 'U']
log_components = ['-', 'ACCESS', 'COMMAND', 'CONTROL', 'GEO', 'INDEX',
'NETWORK', 'QUERY', 'REPL', 'SHARDING', 'STORAGE',
'JOURNAL', 'WRITE', 'TOTAL']
def __init__(self, doc_or_str):
self._year_rollover = False
if isinstance(doc_or_str, bytes):
doc_or_str = doc_or_str.decode("utf-8")
if isinstance(doc_or_str, str) or (sys.version_info.major == 2 and
isinstance(doc_or_str, unicode)):
# create from string, remove line breaks at end of _line_str
self.from_string = True
self._line_str = doc_or_str.rstrip()
self._profile_doc = None
self._reset()
else:
self.from_string = False
self._profile_doc = doc_or_str
# docs don't need to be parsed lazily, they are fast
self._parse_document()
def _reset(self):
self._split_tokens_calculated = False
self._split_tokens = None
self._duration_calculated = False
self._duration = None
self._datetime_calculated = False
self._datetime = None
self._datetime_nextpos = None
self._datetime_format = None
self._datetime_str = ''
self._thread_calculated = False
self._thread = None
self._operation_calculated = False
self._operation = None
self._namespace = None
self._pattern = None
self._sort_pattern = None
self._command_calculated = False
self._command = None
self._counters_calculated = False
# TODO: refactor from the legacy names to modern
# (eg: nscanned => keysExamined). Currently _extract_counters()
# maps newer property names into legacy equivalents for
# broader log file support.
self._nscanned = None # keysExamined
self._nscannedObjects = None # docsExamined
self._ntoreturn = None
self._nupdated = None # nModified
self._nreturned = None # nReturned or nMatched (updates)
self._ninserted = None # nInserted
self._ndeleted = None # nDeleted
self._numYields = None
self._planSummary = None
self._writeConflicts = None
self._r = None
self._w = None
self._conn = None
self._level_calculated = False
self._level = None
self._component = None
self.merge_marker_str = ''
def set_line_str(self, line_str):
"""
Set line_str.
Line_str is only writeable if LogEvent was created from a string,
not from a system.profile documents.
"""
if not self.from_string:
raise ValueError("can't set line_str for LogEvent created from "
"system.profile documents.")
if line_str != self._line_str:
self._line_str = line_str.rstrip()
self._reset()
def get_line_str(self):
"""Return line_str depending on source, logfile or system.profile."""
if self.from_string:
return ' '.join([s for s in [self.merge_marker_str,
self._datetime_str,
self._line_str] if s])
else:
return ' '.join([s for s in [self._datetime_str,
self._line_str] if s])
line_str = property(get_line_str, set_line_str)
@property
def split_tokens(self):
"""Split string into tokens (lazy)."""
if not self._split_tokens_calculated:
# split into items (whitespace split)
self._split_tokens = self._line_str.split()
self._split_tokens_calculated = True
return self._split_tokens
@property
def duration(self):
"""Calculate duration if available (lazy)."""
if not self._duration_calculated:
self._duration_calculated = True
# split_tokens = self.split_tokens
line_str = self.line_str
if (line_str
and line_str.endswith('ms')
and 'Scheduled new oplog query' not in line_str):
try:
# find duration from end
space_pos = line_str.rfind(" ")
if space_pos == -1:
return
self._duration = int(line_str[line_str.rfind(" ") +
1:-2].replace(',', ''))
except ValueError:
self._duration = None
elif "flushing" in self.line_str:
matchobj = re.search(r'flushing mmaps took (\d+)ms',
self.line_str)
if matchobj:
self._duration = int(matchobj.group(1))
return self._duration
@property
def datetime(self):
"""Extract datetime if available (lazy)."""
if not self._datetime_calculated:
self._datetime_calculated = True
# if no datetime after 10 tokens, break to avoid parsing
# very long lines
split_tokens = self.split_tokens[:10]
for offs in range(len(split_tokens)):
dt = self._match_datetime_pattern(split_tokens[offs:offs + 4])
if dt:
self._datetime = dt
self._datetime_nextpos = offs
if self._datetime_format.startswith("iso8601"):
self._datetime_nextpos += 1
else:
self._datetime_nextpos += 4
# separate datetime str and linestr
self._line_str = (' '.join(self.split_tokens
[self._datetime_nextpos:]))
if self.level:
self._datetime_nextpos += 2
self._reformat_timestamp(self._datetime_format)
break
return self._datetime
@property
def datetime_format(self):
if not self._datetime_calculated:
_ = self.datetime
return self._datetime_format
@property
def datetime_nextpos(self):
if self._datetime_nextpos is None and not self._datetime_calculated:
_ = self.datetime
return self._datetime_nextpos
def set_datetime_hint(self, format, nextpos, rollover):
self._datetime_format = format
self._datetime_nextpos = nextpos
self._year_rollover = rollover
# Fast check if timestamp format changed.
# If it has, trigger datetime evaluation.
if format.startswith('ctime'):
if (len(self.split_tokens) < 4 or
self.split_tokens[self._datetime_nextpos - 4] not in
self.weekdays):
_ = self.datetime
return False
return True
else:
if len(self.split_tokens) == 0:
# empty line, no need to parse datetime
self._datetime_calculated = True
return False
try:
if not (self.split_tokens[self._datetime_nextpos - 1][0]
.isdigit()):
# not the timestamp format that was hinted
_ = self.datetime
return False
except Exception:
pass
return True
def _match_datetime_pattern(self, tokens):
"""
Match the datetime pattern at the beginning of the token list.
There are several formats that this method needs to understand
and distinguish between (see MongoDB's SERVER-7965):
ctime-pre2.4 Wed Dec 31 19:00:00
ctime Wed Dec 31 19:00:00.000
iso8601-utc 1970-01-01T00:00:00.000Z
iso8601-local 1969-12-31T19:00:00.000+0500
"""
# first check: less than 4 tokens can't be ctime
assume_iso8601_format = len(tokens) < 4
# check for ctime-pre-2.4 or ctime format
if not assume_iso8601_format:
weekday, month, day, time = tokens[:4]
if (len(tokens) < 4 or (weekday not in self.weekdays) or
(month not in self.months) or not day.isdigit()):
assume_iso8601_format = True
if assume_iso8601_format:
# sanity check, because the dateutil parser could interpret
# any numbers as a valid date
if not re.match(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{3}',
tokens[0]):
return None
# convinced that this is a ISO-8601 format, the dateutil parser
# will do the rest
dt = dateutil.parser.parse(tokens[0])
self._datetime_format = "iso8601-utc" \
if tokens[0].endswith('Z') else "iso8601-local"
else:
# assume current year unless self.year_rollover
# is set (from LogFile)
year = datetime.now().year
dt = dateutil.parser.parse(' '.join(tokens[: 4]),
default=datetime(year, 1, 1))
if dt.tzinfo is None:
dt = dt.replace(tzinfo=tzutc())
if self._year_rollover and dt > self._year_rollover:
dt = dt.replace(year=year - 1)
self._datetime_format = "ctime" \
if '.' in tokens[3] else "ctime-pre2.4"
return dt
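# Illustration (not part of mtools): a ctime line with milliseconds such as
#   "Wed Dec 31 19:00:00.123 [conn1234] query test.foo ... 120ms"
# takes the ctime branch above, while an iso8601 line such as
#   "2017-06-01T12:00:00.000Z I COMMAND [conn5] command admin.$cmd ..."
# is classified as "iso8601-utc" (trailing 'Z') vs "iso8601-local" otherwise.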
@property
def thread(self):
"""Extract thread name if available (lazy)."""
if not self._thread_calculated:
self._thread_calculated = True
split_tokens = self.split_tokens
if not self.datetime_nextpos:
return None
if len(split_tokens) <= self.datetime_nextpos:
return None
connection_token = split_tokens[self.datetime_nextpos]
match = re.match(r'^\[([^\]]*)\]$', connection_token)
if match:
self._thread = match.group(1)
if self._thread is not None:
if self._thread in ['initandlisten', 'mongosMain']:
if len(split_tokens) >= 5 and split_tokens[-5][0] == '#':
self._conn = 'conn' + split_tokens[-5][1:]
elif self._thread.startswith('conn'):
self._conn = self._thread
return self._thread
@property
def conn(self):
r"""
Extract conn name if available (lazy).
This value is None for all lines except the log lines related to
connections, that is lines matching '\[conn[0-9]+\]' or
'\[(initandlisten|mongosMain)\] .* connection accepted from'.
"""
self.thread
return self._conn
@property
def operation(self):
"""
Extract operation if available (lazy).
Operations: query, insert, update, remove, getmore, command
"""
if not self._operation_calculated:
self._operation_calculated = True
self._extract_operation_and_namespace()
return self._operation
@property
def namespace(self):
"""Extract namespace if available (lazy)."""
if not self._operation_calculated:
self._operation_calculated = True
self._extract_operation_and_namespace()
return self._namespace
def _extract_operation_and_namespace(self):
"""
Helper method to extract both operation and namespace from a logevent.
It doesn't make sense to only extract one as they appear back to back
in the token list.
"""
split_tokens = self.split_tokens
if not self._datetime_nextpos:
# force evaluation of thread to get access to datetime_offset and
# to protect from changes due to line truncation.
_ = self.thread
if not self._datetime_nextpos or (len(split_tokens) <=
self._datetime_nextpos + 2):
return
op = split_tokens[self._datetime_nextpos + 1].lower()
if op == 'warning:':
# check if this log line got truncated
if ("warning: log line attempted" in self._line_str and
"over max size" in self._line_str):
self._datetime_nextpos = split_tokens.index('...')
op = split_tokens[self._datetime_nextpos + 1]
else:
# unknown warning, bail out
return
if op in self.log_operations:
self._operation = op
self._namespace = split_tokens[self._datetime_nextpos + 2]
@property
def pattern(self):
"""Extract query pattern from operations."""
if not self._pattern:
# trigger evaluation of operation
if (self.operation in ['query', 'getmore', 'update', 'remove'] or
self.command in ['count', 'findandmodify']):
self._pattern = self._find_pattern('query: ')
elif self.command == 'find':
self._pattern = self._find_pattern('filter: ')
return self._pattern
@property
def sort_pattern(self):
"""Extract query pattern from operations."""
if not self._sort_pattern:
# trigger evaluation of operation
if self.operation in ['query', 'getmore']:
self._sort_pattern = self._find_pattern('orderby: ')
return self._sort_pattern
@property
def command(self):
"""Extract query pattern from operations."""
if not self._command_calculated:
self._command_calculated = True
if self.operation == 'command':
try:
command_idx = self.split_tokens.index('command:')
command = self.split_tokens[command_idx + 1]
if command == '{':
# workaround for <= 2.2 log files,
# where command was not listed separately
command = self.split_tokens[command_idx + 2][:-1]
self._command = command.lower()
except ValueError:
pass
return self._command
@property
def nscanned(self):
"""Extract nscanned or keysExamined counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nscanned
@property
def nscannedObjects(self):
"""
Extract counters if available (lazy).
Looks for nscannedObjects or docsExamined.
"""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nscannedObjects
@property
def ntoreturn(self):
"""Extract ntoreturn counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._ntoreturn
@property
def writeConflicts(self):
"""Extract ntoreturn counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._writeConflicts
@property
def nreturned(self):
"""
Extract counters if available (lazy).
Looks for nreturned, nReturned, or nMatched counter.
"""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nreturned
@property
def ninserted(self):
"""Extract ninserted or nInserted counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._ninserted
@property
def ndeleted(self):
"""Extract ndeleted or nDeleted counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._ndeleted
@property
def nupdated(self):
"""Extract nupdated or nModified counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nupdated
@property
def numYields(self):
"""Extract numYields counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._numYields
@property
def planSummary(self):
"""Extract numYields counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._planSummary
@property
def r(self):
"""Extract read lock (r) counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._r
@property
def w(self):
"""Extract write lock (w) counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._w
def _extract_counters(self):
"""Extract counters like nscanned and nreturned from the logevent."""
# extract counters (if present)
counters = ['nscanned', 'nscannedObjects', 'ntoreturn', 'nreturned',
'ninserted', 'nupdated', 'ndeleted', 'r', 'w', 'numYields',
'planSummary', 'writeConflicts', 'keyUpdates']
# TODO: refactor mtools to use current counter names throughout
# Transitionary hack: mapping of current names into prior equivalents
counter_equiv = {
'docsExamined': 'nscannedObjects',
'keysExamined': 'nscanned',
'nDeleted': 'ndeleted',
'nInserted': 'ninserted',
'nMatched': 'nreturned',
'nModified': 'nupdated'
}
counters.extend(counter_equiv.keys())
split_tokens = self.split_tokens
# trigger operation evaluation to get access to offset
if self.operation:
for t, token in enumerate(split_tokens[self.datetime_nextpos +
2:]):
for counter in counters:
if token.startswith('%s:' % counter):
try:
# Remap counter to standard name, if applicable
counter = counter_equiv.get(counter, counter)
vars(self)['_' + counter] = int((token.split(':')
[-1]).replace(',',
''))
except ValueError:
# see if this is a pre-2.5.2 numYields with space
# in between (e.g. "numYields: 2")
# https://jira.mongodb.org/browse/SERVER-10101
if (counter == 'numYields' and
token.startswith('numYields')):
try:
self._numYields = int((split_tokens[t + 1 + self.datetime_nextpos + 2]).replace(',', ''))
                                except (IndexError, ValueError):
pass
if (counter == 'planSummary' and
token.startswith('planSummary')):
try:
self._planSummary = split_tokens[t + 1 + self.datetime_nextpos + 2]
                                except (IndexError, ValueError):
pass
# token not parsable, skip
break
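    # Editor's note (hedged illustration, not in the original source): for a
    # fragment like "... keysExamined:100 docsExamined:50 nreturned:10
    # numYields:1 120ms", this loop sets _nscanned=100, _nscannedObjects=50,
    # _nreturned=10 and _numYields=1; keysExamined/docsExamined are remapped
    # to the legacy names through counter_equiv above.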
@property
def level(self):
"""Extract log level if available (lazy)."""
if not self._level_calculated:
self._level_calculated = True
self._extract_level()
return self._level
@property
def component(self):
"""Extract log component if available (lazy)."""
self.level
return self._component
def _extract_level(self):
"""Extract level and component if available (lazy)."""
if self._level is None:
split_tokens = self.split_tokens
if not split_tokens:
self._level = False
self._component = False
return
x = (self.log_levels.index(split_tokens[1])
if split_tokens[1] in self.log_levels else None)
if x is not None:
self._level = split_tokens[1]
self._component = split_tokens[2]
else:
self._level = False
self._component = False
def parse_all(self):
"""
Trigger extraction of all information.
These values are usually evaluated lazily.
"""
tokens = self.split_tokens
duration = self.duration
datetime = self.datetime
thread = self.thread
operation = self.operation
namespace = self.namespace
pattern = self.pattern
nscanned = self.nscanned
nscannedObjects = self.nscannedObjects
ntoreturn = self.ntoreturn
nreturned = self.nreturned
ninserted = self.ninserted
ndeleted = self.ndeleted
nupdated = self.nupdated
numYields = self.numYields
w = self.w
r = self.r
def _find_pattern(self, trigger):
# get start of json query pattern
start_idx = self.line_str.rfind(trigger)
if start_idx == -1:
# no query pattern found
return None
stop_idx = 0
brace_counter = 0
search_str = self.line_str[start_idx + len(trigger):]
for match in re.finditer(r'{|}', search_str):
stop_idx = match.start()
if search_str[stop_idx] == '{':
brace_counter += 1
else:
brace_counter -= 1
if brace_counter == 0:
break
search_str = search_str[:stop_idx + 1].strip()
if search_str:
return json2pattern(search_str)
else:
return None
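    # Editor's note (hedged illustration, not in the original source): given
    #   "query: { a: { $gt: 21 }, b: 1 } planSummary: IXSCAN"
    # _find_pattern('query: ') walks the braces until they balance and hands
    # "{ a: { $gt: 21 }, b: 1 }" to json2pattern(), which reduces the concrete
    # values to a generic query shape.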
def _reformat_timestamp(self, format, force=False):
if format not in ['ctime', 'ctime-pre2.4', 'iso8601-utc',
'iso8601-local']:
            raise ValueError('invalid datetime format %s, choose from ctime, '
                             'ctime-pre2.4, iso8601-utc, iso8601-local.'
                             % format)
if ((self.datetime_format is None or
(self.datetime_format == format and
self._datetime_str != '')) and not force):
return
elif self.datetime is None:
return
elif format.startswith('ctime'):
dt_string = (self.weekdays[self.datetime.weekday()] + ' ' +
self.datetime.strftime("%b %d %H:%M:%S"))
# remove zero-padding from day number
tokens = dt_string.split(' ')
if tokens[2].startswith('0'):
tokens[2] = tokens[2].replace('0', ' ', 1)
dt_string = ' '.join(tokens)
if format == 'ctime':
dt_string += '.' + str(int(self.datetime.microsecond /
1000)).zfill(3)
elif format == 'iso8601-local':
dt_string = self.datetime.isoformat()
if self.datetime.utcoffset() is None:
dt_string += '+00:00'
ms_str = str(int(self.datetime.microsecond / 1000)).zfill(3)[:3]
# change isoformat string to have 3 digit milliseconds and no :
# in offset
dt_string = re.sub(r'(\.\d+)?([+-])(\d\d):(\d\d)',
'.%s\\2\\3\\4' % ms_str, dt_string, count=1)
elif format == 'iso8601-utc':
if self.datetime.utcoffset():
dt_string = self.datetime.astimezone(tzutc()).strftime("%Y-%m-"
"%dT%H:"
"%M:%S")
else:
dt_string = self.datetime.strftime("%Y-%m-%dT%H:%M:%S")
dt_string += '.' + str(int(self.datetime.microsecond /
1000)).zfill(3)[:3] + 'Z'
# set new string and format
self._datetime_str = dt_string
self._datetime_format = format
def __str__(self):
"""Default string conversion for LogEvent object is its line_str."""
return str(self.line_str)
def to_dict(self, labels=None):
"""Convert LogEvent object to a dictionary."""
output = {}
if labels is None:
labels = ['line_str', 'split_tokens', 'datetime', 'operation',
'thread', 'namespace', 'nscanned', 'ntoreturn',
'nreturned', 'ninserted', 'nupdated', 'ndeleted',
'duration', 'r', 'w', 'numYields']
for label in labels:
value = getattr(self, label, None)
if value is not None:
output[label] = value
return output
def to_json(self, labels=None):
"""Convert LogEvent object to valid JSON."""
output = self.to_dict(labels)
return json.dumps(output, cls=DateTimeEncoder, ensure_ascii=False)
def _parse_document(self):
"""Parse system.profile doc, copy all values to member variables."""
self._reset()
doc = self._profile_doc
self._split_tokens_calculated = True
self._split_tokens = None
self._duration_calculated = True
self._duration = doc[u'millis']
self._datetime_calculated = True
self._datetime = doc[u'ts']
if self._datetime.tzinfo is None:
self._datetime = self._datetime.replace(tzinfo=tzutc())
self._datetime_format = None
self._reformat_timestamp('ctime', force=True)
self._thread_calculated = True
self._thread = doc['thread']
self._operation_calculated = True
self._operation = doc[u'op']
self._namespace = doc[u'ns']
self._command_calculated = True
if self.operation == 'command':
            self._command = next(iter(doc[u'command']))  # first key; works on Python 2 and 3
# query pattern for system.profile events, all three cases.
# See SERVER-13245
if 'query' in doc:
if 'query' in doc['query'] and isinstance(doc['query']['query'],
dict):
self._pattern = str(doc['query']['query']).replace("'", '"')
elif '$query' in doc['query']:
self._pattern = str(doc['query']['$query']).replace("'", '"')
else:
self._pattern = str(doc['query']).replace("'", '"')
# sort pattern
if ('orderby' in doc['query'] and
isinstance(doc['query']['orderby'], dict)):
self._sort_pattern = str(doc['query']
['orderby']).replace("'", '"')
elif '$orderby' in doc['query']:
self._sort_pattern = str(doc['query']
['$orderby']).replace("'", '"')
else:
self._sort_pattern = None
self._counters_calculated = True
self._nscanned = doc[u'nscanned'] if 'nscanned' in doc else None
self._ntoreturn = doc[u'ntoreturn'] if 'ntoreturn' in doc else None
self._nupdated = doc[u'nupdated'] if 'nupdated' in doc else None
self._nreturned = doc[u'nreturned'] if 'nreturned' in doc else None
self._ninserted = doc[u'ninserted'] if 'ninserted' in doc else None
self._ndeleted = doc[u'ndeleted'] if 'ndeleted' in doc else None
self._numYields = doc[u'numYield'] if 'numYield' in doc else None
if u'lockStats' in doc:
self._r = doc[u'lockStats'][u'timeLockedMicros'][u'r']
self._w = doc[u'lockStats'][u'timeLockedMicros'][u'w']
self._r_acquiring = doc[u'lockStats']['timeAcquiringMicros'][u'r']
self._w_acquiring = doc[u'lockStats']['timeAcquiringMicros'][u'w']
locks = 'w:%i' % self.w if self.w is not None else 'r:%i' % self.r
elif u'locks' in doc:
locks = json.dumps(doc[u'locks'])
else:
locks = ''
# build a fake line_str
payload = ''
if 'query' in doc:
payload += ('query: %s' % str(doc[u'query'])
.replace("u'", "'").replace("'", '"'))
if 'command' in doc:
payload += ('command: %s' % str(doc[u'command'])
.replace("u'", "'").replace("'", '"'))
if 'updateobj' in doc:
payload += (' update: %s' % str(doc[u'updateobj'])
.replace("u'", "'").replace("'", '"'))
scanned = 'nscanned:%i' % self._nscanned if 'nscanned' in doc else ''
yields = 'numYields:%i' % self._numYields if 'numYield' in doc else ''
duration = '%ims' % self.duration if self.duration is not None else ''
self._line_str = ("[{thread}] {operation} {namespace} {payload} "
"{scanned} {yields} locks(micros) {locks} "
"{duration}".format(datetime=self.datetime,
thread=self.thread,
operation=self.operation,
namespace=self.namespace,
payload=payload, scanned=scanned,
yields=yields, locks=locks,
duration=duration))
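# ---------------------------------------------------------------------------
# Hedged usage sketch (editor's addition, not part of the original mtools
# source). It assumes the enclosing LogEvent class accepts a raw log line
# string, as the comments above suggest, and that mtools' dependencies are
# installed.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    sample_line = ("Thu Dec 12 23:13:38.818 [conn123] query test.users "
                   "query: { a: 1 } nscanned:100 nreturned:10 100ms")
    le = LogEvent(sample_line)
    print(le.operation, le.namespace)              # e.g. query test.users
    print(le.nscanned, le.nreturned, le.duration)  # counters and duration (ms)
    print(le.to_json(['operation', 'namespace', 'duration']))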
|
mtools/util/logevent.py
| 32,665 |
Custom datetime encoder for json output.
Extract information from log line and store properties/variables.
line_str: the original line string
split_tokens: a list of string tokens after splitting line_str using
whitespace as split points
datetime: a datetime object for the logevent. For logfiles created with
version 2.4+, it also contains micro-seconds
duration: the duration of a timed operation in ms
thread: the thread name (e.g. "conn1234") as string
operation: insert, update, remove, query, command, getmore, None
namespace: the namespace of the operation, or None
command: the type of command, if the operation was a "command"
pattern: the query pattern for queries, updates, counts, etc
...
Certain operations also add the number of affected/scanned documents.
If applicable, the following variables are also set, otherwise the
default is None: nscanned, ntoreturn, nreturned, ninserted, nupdated
For performance reason, all fields are evaluated lazily upon first
request.
Default string conversion for LogEvent object is its line_str.
Extract counters like nscanned and nreturned from the logevent.
Extract level and component if available (lazy).
Helper method to extract both operation and namespace from a logevent.
It doesn't make sense to only extract one as they appear back to back
in the token list.
Match the datetime pattern at the beginning of the token list.
There are several formats that this method needs to understand
and distinguish between (see MongoDB's SERVER-7965):
ctime-pre2.4 Wed Dec 31 19:00:00
ctime Wed Dec 31 19:00:00.000
iso8601-utc 1970-01-01T00:00:00.000Z
iso8601-local 1969-12-31T19:00:00.000+0500
Parse system.profile doc, copy all values to member variables.
Extract query pattern from operations.
Extract log component if available (lazy).
Extract conn name if available (lazy).
This value is None for all lines except the log lines related to
connections, that is lines matching '\[conn[0-9]+\]' or
'\[(initandlisten|mongosMain)\] .* connection accepted from'.
Extract datetime if available (lazy).
Calculate duration if available (lazy).
Return line_str depending on source, logfile or system.profile.
Extract log level if available (lazy).
Extract namespace if available (lazy).
Extract ndeleted or nDeleted counter if available (lazy).
Extract ninserted or nInserted counter if available (lazy).
Extract counters if available (lazy).
Looks for nreturned, nReturned, or nMatched counter.
Extract nscanned or keysExamined counter if available (lazy).
Extract counters if available (lazy).
Looks for nscannedObjects or docsExamined.
Extract ntoreturn counter if available (lazy).
Extract numYields counter if available (lazy).
Extract nupdated or nModified counter if available (lazy).
Extract operation if available (lazy).
Operations: query, insert, update, remove, getmore, command
Trigger extraction of all information.
These values are usually evaluated lazily.
Extract query pattern from operations.
Extract numYields counter if available (lazy).
Extract read lock (r) counter if available (lazy).
Set line_str.
Line_str is only writeable if LogEvent was created from a string,
not from a system.profile documents.
Extract query pattern from operations.
Split string into tokens (lazy).
Extract thread name if available (lazy).
Convert LogEvent object to a dictionary.
Convert LogEvent object to valid JSON.
Extract write lock (w) counter if available (lazy).
Extract ntoreturn counter if available (lazy).
!/bin/python datetime handler for json encoding create from string, remove line breaks at end of _line_str docs don't need to be parsed lazily, they are fast TODO: refactor from the legacy names to modern (eg: nscanned => keysExamined). Currently _extract_counters() maps newer property names into legacy equivalents for broader log file support. keysExamined docsExamined nModified nReturned or nMatched (updates) nInserted nDeleted split into items (whitespace split) split_tokens = self.split_tokens find duration from end if no datetime after 10 tokens, break to avoid parsing very long lines separate datetime str and linestr Fast check if timestamp format changed. If it has, trigger datetime evaluation. empty line, no need to parse datetime not the timestamp format that was hinted first check: less than 4 tokens can't be ctime check for ctime-pre-2.4 or ctime format sanity check, because the dateutil parser could interpret any numbers as a valid date convinced that this is a ISO-8601 format, the dateutil parser will do the rest assume current year unless self.year_rollover is set (from LogFile) force evaluation of thread to get access to datetime_offset and to protect from changes due to line truncation. check if this log line got truncated unknown warning, bail out trigger evaluation of operation trigger evaluation of operation workaround for <= 2.2 log files, where command was not listed separately extract counters (if present) TODO: refactor mtools to use current counter names throughout Transitionary hack: mapping of current names into prior equivalents trigger operation evaluation to get access to offset Remap counter to standard name, if applicable see if this is a pre-2.5.2 numYields with space in between (e.g. "numYields: 2") https://jira.mongodb.org/browse/SERVER-10101 token not parsable, skip get start of json query pattern no query pattern found remove zero-padding from day number change isoformat string to have 3 digit milliseconds and no : in offset set new string and format query pattern for system.profile events, all three cases. See SERVER-13245 sort pattern build a fake line_str
| 5,646 |
en
| 0.758627 |
# -*- coding: utf-8 -*-
"""
Calculation of cumulant expressions for non-linear response functions
of the third order for a multilevel three band system.
"""
from quantarhei.symbolic.cumulant import Ugde, Uedg, Uged, Uegd #, ExpdV
from quantarhei.symbolic.cumulant import gg #, g1, g2
from quantarhei.symbolic.cumulant import CumulantExpr
from quantarhei.symbolic.abc import a, b, f, tau, tau1, tau2, tau3, c, d #, e, t, T, tau, x, y
from quantarhei.symbolic.abc import t1, t2, t3
from quantarhei.symbolic.lang import python_code
from quantarhei.symbolic.lang import fortran_code
import time
def evaluate_cumulant(cum, positive_times=(), leading_index=None,
                      lang="Python", arrays=None):
    """Evaluate a cumulant expression and return it as source code text.
    The expression is rewritten in terms of the lineshape function gg,
    evaluated, restricted to positive times, reduced with respect to the
    leading index, and converted to Python or Fortran code.
    """
t0 = time.time()
A = cum.rewrite(gg)
expr = CumulantExpr(A)
expr = expr.evaluate()
t1 = time.time()
for tt in positive_times:
expr = CumulantExpr(expr)._make_positive(tt)
t2 = time.time()
#a = leading_index[0]
if leading_index is not None:
D = expr._leading_index(leading_index)
expr = D._getExpr()
t3 = time.time()
if lang == "Fortran":
ss = fortran_code(expr.__str__())
elif lang == "Python":
ss = python_code(expr.__str__(),arrays=arrays)
else:
raise Exception("Unknown language")
print(t1-t0)
print(t2-t1)
print(t3-t2)
return ss
def R1g():
"""
"""
A = Ugde(b,t1)*Uedg(b,t1+t2)*Ugde(a,t1+t2+t3)
return evaluate_cumulant(A, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"])
def R2g():
"""
"""
A = Uedg(a,t1+t2)*Ugde(b,t1+t2+t3)*Uedg(b,t1)
return evaluate_cumulant(A, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"])
def R3g():
"""
"""
A = Uedg(a,t1)*Ugde(b,t1+t2+t3)*Uedg(b,t1+t2)
return evaluate_cumulant(A, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"])
def R4g():
"""
"""
A = Ugde(b,t1+t2+t3)*Uedg(b,t1+t2)*Ugde(a,t1)
return evaluate_cumulant(A, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"])
def R1fs():
"""
"""
A = (Uedg(a,t1+t2+t3)*Ugde(f,t1+t2+t3)*Uedg(f,t1+t2)
*Ugde(b,t1+t2)*Uedg(b,t1))
return evaluate_cumulant(A, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"])
def R2fs():
"""
"""
A = (Ugde(b,t1)*Uedg(b,t1+t2+t3)*Ugde(f,t1+t2+t3)
*Uedg(f,t1+t2)*Ugde(a,t1+t2))
return evaluate_cumulant(A, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"])
def print_R1gt():
"""
"""
A = Ugde(b,t3)
print(evaluate_cumulant(A, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"]))
B = Ugde(a,t1)
print(evaluate_cumulant(B, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"]))
def print_R2gt():
"""
"""
A = Ugde(b,t3)
print(evaluate_cumulant(A, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"]))
B = Uedg(a,t1)
print(evaluate_cumulant(B, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"]))
def print_R1fst():
"""
"""
A = Uedg(b,t3)*Ugde(f,t3)
print(evaluate_cumulant(A, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"]))
B = Uedg(a,t1)
print(evaluate_cumulant(B, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"]))
def print_R2fst():
"""
"""
A = Uedg(b,t3)*Ugde(f,t3)
print(evaluate_cumulant(A, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"]))
B = Ugde(a,t1)
print(evaluate_cumulant(B, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"]))
def print_trans_R2g():
"""
"""
A = (Uedg(a,t1+tau)*Ugde(b,t1+tau)*Uedg(b,t1+t2)*Ugde(b,t1+t2+t3)
*Uedg(b,t1+tau)*Ugde(a,t1+tau)*Uedg(a,t1))
print(evaluate_cumulant(A, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"]))
def print_trans_R2g_alt():
"""
"""
#A = (Uedg(a,t1+tau)*Ugde(b,t1+tau)*Uedg(b,t1+t2)*Ugde(b,t1+t2+t3)
# *Uedg(b,t1+tau)*Ugde(a,t1+tau)*Uedg(a,t1))
A = (Uged(a,t1)*Uedg(a,tau1)*Ugde(b,tau1)*Uedg(b,t2)*Ugde(b,t2+t3)*Uedg(b,tau1)*Ugde(a,tau1))
print(evaluate_cumulant(A, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"]))
def print_trans_R2g_alt2():
"""
"""
#A = (Uedg(a,t1+tau)*Ugde(b,t1+tau)*Uedg(b,t1+t2)*Ugde(b,t1+t2+t3)
# *Uedg(b,t1+tau)*Ugde(a,t1+tau)*Uedg(a,t1))
#A = (Uged(a,t1)*Uedg(a,tau1)*Ugde(b,tau1)*Uedg(b,t2)*Ugde(b,t2+t3)*Uedg(b,tau1)*Ugde(a,tau1))
A = (Uged(a,t1+tau1)*Uedg(b,t2-tau1)*Ugde(b,t2+t3-tau1)*Uegd(a,tau1))
print(evaluate_cumulant(A, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"]))
def generate_nth_order_R2g(states_tuple, times_tuple):
order = len(states_tuple)
if order != len(times_tuple):
raise Exception("Wrong tuple/list length")
# starting state
a = states_tuple[0]
# final state (can be the same as starting)
b = states_tuple[len(states_tuple)-1]
# final time (must be t2)
tt = times_tuple[len(times_tuple)-1]
AL = Uged(a,t1)
Amid = Uedg(b,tt)*Ugde(b,t3+tt)
filL = 1
filR = 1
for k in range(len(times_tuple)-1):
tau = times_tuple[k]
s1 = states_tuple[k]
s2 = states_tuple[k+1]
filL = filL*Uedg(s1,tau)*Ugde(s2,tau)
filR = Uedg(s2,tau)*Ugde(s1,tau)*filR
A = AL*filL*Amid*filR
print(A)
print(evaluate_cumulant(A, positive_times=(t1, tt, t3),
leading_index=a, arrays=["gg"]))
def test():
A = Uged(a,t1+t2)*Ugde(d,t3)*Uegd(a,t2)
print(evaluate_cumulant(A, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"]))
def oneex_twoex():
A = Uedg(f,t1)*Ugde(a,t1)
print(evaluate_cumulant(A, positive_times=(t1,), leading_index=a,
arrays="gg"))
# =============================================================================
# print("R1g:")
# st_R1g = "numpy.exp("+R1g()+")"
# print(st_R1g)
#
# print("")
# print("R2g:")
# print(R2g())
#
# print("")
# print("R3g:")
# print(R3g())
#
# print("")
# print("R4g:")
# print(R4g())
#
# print("")
# print("R1fs:")
# print(R1fs())
#
# print("")
# print("R2fs:")
# print(R2fs())
#
# print("")
# print("R1gt")
# print_R1gt()
#
# print("")
# print("R2gt")
# print_R2gt()
#
# print("")
# print("R1fst")
# print_R1fst()
#
# print("")
# print("R2fst")
# print_R2fst()
#
# =============================================================================
#print("")
#print("Trans_R2g")
#print_trans_R2g()
#
#print("")
#print("Trans_R2g_alt")
#print_trans_R2g_alt()
#
#print("")
#print("Trans_R2g_alt2")
#print_trans_R2g_alt2()
#print("***")
#states = (a, c, b) #(a,c,b)
#times = (tau1, tau2, t2) # (tau1,tau2,t2)
#generate_nth_order_R2g(states, times)
#
#print("===")
#A = Uged(a,t1)*Uedg(a,tau1)*Ugde(c,tau1)*Uedg(c,tau2)*Ugde(b,tau2)*Uedg(b,t2)*Ugde(b,t2 + t3)*Uedg(b,tau2)*Ugde(c,tau2)*Uedg(c,tau1)*Ugde(a,tau1)
#
#print(evaluate_cumulant(A, positive_times=(t1, t2, t3),
# leading_index=a, arrays=["gg"]))
#print("***")
#states = (a,b,c, d) #(a,c,b)
#times = (tau1, tau2, tau3, t2) # (tau1,tau2,t2)
#states = (a,c,b)
#times = (tau1,tau2,t2)
#generate_nth_order_R2g(states, times)
#test()
oneex_twoex()
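# Hedged example (editor's addition, not part of the original script): how the
# generated strings are typically consumed. Wrapping the result in numpy.exp
# mirrors the commented-out block above; "numpy" and the gg array are
# assumptions about the runtime environment targeted by python_code().
def _example_usage():
    exponent = R1g()
    print("R1g response function as Python source:")
    print("numpy.exp(" + exponent + ")")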
|
examples/symbolic/test_symbolic_8.py
| 8,025 |
Calculation of cumulant expressions for non-linear response functions
of the third order for a multilevel three band system.
-*- coding: utf-8 -*-, ExpdV, g1, g2, e, t, T, tau, x, ya = leading_index[0]A = (Uedg(a,t1+tau)*Ugde(b,t1+tau)*Uedg(b,t1+t2)*Ugde(b,t1+t2+t3) *Uedg(b,t1+tau)*Ugde(a,t1+tau)*Uedg(a,t1))A = (Uedg(a,t1+tau)*Ugde(b,t1+tau)*Uedg(b,t1+t2)*Ugde(b,t1+t2+t3) *Uedg(b,t1+tau)*Ugde(a,t1+tau)*Uedg(a,t1))A = (Uged(a,t1)*Uedg(a,tau1)*Ugde(b,tau1)*Uedg(b,t2)*Ugde(b,t2+t3)*Uedg(b,tau1)*Ugde(a,tau1)) starting state final state (can be the same as starting) final time (must be t2) ============================================================================= print("R1g:") st_R1g = "numpy.exp("+R1g()+")" print(st_R1g) print("") print("R2g:") print(R2g()) print("") print("R3g:") print(R3g()) print("") print("R4g:") print(R4g()) print("") print("R1fs:") print(R1fs()) print("") print("R2fs:") print(R2fs()) print("") print("R1gt") print_R1gt() print("") print("R2gt") print_R2gt() print("") print("R1fst") print_R1fst() print("") print("R2fst") print_R2fst() =============================================================================print("")print("Trans_R2g")print_trans_R2g()print("")print("Trans_R2g_alt")print_trans_R2g_alt()print("")print("Trans_R2g_alt2")print_trans_R2g_alt2()print("***")states = (a, c, b) (a,c,b)times = (tau1, tau2, t2) (tau1,tau2,t2)generate_nth_order_R2g(states, times)print("===")A = Uged(a,t1)*Uedg(a,tau1)*Ugde(c,tau1)*Uedg(c,tau2)*Ugde(b,tau2)*Uedg(b,t2)*Ugde(b,t2 + t3)*Uedg(b,tau2)*Ugde(c,tau2)*Uedg(c,tau1)*Ugde(a,tau1)print(evaluate_cumulant(A, positive_times=(t1, t2, t3), leading_index=a, arrays=["gg"])) print("***")states = (a,b,c, d) (a,c,b)times = (tau1, tau2, tau3, t2) (tau1,tau2,t2)states = (a,c,b)times = (tau1,tau2,t2)generate_nth_order_R2g(states, times)test()
| 1,873 |
en
| 0.354115 |
# This example is inspired by https://github.com/dasguptar/treelstm.pytorch
import argparse, math, os, random
try:
    import cPickle
except ImportError:  # Python 3: cPickle was folded into pickle
    import pickle as cPickle
import logging
logging.basicConfig(level=logging.INFO)
import numpy as np
from tqdm import tqdm
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn
from mxnet import autograd as ag
from tree_lstm import SimilarityTreeLSTM
from dataset import Vocab, SICKDataIter
parser = argparse.ArgumentParser(description='TreeLSTM for Sentence Similarity on Dependency Trees')
parser.add_argument('--data', default='data/sick/',
help='path to raw dataset. required when preprocessed dataset is not available.')
parser.add_argument('--word_embed', default='data/glove/glove.840B.300d.txt',
help='directory with word embeddings. required when preprocessed dataset is not available.')
parser.add_argument('--batch_size', type=int, default=25,
help='training batch size per device (CPU/GPU).')
parser.add_argument('--epochs', default=50, type=int,
help='number of total epochs to run')
parser.add_argument('--lr', default=0.02, type=float,
help='initial learning rate')
parser.add_argument('--wd', default=0.0001, type=float,
help='weight decay factor')
parser.add_argument('--optimizer', default='adagrad',
help='optimizer (default: adagrad)')
parser.add_argument('--seed', default=123, type=int,
help='random seed (default: 123)')
parser.add_argument('--use-gpu', action='store_true',
help='whether to use GPU.')
opt = parser.parse_args()
logging.info(opt)
context = [mx.gpu(0) if opt.use_gpu else mx.cpu()]
rnn_hidden_size, sim_hidden_size, num_classes = 150, 50, 5
optimizer = opt.optimizer.lower()
mx.random.seed(opt.seed)
np.random.seed(opt.seed)
random.seed(opt.seed)
batch_size = opt.batch_size
# read dataset
if os.path.exists('dataset.cPickle'):
with open('dataset.cPickle', 'rb') as f:
train_iter, dev_iter, test_iter, vocab = cPickle.load(f)
else:
root_dir = opt.data
segments = ['train', 'dev', 'test']
token_files = [os.path.join(root_dir, seg, '%s.toks'%tok)
for tok in ['a', 'b']
for seg in segments]
vocab = Vocab(filepaths=token_files, embedpath=opt.word_embed)
train_iter, dev_iter, test_iter = [SICKDataIter(os.path.join(root_dir, segment), vocab, num_classes)
for segment in segments]
with open('dataset.cPickle', 'wb') as f:
cPickle.dump([train_iter, dev_iter, test_iter, vocab], f)
logging.info('==> SICK vocabulary size : %d ' % vocab.size)
logging.info('==> Size of train data : %d ' % len(train_iter))
logging.info('==> Size of dev data : %d ' % len(dev_iter))
logging.info('==> Size of test data : %d ' % len(test_iter))
# get network
net = SimilarityTreeLSTM(sim_hidden_size, rnn_hidden_size, vocab.size, vocab.embed.shape[1], num_classes)
# use pearson correlation and mean-square error for evaluation
metric = mx.metric.create(['pearsonr', 'mse'])
def to_target(x):
target = np.zeros((1, num_classes))
ceil = int(math.ceil(x))
floor = int(math.floor(x))
if ceil==floor:
target[0][floor-1] = 1
else:
target[0][floor-1] = ceil - x
target[0][ceil-1] = x - floor
return mx.nd.array(target)
def to_score(x):
levels = mx.nd.arange(1, 6, ctx=x.context)
return [mx.nd.sum(levels*mx.nd.exp(x), axis=1).reshape((-1,1))]
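# Editor's note (hedged worked example): for a gold similarity score of 3.6,
# to_target() yields the soft label [0, 0, 0.4, 0.6, 0] over the 5 classes
# (floor=3 receives 4 - 3.6 = 0.4, ceil=4 receives 3.6 - 3 = 0.6), and
# to_score() inverts this: with log-probabilities z, sum_k k * exp(z_k)
# recovers the expected similarity on the 1-5 scale.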
# when evaluating in validation mode, check and see if pearson-r is improved
# if so, checkpoint and run evaluation on test dataset
def test(ctx, data_iter, best, mode='validation', num_iter=-1):
data_iter.reset()
batches = len(data_iter)
data_iter.set_context(ctx[0])
preds = []
labels = [mx.nd.array(data_iter.labels, ctx=ctx[0]).reshape((-1,1))]
for _ in tqdm(range(batches), desc='Testing in {} mode'.format(mode)):
l_tree, l_sent, r_tree, r_sent, label = data_iter.next()
z = net(mx.nd, l_sent, r_sent, l_tree, r_tree)
preds.append(z)
preds = to_score(mx.nd.concat(*preds, dim=0))
metric.update(preds, labels)
names, values = metric.get()
metric.reset()
for name, acc in zip(names, values):
logging.info(mode+' acc: %s=%f'%(name, acc))
if name == 'pearsonr':
test_r = acc
if mode == 'validation' and num_iter >= 0:
if test_r >= best:
best = test_r
logging.info('New optimum found: {}. Checkpointing.'.format(best))
net.collect_params().save('childsum_tree_lstm_{}.params'.format(num_iter))
test(ctx, test_iter, -1, 'test')
return best
def train(epoch, ctx, train_data, dev_data):
# initialization with context
if isinstance(ctx, mx.Context):
ctx = [ctx]
net.collect_params().initialize(mx.init.Xavier(magnitude=2.24), ctx=ctx[0])
net.embed.weight.set_data(vocab.embed.as_in_context(ctx[0]))
train_data.set_context(ctx[0])
dev_data.set_context(ctx[0])
# set up trainer for optimizing the network.
trainer = gluon.Trainer(net.collect_params(), optimizer, {'learning_rate': opt.lr, 'wd': opt.wd})
best_r = -1
Loss = gluon.loss.KLDivLoss()
for i in range(epoch):
train_data.reset()
num_batches = len(train_data)
# collect predictions and labels for evaluation metrics
preds = []
labels = [mx.nd.array(train_data.labels, ctx=ctx[0]).reshape((-1,1))]
for j in tqdm(range(num_batches), desc='Training epoch {}'.format(i)):
# get next batch
l_tree, l_sent, r_tree, r_sent, label = train_data.next()
# use autograd to record the forward calculation
with ag.record():
# forward calculation. the output is log probability
z = net(mx.nd, l_sent, r_sent, l_tree, r_tree)
# calculate loss
loss = Loss(z, to_target(label).as_in_context(ctx[0]))
# backward calculation for gradients.
loss.backward()
preds.append(z)
# update weight after every batch_size samples
if (j+1) % batch_size == 0:
trainer.step(batch_size)
# translate log-probability to scores, and evaluate
preds = to_score(mx.nd.concat(*preds, dim=0))
metric.update(preds, labels)
names, values = metric.get()
metric.reset()
for name, acc in zip(names, values):
logging.info('training acc at epoch %d: %s=%f'%(i, name, acc))
best_r = test(ctx, dev_data, best_r, num_iter=i)
train(opt.epochs, context, train_iter, dev_iter)
|
example/gluon/tree_lstm/main.py
| 6,757 |
This example is inspired by https://github.com/dasguptar/treelstm.pytorch read dataset get network use pearson correlation and mean-square error for evaluation when evaluating in validation mode, check and see if pearson-r is improved if so, checkpoint and run evaluation on test dataset initialization with context set up trainer for optimizing the network. collect predictions and labels for evaluation metrics get next batch use autograd to record the forward calculation forward calculation. the output is log probability calculate loss backward calculation for gradients. update weight after every batch_size samples translate log-probability to scores, and evaluate
| 671 |
en
| 0.794063 |
import os
import glob
import sys
from typing import Optional, List, Union
from .utils.utils import calc_mean_score, save_json, image_dir_to_json, image_file_to_json
from .handlers.model_builder import Nima
from deepinsight_iqa.common.utility import thread_safe_singleton, set_gpu_limit
from deepinsight_iqa.data_pipeline.nima_gen.nima_datagen import NimaDataGenerator as TestDataGenerator
import tensorflow as tf
import six
import logging
logger = logging.getLogger(__name__)
@six.add_metaclass(thread_safe_singleton)
class Prediction:
def __init__(self, weights_file: str, base_model_name: str):
""" Invoke a predict method of this class to predict image quality using nima model
"""
try:
# set_gpu_limit()
self.nima = Nima(base_model_name, weights=None)
self.nima.build()
self.nima.nima_model.load_weights(weights_file)
except Exception as e:
print("Unable to load NIMA weights", str(e))
sys.exit(1)
def predict(
self,
image_source: str,
predictions_file: Optional[str] = None,
img_format: str = 'jpg'
) -> List:
# load samples
if os.path.isfile(image_source):
image_dir, samples = image_file_to_json(image_source)
else:
image_dir = image_source
            samples = image_dir_to_json(image_source, img_type=img_format)
        # initialize data generator
        n_classes = 10
        batch_size = 64
data_generator = TestDataGenerator(
samples, image_dir, batch_size, n_classes,
self.nima.preprocessing_function(), img_format=img_format
)
# get predictions
predictions = self.nima.nima_model.predict_generator(
data_generator, workers=1, use_multiprocessing=False, verbose=1)
# calc mean scores and add to samples
for i, sample in enumerate(samples):
sample['mean_score_prediction'] = calc_mean_score(predictions[i])
# print(json.dumps(samples, indent=2))
if predictions_file is not None:
save_json(samples, predictions_file)
return samples
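# Hedged usage sketch (editor's addition, not part of the original module).
# The weights file, base model name and image directory below are
# placeholders, not artefacts shipped with this package.
if __name__ == '__main__':
    predictor = Prediction(weights_file='mobilenet_weights.hdf5',  # placeholder path
                           base_model_name='MobileNet')
    scored = predictor.predict('/path/to/images', img_format='jpg')
    for entry in scored:
        print(entry.get('image_id'), entry.get('mean_score_prediction'))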
|
deepinsight_iqa/nima/predict.py
| 2,273 |
Invoke a predict method of this class to predict image quality using nima model
set_gpu_limit() load samples initialize data generator get predictions calc mean scores and add to samples print(json.dumps(samples, indent=2))
| 234 |
en
| 0.572556 |
# qubit number=3
# total number=60
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
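# Editor's note (hedged worked example): bitwise_dot("011", "101") computes
# (0*1 + 1*0 + 1*1) % 2 = 1 and returns "1"; bernstein_test_1 then XORs that
# with b = "1", so the input "101" maps to "0".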
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.h(input_qubit[2]) # number=38
prog.cz(input_qubit[0],input_qubit[2]) # number=39
prog.h(input_qubit[2]) # number=40
prog.cx(input_qubit[0],input_qubit[2]) # number=31
prog.h(input_qubit[2]) # number=42
prog.cz(input_qubit[0],input_qubit[2]) # number=43
prog.h(input_qubit[2]) # number=44
prog.h(input_qubit[2]) # number=48
prog.cz(input_qubit[0],input_qubit[2]) # number=49
prog.h(input_qubit[2]) # number=50
prog.cx(input_qubit[0],input_qubit[2]) # number=54
prog.x(input_qubit[2]) # number=55
prog.h(input_qubit[2]) # number=57
prog.cz(input_qubit[0],input_qubit[2]) # number=58
prog.h(input_qubit[2]) # number=59
prog.cx(input_qubit[0],input_qubit[2]) # number=47
prog.cx(input_qubit[0],input_qubit[2]) # number=37
prog.h(input_qubit[2]) # number=51
prog.cz(input_qubit[0],input_qubit[2]) # number=52
prog.h(input_qubit[2]) # number=53
prog.h(input_qubit[2]) # number=25
prog.cz(input_qubit[0],input_qubit[2]) # number=26
prog.h(input_qubit[2]) # number=27
prog.h(input_qubit[1]) # number=7
prog.cz(input_qubit[2],input_qubit[1]) # number=8
prog.rx(0.17592918860102857,input_qubit[2]) # number=34
prog.rx(-0.3989822670059037,input_qubit[1]) # number=30
prog.h(input_qubit[1]) # number=9
prog.h(input_qubit[1]) # number=18
prog.cz(input_qubit[2],input_qubit[1]) # number=19
prog.h(input_qubit[1]) # number=20
prog.y(input_qubit[1]) # number=14
prog.h(input_qubit[1]) # number=22
prog.cz(input_qubit[2],input_qubit[1]) # number=23
prog.h(input_qubit[1]) # number=24
prog.z(input_qubit[2]) # number=3
prog.z(input_qubit[1]) # number=41
prog.x(input_qubit[1]) # number=17
prog.y(input_qubit[2]) # number=5
prog.x(input_qubit[2]) # number=21
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
    sample_shot = 4000
writefile = open("../data/startQiskit_QC292.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_5_yorktown")
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
data/p3BR/R2/benchmark/startQiskit_QC292.py
| 7,009 |
011 . x + 1
000 . x + 0
111 . x + 1
qubit number=3 total number=60 implement the oracle O_f NOTE: use multi_control_toffoli_gate ('noancilla' mode) https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate oracle.barrier() oracle.draw('mpl', filename=(kernel + '-oracle.png')) implement the Bernstein-Vazirani circuit initial n + 1 bits inverse last one (can be omitted if using O_f^\pm) circuit begin number=1 number=38 number=39 number=40 number=31 number=42 number=43 number=44 number=48 number=49 number=50 number=54 number=55 number=57 number=58 number=59 number=47 number=37 number=51 number=52 number=53 number=25 number=26 number=27 number=7 number=8 number=34 number=30 number=9 number=18 number=19 number=20 number=14 number=22 number=23 number=24 number=3 number=41 number=17 number=5 number=21 apply H to get superposition apply oracle O_f apply H back (QFT on Z_2^n) measure Q: which backend should we use? get state vector get simulate results provider = IBMQ.load_account() backend = provider.get_backend(backend_str) qobj = compile(prog, backend, shots) job = backend.run(qobj) job.result() transpile/schedule -> assemble -> backend.run "state": statevec, prog.draw('mpl', filename=(kernel + '.png'))
| 1,496 |
en
| 0.383851 |
import socket, threading, sys, traceback, os, tkinter
from ui import Ui_MainWindow
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5 import QtCore, QtGui, QtWidgets
from tkinter import *
from PIL import Image, ImageTk
from tkinter import messagebox, Tk
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
from RtpPacket import RtpPacket
RECV_SIZE = 20480 + 14
HIGHT = 500
CACHE_FILE_NAME = "cache-"
CACHE_FILE_EXT = ".jpg"
class Client:
INIT = 0
READY = 1
PLAYING = 2
state = INIT
SETUP = 0
PLAY = 1
PAUSE = 2
TEARDOWN = 3
FASTER = 4
SLOWER = 5
# Initiation..
def __init__(self, serveraddr, serverport, rtpport, filename):
self.page_main = Ui_MainWindow()
        self.state = self.INIT  # start in INIT; SETUP moves the client to READY
self.serverAddr = serveraddr
self.serverPort = int(serverport)
self.rtpPort = int(rtpport)
self.fileName = filename
self.rtspSeq = 0
self.sessionId = 0
self.requestSent = -1
self.teardownAcked = 0
self.connectToServer()
self.frameNbr = 0
self.createWidgets()
def createWidgets(self):
app = QtWidgets.QApplication(sys.argv)
page_tmp = QtWidgets.QMainWindow()
self.page_main.setupUi(page_tmp)
page_tmp.show()
self.page_main.btn_setup.clicked.connect(lambda: self.setupMovie())
self.page_main.btn_play.clicked.connect(lambda: self.playMovie())
self.page_main.btn_pause.clicked.connect(lambda: self.pauseMovie())
self.page_main.btn_teardown.clicked.connect(lambda: self.exitClient())
self.page_main.btn_faster.clicked.connect(lambda: self.fasterMovie())
self.page_main.btn_slower.clicked.connect(lambda: self.slowerMovie())
sys.exit(app.exec_())
def fasterMovie(self):
"""Let movie faster."""
if self.state == self.PLAYING or self.state == self.READY:
self.sendRtspRequest(self.FASTER)
def slowerMovie(self):
"""Let movie slower."""
if self.state == self.PLAYING or self.state == self.READY:
self.sendRtspRequest(self.SLOWER)
def setupMovie(self):
"""Setup init."""
if self.state == self.INIT:
self.sendRtspRequest(self.SETUP)
def exitClient(self):
"""Teardown the client."""
self.sendRtspRequest(self.TEARDOWN)
        try:
            os.remove(CACHE_FILE_NAME + str(self.sessionId) + CACHE_FILE_EXT)  # Delete the cache image from video
        except OSError:
            pass
        sys.exit(0)  # Close the gui window
def pauseMovie(self):
"""Pause movie."""
if self.state == self.PLAYING:
self.sendRtspRequest(self.PAUSE)
def playMovie(self):
"""Play movie."""
if self.state == self.READY:
# Create a new thread to listen for RTP packets
threading.Thread(target=self.listenRtp).start()
self.playEvent = threading.Event()
self.playEvent.clear()
self.sendRtspRequest(self.PLAY)
def listenRtp(self):
"""Listen for RTP packets."""
while 1:
try:
cachename = CACHE_FILE_NAME + str(self.sessionId) + CACHE_FILE_EXT
file = open(cachename, "wb+")
while 1:
data = self.rtpSocket.recv(RECV_SIZE)
if data:
rtpPacket = RtpPacket()
rtpPacket.decode(data)
# self.cutFrameList.append(rtpPacket.getPayload())
currFrameNbr = rtpPacket.seqNum()
file.write(rtpPacket.getPayload())
print("Current Seq Num: " + str(currFrameNbr))
if currFrameNbr > self.frameNbr and rtpPacket.getIfEnd(): # Discard the late packet
self.frameNbr = currFrameNbr
self.updateMovie(cachename)
file.close()
break
except:
# Stop listening upon requesting PAUSE or TEARDOWN
if self.playEvent.isSet():
break
print('Frame receiving failed!')
# Upon receiving ACK for TEARDOWN request,
# close the RTP socket
if self.teardownAcked == 1:
self.rtpSocket.shutdown(socket.SHUT_RDWR)
self.rtpSocket.close()
break
def writeFrame(self):
"""Write the received frame to a temp image file. Return the image file."""
cachename = CACHE_FILE_NAME + str(self.sessionId) + CACHE_FILE_EXT
file = open(cachename, "wb")
for item in self.cutFrameList:
file.write(item)
file.close()
return cachename
def updateMovie(self, imageFile):
"""Update the image file as video frame in the GUI."""
pixmap = QtGui.QPixmap(imageFile)
self.page_main.label_display.setPixmap(pixmap)
self.page_main.label_display.setScaledContents(True)
def connectToServer(self):
"""Connect to the Server. Start a new RTSP/TCP session."""
self.rtspSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.rtspSocket.connect((self.serverAddr, self.serverPort))
except:
# tkMessageBox.showwarning('Connection Failed', 'Connection to \'%s\' failed.' %self.serverAddr)
messagebox.showwarning('Connection Failed', 'Connection to \'%s\' failed.' %self.serverAddr)
def sendRtspRequest(self, requestCode):
"""Send RTSP request to the server."""
# Setup
if requestCode == self.SETUP and self.state == self.INIT:
threading.Thread(target=self.recvRtspReply).start()
# Update RTSP sequence number.
self.rtspSeq += 1
# Write the RTSP request to be sent.
request = 'SETUP ' + self.fileName + ' RTSP/1.0\nCSeq: ' + str(self.rtspSeq) + '\nTransport: RTP/UDP; client_port= ' + str(self.rtpPort)
# Keep track of the sent request.
self.requestSent = self.SETUP
# Play
elif requestCode == self.PLAY and self.state == self.READY:
self.rtspSeq += 1
request = 'PLAY ' + self.fileName + ' RTSP/1.0\nCSeq: ' + str(self.rtspSeq) + '\nSession: ' + str(self.sessionId)
self.requestSent = self.PLAY
# Pause
elif requestCode == self.PAUSE and self.state == self.PLAYING:
self.rtspSeq += 1
request = 'PAUSE ' + self.fileName + ' RTSP/1.0\nCSeq: ' + str(self.rtspSeq) + '\nSession: ' + str(self.sessionId)
self.requestSent = self.PAUSE
# Teardown
elif requestCode == self.TEARDOWN and not self.state == self.INIT:
self.rtspSeq += 1
request = 'TEARDOWN ' + self.fileName + ' RTSP/1.0\nCSeq: ' + str(self.rtspSeq) + '\nSession: ' + str(self.sessionId)
self.requestSent = self.TEARDOWN
# Faster
elif requestCode == self.FASTER and (self.state == self.PLAYING or self.state == self.READY):
self.rtspSeq += 1
request = 'FASTER ' + self.fileName + ' RTSP/1.0\nCSeq: ' + str(self.rtspSeq) + '\nSession: ' + str(self.sessionId)
# Slower
elif requestCode == self.SLOWER and (self.state == self.PLAYING or self.state == self.READY):
self.rtspSeq += 1
request = 'SLOWER ' + self.fileName + ' RTSP/1.0\nCSeq: ' + str(self.rtspSeq) + '\nSession: ' + str(self.sessionId)
else:
return
# Send the RTSP request using rtspSocket.
self.rtspSocket.send(request.encode())
print('\nData sent:\n' + request)
def recvRtspReply(self):
"""Receive RTSP reply from the server."""
while True:
reply = self.rtspSocket.recv(1024)
if reply:
self.parseRtspReply(reply.decode("utf-8"))
# Close the RTSP socket upon requesting Teardown
if self.requestSent == self.TEARDOWN:
self.rtspSocket.shutdown(socket.SHUT_RDWR)
self.rtspSocket.close()
break
def parseRtspReply(self, data):
"""Parse the RTSP reply from the server."""
lines = str(data).split('\n')
seqNum = int(lines[1].split(' ')[1])
# Process only if the server reply's sequence number is the same as the request's
if seqNum == self.rtspSeq:
session = int(lines[2].split(' ')[1])
# New RTSP session ID
if self.sessionId == 0:
self.sessionId = session
# Process only if the session ID is the same
if self.sessionId == session:
if int(lines[0].split(' ')[1]) == 200:
if self.requestSent == self.SETUP:
# Update RTSP state.
self.state = self.READY
# Open RTP port.
self.openRtpPort()
elif self.requestSent == self.PLAY:
self.state = self.PLAYING
elif self.requestSent == self.PAUSE:
self.state = self.READY
# The play thread exits. A new thread is created on resume.
self.playEvent.set()
elif self.requestSent == self.TEARDOWN:
self.state = self.INIT
# Flag the teardownAcked to close the socket.
self.teardownAcked = 1
def openRtpPort(self):
"""Open RTP socket binded to a specified port."""
# Create a new datagram socket to receive RTP packets from the server
self.rtpSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Set the timeout value of the socket to 0.5sec
self.rtpSocket.settimeout(0.5)
try:
# Bind the socket to the address using the RTP port given by the client user
self.rtpSocket.bind(("", self.rtpPort))
except:
messagebox.showwarning('Unable to Bind', 'Unable to bind PORT=%d' %self.rtpPort)
def handler(self):
"""Handler on explicitly closing the GUI window."""
self.pauseMovie()
if messagebox.askokcancel("Quit?", "Are you sure you want to quit?"):
self.exitClient()
else: # When the user presses cancel, resume playing.
self.playMovie()
if __name__ == "__main__":
try:
# serverAddr = sys.argv[1]
# serverPort = sys.argv[2]
# rtpPort = sys.argv[3]
# fileName = sys.argv[4]
serverAddr = sys.argv[1]
serverPort = sys.argv[4]
rtpPort = sys.argv[3]
fileName = sys.argv[2]
    except:
        print("[Usage: ClientLauncher.py Server_name Server_port RTP_port Video_file]\n")
        sys.exit(1)
# root = tkinter.Tk()
client = Client(serverAddr, serverPort, rtpPort, fileName)
# client.master.title('RTP Client')
# root.mainloop()
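    # Editor's note (hedged, not part of the original file): with the argument
    # order actually used above (addr, file, rtp_port, server_port), a launch
    # could look like
    #   python Client_dev.py 127.0.0.1 movie.Mjpeg 25000 8554
    # where 25000 and 8554 are placeholder port numbers.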
|
Task2/Client_dev.py
| 11,439 |
Connect to the Server. Start a new RTSP/TCP session.
Teardown the client.
Let movie faster.
Handler on explicitly closing the GUI window.
Listen for RTP packets.
Open RTP socket binded to a specified port.
Parse the RTSP reply from the server.
Pause movie.
Play movie.
Receive RTSP reply from the server.
Send RTSP request to the server.
Setup init.
Let movie slower.
Update the image file as video frame in the GUI.
Write the received frame to a temp image file. Return the image file.
Initiation.. Close the gui window Delete the cache image from video Create a new thread to listen for RTP packets self.cutFrameList.append(rtpPacket.getPayload()) Discard the late packet Stop listening upon requesting PAUSE or TEARDOWN Upon receiving ACK for TEARDOWN request, close the RTP socket tkMessageBox.showwarning('Connection Failed', 'Connection to \'%s\' failed.' %self.serverAddr) Setup Update RTSP sequence number. Write the RTSP request to be sent. Keep track of the sent request. Play Pause Teardown Faster Slower Send the RTSP request using rtspSocket. Close the RTSP socket upon requesting Teardown Process only if the server reply's sequence number is the same as the request's New RTSP session ID Process only if the session ID is the same Update RTSP state. Open RTP port. The play thread exits. A new thread is created on resume. Flag the teardownAcked to close the socket. Create a new datagram socket to receive RTP packets from the server Set the timeout value of the socket to 0.5sec Bind the socket to the address using the RTP port given by the client user When the user presses cancel, resume playing. serverAddr = sys.argv[1] serverPort = sys.argv[2] rtpPort = sys.argv[3] fileName = sys.argv[4] root = tkinter.Tk() client.master.title('RTP Client') root.mainloop()
| 1,787 |
en
| 0.777664 |
""" core app configuration """
import os
environment = os.getenv('LAMBTASTIC_ENV', 'development')
if environment == 'testing':
from .testing import *
elif environment == 'production':
from .production import *
else:
from .development import *
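# Editor's note (hedged, not part of the original file): the configuration is
# chosen through the LAMBTASTIC_ENV environment variable before import, e.g.
#   LAMBTASTIC_ENV=production python app.py
# Any value other than 'testing' or 'production' falls back to development.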
|
settings/__init__.py
| 257 |
core app configuration
| 22 |
en
| 0.551515 |
# -*- coding: utf-8 -*-
TIME_OUT = 60
EXCEPT_FILE = ['test.py','login.py','mix.py']
class Api(object):
login = "/api/users/login"
user_info="/api/users/info"
signin = "/api/users/sign/signIn"
map = "/api/RedEnvelope/updateUserMap"
find_redbag = "/api/RedEnvelope/findReds"
get_redbag = "/api/redUser/getRed"
test= "/api/sys/testJson"
|
config.py
| 364 |
-*- coding: utf-8 -*-
| 21 |
en
| 0.767281 |
"""
Copyright 2020 Inmanta
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contact: [email protected]
"""
import os
import common
from inmanta.loader import SourceInfo
from inmanta.module import Project
def test_collect_python_requirements(tmpdir):
# Create project
common.makeproject(tmpdir, "test-project", deps=[("mod1", ""), ("mod2", "")], imports=["mod1", "mod2"])
project_dir = os.path.join(tmpdir, "test-project")
libs_dir = os.path.join(project_dir, "libs")
# Create mod1
common.makemodule(libs_dir, "mod1", project=False)
mod1 = os.path.join(libs_dir, "mod1")
mod1_req_txt = """iplib@git+https://github.com/bartv/python3-iplib
pytest\
>=\
1.5
iplib>=0.0.1
"""
common.add_file(mod1, "requirements.txt", mod1_req_txt, msg="initial commit")
# Create mod2
common.makemodule(libs_dir, "mod2", project=False)
mod2 = os.path.join(libs_dir, "mod2")
mod2_req_txt = """# A comment
dummy-yummy # A comment
# Another comment
"""
common.add_file(mod2, "requirements.txt", mod2_req_txt, msg="initial commit")
project = Project(project_dir, venv_path=os.path.join(project_dir, ".env"))
Project.set(project)
project.load_module("mod1", allow_v1=True)
project.load_module("mod2", allow_v1=True)
reqs = project.collect_python_requirements()
expected_reqs = ["iplib@git+https://github.com/bartv/python3-iplib", "pytest>=1.5", "iplib>=0.0.1", "dummy-yummy"]
assert sorted(reqs) == sorted(expected_reqs)
def test_requirements_from_source_info(tmpdir):
"""Test the code path used by the exporter"""
common.makeproject(tmpdir, "test-project", deps=[("mod1", "")], imports=["mod1"])
project_dir = os.path.join(tmpdir, "test-project")
libs_dir = os.path.join(project_dir, "libs")
common.makemodule(libs_dir, "mod1", project=False)
mod1 = os.path.join(libs_dir, "mod1")
mod1_req_txt = """# I'm a comment
pytest\
>=\
1.5
iplib>=0.0.1
"""
common.add_file(mod1, "requirements.txt", mod1_req_txt, msg="initial commit")
project = Project(project_dir, venv_path=os.path.join(project_dir, ".env"))
Project.set(project)
project.load_module("mod1", allow_v1=True)
requirements = SourceInfo(mod1, "inmanta_plugins.mod1").requires
assert sorted(requirements) == sorted(["pytest>=1.5", "iplib>=0.0.1"])
# This would fail if the comments weren't filtered out
project.virtualenv.install_from_list(requirements)
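# Editor's note (hedged): the assertions above depend on requirement parsing
# collapsing the backslash-continued "pytest >= 1.5" lines into a single
# requirement and on comment-only lines (as in mod2_req_txt) being dropped,
# which is what expected_reqs encodes.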
|
tests/moduletool/test_python_dependencies.py
| 2,974 |
Test the code path used by the exporter
Copyright 2020 Inmanta
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contact: [email protected]
Create project Create mod1 Create mod2 This would fail if the comments weren't filtered out
| 708 |
en
| 0.841192 |
from __future__ import division
import fa
import sys
import os
from fa import chunker
if __name__ == "__main__":
from sys import stderr
import argparse
parser = argparse.ArgumentParser(description=(
"Create a set of synthetic genomes consisting "
"of subgroups per tax level. Some kmers are unique, "
"some are shared, and this provides a case where we can test"
" the efficacy and behavior of our bitmap method."))
parser.add_argument("-n", "--num-nucleotides-per-leaf",
type=int, default=13000)
parser.add_argument("-N", "--num-nucs-shared-per-subgroup",
type=int, default=2000)
parser.add_argument("-l", "--num-nucs-shared-per-level",
type=int, default=8000)
parser.add_argument("-d", "--tree-depth",
type=int, default=4)
parser.add_argument("-s", "--split-size", type=int,
default=3,
help=("Number of subgroups for "
"each parent node."))
parser.add_argument("--parent-map", "-p",
help="Path to which to write synthetic taxonomy.",
default="nodes.dmp")
parser.add_argument("-S", "--subgroup-size", type=int,
default=3,
help="Number of genomes for each subgroup")
parser.add_argument("-o", "--outdir", default=".", type=str)
parser.add_argument("--name-id-map", "-m", default="synth_nameidmap.txt")
args = parser.parse_args()
# Variables/settings for constructing synthetic genome
# and accessory files.
mult_per_layer = args.split_size * args.subgroup_size
depth = args.tree_depth
nleaves = mult_per_layer ** (depth - 1)
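    # Editor's note (hedged): with the default arguments this gives
    # mult_per_layer = 3 * 3 = 9 and nleaves = 9 ** (4 - 1) = 729 synthetic
    # leaf genomes, each seeded with 13000 unique nucleotides.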
leaf_seqs = [fa.SeqId(fa.gen_seq(args.num_nucleotides_per_leaf), i) for
i in range(nleaves)]
nleaf_seq = len(leaf_seqs)
outdir = args.outdir
if not os.path.isdir(outdir):
if os.path.isfile(outdir):
raise Exception("Path set for outdir ('%s') is a"
" file... Nah, dawg." % outdir)
os.mkdir(outdir)
outdir = outdir + '/' # Append slash
name_id_map = outdir + args.name_id_map
parent_map = outdir + args.parent_map
# Variables for constructing the parent_map dictionary.
pcmap = {}
used_seqids = set(i.taxid() for i in leaf_seqs)
ctax = max(used_seqids) + 1
last_layer = []
for i in range(1, depth):
nchunks = nleaf_seq // (mult_per_layer ** i)
chunk_size = nleaf_seq // nchunks
assert nleaf_seq % chunk_size == 0
for seqsetid, seqset in enumerate(chunker(leaf_seqs, chunk_size)):
print("seqset len: %i" % len(seqset), file=stderr)
add = fa.gen_seq(args.num_nucs_shared_per_level)
for seq in seqset:
seq.seq += add
seq.subsets[i] = seqsetid
for sssid, seqsubset in enumerate(chunker(seqset,
args.subgroup_size)):
# print("seqsubset len: %i" % len(seqsubset), file=stderr)
add = fa.gen_seq(args.num_nucs_shared_per_subgroup)
for seq in seqsubset:
seq.seq += add
seq.subgroups[i] = sssid
if i == 1:  # or if not last_layer
# Add leaf node to parent connections
for seq in seqset:
pcmap[seq.taxid()] = ctax + seqsetid
if i > 1:
# Add higher nodes to parent connections
if i == depth - 1:
pcmap.update((el, 1) for el in last_layer)
break
# This leaves the loop on the last layer in the tree
# because the root is 1 by construction
else:
# pcmap.update((tax, i + ctax) for tax in
# last_layer[i:i+mult_per_layer] for
# i in range(mult_per_layer))
for i in range(mult_per_layer):
for tax in last_layer[i:i + mult_per_layer]:
pcmap[tax] = i + ctax
last_layer = [ctax + i for i in range(nchunks)]
used_seqids.update(last_layer)
ctax = max(used_seqids) + 1
del used_seqids
del ctax
del last_layer
{seq.write(outdir + seq.filename()) for seq in leaf_seqs}
print("[1/3] Successfully created synthetic genomes.", file=stderr)
filenames = [outdir + seq.filename() for seq in leaf_seqs]
fa.write_nameid_map(name_id_map, filenames)
print("[2/3] Successfully wrote nameidmap to %s." % name_id_map,
file=stderr)
fa.write_parent_map(parent_map, pcmap)
print("[3/3] Successfully wrote child->parent map.", file=stderr)
stderr.write("Genomes: %s\n" % ', '.join(filenames))
stderr.write("Nameidmap: %s\n" % name_id_map)
stderr.write("Taxonomy: %s\n" % parent_map)
|
sim/main.py
| 5,017 |
Variables/settings for constructing synthetic genome and accessory files. Append slash Variables for constructing the parent_map dictionary. print("seqsubset len: %i" % len(seqsubset), file=stderr) or it not last_layer Add leaf node to parent connections Add higher nodes to parent connections This leaves the loop on the last layer in the tree because the root is 1 by construction pcmap.update((tax, i + ctax) for tax in last_layer[i:i+mult_per_layer] for i in range(mult_per_layer))
| 511 |
en
| 0.663166 |
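sim/main.py above relies on fa.gen_seq and fa.chunker, which live in a module not included in this record. The chunker it expects is just a fixed-size splitter over the list of leaf sequences; a plausible stand-in (an assumption about the fa module, not its actual code) is:

def chunker(items, chunk_size):
    """Yield consecutive slices of `chunk_size` items from a sequence."""
    for start in range(0, len(items), chunk_size):
        yield items[start:start + chunk_size]

# With 12 leaves and chunk_size 4 this produces 3 full chunks, which is what the
# `assert nleaf_seq % chunk_size == 0` check in the script guarantees.
assert [len(c) for c in chunker(list(range(12)), 4)] == [4, 4, 4]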
# Copyright 2019 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests generating test combinations."""
from collections import OrderedDict
# Dependency imports
from tensorflow_probability.python.internal import test_combinations
from tensorflow_probability.python.internal import test_util
class TestingCombinationsTest(test_util.TestCase):
def test_combine(self):
self.assertEqual([{
"a": 1,
"b": 2
}, {
"a": 1,
"b": 3
}, {
"a": 2,
"b": 2
}, {
"a": 2,
"b": 3
}], test_combinations.combine(a=[1, 2], b=[2, 3]))
def test_arguments_sorted(self):
self.assertEqual([
OrderedDict([("aa", 1), ("ab", 2)]),
OrderedDict([("aa", 1), ("ab", 3)]),
OrderedDict([("aa", 2), ("ab", 2)]),
OrderedDict([("aa", 2), ("ab", 3)])
], test_combinations.combine(ab=[2, 3], aa=[1, 2]))
def test_combine_single_parameter(self):
self.assertEqual([{
"a": 1,
"b": 2
}, {
"a": 2,
"b": 2
}], test_combinations.combine(a=[1, 2], b=2))
def test_add(self):
self.assertEqual(
[{
"a": 1
}, {
"a": 2
}, {
"b": 2
}, {
"b": 3
}],
(test_combinations.combine(a=[1, 2]) +
test_combinations.combine(b=[2, 3])))
@test_combinations.generate(
test_combinations.combine(a=[1, 0], b=[2, 3], c=[1]))
class CombineTheTestSuite(test_util.TestCase):
def test_add_things(self, a, b, c):
self.assertLessEqual(3, a + b + c)
self.assertLessEqual(a + b + c, 5)
def test_add_things_one_more(self, a, b, c):
self.assertLessEqual(3, a + b + c)
self.assertLessEqual(a + b + c, 5)
def not_a_test(self, a=0, b=0, c=0):
del a, b, c
self.fail()
def _test_but_private(self, a=0, b=0, c=0):
del a, b, c
self.fail()
# Check that nothing funny happens to a non-callable that starts with "_test".
test_member = 0
if __name__ == "__main__":
test_util.main()
|
tensorflow_probability/python/internal/test_combinations_test.py
| 2,657 |
Tests generating test combinations.
Copyright 2019 The TensorFlow Probability Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================ Dependency imports Check that nothing funny happens to a non-callable that starts with "_test".
| 783 |
en
| 0.829514 |
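The assertions in test_combinations_test.py above pin down the observable contract of test_combinations.combine: keyword arguments are ordered by name, bare scalars behave like one-element lists, and the result is the cartesian product as a list of dict-like records. A minimal sketch with the same behaviour (not TensorFlow Probability's actual implementation) is:

import itertools
from collections import OrderedDict

def combine(**kwargs):
    """Cartesian product of keyword arguments, keys sorted by name."""
    keys = sorted(kwargs)
    # Promote scalars such as b=2 to single-element lists.
    values = [kwargs[k] if isinstance(kwargs[k], (list, tuple)) else [kwargs[k]] for k in keys]
    return [OrderedDict(zip(keys, combo)) for combo in itertools.product(*values)]

assert combine(a=[1, 2], b=2) == [{"a": 1, "b": 2}, {"a": 2, "b": 2}]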
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import subprocess
import os
from celery import Celery
from airflow.config_templates.default_celery import DEFAULT_CELERY_CONFIG
from airflow.exceptions import AirflowException
from airflow import configuration
from xTool.utils.log.logging_mixin import LoggingMixin
from xTool.utils.module_loading import import_string
from xTool.executors.celery_executor import CeleryExecutor
'''
To start the celery worker, run the command:
airflow worker
'''
# Get the path to the configuration file and import the default celery config
if configuration.conf.has_option('celery', 'celery_config_options'):
celery_configuration = import_string(
configuration.conf.get('celery', 'celery_config_options')
)
else:
celery_configuration = DEFAULT_CELERY_CONFIG
# Create a celery client
celery_app_name = configuration.conf.get('celery', 'CELERY_APP_NAME')
app = Celery(
celery_app_name,
config_source=celery_configuration)
@app.task
def execute_command(command):
"""airflow worker 执行shell命令 ."""
log = LoggingMixin().log
log.info("Executing command in Celery: %s", command)
env = os.environ.copy()
try:
# After the celery worker receives the message, run the shell command it contains
subprocess.check_call(command, shell=True, stderr=subprocess.STDOUT,
close_fds=True, env=env)
except subprocess.CalledProcessError as e:
log.exception('execute_command encountered a CalledProcessError')
log.error(e.output)
raise AirflowException('Celery command failed')
|
airflow/executors/celery_executor.py
| 2,353 |
Execute a shell command on an airflow worker.
-*- coding: utf-8 -*- Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Get the path to the configuration file and import the default celery config Create a celery client After the celery worker receives the message, run the shell command it contains
| 875 |
en
| 0.815397 |
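The execute_command task above is the consumer half; the executor side simply enqueues shell commands on the Celery broker. A hedged sketch of that producer side (the command string and queue name are illustrative, not taken from this codebase):

from airflow.executors.celery_executor import execute_command

# Send one shell command to the broker; a worker started with `airflow worker`
# picks it up and runs it via subprocess.check_call in the task defined above.
async_result = execute_command.apply_async(
    args=["airflow run example_dag example_task 2020-01-01"],  # hypothetical command
)

# The Celery result backend exposes the task state (PENDING / SUCCESS / FAILURE).
print(async_result.state)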
from django.conf.urls import url
from . import views
app_name = 'reports'
urlpatterns = [
# url(r'^graph/', views.graph, name='graph'),
url(r'^graph/', views.statistics, name='graph'),
url(r'^csv_export/', views.csv_export, name='csv_export'),
]
|
reports/urls.py
| 268 |
url(r'^graph/', views.graph, name='graph'),
| 44 |
en
| 0.704375 |
"""Preview mixins for Zinnia views"""
from django.http import Http404
from django.utils.translation import ugettext as _
class EntryPreviewMixin(object):
"""
Mixin implementing the preview of Entries.
"""
def get_object(self, queryset=None):
"""
If the status of the entry is not PUBLISHED,
a preview is requested, so we check if the user
has the 'zinnia.can_view_all' permission or if
it's an author of the entry.
"""
obj = super(EntryPreviewMixin, self).get_object(queryset)
if obj.is_visible:
return obj
if (self.request.user.has_perm('zinnia.can_view_all') or
self.request.user.pk in [
author.pk for author in obj.authors.all()]):
return obj
raise Http404(_('No entry found matching the query'))
|
zinnia/views/mixins/entry_preview.py
| 855 |
Mixin implementing the preview of Entries.
If the status of the entry is not PUBLISHED,
a preview is requested, so we check if the user
has the 'zinnia.can_view_all' permission or if
it's an author of the entry.
Preview mixins for Zinnia views
| 243 |
en
| 0.909162 |
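EntryPreviewMixin above is meant to be combined with a Django detail view so that draft entries stay visible to their authors and to users holding the zinnia.can_view_all permission. A sketch of such a wiring (view name, template path, and import locations are assumptions based on Zinnia's usual layout, not its shipped classes):

from django.views.generic import DetailView

from zinnia.models import Entry
from zinnia.views.mixins.entry_preview import EntryPreviewMixin


class EntryPreviewDetail(EntryPreviewMixin, DetailView):
    """Detail view whose get_object() falls back to the preview rules for drafts."""
    model = Entry
    template_name = "zinnia/entry_detail.html"  # assumed template location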
from libs import reaction as reactioncommand
class Reaction(reactioncommand.AdminReactionAddCommand):
'''Retries a text command
**Usage**
React to the message you want to re-run with the retry emoji
(The emoji is server-defined; ask your fellow server members for the correct emoji)'''
def matches(self, reaction, user):
return user == reaction.message.author
def action(self, reaction, user, client):
yield from client.on_message(reaction.message)
|
retry.py
| 481 |
Retries a text command
**Usage**
React to the message you want to re-run with the retry emoji
(The emoji is server-defined; ask your fellow server members for the correct emoji)
| 178 |
en
| 0.893991 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Marc-Olivier Buob, Maxime Raynal"
__maintainer__ = "Marc-Olivier Buob, Maxime Raynal"
__email__ = "{marc-olivier.buob,maxime.raynal}@nokia.com"
__copyright__ = "Copyright (C) 2020, Nokia"
__license__ = "BSD-3"
from collections import defaultdict
from pybgl.graph import Graph
from pybgl.incidence_automaton import (
IncidenceAutomaton, finals, initial, remove_vertex, vertices
)
from pybgl.depth_first_search import depth_first_search_graph
from pybgl.property_map import make_assoc_property_map
from pybgl.reverse import reverse_graph
def find_reachable_vertices(g: Graph, sources: set) -> set:
"""
Returns the set of vertices of a graph which are reachable
from a set of source vertices.
Args:
g: Graph, an instance of `Graph`
sources: set, a set of integers representing the source vertices
Returns:
The set of vertices that are reachable from the source vertices
"""
map_vcolor = defaultdict(int)
pmap_vcolor = make_assoc_property_map(map_vcolor)
depth_first_search_graph(g, sources, pmap_vcolor=pmap_vcolor)
return set(map_vcolor.keys())
def prune_incidence_automaton(g: IncidenceAutomaton):
"""
Prunes the vertices of an IncidenceAutomaton that cannot be reached
from the initial state, or that cannot reach a final state.
Args:
g: IncidenceAutomaton, an instance of IncidenceAutomaton
"""
to_keep = find_reachable_vertices(g, {initial(g)})
reverse_graph(g)
to_keep &= find_reachable_vertices(g, finals(g))
reverse_graph(g)
to_remove = set(vertices(g)) - to_keep
for q in to_remove:
remove_vertex(q, g)
|
pybgl/prune_incidence_automaton.py
| 1,763 |
Returns the set of vertices of a graph which are reachable
from a set of source vertices.
Args:
g: Graph, an instance of `Graph`
sources: set, a set of integers representing the source vertices
Returns:
The set of vertices that are reachable from the source vertices
Prunes the vertices of an IncidenceAutomaton that cannot be reached
from the initial state, or that cannot reach a final state.
Args:
g: IncidenceAutomaton, an instance of IncidenceAutomaton
!/usr/bin/env python3 -*- coding: utf-8 -*-
| 517 |
en
| 0.83487 |
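prune_incidence_automaton above keeps exactly the states that are reachable from the initial state and co-reachable from a final state, by searching forward, reversing the graph, and searching again. The same idea on a plain adjacency-dict automaton, independent of pybgl's API, looks like this:

def reachable(adj, sources):
    """Vertices reachable from `sources` in an adjacency dict {u: {v, ...}}."""
    seen, stack = set(sources), list(sources)
    while stack:
        u = stack.pop()
        for v in adj.get(u, ()):
            if v not in seen:
                seen.add(v)
                stack.append(v)
    return seen

def prune(adj, initial, finals):
    """Keep states that are reachable from `initial` and can reach some final state."""
    reverse = {}
    for u, vs in adj.items():
        for v in vs:
            reverse.setdefault(v, set()).add(u)
    keep = reachable(adj, {initial}) & reachable(reverse, finals)
    return {u: {v for v in vs if v in keep} for u, vs in adj.items() if u in keep}

# State 3 is unreachable and state 2 cannot reach the final state 4: both get pruned.
adj = {0: {1, 2}, 1: {4}, 2: set(), 3: {4}, 4: set()}
assert prune(adj, 0, {4}) == {0: {1}, 1: {4}, 4: set()}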
"""Module containing examples of report builder functions and classes."""
from collections import OrderedDict
import numpy as np
def example_fn_build_report(report, pvarray):
"""Example function that builds a report when used in the
:py:class:`~pvfactors.engine.PVEngine` with full mode simulations.
Here it will be a dictionary with lists of calculated values.
Parameters
----------
report : dict
Initially ``None``, this will be passed and updated by the function
pvarray : PV array object
PV array with updated calculation values
Returns
-------
report : dict
Report updated with newly calculated values
"""
# Initialize the report
if report is None:
list_keys = ['qinc_front', 'qinc_back', 'iso_front', 'iso_back']
report = OrderedDict({key: [] for key in list_keys})
# Add elements to the report
if pvarray is not None:
pvrow = pvarray.pvrows[1] # use center pvrow
report['qinc_front'].append(
pvrow.front.get_param_weighted('qinc'))
report['qinc_back'].append(
pvrow.back.get_param_weighted('qinc'))
report['iso_front'].append(
pvrow.front.get_param_weighted('isotropic'))
report['iso_back'].append(
pvrow.back.get_param_weighted('isotropic'))
else:
# No calculation was performed, because sun was down
report['qinc_front'].append(np.nan)
report['qinc_back'].append(np.nan)
report['iso_front'].append(np.nan)
report['iso_back'].append(np.nan)
return report
class ExampleReportBuilder(object):
"""A class is required to build reports when running calculations with
multiprocessing because of python constraints"""
@staticmethod
def build(report, pvarray):
"""Method that will build the simulation report. Here we're using the
previously defined
:py:function:`~pvfactors.report.example_fn_build_report`.
Parameters
----------
report : dict
Initially ``None``, this will be passed and updated by the function
pvarray : PV array object
PV array with updated calculation values
Returns
-------
report : dict
Report updated with newly calculated values
"""
return example_fn_build_report(report, pvarray)
@staticmethod
def merge(reports):
"""Method used to merge multiple reports together. Here it simply
concatenates the lists of values saved in the different reports.
Parameters
----------
reports : list of dict
List of reports that need to be concatenated together
Returns
-------
report : dict
Final report with all concatenated values
"""
report = reports[0]
# Merge only if more than 1 report
if len(reports) > 1:
keys_report = list(reports[0].keys())
for other_report in reports[1:]:
for key in keys_report:
report[key] += other_report[key]
return report
|
pvfactors/report.py
| 3,158 |
A class is required to build reports when running calculations with
multiprocessing because of python constraints
Method that will build the simulation report. Here we're using the
previously defined
:py:function:`~pvfactors.report.example_fn_build_report`.
Parameters
----------
report : dict
Initially ``None``, this will be passed and updated by the function
pvarray : PV array object
PV array with updated calculation values
Returns
-------
report : dict
Report updated with newly calculated values
Example function that builds a report when used in the
:py:class:`~pvfactors.engine.PVEngine` with full mode simulations.
Here it will be a dictionary with lists of calculated values.
Parameters
----------
report : dict
Initially ``None``, this will be passed and updated by the function
pvarray : PV array object
PV array with updated calculation values
Returns
-------
report : dict
Report updated with newly calculated values
Method used to merge multiple reports together. Here it simply
concatenates the lists of values saved in the different reports.
Parameters
----------
reports : list of dict
List of reports that need to be concatenated together
Returns
-------
report : dict
Final report with all concatenated values
Module containing examples of report builder functions and classes.
Initialize the report Add elements to the report use center pvrow No calculation was performed, because sun was down Merge only if more than 1 report
| 1,488 |
en
| 0.74372 |
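A quick check of the merge behaviour defined in pvfactors/report.py above, using two tiny hand-built partial reports (the numbers are illustrative, not simulation output; the import path mirrors the record's file location):

from collections import OrderedDict

from pvfactors.report import ExampleReportBuilder

r1 = OrderedDict(qinc_front=[100.0], qinc_back=[20.0], iso_front=[5.0], iso_back=[1.0])
r2 = OrderedDict(qinc_front=[110.0], qinc_back=[25.0], iso_front=[6.0], iso_back=[2.0])

merged = ExampleReportBuilder.merge([r1, r2])
# Lists are concatenated key by key, exactly what the multiprocessing workers need.
assert merged["qinc_front"] == [100.0, 110.0]
assert merged["iso_back"] == [1.0, 2.0]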
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 27 17:38:25 2020
@author: Wu Yichen
"""
from PIL import Image
import os
import os.path
import errno
import numpy as np
import sys
import pickle
import torch.utils.data as data
from torchvision.datasets.utils import download_url, check_integrity
import torch
import torch.nn.functional as F
from torch.autograd import Variable as V
import wideresnet as wrn
import torchvision.transforms as transforms
def uniform_mix_C(mixing_ratio, num_classes):
'''
returns a linear interpolation of a uniform matrix and an identity matrix
'''
return mixing_ratio * np.full((num_classes, num_classes), 1 / num_classes) + \
(1 - mixing_ratio) * np.eye(num_classes)
def flip_labels_C(corruption_prob, num_classes, seed=1):
'''
returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob
concentrated in only one other entry for each row
'''
np.random.seed(seed)
C = np.eye(num_classes) * (1 - corruption_prob)
row_indices = np.arange(num_classes)
for i in range(num_classes):
C[i][np.random.choice(row_indices[row_indices != i])] = corruption_prob
return C
def flip_labels_C_two(corruption_prob, num_classes, seed=1):
'''
returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob
concentrated in only one other entry for each row
'''
np.random.seed(seed)
C = np.eye(num_classes) * (1 - corruption_prob)
row_indices = np.arange(num_classes)
for i in range(num_classes):
C[i][np.random.choice(row_indices[row_indices != i], 2, replace=False)] = corruption_prob / 2
return C
class CIFAR10(data.Dataset):
base_folder = 'cifar-10-batches-py'
url = "http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
filename = "cifar-10-python.tar.gz"
tgz_md5 = 'c58f30108f718f92721af3b95e74349a'
train_list = [
['data_batch_1', 'c99cafc152244af753f735de768cd75f'],
['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],
['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'],
['data_batch_4', '634d18415352ddfa80567beed471001a'],
['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'],
]
test_list = [
['test_batch', '40351d587109b95175f43aff81a1287e'],
]
def __init__(self, root='', train=True, meta=True, num_meta=1000,
corruption_prob=0, corruption_type='unif', transform=None, target_transform=None,
download=False, seed=1):
self.count = 0
self.root = root
self.transform = transform
self.target_transform = target_transform
self.train = train # training set or test set
self.meta = meta
self.corruption_prob = corruption_prob
self.num_meta = num_meta
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
# now load the pickled numpy arrays
if self.train:
self.train_data = []
self.train_labels = []
self.train_coarse_labels = []
self.train_labels_true = []
self.soft_labels = []
for fentry in self.train_list:
f = fentry[0]
file = os.path.join(root, self.base_folder, f)
fo = open(file, 'rb')
if sys.version_info[0] == 2:
entry = pickle.load(fo)
else:
entry = pickle.load(fo, encoding='latin1')
self.train_data.append(entry['data'])
if 'labels' in entry:
self.train_labels += entry['labels']
self.train_labels_true += entry['labels']
img_num_list = [int(self.num_meta/10)] * 10
num_classes = 10
else:
self.train_labels += entry['fine_labels']
self.train_labels_true += entry['fine_labels']
self.train_coarse_labels += entry['coarse_labels']
img_num_list = [int(self.num_meta/100)] * 100
num_classes = 100
fo.close()
self.train_data = np.concatenate(self.train_data)
self.train_data = self.train_data.reshape((50000, 3, 32, 32))
self.train_data = self.train_data.transpose((0, 2, 3, 1)) # convert to HWC
data_list_val = {}
for j in range(num_classes):
data_list_val[j] = [i for i, label in enumerate(self.train_labels) if label == j]
idx_to_meta = []
idx_to_train = []
print(img_num_list)
for cls_idx, img_id_list in data_list_val.items():
np.random.shuffle(img_id_list)
img_num = img_num_list[int(cls_idx)]
idx_to_meta.extend(img_id_list[:img_num])
idx_to_train.extend(img_id_list[img_num:])
if meta is True:
self.train_data = self.train_data[idx_to_meta]
self.train_labels = list(np.array(self.train_labels)[idx_to_meta])
else:
self.train_data = self.train_data[idx_to_train]
self.train_labels = list(np.array(self.train_labels)[idx_to_train])
self.train_labels_true = list(np.array(self.train_labels_true)[idx_to_train])
self.soft_labels = list(np.zeros((len(self.train_data),num_classes),dtype=np.float32))
self.prediction = np.zeros((len(self.train_data),10,num_classes),dtype=np.float32)
clean_labels = self.train_labels
np.save('clean_labels.npy', clean_labels)
if corruption_type == 'unif':
C = uniform_mix_C(self.corruption_prob, num_classes)
print(C)
self.C = C
elif corruption_type == 'flip':
C = flip_labels_C(self.corruption_prob, num_classes)
print(C)
self.C = C
elif corruption_type == 'flip2':
C = flip_labels_C_two(self.corruption_prob, num_classes)
print(C)
self.C = C
elif corruption_type == 'hierarchical':
assert num_classes == 100, 'You must use CIFAR-100 with the hierarchical corruption.'
coarse_fine = []
for i in range(20):
coarse_fine.append(set())
for i in range(len(self.train_labels)):
coarse_fine[self.train_coarse_labels[i]].add(self.train_labels[i])
for i in range(20):
coarse_fine[i] = list(coarse_fine[i])
C = np.eye(num_classes) * (1 - corruption_prob)
for i in range(20):
tmp = np.copy(coarse_fine[i])
for j in range(len(tmp)):
tmp2 = np.delete(np.copy(tmp), j)
C[tmp[j], tmp2] += corruption_prob * 1/len(tmp2)
self.C = C
print(C)
elif corruption_type == 'clabels':
net = wrn.WideResNet(40, num_classes, 2, dropRate=0.3).cuda()
model_name = './cifar{}_labeler'.format(num_classes)
net.load_state_dict(torch.load(model_name))
net.eval()
else:
assert False, "Invalid corruption type '{}' given. Must be in {'unif', 'flip', 'hierarchical'}".format(corruption_type)
np.random.seed(seed)
if corruption_type == 'clabels':
mean = [x / 255 for x in [125.3, 123.0, 113.9]]
std = [x / 255 for x in [63.0, 62.1, 66.7]]
test_transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize(mean, std)])
# obtain sampling probabilities
sampling_probs = []
print('Starting labeling')
for i in range((len(self.train_labels) // 64) + 1):
current = self.train_data[i*64:(i+1)*64]
current = [Image.fromarray(current[i]) for i in range(len(current))]
current = torch.cat([test_transform(current[i]).unsqueeze(0) for i in range(len(current))], dim=0)
data = V(current).cuda()
logits = net(data)
smax = F.softmax(logits / 5) # softmax with temperature 5
sampling_probs.append(smax.data.cpu().numpy())
sampling_probs = np.concatenate(sampling_probs, 0)
print('Finished labeling 1')
new_labeling_correct = 0
argmax_labeling_correct = 0
for i in range(len(self.train_labels)):
old_label = self.train_labels[i]
new_label = np.random.choice(num_classes, p=sampling_probs[i])
self.train_labels[i] = new_label
if old_label == new_label:
new_labeling_correct += 1
if old_label == np.argmax(sampling_probs[i]):
argmax_labeling_correct += 1
print('Finished labeling 2')
print('New labeling accuracy:', new_labeling_correct / len(self.train_labels))
print('Argmax labeling accuracy:', argmax_labeling_correct / len(self.train_labels))
else:
for i in range(len(self.train_labels)):
self.train_labels_true[i] = self.train_labels[i]
for i in range(len(self.train_labels)):
self.train_labels[i] = np.random.choice(num_classes, p=C[self.train_labels[i]])
print('train',len(self.train_labels))
print('type',type(self.train_labels))
self.corruption_matrix = C
noise_labels = self.train_labels
np.save('noise_labels.npy', noise_labels)
else:
f = self.test_list[0][0]
file = os.path.join(root, self.base_folder, f)
fo = open(file, 'rb')
if sys.version_info[0] == 2:
entry = pickle.load(fo)
else:
entry = pickle.load(fo, encoding='latin1')
self.test_data = entry['data']
if 'labels' in entry:
self.test_labels = entry['labels']
else:
self.test_labels = entry['fine_labels']
fo.close()
self.test_data = self.test_data.reshape((10000, 3, 32, 32))
self.test_data = self.test_data.transpose((0, 2, 3, 1)) # convert to HWC
def label_update(self, results):
self.count += 1
# While updating the noisy label y_i by the probability s, we used the average output probability of the network of the past 10 epochs as s.
idx = (self.count - 1) % 10
self.prediction[:, idx] = results
#self.prediction[:] =results
#print(self.prediction)
if self.count == 79:
self.soft_labels = self.prediction.mean(axis=1)
#print(self.soft_labels.shape)
#print(self.soft_labels)
#self.soft_labels = list(np.argmax(self.soft_labels, axis=1).astype(np.int64))
if self.count > 79:
self.soft_labels = results
#self.soft_labels = list(np.argmax(self.soft_labels, axis=1).astype(np.int64))
def __getitem__(self, index):
if self.train:
if self.meta:
#print(self.train_labels[index])
img, target, target_true= self.train_data[index], self.train_labels[index],self.train_labels_true[index]
else:
img, target, target_true= self.train_data[index], self.train_labels[index],self.train_labels_true[index]
soft_labels = self.soft_labels[index]
else:
img, target = self.test_data[index], self.test_labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
if self.train :
if self.meta:
return img, target
else:
return img,target,target_true,soft_labels,index
else:
return img, target
def __len__(self):
if self.train:
if self.meta is True:
return self.num_meta
else:
return 50000 - self.num_meta
else:
return 10000
def _check_integrity(self):
root = self.root
for fentry in (self.train_list + self.test_list):
filename, md5 = fentry[0], fentry[1]
fpath = os.path.join(root, self.base_folder, filename)
if not check_integrity(fpath, md5):
return False
return True
def download(self):
import tarfile
if self._check_integrity():
print('Files already downloaded and verified')
return
root = self.root
download_url(self.url, root, self.filename, self.tgz_md5)
# extract file
cwd = os.getcwd()
tar = tarfile.open(os.path.join(root, self.filename), "r:gz")
os.chdir(root)
tar.extractall()
tar.close()
os.chdir(cwd)
class CIFAR100(CIFAR10):
base_folder = 'cifar-100-python'
url = "http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
filename = "cifar-100-python.tar.gz"
tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
train_list = [
['train', '16019d7e3df5f24257cddd939b257f8d'],
]
test_list = [
['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
]
|
dataloader.py
| 14,764 |
returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob
concentrated in only one other entry for each row
returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob
concentrated in only one other entry for each row
returns a linear interpolation of a uniform matrix and an identity matrix
Created on Mon Apr 27 17:38:25 2020
@author: Wu Yichen
-*- coding: utf-8 -*- training set or test set now load the picked numpy arrays convert to HWC obtain sampling probabilities temperature of 1 convert to HWC While updating the noisy label y_i by the probability s, we used the average output probability of the network of the past 10 epochs as s.10 10self.prediction[:] =resultsprint(self.prediction)79print(self.soft_labels.shape)print(self.soft_labels)self.soft_labels = list(np.argmax(self.soft_labels, axis=1).astype(np.int64))self.soft_labels = list(np.argmax(self.soft_labels, axis=1).astype(np.int64))print(self.train_labels[index]) doing this so that it is consistent with all other datasets to return a PIL Image extract file
| 1,079 |
en
| 0.691974 |
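The corruption matrices at the top of dataloader.py above are row-stochastic: row y holds the probability of turning clean label y into each possible noisy label, and labels are flipped with np.random.choice(num_classes, p=C[y]). A small self-contained illustration of that mechanism (re-deriving uniform_mix_C rather than importing the record's module):

import numpy as np

def uniform_mix_C(mixing_ratio, num_classes):
    """Linear interpolation of a uniform matrix and the identity (same formula as above)."""
    return mixing_ratio * np.full((num_classes, num_classes), 1 / num_classes) + \
        (1 - mixing_ratio) * np.eye(num_classes)

C = uniform_mix_C(0.4, 10)
assert np.allclose(C.sum(axis=1), 1.0)       # every row is a probability distribution
assert np.isclose(C[3, 3], 0.6 + 0.4 / 10)   # probability of keeping the true label

np.random.seed(0)
clean = np.array([1, 7, 3])
noisy = np.array([np.random.choice(10, p=C[y]) for y in clean])
print(noisy)  # most labels survive, a few are resampled uniformly at random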
import os
import torch
from typing import List
from dqc.utils.datastruct import CGTOBasis
__all__ = ["loadbasis"]
_dtype = torch.double
_device = torch.device("cpu")
def loadbasis(cmd: str, dtype: torch.dtype = _dtype,
device: torch.device = _device, requires_grad: bool = False) -> \
List[CGTOBasis]:
"""
Load basis from a file and return the list of CGTOBasis.
Arguments
---------
cmd: str
This can be a file path where the basis is stored or a
string in format ``"atomz:basis"``, e.g. ``"1:6-311++G**"``.
dtype: torch.dtype
Tensor data type for ``alphas`` and ``coeffs`` of the GTO basis
device: torch.device
Tensor device for ``alphas`` and ``coeffs``
requires_grad: bool
If ``True``, the ``alphas`` and ``coeffs`` tensors become differentiable
Returns
-------
list of CGTOBasis
List of GTO basis loaded from the given file
"""
res = []
if not os.path.exists(cmd):
file = _get_basis_file(cmd)
else:
file = cmd
# read the content
with open(file, "r") as f:
lines = f.read().split("\n")
# skip the header
while True:
line = lines.pop(0)
if line == "":
continue
if line.startswith("!"):
continue
break
# now it is at the orbital description
while len(lines) > 0:
line = lines.pop(0)
if line.startswith("**"):
break
desc = line.split()
nlines = int(desc[1])
if nlines == 0:
raise RuntimeError("Zero line on basis %s" % file)
# read the exponents and the coefficients
alphas = []
coeffsT = []
for i in range(nlines):
alphacoeff = [_read_float(f) for f in lines.pop(0).split()]
alphas.append(alphacoeff[0])
coeffsT.append(alphacoeff[1:])
# coeffsT: list with shape (nbasis, ncontr)
# coeffs: list with shape (ncontr, nbasis)
coeffs = list(zip(*coeffsT))
ncoeffs = len(coeffs)
angmoms = _expand_angmoms(desc[0], ncoeffs)
# convert to tensor
alpha = torch.tensor(alphas, dtype=dtype, device=device, requires_grad=requires_grad)
for i in range(ncoeffs):
coeff = torch.tensor(coeffs[i], dtype=dtype, device=device, requires_grad=requires_grad)
basis = CGTOBasis(angmom=angmoms[i], alphas=alpha, coeffs=coeff)
basis.wfnormalize_()
res.append(basis)
return res
def _read_float(s: str) -> float:
s = s.replace("D", "E")
return float(s)
def _get_basis_file(cmd: str) -> str:
# parse the string command, check if the basis has already been downloaded
# (download if not), and return the file name
# parse to get the atomz and the basisname
atomz_str, raw_basisname = cmd.split(":")
raw_basisname = raw_basisname.strip()
atomz = int(atomz_str)
# get the path to the database
basisname = _normalize_basisname(raw_basisname)
thisdir = os.path.dirname(os.path.realpath(__file__))
fname = "%02d.gaussian94" % atomz
fdir = os.path.join(thisdir, ".database", basisname)
fpath = os.path.join(fdir, fname)
# if the file does not exist, download it
if not os.path.exists(fpath):
print("The %s basis for atomz %d does not exist, but we will download it" %
(raw_basisname, atomz))
if not os.path.exists(fdir):
os.makedirs(fdir)
_download_basis(fpath, atomz, raw_basisname)
return fpath
def _normalize_basisname(basisname: str) -> str:
b = basisname.lower()
b = b.replace("+", "p")
b = b.replace("*", "s")
b = b.replace("(", "_")
b = b.replace(")", "_")
b = b.replace(",", "_")
return b
def _download_basis(fname: str, atomz: int, basisname: str) -> None:
import basis_set_exchange as bse
s = bse.get_basis(basisname, elements=[atomz], fmt="gaussian94")
with open(fname, "w") as f:
f.write(s)
print("Downloaded to %s" % fname)
def _expand_angmoms(s: str, n: int) -> List[int]:
# convert the angular momentum characters into angmom and returns a list
# of n integer containing the angular momentums
if len(s) == n:
pass
elif n % len(s) == 0:
s = s * (n // len(s))
else:
raise RuntimeError("Do not know how to read orbital %s with %d coefficient columns" %
(s, n))
s = s.lower()
spdfmap = {
"s": 0,
"p": 1,
"d": 2,
"f": 3,
"g": 4,
"h": 5,
"i": 6,
}
angmoms = [spdfmap[c] for c in s]
return angmoms
|
dqc/api/loadbasis.py
| 4,842 |
Load basis from a file and return the list of CGTOBasis.
Arguments
---------
cmd: str
This can be a file path where the basis is stored or a
string in format ``"atomz:basis"``, e.g. ``"1:6-311++G**"``.
dtype: torch.dtype
Tensor data type for ``alphas`` and ``coeffs`` of the GTO basis
device: torch.device
Tensor device for ``alphas`` and ``coeffs``
requires_grad: bool
If ``True``, the ``alphas`` and ``coeffs`` tensors become differentiable
Returns
-------
list of CGTOBasis
List of GTO basis loaded from the given file
read the content skip the header now it is at the orbital description read the exponents and the coefficients coeffsT: list with shape (nbasis, ncontr) coeffs: list with shape (ncontr, nbasis) convert to tensor parse the string command, check if the basis has already been downloaded (download if not), and return the file name parse to get the atomz and the basisname get the path to the database if the file does not exist, download it convert the angular momentum characters into angmom and returns a list of n integer containing the angular momentums
| 1,104 |
en
| 0.756467 |
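A hedged usage sketch for the loader in dqc/api/loadbasis.py above. The "atomz:basis" form triggers a download from the Basis Set Exchange on first use, so this assumes network access and the basis_set_exchange package are available; the import path simply mirrors the record's file location:

import torch
from dqc.api.loadbasis import loadbasis

# 6-311++G** basis for hydrogen (atomz=1), with differentiable GTO parameters.
bases = loadbasis("1:6-311++G**", dtype=torch.double, requires_grad=True)

for basis in bases:
    # Each CGTOBasis carries an angular momentum plus its exponents and coefficients.
    print(basis.angmom, basis.alphas.shape, basis.coeffs.shape)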
import datetime
import threading
import contextlib
import pyotp
import qrcode
from errbot import BotPlugin, botcmd, arg_botcmd, cmdfilter
# OTP expires every hour
_OTP_EXPIRE = datetime.timedelta(hours=1)
_BASE_TIME = datetime.datetime(year=datetime.MINYEAR, month=1, day=1)
class otp(BotPlugin):
'''
Implement One Time Passwords for command filtering.
'''
# lock protects storage
lock = threading.Lock()
def activate(self):
super(otp, self).activate()
# Set the data directory for the plugin
self.DATA_DIR = '{0}/'.format(self.bot_config.BOT_DATA_DIR)
if 'commands' not in self:
self['commands'] = set()
if 'secrets' not in self:
self['secrets'] = dict()
@contextlib.contextmanager
def stored(self, key):
'''
This is a convenience tool to make plugin storage easier.
'''
value = self[key]
try:
yield value
finally:
self[key] = value
def get_configuration_template(self):
return dict(
provision_via_chat=False,
max_retries=10
)
def build_qrcode(self, user, url):
'''Internal method used to build the QRCode image for token provisioning.'''
prefix = self.DATA_DIR
qrcode.make(url).save('{0}{1}-qrcode.png'.format(prefix, user), format='png')
def get_identity(self, message):
'''Wrapper to make sure the correct identity object is used.'''
try:
return message.frm.aclattr
except AttributeError:
return message.frm.person
@botcmd(admin_only=True)
def otp_delete_all(self, message, args):
'''
WARNING: This command removes ALL OTP entries.
'''
self['commands'] = set()
self['secrets'] = dict()
return 'Removed **all** OTP tokens and command filters.'
@arg_botcmd('cmd', type=str, admin_only=True, template='otp_add_command')
def otp_add_command(self, message, cmd=None):
'''
Add a command to OTP command filtering.
'''
with self.lock:
with self.stored('commands') as commands:
commands.add(cmd)
return dict(command=cmd)
#return 'Added {0} to OTP filtered commands.'.format(cmd)
@arg_botcmd('cmd', type=str, admin_only=True, template='otp_remove_command')
def otp_remove_command(self, message, cmd=None):
'''
Remove a command from OTP command filtering.
'''
with self.lock:
with self.stored('commands') as commands:
if cmd not in commands:
return dict(err=True, command=cmd)
commands.remove(cmd)
return dict(err=False, command=cmd)
@botcmd(admin_only=True, template='otp_commands')
def otp_commands(self, message, args):
'''
List the commands that are filtered by OTP.
'''
return dict(commands=self['commands'])
@arg_botcmd('user', type=str, admin_only=True, template='otp_secret_create')
def otp_secret_create(self, message, user=None):
'''
Send a new secret for a user.
'''
secret = pyotp.random_base32()
with self.lock:
with self.stored('secrets') as secrets:
secrets[user] = (secret, 0, _BASE_TIME)
totp = pyotp.TOTP(secret)
url = totp.provisioning_uri(user)
self.build_qrcode(user, url)
if self.config:
if self.config.get('provision_via_chat'):
f = open('{0}{1}-qrcode.png'.format(self.DATA_DIR, user), 'rb')
self.send_stream_request(self.build_identifier(user), f, name='OTP-secret.png')
self.send_templated(self.build_identifier(user), 'otp_secret_create_pm', dict(url=url))
return dict(chat_enrollment=True, user=user)
return dict(chat_enrollment=False, user=user)
@arg_botcmd('otp', type=int, template='otp_auth')
def otp_auth(self, message, otp=None):
'''
Authenticate with OTP to the bot to pass OTP filtering.
'''
# OTP shouldn't be done in a group chat channel.
if message.is_group:
return dict(group_chat=True)
identity = self.get_identity(message)
if identity not in self['secrets']:
return dict(not_enrolled=True)
secret, attempts, _ = self['secrets'][identity]
totp = pyotp.TOTP(secret)
if totp.verify(otp):
with self.lock:
with self.stored('secrets') as secrets:
secret, _, _ = secrets[identity]
secrets[identity] = (secret, 0, datetime.datetime.now())
return dict(success=True)
else:
# Increase the number of attempts, or burn secret
with self.lock:
with self.stored('secrets') as secrets:
secret, attempts, ts = secrets[identity]
if attempts > self.config.get('max_retries'):
secret = ''
secrets[identity] = (secret, attempts+1, ts)
return dict(success=False)
@cmdfilter
def otp_filter(self, message, command, args, dry_run):
'''
Filter commands to determine if user has recently validated with OTP.
'''
with self.lock:
if command in self['commands']:
self.log.info('{0} is protected by OTP. Processing.'.format(command))
identity = self.get_identity(message)
secrets = self['secrets']
if identity not in secrets:
# Command is filtered, user doesn't have an OTP token
self.send_templated(message.frm, 'otp_filter', dict(not_enrolled=True))
return None, None, None
_, _, lastotp = secrets[identity]
if datetime.datetime.now() - lastotp > _OTP_EXPIRE:
self.log.info('{0} has not authenticated with OTP since expire'.format(identity))
self.send_templated(message.frm, 'otp_filter', dict(auth_required=True))
return None, None, None
self.log.info('OTP ok, permit command.')
return message, command, args
|
plugins/otp/otp.py
| 5,488 |
Implement One Time Passwords for command filtering.
Internal method used to build the QRCode image for token provisioning.
Wrapper to make sure the correct identity object is used.
Add a command to OTP command filtering.
Authenticate with OTP to the bot to pass OTP filtering.
List the commands that are filtered by OTP.
WARNING: This command removes ALL OTP entries.
Filter commands to determine if user has recently validated with OTP.
Remove a command from OTP command filtering.
Send a new secret for a user.
This is a convenience tool to make plugin storage easier.
OTP expires every hour lock protects storage Set the data directory for the pluginreturn 'Added {0} to OTP filtered commands.'.format(cmd) OTP shouldn't be done in a group chat channel. Increase the number of attempts, or burn secret Command is filtered, user doesn't have an OTP token
| 858 |
en
| 0.888296 |
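Stripped of the errbot plumbing, the enrollment and verification steps in otp.py above reduce to a few pyotp calls; a minimal sketch (the user identifier is hypothetical):

import pyotp

# Enrollment: generate a per-user secret and a provisioning URI for the QR code.
secret = pyotp.random_base32()
totp = pyotp.TOTP(secret)
uri = totp.provisioning_uri("alice@example.org")  # hypothetical user identity

# Authentication: the user submits the current 6-digit code from their app.
code = totp.now()          # in the plugin this value arrives via the chat message
assert totp.verify(code)   # valid within the 30-second TOTP window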
"""
WSGI config for kweetservice project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kweetservice.settings")
application = get_wsgi_application()
|
kweetservice/kweetservice/wsgi.py
| 401 |
WSGI config for kweetservice project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
| 218 |
en
| 0.765512 |
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 ConvBERT model."""
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...file_utils import (
MULTIPLE_CHOICE_DUMMY_INPUTS,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
)
from ...modeling_tf_outputs import (
TFBaseModelOutput,
TFMaskedLMOutput,
TFMultipleChoiceModelOutput,
TFQuestionAnsweringModelOutput,
TFSequenceClassifierOutput,
TFTokenClassifierOutput,
)
from ...modeling_tf_utils import (
TFMaskedLanguageModelingLoss,
TFMultipleChoiceLoss,
TFPreTrainedModel,
TFQuestionAnsweringLoss,
TFSequenceClassificationLoss,
TFSequenceSummary,
TFTokenClassificationLoss,
get_initializer,
input_processing,
keras_serializable,
shape_list,
)
from ...utils import logging
from .configuration_convbert import ConvBertConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "YituTech/conv-bert-base"
_CONFIG_FOR_DOC = "ConvBertConfig"
_TOKENIZER_FOR_DOC = "ConvBertTokenizer"
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"YituTech/conv-bert-base",
"YituTech/conv-bert-medium-small",
"YituTech/conv-bert-small",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
]
# Copied from transformers.models.albert.modeling_tf_albert.TFAlbertEmbeddings with Albert->ConvBert
class TFConvBertEmbeddings(tf.keras.layers.Layer):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config: ConvBertConfig, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.type_vocab_size = config.type_vocab_size
self.embedding_size = config.embedding_size
self.max_position_embeddings = config.max_position_embeddings
self.initializer_range = config.initializer_range
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
def build(self, input_shape: tf.TensorShape):
with tf.name_scope("word_embeddings"):
self.weight = self.add_weight(
name="weight",
shape=[self.vocab_size, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
with tf.name_scope("token_type_embeddings"):
self.token_type_embeddings = self.add_weight(
name="embeddings",
shape=[self.type_vocab_size, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
with tf.name_scope("position_embeddings"):
self.position_embeddings = self.add_weight(
name="embeddings",
shape=[self.max_position_embeddings, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
super().build(input_shape)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertEmbeddings.call
def call(
self,
input_ids: tf.Tensor = None,
position_ids: tf.Tensor = None,
token_type_ids: tf.Tensor = None,
inputs_embeds: tf.Tensor = None,
past_key_values_length=0,
training: bool = False,
) -> tf.Tensor:
"""
Applies embedding based on inputs tensor.
Returns:
final_embeddings (`tf.Tensor`): output embedding tensor.
"""
if input_ids is None and inputs_embeds is None:
raise ValueError("Need to provide either `input_ids` or `input_embeds`.")
if input_ids is not None:
inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
input_shape = shape_list(inputs_embeds)[:-1]
if token_type_ids is None:
token_type_ids = tf.fill(dims=input_shape, value=0)
if position_ids is None:
position_ids = tf.expand_dims(
tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0
)
position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
final_embeddings = inputs_embeds + position_embeds + token_type_embeds
final_embeddings = self.LayerNorm(inputs=final_embeddings)
final_embeddings = self.dropout(inputs=final_embeddings, training=training)
return final_embeddings
class TFConvBertSelfAttention(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
new_num_attention_heads = int(config.num_attention_heads / config.head_ratio)
if new_num_attention_heads < 1:
self.head_ratio = config.num_attention_heads
num_attention_heads = 1
else:
num_attention_heads = new_num_attention_heads
self.head_ratio = config.head_ratio
self.num_attention_heads = num_attention_heads
self.conv_kernel_size = config.conv_kernel_size
assert (
config.hidden_size % self.num_attention_heads == 0
), "hidden_size should be divisible by num_attention_heads"
self.attention_head_size = config.hidden_size // config.num_attention_heads
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = tf.keras.layers.Dense(
self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
)
self.key = tf.keras.layers.Dense(
self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
)
self.value = tf.keras.layers.Dense(
self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
)
self.key_conv_attn_layer = tf.keras.layers.SeparableConv1D(
self.all_head_size,
self.conv_kernel_size,
padding="same",
activation=None,
depthwise_initializer=get_initializer(1 / self.conv_kernel_size),
pointwise_initializer=get_initializer(config.initializer_range),
name="key_conv_attn_layer",
)
self.conv_kernel_layer = tf.keras.layers.Dense(
self.num_attention_heads * self.conv_kernel_size,
activation=None,
name="conv_kernel_layer",
kernel_initializer=get_initializer(config.initializer_range),
)
self.conv_out_layer = tf.keras.layers.Dense(
self.all_head_size,
activation=None,
name="conv_out_layer",
kernel_initializer=get_initializer(config.initializer_range),
)
self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x, batch_size):
# Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size))
return tf.transpose(x, perm=[0, 2, 1, 3])
def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):
batch_size = shape_list(hidden_states)[0]
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
mixed_key_conv_attn_layer = self.key_conv_attn_layer(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
conv_attn_layer = tf.multiply(mixed_key_conv_attn_layer, mixed_query_layer)
conv_kernel_layer = self.conv_kernel_layer(conv_attn_layer)
conv_kernel_layer = tf.reshape(conv_kernel_layer, [-1, self.conv_kernel_size, 1])
conv_kernel_layer = tf.nn.softmax(conv_kernel_layer, axis=1)
paddings = tf.constant(
[
[
0,
0,
],
[int((self.conv_kernel_size - 1) / 2), int((self.conv_kernel_size - 1) / 2)],
[0, 0],
]
)
conv_out_layer = self.conv_out_layer(hidden_states)
conv_out_layer = tf.reshape(conv_out_layer, [batch_size, -1, self.all_head_size])
conv_out_layer = tf.pad(conv_out_layer, paddings, "CONSTANT")
unfold_conv_out_layer = tf.stack(
[
tf.slice(conv_out_layer, [0, i, 0], [batch_size, shape_list(mixed_query_layer)[1], self.all_head_size])
for i in range(self.conv_kernel_size)
],
axis=-1,
)
conv_out_layer = tf.reshape(unfold_conv_out_layer, [-1, self.attention_head_size, self.conv_kernel_size])
conv_out_layer = tf.matmul(conv_out_layer, conv_kernel_layer)
conv_out_layer = tf.reshape(conv_out_layer, [-1, self.all_head_size])
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = tf.matmul(
query_layer, key_layer, transpose_b=True
) # (batch size, num_heads, seq_len_q, seq_len_k)
dk = tf.cast(shape_list(key_layer)[-1], attention_scores.dtype) # scale attention_scores
attention_scores = attention_scores / tf.math.sqrt(dk)
if attention_mask is not None:
# Apply the attention mask (precomputed for all layers in the TFBertModel call() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = tf.nn.softmax(attention_scores, axis=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs, training=training)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
value_layer = tf.reshape(
mixed_value_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size]
)
value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
context_layer = tf.matmul(attention_probs, value_layer)
context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
conv_out = tf.reshape(conv_out_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size])
context_layer = tf.concat([context_layer, conv_out], 2)
context_layer = tf.reshape(
context_layer, (batch_size, -1, self.head_ratio * self.all_head_size)
) # (batch_size, seq_len_q, all_head_size)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
class TFConvBertSelfOutput(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
def call(self, hidden_states, input_tensor, training=False):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class TFConvBertAttention(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.self_attention = TFConvBertSelfAttention(config, name="self")
self.dense_output = TFConvBertSelfOutput(config, name="output")
def prune_heads(self, heads):
raise NotImplementedError
def call(self, input_tensor, attention_mask, head_mask, output_attentions, training=False):
self_outputs = self.self_attention(
input_tensor, attention_mask, head_mask, output_attentions, training=training
)
attention_output = self.dense_output(self_outputs[0], input_tensor, training=training)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class GroupedLinearLayer(tf.keras.layers.Layer):
def __init__(self, input_size, output_size, num_groups, kernel_initializer, **kwargs):
super().__init__(**kwargs)
self.input_size = input_size
self.output_size = output_size
self.num_groups = num_groups
self.kernel_initializer = kernel_initializer
self.group_in_dim = self.input_size // self.num_groups
self.group_out_dim = self.output_size // self.num_groups
def build(self, input_shape):
self.kernel = self.add_weight(
"kernel",
shape=[self.group_out_dim, self.group_in_dim, self.num_groups],
initializer=self.kernel_initializer,
trainable=True,
)
self.bias = self.add_weight(
"bias", shape=[self.output_size], initializer=self.kernel_initializer, dtype=self.dtype, trainable=True
)
def call(self, hidden_states):
batch_size = shape_list(hidden_states)[0]
x = tf.transpose(tf.reshape(hidden_states, [-1, self.num_groups, self.group_in_dim]), [1, 0, 2])
x = tf.matmul(x, tf.transpose(self.kernel, [2, 1, 0]))
x = tf.transpose(x, [1, 0, 2])
x = tf.reshape(x, [batch_size, -1, self.output_size])
x = tf.nn.bias_add(value=x, bias=self.bias)
return x
class TFConvBertIntermediate(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
if config.num_groups == 1:
self.dense = tf.keras.layers.Dense(
config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
else:
self.dense = GroupedLinearLayer(
config.hidden_size,
config.intermediate_size,
num_groups=config.num_groups,
kernel_initializer=get_initializer(config.initializer_range),
name="dense",
)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = get_tf_activation(config.hidden_act)
else:
self.intermediate_act_fn = config.hidden_act
def call(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class TFConvBertOutput(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
if config.num_groups == 1:
self.dense = tf.keras.layers.Dense(
config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
else:
self.dense = GroupedLinearLayer(
config.intermediate_size,
config.hidden_size,
num_groups=config.num_groups,
kernel_initializer=get_initializer(config.initializer_range),
name="dense",
)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
def call(self, hidden_states, input_tensor, training=False):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class TFConvBertLayer(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.attention = TFConvBertAttention(config, name="attention")
self.intermediate = TFConvBertIntermediate(config, name="intermediate")
self.bert_output = TFConvBertOutput(config, name="output")
def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):
attention_outputs = self.attention(
hidden_states, attention_mask, head_mask, output_attentions, training=training
)
attention_output = attention_outputs[0]
intermediate_output = self.intermediate(attention_output)
layer_output = self.bert_output(intermediate_output, attention_output, training=training)
outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
return outputs
class TFConvBertEncoder(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.layer = [TFConvBertLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
def call(
self,
hidden_states,
attention_mask,
head_mask,
output_attentions,
output_hidden_states,
return_dict,
training=False,
):
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states, attention_mask, head_mask[i], output_attentions, training=training
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
return TFBaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
)
class TFConvBertPredictionHeadTransform(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
config.embedding_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
if isinstance(config.hidden_act, str):
self.transform_act_fn = get_tf_activation(config.hidden_act)
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
def call(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
@keras_serializable
class TFConvBertMainLayer(tf.keras.layers.Layer):
config_class = ConvBertConfig
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.embeddings = TFConvBertEmbeddings(config, name="embeddings")
if config.embedding_size != config.hidden_size:
self.embeddings_project = tf.keras.layers.Dense(config.hidden_size, name="embeddings_project")
self.encoder = TFConvBertEncoder(config, name="encoder")
self.config = config
def get_input_embeddings(self):
return self.embeddings
def set_input_embeddings(self, value):
self.embeddings.weight = value
self.embeddings.vocab_size = value.shape[0]
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the base
class `PreTrainedModel`.
"""
raise NotImplementedError
def get_extended_attention_mask(self, attention_mask, input_shape, dtype):
if attention_mask is None:
attention_mask = tf.fill(input_shape, 1)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1]))
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = tf.cast(extended_attention_mask, dtype)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
def get_head_mask(self, head_mask):
if head_mask is not None:
raise NotImplementedError
else:
head_mask = [None] * self.config.num_hidden_layers
return head_mask
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif inputs["input_ids"] is not None:
input_shape = shape_list(inputs["input_ids"])
elif inputs["inputs_embeds"] is not None:
input_shape = shape_list(inputs["inputs_embeds"])[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs["attention_mask"] is None:
inputs["attention_mask"] = tf.fill(input_shape, 1)
if inputs["token_type_ids"] is None:
inputs["token_type_ids"] = tf.fill(input_shape, 0)
hidden_states = self.embeddings(
inputs["input_ids"],
inputs["position_ids"],
inputs["token_type_ids"],
inputs["inputs_embeds"],
training=inputs["training"],
)
extended_attention_mask = self.get_extended_attention_mask(
inputs["attention_mask"], input_shape, hidden_states.dtype
)
inputs["head_mask"] = self.get_head_mask(inputs["head_mask"])
if hasattr(self, "embeddings_project"):
hidden_states = self.embeddings_project(hidden_states, training=inputs["training"])
hidden_states = self.encoder(
hidden_states,
extended_attention_mask,
inputs["head_mask"],
inputs["output_attentions"],
inputs["output_hidden_states"],
inputs["return_dict"],
training=inputs["training"],
)
return hidden_states
class TFConvBertPreTrainedModel(TFPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = ConvBertConfig
base_model_prefix = "convbert"
CONVBERT_START_DOCSTRING = r"""
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
etc.)
This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
behavior.
<Tip>
TF 2.0 models accept two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional argument.
This second option is useful when using the [`tf.keras.Model.fit`] method, which currently requires having all the
tensors in the first argument of the model call function: `model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the
first positional argument:
- a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
`model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
</Tip>
Args:
config ([`ConvBertConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
CONVBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`ConvBertTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
[`PreTrainedTokenizer.encode`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail. This argument can be used only in eager mode; in graph mode the value in the
config will be used instead.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail. This argument can be used only in eager mode; in graph mode the value in the config will be
used instead.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used
in eager mode; in graph mode the value will always be set to `True`.
training (`bool`, *optional*, defaults to `False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@add_start_docstrings(
"The bare ConvBERT Model transformer outputting raw hidden-states without any specific head on top.",
CONVBERT_START_DOCSTRING,
)
class TFConvBertModel(TFConvBertPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.convbert = TFConvBertMainLayer(config, name="convbert")
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFBaseModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
outputs = self.convbert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
return outputs
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFBaseModelOutput(last_hidden_state=output.last_hidden_state, hidden_states=hs, attentions=attns)
class TFConvBertMaskedLMHead(tf.keras.layers.Layer):
def __init__(self, config, input_embeddings, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.embedding_size = config.embedding_size
self.input_embeddings = input_embeddings
def build(self, input_shape):
self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias")
super().build(input_shape)
def get_output_embeddings(self):
return self.input_embeddings
def set_output_embeddings(self, value):
self.input_embeddings.weight = value
self.input_embeddings.vocab_size = shape_list(value)[0]
def get_bias(self):
return {"bias": self.bias}
def set_bias(self, value):
self.bias = value["bias"]
self.vocab_size = shape_list(value["bias"])[0]
def call(self, hidden_states):
seq_length = shape_list(tensor=hidden_states)[1]
hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size])
hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.vocab_size])
hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
return hidden_states
class TFConvBertGeneratorPredictions(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dense = tf.keras.layers.Dense(config.embedding_size, name="dense")
def call(self, generator_hidden_states, training=False):
hidden_states = self.dense(generator_hidden_states)
hidden_states = get_tf_activation("gelu")(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
@add_start_docstrings("""ConvBERT Model with a `language modeling` head on top.""", CONVBERT_START_DOCSTRING)
class TFConvBertForMaskedLM(TFConvBertPreTrainedModel, TFMaskedLanguageModelingLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, **kwargs)
self.vocab_size = config.vocab_size
self.convbert = TFConvBertMainLayer(config, name="convbert")
self.generator_predictions = TFConvBertGeneratorPredictions(config, name="generator_predictions")
if isinstance(config.hidden_act, str):
self.activation = get_tf_activation(config.hidden_act)
else:
self.activation = config.hidden_act
self.generator_lm_head = TFConvBertMaskedLMHead(config, self.convbert.embeddings, name="generator_lm_head")
def get_lm_head(self):
return self.generator_lm_head
def get_prefix_bias_name(self):
return self.name + "/" + self.generator_lm_head.name
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFMaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see the `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
generator_hidden_states = self.convbert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
generator_sequence_output = generator_hidden_states[0]
prediction_scores = self.generator_predictions(generator_sequence_output, training=inputs["training"])
prediction_scores = self.generator_lm_head(prediction_scores, training=inputs["training"])
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], prediction_scores)
if not inputs["return_dict"]:
output = (prediction_scores,) + generator_hidden_states[1:]
return ((loss,) + output) if loss is not None else output
return TFMaskedLMOutput(
loss=loss,
logits=prediction_scores,
hidden_states=generator_hidden_states.hidden_states,
attentions=generator_hidden_states.attentions,
)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertForMaskedLM.serving_output
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFMaskedLMOutput(logits=output.logits, hidden_states=hs, attentions=attns)
class TFConvBertClassificationHead(tf.keras.layers.Layer):
"""Head for sentence-level classification tasks."""
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = tf.keras.layers.Dropout(classifier_dropout)
self.out_proj = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj"
)
self.config = config
def call(self, hidden_states, **kwargs):
x = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = get_tf_activation(self.config.hidden_act)(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@add_start_docstrings(
"""
ConvBERT Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks.
""",
CONVBERT_START_DOCSTRING,
)
class TFConvBertForSequenceClassification(TFConvBertPreTrainedModel, TFSequenceClassificationLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.convbert = TFConvBertMainLayer(config, name="convbert")
self.classifier = TFConvBertClassificationHead(config, name="classifier")
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFSequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.convbert(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
logits = self.classifier(outputs[0], training=inputs["training"])
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
ConvBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
CONVBERT_START_DOCSTRING,
)
class TFConvBertForMultipleChoice(TFConvBertPreTrainedModel, TFMultipleChoiceLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.convbert = TFConvBertMainLayer(config, name="convbert")
self.sequence_summary = TFSequenceSummary(
config, initializer_range=config.initializer_range, name="sequence_summary"
)
self.classifier = tf.keras.layers.Dense(
1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@property
def dummy_inputs(self):
"""
Dummy inputs to build the network.
Returns:
tf.Tensor with dummy inputs
"""
return {"input_ids": tf.convert_to_tensor(MULTIPLE_CHOICE_DUMMY_INPUTS)}
@add_start_docstrings_to_model_forward(
CONVBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
)
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFMultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None:
num_choices = shape_list(inputs["input_ids"])[1]
seq_length = shape_list(inputs["input_ids"])[2]
else:
num_choices = shape_list(inputs["inputs_embeds"])[1]
seq_length = shape_list(inputs["inputs_embeds"])[2]
flat_input_ids = tf.reshape(inputs["input_ids"], (-1, seq_length)) if inputs["input_ids"] is not None else None
flat_attention_mask = (
tf.reshape(inputs["attention_mask"], (-1, seq_length)) if inputs["attention_mask"] is not None else None
)
flat_token_type_ids = (
tf.reshape(inputs["token_type_ids"], (-1, seq_length)) if inputs["token_type_ids"] is not None else None
)
flat_position_ids = (
tf.reshape(inputs["position_ids"], (-1, seq_length)) if inputs["position_ids"] is not None else None
)
flat_inputs_embeds = (
tf.reshape(inputs["inputs_embeds"], (-1, seq_length, shape_list(inputs["inputs_embeds"])[3]))
if inputs["inputs_embeds"] is not None
else None
)
outputs = self.convbert(
flat_input_ids,
flat_attention_mask,
flat_token_type_ids,
flat_position_ids,
inputs["head_mask"],
flat_inputs_embeds,
inputs["output_attentions"],
inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
logits = self.sequence_summary(outputs[0], training=inputs["training"])
logits = self.classifier(logits)
reshaped_logits = tf.reshape(logits, (-1, num_choices))
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], reshaped_logits)
if not inputs["return_dict"]:
output = (reshaped_logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFMultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"),
"token_type_ids": tf.TensorSpec((None, None, None), tf.int32, name="token_type_ids"),
}
]
)
def serving(self, inputs):
output = self.call(inputs)
return self.serving_output(output)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFMultipleChoiceModelOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
ConvBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
CONVBERT_START_DOCSTRING,
)
class TFConvBertForTokenClassification(TFConvBertPreTrainedModel, TFTokenClassificationLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.convbert = TFConvBertMainLayer(config, name="convbert")
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = tf.keras.layers.Dropout(classifier_dropout)
self.classifier = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFTokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.convbert(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output, training=inputs["training"])
logits = self.classifier(sequence_output)
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFTokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFTokenClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
ConvBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
CONVBERT_START_DOCSTRING,
)
class TFConvBertForQuestionAnswering(TFConvBertPreTrainedModel, TFQuestionAnsweringLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.convbert = TFConvBertMainLayer(config, name="convbert")
self.qa_outputs = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
)
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFQuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
start_positions=None,
end_positions=None,
training=False,
**kwargs,
):
r"""
start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
are not taken into account for computing the loss.
end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
are not taken into account for computing the loss.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
start_positions=start_positions,
end_positions=end_positions,
training=training,
kwargs_call=kwargs,
)
outputs = self.convbert(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = tf.split(logits, 2, axis=-1)
start_logits = tf.squeeze(start_logits, axis=-1)
end_logits = tf.squeeze(end_logits, axis=-1)
loss = None
if inputs["start_positions"] is not None and inputs["end_positions"] is not None:
labels = {"start_position": inputs["start_positions"]}
labels["end_position"] = inputs["end_positions"]
loss = self.compute_loss(labels, (start_logits, end_logits))
if not inputs["return_dict"]:
output = (start_logits, end_logits) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFQuestionAnsweringModelOutput(
loss=loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFQuestionAnsweringModelOutput(
start_logits=output.start_logits, end_logits=output.end_logits, hidden_states=hs, attentions=attns
)
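# A minimal usage sketch for the bare encoder defined above. It assumes the
# `transformers` and `tensorflow` packages are installed and that the public
# "YituTech/conv-bert-base" checkpoint is reachable; the checkpoint name and the
# example sentence are illustrative assumptions, not part of this module.
def _convbert_usage_sketch():
    from transformers import ConvBertTokenizer, TFConvBertModel

    tokenizer = ConvBertTokenizer.from_pretrained("YituTech/conv-bert-base")
    model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")

    # Tokenize a sentence into TF tensors and run a single forward pass.
    inputs = tokenizer("ConvBERT mixes self-attention with span-based convolution.", return_tensors="tf")
    outputs = model(inputs)

    # (batch_size, sequence_length, hidden_size)
    print(outputs.last_hidden_state.shape)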
# End of file: src/transformers/models/convbert/modeling_tf_convbert.py

# File: providers.py
""" Third party api wrappers"""
import os
import json
import nexmo
import africastalking
username = os.getenv('africastalking_username')
api_key = os.getenv('africastalking_api_key')
africastalking.initialize(username, api_key)
sms = africastalking.SMS
class ProvidersWrapper:
""" Class with all the thirdy party helper functions"""
@staticmethod
def send_message(number, message):
client = nexmo.Client(key=os.getenv('nexmokey'), secret=os.getenv('nexmosecret'))
response = client.send_message({
'from': 'Nexmo',
'to': number,
'text': message,
})
# Fall back to Africa's Talking when Nexmo reports a non-zero (error) status.
if response["messages"][0]["status"] != "0":
response = sms.send(message, ['+' + number])
return response
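# A minimal usage sketch, assuming the nexmokey/nexmosecret and
# africastalking_username/africastalking_api_key environment variables hold
# valid credentials; the recipient number below is a placeholder, not a real value.
if __name__ == "__main__":
    result = ProvidersWrapper.send_message("254700000000", "Your verification code is 1234")
    print(result)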
# End of file: providers.py

# File: core/track.py
import time
import cv2
import numpy as np
from collections import defaultdict
class Tracker(object):
def __init__(self, pLK=None):
if pLK is None:
# default LK param
pLK = self.pLK0()
self.lk_ = cv2.SparsePyrLKOpticalFlow_create(**pLK)
self.tmp_ = defaultdict(lambda:None)
def pLK0(self):
"""
Default LK Params.
"""
return dict(
winSize = (12,6),
maxLevel = 4, # == effective winsize up to 32*(2**4) = 512x256
crit= (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 100, 0.03),
flags = 0,
minEigThreshold = 1e-3 # TODO : disable eig?
)
def __call__(self,
img1, img2,
pt1, pt2=None,
thresh=2.0,
return_msk=False
):
"""
Arguments:
img1(np.ndarray) : previous image. (color/mono) (HxWx?)
img2(np.ndarray) : current image (color/mono) (HxWx?)
pt1(np.ndarray) : previous points. (Mx2)
pt2(np.ndarray) : [Optional] current points estimate (Mx2)
thresh(float) : Flow Back-projection Error threshold
Returns:
pt2(np.ndarray) : current points. (Mx2)
idx(np.ndarray) : valid tracked indices from pt1 & pt2.
"""
if pt1.size <= 0:
# soft fail
pt2 = np.empty([0,2], dtype=np.float32)
if return_msk:
msk = np.empty([0], dtype=bool)  # np.bool is deprecated; use the builtin bool
return pt2, msk
idx = np.empty([0], dtype=np.int32)
return pt2, idx
# stat img
h, w = np.shape(img2)[:2]
# convert to grayscale
# TODO : check if already gray/mono
if (np.ndim(img1) == 2) or img1.shape[2] == 1:
# already monochromatic
img1_gray = img1
img2_gray = img2
else:
# handle image # 1 + pre-allocated data cache
if self.tmp_['img1g'] is not None:
cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY, self.tmp_['img1g'])
img1_gray = self.tmp_['img1g']
else:
img1_gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
self.tmp_['img1g'] = np.empty_like(img1_gray)
# handle image # 2 + pre-allocated data cache
if self.tmp_['img2g'] is not None:
cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY, self.tmp_['img2g'])
img2_gray = self.tmp_['img2g']
else:
img2_gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
self.tmp_['img2g'] = np.empty_like(img2_gray)
# forward flow
if pt2 is not None:
# set initial flow flags
self.lk_.setFlags(self.lk_.getFlags() | cv2.OPTFLOW_USE_INITIAL_FLOW )
pt2, st, _ = self.lk_.calc(
img1_gray, img2_gray, pt1, pt2
)
else:
pt2, st, _ = self.lk_.calc(
img1_gray, img2_gray, pt1, None
)
st_fw = st[:,0].astype(bool)
# backward flow
# unset initial flow flags
self.lk_.setFlags(self.lk_.getFlags() & ~cv2.OPTFLOW_USE_INITIAL_FLOW )
pt1_r, st, _ = self.lk_.calc(
img2_gray, img1_gray, pt2, None
)
st_bw = st[:,0].astype(bool)
# override error with reprojection error
# (default error doesn't make much sense anyways)
err = np.linalg.norm(pt1 - pt1_r, axis=-1)
# apply mask
msk = np.logical_and.reduce([
# error check
err < thresh,
# bounds check
0 <= pt2[:,0],
0 <= pt2[:,1],
pt2[:,0] < w,
pt2[:,1] < h,
# status check
st_fw,
st_bw,
])
if return_msk:
return pt2, msk
else:
idx = np.where(msk)[0]
return pt2, idx
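# A sketch of how Tracker might be wired into a live video loop, assuming a
# working camera at index 0; cv2.goodFeaturesToTrack supplies the corners that
# are then tracked between consecutive frames with the forward/backward check
# implemented above. Illustration only, not part of the module.
def _webcam_sketch():
    cap = cv2.VideoCapture(0)
    track = Tracker()
    ok, prev = cap.read()
    while ok:
        ok, frame = cap.read()
        if not ok:
            break
        # detect corners in the previous frame, then track them into the current one
        gray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
        pt1 = cv2.goodFeaturesToTrack(gray, maxCorners=500, qualityLevel=0.01, minDistance=7)
        if pt1 is not None:
            pt1 = pt1.reshape(-1, 2).astype(np.float32)
            pt2, idx = track(prev, frame, pt1)
            for x, y in pt2[idx]:
                cv2.circle(frame, (int(x), int(y)), 2, (0, 255, 0), -1)
        cv2.imshow('tracked', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        prev = frame
    cap.release()
    cv2.destroyAllWindows()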
def main():
from matplotlib import pyplot as plt
# params
w = 2*640
h = 2*480
n = 2*1024
di = 8
dj = 32
track = Tracker()
img1 = np.random.randint(0, 255, size=(h,w,3), dtype=np.uint8)
#img2 = np.random.randint(0, 255, size=(480,640,3), dtype=np.uint8)
img2 = np.roll(img1, di, axis=0)
img2 = np.roll(img2, dj, axis=1)
#img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
#img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
pt1 = np.random.uniform((0,0), (w,h), size=(n,2)).astype(np.float32)
pt2, idx = track(img1, img2, pt1)
#pt2, idx = track(img1, img2, pt1, pt2)
fig, ax = plt.subplots(1,2)
ax[0].imshow(img1, alpha=0.5)
ax[0].plot(pt1[:,0], pt1[:,1], 'r+')
ax[1].imshow(img2, alpha=0.5)
ax[1].plot(pt1[:,0], pt1[:,1], 'bx')
ax[1].plot(pt2[:,0], pt2[:,1], 'r+')
plt.show()
if __name__ == "__main__":
main()
# End of file: core/track.py

# File: demo.py
from escpos.printer import Usb
from pathlib import Path
image = Path("/tamamo-no-mae/me-cloudy.png")
printer = Usb(0x0416, 0x5011, 0, profile="ZJ-5870")
printer.image(image)
printer.cut()
# with printer() as that:
# that.write('Hello, world!\n\n')
# # 000000000111111111122222222223
# # 123456789012345678901234567890
# that.write('Soluta sed voluptatem ut\n')
# that.write('facere aut. Modi placeat et\n')
# that.write('eius voluptate sint ut.\n')
# that.write('Facilis minima ex quia quia\n')
# that.write('consectetur ex ipsa. Neque et\n')
# that.write('voluptatem ipsa enim error\n')
# that.write('rthatrehenderit ex dolore.\n')
# that.write('Cupiditate ad voluptatem nisi.\n\n\n\n')
# ZJ-5870
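# A short text-printing sketch reusing the printer handle opened above; text()
# and cut() are part of the python-escpos API, and the receipt content here is
# illustrative only.
printer.text("Hello, world!\n")
printer.text("Thank you for your purchase.\n\n")
printer.cut()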
# End of file: demo.py
import operator
import numpy
import pytest
import cupy
from cupy import testing
class TestArrayElementwiseOp:
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(rtol=1e-6, accept_error=TypeError)
def check_array_scalar_op(self, op, xp, x_type, y_type, swap=False,
no_bool=False, no_complex=False):
x_dtype = numpy.dtype(x_type)
y_dtype = numpy.dtype(y_type)
if no_bool and x_dtype == '?' and y_dtype == '?':
return xp.array(True)
if no_complex and (x_dtype.kind == 'c' or y_dtype.kind == 'c'):
return xp.array(True)
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
if swap:
return op(y_type(3), a)
else:
return op(a, y_type(3))
def test_add_scalar(self):
self.check_array_scalar_op(operator.add)
def test_radd_scalar(self):
self.check_array_scalar_op(operator.add, swap=True)
def test_iadd_scalar(self):
self.check_array_scalar_op(operator.iadd)
def test_sub_scalar(self):
self.check_array_scalar_op(operator.sub, no_bool=True)
def test_rsub_scalar(self):
self.check_array_scalar_op(operator.sub, swap=True, no_bool=True)
def test_isub_scalar(self):
self.check_array_scalar_op(operator.isub, no_bool=True)
def test_mul_scalar(self):
self.check_array_scalar_op(operator.mul)
def test_rmul_scalar(self):
self.check_array_scalar_op(operator.mul, swap=True)
def test_imul_scalar(self):
self.check_array_scalar_op(operator.imul)
def test_truediv_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(operator.truediv)
def test_rtruediv_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(operator.truediv, swap=True)
def test_itruediv_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(operator.itruediv)
def test_floordiv_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(operator.floordiv, no_complex=True)
def test_rfloordiv_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(operator.floordiv, swap=True,
no_complex=True)
def test_ifloordiv_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(operator.ifloordiv, no_complex=True)
def test_pow_scalar(self):
self.check_array_scalar_op(operator.pow)
def test_rpow_scalar(self):
self.check_array_scalar_op(operator.pow, swap=True)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(atol=1.0, accept_error=TypeError)
def check_ipow_scalar(self, xp, x_type, y_type):
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
return operator.ipow(a, y_type(3))
def test_ipow_scalar(self):
self.check_ipow_scalar()
def test_divmod0_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(lambda x, y: divmod(x, y)[0],
no_complex=True)
def test_divmod1_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(lambda x, y: divmod(x, y)[1],
no_complex=True)
def test_rdivmod0_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(lambda x, y: divmod(x, y)[0], swap=True,
no_complex=True)
def test_rdivmod1_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(lambda x, y: divmod(x, y)[1], swap=True,
no_complex=True)
def test_lt_scalar(self):
self.check_array_scalar_op(operator.lt, no_complex=False)
def test_le_scalar(self):
self.check_array_scalar_op(operator.le, no_complex=False)
def test_gt_scalar(self):
self.check_array_scalar_op(operator.gt, no_complex=False)
def test_ge_scalar(self):
self.check_array_scalar_op(operator.ge, no_complex=False)
def test_eq_scalar(self):
self.check_array_scalar_op(operator.eq)
def test_ne_scalar(self):
self.check_array_scalar_op(operator.ne)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(accept_error=TypeError)
def check_array_array_op(self, op, xp, x_type, y_type,
no_bool=False, no_complex=False):
x_dtype = numpy.dtype(x_type)
y_dtype = numpy.dtype(y_type)
if no_bool and x_dtype == '?' and y_dtype == '?':
return xp.array(True)
if no_complex and (x_dtype.kind == 'c' or y_dtype.kind == 'c'):
return xp.array(True)
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
b = xp.array([[6, 5, 4], [3, 2, 1]], y_type)
return op(a, b)
def test_add_array(self):
self.check_array_array_op(operator.add)
def test_iadd_array(self):
self.check_array_array_op(operator.iadd)
def test_sub_array(self):
self.check_array_array_op(operator.sub, no_bool=True)
def test_isub_array(self):
self.check_array_array_op(operator.isub, no_bool=True)
def test_mul_array(self):
self.check_array_array_op(operator.mul)
def test_imul_array(self):
self.check_array_array_op(operator.imul)
def test_truediv_array(self):
with numpy.errstate(divide='ignore'):
self.check_array_array_op(operator.truediv)
def test_itruediv_array(self):
with numpy.errstate(divide='ignore'):
self.check_array_array_op(operator.itruediv)
def test_floordiv_array(self):
with numpy.errstate(divide='ignore'):
self.check_array_array_op(operator.floordiv, no_complex=True)
def test_ifloordiv_array(self):
if '1.16.1' <= numpy.lib.NumpyVersion(numpy.__version__) < '1.18.0':
self.skipTest("NumPy Issue #12927")
with numpy.errstate(divide='ignore'):
self.check_array_array_op(operator.ifloordiv, no_complex=True)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(atol=1e-5, rtol=1e-6, accept_error=TypeError)
def check_pow_array(self, xp, x_type, y_type):
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
b = xp.array([[6, 5, 4], [3, 2, 1]], y_type)
return operator.pow(a, b)
def test_pow_array(self):
# There are some precision issues in HIP that prevent
# checking with atol=0
if cupy.cuda.runtime.is_hip:
self.check_pow_array()
else:
self.check_array_array_op(operator.pow)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(atol=1.0, accept_error=TypeError)
def check_ipow_array(self, xp, x_type, y_type):
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
b = xp.array([[6, 5, 4], [3, 2, 1]], y_type)
return operator.ipow(a, b)
def test_ipow_array(self):
self.check_ipow_array()
def test_divmod0_array(self):
with numpy.errstate(divide='ignore'):
self.check_array_array_op(lambda x, y: divmod(x, y)[0])
def test_divmod1_array(self):
with numpy.errstate(divide='ignore'):
self.check_array_array_op(lambda x, y: divmod(x, y)[1])
def test_lt_array(self):
self.check_array_array_op(operator.lt, no_complex=True)
def test_le_array(self):
self.check_array_array_op(operator.le, no_complex=True)
def test_gt_array(self):
self.check_array_array_op(operator.gt, no_complex=True)
def test_ge_array(self):
self.check_array_array_op(operator.ge, no_complex=True)
def test_eq_array(self):
self.check_array_array_op(operator.eq)
def test_ne_array(self):
self.check_array_array_op(operator.ne)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(accept_error=TypeError)
def check_array_broadcasted_op(self, op, xp, x_type, y_type,
no_bool=False, no_complex=False):
x_dtype = numpy.dtype(x_type)
y_dtype = numpy.dtype(y_type)
if no_bool and x_dtype == '?' and y_dtype == '?':
return xp.array(True)
if no_complex and (x_dtype.kind == 'c' or y_dtype.kind == 'c'):
return xp.array(True)
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
b = xp.array([[1], [2]], y_type)
return op(a, b)
def test_broadcasted_add(self):
self.check_array_broadcasted_op(operator.add)
def test_broadcasted_iadd(self):
self.check_array_broadcasted_op(operator.iadd)
def test_broadcasted_sub(self):
# TODO(unno): sub for boolean array is deprecated in numpy>=1.13
self.check_array_broadcasted_op(operator.sub, no_bool=True)
def test_broadcasted_isub(self):
# TODO(unno): sub for boolean array is deprecated in numpy>=1.13
self.check_array_broadcasted_op(operator.isub, no_bool=True)
def test_broadcasted_mul(self):
self.check_array_broadcasted_op(operator.mul)
def test_broadcasted_imul(self):
self.check_array_broadcasted_op(operator.imul)
def test_broadcasted_truediv(self):
with numpy.errstate(divide='ignore'):
self.check_array_broadcasted_op(operator.truediv)
def test_broadcasted_itruediv(self):
with numpy.errstate(divide='ignore'):
self.check_array_broadcasted_op(operator.itruediv)
def test_broadcasted_floordiv(self):
with numpy.errstate(divide='ignore'):
self.check_array_broadcasted_op(operator.floordiv, no_complex=True)
def test_broadcasted_ifloordiv(self):
if '1.16.1' <= numpy.lib.NumpyVersion(numpy.__version__) < '1.18.0':
self.skipTest("NumPy Issue #12927")
with numpy.errstate(divide='ignore'):
self.check_array_broadcasted_op(operator.ifloordiv,
no_complex=True)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(atol=1e-5, rtol=1e-6, accept_error=TypeError)
def check_broadcasted_pow(self, xp, x_type, y_type):
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
b = xp.array([[1], [2]], y_type)
return operator.pow(a, b)
def test_broadcasted_pow(self):
# There are some precision issues in HIP that prevent
# checking with atol=0
if cupy.cuda.runtime.is_hip:
self.check_broadcasted_pow()
else:
self.check_array_broadcasted_op(operator.pow)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(atol=1.0, accept_error=TypeError)
def check_broadcasted_ipow(self, xp, x_type, y_type):
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
b = xp.array([[1], [2]], y_type)
return operator.ipow(a, b)
def test_broadcasted_ipow(self):
self.check_broadcasted_ipow()
def test_broadcasted_divmod0(self):
with numpy.errstate(divide='ignore'):
self.check_array_broadcasted_op(lambda x, y: divmod(x, y)[0],
no_complex=True)
def test_broadcasted_divmod1(self):
with numpy.errstate(divide='ignore'):
self.check_array_broadcasted_op(lambda x, y: divmod(x, y)[1],
no_complex=True)
def test_broadcasted_lt(self):
self.check_array_broadcasted_op(operator.lt, no_complex=True)
def test_broadcasted_le(self):
self.check_array_broadcasted_op(operator.le, no_complex=True)
def test_broadcasted_gt(self):
self.check_array_broadcasted_op(operator.gt, no_complex=True)
def test_broadcasted_ge(self):
self.check_array_broadcasted_op(operator.ge, no_complex=True)
def test_broadcasted_eq(self):
self.check_array_broadcasted_op(operator.eq)
def test_broadcasted_ne(self):
self.check_array_broadcasted_op(operator.ne)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(rtol=1e-6)
def check_array_doubly_broadcasted_op(self, op, xp, x_type, y_type,
no_bool=False, no_complex=False):
x_dtype = numpy.dtype(x_type)
y_dtype = numpy.dtype(y_type)
if no_bool and x_dtype == '?' and y_dtype == '?':
return xp.array(True)
if no_complex and (x_dtype.kind == 'c' or y_dtype.kind == 'c'):
return xp.array(True)
a = xp.array([[[1, 2, 3]], [[4, 5, 6]]], x_type)
b = xp.array([[1], [2], [3]], y_type)
return op(a, b)
def test_doubly_broadcasted_add(self):
self.check_array_doubly_broadcasted_op(operator.add)
def test_doubly_broadcasted_sub(self):
self.check_array_doubly_broadcasted_op(operator.sub, no_bool=True)
def test_doubly_broadcasted_mul(self):
self.check_array_doubly_broadcasted_op(operator.mul)
def test_doubly_broadcasted_truediv(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_doubly_broadcasted_op(operator.truediv)
def test_doubly_broadcasted_floordiv(self):
with numpy.errstate(divide='ignore'):
self.check_array_doubly_broadcasted_op(operator.floordiv,
no_complex=True)
def test_doubly_broadcasted_pow(self):
self.check_array_doubly_broadcasted_op(operator.pow)
def test_doubly_broadcasted_divmod0(self):
with numpy.errstate(divide='ignore'):
self.check_array_doubly_broadcasted_op(
lambda x, y: divmod(x, y)[0],
no_complex=True)
def test_doubly_broadcasted_divmod1(self):
with numpy.errstate(divide='ignore'):
self.check_array_doubly_broadcasted_op(
lambda x, y: divmod(x, y)[1],
no_complex=True)
def test_doubly_broadcasted_lt(self):
self.check_array_doubly_broadcasted_op(operator.lt, no_complex=True)
def test_doubly_broadcasted_le(self):
self.check_array_doubly_broadcasted_op(operator.le, no_complex=True)
def test_doubly_broadcasted_gt(self):
self.check_array_doubly_broadcasted_op(operator.gt, no_complex=True)
def test_doubly_broadcasted_ge(self):
self.check_array_doubly_broadcasted_op(operator.ge, no_complex=True)
def test_doubly_broadcasted_eq(self):
self.check_array_doubly_broadcasted_op(operator.eq)
def test_doubly_broadcasted_ne(self):
self.check_array_doubly_broadcasted_op(operator.ne)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose()
def check_array_reversed_op(self, op, xp, x_type, y_type, no_bool=False):
if no_bool and x_type == numpy.bool_ and y_type == numpy.bool_:
return xp.array(True)
a = xp.array([1, 2, 3, 4, 5], x_type)
b = xp.array([1, 2, 3, 4, 5], y_type)
return op(a, b[::-1])
def test_array_reversed_add(self):
self.check_array_reversed_op(operator.add)
def test_array_reversed_sub(self):
self.check_array_reversed_op(operator.sub, no_bool=True)
def test_array_reversed_mul(self):
self.check_array_reversed_op(operator.mul)
@testing.for_all_dtypes(no_bool=True)
def check_typecast(self, val, dtype):
operators = [
operator.add, operator.sub, operator.mul, operator.truediv]
for op in operators:
with numpy.errstate(divide='ignore', invalid='ignore'):
a = op(val, (testing.shaped_arange((5,), numpy, dtype) - 2))
b = op(val, (testing.shaped_arange((5,), cupy, dtype) - 2))
assert a.dtype == b.dtype
def test_typecast_bool1(self):
self.check_typecast(True)
def test_typecast_bool2(self):
self.check_typecast(False)
def test_typecast_int1(self):
self.check_typecast(0)
def test_typecast_int2(self):
self.check_typecast(-127)
def test_typecast_int3(self):
self.check_typecast(255)
def test_typecast_int4(self):
self.check_typecast(-32768)
def test_typecast_int5(self):
self.check_typecast(65535)
def test_typecast_int6(self):
self.check_typecast(-2147483648)
def test_typecast_int7(self):
self.check_typecast(4294967295)
def test_typecast_float1(self):
self.check_typecast(0.0)
def test_typecast_float2(self):
self.check_typecast(100000.0)
# Skip float16 because of NumPy #19514
@testing.for_all_dtypes(name='x_type', no_float16=True)
@testing.numpy_cupy_allclose()
def check_array_boolarray_op(self, op, xp, x_type):
a = xp.array([[2, 7, 1], [8, 2, 8]], x_type)
# Cast from np.bool8 array should not read bytes
b = xp.array([[3, 1, 4], [-1, -5, -9]], numpy.int8).view(bool)
return op(a, b)
def test_add_array_boolarray(self):
self.check_array_boolarray_op(operator.add)
def test_iadd_array_boolarray(self):
self.check_array_boolarray_op(operator.iadd)
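# A minimal sketch (not part of the original suite) of what the
# testing.numpy_cupy_allclose decorator automates for the check_* helpers above:
# evaluate the same expression once with NumPy and once with CuPy, then compare
# the results within a tolerance. Values are illustrative and a CUDA device is
# assumed to be available.
def _manual_backend_comparison_example():
    a_np = numpy.arange(6, dtype=numpy.float32).reshape(2, 3)
    a_cp = cupy.arange(6, dtype=cupy.float32).reshape(2, 3)
    # cupy.asnumpy copies the device result back to the host for comparison.
    numpy.testing.assert_allclose(cupy.asnumpy(a_cp * 2), a_np * 2, rtol=1e-6)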
class TestArrayIntElementwiseOp:
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(accept_error=TypeError)
def check_array_scalar_op(self, op, xp, x_type, y_type, swap=False):
a = xp.array([[0, 1, 2], [1, 0, 2]], dtype=x_type)
if swap:
return op(y_type(2), a)
else:
return op(a, y_type(2))
def test_lshift_scalar(self):
self.check_array_scalar_op(operator.lshift)
def test_rlshift_scalar(self):
self.check_array_scalar_op(operator.lshift, swap=True)
def test_rshift_scalar(self):
self.check_array_scalar_op(operator.rshift)
def test_rrshift_scalar(self):
self.check_array_scalar_op(operator.rshift, swap=True)
def test_and_scalar(self):
self.check_array_scalar_op(operator.and_)
def test_rand_scalar(self):
self.check_array_scalar_op(operator.and_, swap=True)
def test_or_scalar(self):
self.check_array_scalar_op(operator.or_)
def test_ror_scalar(self):
self.check_array_scalar_op(operator.or_, swap=True)
def test_xor_scalar(self):
self.check_array_scalar_op(operator.xor)
def test_rxor_scalar(self):
self.check_array_scalar_op(operator.xor, swap=True)
def test_mod_scalar(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_scalar_op(operator.mod)
def test_rmod_scalar(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_scalar_op(operator.mod, swap=True)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(accept_error=TypeError)
def check_array_scalarzero_op(self, op, xp, x_type, y_type, swap=False):
a = xp.array([[0, 1, 2], [1, 0, 2]], dtype=x_type)
if swap:
return op(y_type(0), a)
else:
return op(a, y_type(0))
def test_lshift_scalarzero(self):
self.check_array_scalarzero_op(operator.lshift)
def test_rlshift_scalarzero(self):
self.check_array_scalarzero_op(operator.lshift, swap=True)
def test_rshift_scalarzero(self):
self.check_array_scalarzero_op(operator.rshift)
def test_rrshift_scalarzero(self):
self.check_array_scalarzero_op(operator.rshift, swap=True)
def test_and_scalarzero(self):
self.check_array_scalarzero_op(operator.and_)
def test_rand_scalarzero(self):
self.check_array_scalarzero_op(operator.and_, swap=True)
def test_or_scalarzero(self):
self.check_array_scalarzero_op(operator.or_)
def test_ror_scalarzero(self):
self.check_array_scalarzero_op(operator.or_, swap=True)
def test_xor_scalarzero(self):
self.check_array_scalarzero_op(operator.xor)
def test_rxor_scalarzero(self):
self.check_array_scalarzero_op(operator.xor, swap=True)
def test_mod_scalarzero(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_scalarzero_op(operator.mod)
def test_rmod_scalarzero(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_scalarzero_op(operator.mod, swap=True)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(accept_error=TypeError)
def check_array_array_op(self, op, xp, x_type, y_type):
a = xp.array([[0, 1, 2], [1, 0, 2]], dtype=x_type)
b = xp.array([[0, 0, 1], [0, 1, 2]], dtype=y_type)
return op(a, b)
def test_lshift_array(self):
self.check_array_array_op(operator.lshift)
def test_ilshift_array(self):
self.check_array_array_op(operator.ilshift)
def test_rshift_array(self):
self.check_array_array_op(operator.rshift)
def test_irshift_array(self):
self.check_array_array_op(operator.irshift)
def test_and_array(self):
self.check_array_array_op(operator.and_)
def test_iand_array(self):
self.check_array_array_op(operator.iand)
def test_or_array(self):
self.check_array_array_op(operator.or_)
def test_ior_array(self):
self.check_array_array_op(operator.ior)
def test_xor_array(self):
self.check_array_array_op(operator.xor)
def test_ixor_array(self):
self.check_array_array_op(operator.ixor)
def test_mod_array(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_array_op(operator.mod)
def test_imod_array(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_array_op(operator.imod)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(accept_error=TypeError)
def check_array_broadcasted_op(self, op, xp, x_type, y_type):
a = xp.array([[0, 1, 2], [1, 0, 2], [2, 1, 0]], dtype=x_type)
b = xp.array([[0, 0, 1]], dtype=y_type)
return op(a, b)
def test_broadcasted_lshift(self):
self.check_array_broadcasted_op(operator.lshift)
def test_broadcasted_ilshift(self):
self.check_array_broadcasted_op(operator.ilshift)
def test_broadcasted_rshift(self):
self.check_array_broadcasted_op(operator.rshift)
def test_broadcasted_irshift(self):
self.check_array_broadcasted_op(operator.irshift)
def test_broadcasted_and(self):
self.check_array_broadcasted_op(operator.and_)
def test_broadcasted_iand(self):
self.check_array_broadcasted_op(operator.iand)
def test_broadcasted_or(self):
self.check_array_broadcasted_op(operator.or_)
def test_broadcasted_ior(self):
self.check_array_broadcasted_op(operator.ior)
def test_broadcasted_xor(self):
self.check_array_broadcasted_op(operator.xor)
def test_broadcasted_ixor(self):
self.check_array_broadcasted_op(operator.ixor)
def test_broadcasted_mod(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_broadcasted_op(operator.mod)
def test_broadcasted_imod(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_broadcasted_op(operator.imod)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(accept_error=TypeError)
def check_array_doubly_broadcasted_op(self, op, xp, x_type, y_type):
a = xp.array([[[0, 1, 2]], [[1, 0, 2]]], dtype=x_type)
b = xp.array([[0], [0], [1]], dtype=y_type)
return op(a, b)
def test_doubly_broadcasted_lshift(self):
self.check_array_doubly_broadcasted_op(operator.lshift)
def test_doubly_broadcasted_rshift(self):
self.check_array_doubly_broadcasted_op(operator.rshift)
def test_doubly_broadcasted_and(self):
self.check_array_doubly_broadcasted_op(operator.and_)
def test_doubly_broadcasted_or(self):
self.check_array_doubly_broadcasted_op(operator.or_)
def test_doubly_broadcasted_xor(self):
self.check_array_doubly_broadcasted_op(operator.xor)
def test_doubly_broadcasted_mod(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_doubly_broadcasted_op(operator.mod)
@pytest.mark.parametrize('value', [
None,
Ellipsis,
object(),
numpy._NoValue,
])
class TestArrayObjectComparison:
@pytest.mark.parametrize('swap', [False, True])
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_eq_object(self, xp, dtype, value, swap):
a = xp.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
if swap:
return value == a
else:
return a == value
@pytest.mark.parametrize('swap', [False, True])
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_ne_object(self, xp, dtype, value, swap):
a = xp.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
if swap:
return value != a
else:
return a != value
class HasEq:
def __eq__(self, other):
return (other == 2) | (other == 4)
class HasNe:
def __ne__(self, other):
return (other == 2) | (other == 4)
class HasEqSub(HasEq):
pass
class CustomInt(int):
pass
@pytest.mark.parametrize('dtype', ['int32', 'float64'])
@pytest.mark.parametrize('value', [
HasEq(),
HasNe(), # eq test passes because `==` does not fall back to `__ne__`.
HasEqSub(),
CustomInt(3),
])
class TestArrayObjectComparisonDifficult:
# OK to raise TypeError.
# If CuPy returns a result, it should match with NumPy's result.
def test_eq_object(self, dtype, value):
expected = numpy.array([[1, 2, 3], [4, 5, 6]], dtype=dtype) == value
a = cupy.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
try:
res = a == value
except TypeError:
pytest.skip()
cupy.testing.assert_array_equal(res, expected)
def test_ne_object(self, dtype, value):
expected = numpy.array([[1, 2, 3], [4, 5, 6]], dtype=dtype) != value
a = cupy.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
try:
res = a != value
except TypeError:
pytest.skip()
cupy.testing.assert_array_equal(res, expected)
|
tests/cupy_tests/core_tests/test_ndarray_elementwise_op.py
| 27,032 |
There are some precision issues in HIP that prevent checking with atol=0 TODO(unno): sub for boolean array is deprecated in numpy>=1.13 TODO(unno): sub for boolean array is deprecated in numpy>=1.13 There are some precision issues in HIP that prevent checking with atol=0 Skip float16 because of NumPy #19514 Cast from np.bool8 array should not read bytes eq test passes because `==` does not fall back to `__ne__`. OK to raise TypeError. If CuPy returns a result, it should match with NumPy's result.
| 502 |
en
| 0.781484 |
def read_fasta(filename):
"""Returns a list of tuples of each header and sequence in a fasta (or multifasta) file.
    The first element in each tuple is the header and the second the sequence.
Key Arguments:
filename -- fasta file.
"""
tmp_seq = None
seqs_list = []
with open(filename, 'r') as fasta_file:
for line in fasta_file:
line = line.replace('\n','')
if '>' in line:
                if tmp_seq is not None:
seqs_list.append((hd, tmp_seq))
tmp_seq = ''
hd = line.replace('>','')
else:
tmp_seq += line
    if tmp_seq is not None:
        seqs_list.append((hd, tmp_seq))
try:
assert len(seqs_list) > 0
except AssertionError:
print('The selected file is not a Fasta file.')
else:
return seqs_list
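# A minimal usage sketch (not part of the original module); 'example.fasta' is
# an assumed file name used purely for illustration.
def _read_fasta_example():
    for header, sequence in read_fasta('example.fasta'):
        print(header, len(sequence))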
def write_fasta(outfile, seq_dict):
"""Writes fasta with dictionary where keys are headers and values sequences.
Key Arguments:
outfile.
"""
step = 70
with open(outfile, 'w') as file:
for header, sequence in seq_dict.items():
sequence_list = [sequence[i - step: i] for i in range(step, len(sequence) + 1, step)]
last = sequence[step * (len(sequence) // step):]
if last != '':
sequence_list.append(last)
sequence = '\n'.join(sequence_list)
file.write('>' + header + '\n' + sequence + '\n')
def reads_generator(fasta_file, read_length, k):
"""This function simulates the reads generation from a fasta file with a coverage not less than 50.
It will return a list of tuples. First element in tuple is read ID and second the sequence.
Key Arguments:
fasta_file -- fasta file.
read_length -- size of reads.
"""
reads_list = []
overlap = k - 1
input_header, input_seq = read_fasta(fasta_file)[0]
n = len(input_seq)
for i in range(0, n - overlap, read_length - overlap):
read_seq = input_seq[i: i + read_length]
reads_list.append(read_seq)
return [('{}_{}'.format(input_header, i), read) for i, read in enumerate(reads_list)]
def write_fastq(reads_list, filename):
"""This function created a FASTQ file from a list of read generated by the reads_generator function.
Key Arguments:
reads_list -- list of reads generated with reads_generator.
filename -- name of output file WITH EXTENSION.
"""
with open(filename, 'w') as fastq_file:
for read_id, read in reads_list:
fastq_file.write('@{}\n'.format(read_id))
fastq_file.write(read + '\n')
fastq_file.write('+\n')
fastq_file.write('I' * len(read) + '\n') # max possible score
def read_fastq(filename):
"""This function reads a FASTQ file storing the read and its ID in a dictionary where keys are IDs and read value.
This function does not consider + and score lines.
Key Arguments:
filename -- name of FASTQ input file.
"""
reads_dict = dict()
with open(filename, 'r') as fastq_file:
for line in fastq_file:
if '@' in line:
reads_dict[line[1:].replace('\n', '')] = next(
fastq_file).replace('\n', '')
next(fastq_file)
next(fastq_file)
return reads_dict
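# A hedged end-to-end sketch (not part of the original module) tying the helpers
# above together; the file names and parameters are assumed values.
def _fastq_roundtrip_example():
    reads = reads_generator('example.fasta', read_length=100, k=21)
    write_fastq(reads, 'example_reads.fastq')
    return read_fastq('example_reads.fastq')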
|
pridcon/utils.py
| 3,354 |
Returns a list of tuples of each header and sequence in a fasta (or multifasta) file.
The first element in each tuple is the header and the second the sequence.
Key Arguments:
filename -- fasta file.
This function reads a FASTQ file, storing each read and its ID in a dictionary where keys are read IDs and values are read sequences.
This function does not consider + and score lines.
Key Arguments:
filename -- name of FASTQ input file.
This function simulates the generation of reads from a fasta file with a coverage not less than 50.
It returns a list of tuples; the first element in each tuple is the read ID and the second the sequence.
Key Arguments:
fasta_file -- fasta file.
read_length -- size of reads.
Writes a fasta file from a dictionary where keys are headers and values are sequences.
Key Arguments:
outfile.
This function creates a FASTQ file from a list of reads generated by the reads_generator function.
Key Arguments:
reads_list -- list of reads generated with reads_generator.
filename -- name of output file WITH EXTENSION.
max possible score
| 1,030 |
en
| 0.839018 |
from django import template
from home.models import Recipe, MixingAgent, Base, Ingredient, FacePack, CustomFacePack
import pdb
register = template.Library()
@register.inclusion_tag('facepack.html')
def facepack_display(item_id):
if not item_id:
return
mandatory = []
type = "primary"
for cfp in CustomFacePack.objects.filter(facepack=item_id):
ing = cfp.optional_ingredient if cfp.optional_ingredient else cfp.recipe.mandatory_ingredient
mandatory.append({
'name' : ing.name,
'id' : ing.id,
'r_id' : cfp.recipe.id,
'image' : ing.image,
})
if cfp.optional_ingredient:
type = "secondary"
fp = FacePack.objects.get(pk=item_id)
res = {
'item_id' : item_id,
'name' : fp.name,
'mandatory' : mandatory,
'base' : fp.base.name,
'mixing_agent' : fp.mixing_agent.name,
'image' : fp.image,
'type' : type,
}
return {'item': res }
def facepack_display_abs(base_url, item_id):
if not item_id:
return
mandatory = []
type = "primary"
for cfp in CustomFacePack.objects.filter(facepack=item_id):
ing = cfp.optional_ingredient if cfp.optional_ingredient else cfp.recipe.mandatory_ingredient
mandatory.append({
'name' : ing.name,
'id' : ing.id,
'r_id' : cfp.recipe.id,
'image' : ing.image,
})
if cfp.optional_ingredient:
type = "secondary"
fp = FacePack.objects.get(pk=item_id)
res = {
'item_id' : item_id,
'name' : fp.name,
'mandatory' : mandatory,
'base' : fp.base.name,
'mixing_agent' : fp.mixing_agent.name,
'image' : fp.image,
'type' : type,
#'base_url' : request.get_raw_uri().replace(request.get_full_path(),''),
'base_url' : base_url,
}
return {'item': res }
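# A hedged usage sketch (not from the original project): after loading this tag
# library in a Django template, the inclusion tag above can be invoked as, e.g.
#
#     {% load common_tags %}
#     {% facepack_display item_id %}
#
# where item_id is assumed to be a FacePack primary key available in the
# template context.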
|
f2f/farms2face/home/templatetags/common_tags.py
| 2,013 |
'base_url' : request.get_raw_uri().replace(request.get_full_path(),''),
| 75 |
en
| 0.194243 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
FlowClient is a Python client to FlowAPI.
"""
from ._version import get_versions
__version__ = get_versions()["version"]
del get_versions
from flowclient.api_query import APIQuery
from .connection import Connection
from flowclient.client import connect
from flowclient.async_api_query import ASyncAPIQuery
from .async_connection import ASyncConnection
from flowclient.async_client import connect_async
from .client import (
get_geography,
get_result,
get_result_by_query_id,
get_geojson_result,
get_status,
query_is_ready,
run_query,
get_available_dates,
)
from .query_specs import (
daily_location_spec,
modal_location_spec,
modal_location_from_dates_spec,
radius_of_gyration_spec,
unique_location_counts_spec,
topup_balance_spec,
subscriber_degree_spec,
topup_amount_spec,
event_count_spec,
displacement_spec,
pareto_interactions_spec,
nocturnal_events_spec,
handset_spec,
random_sample_spec,
unique_locations_spec,
most_frequent_location_spec,
total_active_periods_spec,
location_visits_spec,
majority_location_spec,
coalesced_location_spec,
mobility_classification_spec,
)
from . import aggregates
from .aggregates import (
location_event_counts,
meaningful_locations_aggregate,
meaningful_locations_between_label_od_matrix,
meaningful_locations_between_dates_od_matrix,
flows,
unique_subscriber_counts,
location_introversion,
total_network_objects,
aggregate_network_objects,
spatial_aggregate,
joined_spatial_aggregate,
histogram_aggregate,
active_at_reference_location_counts,
unmoving_at_reference_location_counts,
unmoving_counts,
consecutive_trips_od_matrix,
trips_od_matrix,
labelled_spatial_aggregate,
labelled_flows,
)
__all__ = [
"aggregates",
"connect_async",
"connect",
"get_geography",
"get_result",
"get_result_by_query_id",
"get_geojson_result",
"get_status",
"query_is_ready",
"run_query",
"get_available_dates",
"APIQuery",
"ASyncAPIQuery",
"location_event_counts",
"meaningful_locations_aggregate",
"meaningful_locations_between_label_od_matrix",
"meaningful_locations_between_dates_od_matrix",
"flows",
"unique_subscriber_counts",
"location_introversion",
"total_network_objects",
"aggregate_network_objects",
"spatial_aggregate",
"joined_spatial_aggregate",
"histogram_aggregate",
"active_at_reference_location_counts",
"unique_locations_spec",
"unmoving_at_reference_location_counts",
"unmoving_counts",
"consecutive_trips_od_matrix",
"trips_od_matrix",
"labelled_spatial_aggregate",
"labelled_flows",
]
|
flowclient/flowclient/__init__.py
| 2,963 |
FlowClient is a Python client to FlowAPI.
This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
| 236 |
en
| 0.925807 |
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import requests
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
API_KEY = demisto.getParam('APIKey')
SERVER_URL = 'https://analyze.intezer.com/api'
API_VERSION = '/v2-0'
BASE_URL = SERVER_URL + API_VERSION
IS_AVAILABLE_URL = 'is-available'
ERROR_PREFIX = 'Error from Intezer:'
ACCEPTABLE_HTTP_CODES = {200, 201, 202}
USE_SSL = not demisto.params().get('insecure', False)
http_status_to_error_message = {
400: '400 Bad Request - Wrong or invalid parameters',
401: '401 Unauthorized - Wrong or invalid api key',
    403: '403 Forbidden - The account is not allowed to perform this task',
404: '404 Not Found - Analysis was not found',
410: '410 Gone - Analysis no longer exists in the service',
500: '500 Internal Server Error - Internal error',
503: '503 Service Unavailable'
}
dbot_score_by_verdict = {
'malicious': 3,
'suspicious': 2,
'trusted': 1,
'neutral': 1,
'no_threats': 1
}
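# Illustrative helper (not part of the integration) showing how the table above
# is applied later in this file: unknown verdicts fall back to a DBot score of 0.
def _verdict_to_dbot_score_example(verdict):
    return dbot_score_by_verdict.get(verdict, 0)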
''' HELPER FUNCTIONS '''
def handle_response(response, acceptable_http_status_codes):
if response.status_code not in acceptable_http_status_codes:
        error_msg = http_status_to_error_message.get(response.status_code, "Failed to perform request")
return_error(f'{ERROR_PREFIX} {error_msg}')
try:
return response.json()
except json.decoder.JSONDecodeError:
        # This error is unlikely to happen, as the return code should indicate an error beforehand
return_error(f'Response returned with no data. This might be an issue with Intezer.\nPlease try again later\n'
f'Response content:\n{response.content}')
def get_session():
response = requests.post(BASE_URL + '/get-access-token', json={'api_key': API_KEY}, verify=USE_SSL)
response = handle_response(response, {200})
session = requests.session()
session.headers['Authorization'] = f'Bearer {response["result"]}'
return session
''' COMMANDS '''
def check_is_available():
url = f'{SERVER_URL}/{IS_AVAILABLE_URL}'
result = SESSION.get(url, verify=USE_SSL)
return 'ok' if result.json()['is_available'] else None
def analyze_by_hash_command():
file_hash = demisto.getArg('file_hash')
response = make_analyze_by_hash_request(file_hash)
handle_analyze_by_hash_response(response, file_hash)
def get_latest_result_command():
file_hash = demisto.getArg('file_hash')
response = make_get_latest_report_request(file_hash)
handle_get_latest_result_response(response, file_hash)
def make_analyze_by_hash_request(file_hash):
data = {'hash': file_hash}
return SESSION.post(BASE_URL + '/analyze-by-hash', json=data, verify=USE_SSL)
def make_get_latest_report_request(file_hash):
return SESSION.get(f'{BASE_URL}/files/{file_hash}', verify=USE_SSL)
def handle_analyze_by_hash_response(response, file_hash):
if response.status_code == 404:
dbot = {
'Vendor': 'Intezer',
'Type': 'hash',
'Indicator': file_hash,
'Score': 0
}
hr = f'Hash {file_hash} does not exist on Intezer genome database'
ec = {'DBotScore': dbot}
return_outputs(hr, ec)
return
elif response.status_code == 400:
return_error('File hash is not valid.\nIntezer file hash reputation supports only SHA-256, '
'SHA-1 and MD5 hash formats.\n')
handle_analyze_response(response)
def handle_get_latest_result_response(response, file_hash):
if response.status_code == 404:
dbot = {
'Vendor': 'Intezer',
'Type': 'hash',
'Indicator': file_hash,
'Score': 0
}
hr = f'Hash {file_hash} does not exist on Intezer genome database'
ec = {'DBotScore': dbot}
return_outputs(hr, ec)
return
elif response.status_code == 400:
return_error('File hash is not valid.\nIntezer file hash reputation supports only SHA-256, '
'SHA-1 and MD5 hash formats.\n')
analysis_result = response.json()
enrich_dbot_and_display_file_analysis_results(analysis_result['result'])
def analyze_by_uploaded_file_command():
response = make_analyze_by_file_request(demisto.getArg('file_entry_id'))
handle_analyze_response(response)
def make_analyze_by_file_request(file_id):
file_data = demisto.getFilePath(file_id)
with open(file_data['path'], 'rb') as file_to_upload:
files = {'file': (file_data['name'], file_to_upload)}
return SESSION.post(BASE_URL + '/analyze', files=files, verify=USE_SSL)
def handle_analyze_response(response):
response = handle_response(response, ACCEPTABLE_HTTP_CODES)
result_url = response['result_url']
analysis_id = result_url.rsplit('/', 1)[-1]
context_json = {'Intezer.Analysis(obj.ID === val.ID)': {'ID': analysis_id, 'Status': 'Created', 'type': 'File'}}
return_outputs('Analysis created successfully: {}'.format(analysis_id), context_json, response)
def check_analysis_status_and_get_results_command():
analysis_type = demisto.args().get('analysis_type', 'File')
analysis_ids = argToList(demisto.args().get('analysis_id'))
indicator_name = demisto.args().get('indicator_name')
for analysis_id in analysis_ids:
response = make_analysis_status_request(analysis_id, analysis_type)
analysis_result = handle_analysis_result(response)
if analysis_result and analysis_type == 'Endpoint':
enrich_dbot_and_display_endpoint_analysis_results(analysis_result, indicator_name)
elif analysis_result and analysis_type == 'File':
enrich_dbot_and_display_file_analysis_results(analysis_result)
def make_analysis_status_request(analysis_id, analysis_type):
analysis_endpoint = 'endpoint-analyses/' if analysis_type == 'Endpoint' else 'analyses/'
result_url = f'{BASE_URL}/{analysis_endpoint}{analysis_id}'
return SESSION.get(result_url, verify=USE_SSL)
def handle_analysis_result(response):
json_response = handle_response(response, ACCEPTABLE_HTTP_CODES)
if response.status_code != 200:
result_url = json_response['result_url']
analysis_id = result_url.rsplit('/', 1)[-1]
context_json = {'Intezer.Analysis(val.ID === obj.ID)': {'ID': analysis_id,
'Status': 'InProgress'}}
return_outputs('Analysis is still in progress', context_json)
return
return json_response['result']
def enrich_dbot_and_display_file_analysis_results(result):
verdict = result.get('verdict')
sha256 = result.get('sha256')
analysis_id = result.get('analysis_id')
dbot = {
'Vendor': 'Intezer',
'Type': 'hash',
'Indicator': sha256,
'Score': dbot_score_by_verdict.get(verdict, 0)
}
file = {'SHA256': sha256, 'Metadata': result, 'ExistsInIntezer': True}
if verdict == 'malicious':
file['Malicious'] = {'Vendor': 'Intezer'}
presentable_result = '## Intezer File analysis result\n'
presentable_result += f' SHA256: {sha256}\n'
presentable_result += f' Verdict: **{verdict}** ({result["sub_verdict"]})\n'
if 'family_name' in result:
presentable_result += f'Family: **{result["family_name"]}**\n'
presentable_result += f'[Analysis Link]({result["analysis_url"]})\n'
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {
outputPaths['dbotscore']: dbot,
outputPaths['file']: file,
'Intezer.Analysis(val.ID === obj.ID)': {'ID': analysis_id, 'Status': 'Done'}},
'HumanReadable': presentable_result,
'ContentsFormat': formats['json'],
'Contents': result
})
def enrich_dbot_and_display_endpoint_analysis_results(result, indicator_name=None):
verdict = result['verdict']
computer_name = result['computer_name']
analysis_id = result['analysis_id']
dbot = {
'Vendor': 'Intezer',
'Type': 'hostname',
'Indicator': indicator_name if indicator_name else computer_name,
'Score': dbot_score_by_verdict.get(verdict, 0)
}
endpoint = {'Metadata': result}
presentable_result = '## Intezer Endpoint analysis result\n'
presentable_result += f'Host Name: {computer_name}\n'
presentable_result += f' Verdict: **{verdict}**\n'
if result.get('families') is not None:
presentable_result += f'Families: **{result["families"]}**\n'
presentable_result += f' Scan Time: {result["scan_start_time"]}\n'
presentable_result += f'[Analysis Link]({result["analysis_url"]})\n'
ec = {
'DBotScore': dbot,
'Endpoint': endpoint,
'Intezer.Analysis(val.ID === obj.ID)': {'ID': analysis_id, 'Status': 'Done'}
}
return_outputs(presentable_result, ec, result)
''' EXECUTION CODE '''
try:
SESSION = get_session()
except Exception as e:
return_error(str(e))
def main():
try:
handle_proxy()
if demisto.command() == 'test-module':
demisto.results(check_is_available())
elif demisto.command() == 'intezer-analyze-by-hash':
analyze_by_hash_command()
elif demisto.command() == 'intezer-analyze-by-file':
analyze_by_uploaded_file_command()
elif demisto.command() == 'intezer-get-latest-report':
get_latest_result_command()
elif demisto.command() == 'intezer-get-analysis-result':
check_analysis_status_and_get_results_command()
except Exception as e:
return_error(str(e))
# python2 uses __builtin__ python3 uses builtins
if __name__ == "__builtin__" or __name__ == "builtins":
main()
|
Packs/Intezer/Integrations/IntezerV2/IntezerV2.py
| 9,809 |
Disable insecure warnings This error is unlikely to happen, as the return code should indicate an error beforehand python2 uses __builtin__ python3 uses builtins
| 161 |
en
| 0.803378 |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, \
BaseUserManager, PermissionsMixin
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
"""Creates and saves a new user"""
if not email:
raise ValueError('User must have an email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""Creates and saves a new super user"""
user = self.create_user(email, password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Custom user model that supports using email instead of username"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
|
app/core/models.py
| 1,214 |
Custom user model that supports using email instead of username
Creates and saves a new super user
Creates and saves a new user
| 127 |
en
| 0.874647 |
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for manipulating Nodes via the DB API"""
import datetime
import mock
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from ironic.common import exception
from ironic.common import states
from ironic.tests.unit.db import base
from ironic.tests.unit.db import utils
class DbNodeTestCase(base.DbTestCase):
def test_create_node(self):
node = utils.create_test_node()
self.assertEqual([], node.tags)
self.assertEqual([], node.traits)
def test_create_node_with_tags(self):
self.assertRaises(exception.InvalidParameterValue,
utils.create_test_node,
tags=['tag1', 'tag2'])
def test_create_node_with_traits(self):
self.assertRaises(exception.InvalidParameterValue,
utils.create_test_node,
traits=['trait1', 'trait2'])
def test_create_node_already_exists(self):
utils.create_test_node()
self.assertRaises(exception.NodeAlreadyExists,
utils.create_test_node)
def test_create_node_instance_already_associated(self):
instance = uuidutils.generate_uuid()
utils.create_test_node(uuid=uuidutils.generate_uuid(),
instance_uuid=instance)
self.assertRaises(exception.InstanceAssociated,
utils.create_test_node,
uuid=uuidutils.generate_uuid(),
instance_uuid=instance)
def test_create_node_name_duplicate(self):
node = utils.create_test_node(name='spam')
self.assertRaises(exception.DuplicateName,
utils.create_test_node,
name=node.name)
def test_get_node_by_id(self):
node = utils.create_test_node()
self.dbapi.set_node_tags(node.id, ['tag1', 'tag2'])
utils.create_test_node_traits(node_id=node.id,
traits=['trait1', 'trait2'])
res = self.dbapi.get_node_by_id(node.id)
self.assertEqual(node.id, res.id)
self.assertEqual(node.uuid, res.uuid)
self.assertItemsEqual(['tag1', 'tag2'], [tag.tag for tag in res.tags])
self.assertItemsEqual(['trait1', 'trait2'],
[trait.trait for trait in res.traits])
def test_get_node_by_uuid(self):
node = utils.create_test_node()
self.dbapi.set_node_tags(node.id, ['tag1', 'tag2'])
utils.create_test_node_traits(node_id=node.id,
traits=['trait1', 'trait2'])
res = self.dbapi.get_node_by_uuid(node.uuid)
self.assertEqual(node.id, res.id)
self.assertEqual(node.uuid, res.uuid)
self.assertItemsEqual(['tag1', 'tag2'], [tag.tag for tag in res.tags])
self.assertItemsEqual(['trait1', 'trait2'],
[trait.trait for trait in res.traits])
def test_get_node_by_name(self):
node = utils.create_test_node()
self.dbapi.set_node_tags(node.id, ['tag1', 'tag2'])
utils.create_test_node_traits(node_id=node.id,
traits=['trait1', 'trait2'])
res = self.dbapi.get_node_by_name(node.name)
self.assertEqual(node.id, res.id)
self.assertEqual(node.uuid, res.uuid)
self.assertEqual(node.name, res.name)
self.assertItemsEqual(['tag1', 'tag2'], [tag.tag for tag in res.tags])
self.assertItemsEqual(['trait1', 'trait2'],
[trait.trait for trait in res.traits])
def test_get_node_that_does_not_exist(self):
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node_by_id, 99)
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node_by_uuid,
'12345678-9999-0000-aaaa-123456789012')
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node_by_name,
'spam-eggs-bacon-spam')
def test_get_nodeinfo_list_defaults(self):
node_id_list = []
for i in range(1, 6):
node = utils.create_test_node(uuid=uuidutils.generate_uuid())
node_id_list.append(node.id)
res = [i[0] for i in self.dbapi.get_nodeinfo_list()]
self.assertEqual(sorted(res), sorted(node_id_list))
def test_get_nodeinfo_list_with_cols(self):
uuids = {}
extras = {}
for i in range(1, 6):
uuid = uuidutils.generate_uuid()
extra = {'foo': i}
node = utils.create_test_node(extra=extra, uuid=uuid)
uuids[node.id] = uuid
extras[node.id] = extra
res = self.dbapi.get_nodeinfo_list(columns=['id', 'extra', 'uuid'])
self.assertEqual(extras, dict((r[0], r[1]) for r in res))
self.assertEqual(uuids, dict((r[0], r[2]) for r in res))
def test_get_nodeinfo_list_with_filters(self):
node1 = utils.create_test_node(
driver='driver-one',
instance_uuid=uuidutils.generate_uuid(),
reservation='fake-host',
uuid=uuidutils.generate_uuid())
node2 = utils.create_test_node(
driver='driver-two',
uuid=uuidutils.generate_uuid(),
maintenance=True,
fault='boom',
resource_class='foo',
conductor_group='group1')
node3 = utils.create_test_node(
driver='driver-one',
uuid=uuidutils.generate_uuid(),
reservation='another-fake-host')
res = self.dbapi.get_nodeinfo_list(filters={'driver': 'driver-one'})
self.assertEqual(sorted([node1.id, node3.id]),
sorted([r[0] for r in res]))
res = self.dbapi.get_nodeinfo_list(filters={'driver': 'bad-driver'})
self.assertEqual([], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'associated': True})
self.assertEqual([node1.id], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'associated': False})
self.assertEqual(sorted([node2.id, node3.id]),
sorted([r[0] for r in res]))
res = self.dbapi.get_nodeinfo_list(filters={'reserved': True})
self.assertEqual(sorted([node1.id, node3.id]),
sorted([r[0] for r in res]))
res = self.dbapi.get_nodeinfo_list(filters={'reserved': False})
self.assertEqual([node2.id], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'maintenance': True})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'maintenance': False})
self.assertEqual(sorted([node1.id, node3.id]),
sorted([r.id for r in res]))
res = self.dbapi.get_nodeinfo_list(filters={'fault': 'boom'})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'fault': 'moob'})
self.assertEqual([], [r.id for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'resource_class': 'foo'})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_nodeinfo_list(
filters={'conductor_group': 'group1'})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_nodeinfo_list(
filters={'conductor_group': 'group2'})
self.assertEqual([], [r.id for r in res])
res = self.dbapi.get_nodeinfo_list(
filters={'reserved_by_any_of': ['fake-host',
'another-fake-host']})
self.assertEqual(sorted([node1.id, node3.id]),
sorted([r.id for r in res]))
res = self.dbapi.get_nodeinfo_list(filters={'id': node1.id})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'uuid': node1.uuid})
self.assertEqual([node1.id], [r.id for r in res])
# ensure unknown filters explode
filters = {'bad_filter': 'foo'}
self.assertRaisesRegex(ValueError,
'bad_filter',
self.dbapi.get_nodeinfo_list,
filters=filters)
# even with good filters present
filters = {'bad_filter': 'foo', 'id': node1.id}
self.assertRaisesRegex(ValueError,
'bad_filter',
self.dbapi.get_nodeinfo_list,
filters=filters)
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_get_nodeinfo_list_provision(self, mock_utcnow):
past = datetime.datetime(2000, 1, 1, 0, 0)
next = past + datetime.timedelta(minutes=8)
present = past + datetime.timedelta(minutes=10)
mock_utcnow.return_value = past
# node with provision_updated timeout
node1 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
provision_updated_at=past)
# node with None in provision_updated_at
node2 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
provision_state=states.DEPLOYWAIT)
# node without timeout
utils.create_test_node(uuid=uuidutils.generate_uuid(),
provision_updated_at=next)
mock_utcnow.return_value = present
res = self.dbapi.get_nodeinfo_list(filters={'provisioned_before': 300})
self.assertEqual([node1.id], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'provision_state':
states.DEPLOYWAIT})
self.assertEqual([node2.id], [r[0] for r in res])
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_get_nodeinfo_list_inspection(self, mock_utcnow):
past = datetime.datetime(2000, 1, 1, 0, 0)
next = past + datetime.timedelta(minutes=8)
present = past + datetime.timedelta(minutes=10)
mock_utcnow.return_value = past
        # node with inspection_started timeout
node1 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
inspection_started_at=past)
        # node with None in inspection_started_at
node2 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
provision_state=states.INSPECTING)
# node without timeout
utils.create_test_node(uuid=uuidutils.generate_uuid(),
inspection_started_at=next)
mock_utcnow.return_value = present
res = self.dbapi.get_nodeinfo_list(
filters={'inspection_started_before': 300})
self.assertEqual([node1.id], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'provision_state':
states.INSPECTING})
self.assertEqual([node2.id], [r[0] for r in res])
def test_get_nodeinfo_list_description(self):
node1 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
description='Hello')
node2 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
description='World!')
res = self.dbapi.get_nodeinfo_list(
filters={'description_contains': 'Hello'})
self.assertEqual([node1.id], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'description_contains':
'World!'})
self.assertEqual([node2.id], [r[0] for r in res])
def test_get_node_list(self):
uuids = []
for i in range(1, 6):
node = utils.create_test_node(uuid=uuidutils.generate_uuid())
uuids.append(six.text_type(node['uuid']))
res = self.dbapi.get_node_list()
res_uuids = [r.uuid for r in res]
six.assertCountEqual(self, uuids, res_uuids)
for r in res:
self.assertEqual([], r.tags)
self.assertEqual([], r.traits)
def test_get_node_list_with_filters(self):
ch1 = utils.create_test_chassis(uuid=uuidutils.generate_uuid())
ch2 = utils.create_test_chassis(uuid=uuidutils.generate_uuid())
node1 = utils.create_test_node(
driver='driver-one',
instance_uuid=uuidutils.generate_uuid(),
reservation='fake-host',
uuid=uuidutils.generate_uuid(),
chassis_id=ch1['id'])
node2 = utils.create_test_node(
driver='driver-two',
uuid=uuidutils.generate_uuid(),
chassis_id=ch2['id'],
maintenance=True,
fault='boom',
resource_class='foo',
conductor_group='group1',
power_state='power on')
res = self.dbapi.get_node_list(filters={'chassis_uuid': ch1['uuid']})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'chassis_uuid': ch2['uuid']})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'driver': 'driver-one'})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'driver': 'bad-driver'})
self.assertEqual([], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'associated': True})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'associated': False})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'reserved': True})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'reserved': False})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'maintenance': True})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'maintenance': False})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'fault': 'boom'})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'fault': 'moob'})
self.assertEqual([], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'resource_class': 'foo'})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'conductor_group': 'group1'})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'conductor_group': 'group2'})
self.assertEqual([], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'id': node1.id})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'uuid': node1.uuid})
self.assertEqual([node1.id], [r.id for r in res])
uuids = [uuidutils.generate_uuid(),
node1.uuid,
uuidutils.generate_uuid()]
res = self.dbapi.get_node_list(filters={'uuid_in': uuids})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'with_power_state': True})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'with_power_state': False})
self.assertEqual([node1.id], [r.id for r in res])
# ensure unknown filters explode
filters = {'bad_filter': 'foo'}
self.assertRaisesRegex(ValueError,
'bad_filter',
self.dbapi.get_node_list,
filters=filters)
# even with good filters present
filters = {'bad_filter': 'foo', 'id': node1.id}
self.assertRaisesRegex(ValueError,
'bad_filter',
self.dbapi.get_node_list,
filters=filters)
def test_get_node_list_description(self):
node1 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
description='Hello')
node2 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
description='World!')
res = self.dbapi.get_node_list(filters={
'description_contains': 'Hello'})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={
'description_contains': 'World!'})
self.assertEqual([node2.id], [r.id for r in res])
def test_get_node_list_chassis_not_found(self):
self.assertRaises(exception.ChassisNotFound,
self.dbapi.get_node_list,
{'chassis_uuid': uuidutils.generate_uuid()})
def test_get_node_by_instance(self):
node = utils.create_test_node(
instance_uuid='12345678-9999-0000-aaaa-123456789012')
self.dbapi.set_node_tags(node.id, ['tag1', 'tag2'])
utils.create_test_node_traits(node_id=node.id,
traits=['trait1', 'trait2'])
res = self.dbapi.get_node_by_instance(node.instance_uuid)
self.assertEqual(node.uuid, res.uuid)
self.assertItemsEqual(['tag1', 'tag2'], [tag.tag for tag in res.tags])
self.assertItemsEqual(['trait1', 'trait2'],
[trait.trait for trait in res.traits])
def test_get_node_by_instance_wrong_uuid(self):
utils.create_test_node(
instance_uuid='12345678-9999-0000-aaaa-123456789012')
self.assertRaises(exception.InstanceNotFound,
self.dbapi.get_node_by_instance,
'12345678-9999-0000-bbbb-123456789012')
def test_get_node_by_instance_invalid_uuid(self):
self.assertRaises(exception.InvalidUUID,
self.dbapi.get_node_by_instance,
'fake_uuid')
def test_destroy_node(self):
node = utils.create_test_node()
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node_by_id, node.id)
def test_destroy_node_by_uuid(self):
node = utils.create_test_node()
self.dbapi.destroy_node(node.uuid)
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node_by_uuid, node.uuid)
def test_destroy_node_that_does_not_exist(self):
self.assertRaises(exception.NodeNotFound,
self.dbapi.destroy_node,
'12345678-9999-0000-aaaa-123456789012')
def test_ports_get_destroyed_after_destroying_a_node(self):
node = utils.create_test_node()
port = utils.create_test_port(node_id=node.id)
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.PortNotFound,
self.dbapi.get_port_by_id, port.id)
def test_ports_get_destroyed_after_destroying_a_node_by_uuid(self):
node = utils.create_test_node()
port = utils.create_test_port(node_id=node.id)
self.dbapi.destroy_node(node.uuid)
self.assertRaises(exception.PortNotFound,
self.dbapi.get_port_by_id, port.id)
def test_tags_get_destroyed_after_destroying_a_node(self):
node = utils.create_test_node()
tag = utils.create_test_node_tag(node_id=node.id)
self.assertTrue(self.dbapi.node_tag_exists(node.id, tag.tag))
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.NodeNotFound,
self.dbapi.node_tag_exists, node.id, tag.tag)
def test_tags_get_destroyed_after_destroying_a_node_by_uuid(self):
node = utils.create_test_node()
tag = utils.create_test_node_tag(node_id=node.id)
self.assertTrue(self.dbapi.node_tag_exists(node.id, tag.tag))
self.dbapi.destroy_node(node.uuid)
self.assertRaises(exception.NodeNotFound,
self.dbapi.node_tag_exists, node.id, tag.tag)
def test_volume_connector_get_destroyed_after_destroying_a_node(self):
node = utils.create_test_node()
connector = utils.create_test_volume_connector(node_id=node.id)
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.VolumeConnectorNotFound,
self.dbapi.get_volume_connector_by_id, connector.id)
def test_volume_connector_get_destroyed_after_destroying_a_node_uuid(self):
node = utils.create_test_node()
connector = utils.create_test_volume_connector(node_id=node.id)
self.dbapi.destroy_node(node.uuid)
self.assertRaises(exception.VolumeConnectorNotFound,
self.dbapi.get_volume_connector_by_id, connector.id)
def test_volume_target_gets_destroyed_after_destroying_a_node(self):
node = utils.create_test_node()
target = utils.create_test_volume_target(node_id=node.id)
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.VolumeTargetNotFound,
self.dbapi.get_volume_target_by_id, target.id)
def test_volume_target_gets_destroyed_after_destroying_a_node_uuid(self):
node = utils.create_test_node()
target = utils.create_test_volume_target(node_id=node.id)
self.dbapi.destroy_node(node.uuid)
self.assertRaises(exception.VolumeTargetNotFound,
self.dbapi.get_volume_target_by_id, target.id)
def test_traits_get_destroyed_after_destroying_a_node(self):
node = utils.create_test_node()
trait = utils.create_test_node_trait(node_id=node.id)
self.assertTrue(self.dbapi.node_trait_exists(node.id, trait.trait))
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.NodeNotFound,
self.dbapi.node_trait_exists, node.id, trait.trait)
def test_traits_get_destroyed_after_destroying_a_node_by_uuid(self):
node = utils.create_test_node()
trait = utils.create_test_node_trait(node_id=node.id)
self.assertTrue(self.dbapi.node_trait_exists(node.id, trait.trait))
self.dbapi.destroy_node(node.uuid)
self.assertRaises(exception.NodeNotFound,
self.dbapi.node_trait_exists, node.id, trait.trait)
def test_allocations_get_destroyed_after_destroying_a_node_by_uuid(self):
node = utils.create_test_node()
allocation = utils.create_test_allocation(node_id=node.id)
self.dbapi.destroy_node(node.uuid)
self.assertRaises(exception.AllocationNotFound,
self.dbapi.get_allocation_by_id, allocation.id)
def test_update_node(self):
node = utils.create_test_node()
old_extra = node.extra
new_extra = {'foo': 'bar'}
self.assertNotEqual(old_extra, new_extra)
res = self.dbapi.update_node(node.id, {'extra': new_extra})
self.assertEqual(new_extra, res.extra)
self.assertEqual([], res.tags)
self.assertEqual([], res.traits)
def test_update_node_with_tags(self):
node = utils.create_test_node()
tag = utils.create_test_node_tag(node_id=node.id)
old_extra = node.extra
new_extra = {'foo': 'bar'}
self.assertNotEqual(old_extra, new_extra)
res = self.dbapi.update_node(node.id, {'extra': new_extra})
self.assertEqual([tag.tag], [t.tag for t in res.tags])
def test_update_node_with_traits(self):
node = utils.create_test_node()
trait = utils.create_test_node_trait(node_id=node.id)
old_extra = node.extra
new_extra = {'foo': 'bar'}
self.assertNotEqual(old_extra, new_extra)
res = self.dbapi.update_node(node.id, {'extra': new_extra})
self.assertEqual([trait.trait], [t.trait for t in res.traits])
def test_update_node_not_found(self):
node_uuid = uuidutils.generate_uuid()
new_extra = {'foo': 'bar'}
self.assertRaises(exception.NodeNotFound, self.dbapi.update_node,
node_uuid, {'extra': new_extra})
def test_update_node_uuid(self):
node = utils.create_test_node()
self.assertRaises(exception.InvalidParameterValue,
self.dbapi.update_node, node.id,
{'uuid': ''})
def test_update_node_associate_and_disassociate(self):
node = utils.create_test_node()
new_i_uuid = uuidutils.generate_uuid()
res = self.dbapi.update_node(node.id, {'instance_uuid': new_i_uuid})
self.assertEqual(new_i_uuid, res.instance_uuid)
res = self.dbapi.update_node(node.id, {'instance_uuid': None})
self.assertIsNone(res.instance_uuid)
def test_update_node_instance_already_associated(self):
node1 = utils.create_test_node(uuid=uuidutils.generate_uuid())
new_i_uuid = uuidutils.generate_uuid()
self.dbapi.update_node(node1.id, {'instance_uuid': new_i_uuid})
node2 = utils.create_test_node(uuid=uuidutils.generate_uuid())
self.assertRaises(exception.InstanceAssociated,
self.dbapi.update_node,
node2.id,
{'instance_uuid': new_i_uuid})
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_update_node_provision(self, mock_utcnow):
mocked_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = mocked_time
node = utils.create_test_node()
res = self.dbapi.update_node(node.id, {'provision_state': 'fake'})
self.assertEqual(mocked_time,
timeutils.normalize_time(res['provision_updated_at']))
def test_update_node_name_duplicate(self):
node1 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
name='spam')
node2 = utils.create_test_node(uuid=uuidutils.generate_uuid())
self.assertRaises(exception.DuplicateName,
self.dbapi.update_node,
node2.id,
{'name': node1.name})
def test_update_node_no_provision(self):
node = utils.create_test_node()
res = self.dbapi.update_node(node.id, {'extra': {'foo': 'bar'}})
self.assertIsNone(res['provision_updated_at'])
self.assertIsNone(res['inspection_started_at'])
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_update_node_inspection_started_at(self, mock_utcnow):
mocked_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = mocked_time
node = utils.create_test_node(uuid=uuidutils.generate_uuid(),
inspection_started_at=mocked_time)
res = self.dbapi.update_node(node.id, {'provision_state': 'fake'})
result = res['inspection_started_at']
self.assertEqual(mocked_time,
timeutils.normalize_time(result))
self.assertIsNone(res['inspection_finished_at'])
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_update_node_inspection_finished_at(self, mock_utcnow):
mocked_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = mocked_time
node = utils.create_test_node(uuid=uuidutils.generate_uuid(),
inspection_finished_at=mocked_time)
res = self.dbapi.update_node(node.id, {'provision_state': 'fake'})
result = res['inspection_finished_at']
self.assertEqual(mocked_time,
timeutils.normalize_time(result))
self.assertIsNone(res['inspection_started_at'])
def test_reserve_node(self):
node = utils.create_test_node()
self.dbapi.set_node_tags(node.id, ['tag1', 'tag2'])
utils.create_test_node_traits(node_id=node.id,
traits=['trait1', 'trait2'])
uuid = node.uuid
r1 = 'fake-reservation'
# reserve the node
res = self.dbapi.reserve_node(r1, uuid)
self.assertItemsEqual(['tag1', 'tag2'], [tag.tag for tag in res.tags])
self.assertItemsEqual(['trait1', 'trait2'],
[trait.trait for trait in res.traits])
# check reservation
res = self.dbapi.get_node_by_uuid(uuid)
self.assertEqual(r1, res.reservation)
def test_release_reservation(self):
node = utils.create_test_node()
uuid = node.uuid
r1 = 'fake-reservation'
self.dbapi.reserve_node(r1, uuid)
# release reservation
self.dbapi.release_node(r1, uuid)
res = self.dbapi.get_node_by_uuid(uuid)
self.assertIsNone(res.reservation)
def test_reservation_of_reserved_node_fails(self):
node = utils.create_test_node()
uuid = node.uuid
r1 = 'fake-reservation'
r2 = 'another-reservation'
# reserve the node
self.dbapi.reserve_node(r1, uuid)
# another host fails to reserve or release
self.assertRaises(exception.NodeLocked,
self.dbapi.reserve_node,
r2, uuid)
self.assertRaises(exception.NodeLocked,
self.dbapi.release_node,
r2, uuid)
def test_reservation_after_release(self):
node = utils.create_test_node()
uuid = node.uuid
r1 = 'fake-reservation'
r2 = 'another-reservation'
self.dbapi.reserve_node(r1, uuid)
self.dbapi.release_node(r1, uuid)
# another host succeeds
self.dbapi.reserve_node(r2, uuid)
res = self.dbapi.get_node_by_uuid(uuid)
self.assertEqual(r2, res.reservation)
def test_reservation_in_exception_message(self):
node = utils.create_test_node()
uuid = node.uuid
r = 'fake-reservation'
self.dbapi.reserve_node(r, uuid)
exc = self.assertRaises(exception.NodeLocked, self.dbapi.reserve_node,
'another', uuid)
self.assertIn(r, str(exc))
def test_reservation_non_existent_node(self):
node = utils.create_test_node()
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.NodeNotFound,
self.dbapi.reserve_node, 'fake', node.id)
self.assertRaises(exception.NodeNotFound,
self.dbapi.reserve_node, 'fake', node.uuid)
def test_release_non_existent_node(self):
node = utils.create_test_node()
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.NodeNotFound,
self.dbapi.release_node, 'fake', node.id)
self.assertRaises(exception.NodeNotFound,
self.dbapi.release_node, 'fake', node.uuid)
def test_release_non_locked_node(self):
node = utils.create_test_node()
self.assertIsNone(node.reservation)
self.assertRaises(exception.NodeNotLocked,
self.dbapi.release_node, 'fake', node.id)
self.assertRaises(exception.NodeNotLocked,
self.dbapi.release_node, 'fake', node.uuid)
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_touch_node_provisioning(self, mock_utcnow):
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
node = utils.create_test_node()
# assert provision_updated_at is None
self.assertIsNone(node.provision_updated_at)
self.dbapi.touch_node_provisioning(node.uuid)
node = self.dbapi.get_node_by_uuid(node.uuid)
# assert provision_updated_at has been updated
self.assertEqual(test_time,
timeutils.normalize_time(node.provision_updated_at))
def test_touch_node_provisioning_not_found(self):
self.assertRaises(
exception.NodeNotFound,
self.dbapi.touch_node_provisioning, uuidutils.generate_uuid())
def test_get_node_by_port_addresses(self):
wrong_node = utils.create_test_node(
driver='driver-one',
uuid=uuidutils.generate_uuid())
node = utils.create_test_node(
driver='driver-two',
uuid=uuidutils.generate_uuid())
addresses = []
for i in (1, 2, 3):
address = '52:54:00:cf:2d:4%s' % i
utils.create_test_port(uuid=uuidutils.generate_uuid(),
node_id=node.id, address=address)
if i > 1:
addresses.append(address)
utils.create_test_port(uuid=uuidutils.generate_uuid(),
node_id=wrong_node.id,
address='aa:bb:cc:dd:ee:ff')
res = self.dbapi.get_node_by_port_addresses(addresses)
self.assertEqual(node.uuid, res.uuid)
self.assertEqual([], res.traits)
def test_get_node_by_port_addresses_not_found(self):
node = utils.create_test_node(
driver='driver',
uuid=uuidutils.generate_uuid())
utils.create_test_port(uuid=uuidutils.generate_uuid(),
node_id=node.id,
address='aa:bb:cc:dd:ee:ff')
self.assertRaisesRegex(exception.NodeNotFound,
'was not found',
self.dbapi.get_node_by_port_addresses,
['11:22:33:44:55:66'])
def test_get_node_by_port_addresses_multiple_found(self):
node1 = utils.create_test_node(
driver='driver',
uuid=uuidutils.generate_uuid())
node2 = utils.create_test_node(
driver='driver',
uuid=uuidutils.generate_uuid())
addresses = ['52:54:00:cf:2d:4%s' % i for i in (1, 2)]
utils.create_test_port(uuid=uuidutils.generate_uuid(),
node_id=node1.id,
address=addresses[0])
utils.create_test_port(uuid=uuidutils.generate_uuid(),
node_id=node2.id,
address=addresses[1])
self.assertRaisesRegex(exception.NodeNotFound,
'Multiple nodes',
self.dbapi.get_node_by_port_addresses,
addresses)
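# A hedged sketch (not part of the original module) of the pattern most tests
# above follow: create a node via the test utilities, exercise one DB API call,
# and assert on the result. Values are illustrative.
#
#     node = utils.create_test_node(uuid=uuidutils.generate_uuid())
#     fetched = self.dbapi.get_node_by_uuid(node.uuid)
#     self.assertEqual(node.id, fetched.id)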
|
ironic/tests/unit/db/test_nodes.py
| 35,617 |
Tests for manipulating Nodes via the DB API
Copyright 2013 Hewlett-Packard Development Company, L.P. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ensure unknown filters explode even with good filters present node with provision_updated timeout node with None in provision_updated_at node without timeout node with provision_updated timeout node with None in provision_updated_at node without timeout ensure unknown filters explode even with good filters present reserve the node check reservation release reservation reserve the node another host fails to reserve or release another host succeeds assert provision_updated_at is None assert provision_updated_at has been updated
| 1,205 |
en
| 0.848218 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
from functools import reduce
from threading import RLock
if sys.version >= '3':
basestring = unicode = str
else:
from itertools import imap as map
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.sql.catalog import Catalog
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, DataType, StringType, StructType, _verify_type, \
_infer_schema, _has_nulltype, _merge_type, _create_converter, _parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
def toDF(self, schema=None, sampleRatio=None):
"""
Converts current :class:`RDD` into a :class:`DataFrame`
This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``
:param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns
:param samplingRatio: the sample ratio of rows used for inferring
:return: a DataFrame
>>> rdd.toDF().collect()
[Row(name=u'Alice', age=1)]
"""
return sparkSession.createDataFrame(self, schema, sampleRatio)
RDD.toDF = toDF
class SparkSession(object):
"""The entry point to programming Spark with the Dataset and DataFrame API.
    A SparkSession can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
To create a SparkSession, use the following builder pattern:
>>> spark = SparkSession.builder \\
... .master("local") \\
... .appName("Word Count") \\
... .config("spark.some.config.option", "some-value") \\
... .getOrCreate()
"""
class Builder(object):
"""Builder for :class:`SparkSession`.
"""
_lock = RLock()
_options = {}
@since(2.0)
def config(self, key=None, value=None, conf=None):
"""Sets a config option. Options set using this method are automatically propagated to
both :class:`SparkConf` and :class:`SparkSession`'s own configuration.
For an existing SparkConf, use `conf` parameter.
>>> from pyspark.conf import SparkConf
>>> SparkSession.builder.config(conf=SparkConf())
<pyspark.sql.session...
For a (key, value) pair, you can omit parameter names.
>>> SparkSession.builder.config("spark.some.config.option", "some-value")
<pyspark.sql.session...
:param key: a key name string for configuration property
:param value: a value for configuration property
:param conf: an instance of :class:`SparkConf`
"""
with self._lock:
if conf is None:
self._options[key] = str(value)
else:
for (k, v) in conf.getAll():
self._options[k] = v
return self
@since(2.0)
def master(self, master):
"""Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
cluster.
:param master: a url for spark master
"""
return self.config("spark.master", master)
@since(2.0)
def appName(self, name):
"""Sets a name for the application, which will be shown in the Spark web UI.
If no application name is set, a randomly generated name will be used.
:param name: an application name
"""
return self.config("spark.app.name", name)
@since(2.0)
def enableHiveSupport(self):
"""Enables Hive support, including connectivity to a persistent Hive metastore, support
for Hive serdes, and Hive user-defined functions.
"""
return self.config("spark.sql.catalogImplementation", "hive")
@since(2.0)
def getOrCreate(self):
"""Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
new one based on the options set in this builder.
This method first checks whether there is a valid global default SparkSession, and if
yes, return that one. If no valid global default SparkSession exists, the method
creates a new SparkSession and assigns the newly created SparkSession as the global
default.
>>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
>>> s1.conf.get("k1") == s1.sparkContext.getConf().get("k1") == "v1"
True
In case an existing SparkSession is returned, the config options specified
in this builder will be applied to the existing SparkSession.
>>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
>>> s1.conf.get("k1") == s2.conf.get("k1")
True
>>> s1.conf.get("k2") == s2.conf.get("k2")
True
"""
with self._lock:
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
session = SparkSession._instantiatedSession
if session is None or session._sc._jsc is None:
sparkConf = SparkConf()
for key, value in self._options.items():
sparkConf.set(key, value)
sc = SparkContext.getOrCreate(sparkConf)
# This SparkContext may be an existing one.
for key, value in self._options.items():
# we need to propagate the confs
# before we create the SparkSession. Otherwise, confs like
# warehouse path and metastore url will not be set correctly (
# these confs cannot be changed once the SparkSession is created).
sc._conf.set(key, value)
session = SparkSession(sc)
for key, value in self._options.items():
session.conf.set(key, value)
for key, value in self._options.items():
session.sparkContext._conf.set(key, value)
return session
builder = Builder()
_instantiatedSession = None
@ignore_unicode_prefix
def __init__(self, sparkContext, jsparkSession=None):
"""Creates a new SparkSession.
>>> from datetime import datetime
>>> spark = SparkSession(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
from pyspark.sql.context import SQLContext
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if jsparkSession is None:
jsparkSession = self._jvm.SparkSession(self._jsc.sc())
self._jsparkSession = jsparkSession
self._jwrapped = self._jsparkSession.sqlContext()
self._wrapped = SQLContext(self._sc, self, self._jwrapped)
_monkey_patch_RDD(self)
install_exception_handler()
# If we had an instantiated SparkSession attached with a SparkContext
# which is stopped now, we need to renew the instantiated SparkSession.
# Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate.
if SparkSession._instantiatedSession is None \
or SparkSession._instantiatedSession._sc._jsc is None:
SparkSession._instantiatedSession = self
@since(2.0)
def newSession(self):
"""
        Returns a new SparkSession as a new session that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self._jsparkSession.newSession())
@property
@since(2.0)
def sparkContext(self):
"""Returns the underlying :class:`SparkContext`."""
return self._sc
@property
@since(2.0)
def version(self):
"""The version of Spark on which this application is running."""
return self._jsparkSession.version()
@property
@since(2.0)
def conf(self):
"""Runtime configuration interface for Spark.
This is the interface through which the user can get and set all Spark and Hadoop
configurations that are relevant to Spark SQL. When getting the value of a config,
this defaults to the value set in the underlying :class:`SparkContext`, if any.
"""
if not hasattr(self, "_conf"):
self._conf = RuntimeConfig(self._jsparkSession.conf())
return self._conf
@property
@since(2.0)
def catalog(self):
"""Interface through which the user may create, drop, alter or query underlying
databases, tables, functions etc.
"""
if not hasattr(self, "_catalog"):
self._catalog = Catalog(self)
return self._catalog
@property
@since(2.0)
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
from pyspark.sql.context import UDFRegistration
return UDFRegistration(self._wrapped)
@since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped)
def _inferSchemaFromList(self, data):
"""
Infer schema from list of Row or tuple.
:param data: list of Row or tuple
:return: :class:`pyspark.sql.types.StructType`
"""
if not data:
raise ValueError("can not infer schema from empty dataset")
first = data[0]
if type(first) is dict:
warnings.warn("inferring schema from dict is deprecated,"
"please use pyspark.sql.Row instead")
schema = reduce(_merge_type, map(_infer_schema, data))
if _has_nulltype(schema):
raise ValueError("Some of types cannot be determined after inferring")
return schema
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
first = rdd.first()
if not first:
raise ValueError("The first row in RDD is empty, "
"can not infer schema")
if type(first) is dict:
warnings.warn("Using RDD of dict to inferSchema is deprecated. "
"Use pyspark.sql.Row instead")
if samplingRatio is None:
schema = _infer_schema(first)
if _has_nulltype(schema):
for row in rdd.take(100)[1:]:
schema = _merge_type(schema, _infer_schema(row))
if not _has_nulltype(schema):
break
else:
raise ValueError("Some of types cannot be determined by the "
"first 100 rows, please try again with sampling")
else:
if samplingRatio < 0.99:
rdd = rdd.sample(False, float(samplingRatio))
schema = rdd.map(_infer_schema).reduce(_merge_type)
return schema
def _createFromRDD(self, rdd, schema, samplingRatio):
"""
Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
"""
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchema(rdd, samplingRatio)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
rdd = rdd.map(schema.toInternal)
return rdd, schema
def _createFromLocal(self, data, schema):
"""
Create an RDD for DataFrame from a list or pandas.DataFrame, returns
the RDD and schema.
"""
        # make sure the data can be consumed multiple times
if not isinstance(data, list):
data = list(data)
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchemaFromList(data)
converter = _create_converter(struct)
data = map(converter, data)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
data = [schema.toInternal(row) for row in data]
return self._sc.parallelize(data), schema
@since(2.0)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or
:class:`pyspark.sql.types.StringType`, it must match the
real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
        If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation(e.g. row, tuple, int, boolean,
etc.), or :class:`list`, or :class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a
:class:`pyspark.sql.types.StringType` or a list of
column names, default is ``None``. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
``int`` as a short name for ``IntegerType``.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.0.1
Added verifySchema.
>>> l = [('Alice', 1)]
>>> spark.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> spark.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> spark.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> spark.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = spark.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = spark.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = spark.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> spark.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> spark.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
if isinstance(data, DataFrame):
raise TypeError("data is already a DataFrame")
if isinstance(schema, basestring):
schema = _parse_datatype_string(schema)
try:
import pandas
has_pandas = True
except Exception:
has_pandas = False
if has_pandas and isinstance(data, pandas.DataFrame):
if schema is None:
schema = [str(x) for x in data.columns]
data = [r.tolist() for r in data.to_records(index=False)]
verify_func = _verify_type if verifySchema else lambda _, t: True
if isinstance(schema, StructType):
def prepare(obj):
verify_func(obj, schema)
return obj
elif isinstance(schema, DataType):
dataType = schema
schema = StructType().add("value", schema)
def prepare(obj):
verify_func(obj, dataType)
return obj,
else:
if isinstance(schema, list):
schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]
prepare = lambda obj: obj
if isinstance(data, RDD):
rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
else:
rdd, schema = self._createFromLocal(map(prepare, data), schema)
jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
@ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
@since(2.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return DataFrame(self._jsparkSession.table(tableName), self._wrapped)
@property
@since(2.0)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self._wrapped)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Experimental.
:return: :class:`DataStreamReader`
"""
return DataStreamReader(self._wrapped)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Experimental.
:return: :class:`StreamingQueryManager`
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._jsparkSession.streams())
@since(2.0)
def stop(self):
"""Stop the underlying :class:`SparkContext`.
"""
self._sc.stop()
SparkSession._instantiatedSession = None
@since(2.0)
def __enter__(self):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
"""
return self
@since(2.0)
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
Specifically stop the SparkSession on exit of the with block.
"""
self.stop()
def _test():
import os
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row
import pyspark.sql.session
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.session.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['spark'] = SparkSession(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")])
globs['df'] = rdd.toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.session, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
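# A hedged usage sketch of the context-manager support defined above
# (__enter__/__exit__). It is written as a comment so nothing executes on
# import; the master URL and app name are illustrative only:
#
#     with SparkSession.builder.master("local[2]").appName("demo").getOrCreate() as spark:
#         print(spark.range(3).collect())
#     # __exit__ calls stop(), so the underlying SparkContext is stopped here.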
# File: python/pyspark/sql/session.py
""" director subsystem's configuration
- config-file schema
- settings
"""
from typing import Dict
import trafaret as T
from aiohttp import ClientSession, web
from yarl import URL
from servicelib.application_keys import APP_CLIENT_SESSION_KEY, APP_CONFIG_KEY
APP_DIRECTOR_API_KEY = __name__ + ".director_api"
CONFIG_SECTION_NAME = "director"
schema = T.Dict(
{
T.Key("enabled", default=True, optional=True): T.Bool(),
T.Key("host", default="director",): T.String(),
T.Key("port", default=8001): T.ToInt(),
T.Key("version", default="v0"): T.Regexp(
regexp=r"^v\d+"
), # storage API version basepath
}
)
def build_api_url(config: Dict) -> URL:
api_baseurl = URL.build(
scheme="http", host=config["host"], port=config["port"]
).with_path(config["version"])
return api_baseurl
def get_config(app: web.Application) -> Dict:
return app[APP_CONFIG_KEY][CONFIG_SECTION_NAME]
def get_client_session(app: web.Application) -> ClientSession:
return app[APP_CLIENT_SESSION_KEY]
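# A minimal, hedged usage sketch (not part of the original module): the host,
# port and version values below are illustrative. It shows the trafaret schema
# above validating a config dict and build_api_url composing the director
# endpoint from it.
if __name__ == "__main__":
    example_config = schema.check({"host": "director", "port": 8001, "version": "v0"})
    # Expected output: http://director:8001/v0
    print(build_api_url(example_config))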
# File: services/web/server/src/simcore_service_webserver/director/config.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-07 15:18
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
def add_author_to_blog(apps, schema_editor): # pylint: disable=unused-argument
"""Author is the claimant"""
Blog = apps.get_model("lowfat", "Blog") # pylint: disable=invalid-name
for blog in Blog.objects.all():
blog.author = blog.fund.claimant
blog.save()
class Migration(migrations.Migration):
dependencies = [
('lowfat', '0089_auto_20170306_1706'),
]
operations = [
migrations.AddField(
model_name='blog',
name='author',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='lowfat.Claimant'),
),
migrations.AddField(
model_name='historicalblog',
name='author',
field=models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='lowfat.Claimant'),
),
migrations.AlterField(
model_name='blog',
name='fund',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='lowfat.Fund'),
),
migrations.RunPython(add_author_to_blog),
]
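# A hedged note: RunPython(add_author_to_blog) above registers no reverse
# callable, so reversing this migration raises IrreversibleError. Supplying a
# no-op reverse, e.g. migrations.RunPython(add_author_to_blog,
# migrations.RunPython.noop), would make it reversible without undoing the data.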
# File: lowfat/migrations/0090_auto_20170307_1518.py
from __future__ import annotations
from abc import abstractmethod
from typing import Any, Generic, Optional, TypeVar
from goodboy.errors import Error
from goodboy.messages import DEFAULT_MESSAGES, MessageCollectionType, type_name
from goodboy.schema import Rule, SchemaWithUtils
N = TypeVar("N")
class NumericBase(Generic[N], SchemaWithUtils):
"""
Abstract base class for Int/Float schemas, should not be used directly. Use
:class:`Int` or :class:`Float` instead.
"""
def __init__(
self,
*,
allow_none: bool = False,
messages: MessageCollectionType = DEFAULT_MESSAGES,
rules: list[Rule] = [],
less_than: Optional[N] = None,
less_or_equal_to: Optional[N] = None,
greater_than: Optional[N] = None,
greater_or_equal_to: Optional[N] = None,
allowed: Optional[list[N]] = None,
):
super().__init__(allow_none=allow_none, messages=messages, rules=rules)
self._less_than = less_than
self._less_or_equal_to = less_or_equal_to
self._greater_than = greater_than
self._greater_or_equal_to = greater_or_equal_to
self._allowed = allowed
def _validate(
self, value: Any, typecast: bool, context: dict[str, Any] = {}
) -> tuple[Optional[N], list[Error]]:
value, type_errors = self._validate_exact_type(value)
if type_errors:
return None, type_errors
errors = []
if self._allowed is not None and value not in self._allowed:
errors.append(self._error("not_allowed", {"allowed": self._allowed}))
if self._less_than is not None and value >= self._less_than:
errors.append(
self._error("greater_or_equal_to", {"value": self._less_than})
)
if self._less_or_equal_to is not None and value > self._less_or_equal_to:
errors.append(
self._error("greater_than", {"value": self._less_or_equal_to})
)
if self._greater_than is not None and value <= self._greater_than:
errors.append(
self._error("less_or_equal_to", {"value": self._greater_than})
)
if self._greater_or_equal_to is not None and value < self._greater_or_equal_to:
errors.append(
self._error("less_than", {"value": self._greater_or_equal_to})
)
value, rule_errors = self._call_rules(value, typecast, context)
return value, errors + rule_errors
@abstractmethod
def _validate_exact_type(self, value: Any) -> tuple[Optional[N], list[Error]]:
...
class Float(NumericBase[float]):
"""
Accept ``float`` values. Integer values are converted to floats.
    When type casting is enabled, strings and other values with magic method
`__float__ <https://docs.python.org/3/reference/datamodel.html#object.__float__>`_
are converted to floats.
:param allow_none: If true, value is allowed to be ``None``.
:param messages: Override error messages.
:param rules: Custom validation rules.
:param less_than: Accept only values less than option value.
:param less_or_equal_to: Accept only values less than or equal to option value.
:param greater_than: Accept only values greater than option value.
:param greater_or_equal_to: Accept only values greater than or equal to option
value.
:param allowed: Allow only certain values.
"""
def _typecast(
self, input: Any, context: dict[str, Any] = {}
) -> tuple[Optional[float], list[Error]]:
if isinstance(input, float):
return input, []
if isinstance(input, int):
return float(input), []
if not isinstance(input, str):
return None, [
self._error("unexpected_type", {"expected_type": type_name("float")})
]
try:
return float(input), []
except ValueError:
return None, [self._error("invalid_numeric_format")]
def _validate_exact_type(self, value: Any) -> tuple[Optional[float], list[Error]]:
if isinstance(value, float):
return value, []
elif isinstance(value, int):
return float(value), []
else:
return None, [
self._error("unexpected_type", {"expected_type": type_name("float")})
]
class Int(NumericBase[int]):
"""
Accept ``int`` values.
    When type casting is enabled, strings and other values with magic method
`__int__ <https://docs.python.org/3/reference/datamodel.html#object.__int__>`_ are
converted to integers.
:param allow_none: If true, value is allowed to be ``None``.
:param messages: Override error messages.
:param rules: Custom validation rules.
:param less_than: Accept only values less than option value.
:param less_or_equal_to: Accept only values less than or equal to option value.
:param greater_than: Accept only values greater than option value.
:param greater_or_equal_to: Accept only values greater than or equal to option
value.
:param allowed: Allow only certain values.
"""
def _typecast(
self, input: Any, context: dict[str, Any] = {}
) -> tuple[Optional[int], list[Error]]:
if isinstance(input, int):
return input, []
if not isinstance(input, str):
return None, [
self._error("unexpected_type", {"expected_type": type_name("int")})
]
try:
return int(input), []
except ValueError:
return None, [self._error("invalid_integer_format")]
def _validate_exact_type(self, value: Any) -> tuple[Optional[int], list[Error]]:
if not isinstance(value, int):
return None, [
self._error("unexpected_type", {"expected_type": type_name("int")})
]
else:
return value, []
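# A minimal, hedged usage sketch (not part of the original module). It exercises
# the _typecast hook defined above on an Int schema; the values are illustrative,
# and the public validation entry point lives in goodboy.schema, outside this file.
if __name__ == "__main__":
    int_schema = Int(greater_or_equal_to=0)
    print(int_schema._typecast("42"))            # expected: (42, [])
    print(int_schema._typecast("not-a-number"))  # expected: (None, [<invalid_integer_format error>])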
# File: src/goodboy/types/numeric.py
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A tool to extract a build, executed by a buildbot slave.
"""
import optparse
import os
import shutil
import sys
import traceback
import urllib
from common import chromium_utils
from slave import build_directory
from slave import slave_utils
class ExtractHandler(object):
def __init__(self, url, archive_name):
self.url = url
self.archive_name = archive_name
class GSHandler(ExtractHandler):
def download(self):
status = slave_utils.GSUtilCopy(self.url, '.')
if 0 != status:
return False
try:
shutil.move(os.path.basename(self.url), self.archive_name)
except OSError:
os.remove(self.archive_name)
shutil.move(os.path.basename(self.url), self.archive_name)
return True
class WebHandler(ExtractHandler):
@chromium_utils.RunAndPrintDots
def download(self):
try:
rc = urllib.urlretrieve(self.url, self.archive_name)
print '\nDownload complete'
except IOError:
print '\nFailed to download build'
return False
return rc
def GetBuildUrl(options, build_revision, webkit_revision=None):
"""Compute the url to download the build from. This will use as a base
string, in order of preference:
0) options.build_archive_url
1) options.build_url
2) options.factory_properties.build_url
3) build url constructed from build_properties. This last type of
construction is not compatible with the 'force build' button.
Args:
options: options object as specified by parser below.
build_revision: Revision for the build.
webkit_revision: WebKit revision (optional)
"""
if options.build_archive_url:
return options.build_archive_url, None
base_filename, version_suffix = slave_utils.GetZipFileNames(
options.master_name,
options.build_number,
options.parent_build_number,
build_revision, webkit_revision, extract=True)
replace_dict = {
'base_filename': base_filename,
'parentname': options.parent_builder_name,
'parentslavename': options.parent_slave_name,
'parent_builddir': options.parent_build_dir,
}
# If builddir isn't specified, assume buildbot used the builder name
# as the root folder for the build.
if not replace_dict.get('parent_builddir') and replace_dict.get('parentname'):
replace_dict['parent_builddir'] = replace_dict.get('parentname', '')
url = options.build_url
if not url:
url = ('http://%(parentslavename)s/b/build/slave/%(parent_builddir)s/'
'chrome_staging')
if url[-4:] != '.zip': # assume filename not specified
# Append the filename to the base URL. First strip any trailing slashes.
url = url.rstrip('/')
url = '%s/%s' % (url, '%(base_filename)s.zip')
url = url % replace_dict
archive_name = url.split('/')[-1]
versioned_url = url.replace('.zip', version_suffix + '.zip')
return versioned_url, archive_name
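# Illustration (values made up): with build_url='http://archive.example/builds',
# the function appends '%(base_filename)s.zip' after stripping trailing slashes,
# so archive_name becomes '<base_filename>.zip' and the versioned URL inserts the
# version suffix before '.zip'. A build_url that already ends in '.zip' is used
# as the file name directly, and --build-archive-url short-circuits everything.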
def real_main(options):
""" Download a build, extract it to build\\BuildDir\\full-build-win32
and rename it to build\\BuildDir\\Target
"""
abs_build_dir = os.path.abspath(
build_directory.GetBuildOutputDirectory(options.src_dir))
target_build_output_dir = os.path.join(abs_build_dir, options.target)
# Generic name for the archive.
archive_name = 'full-build-%s.zip' % chromium_utils.PlatformName()
# Just take the zip off the name for the output directory name.
output_dir = os.path.join(abs_build_dir, archive_name.replace('.zip', ''))
src_dir = os.path.dirname(abs_build_dir)
if not options.build_revision and not options.build_archive_url:
(build_revision, webkit_revision) = slave_utils.GetBuildRevisions(
src_dir, options.webkit_dir, options.revision_dir)
else:
build_revision = options.build_revision
webkit_revision = options.webkit_revision
url, archive_name = GetBuildUrl(options, build_revision, webkit_revision)
if archive_name is None:
archive_name = 'build.zip'
base_url = None
else:
base_url = '/'.join(url.split('/')[:-1] + [archive_name])
if url.startswith('gs://'):
handler = GSHandler(url=url, archive_name=archive_name)
else:
handler = WebHandler(url=url, archive_name=archive_name)
# We try to download and extract 3 times.
for tries in range(1, 4):
print 'Try %d: Fetching build from %s...' % (tries, url)
failure = False
# If the url is valid, we download the file.
if not failure:
if not handler.download():
if options.halt_on_missing_build:
return slave_utils.ERROR_EXIT_CODE
failure = True
# If the versioned url failed, we try to get the latest build.
if failure:
if url.startswith('gs://') or not base_url:
continue
else:
print 'Fetching latest build at %s' % base_url
base_handler = handler.__class__(base_url, handler.archive_name)
if not base_handler.download():
continue
print 'Extracting build %s to %s...' % (archive_name, abs_build_dir)
try:
chromium_utils.RemoveDirectory(target_build_output_dir)
chromium_utils.ExtractZip(archive_name, abs_build_dir)
# For Chrome builds, the build will be stored in chrome-win32.
if 'full-build-win32' in output_dir:
chrome_dir = output_dir.replace('full-build-win32', 'chrome-win32')
if os.path.exists(chrome_dir):
output_dir = chrome_dir
print 'Moving build from %s to %s' % (output_dir, target_build_output_dir)
shutil.move(output_dir, target_build_output_dir)
except (OSError, IOError, chromium_utils.ExternalError):
print 'Failed to extract the build.'
# Print out the traceback in a nice format
traceback.print_exc()
# Try again...
continue
# If we got the latest build, then figure out its revision number.
if failure:
print "Trying to determine the latest build's revision number..."
try:
build_revision_file_name = os.path.join(
target_build_output_dir,
chromium_utils.FULL_BUILD_REVISION_FILENAME)
build_revision_file = open(build_revision_file_name, 'r')
print 'Latest build is revision: %s' % build_revision_file.read()
build_revision_file.close()
except IOError:
print "Could not determine the latest build's revision number"
if failure:
# We successfully extracted the archive, but it was the generic one.
return slave_utils.WARNING_EXIT_CODE
return 0
# If we get here, that means that it failed 3 times. We return a failure.
return slave_utils.ERROR_EXIT_CODE
def main():
option_parser = optparse.OptionParser()
option_parser.add_option('--target',
help='build target to archive (Debug or Release)')
option_parser.add_option('--src-dir', default='src',
help='path to the top-level sources directory')
option_parser.add_option('--build-dir', help='ignored')
option_parser.add_option('--master-name', help='Name of the buildbot master.')
option_parser.add_option('--build-number', type=int,
help='Buildbot build number.')
option_parser.add_option('--parent-build-dir',
help='Path to build directory on parent buildbot '
'builder.')
option_parser.add_option('--parent-builder-name',
help='Name of parent buildbot builder.')
option_parser.add_option('--parent-slave-name',
help='Name of parent buildbot slave.')
option_parser.add_option('--parent-build-number', type=int,
help='Buildbot parent build number.')
option_parser.add_option('--build-url',
help='Base url where to find the build to extract')
option_parser.add_option('--build-archive-url',
help='Exact url where to find the build to extract')
# TODO(cmp): Remove --halt-on-missing-build when the buildbots are upgraded
# to not use this argument.
option_parser.add_option('--halt-on-missing-build', action='store_true',
help='whether to halt on a missing build')
option_parser.add_option('--build_revision',
help='Revision of the build that is being '
'archived. Overrides the revision found on '
'the local disk')
option_parser.add_option('--webkit_revision',
help='Webkit revision of the build that is being '
'archived. Overrides the revision found on '
'the local disk')
option_parser.add_option('--webkit-dir', help='WebKit directory path, '
'relative to the src/ dir.')
option_parser.add_option('--revision-dir',
help=('Directory path that shall be used to decide '
'the revision number for the archive, '
'relative to the src/ dir.'))
option_parser.add_option('--build-output-dir', help='ignored')
chromium_utils.AddPropertiesOptions(option_parser)
options, args = option_parser.parse_args()
if args:
print 'Unknown options: %s' % args
return 1
if not options.master_name:
options.master_name = options.build_properties.get('mastername', '')
if not options.build_number:
options.build_number = options.build_properties.get('buildnumber')
if not options.parent_build_dir:
options.parent_build_dir = options.build_properties.get('parent_builddir')
if not options.parent_builder_name:
options.parent_builder_name = options.build_properties.get('parentname')
if not options.parent_slave_name:
options.parent_slave_name = options.build_properties.get('parentslavename')
if not options.parent_build_number:
options.parent_build_number = options.build_properties.get(
'parent_buildnumber')
if not options.build_url:
options.build_url = options.factory_properties.get('build_url')
if not options.halt_on_missing_build:
options.halt_on_missing_build = options.factory_properties.get(
'halt_on_missing_build')
if not options.target:
options.target = options.factory_properties.get('target', 'Release')
if not options.webkit_dir:
options.webkit_dir = options.factory_properties.get('webkit_dir')
if not options.revision_dir:
options.revision_dir = options.factory_properties.get('revision_dir')
options.src_dir = (options.factory_properties.get('extract_build_src_dir')
or options.src_dir)
return real_main(options)
if '__main__' == __name__:
sys.exit(main())
# File: scripts/slave/extract_build.py
#!/Users/fahmi.abdulaziz/PycharmProjects/tmdb/bin/python3.8
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import libcst as cst
import pathlib
import sys
from typing import (Any, Callable, Dict, List, Sequence, Tuple)
def partition(
predicate: Callable[[Any], bool],
iterator: Sequence[Any]
) -> Tuple[List[Any], List[Any]]:
"""A stable, out-of-place partition."""
results = ([], [])
for i in iterator:
results[int(predicate(i))].append(i)
# Returns trueList, falseList
return results[1], results[0]
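# Example: partition(lambda x: x > 1, [1, 2, 3]) returns ([2, 3], [1]) --
# the matching items first, then the rest, each in input order.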
class osloginCallTransformer(cst.CSTTransformer):
CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
'delete_posix_account': ('name', ),
'delete_ssh_public_key': ('name', ),
'get_login_profile': ('name', 'project_id', 'system_id', ),
'get_ssh_public_key': ('name', ),
'import_ssh_public_key': ('parent', 'ssh_public_key', 'project_id', ),
'update_ssh_public_key': ('name', 'ssh_public_key', 'update_mask', ),
}
def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
try:
key = original.func.attr.value
kword_params = self.METHOD_TO_PARAMS[key]
except (AttributeError, KeyError):
# Either not a method from the API or too convoluted to be sure.
return updated
# If the existing code is valid, keyword args come after positional args.
# Therefore, all positional args must map to the first parameters.
args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
if any(k.keyword.value == "request" for k in kwargs):
# We've already fixed this file, don't fix it again.
return updated
kwargs, ctrl_kwargs = partition(
lambda a: a.keyword.value not in self.CTRL_PARAMS,
kwargs
)
args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
request_arg = cst.Arg(
value=cst.Dict([
cst.DictElement(
cst.SimpleString("'{}'".format(name)),
cst.Element(value=arg.value)
)
# Note: the args + kwargs looks silly, but keep in mind that
# the control parameters had to be stripped out, and that
# those could have been passed positionally or by keyword.
for name, arg in zip(kword_params, args + kwargs)]),
keyword=cst.Name("request")
)
return updated.with_changes(
args=[request_arg] + ctrl_kwargs
)
def fix_files(
in_dir: pathlib.Path,
out_dir: pathlib.Path,
*,
transformer=osloginCallTransformer(),
):
"""Duplicate the input dir to the output dir, fixing file method calls.
Preconditions:
* in_dir is a real directory
* out_dir is a real, empty directory
"""
pyfile_gen = (
pathlib.Path(os.path.join(root, f))
for root, _, files in os.walk(in_dir)
for f in files if os.path.splitext(f)[1] == ".py"
)
for fpath in pyfile_gen:
with open(fpath, 'r') as f:
src = f.read()
# Parse the code and insert method call fixes.
tree = cst.parse_module(src)
updated = tree.visit(transformer)
# Create the path and directory structure for the new file.
updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
updated_path.parent.mkdir(parents=True, exist_ok=True)
# Generate the updated source file at the corresponding path.
with open(updated_path, 'w') as f:
f.write(updated.code)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="""Fix up source that uses the oslogin client library.
The existing sources are NOT overwritten but are copied to output_dir with changes made.
Note: This tool operates at a best-effort level at converting positional
parameters in client method calls to keyword based parameters.
Cases where it WILL FAIL include
A) * or ** expansion in a method call.
B) Calls via function or method alias (includes free function calls)
C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
These all constitute false negatives. The tool will also detect false
positives when an API method shares a name with another method.
""")
parser.add_argument(
'-d',
'--input-directory',
required=True,
dest='input_dir',
help='the input directory to walk for python files to fix up',
)
parser.add_argument(
'-o',
'--output-directory',
required=True,
dest='output_dir',
help='the directory to output files fixed via un-flattening',
)
args = parser.parse_args()
input_dir = pathlib.Path(args.input_dir)
output_dir = pathlib.Path(args.output_dir)
if not input_dir.is_dir():
print(
f"input directory '{input_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if not output_dir.is_dir():
print(
f"output directory '{output_dir}' does not exist or is not a directory",
file=sys.stderr,
)
sys.exit(-1)
if os.listdir(output_dir):
print(
f"output directory '{output_dir}' is not empty",
file=sys.stderr,
)
sys.exit(-1)
fix_files(input_dir, output_dir)
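# Illustration (as a comment, not executed; the method and values are examples
# drawn from METHOD_TO_PARAMS above). A positional call such as
#
#     client.get_login_profile("users/example", "my-project")
#
# is rewritten by osloginCallTransformer into the request-object form
#
#     client.get_login_profile(request={'name': "users/example", 'project_id': "my-project"})
#
# while control parameters (retry, timeout, metadata) stay as ordinary keyword
# arguments outside the request dict.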
# File: bin/fixup_oslogin_v1_keywords.py
# coding:utf-8
from django import forms
from django.conf import settings
from django.contrib.admin.widgets import AdminTextareaWidget
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from django.utils.http import urlencode
from . import settings as USettings
from .commands import *
from django.utils.six import string_types
# Normalize the input file path; the expected format is e.g. "abc", with no leading or trailing path separators.
# If the path parameter is callable, call it; otherwise it may be a datetime format string,
# used to generate renamed files such as file20121208.bmp.
def calc_path(OutputPath, instance=None):
if callable(OutputPath):
try:
OutputPath = OutputPath(instance)
except Exception:
OutputPath = ""
else:
try:
import datetime
OutputPath = datetime.datetime.now().strftime(OutputPath)
except Exception:
pass
return OutputPath
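# Illustrative examples of calc_path (not part of the original module):
#   calc_path("upload/%Y%m%d")                      -> "upload/20121208" when run on 2012-12-08
#   calc_path(lambda inst: "files/%s" % inst.pk, m) -> "files/42" for a model instance m with pk=42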
# width=600, height=300, toolbars="full", imagePath="", filePath="", upload_settings={},
# settings={},command=None,event_handler=None
class UEditorWidget(forms.Textarea):
def __init__(self, attrs=None):
params = attrs.copy()
width = params.pop("width")
height = params.pop("height")
toolbars = params.pop("toolbars", "full")
imagePath = params.pop("imagePath", "")
filePath = params.pop("filePath", "")
upload_settings = params.pop("upload_settings", {})
settings = params.pop("settings", {})
command = params.pop("command", None)
event_handler = params.pop("event_handler", None)
# Extension commands
self.command = command
self.event_handler = event_handler
# Upload paths
self.upload_settings = upload_settings.copy()
self.upload_settings.update({
"imagePathFormat": imagePath,
"filePathFormat": filePath
})
# Save a copy of the original upload settings
self._upload_settings = self.upload_settings.copy()
self.recalc_path(None)
self.ueditor_settings = {
'toolbars': toolbars,
'initialFrameWidth': width,
'initialFrameHeight': height
}
# Handle the toolbar setting: convert mode names such as "normal" and "mini" into toolbar configuration values
if toolbars == "full":
del self.ueditor_settings['toolbars']
elif isinstance(toolbars, string_types) and toolbars in USettings.TOOLBARS_SETTINGS:
self.ueditor_settings[
"toolbars"] = USettings.TOOLBARS_SETTINGS[toolbars]
else:
self.ueditor_settings["toolbars"] = toolbars
# raise ValueError('toolbars should be a string defined in DjangoUeditor.settings.TOOLBARS_SETTINGS, options are full(default), besttome, mini and normal!')
self.ueditor_settings.update(settings)
super(UEditorWidget, self).__init__(attrs)
def recalc_path(self, model_inst):
"""计算上传路径,允许是function"""
try:
uSettings = self.upload_settings
if 'filePathFormat' in self._upload_settings:
uSettings['filePathFormat'] = calc_path(
self._upload_settings['filePathFormat'], model_inst)
if 'imagePathFormat' in self._upload_settings:
uSettings['imagePathFormat'] = calc_path(
self._upload_settings['imagePathFormat'], model_inst)
if 'scrawlPathFormat' in self._upload_settings:
uSettings['scrawlPathFormat'] = calc_path(
self._upload_settings['scrawlPathFormat'], model_inst)
if 'videoPathFormat' in self._upload_settings:
uSettings['videoPathFormat'] = calc_path(
self._upload_settings['videoPathFormat'], model_inst)
if 'snapscreenPathFormat' in self._upload_settings:
uSettings['snapscreenPathFormat'] = calc_path(
self._upload_settings['snapscreenPathFormat'], model_inst)
if 'catcherPathFormat' in self._upload_settings:
uSettings['catcherPathFormat'] = calc_path(
self._upload_settings['catcherPathFormat'], model_inst)
if 'imageManagerListPath' in self._upload_settings:
uSettings['imageManagerListPath'] = calc_path(
self._upload_settings['imageManagerListPath'], model_inst)
if 'fileManagerListPath' in self._upload_settings:
uSettings['fileManagerListPath'] = calc_path(
self._upload_settings['fileManagerListPath'], model_inst)
# Set defaults: when the scrawl, screenshot, remote-capture and image-manager paths are not specified, they all default to imagePath
if uSettings['imagePathFormat'] != "":
default_path = uSettings['imagePathFormat']
uSettings['scrawlPathFormat'] = uSettings.get(
'scrawlPathFormat', default_path)
uSettings['videoPathFormat'] = uSettings.get(
'videoPathFormat', default_path)
uSettings['snapscreenPathFormat'] = uSettings.get(
'snapscreenPathFormat', default_path)
uSettings['catcherPathFormat'] = uSettings.get(
'catcherPathFormat', default_path)
uSettings['imageManagerListPath'] = uSettings.get(
'imageManagerListPath', default_path)
if uSettings['filePathFormat'] != "":
uSettings['fileManagerListPath'] = uSettings.get(
'fileManagerListPath', uSettings['filePathFormat'])
except Exception:
pass
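# Illustrative sketch (assumption: the caller passes a model instance so that
# callable path settings can be resolved before rendering):
#   widget = UEditorWidget(attrs={"width": 600, "height": 300,
#                                 "imagePath": lambda inst: "img/%s" % inst.pk})
#   widget.recalc_path(article)   # imagePathFormat becomes "img/<article.pk>"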
def render(self, name, value, attrs=None, renderer=None):
if value is None:
value = ''
# Parameters passed to the template
editor_id = "id_%s" % name.replace("-", "_")
uSettings = {
"name": name,
"id": editor_id,
"value": value
}
if self.command is not None:
cmdjs = ""
if isinstance(self.command, list):
for cmd in self.command:
cmdjs = cmdjs + cmd.render(editor_id)
else:
cmdjs = self.command.render(editor_id)
uSettings["commands"] = cmdjs
uSettings["settings"] = self.ueditor_settings.copy()
uSettings["settings"].update({
"serverUrl": "/ueditor/controller/?%s" % urlencode(self._upload_settings)
})
# Generate event listener bindings
if self.event_handler:
uSettings["bindEvents"] = self.event_handler.render(editor_id)
context = {
'UEditor': uSettings,
'STATIC_URL': settings.STATIC_URL,
'STATIC_ROOT': settings.STATIC_ROOT,
'MEDIA_URL': settings.MEDIA_URL,
'MEDIA_ROOT': settings.MEDIA_ROOT
}
return mark_safe(render_to_string('ueditor.html', context))
class Media:
js = ("ueditor/ueditor.config.js",
"ueditor/ueditor.all.min.js")
class AdminUEditorWidget(AdminTextareaWidget, UEditorWidget):
def __init__(self, **kwargs):
super(AdminUEditorWidget, self).__init__(**kwargs)
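# Illustrative usage sketch (the form and field names below are assumptions, not part of this module):
#   class ArticleForm(forms.ModelForm):
#       content = forms.CharField(widget=UEditorWidget(attrs={
#           "width": 800, "height": 400, "toolbars": "mini",
#           "imagePath": "upload/images/", "filePath": "upload/files/",
#       }))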
|
DjangoUeditor/widgets.py
| 7,260 |
Compute the upload paths; each setting may be a function
coding:utf-8 Normalize the input file path; the expected format is e.g. "abc" with no leading or trailing path separators. If the path parameter is callable, call it; otherwise it may be a datetime format string used to generate renamed files such as file20121208.bmp. width=600, height=300, toolbars="full", imagePath="", filePath="", upload_settings={}, settings={}, command=None, event_handler=None. Extension commands. Upload paths. Save a copy. Handle the toolbar setting: convert mode names such as normal and mini into toolbar configuration values. raise ValueError('toolbars should be a string defined in DjangoUeditor.settings.TOOLBARS_SETTINGS, options are full(default), besttome, mini and normal!') Set defaults: when the scrawl, screenshot, remote-capture and image-manager paths are not specified they all default to imagePath. Parameters passed to the template. Generate event listener bindings.
| 520 |
zh
| 0.629573 |