hexsha
stringlengths 40
40
| size
int64 6
782k
| ext
stringclasses 7
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
237
| max_stars_repo_name
stringlengths 6
72
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
list | max_stars_count
int64 1
53k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
184
| max_issues_repo_name
stringlengths 6
72
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
list | max_issues_count
int64 1
27.1k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
184
| max_forks_repo_name
stringlengths 6
72
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
list | max_forks_count
int64 1
12.2k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 6
782k
| avg_line_length
float64 2.75
664k
| max_line_length
int64 5
782k
| alphanum_fraction
float64 0
1
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
072cbcaf1b4e307e4fb2c1041376b217c41bf6f0
| 2,905 |
py
|
Python
|
Divide and Conquer/MergeSort/MergeSort.py
|
NiclasDev63/Algorithmen
|
64c24d0501f9e12ea9be6ae4fa946ad8a40134f6
|
[
"MIT"
] | null | null | null |
Divide and Conquer/MergeSort/MergeSort.py
|
NiclasDev63/Algorithmen
|
64c24d0501f9e12ea9be6ae4fa946ad8a40134f6
|
[
"MIT"
] | null | null | null |
Divide and Conquer/MergeSort/MergeSort.py
|
NiclasDev63/Algorithmen
|
64c24d0501f9e12ea9be6ae4fa946ad8a40134f6
|
[
"MIT"
] | null | null | null |
"""
Anfang des Algorithmus
"""
def mergeSort(list):
    """Sort *list* in place using recursive merge sort (divide and conquer)."""
    # A list of zero or one element is already sorted.
    if len(list) <= 1:
        return
    # Split into two halves around the midpoint.
    mid = len(list) // 2
    left_half = list[:mid]
    right_half = list[mid:]
    # Sort each half recursively.
    mergeSort(left_half)
    mergeSort(right_half)
    # Merge the two sorted halves back into the original list.
    li = ri = out = 0
    while li < len(left_half) and ri < len(right_half):
        if left_half[li] < right_half[ri]:
            list[out] = left_half[li]
            li += 1
        else:
            list[out] = right_half[ri]
            ri += 1
        out += 1
    # At most one of the halves still has elements; copy the remainder.
    list[out:] = left_half[li:] + right_half[ri:]
"""
Ende des Algorithmus
"""
# Demo run: build a sample list, print it, sort it in place, print it again.
numbers = [20, 21, 13, 6, 85, 16]
print("Unsortierte Liste: " + str(numbers))
mergeSort(numbers)
print("Sortierte Liste: " + str(numbers))
| 32.640449 | 123 | 0.619621 |
4ae9786ca8ffcb225ebb57110db7cc8df8a943f5
| 29 |
py
|
Python
|
frds/data/wrds/execucomp/__init__.py
|
mgao6767/wrds
|
7dca2651a181bf38c61ebde675c9f64d6c96f608
|
[
"MIT"
] | 31 |
2020-06-17T13:19:12.000Z
|
2022-03-27T08:56:38.000Z
|
frds/data/wrds/execucomp/__init__.py
|
mgao6767/wrds
|
7dca2651a181bf38c61ebde675c9f64d6c96f608
|
[
"MIT"
] | null | null | null |
frds/data/wrds/execucomp/__init__.py
|
mgao6767/wrds
|
7dca2651a181bf38c61ebde675c9f64d6c96f608
|
[
"MIT"
] | 8 |
2020-06-14T15:21:51.000Z
|
2021-09-29T06:28:53.000Z
|
from .anncomp import Anncomp
| 14.5 | 28 | 0.827586 |
918e75300fe04c031168c214fb97059362ff96a6
| 92 |
py
|
Python
|
2014/12/social-security-benefits-age/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | 14 |
2015-05-08T13:41:51.000Z
|
2021-02-24T12:34:55.000Z
|
2014/12/social-security-benefits-age/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | null | null | null |
2014/12/social-security-benefits-age/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | 7 |
2015-04-04T04:45:54.000Z
|
2021-02-18T11:12:48.000Z
|
#!/usr/bin/env python
# ID of a Google Doc -- presumably the spreadsheet/doc this graphic pulls
# its copy text from (confirm against the graphics build tooling).
COPY_GOOGLE_DOC_KEY = '1rn4wrIqnhLzikYukOvdxlbcPZL0h6GCLluwEZ57sKPM'
| 23 | 68 | 0.847826 |
fe7258b46d5b675697101e12e4807134f8b3a39f
| 450 |
py
|
Python
|
src/onegov/feriennet/views/calendar.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/feriennet/views/calendar.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/feriennet/views/calendar.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from morepath.request import Response
from onegov.core.security import Public
from onegov.feriennet import FeriennetApp
from onegov.feriennet.models import AttendeeCalendar
@FeriennetApp.view(
    model=AttendeeCalendar,
    permission=Public)
def view_attendee_calendar(self, request):
    """Serve the attendee's calendar as an inline text/calendar response."""
    ics_body = self.calendar(request)
    disposition = f'inline; filename={self.name}.ics'
    return Response(
        ics_body,
        content_type='text/calendar',
        content_disposition=disposition,
    )
| 28.125 | 63 | 0.76 |
feb2bd3cfad0d130272ebf6e968522f8c8c04151
| 5,026 |
py
|
Python
|
benchmark_test/scripts/main.py
|
naetimus/bootcamp
|
0182992df7c54012944b51fe9b70532ab6a0059b
|
[
"Apache-2.0"
] | 1 |
2021-01-11T18:40:22.000Z
|
2021-01-11T18:40:22.000Z
|
benchmark_test/scripts/main.py
|
naetimus/bootcamp
|
0182992df7c54012944b51fe9b70532ab6a0059b
|
[
"Apache-2.0"
] | null | null | null |
benchmark_test/scripts/main.py
|
naetimus/bootcamp
|
0182992df7c54012944b51fe9b70532ab6a0059b
|
[
"Apache-2.0"
] | null | null | null |
from milvus import Milvus,DataType
import sys, getopt
import time
import logging
import milvus_toolkit as toolkit
import milvus_load
# import milvus_load as load
import config
def connect_server():
    """Connect to the Milvus server configured in ``config``.

    Returns the client on success; on failure the exception is logged and
    ``None`` is returned (callers must be prepared for that).
    """
    try:
        return Milvus(host=config.MILVUS_HOST, port=config.MILVUS_PORT)
    except Exception as err:
        logging.error(err)
        return None
def main():
    """Command-line dispatcher for the Milvus benchmark helper.

    Value options (--collection, --dim, --index, --search_param,
    --partition_tag) must precede the action option they configure,
    because getopt results are processed in command-line order.
    """
    try:
        opts, args = getopt.getopt(
            sys.argv[1:],
            "hc",
            # BUGFIX: "flush" was missing from this list even though the
            # option loop below handles "--flush", so that flag always
            # raised GetoptError before reaching its branch.
            ["help", "collection=", "dim=", "index=", "create", "load", "build", "performance", "info", "describe", "show", "has", "rows", "describe_index", "drop", "drop_index", "version",
             "search_param=", "recall", "partition_tag=", "create_partition", "flush"]
        )
    except getopt.GetoptError:
        print("Usage: python milvus_toolkindex_type.py -q <nq> -k <topk> -c <collection> -s")
        sys.exit(2)
    for opt_name, opt_value in opts:
        if opt_name in ("-h", "--help"):
            print("python milvus_toolkindex_type.py test.py -q <nq> -k <topk> -c <collection> -c -s")
            sys.exit(2)
        elif opt_name == "--collection":
            collection_name = opt_value
        elif opt_name == "--dim":
            # NOTE(review): parsed but never used below -- the --create
            # branch hard-codes dim 128; kept as-is to preserve behavior.
            dim = int(opt_value)
        elif opt_name == "--index":
            index_type = opt_value
        elif opt_name == "--search_param":
            search_param = int(opt_value)
        elif opt_name == "--partition_tag":
            partition_tag = opt_value
        # create collection
        elif opt_name in ("-c", "--create"):
            milvus = connect_server()
            collection_param = { "fields": [ {"name": "Vec", "type": DataType.FLOAT_VECTOR, "params": {"dim": 128}} ], "segment_row_limit": 1000000 ,'auto_id': False}
            print(milvus.create_collection(collection_name, collection_param))
            sys.exit(2)
        # insert data into milvus
        elif opt_name == "--load":
            milvus_load.load(collection_name)
        # build index
        elif opt_name == "--build":
            time1 = time.time()
            toolkit.build_collection(collection_name, index_type)
            print("build cost time: ", time.time() - time1)
            sys.exit(2)
        # test search performance
        elif opt_name == "--performance":
            toolkit.search(collection_name, search_param)
            sys.exit(2)
        # run a recall test and save the search result
        elif opt_name == "--recall":
            toolkit.recall_test(collection_name, search_param)
        # create a partition inside the collection
        elif opt_name == "--create_partition":
            milvus = connect_server()
            milvus.create_partition(collection_name, partition_tag)
        # present collection statistics
        elif opt_name == "--info":
            milvus = connect_server()
            print(milvus.get_collection_stats(collection_name)[1])
            sys.exit(2)
        # describe collection schema
        elif opt_name == "--describe":
            milvus = connect_server()
            print(milvus.get_collection_info(collection_name)[1])
            sys.exit(2)
        # show collections in the Milvus server
        elif opt_name == "--show":
            milvus = connect_server()
            print(milvus.list_collections()[1])
            sys.exit(2)
        # show whether the collection exists
        elif opt_name == "--has":
            milvus = connect_server()
            print(milvus.has_collection(collection_name)[1])
            sys.exit(2)
        # get collection row count
        elif opt_name == "--rows":
            milvus = connect_server()
            print(milvus.count_entities(collection_name))
            sys.exit(2)
        # get information about the index
        elif opt_name == "--describe_index":
            milvus = connect_server()
            print(milvus.get_index_info(collection_name)[1])
            sys.exit(2)
        # flush inserted data to disk
        elif opt_name == "--flush":
            milvus = connect_server()
            status = milvus.flush([collection_name])
            print(status)
            sys.exit(2)
        # drop collection
        elif opt_name == "--drop":
            milvus = connect_server()
            status = milvus.drop_collection(collection_name)
            print(status)
            sys.exit(2)
        # drop index
        elif opt_name == "--drop_index":
            milvus = connect_server()
            status = milvus.drop_index(collection_name)
            print(status)
            sys.exit(2)
        # get server and client versions
        elif opt_name == "--version":
            milvus = connect_server()
            print("server_version: ", milvus.server_version()[1])
            print("client_version: ", milvus.client_version())
# Script entry point.
if __name__ == '__main__':
    main()
| 29.052023 | 190 | 0.558695 |
fe166f2cb2378aa8c0ff87a20790b84ae0bd8218
| 2,223 |
py
|
Python
|
research/cv/3dcnn/src/n4correction.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/3dcnn/src/n4correction.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/3dcnn/src/n4correction.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
If you need to use n4correction.py code, you need to copy it to the bin directory where antsRegistration etc are
located. Then run python n4correction.py
python n4correction.py
"""
from __future__ import division
import os
import sys
import glob
from multiprocessing import Pool, cpu_count
def n4_correction(im_input):
    """Run ANTs N4 bias-field correction on one NIfTI volume.

    The corrected image is written next to the input with a
    ``_corrected.nii.gz`` suffix. The N4BiasFieldCorrection binary must be
    reachable from the shell (see the module docstring).
    """
    import shlex  # local import so the module import block stays unchanged

    output = im_input.replace('.nii.gz', '_corrected.nii.gz')
    # Quote both paths so file names containing spaces or shell
    # metacharacters cannot break (or inject into) the shell command.
    command = ('N4BiasFieldCorrection -d 3 -i ' + shlex.quote(im_input) +
               ' -s 3 -c [50x50x30x20] -b [300] -o ' + shlex.quote(output))
    os.system(command)
def batch_works(k):
    """Process the k-th slice of ``all_paths``: N4-correct the four MRI
    modalities (t1, t1ce, t2, flair) found in each subject directory.

    Relies on the module-level globals ``all_paths`` and ``n_processes``.
    """
    share = int(len(all_paths) / n_processes)
    start = k * share
    # The final worker also takes the leftover paths from uneven division.
    if k == n_processes - 1:
        chunk = all_paths[start:]
    else:
        chunk = all_paths[start:start + share]
    for subject_dir in chunk:
        for pattern in ('*_t1.nii.gz', '*_t1ce.nii.gz', '*_t2.nii.gz', '*_flair.nii.gz'):
            n4_correction(glob.glob(os.path.join(subject_dir, pattern))[0])
# Script entry point: walk the input directory for BraTS'17 subject folders
# and N4-correct them in parallel, one worker per CPU core.
if __name__ == '__main__':
    if len(sys.argv) < 2:
        raise Exception("Need at least the input data directory")
    input_path = sys.argv[1]
    all_paths = []
    # Collect every directory whose basename starts with 'Brats17'.
    for dirpath, dirnames, files in os.walk(input_path):
        if os.path.basename(dirpath)[0:7] == 'Brats17':
            all_paths.append(dirpath)
    n_processes = cpu_count()
    pool = Pool(processes=n_processes)
    # map blocks until every worker has finished its slice.
    pool.map(batch_works, range(n_processes))
| 36.442623 | 112 | 0.65857 |
a3f45dfd03f5aeef4b0b86c0a67a8ba9a99c57a3
| 262 |
py
|
Python
|
leetcode/1389-Create-Target-Array-in-the-Given-Order/iteration.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2015-12-16T04:01:03.000Z
|
2015-12-16T04:01:03.000Z
|
leetcode/1389-Create-Target-Array-in-the-Given-Order/iteration.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2016-02-09T06:00:07.000Z
|
2016-02-09T07:20:13.000Z
|
leetcode/1389-Create-Target-Array-in-the-Given-Order/iteration.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 2 |
2019-06-27T09:07:26.000Z
|
2019-07-01T04:40:13.000Z
|
class Solution:
def createTargetArray(self, nums: List[int], index: List[int]) -> List[int]:
target = []
for i, num in enumerate(nums):
idx = index[i]
target = target[:idx] + [num] + target[idx:]
return target
| 32.75 | 80 | 0.545802 |
4a8515519924b899e3d88e839b3156001c20c902
| 3,761 |
py
|
Python
|
add-new-cloud-location.py
|
globalwatchpost/CloudRegionsList
|
f428b39fc797403cf6ed5d38196a5df29f59d8af
|
[
"MIT"
] | null | null | null |
add-new-cloud-location.py
|
globalwatchpost/CloudRegionsList
|
f428b39fc797403cf6ed5d38196a5df29f59d8af
|
[
"MIT"
] | null | null | null |
add-new-cloud-location.py
|
globalwatchpost/CloudRegionsList
|
f428b39fc797403cf6ed5d38196a5df29f59d8af
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import json
import logging
import pprint
import argparse
import os
import sys
def _readRegionFile( regionFile, logger):
with open( regionFile ) as inputJson:
jsonContents = json.load(inputJson)
return jsonContents
def _appendOrCreateJson(args, logger):
if os.path.exists( args.location_json_file ):
logger.debug( "JSON file {0} exists, read existing".format(args.location_json_file) )
cloudRegions = _readRegionFile(args.location_json_file, logger)
else:
logger.debug( "JSON file {0} does not exist, create new".format(args.location_json_file) )
cloudRegions = {}
#logger.debug( "Starting contents:\n{0}".format(json.dumps(cloudRegions, indent=4, sort_keys=True)) )
if args.cloud_provider not in cloudRegions:
cloudRegions[args.cloud_provider] = {}
if args.provider_cloud_region_name in cloudRegions[args.cloud_provider]:
logger.critical( "Cloud {0} already has an entry for region name {1}, aborting".format(
args.cloud_provider, args.provider_cloud_region_name) )
sys.exit(1)
logger.debug( "Country code string: {0}".format(args.country_code_list) )
cloudRegions[args.cloud_provider][args.provider_cloud_region_name] = {
'city' : args.city,
'continent' : args.continent,
'geo_region' : args.geo_region,
'iso_3166-1' : json.loads( args.country_code_list ),
'iso_3166-2' : json.loads( args.subdivision_code_list )
}
if args.notes is not None:
cloudRegions[args.cloud_provider][args.provider_cloud_region_name][ 'notes' ] = args.notes
#logger.debug("Updated cloud region contents:\n{0}".format(
# json.dumps(cloudRegions, indent=4, sort_keys=True)) )
with open( args.location_json_file, "w" ) as outputFileHandle:
json.dump( cloudRegions, outputFileHandle, indent=4, sort_keys=True )
def _getArguments(logger):
argParser = argparse.ArgumentParser(description="Add new cloud region")
argParser.add_argument("location_json_file", help="Filename of JSON file to create/add locations to" )
argParser.add_argument("cloud_provider", choices=[ "AWS", "Azure", "Google_Cloud" ] )
argParser.add_argument( "provider_cloud_region_name", help='Provider region string, e.g., "me-south-1"' )
argParser.add_argument( "continent",
choices=[
'Africa',
'Asia',
'Europe',
'North America',
'South America',
'Antarctica',
'Australia'
],
)
argParser.add_argument(
"geo_region",
choices=[
"America-North",
"America-South",
"Europe",
"Africa",
"Middle_East",
"Asia-South",
"Asia-Southeast",
"Asia-Northeast",
"Asia-East",
"Oceania",
],
),
argParser.add_argument( "country_code_list", help='ISO 3166-1 alpha 2 code list, e.g. "[ \"KE\" ]"' )
argParser.add_argument( "subdivision_code_list", help='ISO 3166-2 code list, e.g., "[ \"UK-EN\" ]"' )
argParser.add_argument( "city", help='City name, e.g., "Mumbai"' )
argParser.add_argument( "notes", help="Additional info (optional)", nargs='?' )
return argParser.parse_args()
def _main(logger):
    """Top-level flow: parse arguments, update the JSON file, report success."""
    parsed = _getArguments(logger)
    _appendOrCreateJson(parsed, logger)
    print( "Provider {0} region {1} added to {2}".format(
        parsed.cloud_provider, parsed.provider_cloud_region_name, parsed.location_json_file) )
if __name__ == "__main__":
logging.basicConfig( level = logging.INFO)
logger = logging.getLogger()
_main(logger)
| 34.824074 | 112 | 0.635203 |
4ac1bfcff2ba03836ed0b44ba220d831e37cef68
| 743 |
py
|
Python
|
lintcode/067-408-Add-Binary/AddBinary_001.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2017-05-18T06:11:02.000Z
|
2017-05-18T06:11:02.000Z
|
lintcode/067-408-Add-Binary/AddBinary_001.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2016-02-09T06:00:07.000Z
|
2016-02-09T07:20:13.000Z
|
lintcode/067-408-Add-Binary/AddBinary_001.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 2 |
2019-06-27T09:07:26.000Z
|
2019-07-01T04:40:13.000Z
|
class Solution:
    # @param {string} a a number
    # @param {string} b a number
    # @return {string} the result
    def addBinary(self, a, b):
        """Add two binary strings digit by digit.

        The result keeps at least the width of the wider operand
        (leading zeros in the inputs are preserved in the sum).
        """
        width = max(len(a), len(b))
        # Left-pad the shorter operand so both strings align column-wise.
        a = a.zfill(width)
        b = b.zfill(width)
        digits = []
        carry = 0
        # Walk from the least significant column to the most significant.
        for pos in range(width - 1, -1, -1):
            carry, digit = divmod(int(a[pos]) + int(b[pos]) + carry, 2)
            digits.append(str(digit))
        if carry:
            digits.append('1')
        return ''.join(reversed(digits))
| 26.535714 | 48 | 0.372813 |
60670e881b7f5631aa2ba8b36e794006776c5a97
| 1,699 |
py
|
Python
|
TyphoonCon/2021/rev/Debug_Or_Me/program.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | 1 |
2021-11-02T20:53:58.000Z
|
2021-11-02T20:53:58.000Z
|
TyphoonCon/2021/rev/Debug_Or_Me/program.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
TyphoonCon/2021/rev/Debug_Or_Me/program.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
import os
import sys
import uuid
import shutil
import string
# Gets the file from the user
def get_file():
# Get the size of the file
print "Size: ",
size = int(raw_input())
if(size > 1000000):
print "File too big"
sys.exit(0)
print "File Contents\n"
print "================="
file_contents = sys.stdin.read(size)
return file_contents
# Write a user-supplied payload to disk as a root-owned, world-rwx file.
def write_file(name, contents):
    fd = open(name, "w+")
    fd.write(contents)
    fd.close()
    # Specify the permissions of the file
    os.chown(name, 0, 0) # owner and group root (the script itself must run as root)
    os.chmod(name, 0o777) # NOTE(review): 0o777 is rwx for everyone, not just "read & execute"
# Remove the per-run working directory (and the payload inside it).
def cleanup(foldername):
    shutil.rmtree(foldername)
# Handle one user submission: build a working directory, drop the payload
# into it, run it under MaxDebugger, then tear everything down.
def setup_call():
    # Random names so repeated/concurrent runs cannot collide.
    filename = str(uuid.uuid4())
    foldername = str(uuid.uuid4())
    folder = "/home/ctf/programs/"
    # Get the file from the user
    contents = get_file()
    # Location of the chroot jail (presumably MaxDebugger chroots into it -- confirm)
    os.mkdir(folder + foldername)
    os.mkdir(folder + foldername + "/etc")
    # Copy the standard user information here
    shutil.copy("/etc/passwd", folder + foldername + "/etc/passwd")
    # Move the current working directory in here for later
    os.chdir(folder + foldername)
    # User executable to create
    write_file(folder + foldername + "/" + filename, contents)
    # Strip anything not lowercase/digit/"/"/"-" from the name. uuid4 strings
    # only contain those anyway, so this guards against special characters,
    # spaces and (most importantly) "."s
    allowlist = set(string.ascii_lowercase + string.digits + "/" + "-")
    filename = ''.join(c for c in filename if c in allowlist)
    command = "/home/ctf/MaxDebugger " + "./" + filename
    os.system(command)
    cleanup(folder + foldername)
setup_call()
| 24.271429 | 71 | 0.645085 |
e0c3fb11667f31764623059a9b9ce5383e28a829
| 728 |
py
|
Python
|
Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-010/ph-10.11-3-primel-e3.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-010/ph-10.11-3-primel-e3.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-010/ph-10.11-3-primel-e3.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
def is_primel_4(n):
    """Primality test with progress printing.

    Prints the first divisor found for composite numbers; returns True
    for primes and False otherwise.
    """
    if n == 2:
        return True  # the only even prime
    if n % 2 == 0:
        print(n, "is divisible by 2")
        return False  # every other even number is composite
    if n < 2:
        return False  # 0, 1 and negatives are not prime
    # Only odd candidates up to n // 2 need to be checked.
    for divisor in range(3, n // 2 + 1, 2):
        if n % divisor == 0:
            print(n, "is divisible by", divisor)
            return False
    return True
# Interactive driver: keep asking for numbers until the user enters 0.
while True:
    number = int(input("Please enter a number (enter 0 to exit): "))
    # As before, the primality check still runs for the 0 sentinel, so
    # "0 is divisible by 2" is printed once on the way out.
    prime = is_primel_4(number)
    if number == 0:  # was `number is 0`: identity test on an int literal
        break
    elif prime:  # was `prime is True`: identity test on a bool
        print(number, "is a prime number.")
    else:
        print(number, "is not a prime number")
| 26 | 68 | 0.532967 |
1c4340270dcdef2713ee8a18152ba3aebc09a59b
| 2,836 |
py
|
Python
|
pyventskalender/tag20.py
|
kopp/pyventskalender
|
6f6455f3c1db07f65a772b2716e4be95fbcd1804
|
[
"MIT"
] | null | null | null |
pyventskalender/tag20.py
|
kopp/pyventskalender
|
6f6455f3c1db07f65a772b2716e4be95fbcd1804
|
[
"MIT"
] | null | null | null |
pyventskalender/tag20.py
|
kopp/pyventskalender
|
6f6455f3c1db07f65a772b2716e4be95fbcd1804
|
[
"MIT"
] | null | null | null |
# Heute wollen wir ein externes Paket installieren.
#
# Dazu wird in Python `pip` verwendet.
#
# Setzt Datei virtualenv_und_pip.md voraus.
# %% Venv erstellen -- Tests 10 20
# Damit man Pakete später wieder einfach löschen kann, legt man am besten ein
# Virtual Environment an.
# Das geht am besten in der Konsole powershell.
# In Visual Studio Code kannst du eine aufmachen, indem du oben auf Terminal
# und dann New Terminal (oder Neues Terminal) gehst.
# Stelle bitte sicher, dass du in dem richtigen Ordner bist.
# Dazu kannst du `ls`, und es sollte dir u.a.
# `ich_will_meine_belohnung.py` anzeigen.
# Um jetzt das Virtual Environment anzulegen, gib ein:
# python -m venv --system-site-packages venv
# Es müsste dann der Ordner `venv` angelegt werden.
# Wenn du Glück hast, fragt dich Visual Studio Code direkt, ob du das Virtual
# Environment aktivieren willst -- sag "Ja".
# Wenn nicht, dann kannst du jetzt das Virtual Environment aktivieren.
# Am einfachsten geht das, wenn du die Konsole schließt (`exit`) und dann
# Strg+Shift+P drückst, dann geht oben eine Eingabe auf.
# Gibt dort "Python: Select Interpreter" ein und wähle das Element aus (Enter).
# Wähle dort "Enter Interpreter Path" und gib ".\venv\Scripts\python.exe" ein
# (Linux/Mac: "./venv/bin/python").
# Wenn du jetzt eine neue Konsole aufmachst, dann sorgt Visual Studio Code
# dafür, dass du das Virtual Environment nutzt.
# %% Installieren per pip -- Test 30
# Um jetzt tatsächlich dort etwas zu installieren öffne eine Konsole.
# Um zu prüfen, ob sie das Virtual Environment verwendet, gib
# Get-Command python
# ein (Linux/Mac User: `which python`).
# Das sollte den aktuellen Ordner mit venv\Scripts\python ausgeben (Linux/Mac: "venv/bin/python").
# Wenn dem nicht so ist, Konsole schließen (`exit`) und neu öffnen und hoffen.
# Wenn dem immer noch nicht so ist, das Virtual Environment nochmal aktivieren.
# Jetzt kommt endlich die Installation:
# pip install cat_fact
# %%
# War sie erfolgreich, kannst du
# catFact
# eingeben und bekommst einen wichtigen Fakt über Katzen.
# %%
# Das können wir jetzt auch gleich in Code machen:
# Try the optional third-party packages; on any import failure just fall
# through with a placeholder expression.
try:
    import requests
    from cat_fact.client import CatClient

    cat_client = CatClient(requests.Session(), "http://cat-fact.herokuapp.com")
    cat_client.get_random_fact("cat")
except ImportError:
    # ModuleNotFoundError is a subclass of ImportError, so this single
    # handler replaces the two redundant except blocks of the original.
    # We apparently cannot import what we want.
    "Katzen sind tolle Tiere"
# %%
# Das ist ein ganz schön langer output, aber der Teil in "text" ist der interessante.
# Speichere doch den Output von `get_random_fact` (ein `dict`) und gib dir den
# Wert zum Schlüssel "text" aus.
# %%
# Die Wichtigsten Schritte zu Virtualenv findest du in `virtualenv_und_pip.md`.
| 41.101449 | 98 | 0.744711 |
98ca7f1c2120931c40437aa3392b0f5dec9c624b
| 3,276 |
py
|
Python
|
test/test_tag13.py
|
kopp/pyventskalender
|
6f6455f3c1db07f65a772b2716e4be95fbcd1804
|
[
"MIT"
] | null | null | null |
test/test_tag13.py
|
kopp/pyventskalender
|
6f6455f3c1db07f65a772b2716e4be95fbcd1804
|
[
"MIT"
] | null | null | null |
test/test_tag13.py
|
kopp/pyventskalender
|
6f6455f3c1db07f65a772b2716e4be95fbcd1804
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from unittest.mock import patch
import re
try:
from pyventskalender import tag13_loesung as heute
except ImportError:
from pyventskalender import tag13 as heute
def make_fake_input(replies, prompts):
    """
    Build a stand-in for `input` suitable for mock.patch: every prompt it
    receives is appended to *prompts*, and answers are consumed from the
    front of *replies* (mutating that list), mimicking a user typing.
    """
    def _scripted_input(question):
        prompts.append(question)
        return replies.pop(0)
    return _scripted_input
EXPECTED_RE = re.compile(r"(<.*?>)")
class Tag13Tests(TestCase):
    """Checks the student's EINZUSETZENDES_WORT_RE regex and mad_libs()."""
    def find_in_text(self, text):
        """Assert that the student's regex finds a placeholder in *text*."""
        self.assertIsNotNone(
            heute.EINZUSETZENDES_WORT_RE.search(text),
            msg="Teste deine Regular Expression mit '{}'".format(text)
        )
    def find_not_in_text(self, text):
        """Assert that the student's regex matches nothing in *text*."""
        self.assertIsNone(
            heute.EINZUSETZENDES_WORT_RE.search(text),
            msg="Teste deine Regular Expression mit '{}' -- sollte nicht klappen".format(text)
        )
    def find_correct_groups(self, text):
        """Assert the student's regex captures exactly the groups that the
        reference pattern EXPECTED_RE captures."""
        expected_groups = EXPECTED_RE.findall(text)
        groups = heute.EINZUSETZENDES_WORT_RE.findall(text)
        self.assertEqual(groups, expected_groups)
    def test_10_einzusetzendes_wort_re(self):
        """Positive, negative and group-capture cases for the regex."""
        for text in ["<x>", "a <x>", "<x> a", "<x> <y>", "a <x> <y>", "a <x> <y> b", "a <x> b <y> c"]:
            self.find_in_text(text)
        for text in ["a", "<b", "c>", ">a<"]:
            self.find_not_in_text(text)
        for text in ["<x>", "a <x>", "a <ü>", "a b c <d e> f <gh i> <j><k>"]:
            self.find_correct_groups(text)
    def mad_lib_durchlauf(self, text, replies):
        """Run heute.mad_libs(text) with a mocked input() answering from
        *replies*; check the prompts shown and the substituted result."""
        prompts = []
        expected_prompts = EXPECTED_RE.findall(text)
        expected_result = text
        # Build the expected output by substituting each reply by hand.
        for prompt, reply in zip(expected_prompts, replies):
            expected_result = expected_result.replace(prompt, reply)
        with patch('builtins.input', make_fake_input(replies, prompts)):
            ergebnis = heute.mad_libs(text)
        self.assertIsNone(
            EXPECTED_RE.search(ergebnis),
            msg="Es sollten keine zu ersetzenden Elemente mehr im Ergebnis {} sein".format(ergebnis)
        )
        self.assertEqual(len(prompts), len(expected_prompts),
                         msg="Es wurde nicht die richtige Zahl an zu ersetzenden Worten gefunden/den User gefragt; text war '{}'".format(text))
        for present, expected in zip(prompts, expected_prompts):
            self.assertIn(expected, present,
                          msg="Der erwartete Prompt ist nicht aufgetaucht")
        self.assertEqual(ergebnis, expected_result)
    def test_20_mad_libs(self):
        """End-to-end runs with zero, one and several placeholders."""
        self.mad_lib_durchlauf(
            "a",
            []
        )
        self.mad_lib_durchlauf(
            "a <x>",
            ["y"]
        )
        self.mad_lib_durchlauf(
            "a <x> <y>",
            ["u", "v"]
        )
        self.mad_lib_durchlauf(
            "a <asdf asdf asdf x> <xxx xxx xxx>",
            ["abcde fghi", "jkl mno"]
        )
        self.mad_lib_durchlauf(
            "a <x> b <foo bar> c d <a b c - d> e",
            ["x-A", "foo bar-B", "a b c - d-C"]
        )
| 34.851064 | 147 | 0.579365 |
98ef24026fdfed7dce0ddb5b6011b9f3caad1442
| 6,454 |
py
|
Python
|
src/test/tests/databases/bov.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 226 |
2018-12-29T01:13:49.000Z
|
2022-03-30T19:16:31.000Z
|
src/test/tests/databases/bov.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 5,100 |
2019-01-14T18:19:25.000Z
|
2022-03-31T23:08:36.000Z
|
src/test/tests/databases/bov.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 84 |
2019-01-24T17:41:50.000Z
|
2022-03-10T10:01:46.000Z
|
# ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: bov.py
#
# Tests: mesh - 3D rectilinear, multiple domain
# plots - Pseudocolor, Subset, Label, Contour
# operators - Slice
#
# Programmer: Brad Whitlock
# Date: Fri Mar 17 14:37:45 PST 2006
#
# Modifications:
# Brad Whitlock, Thu May 4 14:02:29 PST 2006
# Added testing of INT and DOUBLE BOV files.
#
# ----------------------------------------------------------------------------
def SaveTestImage(name):
    """Save one test image at 500x500 -- larger than the regular test-case
    size, because these images contain a lot of text -- then restore the
    previous save-window settings."""
    previous = GetSaveWindowAttributes()
    enlarged = SaveWindowAttributes()
    enlarged.width = 500
    enlarged.height = 500
    enlarged.screenCapture = 0
    Test(name, enlarged)
    SetSaveWindowAttributes(previous)
def TestBOVDivide(prefix, db, doSubset):
# Take a picture to make sure that the division took. There will be
# a lot of bricks.
OpenDatabase(db)
if doSubset:
AddPlot("Subset", "bricks")
subAtts = SubsetAttributes()
subAtts.legendFlag = 0
SetPlotOptions(subAtts)
else:
AddPlot("Pseudocolor", "myvar")
DrawPlots()
v = View3DAttributes()
v.viewNormal = (0.534598, 0.40012, 0.744385)
v.focus = (15, 15, 15)
v.viewUp = (-0.228183, 0.916444, -0.32873)
v.viewAngle = 30
v.parallelScale = 8.66025
v.nearPlane = -17.3205
v.farPlane = 17.3205
v.imagePan = (0, 0)
v.imageZoom = 1
v.perspective = 1
v.eyeAngle = 2
v.centerOfRotationSet = 0
v.centerOfRotation = (15, 15, 15)
SetView3D(v)
Test(prefix + "00")
# Make sure there are the right number of zones.
Query("NumZones",use_actual_data=0)
TestText(prefix + "01", GetQueryOutputString())
# Let's slice a few times to make sure that crucial areas have the
# right values
AddPlot("Mesh", "mesh")
AddPlot("Label", "myvar")
L = LabelAttributes()
L.textHeight1 = 0.03
L.textHeight2 = 0.03
SetPlotOptions(L)
SetActivePlots((0,1,2))
AddOperator("Slice")
s = SliceAttributes()
s.originType = s.Intercept # Point, Intercept, Percent, Zone, Node
s.originIntercept = 10.001
s.normal = (0, 0, 1)
s.axisType = s.ZAxis # XAxis, YAxis, ZAxis, Arbitrary
s.upAxis = (0, 1, 0)
s.project2d = 1
SetOperatorOptions(s)
DrawPlots()
v2 = GetView2D()
v2.windowCoords = (12.0201, 13.0004, 9.99781, 10.9888)
v2.viewportCoords = (0.2, 0.95, 0.15, 0.95)
v2.fullFrameActivationMode = v2.Auto # On, Off, Auto
v2.fullFrameAutoThreshold = 100
SetView2D(v2)
SaveTestImage(prefix+"02")
# Move to another slice on the far edge that will have the max zone #
s.originIntercept = 19.998
SetOperatorOptions(s)
v3 = View2DAttributes()
v3.windowCoords = (19.2017, 20.0179, 19.1966, 20.0217)
v3.viewportCoords = (0.2, 0.95, 0.15, 0.95)
v3.fullFrameActivationMode = v3.Auto # On, Off, Auto
v3.fullFrameAutoThreshold = 100
SetView2D(v3)
SaveTestImage(prefix+"03")
# Move to another slice in the middle.
s.originIntercept = 15.01
SetOperatorOptions(s)
v4 = View2DAttributes()
v4.windowCoords = (14.6419, 15.361, 15.638, 16.365)
v4.viewportCoords = (0.2, 0.95, 0.15, 0.95)
v4.fullFrameActivationMode = v4.Auto # On, Off, Auto
v4.fullFrameAutoThreshold = 100
SetView2D(v4)
SaveTestImage(prefix+"04")
DeleteAllPlots()
# Test that ghost zones are right.
AddPlot("Pseudocolor", "myvar")
p = PseudocolorAttributes()
p.SetOpacityType(p.Constant)
p.opacity = 0.25
SetPlotOptions(p)
DrawPlots()
v5 = View3DAttributes()
v5.viewNormal = (0.772475, 0.402431, 0.491255)
v5.focus = (15, 15, 15)
v5.viewUp = (-0.355911, 0.915018, -0.18992)
v5.viewAngle = 30
v5.parallelScale = 8.66025
v5.nearPlane = -17.3205
v5.farPlane = 17.3205
v5.imagePan = (-0.0253114, 0.0398304)
v5.imageZoom = 1.20806
v5.perspective = 1
v5.eyeAngle = 2
v5.centerOfRotationSet = 0
v5.centerOfRotation = (15, 15, 15)
SetView3D(v5)
Test(prefix+"05")
# Zoom in on a contour plot to make sure that there are no tears.
# This means that the ghost zones were created properly.
ClearWindow()
p.SetOpacityType(p.FullyOpaque)
SetPlotOptions(p)
AddOperator("Isosurface")
iso = IsosurfaceAttributes()
iso.variable = "radial"
SetOperatorOptions(iso)
DrawPlots()
v6 = View3DAttributes()
v6.viewNormal = (0.373168, 0.412282, 0.831125)
v6.focus = (15, 15, 15)
v6.viewUp = (-0.181836, 0.910964, -0.370244)
v6.viewAngle = 30
v6.parallelScale = 8.66025
v6.nearPlane = -17.3205
v6.farPlane = 17.3205
v6.imagePan = (0.0994254, 0.0810457)
v6.imageZoom = 1.94126
v6.perspective = 1
v6.eyeAngle = 2
v6.centerOfRotationSet = 0
v6.centerOfRotation = (15, 15, 15)
SetView3D(v6)
Test(prefix+"06")
DeleteAllPlots()
CloseDatabase(db)
def TestBOVType(bovtype, prefixes):
# Test the original BOV file without it being divided.
TestSection("Reading BOV file of %s" % bovtype)
TestBOVDivide(prefixes[0], data_path("bov_test_data/%s_indices.bov") % bovtype, 0)
#
# Test 2 BOV files that are being subdivided into smaller bricks
# by the BOV plugin so that there are multiple domains that
# can be processed in parallel.
#
TestSection("Decomposing BOV of %s into smaller bricks" % bovtype)
TestBOVDivide(prefixes[1], data_path("bov_test_data/%s_indices_div.bov") % bovtype, 1)
TestSection("Decomposing BOV of %s with small header into smaller bricks" % bovtype)
TestBOVDivide(prefixes[2], data_path("bov_test_data/%s_indices_div_with_header.bov") % bovtype, 1)
def main():
# Define some expressions
DefineScalarExpression("x", "coord(mesh)[0]")
DefineScalarExpression("y", "coord(mesh)[1]")
DefineScalarExpression("z", "coord(mesh)[2]")
DefineScalarExpression("dx", "x - 15.")
DefineScalarExpression("dy", "y - 15.")
DefineScalarExpression("dz", "z - 15.")
DefineScalarExpression("radial", "sqrt(dx*dx + dy*dy + dz*dz)")
TestBOVType("FLOAT", ("bov_0_", "bov_1_", "bov_2_"))
TestBOVType("DOUBLE", ("bov_3_", "bov_4_", "bov_5_"))
TestBOVType("INT", ("bov_6_", "bov_7_", "bov_8_"))
Exit()
main()
| 31.950495 | 102 | 0.630462 |
c7093baead0a3858b4a849f88382b23cebfef603
| 14,768 |
py
|
Python
|
Packs/ServiceDeskPlus_On_Premise/Integrations/ServiceDeskPlus_On_Premise/test_data/result_constants.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/ServiceDeskPlus_On_Premise/Integrations/ServiceDeskPlus_On_Premise/test_data/result_constants.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/ServiceDeskPlus_On_Premise/Integrations/ServiceDeskPlus_On_Premise/test_data/result_constants.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
EXPECTED_CREATE_REQUEST = {
'ServiceDeskPlus(val.ID===obj.ID)': {
'Request': {
'Subject': 'Create request test',
'Mode': {
'name': 'E-Mail',
'id': '123640000000006665'
},
'IsRead': False,
'CancellationRequested': False,
'IsTrashed': False,
'Id': '123456789',
'Group': {
'site': None,
'deleted': False,
'name': 'Network',
'id': '123640000000006681'
},
'Requester': {
'email_id': None,
'is_technician': False,
'sms_mail': None,
'phone': None,
'name': 'First Last',
'mobile': None,
'id': '123640000000244019',
'photo_url': 'https://contacts.zoho.com/file?exp=10&ID=-1&t=user&height=60&width=60',
'is_vip_user': False,
'department': None
},
'CreatedTime': '2020-06-24T12:05:00.000Z',
'Level': {
'name': 'Tier 1',
'id': '123640000000006671'
},
'Impact': {
'name': 'Affects Group',
'id': '123640000000008036'
},
'Priority': {
'color': '#ff0000',
'name': 'High',
'id': '123640000000006805'
},
'CreatedBy': {
'email_id': '[email protected]',
'is_technician': True,
'sms_mail': None,
'phone': None,
'name': 'First Last',
'mobile': None,
'id': '123640000000142582',
'photo_url': 'https://contacts.zoho.com/file?exp=10&ID=712874208&t=user&height=60&width=60',
'is_vip_user': False,
'department': None
},
'IsEscalated': False,
'LastUpdatedTime': '2020-06-24T12:05:00.000Z',
'HasNotes': False,
'Status': 'On Hold',
'Template': {
'name': 'Default Request',
'id': '123640000000006655'
},
'RequestType': {
'name': 'Incident',
'id': '123640000000008391'
},
'DisplayId': '102',
'TimeElapsed': '0',
'Description': 'The description of the request',
'IsServiceRequest': False,
'Urgency': {
'name': 'Normal',
'id': '123640000000007921'
},
'HasRequestInitiatedChange': False,
'IsReopened': False,
'HasAttachments': False,
'HasLinkedRequests': False,
'IsOverdue': False,
'HasProblem': False,
'IsFcr': False,
'HasProject': False,
'IsFirstResponseOverdue': False,
'UnrepliedCount': 0
}
}
}
EXPECTED_UPDATE_REQUEST = {
'ServiceDeskPlus(val.ID===obj.ID)': {
'Request': {
'Subject': 'Test create request',
'Mode': {
'name': 'E-Mail',
'id': '123640000000006665'
},
'IsRead': False,
'CancellationRequested': False,
'IsTrashed': False,
'Id': '123456789',
'Group': {
'site': None,
'deleted': False,
'name': 'Network',
'id': '123640000000006681'
},
'Requester': {
'email_id': None,
'is_technician': False,
'sms_mail': None,
'phone': None,
'name': 'First Last',
'mobile': None,
'id': '123640000000244019',
'photo_url': 'https://contacts.zoho.com/file?exp=10&ID=-1&t=user&height=60&width=60',
'is_vip_user': False,
'department': None
},
'CreatedTime': '2020-06-24T12:05:00.000Z',
'Level': {
'name': 'Tier 1',
'id': '123640000000006671'
},
'Impact': {
'name': 'Affects Business',
'id': '123640000000008033'
},
'Priority': {
'color': '#ff0000',
'name': 'High',
'id': '123640000000006805'
},
'CreatedBy': {
'email_id': '[email protected]',
'is_technician': True,
'sms_mail': None,
'phone': None,
'name': 'First Last',
'mobile': None,
'id': '123640000000142582',
'photo_url': 'https://contacts.zoho.com/file?exp=10&ID=712874208&t=user&height=60&width=60',
'is_vip_user': False,
'department': None
},
'IsEscalated': False,
'LastUpdatedTime': '2020-06-24T15:06:17.000Z',
'HasNotes': False,
'Status': 'Open',
'Template': {
'name': 'Default Request',
'id': '123640000000006655'
},
'RequestType': {
'name': 'Incident',
'id': '123640000000008391'
},
'DisplayId': '102',
'TimeElapsed': '0',
'Description': 'Update the description',
'IsServiceRequest': False,
'Urgency': {
'name': 'Normal',
'id': '123640000000007921'
},
'HasRequestInitiatedChange': False,
'IsReopened': False,
'HasAttachments': False,
'HasLinkedRequests': False,
'IsOverdue': False,
'HasProblem': False,
'IsFcr': False,
'HasProject': False,
'IsFirstResponseOverdue': False,
'UnrepliedCount': 0
}
}
}
EXPECTED_LIST_SINGLE_REQUEST = {
'ServiceDeskPlus(val.ID===obj.ID)': {
'Request': [{
'Subject': 'Test create request',
'Mode': {
'name': 'E-Mail',
'id': '123640000000006665'
},
'IsRead': False,
'CancellationRequested': False,
'IsTrashed': False,
'Id': '123640000000240013',
'Group': {
'site': None,
'deleted': False,
'name': 'Network',
'id': '123640000000006681'
},
'Requester': {
'email_id': None,
'is_technician': False,
'sms_mail': None,
'phone': None,
'name': 'First Last',
'mobile': None,
'id': '123640000000244019',
'photo_url': 'https://contacts.zoho.com/file?exp=10&ID=-1&t=user&height=60&width=60',
'is_vip_user': False,
'department': None
},
'CreatedTime': '2020-06-24T12:05:00.000Z',
'Level': {
'name': 'Tier 1',
'id': '123640000000006671'
},
'Impact': {
'name': 'Affects Business',
'id': '123640000000008033'
},
'Priority': {
'color': '#ff0000',
'name': 'High',
'id': '123640000000006805'
},
'CreatedBy': {
'email_id': '[email protected]',
'is_technician': True,
'sms_mail': None,
'phone': None,
'name': 'First Last',
'mobile': None,
'id': '123640000000142582',
'photo_url': 'https://contacts.zoho.com/file?exp=10&ID=712874208&t=user&height=60&width=60',
'is_vip_user': False,
'department': None
},
'IsEscalated': False,
'LastUpdatedTime': '2020-06-24T15:27:44.000Z',
'HasNotes': False,
'Status': 'Open',
'Template': {
'name': 'Default Request',
'id': '123640000000006655'
},
'RequestType': {
'name': 'Incident',
'id': '123640000000008391'
},
'DisplayId': '102',
'TimeElapsed': '0',
'Description': 'Update the description',
'IsServiceRequest': False,
'Urgency': {
'name': 'Normal',
'id': '123640000000007921'
},
'HasRequestInitiatedChange': False,
'IsReopened': False,
'HasAttachments': False,
'HasLinkedRequests': False,
'IsOverdue': False,
'HasProblem': False,
'IsFcr': False,
'HasProject': False,
'IsFirstResponseOverdue': False,
'UnrepliedCount': 0
}]
}
}
EXPECTED_LIST_MULTIPLE_REQUESTS = {
'ServiceDeskPlus(val.ID===obj.ID)': {
'Request': [{
'Requester': {
'email_id': '[email protected]',
'is_technician': True,
'sms_mail': None,
'phone': None,
'name': 'First Last',
'mobile': None,
'id': '123640000000142582',
'photo_url': 'https://contacts.zoho.com/file?exp=10&ID=712874208&t=user&height=60&width=60',
'is_vip_user': False,
'department': None
},
'Template': {
'name': 'Default Request',
'id': '123640000000006655'
},
'CreatedTime': '2020-06-08T12:07:36.000Z',
'DisplayId': '74',
'Subject': 'request 1',
'Technician': {
'email_id': '[email protected]',
'cost_per_hour': '0',
'phone': None,
'name': 'First Last',
'mobile': None,
'id': '123640000000142552',
'photo_url': 'https://contacts.zoho.com/file?exp=10&ID=712510951&t=user&height=60&width=60',
'sms_mail_id': None
},
'IsServiceRequest': False,
'CancellationRequested': False,
'HasNotes': False,
'Id': '123640000000215007',
'Status': 'Open'
}, {
'Requester': {
'email_id': '[email protected]',
'is_technician': True,
'sms_mail': None,
'phone': None,
'name': 'First Last',
'mobile': None,
'id': '123640000000142582',
'photo_url': 'https://contacts.zoho.com/file?exp=10&ID=712874208&t=user&height=60&width=60',
'is_vip_user': False,
'department': None
},
'Template': {
'name': 'Default Request',
'id': '123640000000006655'
},
'CreatedTime': '2020-06-08T12:05:44.000Z',
'DisplayId': '73',
'Subject': 'check request outputs',
'Technician': {
'email_id': '[email protected]',
'cost_per_hour': '0',
'phone': None,
'name': 'First Last',
'mobile': None,
'id': '123640000000142552',
'photo_url': 'https://contacts.zoho.com/file?exp=10&ID=712510951&t=user&height=60&width=60',
'sms_mail_id': None
},
'IsServiceRequest': False,
'CancellationRequested': False,
'HasNotes': False,
'Id': '123640000000216003',
'Status': 'Open'
}, {
'Requester': {
'email_id': '[email protected]',
'is_technician': True,
'sms_mail': None,
'phone': None,
'name': 'First Last',
'mobile': None,
'id': '123640000000142582',
'photo_url': 'https://contacts.zoho.com/file?exp=10&ID=712874208&t=user&height=60&width=60',
'is_vip_user': False,
'department': None
},
'Template': {
'name': 'Default Request',
'id': '123640000000006655'
},
'CreatedTime': '2020-06-08T12:15:35.000Z',
'DisplayId': '75',
'Subject': 'updated request 2 from demisto',
'Technician': {
'email_id': '[email protected]',
'cost_per_hour': '0',
'phone': None,
'name': 'First Last',
'mobile': None,
'id': '123640000000142552',
'photo_url': 'https://contacts.zoho.com/file?exp=10&ID=712510951&t=user&height=60&width=60',
'sms_mail_id': None
},
'IsServiceRequest': False,
'CancellationRequested': False,
'HasNotes': False,
'Id': '123640000000217001',
'Status': 'Open'
}]
}
}
EXPECTED_LINKED_REQUEST_LIST = {
'ServiceDeskPlus.Request(val.ID===obj.ID)': {
'LinkRequests': [{
'LinkedRequest': {
'subject': 'Test create request',
'id': '123640000000240013',
'udf_fields': {
'udf_char1': None
},
'display_id': '102'
}
}, {
'LinkedRequest': {
'subject': 'Updating the last request',
'id': '123640000000241001',
'udf_fields': {
'udf_char1': None
},
'display_id': '96'
}
}]
}
}
EXPECTED_RESOLUTION_LIST = {
'ServiceDeskPlus.Request(val.ID===obj.ID)': {
'Resolution': {
'SubmittedOn': '2020-06-09T14:32:15.000Z',
'SubmittedBy': {
'email_id': '[email protected]',
'is_technician': True,
'sms_mail': None,
'phone': None,
'name': 'First Last',
'mobile': None,
'id': '123640000000142582',
'photo_url': 'https://contacts.zoho.com/file?exp=10&ID=712874208&t=user&height=60&width=60',
'is_vip_user': False,
'department': None
},
'Content': 'changing resolution from demisto'
}
}
}
EXPECTED_NO_RESOLUTION_LIST = {}
| 34.504673 | 108 | 0.423483 |
c7a8a37b528a3a357e7dca76b59eb6031bafe297
| 664 |
py
|
Python
|
exercises/zh/solution_02_13.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | 2 |
2020-07-07T01:46:37.000Z
|
2021-04-20T03:19:43.000Z
|
exercises/zh/solution_02_13.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
exercises/zh/solution_02_13.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
import spacy
from spacy.matcher import Matcher
nlp = spacy.load("zh_core_web_sm")
doc = nlp("荣耀将于7月16日发布新一代 MagicBook 锐龙笔记本,显然会配备7nm工艺、Zen2 架构的"
"全新锐龙4000系列,但具体采用低功耗的锐龙4000U 系列,还是高性能的锐龙4000H 系列,"
"目前还没有官方消息。今天,推特曝料大神公布了全新 MagicBook Pro 锐龙本的配置情况。"
)
# 创建匹配模板
pattern1 = [{"POS": "ADJ"},{"TEXT": "笔记本"}]
pattern2 = [{"TEXT": "锐龙"}, {"LIKE_NUM": True}, {"IS_ASCII": True}]
# 初始化matcher并加入模板
matcher = Matcher(nlp.vocab)
matcher.add("PATTERN1", [pattern1])
matcher.add("PATTERN2", [pattern2])
# 遍历匹配结果
for match_id, start, end in matcher(doc):
# 打印匹配到的字符串名字及匹配到的span的文本
print(doc.vocab.strings[match_id], doc[start:end].text)
| 27.666667 | 67 | 0.691265 |
c7b641c8bd8cacb558deee4ba82032cf245d7a95
| 22,227 |
py
|
Python
|
Packs/Polygon/Integrations/Polygon/Polygon.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/Polygon/Integrations/Polygon/Polygon.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/Polygon/Integrations/Polygon/Polygon.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto
from CommonServerPython import *
''' IMPORTS '''
import requests
from io import StringIO
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' CONSTANTS '''
INTEGRATION_NAME = "Group-IB THF Polygon"
LANGUAGE_TO_CODE = {
"english": "en",
"russian": "ru"
}
FILE_TYPE = "F"
URL_TYPE = "U"
API = 'api/'
ANALGIN_UPLOAD = API + 'analgin/upload/'
ATTACHES = API + 'attaches/'
ATTACH = ATTACHES + "?id={}"
REPORT = ATTACHES + "{}/{}/{}/polygon_report/"
REPORT_EXPORT = ATTACHES + "{}/{}/{}/polygon_report_export/"
PCAP_EXPORT = ATTACHES + '{}/{}/{}/dump.pcap/dump.pcap/polygon_report_file_download/'
VIDEO_EXPORT = ATTACHES + '{}/{}/{}/shots/video.webm/video.webm/polygon_report_file_download/'
HASH_REPUTATION = API + 'reports/check_hash/{}/{}/'
class RegistryKey(Common.Indicator):
"""
Registry Key indicator class
"""
def __init__(self, path, value, name=None):
self.path = path
self.name = name
self.value = value
def to_context(self):
key_context = {
'Path': self.path,
'Name': self.name,
'Value': self.value
}
return {"RegistryKey": key_context}
class Process(Common.Indicator):
"""
Process indicator class
"""
def __init__(self, name, pid, hostname=None, md5=None, sha1=None,
command_line=None, path=None, start_time=None, end_time=None,
parent=None, sibling=None, child=None):
self.name = name
self.pid = pid
self.hostname = hostname
self.md5 = md5
self.sha1 = sha1
self.command_line = command_line
self.path = path
self.start_time = start_time
self.end_time = end_time
self.parent = parent
self.sibling = sibling
self.child = child
def to_context(self):
process_context = {
'Name': self.name,
'PID': self.pid,
'Hostname': self.hostname,
'MD5': self.md5,
'SHA1': self.sha1,
'CommandLine': self.command_line,
'Path': self.path,
'StartTime': self.start_time,
'EndTime': self.end_time,
'Parent': self.parent,
'Sibling': self.sibling,
'Child': self.child
}
return {"Process": process_context}
class Client(BaseClient):
def __init__(self, base_url, verify, api_key, language):
super().__init__(base_url=base_url, verify=verify)
self._language = language
self._headers = {'X-API-KEY': api_key}
def _check_report_available(self, file_info):
report = False
if "analgin_result" in file_info:
if "commit" in file_info.get("analgin_result", {}):
if "reports" in file_info.get("analgin_result", {}):
if len(file_info["analgin_result"].get("reports", [])):
if "id" in file_info["analgin_result"]["reports"][0]:
report = True
return report
def _get_fids(self, resp):
fids = resp.get("data", {}).get("ids", [])
if not fids:
err_msg = "There is no analysis ID in THF response." \
"Try to upload file/url one more time."
raise DemistoException(err_msg)
return fids[0]
def upload_file(self, file_name, file_path, password=""):
with open(file_path, 'rb') as f:
resp = self._http_request(
method='post',
url_suffix=ANALGIN_UPLOAD,
files={'files': (file_name, f)},
data=dict(language=self._language, password=password)
)
return self._get_fids(resp)
def upload_url(self, url):
resp = self._http_request(
method='post',
url_suffix=ANALGIN_UPLOAD,
files={'files': ("url.txt", StringIO(url))},
data=dict(language=self._language)
)
return self._get_fids(resp)
def get_attach(self, id=None):
url = ATTACH.format(id) if id else ATTACHES
results = self._http_request('get', url).get("data", {}).get("results", [])
if id:
try:
results = results[0]
except Exception:
raise DemistoException(f"File with ID={id} does not exist")
return results
def get_analysis_info(self, tds_analysis_id):
file = self.get_attach(tds_analysis_id)
resp = dict(file=file)
if self._check_report_available(file):
try:
report = self._http_request('get', REPORT.format(tds_analysis_id,
file["analgin_result"]["commit"],
file["analgin_result"]["reports"][0]["id"]))
if "data" in report:
resp.update({'report': report['data']})
except Exception:
pass
return resp
def get_url(self, file):
if self._check_report_available(file):
return self._http_request(
method='get',
url_suffix=file.get("file_url")[1:],
resp_type="content"
).decode()
raise DemistoException("No reports found")
def export_report(self, tds_analysis_id):
file = self.get_attach(tds_analysis_id)
if self._check_report_available(file):
return self._http_request(
method='get',
url_suffix=REPORT_EXPORT.format(tds_analysis_id,
file["analgin_result"]["commit"],
file["analgin_result"]["reports"][0]["id"]),
resp_type="content"
)
raise DemistoException(f"No reports for analysis: {tds_analysis_id}")
def export_pcap(self, tds_analysis_id):
file = self.get_attach(tds_analysis_id)
if self._check_report_available(file):
return self._http_request(
method='get',
url_suffix=PCAP_EXPORT.format(tds_analysis_id,
file["analgin_result"]["commit"],
file["analgin_result"]["reports"][0]["id"]),
resp_type="content"
)
raise DemistoException(f"No reports for analysis: {tds_analysis_id}")
def export_video(self, tds_analysis_id):
file = self.get_attach(tds_analysis_id)
if self._check_report_available(file):
return self._http_request(
method='get',
url_suffix=VIDEO_EXPORT.format(tds_analysis_id,
file["analgin_result"]["commit"],
file["analgin_result"]["reports"][0]["id"]),
resp_type="content"
)
raise DemistoException(f"No reports for analysis: {tds_analysis_id}")
def get_hash_reputation(self, hash_type, hash_value):
return self._http_request(
method='get',
url_suffix=HASH_REPUTATION.format(hash_type, hash_value)
).get("data", {})
def drop_prefix(id_with_prefix):
return id_with_prefix[1:]
def serialize_report_info(report, analysis_type):
info = report.get('info', {})
res = {
"Verdict": "Malicious" if info.get("verdict") else "Benign",
"Started": info.get("started"),
"Analyzed": info.get("ended"),
"Internet-connection": "Available" if info.get("internet_available") else "Unavailable",
}
if info.get('verdict'):
res.update({
"Probability": "{:.2f}%".format(info.get("probability", 0.0)),
"Families": ", ".join(info.get("families", [])),
"Score": info.get("score", 0),
"DumpExists": any(map(lambda vals: len(vals) > 0, report.get("network", {}).values()))
})
if analysis_type == FILE_TYPE:
res.update({
"Type": report.get("target", {}).get("file", {}).get("type")
})
else:
res.update({
"URL": report.get("target", {}).get("url")
})
return res
def serialize_analysis_info(info, analysis_type, report):
res = {
'ID': analysis_type + str(info.get("id", "")),
'Status': 'Finished' if report else 'In Progress',
'Result': info.get('verdict')
}
if analysis_type == FILE_TYPE:
res.update({
'Name': info.get("original_filename"),
'Size': info.get('file_size'),
'MD5': info.get('md5'),
'SHA1': info.get('sha1'),
'SHA256': info.get('sha256'),
})
return res
def get_human_readable_analysis_info(analysis_info):
return tableToMarkdown(
f"Analysis {analysis_info.get('ID')}",
analysis_info,
removeNull=True
)
def get_main_indicator(report, analysis_type):
score = Common.DBotScore.GOOD
malicious = None
if report.get("info", {}).get("verdict"):
score = Common.DBotScore.BAD
malicious = "Verdict probability: {}%".format(
report.get("info", {}).get("probability")
)
signatures: list = []
for signature in report.get("signatures", []):
if signature.get("name") == "yara_rules":
signatures += [s.get('ioc') for s in signature.get('marks', [])]
if signatures:
malicious += ", iocs: {}".format(", ".join(signatures))
if analysis_type == FILE_TYPE:
tfile = report.get("target", {}).get("file", {})
return Common.File(
name=tfile.get("name"),
file_type=tfile.get("type"),
md5=tfile.get("md5"),
sha1=tfile.get("sha1"),
sha256=tfile.get("sha256"),
dbot_score=Common.DBotScore(
indicator=tfile.get("md5"),
indicator_type=DBotScoreType.FILE,
integration_name=INTEGRATION_NAME,
score=score,
malicious_description=malicious
)
)
else:
url = report.get("target", {}).get("url")
return Common.URL(
url=url,
dbot_score=Common.DBotScore(
indicator=url,
indicator_type=DBotScoreType.URL,
integration_name=INTEGRATION_NAME,
score=score,
malicious_description=malicious
)
)
def get_packages_indicators(res):
report = res['report']
command_results = []
for package in report.get("packages", []):
info = package.get('file_info', {})
file = Common.File(
name=info.get('name'),
file_type=info.get('type'),
md5=info.get('md5'),
sha1=info.get('sha1'),
sha256=info.get('sha256'),
dbot_score=Common.DBotScore(
indicator=info.get('sha1'),
indicator_type=DBotScoreType.FILE,
integration_name=INTEGRATION_NAME,
score=0
)
)
command_results.append(CommandResults(
readable_output=tableToMarkdown(f"New File indicator was created {file.name}", {}),
indicator=file,
raw_response=res
)
)
return command_results
def get_network_indicators(res):
report = res['report']
command_results = []
network = report.get('network', {})
for dns in network.get('dns', []):
domain = Common.Domain(
domain=dns.get('request'),
dns=", ".join([answer.get('data') for answer in dns.get('answers')]),
dbot_score=Common.DBotScore(
indicator=dns.get('request'),
indicator_type=DBotScoreType.DOMAIN,
integration_name=INTEGRATION_NAME,
score=0
)
)
command_results.append(CommandResults(
readable_output=tableToMarkdown(f"New Domain indicator was created {domain.domain}", {}),
indicator=domain,
raw_response=res
)
)
for host in network.get('hosts', []) + [h[0] for h in network.get('dead_hosts', [])]:
ip = Common.IP(
ip=host,
dbot_score=Common.DBotScore(
indicator=host,
indicator_type=DBotScoreType.IP,
integration_name=INTEGRATION_NAME,
score=0
)
)
command_results.append(CommandResults(
readable_output=tableToMarkdown(f"New IP indicator was created {ip.ip}", {}),
indicator=ip,
raw_response=res
)
)
for http in network.get('http', []):
url = Common.URL(
url=http.get('uri'),
dbot_score=Common.DBotScore(
indicator=http.get('uri'),
indicator_type=DBotScoreType.URL,
integration_name=INTEGRATION_NAME,
score=0
)
)
command_results.append(CommandResults(
readable_output=tableToMarkdown(f"New URL indicator was created {url.url}", {}),
indicator=url,
raw_response=res
)
)
return command_results
def get_monitor_indicators(res):
report = res['report']
command_results = []
for p in report.get('goo_monitor', {}).get('processes', []):
process = Process(
name=p.get('basename'),
pid=str(p.get('pid')),
command_line=p.get('cmdline'),
start_time=p.get('started_at'),
end_time=p.get('exited_at'),
path=p.get('filename'),
)
command_results.append(CommandResults(
readable_output=tableToMarkdown(f"New Process indicator was created {process.name}", {}),
indicator=process,
raw_response=res
)
)
for regkey in p.get('regkeys', []):
if regkey.get('action') == 'regkey_written':
reg = RegistryKey(
path=regkey.get('ioc'),
value=str(regkey.get('value'))
)
command_results.append(CommandResults(
readable_output=tableToMarkdown(f"New RegistryKey indicator was created {reg.value}", {}),
indicator=reg,
raw_response=res
)
)
return command_results
def get_report_indicators(res, analysis_type):
report = res['report']
command_results = []
human_readable = ''
indicator = get_main_indicator(report, analysis_type)
if isinstance(indicator, Common.File):
human_readable = tableToMarkdown(f"New File indicator was created {indicator.name}", {})
elif isinstance(indicator, Common.URL):
human_readable = tableToMarkdown(f"New URL indicator was created {indicator.url}", {})
command_results.append(CommandResults(
readable_output=human_readable,
indicator=indicator,
raw_response=res
)
)
command_results.extend(get_packages_indicators(res))
command_results.extend(get_network_indicators(res))
command_results.extend(get_monitor_indicators(res))
return command_results
def analysis_info_command(client, args):
tds_analysis_id_array = argToList(args.get('tds_analysis_id'))
all_results = []
for tds_analysis_id in tds_analysis_id_array:
analysis_type = tds_analysis_id[0]
res = client.get_analysis_info(drop_prefix(tds_analysis_id))
analysis_info = serialize_analysis_info(res.get('file'), analysis_type, report='report' in res)
command_results = []
if 'report' in res:
if analysis_type == URL_TYPE:
res['report']['target']['url'] = client.get_url(res.get('file'))
analysis_info.update(serialize_report_info(res['report'], analysis_type))
command_results = get_report_indicators(res, analysis_type)
human_readable = get_human_readable_analysis_info(analysis_info)
command_results.append(CommandResults(
readable_output=human_readable,
outputs_prefix="Polygon.Analysis",
outputs_key_field="ID",
outputs=analysis_info,
raw_response=res
))
all_results.extend(command_results)
return all_results
def export_report_command(client, args):
tds_analysis_id = drop_prefix(args.get('tds_analysis_id'))
report = client.export_report(tds_analysis_id)
demisto.results(fileResult(
filename='report.tar',
data=report
))
def export_pcap_command(client, args):
tds_analysis_id = drop_prefix(args.get('tds_analysis_id'))
pcap = client.export_pcap(tds_analysis_id)
demisto.results(fileResult(
filename='dump.pcap',
data=pcap
))
def export_video_command(client, args):
tds_analysis_id = drop_prefix(args.get('tds_analysis_id'))
video = client.export_video(tds_analysis_id)
if not video:
return_results("No screen activity detected")
else:
demisto.results(fileResult(
filename='video.webm',
data=video
))
def upload_url_command(client, args):
url = args.get('url')
res = client.upload_url(url)
res = f"{URL_TYPE}{res}"
outputs = {
'ID': res,
'URL': url,
'Status': 'In Progress',
}
results = CommandResults(
readable_output=tableToMarkdown("Url uploaded successfully", outputs),
outputs_prefix='Polygon.Analysis',
outputs_key_field='ID',
outputs=outputs,
raw_response=res
)
return results
def upload_file_command(client, args):
file_id = args.get('file_id')
password = args.get('password')
file_obj = demisto.getFilePath(file_id)
# Ignoring non ASCII
file_name = file_obj.get('name', '').encode('ascii', 'ignore')
file_path = file_obj.get('path')
res = client.upload_file(file_name, file_path, password)
res = f"{FILE_TYPE}{res}"
outputs = {
'ID': res,
'EntryID': file_id,
'FileName': file_obj.get('name'),
'Status': 'In Progress'
}
results = CommandResults(
readable_output=tableToMarkdown("File uploaded successfully", outputs),
outputs_prefix='Polygon.Analysis',
outputs_key_field='ID',
outputs=outputs,
raw_response=res
)
return results
def file_command(client, args):
files = argToList(args.get('file'))
all_results = []
for file in files:
hash_type = get_hash_type(file)
if hash_type != "Unknown":
res = client.get_hash_reputation(hash_type, file)
analysis_info = {
hash_type.upper(): file,
'Found': res.get('found'),
'Verdict': res.get('verdict'),
'Score': res.get('score'),
'Malware-families': res.get('malware_families')
}
score = Common.DBotScore.NONE
malicious = None
if res.get("found"):
if res.get("verdict"):
score = Common.DBotScore.BAD
malicious = "THF Polygon score: {}".format(res.get('score'))
if res.get('malware_families'):
malicious += ", {}".format(", ".join(res.get("malware_families", [])))
else:
score = Common.DBotScore.GOOD
dbot_score = Common.DBotScore(
indicator=file,
indicator_type=DBotScoreType.FILE,
integration_name=INTEGRATION_NAME,
score=score,
malicious_description=malicious
)
indicator = Common.File(**{hash_type: file, "dbot_score": dbot_score})
result = CommandResults(
outputs_prefix="Polygon.Analysis",
outputs_key_field=hash_type.upper(),
outputs=analysis_info,
indicator=indicator,
raw_response=res
)
all_results.append(result)
return all_results
def test_module(client):
    """Connectivity check backing the 'test-module' command.

    Performs a lightweight authenticated API call; any exception bubbles
    up to the caller, which reports the failure to the user.
    """
    client.get_attach()
    return 'ok'
def main():
    """Parse the integration parameters, build the API client and dispatch
    the command currently being executed by the platform.
    """
    params = demisto.params()
    base_url = params.get('server')
    api_key = params.get('api_key')
    verify_certificate = not params.get('insecure', False)
    report_language = LANGUAGE_TO_CODE[params.get('report_language')]
    # Remove proxy if not set to true in params
    handle_proxy()
    command = demisto.command()
    LOG(f'Command being called is {command}')
    try:
        client = Client(
            base_url=base_url,
            api_key=api_key,
            verify=verify_certificate,
            language=report_language
        )
        commands = {
            'polygon-upload-file': upload_file_command,
            'polygon-upload-url': upload_url_command,
            'polygon-analysis-info': analysis_info_command,
            'polygon-export-report': export_report_command,
            'polygon-export-pcap': export_pcap_command,
            'polygon-export-video': export_video_command,
            'file': file_command
        }
        if command == 'test-module':
            return_results(test_module(client))
        elif command in ('polygon-export-report', 'polygon-export-pcap',
                         'polygon-export-video'):
            # Export commands produce their own war-room entries.
            commands[command](client, demisto.args())
        elif command in ('polygon-analysis-info', 'file'):
            # These commands return one result per input item.
            for single_result in commands[command](client, demisto.args()):
                return_results(single_result)
        elif command in ('polygon-upload-file', 'polygon-upload-url'):
            return_results(commands[command](client, demisto.args()))
    # Log exceptions
    except Exception as err:
        return_error(f'Failed to execute {command} command. Error: {str(err)}')
# Run main() when executed directly or when loaded by the XSOAR engine,
# which may expose the module under the builtins module name.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| 34.567652 | 110 | 0.567823 |
401334e1d42e7431ef24f6a8647a0328db046fa4
| 585 |
py
|
Python
|
diversos/Casas.py
|
lcarlin/guppe
|
a0ee7b85e8687e8fb8243fbb509119a94bc6460f
|
[
"Apache-2.0"
] | 1 |
2021-12-18T15:29:24.000Z
|
2021-12-18T15:29:24.000Z
|
diversos/Casas.py
|
lcarlin/guppe
|
a0ee7b85e8687e8fb8243fbb509119a94bc6460f
|
[
"Apache-2.0"
] | null | null | null |
diversos/Casas.py
|
lcarlin/guppe
|
a0ee7b85e8687e8fb8243fbb509119a94bc6460f
|
[
"Apache-2.0"
] | 3 |
2021-08-23T22:45:20.000Z
|
2022-02-17T13:17:09.000Z
|
class House:
    """Simple model of a house exposing a validated ``price`` property."""

    def __init__(self, price):
        # The initial price is stored as-is; validation applies to updates only
        # (matches the original behavior, which the demo below relies on).
        self._price = price

    @property
    def price(self):
        """Current price of the house."""
        return self._price

    @price.setter
    def price(self, new_price):
        # Fix: check the type BEFORE comparing. The original order
        # (`new_price > 0 and isinstance(...)`) raised a TypeError for
        # non-numeric values instead of printing the validation message.
        # `int` is additionally accepted as a harmless generalization;
        # `bool` is excluded since it is a subclass of `int`.
        if (isinstance(new_price, (int, float))
                and not isinstance(new_price, bool)
                and new_price > 0):
            self._price = new_price
        else:
            print("Please enter a valid price")

    @price.deleter
    def price(self):
        del self._price
# Small demo: show the price before and after a property update.
print('-=-|-=-|-=-|-=-|-=-|-=-|-=-|-=-|-=-|-=-|-=-|-=-|-=-|-=-|-=-')
demo_house = House(5000)
print(f'Casa -> {demo_house.price}')
demo_house.price = 6002.00
print(f'Casa -> {demo_house.price}')
| 20.892857 | 68 | 0.529915 |
dc16c0531eb8266f80582753cabb4f965f78d8e6
| 681 |
py
|
Python
|
comp/zenefits/oa/oa3/QueensThreaten.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2015-12-16T04:01:03.000Z
|
2015-12-16T04:01:03.000Z
|
comp/zenefits/oa/oa3/QueensThreaten.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2016-02-09T06:00:07.000Z
|
2016-02-09T07:20:13.000Z
|
comp/zenefits/oa/oa3/QueensThreaten.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 2 |
2019-06-27T09:07:26.000Z
|
2019-07-01T04:40:13.000Z
|
def qt(a):
    """Count diagonal threats between queens.

    ``a[col]`` gives the row of the queen placed in column ``col`` (one
    queen per column). Only diagonal attacks are considered: two queens
    share a diagonal when ``col + row`` or ``col - row`` coincide.
    Returns the maximum number of threats on any single queen, short
    circuiting with 4 as soon as some queen accumulates four threats.
    """
    size = len(a)
    down_diag = {}   # keyed by col + row
    up_diag = {}     # keyed by col - row
    threat_counts = []
    for col in range(size):
        row = a[col]
        threat_counts.append(0)
        # Process the two diagonal families in the same order as before:
        # first col + row, then col - row.
        for diag, key in ((down_diag, col + row), (up_diag, col - row)):
            if key in diag:
                threat_counts[col] += 1
                prev = diag[key][-1]
                threat_counts[prev] += 1
                if threat_counts[prev] == 4:
                    return 4
                diag[key].append(col)
            else:
                diag[key] = [col]
    return max(threat_counts)
| 23.482759 | 34 | 0.358297 |
90a82a592519c118c8c2b46044d3d9aa57e4947d
| 5,042 |
py
|
Python
|
backend/apps/iamstudent/management/commands/createfakeusers.py
|
match4healthcare/match4healthcare
|
acf69e3b781d715f0a947c2a9df6646e94f1ca6b
|
[
"MIT"
] | 2 |
2020-03-28T13:56:39.000Z
|
2020-03-29T10:16:12.000Z
|
backend/apps/iamstudent/management/commands/createfakeusers.py
|
match4healthcare/match4healthcare
|
acf69e3b781d715f0a947c2a9df6646e94f1ca6b
|
[
"MIT"
] | 76 |
2020-03-27T21:53:04.000Z
|
2020-03-30T20:27:43.000Z
|
backend/apps/iamstudent/management/commands/createfakeusers.py
|
match4healthcare/match4healthcare
|
acf69e3b781d715f0a947c2a9df6646e94f1ca6b
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timedelta
from django.core.management.base import BaseCommand
import numpy as np
from apps.accounts.models import User
from apps.iamstudent.models import AUSBILDUNGS_TYPEN_COLUMNS, BEZAHLUNG_CHOICES, Student
from apps.ineedstudent.models import Hospital
from ._utils import BIG_CITY_PLZS
FAKE_MAIL = "@example.com"


def new_mail(x):
    """Build a recognizable fake email address for the given identifier."""
    return f"{x}{FAKE_MAIL}"
class Command(BaseCommand):
    """Management command that populates the database with fake students
    and hospitals, or deletes those fakes again.

    Fake accounts are recognizable by their email address, which always
    ends in the ``FAKE_MAIL`` suffix.
    """

    # has to be "help" because we inherit from django manage.py Command, thus ignore A003
    help = "Populates the database with fake users or deletes them."  # noqa: A003

    def add_arguments(self, parser):
        """Register the command line options understood by this command."""
        parser.add_argument(
            "--delete",
            action="store_true",
            help='Delete all users with an email ending in "%s"' % FAKE_MAIL,
        )
        parser.add_argument(
            "--add-students", nargs=1, help="Add [N] new students to the poll",
        )
        parser.add_argument(
            "--add-hospitals", nargs=1, help="Add [N] new hospitals to the poll",
        )
        parser.add_argument(
            "--no-input", action="store_true", help="Answer yes to all questions.",
        )

    def handle(self, *args, **options):
        """Entry point: dispatch to delete/add actions based on the options."""
        # Without any action option, just print the usage text.
        if (
            not options["delete"]
            and options["add_hospitals"] is None
            and options["add_students"] is None
        ):
            self.print_help("", "")
            return None

        self.all_yes = options["no_input"]

        if options["delete"]:
            self.delete_all_fakes()

        if options["add_hospitals"] is not None:
            self.add_fake_hospitals(int(options["add_hospitals"][0]))

        if options["add_students"] is not None:
            self.add_fake_students(int(options["add_students"][0]))

    def delete_all_fakes(self):
        """Delete every user whose email contains the fake-mail suffix,
        asking for confirmation first unless --no-input was given."""
        qs = User.objects.filter(email__contains=FAKE_MAIL)
        n = qs.count()

        if n == 0:
            self.stdout.write(self.style.SUCCESS("No fake users detected."))
            return

        is_sure = (
            input(
                'You are about to delete %s users with emails including "%s". '
                "Are you sure you want to delete them? [y/n]" % (n, FAKE_MAIL)
            )
            if not self.all_yes
            else "y"
        )
        if is_sure != "y":
            self.stdout.write(self.style.WARNING("Users NOT deleted."))
            return

        qs.delete()
        self.stdout.write(self.style.SUCCESS("Successfully deleted these %s fake users." % n))

    def add_fake_students(self, n):
        """Create *n* fake student accounts with randomized attributes."""
        plzs = np.random.choice(BIG_CITY_PLZS, size=n)
        months = np.random.choice(np.arange(1, 12), size=n)
        days = np.random.choice(np.arange(2, 15), size=n)
        year = 2020
        n_users = User.objects.all().count()
        for i in range(n):
            # Offset by the current user count so usernames/emails stay unique.
            m = new_mail(i + n_users)
            # Randomly toggle each education-type flag of the student.
            kwd = dict(
                zip(
                    AUSBILDUNGS_TYPEN_COLUMNS,
                    np.random.choice([True, False], size=len(AUSBILDUNGS_TYPEN_COLUMNS)),
                )
            )
            u = User.objects.create(
                username=m,
                email=m,
                is_student=True,
                validated_email=True,
                date_joined=datetime.now() - timedelta(days=np.random.randint(0, 30)),
            )
            u.set_password(m)
            u.save()
            Student.objects.create(
                user=u,
                plz=plzs[i],
                braucht_bezahlung=BEZAHLUNG_CHOICES[np.random.choice([0, 1, 2])][0],
                is_activated=np.random.choice([True, False], p=[0.95, 0.05]),
                availability_start="{}-{:02d}-{:02d}".format(year, months[i], days[i]),
                zeitliche_verfuegbarkeit=np.random.choice([1, 2, 3, 4]),
                umkreis=np.random.choice([1, 2, 3, 4], p=[0.2, 0.5, 0.27, 0.03]),
                unterkunft_gewuenscht=np.random.choice([True, False], p=[0.1, 0.9]),
                **kwd
            )
        self.stdout.write(self.style.SUCCESS("Created %s students." % n))

    def add_fake_hospitals(self, n):
        """Create *n* fake hospital accounts with randomized attributes."""
        plzs = np.random.choice(BIG_CITY_PLZS, size=n)
        n_users = User.objects.all().count()
        for i in range(n):
            m = new_mail(i + n_users)
            u = User.objects.create(
                username=m,
                email=m,
                # NOTE(review): is_student=True here looks like a copy/paste
                # from add_fake_students -- presumably hospital users should
                # not be flagged as students; verify against the User model.
                is_student=True,
                validated_email=True,
                date_joined=datetime.now() - timedelta(days=np.random.randint(0, 30)),
            )
            u.set_password(m)
            u.save()
            Hospital.objects.create(
                user=u,
                plz=plzs[i],
                ansprechpartner="Frau Müller",
                sonstige_infos="Wir haben viel zu tun.",
                is_approved=np.random.choice([True, False], p=[0.7, 0.3]),
                appears_in_map=np.random.choice([True, False], p=[0.8, 0.2]),
            )
        self.stdout.write(self.style.SUCCESS("Created %s hospitals." % n))
| 34.29932 | 94 | 0.545617 |
31e545b10c2193b6085f6ecba684524f2f876b5e
| 953 |
py
|
Python
|
Backend/migrations/alembic/versions/3610493d8979_create_cases_table.py
|
dbvis-ukon/coronavis
|
f00374ac655c9d68541183d28ede6fe5536581dc
|
[
"Apache-2.0"
] | 15 |
2020-04-24T20:18:11.000Z
|
2022-01-31T21:05:05.000Z
|
Backend/migrations/alembic/versions/3610493d8979_create_cases_table.py
|
dbvis-ukon/coronavis
|
f00374ac655c9d68541183d28ede6fe5536581dc
|
[
"Apache-2.0"
] | 2 |
2021-05-19T07:15:09.000Z
|
2022-03-07T08:29:34.000Z
|
Backend/migrations/alembic/versions/3610493d8979_create_cases_table.py
|
dbvis-ukon/coronavis
|
f00374ac655c9d68541183d28ede6fe5536581dc
|
[
"Apache-2.0"
] | 4 |
2020-04-27T16:20:13.000Z
|
2021-02-23T10:39:42.000Z
|
"""create cases table
Revision ID: 3610493d8979
Revises:
Create Date: 2020-11-25 20:42:26.698495
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '3610493d8979'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``cases`` table together with its lookup indexes."""
    # Raw SQL is executed directly on the connection so the table and both
    # indexes are created in a single statement batch.
    op.get_bind().execute("""
    create table cases
    (
        datenbestand timestamp with time zone,
        idbundesland integer,
        bundesland varchar(255),
        landkreis varchar(255),
        objectid integer,
        meldedatum timestamp with time zone,
        gender varchar(255),
        agegroup varchar(255),
        casetype varchar(255),
        id serial not null
            constraint cases_pkey
                primary key,
        idlandkreis varchar
    );
    create index cases_meldedatum_idlandkreis_index
        on cases (meldedatum, idlandkreis);
    create index cases_datenbestand_index
        on cases (datenbestand desc);
    """)
def downgrade():
    """Drop the ``cases`` table created by :func:`upgrade` (indexes are
    dropped along with it)."""
    op.drop_table('cases')
| 20.717391 | 47 | 0.681007 |
31ea940cb3251ed0ab119f4780f455aa41b51195
| 21,860 |
py
|
Python
|
src/onegov/core/request.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/core/request.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/core/request.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
import collections
import morepath
from cached_property import cached_property
from datetime import timedelta
from onegov.core.cache import lru_cache
from onegov.core.utils import append_query_param
from itsdangerous import (
BadSignature,
SignatureExpired,
TimestampSigner,
URLSafeSerializer,
URLSafeTimedSerializer
)
from more.content_security import ContentSecurityRequest
from more.webassets.core import IncludeRequest
from morepath.authentication import NO_IDENTITY
from onegov.core import utils
from onegov.core.crypto import random_token
from ua_parser import user_agent_parser
from webob.exc import HTTPForbidden
from wtforms.csrf.session import SessionCSRF
# Flash-message record: ``text`` is the message body, ``type`` its category.
Message = collections.namedtuple('Message', 'text type')
class ReturnToMixin(object):
    """ Safe handling of 'return-to' links.

    A return-to link carries a 'return-to' query parameter pointing at the
    url a view (typically one with a form) should redirect to once it is
    done. To prevent phishing via attacker-controlled redirect targets,
    the parameter value is cryptographically signed when the link is
    created and verified again before any redirect happens.

    Usage::

        url = request.return_to(original_url, redirect)   # create the link
        return request.redirect(default_url)              # honor it later

    Never read the 'return-to' parameter directly and never fill it with
    user supplied data - these helpers exist precisely so that only
    internally generated, signed targets are ever redirected to. Without
    a 'return-to' parameter, :meth:`redirect` behaves exactly like
    ``morepath.redirect``.

    """

    @property
    def identity_secret(self):
        # Subclasses must provide the secret used for signing.
        raise NotImplementedError

    @property
    def redirect_signer(self):
        # Serializer with a fixed salt, dedicated to return-to values.
        return URLSafeSerializer(self.identity_secret, 'return-to')

    @lru_cache(maxsize=16)
    def sign_url_for_redirect(self, url):
        # Cached, since the same redirect target tends to be signed often.
        return self.redirect_signer.dumps(url)

    def return_to(self, url, redirect):
        """ Returns *url* with a signed 'return-to' parameter pointing at
        *redirect*. """
        return utils.append_query_param(
            url, 'return-to', self.sign_url_for_redirect(redirect))

    def return_here(self, url):
        """ Returns *url* with a 'return-to' parameter pointing back at
        the current request url. """
        return self.return_to(url, self.url)

    def redirect(self, url):
        """ Redirects to the verified 'return-to' target when present and
        validly signed, otherwise to the given *url*. """
        if 'return-to' in self.GET:
            try:
                url = self.redirect_signer.loads(self.GET['return-to'])
            except BadSignature:
                pass

        return morepath.redirect(url)
class CoreRequest(IncludeRequest, ContentSecurityRequest, ReturnToMixin):
    """ Extends the default Morepath request with virtual host support and
    other useful methods.

    Virtual hosting might be supported by Morepath directly in the future:
    https://github.com/morepath/morepath/issues/185

    """

    @cached_property
    def identity_secret(self):
        """ The secret used to sign tokens and return-to links. """
        return self.app.identity_secret

    @cached_property
    def session(self):
        """ The database session bound to the current application. """
        return self.app.session()

    def link_prefix(self, *args, **kwargs):
        """ Override the `link_prefix` with the application base path provided
        by onegov.server, because the default link_prefix contains the
        hostname, which is not useful in our case - we'll add the hostname
        ourselves later.

        """
        return getattr(self.app, 'application_base_path', '')

    @cached_property
    def x_vhm_host(self):
        """ Return the X_VHM_HOST variable or an empty string.

        X_VHM_HOST acts like a prefix to all links generated by Morepath.
        If this variable is not empty, it will be added in front of all
        generated urls.

        """
        return self.headers.get('X_VHM_HOST', '').rstrip('/')

    @cached_property
    def x_vhm_root(self):
        """ Return the X_VHM_ROOT variable or an empty string.

        X_VHM_ROOT is a bit more tricky than X_VHM_HOST. It tells Morepath
        where the root of the application is situated. This means that the
        value of X_VHM_ROOT must be an existing path inside of Morepath.

        We can understand this best with an example. Let's say you have a
        Morepath application that serves a blog under /blog. You now want to
        serve the blog under a separate domain, say blog.example.org.

        If we just served Morepath under blog.example.org, we'd get urls like
        this one::

            blog.example.org/blog/posts/2014-11-17-16:00

        In effect, this subdomain would be no different from example.org
        (without the blog subdomain). However, we want the root of the host to
        point to /blog.

        To do this we set X_VHM_ROOT to /blog. Morepath will then
        automatically return urls like this::

            blog.example.org/posts/2014-11-17-16:00

        """
        return self.headers.get('X_VHM_ROOT', '').rstrip('/')

    @cached_property
    def url(self):
        """ Returns the current url, taking the virtual hosting in account. """
        url = self.transform(self.path)

        if self.query_string:
            url += '?' + self.query_string

        return url

    def transform(self, url):
        """ Applies X_VHM_HOST and X_VHM_ROOT to the given url (which is
        expected to not contain a host yet!). """
        if self.x_vhm_root:
            url = '/' + utils.lchop(url, self.x_vhm_root).lstrip('/')

        if self.x_vhm_host:
            url = self.x_vhm_host + url
        else:
            url = self.host_url + url

        return url

    def link(self, *args, query_params=None, **kwargs):
        """ Extends the default link generating function of Morepath.

        :query_params: optional mapping of additional query parameters that
            are appended to the generated link. The default is ``None``
            rather than ``{}`` to avoid the mutable default argument
            pitfall (the original signature used ``query_params={}``).

        """
        result = self.transform(super().link(*args, **kwargs))

        for key, value in (query_params or {}).items():
            result = append_query_param(result, key, value)

        return result

    def class_link(self, *args, **kwargs):
        """ Extends the default class link generating function of
        Morepath. """
        return self.transform(super().class_link(*args, **kwargs))

    def filestorage_link(self, path):
        """ Takes the given filestorage path and returns an url if the path
        exists. The url might point to the local server or it might point to
        somehwere else on the web.

        """
        app = self.app

        if not app.filestorage.exists(path):
            return None

        if app.filestorage.hasurl(path):
            url = app.filestorage.geturl(path)

            if not url.startswith('file://'):
                return url

        return self.link(app.modules.filestorage.FilestorageFile(path))

    @cached_property
    def theme_link(self):
        """ Returns the link to the current theme. Computed once per request.

        The theme is automatically compiled and stored if it doesn't exist
        yet, or if it is outdated.

        """
        theme = self.app.settings.core.theme
        assert theme is not None, "Do not call if no theme is used"

        # Shift-F5 (no-cache without XHR) may force a recompile if allowed.
        force = self.app.always_compile_theme or (
            self.app.allow_shift_f5_compile
            and self.headers.get('cache-control') == 'no-cache'
            and self.headers.get('x-requested-with') != 'XMLHttpRequest')

        filename = self.app.modules.theme.compile(
            self.app.themestorage, theme, self.app.theme_options,
            force=force
        )

        return self.link(self.app.modules.theme.ThemeFile(filename))

    @cached_property
    def browser_session(self):
        """ Returns a browser_session bound to the request. Works via
        cookies, so requests without cookies won't be able to use the
        browser_session.

        The browser session is bound to the application (by id), so no
        session data is shared between the applications.

        If no data is written to the browser_session, no session_id cookie
        is created.

        """

        if 'session_id' in self.cookies:
            session_id = self.app.unsign(self.cookies['session_id'])
            session_id = session_id or random_token()
        else:
            session_id = random_token()

        def on_dirty(session, token):
            # once the cookie has been set, we do not change it, not even if
            # the user logs out - this way we can still identify the user and
            # send him messages, for example when logging out
            if 'session_id' in self.cookies:
                return

            self.cookies['session_id'] = self.app.sign(token)

            @self.after
            def store_session(response):
                # Safari 12.x has a nasty bug, where same-site will lead
                # to a failure to safe cookies in certain scenarios. As a
                # work around, we do not send the same-site directive to
                # this specific Safari release. At the point of this
                # writing this version is about to be phased out, so this
                # has only a minor security impact.
                #
                # See https://bugs.webkit.org/show_bug.cgi?id=198181
                ua = self.agent['user_agent']

                if ua['family'] == 'Safari' and ua['major'] == '12':
                    samesite = None
                else:
                    samesite = self.app.same_site_cookie_policy

                response.set_cookie(
                    'session_id',
                    self.cookies['session_id'],
                    secure=self.app.identity_secure,
                    httponly=True,
                    samesite=samesite
                )

        return self.app.modules.browser_session.BrowserSession(
            cache=self.app.session_cache,
            token=session_id,
            on_dirty=on_dirty
        )

    def get_form(self, form_class, i18n_support=True, csrf_support=True,
                 data=None, model=None):
        """ Returns an instance of the given form class, set up with the
        correct translator and with CSRF protection enabled (the latter
        doesn't work yet).

        Form classes passed to this function (or defined through the
        ``App.form`` directive) may define a ``on_request`` method, which
        is called after the request has been bound to the form and before
        the view function is called.

        """
        meta = {}

        if i18n_support:
            translate = self.get_translate(for_chameleon=False)
            form_class = self.app.modules.i18n.get_translation_bound_form(
                form_class, translate)

            meta['locales'] = [self.locale, 'en'] if self.locale else []

        if csrf_support:
            meta['csrf'] = True
            meta['csrf_context'] = self.browser_session
            meta['csrf_class'] = SessionCSRF
            meta['csrf_secret'] = self.app.csrf_secret.encode('utf-8')
            meta['csrf_time_limit'] = timedelta(
                seconds=self.app.csrf_time_limit)

        # XXX it might be cleaner to always use the request in the meta,
        # instead of adding it to the form like it is done below - the meta
        # can also be accessed by form widgets
        meta['request'] = self

        # `x and x or None` was the dated spelling of `x or None`
        formdata = self.POST or None
        form = form_class(formdata=formdata, meta=meta, data=data)

        assert not hasattr(form, 'request')
        form.request = self
        form.model = model

        if hasattr(form, 'on_request'):
            form.on_request()

        return form

    def translate(self, text):
        """ Transalates the given text, if it's a translatable text. """

        if not hasattr(text, 'domain'):
            return text

        return self.translator(text)

    @cached_property
    def translator(self):
        """ Returns the translate function for basic string translations. """
        translator = self.get_translate()

        if translator:
            return lambda text: text.interpolate(translator.gettext(text))

        return lambda text: text.interpolate(text)

    @cached_property
    def default_locale(self):
        """ Returns the default locale. """
        return self.app.default_locale

    @cached_property
    def locale(self):
        """ Returns the current locale of this request. """
        settings = self.app.settings

        locale = settings.i18n.locale_negotiator(self.app.locales, self)

        return locale or self.app.default_locale

    @cached_property
    def html_lang(self):
        """ The language code for the html tag. """
        return self.locale and self.locale.replace('_', '-') or ''

    def get_translate(self, for_chameleon=False):
        """ Returns the translate method to the given request, or None
        if no such method is availabe.

        :for_chameleon:
            True if the translate instance is used for chameleon (which is
            special).

        """
        if not self.app.locales:
            return None

        if for_chameleon:
            return self.app.chameleon_translations.get(self.locale)
        else:
            return self.app.translations.get(self.locale)

    def message(self, text, type):
        """ Adds a message with the given type to the messages list. This
        messages list may then be displayed by an application building on
        onegov.core.

        For example:

            http://foundation.zurb.com/docs/components/alert_boxes.html

        Four default types are defined on the request for easier use:

        :meth:`success`
        :meth:`warning`
        :meth:`info`
        :meth:`alert`

        The messages are stored with the session and to display them, the
        template using the messages should call :meth:`consume_messages`.

        """
        if not self.browser_session.has('messages'):
            self.browser_session.messages = [Message(text, type)]
        else:
            # this is a bit akward, but I don't see an easy way for this atm.
            # (otoh, usually there's going to be one message only)
            self.browser_session.messages = self.browser_session.messages + [
                Message(text, type)
            ]

    def consume_messages(self):
        """ Returns the messages, removing them from the session in the
        process. Call only if you can be reasonably sure that the user
        will see the messages.

        """
        yield from self.browser_session.pop('messages', ())

    def success(self, text):
        """ Adds a success message. """
        self.message(text, 'success')

    def warning(self, text):
        """ Adds a warning message. """
        self.message(text, 'warning')

    def info(self, text):
        """ Adds an info message. """
        self.message(text, 'info')

    def alert(self, text):
        """ Adds an alert message. """
        self.message(text, 'alert')

    @cached_property
    def is_logged_in(self):
        """ Returns True if the current request is logged in at all. """
        return self.identity is not NO_IDENTITY

    @cached_property
    def agent(self):
        """ Returns the user agent, parsed by ua-parser. """
        return user_agent_parser.Parse(self.user_agent or "")

    def has_permission(self, model, permission, user=None):
        """ Returns True if the current or given user has the given
        permission on the given model.

        """
        if permission is None:
            return True

        identity = self.identity
        if user:
            identity = self.app.application_bound_identity(
                user.id, user.group_id, user.role
            )

        return self.app._permits(identity, model, permission)

    def has_access_to_url(self, url):
        """ Returns true if the current user has access to the given url.

        The domain part of the url is completely ignored. This method should
        only be used if you have no other choice. Loading the object by
        url first is slower than if you can get the object otherwise.

        The initial use-case for this function is the to parameter in the
        login view. If the to-url is accessible anyway, we skip the login
        view.

        If we can't find a view for the url, a KeyError is thrown.

        """
        obj, view_name = self.app.object_by_path(url, with_view_name=True)

        if obj is None:
            raise KeyError("Could not find view for '{}'".format(url))

        permission = self.app.permission_by_view(obj, view_name)
        return self.has_permission(obj, permission)

    def exclude_invisible(self, models):
        """ Excludes models invisble to the current user from the list. """
        return [m for m in models if self.is_visible(m)]

    def is_visible(self, model):
        """ Returns True if the given model is visible to the current user.

        In addition to the `is_public` check, this checks if the model is
        secret and should therefore not be visible (though it can still be
        reached via URL).

        """
        if not self.is_public(model):
            return False

        if not self.is_private(model) and hasattr(model, 'access'):
            if model.access == 'secret':
                return False

        return True

    def is_public(self, model):
        """ Returns True if the current user has the Public permission for
        the given model.

        """
        return self.has_permission(model, self.app.modules.security.Public)

    def is_personal(self, model):
        """ Returns True if the current user has the Personal permission for
        the given model.

        """
        return self.has_permission(model, self.app.modules.security.Personal)

    def is_private(self, model):
        """ Returns True if the current user has the Private permission for
        the given model.

        """
        return self.has_permission(model, self.app.modules.security.Private)

    def is_secret(self, model):
        """ Returns True if the current user has the Secret permission for
        the given model.

        """
        return self.has_permission(model, self.app.modules.security.Secret)

    @cached_property
    def current_role(self):
        """ Returns the user-role of the current request, if logged in.
        Otherwise, None is returned.

        """
        return self.is_logged_in and self.identity.role or None

    def has_role(self, *roles):
        """ Returns true if the current user has any of the given roles. """

        assert roles and all(roles)
        return self.current_role in roles

    @cached_property
    def csrf_salt(self):
        if not self.browser_session.has('csrf_salt'):
            self.browser_session['csrf_salt'] = random_token()

        return self.browser_session['csrf_salt']

    def new_csrf_token(self, salt=None):
        """ Returns a new CSRF token. A CSRF token can be verified
        using :meth:`is_valid_csrf_token`.

        Note that forms do their own CSRF protection. This is meant
        for CSRF protection outside of forms.

        onegov.core uses the Synchronizer Token Pattern for CSRF protection:
        `<https://www.owasp.org/index.php/\
        Cross-Site_Request_Forgery_%28CSRF%29_Prevention_Cheat_Sheet>`_

        New CSRF tokens are signed usign a secret attached to the session
        (but not sent out to the user). Clients have to return the CSRF token
        they are given. The token has to match the secret, which the client
        doesn't know. So an attacker would have to get access to both the
        cookie and the html source to be able to forge a request.

        Since cookies are marked as HTTP only (no javascript access), this
        even prevents CSRF attack combined with XSS.

        """
        # no csrf tokens for anonymous users (there's not really a point
        # to doing this)
        if not self.is_logged_in:
            return ''

        assert salt or self.csrf_salt
        salt = salt or self.csrf_salt

        # use app.identity_secret here, because that's being used for
        # more.itsdangerous, which uses the same algorithm
        signer = TimestampSigner(self.identity_secret, salt=salt)

        return signer.sign(random_token())

    def assert_valid_csrf_token(self, signed_value=None, salt=None):
        """ Validates the given CSRF token and returns if it was
        created by :meth:`new_csrf_token`. If there's a mismatch, a 403 is
        raised.

        If no signed_value is passed, it is taken from
        request.params.get('csrf-token').

        """
        signed_value = signed_value or self.params.get('csrf-token')
        salt = salt or self.csrf_salt

        if not signed_value:
            raise HTTPForbidden()

        if not salt:
            raise HTTPForbidden()

        signer = TimestampSigner(self.identity_secret, salt=salt)
        try:
            signer.unsign(signed_value, max_age=self.app.csrf_time_limit)
        except (SignatureExpired, BadSignature):
            raise HTTPForbidden()

    def new_url_safe_token(self, data, salt=None):
        """ Returns a new URL safe token. A token can be deserialized
        using :meth:`load_url_safe_token`.

        """
        serializer = URLSafeTimedSerializer(self.identity_secret)
        return serializer.dumps(data, salt=salt)

    def load_url_safe_token(self, data, salt=None, max_age=3600):
        """ Deserialize a token created by :meth:`new_url_safe_token`.

        If the token is invalid, None is returned.

        """
        if not data:
            return None

        serializer = URLSafeTimedSerializer(self.identity_secret)
        try:
            return serializer.loads(data, salt=salt, max_age=max_age)
        except (SignatureExpired, BadSignature):
            return None
| 33.944099 | 79 | 0.634629 |
c34cd081b94daecadbc28c7a2816b0c0bd23c156
| 2,594 |
py
|
Python
|
3rdparty/webkit/Source/JavaScriptCore/Scripts/cssmin.py
|
mchiasson/PhaserNative
|
f867454602c395484bf730a7c43b9c586c102ac2
|
[
"MIT"
] | null | null | null |
3rdparty/webkit/Source/JavaScriptCore/Scripts/cssmin.py
|
mchiasson/PhaserNative
|
f867454602c395484bf730a7c43b9c586c102ac2
|
[
"MIT"
] | null | null | null |
3rdparty/webkit/Source/JavaScriptCore/Scripts/cssmin.py
|
mchiasson/PhaserNative
|
f867454602c395484bf730a7c43b9c586c102ac2
|
[
"MIT"
] | 1 |
2019-01-25T13:55:25.000Z
|
2019-01-25T13:55:25.000Z
|
#!/usr/bin/env python
# Copyright (C) 2013 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
import re
def cssminify(css):
    """Minify a CSS string by stripping comments and redundant whitespace.

    The substitutions are intentionally conservative: spaces that are
    significant in selectors (e.g. "a :focus", "b :matches(...)") and in
    calc()-style expressions are preserved.
    """
    substitutions = (
        (r"\/\*.*?\*\/", ""),                          # strip comments
        (r"\n", ""),                                   # strip newlines
        (r"\s+", " "),                                 # collapse whitespace runs
        (r"\s?([;{},~>!])\s?", r"\1"),                 # drop spaces around punctuation
        (r":\s", ":"),                                 # drop the space after a colon only; the space before one is significant in selectors
        (r"\s?([-+])(?:\s(?![0-9(])(?!var))", r"\1"),  # tighten +/- except when followed by a number, paren or var()
        (r";}", "}")                                   # a final semicolon before } is redundant
    )

    minified = css.replace("\r\n", "\n")
    for pattern, replacement in substitutions:
        minified = re.sub(pattern, replacement, minified,
                          flags=re.MULTILINE | re.UNICODE | re.DOTALL)

    return minified
if __name__ == "__main__":
    import sys

    # Under Python 3, force stdin to be decoded as UTF-8 regardless of the
    # locale, so multi-byte content in stylesheets survives the round trip.
    if sys.version_info[0] == 3 and sys.stdin.encoding != 'UTF-8':
        import io
        sys.stdin = io.TextIOWrapper(sys.stdin.buffer, encoding='UTF-8')
    # Filter mode: read CSS from stdin, write the minified result to stdout.
    sys.stdout.write(cssminify(sys.stdin.read()))
| 51.88 | 213 | 0.655359 |
825b21c8cfd435979399a40ff70ea26d798b757c
| 363 |
py
|
Python
|
Problems/Dynamic Programming/Hard/EditDistance/test_edit_distance.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | 1 |
2021-08-16T14:52:05.000Z
|
2021-08-16T14:52:05.000Z
|
Problems/Dynamic Programming/Hard/EditDistance/test_edit_distance.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
Problems/Dynamic Programming/Hard/EditDistance/test_edit_distance.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from edit_distance import editDistance
class Test(TestCase):
    """Unit tests for :func:`editDistance`."""

    def test_edit_distance(self):
        # (source, target, expected minimum number of edits)
        cases = (
            ("geek", "gesek", 1),
            ("cat", "cut", 1),
            ("sunday", "saturday", 3),
            ("FOOD", "MONEY", 4),
        )
        for source, target, expected in cases:
            self.assertEqual(editDistance(source, target), expected)
| 40.333333 | 63 | 0.69697 |
7d9aa95179cc71bf37aa661d24c5733337502ae9
| 2,528 |
py
|
Python
|
prompter.py
|
15menou/srocket
|
7650d25ef03499ce38ae957cd6ce356541f6cf61
|
[
"Apache-2.0"
] | null | null | null |
prompter.py
|
15menou/srocket
|
7650d25ef03499ce38ae957cd6ce356541f6cf61
|
[
"Apache-2.0"
] | null | null | null |
prompter.py
|
15menou/srocket
|
7650d25ef03499ce38ae957cd6ce356541f6cf61
|
[
"Apache-2.0"
] | null | null | null |
from data import Aspect
from log import Log
import tkinter as tk
import re
import time
class Prompter:
    """Scrolling message console rendered as a pair of Tk labels.

    Messages pushed through feed() are timestamped, word-wrapped to
    H_LIM characters and shown in a label that keeps only the last
    V_LIM rows; the full message history is kept in self.history.
    """
    H_LIM = 30 # Number of character in the same row. Does not include tag.
    V_LIM = 10 # Number of rows
    def __init__(self, init_frame, name=None):
        """Build the title and text labels inside init_frame.

        init_frame: parent Tk container the labels are gridded into.
        name: console title; defaults to 'Prompter'.
        """
        self.frame = init_frame
        if name is None:
            name = 'Prompter'
        self.name = name
        self.title = tk.Label(self.frame, text=self.name.strip())
        self.title.config(Aspect.frame_opt())
        self.title.config(justify=tk.CENTER)
        self.title.grid(row=0, column=0, sticky=Aspect.def_sticky)
        # history keeps every line ever fed; text only the visible window.
        self.history = list()
        self.text = list()
        self.label = tk.Label(self.frame)
        self.label.config(justify=tk.LEFT)
        self.label.config(Aspect.frame_opt())
        self.label.config(relief=tk.SUNKEN)
        # Pre-fill with blanks so the label is sized to V_LIM x H_LIM rows.
        self.feed(Prompter.V_LIM * Prompter.H_LIM * ' ')
        self.feed('{} Prompter initialization.'.format(self.name))
        self.label.grid(row=1, column=0, columnspan=5)
        Log.comment("Prompter '{}' initialized.".format(self.name))
    def get_text(self):
        """Return the visible rows joined for display in the label."""
        return " \n".join(self.text)
    def feed(self, txt):
        """Timestamp, wrap and append txt, then refresh the display."""
        tag_txt = self.get_tag_txt()
        # Continuation rows are indented with blanks roughly as wide as the
        # time tag (the 1.7 factor approximates the tag's on-screen width
        # in space characters — TODO confirm against the actual font).
        empty_tag = int(len(tag_txt) * 1.7) * ' '
        txt = re.sub('\n', ' ', txt)
        def cut(s):
            # Recursively split s into chunks of at most H_LIM characters,
            # hyphenating when a word is broken at the chunk boundary.
            if len(s) < Prompter.H_LIM:
                return [s]
            else:
                first_part = s[:Prompter.H_LIM]
                second_part = s[Prompter.H_LIM:]
                if not len(first_part) == 0 and not len(second_part) == 0:
                    if not(first_part[-1] == ' ' or second_part[0] == ' '):
                        first_part += '-'
                return [first_part] + cut(second_part)
        table = cut(txt)
        # First wrapped row carries the time tag, continuation rows the
        # blank indent of the same width.
        for k in range(len(table)):
            if k == 0:
                table[k] = tag_txt + ' ' + table[k]
            else:
                table[k] = empty_tag + ' ' + table[k]
        self.add_content(table)
        self.reshape()
        self.refresh()
    def add_content(self, tab):
        """Append wrapped rows to both the visible text and the history."""
        self.text += tab
        self.history += tab
    def reshape(self):
        """Trim the visible text to the last V_LIM rows."""
        n = len(self.text)
        if n > Prompter.V_LIM:
            self.text = self.text[n - Prompter.V_LIM:]
    def refresh(self):
        """Push the current visible text into the Tk label."""
        self.label.config(text=self.get_text())
    def get_tag_txt(self):
        """Return the '[HH:MM]' UTC time tag prefixed to each message."""
        txt = ''
        # type "%H:%M:%S" for full time.
        time_flag = time.strftime("%H:%M", time.gmtime())
        txt += '[{}]'.format(time_flag)
        return txt
| 31.209877 | 76 | 0.542326 |
7da97a4b26e2d6c08081677a580ce6ad5a06ebcc
| 5,376 |
py
|
Python
|
tests/utils/tensorflow/test_model_data_utils.py
|
chaneyjd/rasa
|
104a9591fc10b96eaa7fe402b6d64ca652b7ebe2
|
[
"Apache-2.0"
] | 30 |
2020-11-30T12:55:45.000Z
|
2022-01-20T02:53:03.000Z
|
tests/utils/tensorflow/test_model_data_utils.py
|
chaneyjd/rasa
|
104a9591fc10b96eaa7fe402b6d64ca652b7ebe2
|
[
"Apache-2.0"
] | 209 |
2020-03-18T18:28:12.000Z
|
2022-03-01T13:42:29.000Z
|
tests/utils/tensorflow/test_model_data_utils.py
|
chaneyjd/rasa
|
104a9591fc10b96eaa7fe402b6d64ca652b7ebe2
|
[
"Apache-2.0"
] | 10 |
2021-01-11T02:08:43.000Z
|
2021-11-19T09:12:46.000Z
|
import scipy.sparse
import numpy as np
import copy
from rasa.shared.nlu.training_data.formats.markdown import INTENT
from rasa.utils.tensorflow import model_data_utils
from rasa.shared.nlu.training_data.features import Features
from rasa.shared.nlu.constants import ACTION_NAME
from rasa.utils.tensorflow.constants import SENTENCE
shape = 100
def test_create_zero_features():
    """Zero features mirror the sparsity and shape of the input features."""
    # DENSE: a random dense sentence feature yields a dense zero vector.
    dense = Features(
        features=np.random.rand(shape),
        attribute=INTENT,
        feature_type=SENTENCE,
        origin=[],
    )
    result = model_data_utils.create_zero_features([[None, None, [dense]]])
    assert len(result) == 1
    assert result[0].is_dense()
    assert (result[0].features == np.zeros(shape)).all()

    # SPARSE: a random sparse sentence feature yields an all-zero sparse matrix.
    sparse = Features(
        features=scipy.sparse.coo_matrix(np.random.rand(shape)),
        attribute=INTENT,
        feature_type=SENTENCE,
        origin=[],
    )
    result = model_data_utils.create_zero_features([[None, None, [sparse]]])
    assert len(result) == 1
    assert result[0].is_sparse()
    assert (result[0].features != scipy.sparse.coo_matrix((1, shape))).nnz == 0
def test_surface_attributes():
    """surface_attributes groups per-attribute features across dialogues, padding with None."""
    intent_features = {
        INTENT: [
            Features(
                features=np.random.rand(shape),
                attribute=INTENT,
                feature_type=SENTENCE,
                origin=[],
            )
        ]
    }
    action_name_features = {
        ACTION_NAME: [
            Features(
                features=scipy.sparse.coo_matrix(np.random.rand(shape)),
                attribute=ACTION_NAME,
                feature_type=SENTENCE,
                origin=[],
            )
        ]
    }
    state_features = copy.deepcopy(intent_features)
    state_features.update(copy.deepcopy(action_name_features))
    # Two dialogues: one of length 3, one of length 2.
    dialogs = [[state_features, intent_features, {}], [{}, action_name_features]]
    surfaced = model_data_utils.surface_attributes(dialogs)

    assert INTENT in surfaced and ACTION_NAME in surfaced
    # One entry per dialogue; one slot per dialogue turn.
    for attribute in (INTENT, ACTION_NAME):
        assert len(surfaced.get(attribute)) == 2
        assert len(surfaced.get(attribute)[0]) == 3
        assert len(surfaced.get(attribute)[1]) == 2
    # Turns that lack the attribute are padded with None.
    assert surfaced.get(INTENT)[0][2] is None
    assert surfaced.get(INTENT)[1][0] is None
    assert surfaced.get(INTENT)[1][1] is None
    assert surfaced.get(ACTION_NAME)[0][1] is None
    assert surfaced.get(ACTION_NAME)[0][2] is None
    assert surfaced.get(ACTION_NAME)[1][0] is None
    # Surviving features are passed through unchanged.
    expected_intent = intent_features[INTENT][0].features
    for dialogue in surfaced.get(INTENT):
        for turn in dialogue:
            if turn is not None:
                assert (turn[0].features == expected_intent).all()
    expected_action = action_name_features[ACTION_NAME][0].features
    for dialogue in surfaced.get(ACTION_NAME):
        for turn in dialogue:
            if turn is not None:
                assert (turn[0].features != expected_action).nnz == 0
def test_map_tracker_features():
    """map_tracker_features builds attribute masks and feature arrays from a dialogue."""
    zero_features = np.zeros(shape)
    zero_feature = Features(
        features=zero_features, attribute=INTENT, feature_type=SENTENCE, origin=[]
    )
    zero_features_list = [zero_feature]
    # Six one-hot variants of the zero feature, each hot at a random index.
    one_hot_features = []
    for idx in np.random.randint(shape, size=6):
        features = copy.deepcopy(zero_feature)
        features.features[idx] = 1
        one_hot_features.append([features])
    # Dialogue history; None marks a turn without this attribute.
    tracker_features = [
        [one_hot_features[0], None, one_hot_features[1]],
        [None, None, one_hot_features[2]],
        [one_hot_features[3], one_hot_features[4], one_hot_features[5]],
    ]
    attribute_masks, dense_features, sparse_features = model_data_utils.map_tracker_features(
        tracker_features, zero_features_list
    )
    # Mask is 1 exactly where a turn carried features.
    expected_mask = np.array([[1, 0, 1], [0, 0, 1], [1, 1, 1]])
    assert np.all(np.squeeze(np.array(attribute_masks), 2) == expected_mask)
    assert np.array(dense_features["sentence"]).shape[-1] == zero_features.shape[-1]
    assert sparse_features == {}
| 36.080537 | 89 | 0.642485 |
816dee7e9951f5258ef805685b899e03f00f713f
| 266 |
py
|
Python
|
4_loop/3_Gitter.py
|
Coding-for-the-Arts/drawbot-samples-solutions
|
7191610d6efd7d788056070e7826d255b7ef496b
|
[
"CC0-1.0"
] | null | null | null |
4_loop/3_Gitter.py
|
Coding-for-the-Arts/drawbot-samples-solutions
|
7191610d6efd7d788056070e7826d255b7ef496b
|
[
"CC0-1.0"
] | null | null | null |
4_loop/3_Gitter.py
|
Coding-for-the-Arts/drawbot-samples-solutions
|
7191610d6efd7d788056070e7826d255b7ef496b
|
[
"CC0-1.0"
] | null | null | null |
"""
Gitter
- Schaffst du es mit einem zweiten Loop ein Gittermuster
herzustellen?
"""
newPage(300, 300)
for i in range(0, width(), 10):
stroke(0)
line((i,0),(i, width()))
for i in range(0, width(), 10):
stroke(0)
line((0,i),(width(),i))
| 15.647059 | 57 | 0.575188 |
81996e48d936abf4eebdb25aa7a17e8c1d951a23
| 13,157 |
py
|
Python
|
Packs/Jira/Integrations/JiraV2/test_data/raw_response.py
|
cstone112/content
|
7f039931b8cfc20e89df52d895440b7321149a0d
|
[
"MIT"
] | null | null | null |
Packs/Jira/Integrations/JiraV2/test_data/raw_response.py
|
cstone112/content
|
7f039931b8cfc20e89df52d895440b7321149a0d
|
[
"MIT"
] | 60 |
2022-02-24T14:54:47.000Z
|
2022-03-31T10:38:41.000Z
|
Packs/Jira/Integrations/JiraV2/test_data/raw_response.py
|
cstone112/content
|
7f039931b8cfc20e89df52d895440b7321149a0d
|
[
"MIT"
] | null | null | null |
QUERY_ISSUE_RESPONSE = {
"expand": "names,schema",
"issues": [
{
"expand": "operations,versionedRepresentations,editmeta,changelog,renderedFields",
"fields": {
"aggregateprogress": {
"progress": 0,
"total": 0
},
"aggregatetimeestimate": None,
"aggregatetimeoriginalestimate": None,
"aggregatetimespent": None,
"assignee": None,
"components": [],
"created": "2019-05-04T00:44:31.743+0300",
"creator": {
"accountId": "557058:fb80ffc0-b374-4260-99a0-ea0c140a4e76",
"accountType": "atlassian",
"active": True,
"avatarUrls": {
"16x16": "",
"24x24": "",
"32x32": "",
"48x48": ""
},
"displayName": "jon doe",
"emailAddress": "email",
"self": "https://demistodev.atlassian.net/rest/api/2/user?accountId=id",
"timeZone": "Asia"
},
"customfield_10000": "{}",
"customfield_10001": "John Doe",
"customfield_10002": None,
"customfield_10003": None,
"customfield_10004": None,
"customfield_10005": None,
"customfield_10006": None,
"customfield_10007": None,
"customfield_10008": None,
"customfield_10009": None,
"customfield_10013": None,
"customfield_10014": None,
"customfield_10015": {
"hasEpicLinkFieldDependency": False,
"nonEditableReason": {
"message": "The Parent Link is only available to Jira Premium users.",
"reason": "PLUGIN_LICENSE_ERROR"
},
"showField": False
},
"customfield_10016": None,
"customfield_10017": "10000_*:*_1_*:*_1023607418_*|*_10001_*:*_1_*:*_0",
"customfield_10018": None,
"customfield_10019": "0|i006cf:",
"customfield_10022": None,
"customfield_10023": [],
"customfield_10024": None,
"customfield_10025": None,
"customfield_10027": None,
"customfield_10029": None,
"customfield_10031": None,
"customfield_10032": None,
"customfield_10033": None,
"customfield_10034": None,
"customfield_10035": None,
"customfield_10036": None,
"customfield_10037": None,
"customfield_10038": None,
"customfield_10039": None,
"customfield_10040": None,
"customfield_10041": None,
"customfield_10042": None,
"customfield_10043": None,
"description": "hello",
"duedate": None,
"environment": None,
"fixVersions": [],
"issuelinks": [],
"issuetype": {
"avatarId": 10318,
"description": "A task that needs to be done.",
"iconUrl": "a",
"id": "10001",
"name": "Task",
"self": "https://localhost/rest/api/2/issuetype/10001",
"subtask": False
},
"labels": [],
"lastViewed": None,
"priority": {
"iconUrl": "https://localhost/images/icons/priorities/high.svg",
"id": "2",
"name": "High",
"self": "https://localhost/rest/api/2/priority/2"
},
"progress": {
"progress": 0,
"total": 0
},
"project": {
"avatarUrls": {
"16x16": "",
"24x24": "",
"32x32": "",
"48x48": ""
},
"id": "10005",
"key": "VIK",
"name": "VikTest",
"projectTypeKey": "software",
"self": "https://localhost/rest/api/2/project/10005",
"simplified": False
},
"reporter": {
"accountId": "557058:fb80ffc0-b374-4260-99a0-ea0c140a4e76",
"accountType": "atlassian",
"active": True,
"avatarUrls": {
"16x16": "",
"24x24": "",
"32x32": "",
"48x48": ""
},
"displayName": "displayName",
"emailAddress": "email",
"self": "https://localhost/rest/api/2/user?accountId=id",
"timeZone": "Asia/Jerusalem"
},
"resolution": {
"description": "Work has been completed on this issue.",
"id": "10000",
"name": "Done",
"self": "https://localhost/rest/api/2/resolution/10000"
},
"resolutiondate": "2019-05-15T21:04:39.147+0300",
"security": None,
"status": {
"description": "",
"iconUrl": "https://localhost/images/icons/status_generic.gif",
"id": "10000",
"name": "To Do",
"self": "https://localhost/rest/api/2/status/10000",
"statusCategory": {
"colorName": "blue-gray",
"id": 2,
"key": "new",
"name": "To Do",
"self": "https://localhost/rest/api/2/statuscategory/2"
}
},
"statuscategorychangedate": "2019-05-15T21:24:07.222+0300",
"subtasks": [],
"summary": "JiraTestMohitM",
"timeestimate": None,
"timeoriginalestimate": None,
"timespent": None,
"updated": "2019-05-15T21:24:07.222+0300",
"versions": [],
"votes": {
"hasVoted": False,
"self": "https://localhost/rest/api/2/issue/VIK-3/votes",
"votes": 0
},
"watches": {
"isWatching": True,
"self": "https://localhost/rest/api/2/issue/VIK-3/watchers",
"watchCount": 1
},
"workratio": -1
},
"id": "12652",
"key": "VIK-3",
"self": "https://localhost/rest/api/latest/issue/12652"
}
],
"maxResults": 1,
"startAt": 0,
"total": 1115
}
GET_ISSUE_RESPONSE = {
'expand': 'renderedFields,names,schema,operations,editmeta,changelog,versionedRepresentations,customfield_10022.requestTypePractice',
'id': '19141', 'key': 'VIK-238',
'fields': {
'statuscategorychangedate': '2021-04-04T12:25:48.335+0300',
'issuetype': {'id': '10001',
'description': 'A task that needs to be done.',
'name': 'Task', 'subtask': False, 'avatarId': 10318, 'hierarchyLevel': 0},
'timespent': None,
'project': {'id': '10005',
'key': 'VIK', 'name': 'VikTest', 'projectTypeKey': 'software', 'simplified': False,
'avatarUrls': {
'48x48': ''}},
'customfield_10031': None, 'customfield_10032': None, 'fixVersions': [],
'customfield_10033': None,
'customfield_10034': None, 'aggregatetimespent': None, 'resolution': None,
'customfield_10035': None,
'customfield_10036': None, 'customfield_10037': None, 'customfield_10027': None,
'customfield_10029': None, 'resolutiondate': None, 'workratio': -1, 'lastViewed': None,
'issuerestriction': {'issuerestrictions': {}, 'shouldDisplay': False},
'watches': {'self': '',
'watchCount': 1, 'isWatching': True},
'created': '2021-04-04T12:25:48.114+0300',
'customfield_10022': None,
'priority': {'self': '',
'iconUrl': '',
'name': 'Medium', 'id': '3'}, 'customfield_10023': [],
'customfield_10024': None, 'customfield_10025': None, 'labels': [],
'customfield_10016': None,
'customfield_10017': None, 'customfield_10018': None, 'customfield_10019': '0|i00g5j:',
'aggregatetimeoriginalestimate': None, 'timeestimate': None, 'versions': [],
'issuelinks': [],
'assignee': None, 'updated': '2021-04-04T12:49:43.546+0300',
'status': {'self': '',
'description': '',
'iconUrl': '',
'name': 'To Do', 'id': '10000',
'statusCategory': {
'self': '',
'id': 2, 'key': 'new', 'colorName': 'blue-gray', 'name': 'To Do'}},
'components': [], 'timeoriginalestimate': None,
'description': 'changeing again again\n\nagain gain',
'customfield_10013': None, 'customfield_10014': None,
'customfield_10015': {'hasEpicLinkFieldDependency': False, 'showField': False,
'nonEditableReason': {'reason': 'PLUGIN_LICENSE_ERROR',
'message': 'The Parent Link is only available to Jira Premium users.'}},
'timetracking': {}, 'customfield_10005': None, 'customfield_10006': None,
'security': None,
'customfield_10007': None, 'customfield_10008': None, 'customfield_10009': None,
'attachment': [
{'self': '',
'content': 'https://someurl.com',
'id': '15451',
'filename': '[email protected]', 'author': {
'accountId': 'accountid',
'emailAddress': 'email',
'avatarUrls': {
'48x48': ''},
'displayName': 'displayName', 'active': True, 'timeZone': 'Asia/Jerusalem',
'accountType': 'atlassian'},
'created': '2021-04-04T12:49:42.881+0300', 'size': 8225,
'mimeType': 'application/json',
}],
'aggregatetimeestimate': None, 'summary': 'test master1',
'creator': {
'accountId': 'accountid',
'accountType': 'atlassian',
'active': True,
'avatarUrls': {
'16x16': '',
'24x24': '',
'32x32': '',
'48x48': ''
},
'displayName': 'jon doe',
'emailAddress': 'email',
'self': 'https://localhost/rest/api/2/user?accountId=id',
'timeZone': 'Asia'
}
}
}
# Sample payload of Jira's field-listing endpoint: one custom field plus two
# system fields, in the shape the integration receives them.
FIELDS_RESPONSE = [
    {'id': 'customfield_10001', 'key': 'customfield_10001', 'name': 'Owner', 'untranslatedName': 'Owner',
     'custom': True, 'orderable': True, 'navigable': True, 'searchable': True,
     'clauseNames': ['cf[10001]', 'Owner', 'Owner[User Picker (single user)]'],
     'schema': {'type': 'user', 'custom': 'com.atlassian.jira.plugin.system.customfieldtypes:userpicker',
                'customId': 10001}},
    {'id': 'resolution', 'key': 'resolution', 'name': 'Resolution', 'custom': False, 'orderable': True,
     'navigable': True, 'searchable': True, 'clauseNames': ['resolution'],
     'schema': {'type': 'resolution', 'system': 'resolution'}},
    {'id': 'resolutiondate', 'key': 'resolutiondate', 'name': 'Resolved', 'custom': False, 'orderable': False,
     'navigable': True, 'searchable': True, 'clauseNames': ['resolutiondate', 'resolved'],
     'schema': {'type': 'datetime', 'system': 'resolutiondate'}}
]
# Expected field-id -> display-name mapping derived from FIELDS_RESPONSE.
EXPECTED_RESP = {
    'customfield_10001': 'Owner',
    'resolution': 'Resolution',
    'resolutiondate': 'Resolved'
}
# Sample Jira attachment entity (single element of an issue's "attachment"
# list) used by the attachment-handling tests.
ATTACHMENT = {
    "self": "https://localhost/rest/api/2/attachment/16188",
    "id": "16188",
    "filename": "test",
    "author": {
        "self": "https://localhost/rest/api/2/user?accountId=557058%3Afb80ffc0-b374-4260-99a0"
                "-ea0c140a4e76",
        "accountId": "account id",
        "emailAddress": "mail",
        "avatarUrls": {},
        "displayName": "name",
        "active": True,
        "timeZone": "Asia/Jerusalem",
        "accountType": "atlassian"
    },
    "created": "2021-11-17T12:40:06.700+0200",
    "size": 4,
    "mimeType": "binary/octet-stream",
    "content": "https://localhost/rest/api/2/attachment/content/16188"
}
| 43.279605 | 137 | 0.454891 |
c4a13038316d44f93ab753312eb6725827a6d46d
| 15,882 |
py
|
Python
|
Packs/TheHiveProject/Integrations/TheHiveProject/TheHiveProject_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/TheHiveProject/Integrations/TheHiveProject/TheHiveProject_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/TheHiveProject/Integrations/TheHiveProject/TheHiveProject_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import json
import io
def util_load_json(path):
    """Load and deserialize a UTF-8 encoded JSON file.

    Args:
        path: Path to the JSON fixture file on disk.

    Returns:
        The deserialized JSON content (typically a dict or list).
    """
    # Built-in open() handles text decoding on Python 3 (io.open is a
    # legacy alias), and json.load streams from the file object instead
    # of materializing the whole file as a string first.
    with open(path, mode='r', encoding='utf-8') as f:
        return json.load(f)
def test_list_cases_command(requests_mock):
    """list_cases_command returns every case with TheHive context metadata."""
    from TheHiveProject import list_cases_command, Client

    expected_cases = util_load_json('test_data/cases_list.json')
    requests_mock.get('https://test/api/case', json=expected_cases)
    requests_mock.get('https://test/api/status',
                      json={'versions': {'TheHive': 'version'}})
    # Cases are enriched with tasks and observables; return none of either.
    requests_mock.post('https://test/api/case/task/_search', json=[])
    requests_mock.post('https://test/api/case/artifact/_search', json=[])
    client = Client(
        base_url='https://test/api',
        verify=False,
        headers={'Authorization': 'Bearer APIKEY'},
        proxy=False,
        mirroring='both',
    )
    response = list_cases_command(client, {})
    assert len(response.outputs) == len(expected_cases)
    assert response.outputs_prefix == 'TheHive.Cases'
    assert response.outputs_key_field == 'id'
def test_get_case_command(requests_mock):
    """get_case_command fetches a single case by id."""
    from TheHiveProject import get_case_command, Client

    case = util_load_json('test_data/cases_list.json')[0]
    requests_mock.get('https://test/api/case/1', json=case)
    requests_mock.get('https://test/api/status',
                      json={'versions': {'TheHive': 'version'}})
    # The case is enriched with tasks and observables; return none of either.
    requests_mock.post('https://test/api/case/task/_search', json=[])
    requests_mock.post('https://test/api/case/artifact/_search', json=[])
    client = Client(
        base_url='https://test/api',
        verify=False,
        headers={'Authorization': 'Bearer APIKEY'},
        proxy=False,
        mirroring='both',
    )
    response = get_case_command(client, {'id': '1'})
    assert response.outputs == case
    assert response.outputs_prefix == 'TheHive.Cases'
    assert response.outputs_key_field == 'id'
def test_update_case_command(requests_mock):
from TheHiveProject import update_case_command, Client
mock_original_response = util_load_json('test_data/cases_list.json')
mock_response = mock_original_response.copy()
mock_response[0]["title"] = "updated title"
mock_response[0]["description"] = "updated description"
requests_mock.get('https://test/api/case/1',
json=mock_original_response[0])
requests_mock.patch('https://test/api/case/1',
json=mock_response[0])
requests_mock.get('https://test/api/status',
json={'versions': {'TheHive': 'version'}})
requests_mock.post('https://test/api/case/task/_search',
json=[])
requests_mock.post('https://test/api/case/artifact/_search',
json=[])
client = Client(
base_url='https://test/api',
verify=False,
headers={
'Authorization': 'Bearer APIKEY'
},
proxy=False,
mirroring='both'
)
args = {'id': '1',
'title': 'updated title',
'description': 'updated description'}
response = update_case_command(client, args)
assert response.outputs == mock_response[0]
assert response.outputs_prefix == 'TheHive.Cases'
assert response.outputs_key_field == 'id'
def test_create_case_command(requests_mock):
from TheHiveProject import create_case_command, Client
mock_response = {"_id": "4",
"id": "4",
"instance": "",
"mirroring": "both",
"observables": [],
"caseId": "4",
"createdBy": "[email protected]",
"createdAt": "2021-07-22T09:15:09Z",
"updatedAt": "2021-07-22T09:15:09Z",
"_type": "case",
"title": "added case title",
"description": "added case description",
"severity": 2,
"status": "Open",
"tasks": [],
"owner": "[email protected]"}
requests_mock.post('https://test/api/case',
json=mock_response)
requests_mock.get('https://test/api/status',
json={'versions': {'TheHive': 'version'}})
client = Client(
base_url='https://test/api',
verify=False,
headers={
'Authorization': 'Bearer APIKEY'
},
proxy=False,
mirroring='both'
)
args = {'title': 'added case title',
'description': 'added case description',
'owner': '[email protected]'}
response = create_case_command(client, args)
assert response.outputs == mock_response
assert response.outputs_prefix == 'TheHive.Cases'
assert response.outputs_key_field == 'id'
def test_merge_cases_command(requests_mock):
    """merge_cases_command merges two cases and returns the merged case."""
    from TheHiveProject import merge_cases_command, Client

    merged_case = util_load_json('test_data/merged_cases.json')
    requests_mock.post('https://test/api/case/1/_merge/2', json=merged_case)
    requests_mock.get('https://test/api/status',
                      json={'versions': {'TheHive': 'version'}})
    client = Client(
        base_url='https://test/api',
        verify=False,
        headers={'Authorization': 'Bearer APIKEY'},
        proxy=False,
        mirroring='both',
    )
    response = merge_cases_command(client, {'firstCaseID': '1',
                                            'secondCaseID': '2'})
    assert response.outputs == merged_case
    assert response.outputs_prefix == 'TheHive.Cases'
    assert response.outputs_key_field == 'id'
def test_get_case_tasks_command(requests_mock):
from TheHiveProject import get_case_tasks_command, Client
mock_response = util_load_json('test_data/cases_list.json')
requests_mock.post('https://test/api/case/task/_search',
json=mock_response[1]['tasks'])
requests_mock.get('https://test/api/status',
json={'versions': {'TheHive': 'version'}})
requests_mock.get('https://test/api/case/2',
json=mock_response[1])
requests_mock.get('https://test/api/case/task/1/log',
json=[])
requests_mock.get('https://test/api/case/task/2/log',
json=[])
requests_mock.post('https://test/api/case/artifact/_search',
json=[])
client = Client(
base_url='https://test/api',
verify=False,
headers={
'Authorization': 'Bearer APIKEY'
},
proxy=False,
mirroring='both'
)
args = {'id': '2'}
response = get_case_tasks_command(client, args)
assert response.outputs == mock_response[1]['tasks']
assert response.outputs_prefix == 'TheHive.Tasks'
assert response.outputs_key_field == 'id'
def test_get_task_command(requests_mock):
from TheHiveProject import get_task_command, Client
mock_response = util_load_json('test_data/cases_list.json')[1]['tasks'][0]
requests_mock.get('https://test/api/status',
json={'versions': {'TheHive': 'version'}})
requests_mock.get('https://test/api/task/1',
json=mock_response)
requests_mock.get('https://test/api/case/task/1',
json=mock_response)
requests_mock.get('https://test/api/case/task/1/log',
json=[])
client = Client(
base_url='https://test/api',
verify=False,
headers={
'Authorization': 'Bearer APIKEY'
},
proxy=False,
mirroring='both'
)
args = {'id': '1'}
response = get_task_command(client, args)
assert response.outputs == mock_response
assert response.outputs_prefix == 'TheHive.Tasks'
assert response.outputs_key_field == 'id'
def test_update_task_command(requests_mock):
from TheHiveProject import update_task_command, Client
mock_original_response = util_load_json('test_data/cases_list.json')[1]['tasks'][0]
mock_response = mock_original_response.copy()
mock_response['title'] = 'updated title'
requests_mock.get('https://test/api/status',
json={'versions': {'TheHive': 'version'}})
requests_mock.get('https://test/api/task/1',
json=mock_original_response)
requests_mock.get('https://test/api/case/task/1',
json=mock_original_response)
requests_mock.patch('https://test/api/case/task/1',
json=mock_response)
requests_mock.get('https://test/api/case/task/1/log',
json=[])
client = Client(
base_url='https://test/api',
verify=False,
headers={
'Authorization': 'Bearer APIKEY'
},
proxy=False,
mirroring='both'
)
args = {'id': '1'}
response = update_task_command(client, args)
assert response.outputs == mock_response
assert response.outputs_prefix == 'TheHive.Tasks'
assert response.outputs_key_field == 'id'
def test_get_users_list_command(requests_mock):
    """get_users_list_command lists all TheHive users."""
    from TheHiveProject import get_users_list_command, Client

    users = util_load_json('test_data/users_list.json')
    requests_mock.get('https://test/api/status',
                      json={'versions': {'TheHive': 'version'}})
    requests_mock.post('https://test/api/user/_search', json=users)
    client = Client(
        base_url='https://test/api',
        verify=False,
        headers={'Authorization': 'Bearer APIKEY'},
        proxy=False,
        mirroring='both',
    )
    response = get_users_list_command(client, {})
    assert response.outputs == users
    assert response.outputs_prefix == 'TheHive.Users'
    assert response.outputs_key_field == 'id'
def test_get_user_command(requests_mock):
from TheHiveProject import get_user_command, Client
mock_response = util_load_json('test_data/users_list.json')[0]
requests_mock.get('https://test/api/status',
json={'versions': {'TheHive': 'version'}})
requests_mock.get('https://test/api/user/1',
json=mock_response)
client = Client(
base_url='https://test/api',
verify=False,
headers={
'Authorization': 'Bearer APIKEY'
},
proxy=False,
mirroring='both'
)
response = get_user_command(client, {'id': '1'})
assert response.outputs == mock_response
assert response.outputs_prefix == 'TheHive.Users'
assert response.outputs_key_field == 'id'
def test_create_local_user_command(requests_mock):
from TheHiveProject import create_local_user_command, Client
mock_response = util_load_json('test_data/added_user.json')
requests_mock.get('https://test/api/status',
json={'versions': {'TheHive': 'version'}})
requests_mock.post('https://test/api/user',
json=mock_response)
client = Client(
base_url='https://test/api',
verify=False,
headers={
'Authorization': 'Bearer APIKEY'
},
proxy=False,
mirroring='both'
)
args = {
'login': '[email protected]',
'name': 'Test User',
'roles': ["read", "admin"],
'password': '1234'
}
response = create_local_user_command(client, args)
assert response.outputs == mock_response
assert response.outputs_prefix == 'TheHive.Users'
assert response.outputs_key_field == 'id'
def test_list_observables_command(requests_mock):
from TheHiveProject import list_observables_command, Client
mock_response = util_load_json('test_data/cases_list.json')[2]
requests_mock.get('https://test/api/status',
json={'versions': {'TheHive': 'version'}})
requests_mock.get('https://test/api/case/3',
json=mock_response)
requests_mock.post('https://test/api/case/task/_search',
json=[])
requests_mock.post('https://test/api/case/artifact/_search',
json=mock_response['observables'])
client = Client(
base_url='https://test/api',
verify=False,
headers={
'Authorization': 'Bearer APIKEY'
},
proxy=False,
mirroring='both'
)
args = {
'id': '3'
}
response = list_observables_command(client, args)
assert response.outputs == mock_response['observables']
assert response.outputs_prefix == 'TheHive.Observables'
assert response.outputs_key_field == 'id'
def test_create_observable_command(requests_mock):
from TheHiveProject import create_observable_command, Client
mock_original_response = util_load_json('test_data/cases_list.json')[0]
mock_response = mock_original_response.copy()
mock_response['observables'].append(
{
"_id": "4",
"id": "4",
"createdBy": "[email protected]",
"createdAt": 1627206318617,
"_type": "case_artifact",
"dataType": "domain",
"data": "datas for test",
"startDate": 1627206318617,
"tlp": 2,
"tags": [],
"ioc": False,
"sighted": False,
"message": "messages for test",
"reports": {},
"stats": {}
}
)
requests_mock.get('https://test/api/status',
json={'versions': {'TheHive': 'version'}})
requests_mock.get('https://test/api/case/1',
json=mock_original_response)
requests_mock.post('https://test/api/case/task/_search',
json=[])
requests_mock.post('https://test/api/case/artifact/_search',
json=[])
requests_mock.post('https://test/api/case/1/artifact',
json=mock_response['observables'])
client = Client(
base_url='https://test/api',
verify=False,
headers={
'Authorization': 'Bearer APIKEY'
},
proxy=False,
mirroring='both'
)
args = {
'id': '1'
}
response = create_observable_command(client, args)
assert response.outputs == mock_response['observables']
assert response.outputs_prefix == 'TheHive.Observables'
assert response.outputs_key_field == 'id'
def test_update_observable_command(requests_mock):
from TheHiveProject import update_observable_command, Client
mock_original_response = util_load_json('test_data/cases_list.json')[2]
mock_response = mock_original_response.copy()
mock_response['observables'][0]['message'] = "update message for test"
requests_mock.get('https://test/api/status',
json={'versions': {'TheHive': 'version'}})
requests_mock.patch('https://test/api/case/artifact/1',
json=mock_response['observables'][0])
client = Client(
base_url='https://test/api',
verify=False,
headers={
'Authorization': 'Bearer APIKEY'
},
proxy=False,
mirroring='both'
)
args = {
'id': '1',
'message': "update message for test"
}
response = update_observable_command(client, args)
assert response.outputs['message'] == "update message for test"
assert "Updated Observable" in response.readable_output
assert response.outputs_prefix == 'TheHive.Observables'
assert response.outputs_key_field == 'id'
| 30.136622 | 87 | 0.591424 |
f20a7120affd6bdcca022e14213658a558b5e1ff
| 4,652 |
py
|
Python
|
tools/pythonpkg/tests/fast/arrow/test_tpch.py
|
AldoMyrtaj/duckdb
|
3aa4978a2ceab8df25e4b20c388bcd7629de73ed
|
[
"MIT"
] | 1,824 |
2021-04-06T19:21:01.000Z
|
2022-03-31T14:29:50.000Z
|
tools/pythonpkg/tests/fast/arrow/test_tpch.py
|
AldoMyrtaj/duckdb
|
3aa4978a2ceab8df25e4b20c388bcd7629de73ed
|
[
"MIT"
] | 1,310 |
2021-04-06T16:04:52.000Z
|
2022-03-31T13:52:53.000Z
|
tools/pythonpkg/tests/fast/arrow/test_tpch.py
|
AldoMyrtaj/duckdb
|
3aa4978a2ceab8df25e4b20c388bcd7629de73ed
|
[
"MIT"
] | 270 |
2021-04-09T06:18:28.000Z
|
2022-03-31T11:55:37.000Z
|
import duckdb
# pyarrow (and numpy) are optional here: when they cannot be imported every
# test in this module becomes a silent no-op via the can_run flag.
# NOTE(review): this is a bare except, so ANY failure while importing (not
# just a missing package) disables the tests — consider except ImportError.
try:
    import pyarrow
    import pyarrow.parquet
    import numpy as np
    can_run = True
except:
    can_run = False
def munge(cell):
try:
cell = round(float(cell), 2)
except (ValueError, TypeError):
cell = str(cell)
return cell
def check_result(result,answers):
for q_res in answers:
db_result = result.fetchone()
cq_results = q_res.split("|")
# The end of the rows, continue
if cq_results == [''] and str(db_result) == 'None' or str(db_result[0]) == 'None':
continue
ans_result = [munge(cell) for cell in cq_results]
db_result = [munge(cell) for cell in db_result]
assert ans_result == db_result
return True
class TestTPCHArrow(object):
    """TPC-H correctness tests that round-trip the generated tables through
    Arrow before running the 22 benchmark queries.

    The three test methods previously duplicated the load/run logic; it is
    factored into ``_load_tpch_via_arrow`` and ``_run_all_queries`` so each
    test only states its scale factor and Arrow conversion mode.
    """

    TPCH_TABLES = ['part', 'partsupp', 'supplier', 'customer', 'lineitem', 'orders', 'nation', 'region']

    @classmethod
    def _load_tpch_via_arrow(cls, duckdb_conn, scale_factor, batch_size=None):
        """Generate TPC-H data at ``scale_factor``, export each table to an
        Arrow table (optionally re-assembled from record batches of
        ``batch_size`` rows) and re-register it under the original name."""
        duckdb_conn.execute("CALL dbgen(sf=" + str(scale_factor) + ");")
        arrow_tables = []
        for tpch_table in cls.TPCH_TABLES:
            duck_tbl = duckdb_conn.table(tpch_table)
            arrow_tbl = duck_tbl.arrow()
            if batch_size is not None:
                # Exercise the record-batch path as well.
                arrow_tbl = pyarrow.Table.from_batches(arrow_tbl.to_batches(batch_size))
            # Keep a reference alive so DuckDB can scan the Arrow data.
            arrow_tables.append(arrow_tbl)
            duck_arrow_table = duckdb_conn.from_arrow_table(arrow_tables[-1])
            duckdb_conn.execute("DROP TABLE " + tpch_table)
            duck_arrow_table.create(tpch_table)

    @staticmethod
    def _run_all_queries(duckdb_conn, scale_factor, label=""):
        """Run the 22 TPC-H queries and compare each result with the
        reference answers shipped with DuckDB for ``scale_factor``."""
        for i in range(1, 23):
            query = duckdb_conn.execute(
                "select query from tpch_queries() where query_nr=" + str(i)).fetchone()[0]
            answers = duckdb_conn.execute(
                "select answer from tpch_answers() where scale_factor = " + str(scale_factor)
                + " and query_nr=" + str(i)).fetchone()[0].split("\n")[1:]
            result = duckdb_conn.execute(query)
            assert check_result(result, answers)
            print("Query " + str(i) + " works" + label)

    def test_tpch_arrow(self, duckdb_cursor):
        if not can_run:
            return
        duckdb_conn = duckdb.connect()
        self._load_tpch_via_arrow(duckdb_conn, 0.01)
        self._run_all_queries(duckdb_conn, 0.01)

    def test_tpch_arrow_01(self, duckdb_cursor):
        if not can_run:
            return
        duckdb_conn = duckdb.connect()
        self._load_tpch_via_arrow(duckdb_conn, 0.1)
        self._run_all_queries(duckdb_conn, 0.1)

    def test_tpch_arrow_batch(self, duckdb_cursor):
        if not can_run:
            return
        duckdb_conn = duckdb.connect()
        self._load_tpch_via_arrow(duckdb_conn, 0.01, batch_size=10)
        self._run_all_queries(duckdb_conn, 0.01)
        # Re-run the same workload with multiple threads to exercise the
        # parallel execution paths.
        duckdb_conn.execute("PRAGMA threads=4")
        duckdb_conn.execute("PRAGMA verify_parallelism")
        self._run_all_queries(duckdb_conn, 0.01, label=" (Parallel)")
| 41.168142 | 155 | 0.622743 |
485ec9242818ff29033f359d413791762b7121d3
| 232 |
py
|
Python
|
authApp/views/__init__.py
|
xlausae/Web-Service
|
fbc4b45f34fe5ac69d8da2ffe09e5c32046e27d5
|
[
"MIT"
] | null | null | null |
authApp/views/__init__.py
|
xlausae/Web-Service
|
fbc4b45f34fe5ac69d8da2ffe09e5c32046e27d5
|
[
"MIT"
] | 1 |
2022-01-05T23:52:37.000Z
|
2022-01-05T23:52:37.000Z
|
authApp/views/__init__.py
|
xlausae/Web-Service
|
fbc4b45f34fe5ac69d8da2ffe09e5c32046e27d5
|
[
"MIT"
] | null | null | null |
from .createUserView import UserCreateView
from .detailUserView import UserDetailView
from .transactionsView import TransactionCreateView, TransactionDeleteView, TransactionDetailView, TransactionUpdateView, TransactionsAccountView
| 58 | 145 | 0.900862 |
d2313b95a4f98218a3c51a4a61a6ad6dfefc3197
| 389 |
py
|
Python
|
Packs/Arcanna/Scripts/PrepareArcannaRawJson/PrepareArcannaRawJson.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/Arcanna/Scripts/PrepareArcannaRawJson/PrepareArcannaRawJson.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/Arcanna/Scripts/PrepareArcannaRawJson/PrepareArcannaRawJson.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
import json
# Context key under which the serialized JSON object is stored.
JSON_CONTEXT_KEY = "JsonObject"

raw_json = demisto.args()['input']
demisto.debug(raw_json)
parsed = json.loads(raw_json)
# When the payload is an Elasticsearch-style hit, unwrap the "_source" document.
if "_source" in parsed:
    parsed = parsed["_source"]
serialized = json.dumps(parsed)
demisto.setContext(JSON_CONTEXT_KEY, serialized)
demisto.results(serialized)
| 19.45 | 46 | 0.74036 |
965eb667b722ff337f8561d51e25ed61e6384daf
| 1,101 |
py
|
Python
|
src/mvn-search.py
|
lburgazzoli/lb-py-misc
|
714bfdaf557f7a05d35a6349c78cc8c281007345
|
[
"Apache-2.0"
] | null | null | null |
src/mvn-search.py
|
lburgazzoli/lb-py-misc
|
714bfdaf557f7a05d35a6349c78cc8c281007345
|
[
"Apache-2.0"
] | null | null | null |
src/mvn-search.py
|
lburgazzoli/lb-py-misc
|
714bfdaf557f7a05d35a6349c78cc8c281007345
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
#
# Copyright 2014 lb.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import requests
import os
import sys
murl = 'http://search.maven.org/solrsearch/select'
pars = {'rows': '20', 'wt': 'json', 'q': sys.argv[1]}
prxs = {}
if os.environ.get('http_proxy'):
prxs['http'] = os.environ.get('http_proxy')
if os.environ.get('http_proxy'):
prxs['https'] = os.environ.get('https_proxy')
r = requests.get(murl, params=pars, proxies=prxs)
for result in r.json()['response']['docs']:
print("{}:{}:{}".format(result['g'], result['a'], result['latestVersion']))
| 32.382353 | 79 | 0.693915 |
f7048417b44c70cf49dfe6ef12bd1218e7cfb1d0
| 2,606 |
py
|
Python
|
webinterface/tests/unit_tests/test_model_Assignment.py
|
monoclecat/Cleaning-Schedule-generator
|
b12fa8a6f834a89b805bf062a0df45279a7a8796
|
[
"MIT"
] | 2 |
2021-11-28T23:04:00.000Z
|
2022-01-13T19:47:45.000Z
|
webinterface/tests/unit_tests/test_model_Assignment.py
|
monoclecat/Cleaning-Schedule-generator
|
b12fa8a6f834a89b805bf062a0df45279a7a8796
|
[
"MIT"
] | 25 |
2020-03-29T14:40:46.000Z
|
2021-09-22T17:37:15.000Z
|
webinterface/tests/unit_tests/test_model_Assignment.py
|
monoclecat/cleaning-schedule-management-system
|
b12fa8a6f834a89b805bf062a0df45279a7a8796
|
[
"MIT"
] | 1 |
2020-07-04T11:42:17.000Z
|
2020-07-04T11:42:17.000Z
|
from django.test import TestCase
from webinterface.models import *
class AssignmentTest(TestCase):
    """Unit tests for the Assignment model and its week/cleaner helpers."""

    @classmethod
    def setUpTestData(cls):
        # Config
        cls.reference_week = 2500
        # Schedule
        cls.schedule = Schedule.objects.create(name="schedule", cleaners_per_date=2, frequency=2, weekday=3)
        # Cleaners
        cls.cleaner1 = Cleaner.objects.create(name="cleaner1")
        cls.cleaner2 = Cleaner.objects.create(name="cleaner2")
        cls.cleaner3 = Cleaner.objects.create(name="cleaner3")
        # CleaningDays
        cls.cleaning_week1 = CleaningWeek.objects.create(week=cls.reference_week, schedule=cls.schedule)
        cls.cleaning_week2 = CleaningWeek.objects.create(week=cls.reference_week + 1, schedule=cls.schedule)
        # Assignments: cleaner1 and cleaner2 share week 1; cleaner3 is alone in week 2.
        cls.assignment1 = Assignment.objects.create(
            cleaner=cls.cleaner1, schedule=cls.schedule, cleaning_week=cls.cleaning_week1)
        cls.assignment2 = Assignment.objects.create(
            cleaner=cls.cleaner2, schedule=cls.schedule, cleaning_week=cls.cleaning_week1)
        cls.assignment3 = Assignment.objects.create(
            cleaner=cls.cleaner3, schedule=cls.schedule, cleaning_week=cls.cleaning_week2)
        # DutySwitch: only assignment1 has a pending switch request.
        cls.dutyswitch = DutySwitch.objects.create(requester_assignment=cls.assignment1)

    def test__str(self):
        # The string form includes schedule name, cleaner name and the date.
        self.assertIn(self.schedule.name, self.assignment1.__str__())
        self.assertIn(self.cleaner1.name, self.assignment1.__str__())
        self.assertIn(self.assignment1.assignment_date().strftime('%d. %b %Y'), self.assignment1.__str__())

    def test__assignment_date(self):
        # Date = Monday of the epoch week plus the schedule's weekday offset.
        self.assertEqual(self.assignment1.assignment_date(),
                         epoch_week_to_monday(self.reference_week) + datetime.timedelta(days=self.schedule.weekday))

    def test__all_cleaners_in_week_for_schedule(self):
        all_cleaners = self.assignment1.all_cleaners_in_week_for_schedule()
        self.assertIn(self.cleaner1, all_cleaners)
        self.assertIn(self.cleaner2, all_cleaners)
        self.assertNotIn(self.cleaner3, all_cleaners)

    def test__other_cleaners_in_week_for_schedule(self):
        # Same as above but excluding the assignment's own cleaner.
        other_cleaners = self.assignment1.other_cleaners_in_week_for_schedule()
        self.assertNotIn(self.cleaner1, other_cleaners)
        self.assertIn(self.cleaner2, other_cleaners)
        self.assertNotIn(self.cleaner3, other_cleaners)

    def test__switch_requested(self):
        self.assertEqual(self.assignment1.switch_requested(), self.dutyswitch)
        self.assertEqual(self.assignment2.switch_requested(), None)
| 44.931034 | 116 | 0.719493 |
5446c83fc2f7a85f42a102a1bbaed3c5858ca1a8
| 1,743 |
py
|
Python
|
official/nlp/emotect/src/finetune_eval_config.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
official/nlp/emotect/src/finetune_eval_config.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
official/nlp/emotect/src/finetune_eval_config.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
config settings, will be used in finetune.py
"""
from easydict import EasyDict as edict
import mindspore.common.dtype as mstype
from .ernie_model import ErnieConfig
optimizer_cfg = edict({
'optimizer': 'AdamWeightDecay',
'AdamWeightDecay': edict({
'learning_rate': 2e-5,
'end_learning_rate': 1e-7,
'power': 1.0,
'weight_decay': 1e-5,
'decay_filter': lambda x: 'layernorm' not in x.name.lower() and 'bias' not in x.name.lower(),
'eps': 1e-6,
}),
'Adam': edict({
'learning_rate': 2e-5
}),
'Adagrad': edict({
'learning_rate': 2e-5
})
})
ernie_net_cfg = ErnieConfig(
seq_length=64,
vocab_size=18000,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="relu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=513,
type_vocab_size=2,
initializer_range=0.02,
use_relative_positions=False,
dtype=mstype.float32,
compute_type=mstype.float16,
)
| 29.542373 | 101 | 0.662077 |
49a7953845572abec98c7db62b15b5b4affa533f
| 9,211 |
py
|
Python
|
mltrain-nips-2017/lu_jensen/visdial_workshop.pytorch/misc/netG.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | 1 |
2019-05-10T09:16:23.000Z
|
2019-05-10T09:16:23.000Z
|
mltrain-nips-2017/lu_jensen/visdial_workshop.pytorch/misc/netG.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | null | null | null |
mltrain-nips-2017/lu_jensen/visdial_workshop.pytorch/misc/netG.py
|
gopala-kr/ds-notebooks
|
bc35430ecdd851f2ceab8f2437eec4d77cb59423
|
[
"MIT"
] | 1 |
2019-10-14T07:30:18.000Z
|
2019-10-14T07:30:18.000Z
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import pdb
import math
import numpy as np
import torch.nn.functional as F
from misc.share_Linear import share_Linear
from misc.utils import l2_norm
class _netG(nn.Module):
def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout):
super(_netG, self).__init__()
self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
self.decoder = nn.Linear(nhid, ntoken+1)
self.d = dropout
self.beta = 3
self.vocab_size = ntoken
def init_weights(self):
initrange = 0.1
self.decoder.bias.data.fill_(0)
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, emb, hidden):
output, hidden = self.rnn(emb, hidden)
output = F.dropout(output, self.d, training=self.training)
decoded = self.decoder(output.view(output.size(0)*output.size(1), output.size(2)))
logprob = F.log_softmax(self.beta * decoded)
return logprob, hidden
def init_hidden(self, bsz):
weight = next(self.parameters()).data
if self.rnn_type == 'LSTM':
return (Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()),
Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()))
else:
return Variable(weight.new(self.nlayers, bsz, self.nhid).zero_())
def sample_beam(self, netW, input, hidden_state, opt={}):
beam_size = opt.get('beam_size', 10)
batch_size = input.size(1)
#assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq_all = torch.LongTensor(self.seq_length, batch_size, beam_size).zero_()
seq = torch.LongTensor(self.seq_length, batch_size).zero_()
seqLogprobs = torch.FloatTensor(self.seq_length, batch_size)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
# copy the hidden state for beam_size time.
state = []
for state_tmp in hidden_state:
state.append(state_tmp[:,k,:].view(1,1,-1).expand(1,beam_size, self.nhid).clone())
state = tuple(state)
beam_seq = torch.LongTensor(self.seq_length, beam_size).zero_()
beam_seq_logprobs = torch.FloatTensor(self.seq_length, beam_size).zero_()
beam_logprobs_sum = torch.zeros(beam_size) # running sum of logprobs for each beam
for t in range(self.seq_length + 1):
if t == 0: # input <bos>
it = input.data.resize_(1, beam_size).fill_(self.vocab_size)
xt = netW(Variable(it, requires_grad=False))
else:
pdb.set_trace()
"""perform a beam merge. that is,
for every previous beam we now many new possibilities to branch out
we need to resort our beams to maintain the loop invariant of keeping
the top beam_size most likely sequences."""
logprobsf = logprobs.float() # lets go to CPU for more efficiency in indexing operations
ys,ix = torch.sort(logprobsf,1,True) # sorted array of logprobs along each previous beam (last true = descending)
candidates = []
cols = min(beam_size, ys.size(1))
rows = beam_size
if t == 1: # at first time step only the first beam is active
rows = 1
for cc in range(cols): # for each column (word, essentially)
for qq in range(rows): # for each beam expansion
# compute logprob of expanding beam q with word in (sorted) position c
local_logprob = ys[qq,cc]
if beam_seq[t-2, qq] == self.vocab_size:
local_logprob.data.fill_(-9999)
candidate_logprob = beam_logprobs_sum[qq] + local_logprob
candidates.append({'c':ix.data[qq,cc], 'q':qq, 'p':candidate_logprob.data[0], 'r':local_logprob.data[0]})
candidates = sorted(candidates, key=lambda x: -x['p'])
# construct new beams
new_state = [_.clone() for _ in state]
if t > 1:
# well need these as reference when we fork beams around
beam_seq_prev = beam_seq[:t-1].clone()
beam_seq_logprobs_prev = beam_seq_logprobs[:t-1].clone()
for vix in range(beam_size):
v = candidates[vix]
# fork beam index q into index vix
if t > 1:
beam_seq[:t-1, vix] = beam_seq_prev[:, v['q']]
beam_seq_logprobs[:t-1, vix] = beam_seq_logprobs_prev[:, v['q']]
# rearrange recurrent states
for state_ix in range(len(new_state)):
# copy over state in previous beam q to new beam at vix
new_state[state_ix][0, vix] = state[state_ix][0, v['q']] # dimension one is time step
# append new end terminal at the end of this beam
beam_seq[t-1, vix] = v['c'] # c'th word is the continuation
beam_seq_logprobs[t-1, vix] = v['r'] # the raw logprob here
beam_logprobs_sum[vix] = v['p'] # the new (sum) logprob along this beam
if v['c'] == self.vocab_size or t == self.seq_length:
# END token special case here, or we reached the end.
# add the beam to a set of done beams
self.done_beams[k].append({'seq': beam_seq[:, vix].clone(),
'logps': beam_seq_logprobs[:, vix].clone(),
'p': beam_logprobs_sum[vix]
})
# encode as vectors
it = beam_seq[t-1].view(1,-1)
xt = netW(Variable(it.cuda()))
if t >= 1:
state = new_state
output, state = self.rnn(xt, state)
output = F.dropout(output, self.d, training=self.training)
decoded = self.decoder(output.view(output.size(0)*output.size(1), output.size(2)))
logprobs = F.log_softmax(self.beta * decoded)
self.done_beams[k] = sorted(self.done_beams[k], key=lambda x: -x['p'])
seq[:, k] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[:, k] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq.transpose(0, 1), seqLogprobs.transpose(0, 1)
def sample(self, netW, input, state, opt={}):
sample_max = opt.get('sample_max', 1)
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
seq_length = opt.get('seq_length', 9)
self.seq_length = seq_length
if beam_size > 1:
return self.sample_beam(netW, input, state, opt)
batch_size = input.size(1)
seq = []
seqLogprobs = []
for t in range(self.seq_length + 1):
if t == 0: # input <bos>
it = input.data
elif sample_max:
sampleLogprobs, it = torch.max(logprobs.data, 1)
it = it.view(-1).long()
else:
if temperature == 1.0:
prob_prev = torch.exp(logprobs.data).cpu() # fetch prev distribution: shape Nx(M+1)
else:
# scale logprobs by temperature
prob_prev = torch.exp(torch.div(logprobs.data, temperature)).cpu()
it = torch.multinomial(prob_prev, 1).cuda()
sampleLogprobs = logprobs.gather(1, Variable(it, requires_grad=False)) # gather the logprobs at sampled positions
it = it.view(-1).long() # and flatten indices for downstream processing
xt = netW(Variable(it.view(1,-1), requires_grad=False))
if t >= 1:
seq.append(it) #seq[t] the input of t+2 time step
seqLogprobs.append(sampleLogprobs.view(-1))
output, state = self.rnn(xt, state)
output = F.dropout(output, self.d, training=self.training)
decoded = self.decoder(output.view(output.size(0)*output.size(1), output.size(2)))
logprobs = F.log_softmax(self.beta * decoded)
return torch.cat([_.unsqueeze(1) for _ in seq], 1), torch.cat([_.unsqueeze(1) for _ in seqLogprobs], 1)
| 47.973958 | 180 | 0.546629 |
49dbba4e4a61afb029c5d7a04add795b7d34877a
| 993 |
py
|
Python
|
oldp/apps/contact/tests/test_views.py
|
ImgBotApp/oldp
|
575dc6f711dde3470d910e21c9440ee9b79a69ed
|
[
"MIT"
] | 3 |
2020-06-27T08:19:35.000Z
|
2020-12-27T17:46:02.000Z
|
oldp/apps/contact/tests/test_views.py
|
ImgBotApp/oldp
|
575dc6f711dde3470d910e21c9440ee9b79a69ed
|
[
"MIT"
] | null | null | null |
oldp/apps/contact/tests/test_views.py
|
ImgBotApp/oldp
|
575dc6f711dde3470d910e21c9440ee9b79a69ed
|
[
"MIT"
] | null | null | null |
from django.core import mail
from django.test import LiveServerTestCase
from django.urls import reverse
from oldp.apps.contact.views import MAIL_SUBJECT
class ContactViewsTestCase(LiveServerTestCase):
    """Integration tests for the contact form views."""

    def test_form(self):
        response = self.client.get(reverse('contact:form'))
        self.assertTemplateUsed(response, 'contact/form.html')
        # The rendered form must carry Django's CSRF token.
        self.assertContains(response, 'csrfmiddlewaretoken')

    def test_form_submit(self):
        payload = {
            'name': 'My name',
            'email': '[email protected]',
            'message': 'My Message',
        }
        response = self.client.post(reverse('contact:form'), payload)
        self.assertRedirects(response, reverse('contact:thankyou'))
        # Exactly one notification mail goes out, with the sender's name in the subject.
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, MAIL_SUBJECT % {'name': 'My name'})

    def test_thank_you(self):
        response = self.client.get(reverse('contact:thankyou'))
        self.assertTemplateUsed(response, 'contact/thankyou.html')
| 29.205882 | 84 | 0.660624 |
b7334d108cd830477e0922532bc08c15a5dfc7bb
| 1,128 |
py
|
Python
|
aoc2020/day_18/part_2.py
|
en0/aoc2020
|
a2f41b909dffe1f366682b3d03fd5fbdbc924ec0
|
[
"MIT"
] | null | null | null |
aoc2020/day_18/part_2.py
|
en0/aoc2020
|
a2f41b909dffe1f366682b3d03fd5fbdbc924ec0
|
[
"MIT"
] | null | null | null |
aoc2020/day_18/part_2.py
|
en0/aoc2020
|
a2f41b909dffe1f366682b3d03fd5fbdbc924ec0
|
[
"MIT"
] | null | null | null |
from aoc2020 import *
from collections import deque
class Solution(SolutionABC):
    """Advent of Code 2020 day 18, part 2: evaluate expressions in which
    '+' binds tighter than '*'."""

    # Known-correct final answer, checked by the harness.
    expected = 693891

    def solve(self) -> any:
        # Sum the evaluated value of every expression line in the input.
        return sum(self.resource_lines("input", self.evaluate))

    @classmethod
    def evaluate(cls, line) -> int:
        """Evaluate one expression string and return its integer value.

        Parenthesized groups are located with a depth stack and evaluated
        recursively; the resulting flat term list is then reduced by
        applying all '+' operations before any '*'.
        """
        stack = deque()
        terms = []
        for i in range(len(line)):
            c = line[i]
            if c == "(":
                # Record where this group starts (position after the paren).
                stack.append(i + 1)
            elif c == ")":
                j = stack.pop()
                if not stack:
                    # Outermost group closed: recurse into its contents.
                    terms.append(cls.evaluate(line[j:i]))
            elif not stack and c == '+':
                terms.append(c)
            elif not stack and c == '*':
                terms.append(c)
            elif not stack and c != ' ':
                # NOTE(review): single-character operands only — assumes every
                # number in the puzzle input is a single digit; confirm.
                terms.append(int(c))
        # Addition first: it has the higher precedence in part 2.
        while "+" in terms:
            i = terms.index("+")
            a, b = terms[i-1], terms[i+1]
            terms = terms[:i-1] + [a + b] + terms[i+2:]
        # Then reduce the remaining multiplications left to right.
        while "*" in terms:
            i = terms.index("*")
            a, b = terms[i-1], terms[i+1]
            terms = terms[:i-1] + [a * b] + terms[i+2:]
        return terms[0]
| 29.684211 | 63 | 0.441489 |
b78c52e8fca478308153b5f380df579e5e09aa10
| 2,451 |
py
|
Python
|
tests/onegov/election_day/models/test_data_source.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/election_day/models/test_data_source.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/election_day/models/test_data_source.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from datetime import date
from onegov.ballot import Election
from onegov.ballot import Vote
from onegov.election_day.models import DataSource
from onegov.election_day.models import DataSourceItem
def test_data_source(session):
    """End-to-end test of DataSource/DataSourceItem and their links to
    votes and elections (one source per supported type)."""
    session.add(DataSource(name='ds_vote', type='vote'))
    session.add(DataSource(name='ds_majorz', type='majorz'))
    session.add(DataSource(name='ds_proporz', type='proporz'))
    session.flush()

    # Each source exposes its name, a human-readable label and a token.
    ds_vote = session.query(DataSource).filter_by(type='vote').one()
    assert ds_vote.name == 'ds_vote'
    assert ds_vote.label == 'Vote'
    assert ds_vote.token

    ds_majorz = session.query(DataSource).filter_by(type='majorz').one()
    assert ds_majorz.name == 'ds_majorz'
    assert ds_majorz.label == 'Election based on the simple majority system'
    assert ds_majorz.token

    ds_proporz = session.query(DataSource).filter_by(type='proporz').one()
    assert ds_proporz.name == 'ds_proporz'
    assert ds_proporz.label == 'Election based on proportional representation'
    assert ds_proporz.token

    # One vote and one election per election type, all on the same date.
    dt = date(2015, 6, 14)
    session.add(Vote(title='v', domain='canton', date=dt))
    session.add(Election(title='m', type='majorz', domain='canton', date=dt))
    session.add(Election(title='p', type='proporz', domain='canton', date=dt))
    session.flush()
    vote = session.query(Vote).one()
    majorz = session.query(Election).filter_by(type='majorz').one()
    proporz = session.query(Election).filter_by(type='proporz').one()

    # query_candidates returns only items matching the source's type.
    assert ds_vote.query_candidates().one() == vote
    assert ds_majorz.query_candidates().one() == majorz
    assert ds_proporz.query_candidates().one() == proporz

    # Attach one item (district/number mapping) to each source.
    ds_vote.items.append(
        DataSourceItem(district='1', number='11', vote_id=vote.id)
    )
    ds_majorz.items.append(
        DataSourceItem(district='2', number='22', election_id=majorz.id)
    )
    ds_proporz.items.append(
        DataSourceItem(district='3', number='33', election_id=proporz.id)
    )
    session.flush()

    # Each item resolves back to its vote/election and carries its mapping.
    item = ds_vote.items.one()
    assert item.item == vote
    assert item.name == 'v'
    assert item.district == '1'
    assert item.number == '11'

    item = ds_majorz.items.one()
    assert item.item == majorz
    assert item.name == 'm'
    assert item.district == '2'
    assert item.number == '22'

    item = ds_proporz.items.one()
    assert item.item == proporz
    assert item.name == 'p'
    assert item.district == '3'
    assert item.number == '33'
| 34.521127 | 78 | 0.683395 |
b7c0bcfe0eeca748484965b3a09def20f822745e
| 2,924 |
py
|
Python
|
oneflow/python/framework/job_build_and_infer_cfg_error.py
|
Ldpe2G/oneflow
|
69febbe613c1dcc3aed1652a37e4ec7d18d2f1ba
|
[
"Apache-2.0"
] | 1 |
2020-12-24T09:26:36.000Z
|
2020-12-24T09:26:36.000Z
|
oneflow/python/framework/job_build_and_infer_cfg_error.py
|
duijiudanggecl/oneflow
|
d2096ae14cf847509394a3b717021e2bd1d72f62
|
[
"Apache-2.0"
] | null | null | null |
oneflow/python/framework/job_build_and_infer_cfg_error.py
|
duijiudanggecl/oneflow
|
d2096ae14cf847509394a3b717021e2bd1d72f62
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from google.protobuf import text_format
import oneflow.python.framework.session_context as session_ctx
import oneflow_api.oneflow.core.common.error as error_cfg
import traceback
import os
class JobBuildAndInferCfgError(Exception):
def __init__(self, error_cfg):
assert error_cfg.has_error_type()
self.error_cfg_ = error_cfg
self.error_summary_ = self.error_cfg_.error_summary()
self.error_cfg_.clear_error_summary()
self.msg_ = self.error_cfg_.msg()
self.error_cfg_.clear_msg()
def get_op_kernel_not_found_error_str(error_cfg):
error_msg = str(self.error_cfg_.op_kernel_not_found_error())
error_msg = error_msg.replace("\\", "")
error_msg = error_msg.replace("op_kernels_not_found_debug_str:", "")
error_msg = "\n".join(
[e.strip()[1:-1] for e in error_msg.strip().split("\n")]
)
return (
"\n\nFailure messages of registered kernels for current Op node: \n%s"
% error_msg
)
def get_multiple_op_kernels_matched_error_str(error_cfg):
error_msg = str(self.error_cfg_.multiple_op_kernels_matched_error())
error_msg = error_msg.replace("\\", "")
error_msg = error_msg.replace("matched_op_kernels_debug_str:", "")
error_msg = "\n".join(
[e.strip()[1:-1] for e in error_msg.strip().split("\n")]
)
return (
"\n\nThere exists multiple registered kernel candidates for current Op node: \n%s"
% error_msg
)
def __str__(self):
ret = (
"\n\nerror msg: \n\n\033[1;31m%s\033[0m" % str(self.error_summary_).strip()
)
if error_cfg_.has_op_kernel_not_found_error():
ret += self.get_op_kernel_not_found_error_str(self.error_cfg_)
self.error_cfg_.clear_op_kernel_not_found_error()
elif error_cfg_.multiple_op_kernels_matched_error():
ret += self.get_multiple_op_kernels_matched_error_str(self.error_cfg_)
self.error_cfg_.clear_multiple_op_kernels_matched_error()
ret += "\n%s" % str(self.error_cfg_)
ret += "\n%s" % str(self.msg_).strip()
return ret
| 37.487179 | 98 | 0.661765 |
12ce0cae247f8eeec08ec7115cd9a3052b849886
| 242 |
py
|
Python
|
src/model/device.py
|
agusalex/PacketSnifferServer
|
95f72dfd5e3b4e11d4b8a70390be010b9bbb267f
|
[
"MIT"
] | null | null | null |
src/model/device.py
|
agusalex/PacketSnifferServer
|
95f72dfd5e3b4e11d4b8a70390be010b9bbb267f
|
[
"MIT"
] | 1 |
2021-05-24T19:25:35.000Z
|
2021-05-24T19:25:35.000Z
|
src/model/device.py
|
agusalex/PacketSnifferServer
|
95f72dfd5e3b4e11d4b8a70390be010b9bbb267f
|
[
"MIT"
] | null | null | null |
from serial import Serial
class Device:
    """Wraps a serial connection opened on *port* at *baud_rate*."""

    def __init__(self, port: str, baud_rate=9600):
        # Keep the handle defined even if opening the port fails below.
        self.serial = None
        self.port = port
        self.baud_rate = baud_rate
        self.serial = Serial(self.port, int(self.baud_rate))
| 22 | 60 | 0.644628 |
424fd150653ae8e890c9d2e140629bc4ddfcb306
| 1,831 |
py
|
Python
|
src/extras/psycholinguistic_db_creator.py
|
Somsubhra/Enrich
|
cf1e69b86ceb64c8b09c98b442e09c1196b50125
|
[
"MIT"
] | 1 |
2015-11-30T09:27:51.000Z
|
2015-11-30T09:27:51.000Z
|
src/extras/psycholinguistic_db_creator.py
|
Somsubhra/Enrich
|
cf1e69b86ceb64c8b09c98b442e09c1196b50125
|
[
"MIT"
] | null | null | null |
src/extras/psycholinguistic_db_creator.py
|
Somsubhra/Enrich
|
cf1e69b86ceb64c8b09c98b442e09c1196b50125
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Creates a CSV of psycholinguistic dictionary
# downloaded from web
# Headers
__author__ = 'Somsubhra Bairi'
__email__ = '[email protected]'
# All imports
from nltk import PorterStemmer
from extras import Logger
# The psycholinguistic database creator
# The psycholinguistic database creator
class PsycholinguisticDbCreator:
    """Converts the downloaded psycholinguistic dictionary into a
    ';'-separated CSV of ``stem;kf_frequency;syllables`` rows.

    Per stemmed word, keeps the maximum KF frequency and the minimum
    syllable count.  Fixes vs. the previous revision: the max/min
    comparisons were performed on the raw string fields, which compared
    lexicographically (e.g. "9" > "10") and could keep the wrong entry;
    the values are now parsed as integers, and the files are managed
    with context managers.
    """

    def __init__(self, in_file, out_file):
        """Remember input/output paths and start with empty lookup tables."""
        self.in_file = in_file
        self.out_file = out_file
        self.kf_frequencies = {}
        self.syllables = {}

    def create(self):
        """Read the dictionary, aggregate per stem, and write the CSV."""
        Logger.log_message('Creating psycholinguistic dictionary database')
        stemmer = PorterStemmer()
        with open(self.in_file, 'r') as input_file, open(self.out_file, 'w') as output_file:
            for line in input_file:
                # Input row layout: <syllables> <kf_frequency> <word> ...
                # NOTE(review): assumes both counts are integers — confirm
                # against the downloaded source file.
                items = line.split()
                word = stemmer.stem_word(items[2].lower())
                kff = int(items[1])
                syl = int(items[0])
                # Keep the maximum KF frequency seen for this stem.
                if word not in self.kf_frequencies or kff > self.kf_frequencies[word]:
                    self.kf_frequencies[word] = kff
                # Keep the minimum syllable count seen for this stem.
                if word not in self.syllables or syl < self.syllables[word]:
                    self.syllables[word] = syl
            # Dump the aggregated contents to the output file.
            for word in self.kf_frequencies:
                output_file.write(word + ";" + str(self.kf_frequencies[word]) + ";" + str(self.syllables[word]) + "\n")
        Logger.log_success('Created psycholinguistic dictionary database')
| 30.016393 | 105 | 0.611688 |
426672b8011b5bc1dadbb1dce2709dffeae830e5
| 352 |
py
|
Python
|
nz_ueditor/app.py
|
gaohj/nzflask_bbs
|
36a94c380b78241ed5d1e07edab9618c3e8d477b
|
[
"Apache-2.0"
] | null | null | null |
nz_ueditor/app.py
|
gaohj/nzflask_bbs
|
36a94c380b78241ed5d1e07edab9618c3e8d477b
|
[
"Apache-2.0"
] | 27 |
2020-02-12T07:55:58.000Z
|
2022-03-12T00:19:09.000Z
|
nz_ueditor/app.py
|
gaohj/nzflask_bbs
|
36a94c380b78241ed5d1e07edab9618c3e8d477b
|
[
"Apache-2.0"
] | 2 |
2020-02-18T01:54:55.000Z
|
2020-02-21T11:36:28.000Z
|
from flask import Flask,render_template
from flask_script import Manager
from ueditor import bp  # import the UEditor blueprint
import config

app = Flask(__name__)
manager = Manager(app)
app.config.from_object(config)  # apply settings from the config module
app.register_blueprint(bp)


@app.route('/')
def hello_world():
    # Serve the demo page that hosts the UEditor widget.
    return render_template('index.html')


if __name__ == '__main__':
    manager.run()
| 20.705882 | 40 | 0.761364 |
c46f622e16cbe221d0e9efb5647fccd9fd509255
| 761 |
py
|
Python
|
src/python/test1.py
|
allenjzhang/playground
|
ef32d383d6c1751e204cb77db6658c6ed72624ad
|
[
"MIT"
] | 1 |
2020-06-10T11:34:59.000Z
|
2020-06-10T11:34:59.000Z
|
src/python/test1.py
|
allenjzhang/playground
|
ef32d383d6c1751e204cb77db6658c6ed72624ad
|
[
"MIT"
] | null | null | null |
src/python/test1.py
|
allenjzhang/playground
|
ef32d383d6c1751e204cb77db6658c6ed72624ad
|
[
"MIT"
] | 2 |
2020-05-26T06:39:04.000Z
|
2020-11-16T06:34:23.000Z
|
# Print the sample numbers on a single line, comma-separated.
nums = [2, 3, 4, 5, 7, 10, 12]
for value in nums:
    print(value, end=", ")
class CartItem:
def __init__(self, name, price) -> None:
self.price = price
self.name = name
def __repr__(self) -> str:
return "({0}, ${1})".format(self.name, self.price)
class ShoppingCart:
def __init__(self) -> None:
self.items = []
def add(self, cart_item):
self.items.append(cart_item)
def __iter__(self):
return self.items.__iter__()
@property
def total_price(self):
total = 0.0
for item in self.items:
total += item.price
return total
print()
print()
cart = ShoppingCart()
cart.add(CartItem("CD", 9.99))
cart.add(CartItem("Vinyle", 14.99))
for c in cart:
print(c)
print("Total is ${0:,}".format(cart.total_price))
| 18.560976 | 56 | 0.618922 |
fbfb73503d157eb9ad612355e7bd35c528340ac9
| 520 |
py
|
Python
|
exercises/es/test_04_06.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085 |
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/es/test_04_06.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79 |
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/es/test_04_06.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361 |
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
def test():
assert (
'spacy.blank("es")' in __solution__
), "¿Creaste el modelo de español en blanco?"
assert (
len(nlp.pipe_names) == 1 and nlp.pipe_names[0] == "ner"
), "¿Añadiste el entity recognizer al pipeline?"
assert (
len(ner.labels) == 1 and ner.labels[0] == "ROPA"
), "¿Añadiste el label al entity recognizer?"
__msg__.good(
"¡Bien hecho! Ahora el pipeline está listo, así que vamos a comenzar a escribir el "
"loop de entrenamiento."
)
| 32.5 | 92 | 0.601923 |
e15a62daad16e4a9b777e5357fb42b534ee37b68
| 91 |
py
|
Python
|
Online-Judges/CodingBat/Python/String-01/05-extra_end.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | 3 |
2021-06-15T01:19:23.000Z
|
2022-03-16T18:23:53.000Z
|
Online-Judges/CodingBat/Python/String-01/05-extra_end.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | null | null | null |
Online-Judges/CodingBat/Python/String-01/05-extra_end.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | null | null | null |
def extra_end(str):
if len(str) <= 2:
return (str * 3)
return (str[-2:]*3)
| 18.2 | 24 | 0.494505 |
835747867c4bd1a9bf761077a8c936899b3852ba
| 5,855 |
py
|
Python
|
repo/script.video.F4mProxy/lib/flvlib/helpers.py
|
dbiesecke/dbiesecke.github.io
|
5894473591f078fd22d1cb33794c5e656ae9b8dd
|
[
"MIT"
] | 1 |
2017-11-26T18:18:46.000Z
|
2017-11-26T18:18:46.000Z
|
repo/script.video.F4mProxy/lib/flvlib/helpers.py
|
dbiesecke/dbiesecke.github.io
|
5894473591f078fd22d1cb33794c5e656ae9b8dd
|
[
"MIT"
] | null | null | null |
repo/script.video.F4mProxy/lib/flvlib/helpers.py
|
dbiesecke/dbiesecke.github.io
|
5894473591f078fd22d1cb33794c5e656ae9b8dd
|
[
"MIT"
] | 3 |
2019-09-30T19:52:05.000Z
|
2020-04-12T21:20:56.000Z
|
import os
import time
import datetime
from StringIO import StringIO
from UserDict import DictMixin
class UTC(datetime.tzinfo):
"""
A UTC tzinfo class, based on
http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
ZERO = datetime.timedelta(0)
def utcoffset(self, dt):
return self.ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return self.ZERO
utc = UTC()
class OrderedAttrDict(DictMixin):
"""
A dictionary that preserves insert order and also has an attribute
interface.
Values can be transparently accessed and set as keys or as attributes.
"""
def __init__(self, dict=None, **kwargs):
self.__dict__["_order_priv_"] = []
self.__dict__["_data_priv_"] = {}
if dict is not None:
self.update(dict)
if len(kwargs):
self.update(kwargs)
# Mapping interface
def __setitem__(self, key, value):
if key not in self:
self._order_priv_.append(key)
self._data_priv_[key] = value
def __getitem__(self, key):
return self._data_priv_[key]
def __delitem__(self, key):
del self._data_priv_[key]
self._order_priv_.remove(key)
def keys(self):
return list(self._order_priv_)
# Attribute interface
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
try:
del self[name]
except KeyError:
raise AttributeError(name)
# Equality
def __eq__(self, other):
try:
my_iter = self.iteritems()
his_iter = other.iteritems()
except AttributeError:
return False
my_empty = False
his_empty = False
while True:
try:
my_key, my_val = my_iter.next()
except StopIteration:
my_empty = True
try:
his_key, his_val = his_iter.next()
except StopIteration:
his_empty = True
if my_empty and his_empty:
return True
if my_empty or his_empty:
return False
if (my_key, my_val) != (his_key, his_val):
return False
# String representation
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self)
def __str__(self):
return '{' + ', '.join([('%r: %r' % (key, self[key]))
for key in self._order_priv_]) + '}'
class ASPrettyPrinter(object):
"""Pretty printing of AS objects"""
def pformat(cls, val, indent=0):
cls.io = StringIO()
cls.pprint_lookup(val, indent)
return cls.io.getvalue()
pformat = classmethod(pformat)
def pprint(cls, val):
print cls.pformat(val)
pprint = classmethod(pprint)
def pprint_lookup(cls, val, ident):
if isinstance(val, basestring):
return cls.pprint_string(val)
if isinstance(val, (int, long, float)):
return cls.pprint_number(val)
if isinstance(val, datetime.datetime):
return cls.pprint_datetime(val)
if hasattr(val, 'iterkeys'):
# dict interface
return cls.pprint_dict(val, ident)
if hasattr(val, 'append'):
# list interface
return cls.pprint_list(val, ident)
# Unknown type ?
cls.io.write("%r" % (val, ))
return False
pprint_lookup = classmethod(pprint_lookup)
def pprint_string(cls, val):
if isinstance(val, unicode):
cls.io.write("u'%s'" % val.encode("UTF8"))
else:
cls.io.write("'%s'" % val)
return False
pprint_string = classmethod(pprint_string)
def pprint_number(cls, val):
cls.io.write(str(val))
return False
pprint_number = classmethod(pprint_number)
def pprint_datetime(cls, val):
cls.io.write(val.replace(microsecond=0).isoformat(' '))
return False
pprint_datetime = classmethod(pprint_datetime)
def pprint_dict(cls, val, indent):
def pprint_item(k):
last_pos = cls.io.tell()
cls.io.write(repr(k))
cls.io.write(": ")
new_indent = indent + cls.io.tell() - last_pos + 1
return cls.pprint_lookup(val[k], new_indent)
cls.io.write('{')
indented = False
keys = list(val.iterkeys())
if keys:
for k in keys[:-1]:
indented |= pprint_item(k)
cls.io.write(",\n%s " % (" "*indent))
indented |= pprint_item(keys[-1])
cls.io.write('}')
return (len(keys) > 1) | indented
pprint_dict = classmethod(pprint_dict)
def pprint_list(cls, val, indent):
last_pos = cls.io.tell()
cls.io.write('[')
new_indent = indent + cls.io.tell() - last_pos
indented = False
values = list(iter(val))
if values:
for v in values[:-1]:
indented |= cls.pprint_lookup(v, new_indent)
cls.io.write(",\n%s" % (" "*new_indent))
indented |= cls.pprint_lookup(values[-1], new_indent)
cls.io.write(']')
return (len(values) > 1) | indented
pprint_list = classmethod(pprint_list)
pformat = ASPrettyPrinter.pformat
pprint = ASPrettyPrinter.pprint
def force_remove(path):
try:
os.remove(path)
except OSError:
pass
| 28.42233 | 75 | 0.5462 |
55f6debea61e05e26edbf682dfd275eb15219ec8
| 981 |
py
|
Python
|
checks.py
|
BigBoss1964/discord-rules
|
02257a84450388ceffbad5b6bb9e28175734403f
|
[
"MIT"
] | null | null | null |
checks.py
|
BigBoss1964/discord-rules
|
02257a84450388ceffbad5b6bb9e28175734403f
|
[
"MIT"
] | null | null | null |
checks.py
|
BigBoss1964/discord-rules
|
02257a84450388ceffbad5b6bb9e28175734403f
|
[
"MIT"
] | null | null | null |
from discord import Member
from discord.ext import commands
from discord.ext.commands import Context as CommandContext, MissingPermissions
#ADMINS = [
# 333220752117596160,
# 368800541393813505,
#]
class NotADeveloper(Exception):
pass
class MissingPermissions(Exception):
pass
def is_developer():
def predicate(ctx: CommandContext):
if ctx.message.author.id != 333220752117596160:
raise NotADeveloper("Oh you have found an dev only command, but hey devs only ;)")
return commands.check(predicate)
def admin_permissions():
def predicate(ctx: CommandContext):
author: Member = ctx.message.author
if author.id == 333220752117596160:
return True
elif author.guild_permissions.administrator == False:
raise MissingPermissions("You are missing administrator permissions")
else:
return True
return commands.check(predicate)
| 26.513514 | 95 | 0.675841 |
1843f4b80dc8fca6d724dcdceffced88131aebef
| 3,271 |
py
|
Python
|
pew-religions/Religion-Leah.py
|
h4ckfu/data
|
bdc02fd5051dfb31e42f8e078832ceead92f9958
|
[
"CC-BY-4.0"
] | 16,124 |
2015-01-01T06:18:12.000Z
|
2022-03-31T00:46:52.000Z
|
pew-religions/Religion-Leah.py
|
h4ckfu/data
|
bdc02fd5051dfb31e42f8e078832ceead92f9958
|
[
"CC-BY-4.0"
] | 179 |
2015-01-07T10:19:57.000Z
|
2022-02-21T21:19:14.000Z
|
pew-religions/Religion-Leah.py
|
h4ckfu/data
|
bdc02fd5051dfb31e42f8e078832ceead92f9958
|
[
"CC-BY-4.0"
] | 12,163 |
2015-01-03T14:23:36.000Z
|
2022-03-31T10:10:23.000Z
|
#!/usr/bin/env python
import numpy as np
import pandas as pd
religions = ['Buddhist', 'Catholic', 'Evangel Prot', 'Hindu', 'Hist Black Prot', 'Jehovahs Witness', 'Jewish', 'Mainline Prot', 'Mormon', 'Muslim', 'Orthodox Christian', 'Unaffiliated']
csv = open("current.csv", 'w')
csv.truncate()
def write_row(matrix):
arr = np.asarray(matrix[0])[0]
row = ','.join([str(a) for a in arr]) + '\n'
csv.write(row)
# Intitial distribution of religions in US
first = np.matrix([.007, .208, .254, .007, .065, .008, .019, .147, .016, .009, .005, .228])
# Normed to sum to 100%
current = first / np.sum(first)
t0 = current
write_row(current)
# Transition matrix
trans = np.matrix(((0.390296314, 0.027141947, 0.06791021, 0.001857564, 0, 0, 0.011166082, 0.059762879, 0, 0, 0, 0.396569533),
(0.005370791, 0.593173325, 0.103151608, 0.000649759, 0.010486747, 0.005563864, 0.002041424, 0.053825329, 0.004760476, 0.001130529, 0.000884429, 0.199488989),
(0.00371836, 0.023900817, 0.650773331, 0.000250102, 0.016774503, 0.003098214, 0.001865491, 0.122807467, 0.004203107, 0.000186572, 0.002123778, 0.151866648),
(0, 0, 0.0033732, 0.804072618, 0, 0.001511151, 0, 0.01234639, 0, 0.00209748, 0, 0.17659916),
(0.002051357, 0.016851659, 0.09549708, 0, 0.699214315, 0.010620473, 0.000338804, 0.024372871, 0.000637016, 0.009406884, 0.000116843, 0.129892558),
(0, 0.023278276, 0.109573979, 0, 0.077957568, 0.336280578, 0, 0.074844833, 0.007624035, 0, 0, 0.35110361),
(0.006783201, 0.004082693, 0.014329604, 0, 0, 0.000610585, 0.745731278, 0.009587587, 0, 0, 0.002512334, 0.184058682),
(0.005770357, 0.038017215, 0.187857555, 0.000467601, 0.008144075, 0.004763516, 0.003601208, 0.451798506, 0.005753587, 0.000965543, 0.00109818, 0.25750798),
(0.007263135, 0.01684885, 0.06319935, 0.000248467, 0.0059394, 0, 0.001649896, 0.03464334, 0.642777489, 0.002606278, 0, 0.208904711),
(0, 0.005890381, 0.023573308, 0, 0.011510643, 0, 0.005518343, 0.014032084, 0, 0.772783807, 0, 0.15424369),
(0.004580353, 0.042045841, 0.089264134 , 0, 0.00527346, 0, 0, 0.061471387, 0.005979218, 0.009113978, 0.526728084, 0.243246723),
(0.006438308, 0.044866331, 0.1928814, 0.002035375, 0.04295005, 0.010833621, 0.011541439, 0.09457963, 0.01365141, 0.005884336, 0.002892072, 0.525359211)))
# Fertility array
fert = np.matrix(((2.1, 2.3, 2.3, 2.1, 2.5, 2.1, 2, 1.9, 3.4, 2.8, 2.1, 1.7)))
# Create data frame for printing later
religionDataFrame = pd.DataFrame()
for x in range(0,100):
### beginning of conversion step
# apply transition matrix to current distribution
current = current * trans
### beginning of fertility step
# divide by two for couple number
current = current/2
# adjust by fertility
current = np.multiply(fert, current)
# normalize to 100%
current = current / np.sum(current)
write_row(current)
# add to data frame
religionDataFrame = religionDataFrame.append(pd.DataFrame(current), ignore_index=True)
csv.close()
religionDataFrame.columns = religions
religionDataFrame.to_csv("current_pandas_save.csv")
| 51.109375 | 185 | 0.653011 |
a13ff456b0d98bf11e3d99358f6e0855e84320f7
| 495 |
py
|
Python
|
djangoForm/member/views.py
|
lanuxos/djangoForm
|
4a0f02eaf27735c7eaaa4c282426cc911e2a399a
|
[
"MIT"
] | null | null | null |
djangoForm/member/views.py
|
lanuxos/djangoForm
|
4a0f02eaf27735c7eaaa4c282426cc911e2a399a
|
[
"MIT"
] | null | null | null |
djangoForm/member/views.py
|
lanuxos/djangoForm
|
4a0f02eaf27735c7eaaa4c282426cc911e2a399a
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, redirect
from .models import Member
from .forms import AddMemberForm
def Add(request):
# context = {}
if request.POST:
form = AddMemberForm(request.POST)
if form.is_valid:
form.save()
return redirect('member-page')
else:
form = AddMemberForm()
return render(request, 'member/home.html', {'form': form})
def Info(request):
context = {}
return render(request, 'member/info.html', context)
| 24.75 | 62 | 0.644444 |
62e82875bcbd4b5feea4b7efbe9f6f3fb8563c7a
| 19,720 |
py
|
Python
|
mkdocs_static_i18n/plugin.py
|
NaviduRoshika/checklist-manager-documentation
|
c7a47c103b3c168c2665b247ba017bad77af7561
|
[
"MIT"
] | 63 |
2021-02-08T14:04:02.000Z
|
2022-03-27T09:33:04.000Z
|
mkdocs_static_i18n/plugin.py
|
NaviduRoshika/checklist-manager-documentation
|
c7a47c103b3c168c2665b247ba017bad77af7561
|
[
"MIT"
] | 84 |
2021-02-08T13:30:14.000Z
|
2022-03-31T07:13:05.000Z
|
mkdocs_static_i18n/plugin.py
|
ultrabug/mkdocs-static-i18n
|
9432cef306ebd42a203cd11a0bfdf6cad712b32f
|
[
"MIT"
] | 16 |
2021-03-08T02:04:38.000Z
|
2022-03-18T03:45:40.000Z
|
import logging
from collections import defaultdict
from copy import deepcopy
from pathlib import Path
from mkdocs import __version__ as mkdocs_version
from mkdocs.commands.build import _build_page, _populate_page
from mkdocs.config.config_options import Type
from mkdocs.plugins import BasePlugin
from mkdocs.structure.nav import get_navigation
from mkdocs_static_i18n.struct import I18nFile
from .struct import I18nFiles, Locale
try:
from mkdocs.localization import install_translations
except ImportError:
install_translations = None
try:
import pkg_resources
material_dist = pkg_resources.get_distribution("mkdocs-material")
material_version = material_dist.version
material_languages = [
lang.split(".html")[0]
for lang in material_dist.resource_listdir("material/partials/languages")
]
except Exception:
material_languages = []
material_version = None
log = logging.getLogger("mkdocs.plugins." + __name__)
LUNR_LANGUAGES = [
"ar",
"da",
"de",
"en",
"es",
"fi",
"fr",
"hu",
"it",
"ja",
"nl",
"no",
"pt",
"ro",
"ru",
"sv",
"th",
"tr",
"vi",
]
MKDOCS_THEMES = ["mkdocs", "readthedocs"]
class I18n(BasePlugin):
config_scheme = (
("default_language", Locale(str, required=True)),
("default_language_only", Type(bool, default=False, required=False)),
("languages", Locale(dict, required=True)),
("material_alternate", Type(bool, default=True, required=False)),
("nav_translations", Type(dict, default={}, required=False)),
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.i18n_configs = {}
self.i18n_files = defaultdict(list)
self.i18n_navs = {}
self.material_alternates = None
@staticmethod
def _is_url(value):
return value.startswith("http://") or value.startswith("https://")
def _dict_replace_value(self, directory, old, new):
"""
Return a copy of the given dict with value replaced.
"""
x = {}
for k, v in directory.items():
if isinstance(v, dict):
v = self._dict_replace_value(v, old, new)
elif isinstance(v, list):
v = self._list_replace_value(v, old, new)
elif isinstance(v, str) or isinstance(v, Path):
if str(v) == str(old):
v = new
if not self._is_url(v):
v = str(Path(v))
x[k] = v
return x
def _list_replace_value(self, listing, old, new):
"""
Return a copy of the given list with value replaced.
"""
x = []
for e in listing:
if isinstance(e, list):
e = self._list_replace_value(e, old, new)
elif isinstance(e, dict):
e = self._dict_replace_value(e, old, new)
elif isinstance(e, str) or isinstance(e, Path):
if str(e) == str(old):
e = new
if not self._is_url(e):
e = str(Path(e))
x.append(e)
return x
def on_config(self, config, **kwargs):
"""
Enrich configuration with language specific knowledge.
"""
self.default_language = self.config["default_language"]
# Make a order preserving list of all the configured
# languages, add the default one first if not listed by the user
self.all_languages = []
for language in self.config["languages"]:
if language not in self.all_languages:
self.all_languages.append(language)
if self.default_language not in self.all_languages:
self.all_languages.insert(0, self.default_language)
# Make a localized copy of the config, the plugins are mutualized
# We remove it from the config before (deep)copying it
plugins = config.pop("plugins")
for language in self.all_languages:
self.i18n_configs[language] = deepcopy(config)
self.i18n_configs[language]["plugins"] = plugins
config["plugins"] = plugins
# Set theme locale to default language
if self.default_language != "en":
if config["theme"].name in MKDOCS_THEMES:
if mkdocs_version >= "1.2":
config["theme"]["locale"] = self.default_language
log.info(
f"Setting the default 'theme.locale' option to '{self.default_language}'"
)
elif config["theme"].name == "material":
config["theme"].language = self.default_language
log.info(
f"Setting the default 'theme.language' option to '{self.default_language}'"
)
# Skip language builds requested?
if self.config["default_language_only"] is True:
return config
# Support for mkdocs-material>=7.1.0 language selector
if self.config["material_alternate"] and len(self.all_languages) > 1:
if material_version and material_version >= "7.1.0":
if not config["extra"].get("alternate") or kwargs.get("force"):
# Add index.html file name when used with
# use_directory_urls = True
link_suffix = ""
if config.get("use_directory_urls") is False:
link_suffix = "index.html"
config["extra"]["alternate"] = [
{
"name": self.config["languages"].get(
self.config["default_language"],
self.config["default_language"],
),
"link": f"./{link_suffix}",
"lang": self.config["default_language"],
}
]
for language in self.all_languages:
if language == self.config["default_language"]:
continue
config["extra"]["alternate"].append(
{
"name": self.config["languages"][language],
"link": f"./{language}/{link_suffix}",
"lang": language,
}
)
elif "alternate" in config["extra"]:
for alternate in config["extra"]["alternate"]:
if not alternate.get("link", "").startswith("./"):
log.info(
"The 'extra.alternate' configuration contains a "
"'link' option that should starts with './' in "
f"{alternate}"
)
if "navigation.instant" in config["theme"]._vars.get("features", []):
log.warning(
"mkdocs-material language switcher contextual link is not "
"compatible with theme.features = navigation.instant"
)
else:
self.material_alternates = config["extra"].get("alternate")
# Support for the search plugin lang
if "search" in config["plugins"]:
search_langs = config["plugins"]["search"].config["lang"] or []
for language in self.all_languages:
if language in LUNR_LANGUAGES:
if language not in search_langs:
search_langs.append(language)
log.info(
f"Adding '{language}' to the 'plugins.search.lang' option"
)
else:
log.warning(
f"Language '{language}' is not supported by "
f"lunr.js, not setting it in the 'plugins.search.lang' option"
)
# Report misconfigured nav_translations, see #66
if self.config["nav_translations"]:
for lang in self.config["languages"]:
if lang in self.config["nav_translations"]:
break
else:
log.info(
"Ignoring 'nav_translations' option: expected a language key "
f"from {list(self.config['languages'].keys())}, got "
f"{list(self.config['nav_translations'].keys())}"
)
self.config["nav_translations"] = {}
# Make sure awesome-pages is always called first, see #65
if "awesome-pages" in config["plugins"]:
config["plugins"].move_to_end("awesome-pages", last=False)
for events in config["plugins"].events.values():
for idx, event in enumerate(list(events)):
try:
if (
str(event.__module__)
== "mkdocs_awesome_pages_plugin.plugin"
):
events.insert(0, events.pop(idx))
except AttributeError:
# partials don't have a module
pass
return config
def on_files(self, files, config):
"""
Construct the main + lang specific file tree which will be used to
generate the navigation for the default site and per language.
"""
main_files = I18nFiles([])
main_files.default_locale = self.default_language
main_files.locale = self.default_language
for language in self.all_languages:
self.i18n_files[language] = I18nFiles([])
self.i18n_files[language].default_locale = self.default_language
self.i18n_files[language].locale = language
for fileobj in files:
main_i18n_file = I18nFile(
fileobj,
"",
all_languages=self.all_languages,
default_language=self.default_language,
docs_dir=config["docs_dir"],
site_dir=config["site_dir"],
use_directory_urls=config.get("use_directory_urls"),
)
main_files.append(main_i18n_file)
# user requested only the default version to be built
if self.config["default_language_only"] is True:
continue
for language in self.all_languages:
i18n_file = I18nFile(
fileobj,
language,
all_languages=self.all_languages,
default_language=self.default_language,
docs_dir=config["docs_dir"],
site_dir=config["site_dir"],
use_directory_urls=config.get("use_directory_urls"),
)
# this 'append' method is reimplemented in I18nFiles to avoid duplicates
self.i18n_files[language].append(i18n_file)
if (
main_i18n_file.is_documentation_page()
and language != self.default_language
and main_i18n_file.src_path == i18n_file.src_path
):
log.debug(
f"file {main_i18n_file.src_path} is missing translation in '{language}'"
)
# these comments are here to help me debug later if needed
# print([{p.src_path: p.url} for p in main_files.documentation_pages()])
# print([{p.src_path: p.url} for p in self.i18n_files["en"].documentation_pages()])
# print([{p.src_path: p.url} for p in self.i18n_files["fr"].documentation_pages()])
# print([{p.src_path: p.url} for p in main_files.static_pages()])
# print([{p.src_path: p.url} for p in self.i18n_files["en"].static_pages()])
# print([{p.src_path: p.url} for p in self.i18n_files["fr"].static_pages()])
return main_files
def _fix_config_navigation(self, language, files):
"""
When a static navigation is set in mkdocs.yml a user will usually
structurate its navigation using the main (default language)
documentation markdown pages.
This function localizes the given pages to their translated
counterparts if available.
"""
for i18n_page in files.documentation_pages():
if Path(i18n_page.src_path).suffixes == [f".{language}", ".md"]:
config_path_expects = [
i18n_page.non_i18n_src_path.with_suffix(".md"),
i18n_page.non_i18n_src_path.with_suffix(
f".{self.default_language}.md"
),
]
for config_path in config_path_expects:
self.i18n_configs[language]["nav"] = self._list_replace_value(
self.i18n_configs[language]["nav"],
config_path,
i18n_page.src_path,
)
def _maybe_translate_navigation(self, language, nav):
translated_nav = self.config["nav_translations"].get(language, {})
if translated_nav:
for item in nav:
if hasattr(item, "title") and item.title in translated_nav:
item.title = translated_nav[item.title]
if hasattr(item, "children") and item.children:
self._maybe_translate_navigation(language, item.children)
def on_nav(self, nav, config, files):
"""
Translate i18n aware navigation to honor the 'nav_translations' option.
"""
for language in self.config["languages"]:
if self.i18n_configs[language]["nav"]:
self._fix_config_navigation(language, self.i18n_files[language])
self.i18n_navs[language] = get_navigation(
self.i18n_files[language], self.i18n_configs[language]
)
# If awesome-pages is used, we want to use it to structurate our
# localized navigations as well
if "awesome-pages" in config["plugins"]:
self.i18n_navs[language] = config["plugins"]["awesome-pages"].on_nav(
self.i18n_navs[language],
config=self.i18n_configs[language],
files=self.i18n_files[language],
)
if self.config["nav_translations"].get(language, {}):
log.info(f"Translating navigation to {language}")
self._maybe_translate_navigation(language, self.i18n_navs[language])
return nav
def _fix_search_duplicates(self, language, search_plugin):
"""
We want to avoid indexing the same pages twice if the default language
has its own version built as well as the /language version too as this
would pollute the search results.
When this happens, we favor the default language location if its
content is the same as its /language counterpart.
"""
entries = deepcopy(search_plugin.search_index._entries)
default_lang_entries = filter(
lambda x: not x["location"].startswith(
tuple(self.config["languages"].keys())
),
search_plugin.search_index._entries,
)
target_lang_entries = list(
filter(lambda x: x["location"].startswith(f"{language}/"), entries)
)
for default_lang_entry in default_lang_entries:
expected_locations = [
f"{language}/{default_lang_entry['location']}",
f"{language}/{default_lang_entry['location'].rstrip('/')}",
f"{language}/{default_lang_entry['location'].replace('/#', '#')}",
]
duplicated_entries = filter(
lambda x: x["location"] in expected_locations
and x["text"] == default_lang_entry["text"],
target_lang_entries,
)
for duplicated_entry in duplicated_entries:
search_plugin.search_index._entries.remove(duplicated_entry)
def on_page_context(self, context, page, config, nav):
"""
Make the language switcher contextual to the current page.
This allows to switch language while staying on the same page.
"""
# export some useful i18n related variables on page context, see #75
context["i18n_config"] = self.config
context["i18n_page_locale"] = page.file.dest_language or self.default_language
context["i18n_page_file_locale"] = page.file.locale_suffix
if self.material_alternates:
alternates = deepcopy(self.material_alternates)
page_url = page.url
for language in self.all_languages:
if page.url.startswith(f"{language}/"):
prefix_len = len(language) + 1
page_url = page.url[prefix_len:]
break
for alternate in alternates:
if alternate["link"].endswith("/"):
separator = ""
else:
separator = "/"
if config.get("use_directory_urls") is False:
alternate["link"] = alternate["link"].replace("/index.html", "", 1)
alternate["link"] += f"{separator}{page_url}"
config["extra"]["alternate"] = alternates
return context
def on_post_build(self, config):
"""
Derived from mkdocs commands build function.
We build every language on its own directory.
"""
# skip language builds requested?
if self.config["default_language_only"] is True:
return
dirty = False
search_plugin = config["plugins"].get("search")
for language in self.config["languages"]:
log.info(f"Building {language} documentation")
config = self.i18n_configs[language]
env = self.i18n_configs[language]["theme"].get_env()
files = self.i18n_files[language]
nav = self.i18n_navs[language]
# Support mkdocs-material theme language
if config["theme"].name == "material":
if language in material_languages:
config["theme"].language = language
else:
log.warning(
f"Language {language} is not supported by "
f"mkdocs-material=={material_version}, not setting "
"the 'theme.language' option"
)
# Include theme specific files
files.add_files_from_theme(env, config)
# Include static files
files.copy_static_files(dirty=dirty)
for file in files.documentation_pages():
_populate_page(file.page, config, files, dirty)
for file in files.documentation_pages():
_build_page(file.page, config, files, nav, env, dirty)
# Update the search plugin index with language pages
if search_plugin:
if (
language == self.default_language
and self.default_language in self.config["languages"]
):
self._fix_search_duplicates(language, search_plugin)
search_plugin.on_post_build(config)
| 41.25523 | 97 | 0.546501 |
3db4b1cff5b57c3e5445ca8b9d95963c7a0b931d
| 5,447 |
py
|
Python
|
balsn-2021-writeup/alldata/dist/src/challenge.py
|
Jimmy01240397/balsn-2021-writeup
|
91b71dfbddc1c214552280b12979a82ee1c3cb7e
|
[
"MIT"
] | null | null | null |
balsn-2021-writeup/alldata/dist/src/challenge.py
|
Jimmy01240397/balsn-2021-writeup
|
91b71dfbddc1c214552280b12979a82ee1c3cb7e
|
[
"MIT"
] | null | null | null |
balsn-2021-writeup/alldata/dist/src/challenge.py
|
Jimmy01240397/balsn-2021-writeup
|
91b71dfbddc1c214552280b12979a82ee1c3cb7e
|
[
"MIT"
] | null | null | null |
class MasterMetaClass(type):
def __new__(cls, class_name, class_parents, class_attr):
def getFlag(self):
print('Here you go, my master')
with open('flag') as f:
print(f.read())
class_attr[getFlag.__name__] = getFlag
attrs = ((name, value) for name, value in class_attr.items() if not name.startswith('__'))
class_attr = dict(('IWant'+name.upper()+'Plz', value) for name, value in attrs)
newclass = super().__new__(cls, class_name, class_parents, class_attr)
return newclass
def __init__(*argv):
print('Bad guy! No Flag !!')
raise 'Illegal'
class BalsnMetaClass(type):
def getFlag(self):
print('You\'re not Master! No Flag !!')
def __new__(cls, class_name, class_parents, class_attr):
newclass = super().__new__(cls, class_name, class_parents, class_attr)
setattr(newclass, cls.getFlag.__name__, cls.getFlag)
return newclass
def secure_vars(s):
attrs = {name:value for name, value in vars(s).items() if not name.startswith('__')}
return attrs
safe_dict = {
'BalsnMetaClass' : BalsnMetaClass,
'MasterMetaClass' : MasterMetaClass,
'False' : False,
'True' : True,
'abs' : abs,
'all' : all,
'any' : any,
'ascii' : ascii,
'bin' : bin,
'bool' : bool,
'bytearray' : bytearray,
'bytes' : bytes,
'chr' : chr,
'complex' : complex,
'dict' : dict,
'dir' : dir,
'divmod' : divmod,
'enumerate' : enumerate,
'filter' : filter,
'float' : float,
'format' : format,
'hash' : hash,
'help' : help,
'hex' : hex,
'id' : id,
'int' : int,
'iter' : iter,
'len' : len,
'list' : list,
'map' : map,
'max' : max,
'min' : min,
'next' : next,
'oct' : oct,
'ord' : ord,
'pow' : pow,
'print' : print,
'range' : range,
'reversed' : reversed,
'round' : round,
'set' : set,
'slice' : slice,
'sorted' : sorted,
'str' : str,
'sum' : sum,
'tuple' : tuple,
'type' : type,
'vars' : secure_vars,
'zip' : zip,
'__builtins__':None
}
def createMethod(code):
if len(code) > 45:
print('Too long!! Bad Guy!!')
return
for x in ' _$#@~':
code = code.replace(x,'')
def wrapper(self):
exec(code, safe_dict, {'self' : self})
return wrapper
def setName(pattern):
while True:
name = input(f'Give me your {pattern} name :')
if (name.isalpha()):
break
else:
print('Illegal Name...')
return name
def setAttribute(cls):
attrName = setName('attribute')
while True:
attrValue = input(f'Give me your value:')
if (attrValue.isalnum()):
break
else:
print('Illegal value...')
setattr(cls, attrName, attrValue)
def setMethod(cls):
methodName = setName('method')
code = input(f'Give me your function:')
func = createMethod(code)
setattr(cls, methodName, func)
def getAttribute(obj):
attrs = [attr for attr in dir(obj) if not callable(getattr(obj, attr)) and not attr.startswith("__")]
x = input('Please enter the attribute\'s name :')
if x not in attrs:
print(f'You can\'t access the attribute {x}')
return
else:
try:
print(f'{x}: {getattr(obj, x)}')
except:
print("Something went wrong in your attribute...")
return
def callMethod(cls, obj):
attrs = [attr for attr in dir(obj) if callable(getattr(obj, attr)) and not attr.startswith("__")]
x = input('Please enter the method\'s name :')
if x not in attrs:
print(f'You can\'t access the method {x}')
return
else:
try:
print(f'calling method {x}...')
cls.__dict__[x](obj)
print('done')
except:
print('Something went wrong in your method...')
return
# Empty placeholder class handed to the player.  Its metaclass
# BalsnMetaClass is defined elsewhere in this file and presumably
# constrains the class — TODO confirm against the metaclass definition.
class Guest(metaclass = BalsnMetaClass):
    pass
if __name__ == '__main__':
    print(f'Welcome!!We have prepared a class named "Guest" for you')
    # Phase 1: up to three successful customisations of the Guest class.
    # An invalid menu choice does not consume an attempt — cnt is
    # decremented back in the else branch.
    cnt = 0
    while cnt < 3:
        cnt += 1
        print('1. Add attribute')
        print('2. Add method')
        print('3. Finish')
        x = input("Option ? :")
        if x == "1":
            setAttribute(Guest)
        elif x == "2":
            setMethod(Guest)
        elif x == "3":
            break
        else:
            print("invalid input.")
            cnt -= 1
    print("Well Done! We Create an instance for you !")
    obj = Guest()
    # Phase 2: up to three inspections / method invocations on the instance.
    cnt = 0
    while cnt < 3:
        cnt += 1
        print('1. Inspect attribute')
        print('2. Using method')
        print('3. Exit')
        x = input("Option ? :")
        if x == "1":
            getAttribute(obj)
        elif x == "2":
            callMethod(Guest, obj)
        elif x == "3":
            print("Okay...exit...")
            break
        else:
            print("invalid input.")
            cnt -= 1
| 29.443243 | 105 | 0.491463 |
9aa7346e1d7ff32b0e215e3bcb0acdee8be1f162
| 3,218 |
py
|
Python
|
03 Python/Smart Home Dashboard/aufgabe/hwio/dht11.py
|
DennisSchulmeister/dhbwka-wwi-iottech-quellcodes
|
58f86907af31187f267a9ea476f061cc59098ebd
|
[
"CC-BY-4.0"
] | null | null | null |
03 Python/Smart Home Dashboard/aufgabe/hwio/dht11.py
|
DennisSchulmeister/dhbwka-wwi-iottech-quellcodes
|
58f86907af31187f267a9ea476f061cc59098ebd
|
[
"CC-BY-4.0"
] | null | null | null |
03 Python/Smart Home Dashboard/aufgabe/hwio/dht11.py
|
DennisSchulmeister/dhbwka-wwi-iottech-quellcodes
|
58f86907af31187f267a9ea476f061cc59098ebd
|
[
"CC-BY-4.0"
] | 1 |
2020-10-10T20:24:05.000Z
|
2020-10-10T20:24:05.000Z
|
import random, time
import Adafruit_DHT
from .hwdevice import HardwareDevice
class DHT11Sensor(HardwareDevice):
    """
    Minimal wrapper around the Adafruit_DHT library for reading a DHT11
    air sensor via 1-wire, reporting values through a callback at a
    fixed interval.
    """

    def __init__(self, gpio_pin, interval_sec, read_cb=None):
        """
        Constructor.

        @param gpio_pin:     GPIO pin number the sensor is wired to
        @param interval_sec: interval in seconds between sensor reads
        @param read_cb:      callback for new readings; signature:
                             def dht11_read_cb(temperature, humidity): ...
        """
        self._sensor = Adafruit_DHT.DHT11
        self._gpio_pin = gpio_pin
        self._interval_sec = interval_sec
        self._read_cb = read_cb
        self._last_read = 0

    def tick(self):
        """
        Main polling method in which the sensor data would be read.
        """
        # TODO (exercise left open in the original source): read a fresh
        # value every two seconds and hand it to the callback.
        temperature = 0
        humidity = 0
        if self._read_cb:
            self._read_cb(temperature, humidity)

    def close(self):
        """
        Shut down hardware access.  Nothing to release here, but the
        abstract base class requires an implementation.
        """
        pass
class DHT11MockSensor(HardwareDevice):
    """
    Mock replacement for a DHT11 air sensor when no real hardware is at
    hand: both values start at a random point between 10 and 35 and then
    perform a bounded random walk.
    """

    def __init__(self, interval_sec, read_cb=None):
        """
        Constructor.

        @param interval_sec: interval in seconds between simulated reads
        @param read_cb:      callback for new readings; signature:
                             def dht11_read_cb(temperature, humidity): ...
        """
        self._interval_sec = interval_sec
        self._read_cb = read_cb
        self._last_read = 0
        self._temperature = random.randint(10, 35)
        self._humidity = random.randint(10, 35)

    def tick(self):
        """
        Main polling method producing the simulated sensor data.
        """
        now = time.perf_counter()
        if now - self._last_read >= self._interval_sec and self._read_cb:
            self._last_read = now
            # Random walk in 0.1 steps, clamped to one decimal in [0, 40].
            self._temperature += random.randint(-15, 15) / 10.0
            self._humidity += random.randint(-15, 15) / 10.0
            self._temperature = max(min(round(self._temperature, 1), 40.0), 0.0)
            self._humidity = max(min(round(self._humidity, 1), 40.0), 0.0)
            if self._read_cb:
                self._read_cb(self._temperature, self._humidity)

    def close(self):
        """
        Shut down hardware access.  Nothing to release here, but the
        abstract base class requires an implementation.
        """
        pass
| 31.54902 | 82 | 0.634556 |
9af950702a2ebb5f6336a283e2251b1ee7d247bf
| 340 |
py
|
Python
|
Webpage/arbeitsstunden/management/commands/importData.py
|
ASV-Aachen/Website
|
bbfc02d71dde67fdf89a4b819b795a73435da7cf
|
[
"Apache-2.0"
] | null | null | null |
Webpage/arbeitsstunden/management/commands/importData.py
|
ASV-Aachen/Website
|
bbfc02d71dde67fdf89a4b819b795a73435da7cf
|
[
"Apache-2.0"
] | 46 |
2022-01-08T12:03:24.000Z
|
2022-03-30T08:51:05.000Z
|
Webpage/arbeitsstunden/management/commands/importData.py
|
ASV-Aachen/Website
|
bbfc02d71dde67fdf89a4b819b795a73435da7cf
|
[
"Apache-2.0"
] | null | null | null |
from django.core.management.base import BaseCommand
from arbeitsstunden.management.commands.utils.csv import importCSV
class Command(BaseCommand):
    """Management command that imports the project's fixed-structure CSV files."""

    help = "Import the given csv Files (ONLY FOR GIVEN STRUCT)"

    def add_arguments(self, parser):
        # No extra command-line arguments are accepted.
        pass

    def handle(self, *args, **options):
        # Delegate the whole import to the shared CSV helper.
        importCSV()
| 21.25 | 66 | 0.688235 |
627201e22f8a993ecbcd1fca02813ecf40503511
| 580 |
py
|
Python
|
packages/watchmen-model/src/watchmen_model/console/__init__.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-model/src/watchmen_model/console/__init__.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-model/src/watchmen_model/console/__init__.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
from .connected_space import ConnectedSpace
from .connected_space_graphic import ConnectedSpaceGraphic, SubjectGraphic, TopicGraphic
from .dashboard import Dashboard, DashboardParagraph, DashboardReport
from .data_result_set import SubjectDataResultSet
from .report import Report, ReportDimension, ReportFunnel, ReportFunnelType, ReportIndicator, ReportIndicatorArithmetic
from .subject import Subject, SubjectDataset, SubjectDatasetColumn, SubjectDatasetCriteria, \
SubjectDatasetCriteriaIndicator, SubjectDatasetCriteriaIndicatorArithmetic, SubjectDatasetJoin, SubjectJoinType
| 72.5 | 119 | 0.887931 |
65517b6da935e28ce3d5338cf77a6c7704eb1338
| 402 |
py
|
Python
|
data_tests/saved__backend__py3.9/python/subpackages.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 88 |
2019-01-08T16:39:08.000Z
|
2022-02-06T14:19:23.000Z
|
data_tests/saved__backend__/pythran/subpackages.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 13 |
2019-06-20T15:53:10.000Z
|
2021-02-09T11:03:29.000Z
|
data_tests/saved__backend__/python/subpackages.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 1 |
2019-11-05T03:03:14.000Z
|
2019-11-05T03:03:14.000Z
|
from numpy.fft import rfft
from numpy.random import randn
from numpy.linalg import matrix_power
from scipy.special import jv
def test_np_fft(u):
u_fft = rfft(u)
return u_fft
def test_np_linalg_random(u):
(nx, ny) = u.shape
u[:] = randn(nx, ny)
u2 = u.T * u
u4 = matrix_power(u2, 2)
return u4
def test_sp_special(v, x):
return jv(v, x)
__transonic__ = ("0.4.2",)
| 16.08 | 37 | 0.651741 |
b2a6aaa575a111593f24942b3504db9fda9e4e79
| 381 |
py
|
Python
|
tintz/newsletter/views.py
|
dcfranca/tintz-backend
|
9f29e17cafc31ab7dc568d1e2c984e6f1b1fc3fc
|
[
"MIT"
] | null | null | null |
tintz/newsletter/views.py
|
dcfranca/tintz-backend
|
9f29e17cafc31ab7dc568d1e2c984e6f1b1fc3fc
|
[
"MIT"
] | 2 |
2021-03-19T21:51:51.000Z
|
2021-06-10T18:22:50.000Z
|
tintz/newsletter/views.py
|
danielfranca/tintz-backend
|
9f29e17cafc31ab7dc568d1e2c984e6f1b1fc3fc
|
[
"MIT"
] | null | null | null |
from api.views import BaseApiView, SerializerFactory
from newsletter.models import Subscriber
class SubscriberApi(BaseApiView):
    """
    Endpoint that registers a newsletter subscriber; only POST is allowed.
    """

    model = Subscriber
    serializer_class = SerializerFactory(model)
    http_method_names = ['post']

    def post(self, request, *args, **kwargs):
        # Delegate straight to the generic create handler of the base view.
        return self.create(request, *args, **kwargs)
| 25.4 | 52 | 0.706037 |
a24002ceaae927f83c59be3f7e52a69ad1e52916
| 85 |
py
|
Python
|
emsite/emapp/apps.py
|
thejeshpr/ExpenseManager
|
a3c452a87c9287a814247434ebb4039e164d7936
|
[
"MIT"
] | null | null | null |
emsite/emapp/apps.py
|
thejeshpr/ExpenseManager
|
a3c452a87c9287a814247434ebb4039e164d7936
|
[
"MIT"
] | null | null | null |
emsite/emapp/apps.py
|
thejeshpr/ExpenseManager
|
a3c452a87c9287a814247434ebb4039e164d7936
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class EmappConfig(AppConfig):
    """Django application configuration for the ``emapp`` app."""
    name = 'emapp'
| 14.166667 | 33 | 0.741176 |
ac1329a5f73c750f0af9c8360ea18569082039da
| 2,425 |
py
|
Python
|
__init__.py
|
Pusnow/ExcelStaticWeb
|
8f42240f03b0d666cf111334f142a212ef43c324
|
[
"MIT"
] | 1 |
2020-06-29T12:33:15.000Z
|
2020-06-29T12:33:15.000Z
|
__init__.py
|
Pusnow/ExcelStaticWeb
|
8f42240f03b0d666cf111334f142a212ef43c324
|
[
"MIT"
] | null | null | null |
__init__.py
|
Pusnow/ExcelStaticWeb
|
8f42240f03b0d666cf111334f142a212ef43c324
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Static-site generator: reads sbtmwebport.xlsx via openpyxl, renders the
# Jinja2 templates from ./templates and writes the finished site to ./result.
from jinja2 import Environment, FileSystemLoader
import os, shutil
from openpyxl import load_workbook
from resizer import resize
wb = load_workbook(filename = 'sbtmwebport.xlsx')
env = Environment(loader=FileSystemLoader(u'./templates'))
# Start from a clean output directory seeded with the static assets.
if os.path.isdir(u"./result"):
    shutil.rmtree(u"./result")
shutil.copytree(u"./static","./result")
# Collect feature names from column C of the 'index.html' sheet, starting at
# row 6, until the first empty cell.
features = []
indexsheet = wb['index.html']
featindex = 6
filename = indexsheet['C'+str(featindex)].value
while filename :
    features.append(filename)
    featindex += 1
    filename = indexsheet['C'+str(featindex)].value
index = env.get_template('index.html')
indexhtml = index.render(features=features).encode('utf-8')
# Read one project per row from the 'projects.html' sheet: fixed columns B..I
# plus a variable number of image columns starting at column J (10).
pieces = []
projectsheet = wb['projects.html']
projectindex = 5
filename = projectsheet['B'+str(projectindex)].value
while filename :
    tag = projectsheet['C'+str(projectindex)].value
    maker = projectsheet['D'+str(projectindex)].value
    year = projectsheet['E'+str(projectindex)].value
    objective = projectsheet['F'+str(projectindex)].value
    link = projectsheet['G'+str(projectindex)].value
    headtext = projectsheet['H'+str(projectindex)].value
    maintext = projectsheet['I'+str(projectindex)].value
    imgs =[]
    col = 10
    img = dict(origin =projectsheet.cell(row = projectindex, column = col).value)
    while img['origin'] :
        # Create a 940x529 thumbnail for each listed image.
        img["thumb"] = resize(img["origin"],"./projectimage/","./result/",940,529)
        imgs.append(img)
        col += 1
        img = dict(origin =projectsheet.cell(row = projectindex, column = col).value)
    page = tag + ".html"
    p = dict (name = filename, tag = tag, maker = maker, year = year, objective = objective, link = link, headtext = headtext, maintext = maintext, imgs = imgs, page = page)
    pieces.append(p)
    projectindex += 1
    filename = projectsheet['B'+str(projectindex)].value
projects = env.get_template('projects.html')
projectshtml = projects.render(pieces = pieces ).encode('utf-8')
project = env.get_template('project-details.html')
for pc in pieces :
    ph = project.render(piece = pc , otherprojects = pieces).encode('utf-8')
    # NOTE(review): the rendered output is UTF-8 *bytes*, yet the file is
    # opened in text mode ('w').  That only works on Python 2; on Python 3
    # it raises TypeError.  Presumably this script targets Python 2 — confirm
    # before porting (fix would be mode 'wb' or dropping the .encode calls).
    f = open(u"./result/"+pc['page'], 'w')
    f.write(ph)
    f.close()
contact = env.get_template('contact.html')
contacthtml = contact.render().encode('utf-8')
f = open(u"./result/index.html", 'w')
f.write(indexhtml)
f.close()
f = open(u"./result/projects.html", 'w')
f.write(projectshtml)
f.close()
f = open(u"./result/contact.html", 'w')
f.write(contacthtml)
f.close()
| 24.744898 | 170 | 0.698969 |
3bd5a6c29a4bcb1850274a7ecc8b8f9c5ef680cf
| 807 |
py
|
Python
|
backend/app/core/solver/select_images_analysis.py
|
jinnn-dev/patholearn
|
b4e6a18cfbf963e71640ed6cac3fc3a618a7ae15
|
[
"MIT"
] | 1 |
2022-02-20T12:45:04.000Z
|
2022-02-20T12:45:04.000Z
|
backend/app/core/solver/select_images_analysis.py
|
JamesNeumann/learning-by-annotations
|
c2b5e4b653eeb1c973aa5a7dad35ac8be18cb1ad
|
[
"MIT"
] | 21 |
2021-11-01T10:13:56.000Z
|
2021-12-02T10:02:13.000Z
|
backend/app/core/solver/select_images_analysis.py
|
jinnn-dev/patholearn
|
b4e6a18cfbf963e71640ed6cac3fc3a618a7ae15
|
[
"MIT"
] | 1 |
2021-12-16T18:20:55.000Z
|
2021-12-16T18:20:55.000Z
|
from typing import List, Tuple
class SelectImagesAnalysis:
    """Static helpers for grading "select images" tasks."""

    @staticmethod
    def check_select_images(
        *, task_solution: List[str], user_solution: List[str]
    ) -> Tuple[List[str], List[str]]:
        """
        Checks whether the user selected all images in the task solution or not.
        Returns all correctly and wrongly selected image indices.

        :param task_solution: Task solution (expected image indices)
        :param user_solution: User solution (selected image indices)
        :return: Tuple of (correctly selected, wrongly selected) indices,
                 preserving the order of ``user_solution``
        """
        # Build the set once so each membership test is O(1) instead of
        # O(len(task_solution)) — the original scanned the list per item.
        expected = set(task_solution)
        correct_images = [index for index in user_solution if index in expected]
        wrong_images = [index for index in user_solution if index not in expected]
        return correct_images, wrong_images
| 28.821429 | 80 | 0.630731 |
5a008d52465a3419cf13e549076903852b97db89
| 3,284 |
py
|
Python
|
data/raspi/src/praxis/QualityAssurance2020/WestenDigitalRuleChecker.py
|
softwareengel/softwareengel-blog
|
e24192c096be4361c7f7bbe00d2c3e8263c9fd0b
|
[
"MIT"
] | null | null | null |
data/raspi/src/praxis/QualityAssurance2020/WestenDigitalRuleChecker.py
|
softwareengel/softwareengel-blog
|
e24192c096be4361c7f7bbe00d2c3e8263c9fd0b
|
[
"MIT"
] | null | null | null |
data/raspi/src/praxis/QualityAssurance2020/WestenDigitalRuleChecker.py
|
softwareengel/softwareengel-blog
|
e24192c096be4361c7f7bbe00d2c3e8263c9fd0b
|
[
"MIT"
] | null | null | null |
class WesternDigitalRuleChecker(object):
    """
    Evaluates control-chart data points against the Western Electric rules.

    Each ``check_rule_*`` method returns True when the rule is satisfied and
    False (after printing a notice) when the rule is violated.
    """

    def __init__(self, estimated_value=0, standard_deviation=0):
        """
        :param estimated_value:    centerline of the control chart
        :param standard_deviation: one sigma of the monitored process
        """
        # The original class declared two __init__ methods; Python keeps only
        # the last one, so the zero-argument variant was dead code.  Default
        # arguments restore both call styles.
        self.estimated_value = estimated_value
        self.standard_deviation = standard_deviation

    def check_rule_1(self, datapoints):
        """Any single data point falls outside the 3σ limit from the centerline.

        Implemented analogously to the other rules (only the most recent
        point is examined); replaces the unimplemented TODO stub, which
        always returned None.
        """
        if datapoints:
            newest = datapoints[-1]
            if (newest > self.estimated_value + 3 * self.standard_deviation
                    or newest < self.estimated_value - 3 * self.standard_deviation):
                print("Rule 1 failed")
                return False
        return True

    def check_rule_2(self, datapoints):
        """Two out of three consecutive points fall beyond the
        2σ limit (in zone A or beyond), on the same side of the centerline"""
        if (len(datapoints) >= 3):
            number_of_points_outside_2_deviations = 0
            for item in datapoints[len(datapoints) - 3:len(datapoints)]:
                if (self.estimated_value + 2 * self.standard_deviation < item):
                    number_of_points_outside_2_deviations += 1
                if (self.estimated_value - 2 * self.standard_deviation > item):
                    number_of_points_outside_2_deviations -= 1
            # +2 / -2 means at least two of the last three points lie beyond
            # 2σ on the same side of the centerline.
            if number_of_points_outside_2_deviations >= 2 or number_of_points_outside_2_deviations <= -2:
                print("Rule 2 failed")
                return False
        return True

    def check_rule_3(self, datapoints):
        """Four out of five consecutive points fall beyond the
        1σ limit (in zone B or beyond), on the same side of the centerline"""
        if (len(datapoints) >= 5):
            number_of_points_outside_1_deviation = 0
            for item in datapoints[len(datapoints) - 5:len(datapoints)]:
                if (self.estimated_value + self.standard_deviation < item):
                    number_of_points_outside_1_deviation += 1
                if (self.estimated_value - self.standard_deviation > item):
                    number_of_points_outside_1_deviation -= 1
            # +4 / -4 means at least four of the last five points lie beyond
            # 1σ on the same side of the centerline.
            if (number_of_points_outside_1_deviation >= 4 or number_of_points_outside_1_deviation <= -4):
                print("Rule 3 failed")
                return False
        return True

    def check_rule_4(self, datapoints):
        """Nine consecutive points fall on the same side of the centerline (in zone C or beyond)"""
        if (len(datapoints) >= 9):
            relevant_points = datapoints[len(datapoints) - 9:len(datapoints)]
            for index, item in enumerate(relevant_points):
                if (index > 0):
                    # Any crossing (or touching) of the centerline inside the
                    # nine-point window satisfies the rule.
                    if relevant_points[index - 1] > self.estimated_value:
                        if item < self.estimated_value:
                            return True
                    if relevant_points[index - 1] < self.estimated_value:
                        if item > self.estimated_value:
                            return True
                    if item == self.estimated_value:
                        return True
                elif index == 0:
                    if item == self.estimated_value:
                        return True
            print("Rule 4 failed")
            return False
        return True
5a8990b0df3ee178179e0281cae093af1f8af965
| 220 |
py
|
Python
|
cpu_temp.py
|
wichmann/RaspPI
|
168609cb237e59a4c895eae798c0dab052aab38b
|
[
"MIT"
] | null | null | null |
cpu_temp.py
|
wichmann/RaspPI
|
168609cb237e59a4c895eae798c0dab052aab38b
|
[
"MIT"
] | null | null | null |
cpu_temp.py
|
wichmann/RaspPI
|
168609cb237e59a4c895eae798c0dab052aab38b
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
"""Print the Raspberry Pi CPU temperature as reported by ``vcgencmd``."""
import subprocess

output = subprocess.check_output(['vcgencmd', 'measure_temp']).decode('utf8')
# vcgencmd prints e.g. "temp=48.3'C\n"; keep what follows '=' and drop "'C\n".
_, temperature = output.split('=')
temperature = temperature[:-3]
print('Die Temperatur der CPU beträgt {} °C'.format(temperature))
| 24.444444 | 74 | 0.686364 |
0c6de50a38db12ccb166644ee82778a2a9dbebf4
| 758 |
py
|
Python
|
Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/partitioners/__init__.py
|
linuxonly801/awesome-DeepLearning
|
b063757fa130c4d56aea5cce2e592610f1e169f9
|
[
"Apache-2.0"
] | 5 |
2022-01-30T07:35:58.000Z
|
2022-02-08T05:45:20.000Z
|
Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/partitioners/__init__.py
|
linuxonly801/awesome-DeepLearning
|
b063757fa130c4d56aea5cce2e592610f1e169f9
|
[
"Apache-2.0"
] | 1 |
2022-01-14T02:33:28.000Z
|
2022-01-14T02:33:28.000Z
|
Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/framework/partitioners/__init__.py
|
linuxonly801/awesome-DeepLearning
|
b063757fa130c4d56aea5cce2e592610f1e169f9
|
[
"Apache-2.0"
] | 1 |
2022-01-24T16:27:01.000Z
|
2022-01-24T16:27:01.000Z
|
# copyright (c) 2020 paddlepaddle authors. all rights reserved.
#
# licensed under the apache license, version 2.0 (the "license"
# you may not use this file except in compliance with the license.
# you may obtain a copy of the license at
#
# http://www.apache.org/licenses/license-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the license is distributed on an "as is" basis,
# without warranties or conditions of any kind, either express or implied.
# see the license for the specific language governing permissions and
# limitations under the license.
from .base import BasePartitioner
from .transnetv2_partitioner import TransNetV2Partitioner

# Explicit public API of the partitioners subpackage.
__all__ = ['BasePartitioner', 'TransNetV2Partitioner']
| 39.894737 | 74 | 0.779683 |
f05629f5eebc99e7a71e25bc24ed8f1b765d014b
| 1,118 |
py
|
Python
|
SBTK_League_Helper/test.py
|
juanchodepisa/sbtk
|
7cba7748e75a02b491e551d6c1be9bd7987c5051
|
[
"MIT"
] | null | null | null |
SBTK_League_Helper/test.py
|
juanchodepisa/sbtk
|
7cba7748e75a02b491e551d6c1be9bd7987c5051
|
[
"MIT"
] | null | null | null |
SBTK_League_Helper/test.py
|
juanchodepisa/sbtk
|
7cba7748e75a02b491e551d6c1be9bd7987c5051
|
[
"MIT"
] | null | null | null |
from src.interfacing.ogs.connect import Authentication

# Experimental script for the online-go.com (OGS) tournament API.
# IDs of test ("puppet") accounts used in the experiments.
puppets = [1121, 1122, 1124, 1125]

# NOTE(review): hard-coded credentials checked into source control — move
# them to an environment variable / config file and rotate the password.
a = Authentication("Leira", "Leira1234567890", testing=True)

# The commented-out calls below were earlier API experiments (kept as-is):
# tourney id 7370
# a.post(['tournaments'],{
#   "name":"Test Tournament 3",
#   "group":9,
#   "tournament_type":"roundrobin",
#   "description":"A big grand tournament",
#   "board_size":5,
#   "handicap":0, #default -1 for auto
#   "time_start": "2015-10-24T18:40:00Z",
#   "time_control_parameters":{
#     "time_control":"fischer",
#     "initial_time":259200,
#     "max_time":604800,
#     "time_increment":86400
#   },
#   "exclusivity": "invite", # open, group. default
#   "exclude_provisional": False, # default
#   "auto_start_on_max": True, # default
#   "analysis_enabled": False, #default
#   "settings":{
#     "maximum_players":10,
#   },
#   "players_start": 6, #default
#   "first_pairing_method": "slaughter", #random, slide, strength . default
#   "subsequent_pairing_method": "random", # default
#   "min_ranking":0,
#   "max_ranking":36
# })

# r= a.post (['tournaments', 7379, 'players'], app_param= {"player_id":1122} )
# print (r)
| 28.666667 | 78 | 0.644007 |
b2f4e87f1f62bdb29286e3e662a4b78e9d38c5a0
| 422 |
py
|
Python
|
PINp/2014/Koleganov_N_S/task_1_10.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
PINp/2014/Koleganov_N_S/task_1_10.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
PINp/2014/Koleganov_N_S/task_1_10.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
# Task 1. Variant 10.
# Write a program that reports the occupation and the pseudonym under which
# Richard Jenkins is hidden.  After printing the information, the program
# must wait until the user presses Enter before exiting.
# Author: Koleganov Nikita Sergeevich
# 29.05.2016

print("Ричард Дженкинс извествен по имени Ричард Дейл. Американсикй актер кино, театра и телевидения")

input("\n\nНажмите Enter для выхода.")
| 42.2 | 102 | 0.800948 |
e8e120eaf3eb895d334ec7924c792d4839703574
| 2,091 |
py
|
Python
|
graph/astar/node.py
|
yujungcheng/algorithm_and_data_structure
|
3742238227067217b82bf35ca3a968db4375f3c9
|
[
"Apache-2.0"
] | null | null | null |
graph/astar/node.py
|
yujungcheng/algorithm_and_data_structure
|
3742238227067217b82bf35ca3a968db4375f3c9
|
[
"Apache-2.0"
] | null | null | null |
graph/astar/node.py
|
yujungcheng/algorithm_and_data_structure
|
3742238227067217b82bf35ca3a968db4375f3c9
|
[
"Apache-2.0"
] | 1 |
2020-04-16T01:17:04.000Z
|
2020-04-16T01:17:04.000Z
|
#!/usr/bin/env python3
class Node():
    """A node in an A*-style grid search.

    Wraps a (y, x) position together with the usual path-cost bookkeeping
    (g = distance_from_start, h = distance_to_end, f = cost).
    """

    def __init__(self, parent=None, position=None):
        self.parent = parent          # predecessor node on the current path
        self.position = position      # (y, x) tuple — assumed axis order per original comment; confirm with callers
        self.distance_from_start = 0  # g-cost
        self.distance_to_end = 0      # h-cost (heuristic)
        self.cost = 0                 # f-cost
        self.move = 0                 # move counter

    def __str__(self):
        # Identity comparison with None (PEP 8) instead of '=='.
        if self.position is None:
            return "None"
        return str(self.position)

    def __eq__(self, node):
        # NOTE: __eq__ without __hash__ makes Node unhashable, matching the
        # original behaviour.
        return self.position == node.position

    def is_position(self, x, y):
        """Return True when this node sits at the given coordinates."""
        return self.position[0] == x and self.position[1] == y

    def get_neighbor_positions(self, order=None):
        """Return the valid 8-neighbourhood coordinates around this node.

        ``order`` selects the visiting order of the offsets; unknown values
        fall back to a default table.  Positions with a negative coordinate
        and the parent's own position are excluded.
        """
        if order == 0:
            offsets = [(0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1)]
        elif order == 1:
            offsets = [(1, -1), (1, 0), (0, -1), (1, 1), (-1, -1), (0, 1), (-1, 0), (-1, 1)]
        elif order == 2:
            # NOTE(review): identical to order 1 in the original source —
            # possibly unintended; preserved to keep behaviour unchanged.
            offsets = [(1, -1), (1, 0), (0, -1), (1, 1), (-1, -1), (0, 1), (-1, 0), (-1, 1)]
        elif order == 3:
            offsets = [(0, 1), (0, -1), (-1, 0), (1, 0), (1, 1), (1, -1), (-1, -1), (-1, 1)]
        elif order == 4:
            offsets = [(0, -1), (0, 1), (-1, 0), (1, 0), (-1, -1), (-1, 1), (1, -1), (1, 1)]
        else:
            offsets = [(1, 1), (1, 0), (0, 1), (1, -1), (-1, 1), (0, -1), (-1, 0), (-1, -1)]

        parent_position = self.parent.position if self.parent is not None else None

        neighbors = []
        for dy, dx in offsets:
            if dy == 0 and dx == 0:
                continue  # defensive only; no table actually contains (0, 0)
            y = self.position[0] + dy
            x = self.position[1] + dx
            if parent_position is not None and parent_position[0] == y and parent_position[1] == x:
                continue  # never step straight back onto the parent
            if x < 0 or y < 0:
                continue  # off-grid (negative) coordinates
            neighbors.append((y, x))
        return neighbors
| 32.671875 | 81 | 0.463893 |
68226c72f18553b4652b8598c98b21d42508b8d5
| 1,092 |
py
|
Python
|
gemtown/musicians/migrations/0001_initial.py
|
doramong0926/gemtown
|
2c39284e3c68f0cc11994bed0ee2abaad0ea06b6
|
[
"MIT"
] | null | null | null |
gemtown/musicians/migrations/0001_initial.py
|
doramong0926/gemtown
|
2c39284e3c68f0cc11994bed0ee2abaad0ea06b6
|
[
"MIT"
] | 5 |
2020-09-04T20:13:39.000Z
|
2022-02-17T22:03:33.000Z
|
gemtown/musicians/migrations/0001_initial.py
|
doramong0926/gemtown
|
2c39284e3c68f0cc11994bed0ee2abaad0ea06b6
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.13 on 2019-04-20 06:10
from django.db import migrations, models
# Auto-generated Django migration (initial schema for the Musician model).
# Generated migrations are normally not hand-edited once applied.
class Migration(migrations.Migration):

    # First migration of this app, hence no dependencies.
    initial = True

    dependencies = [
    ]

    operations = [
        # Creates the Musician table: auto PK, create/update timestamps,
        # optional cover image and description, a nickname and a fixed set
        # of career-length choices; newest records first by default.
        migrations.CreateModel(
            name='Musician',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('cover_image', models.ImageField(blank=True, upload_to='')),
                ('nickname', models.TextField(max_length=254)),
                ('description', models.TextField(blank=True)),
                ('career', models.CharField(choices=[('under_1year', 'Under 1 year'), ('_3yr', '3yr'), ('_2yr', '2yr'), ('_5yr', '5yr'), ('_6yr_10yr', '6yr ~ 10yr'), ('over_10year', 'Over 10 year'), ('_4yr', '4yr')], max_length=20)),
            ],
            options={
                'ordering': ['-created_at'],
            },
        ),
    ]
| 36.4 | 233 | 0.552198 |
d7e5865756cb44f4e1935edd2394e75b6b5ed892
| 59,271 |
py
|
Python
|
Packs/AzureDevOps/Integrations/AzureDevOps/AzureDevOps.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 2 |
2021-12-06T21:38:24.000Z
|
2022-01-13T08:23:36.000Z
|
Packs/AzureDevOps/Integrations/AzureDevOps/AzureDevOps.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 87 |
2022-02-23T12:10:53.000Z
|
2022-03-31T11:29:05.000Z
|
Packs/AzureDevOps/Integrations/AzureDevOps/AzureDevOps.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 2 |
2022-01-05T15:27:01.000Z
|
2022-02-01T19:27:43.000Z
|
# type: ignore
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
import copy
from requests import Response
from typing import Callable
# Incident type name used for this integration.
INCIDENT_TYPE_NAME = "Azure DevOps"
# Pull-request fields mirrored outward (per the OUTGOING prefix — presumably
# XSOAR -> Azure DevOps; confirm against the mirroring commands), mapped to
# human-readable descriptions.
OUTGOING_MIRRORED_FIELDS = {'status': 'The status of the pull request.',
                            'title': 'The title of the pull request.',
                            'description': 'The description of the pull request.',
                            'project': 'The name of the project.',
                            'repository_id': 'The repository ID of the pull request target branch.',
                            'pull_request_id': 'the ID of the pull request'}
class Client:
"""
API Client to communicate with AzureDevOps.
"""
def __init__(self, client_id: str, organization: str, verify: bool, proxy: bool):
if '@' in client_id: # for use in test-playbook
client_id, refresh_token = client_id.split('@')
integration_context = get_integration_context()
integration_context.update(current_refresh_token=refresh_token)
set_integration_context(integration_context)
self.ms_client = MicrosoftClient(
self_deployed=True,
auth_id=client_id,
token_retrieval_url='https://login.microsoftonline.com/organizations/oauth2/v2.0/token',
grant_type=DEVICE_CODE,
base_url=f'https://dev.azure.com/{organization}',
verify=verify,
proxy=proxy,
scope='499b84ac-1321-427f-aa17-267ca6975798/user_impersonation offline_access')
self.organization = organization
def pipeline_run_request(self, project: str, pipeline_id: str, branch_name: str) -> dict:
"""
Run a pipeline.
Args:
project (str): The name or the ID of the project.
pipeline_id (str): The ID of the pipeline.
branch_name (str): The name of the repository branch which run the pipeline.
Returns:
dict: API response from Azure.
"""
params = {'api-version': '6.1-preview.1'}
data = {"resources": {"repositories": {"self": {"refName": f'refs/heads/{branch_name}'}}}}
url_suffix = f"{project}/_apis/pipelines/{pipeline_id}/runs"
response = self.ms_client.http_request(method='POST',
url_suffix=url_suffix,
params=params,
json_data=data,
resp_type='json')
return response
def user_add_request(self, user_email: str, account_license_type: str, group_type: str, project_id: str) -> dict:
"""
Add a user, assign license and extensions and make them a member of a project group in an account.
Args:
user_email (str): The Email of the user to add to the organization.
account_license_type (str): The type of account license (e.g. Express, Stakeholder etc.).
group_type (str): Project Group (e.g. Contributor, Reader etc.).
project_id (str): The ID of the project.
Returns:
dict: API response from Azure.
"""
params = {'api-version': '6.1-preview.3'}
data = {
"accessLevel": {
"accountLicenseType": account_license_type
},
"projectEntitlements": [
{
"group": {
"groupType": group_type
},
"projectRef": {
"id": project_id}
}
],
"user": {
"principalName": user_email,
"subjectKind": "user"
}
}
full_url = f"https://vsaex.dev.azure.com/{self.organization}/_apis/UserEntitlements"
response = self.ms_client.http_request(method='POST',
full_url=full_url,
params=params,
json_data=data,
resp_type='json')
return response
def user_remove_request(self, user_id: str) -> Response:
"""
Delete a user from the account.
Args:
user_id (str): The ID of the user to remove from the account.
Returns:
Response: API response from Azure.
"""
params = {'api-version': '6.1-preview.3'}
full_url = f'https://vsaex.dev.azure.com/{self.organization}/_apis/userentitlements/{user_id}'
response = self.ms_client.http_request(method='DELETE',
full_url=full_url,
params=params,
resp_type='response')
return response
def pull_request_create_request(self, project: str, repository_id: str, source_branch: str,
target_branch: str, title: str, description: str, reviewers: list) -> dict:
"""
Create a new pull request in Azure DevOps.
Args:
project (str): The name or the ID of the project.
repository_id (str): The repository ID of the pull request's target branch.
source_branch (str): The name of the source branch of the pull request.
target_branch (str): The name of the target branch of the pull request.
title (str): The title of the pull request.
description (str): The description of the pull request.
reviewers (list): Pull-request reviewers IDs.
Returns:
dict: API response from Azure.
"""
params = {'api-version': '6.1-preview.1'}
data = {
"sourceRefName": source_branch,
"targetRefName": target_branch,
"description": description,
"reviewers": reviewers,
"title": title
}
url_suffix = f'{project}/_apis/git/repositories/{repository_id}/pullrequests'
response = self.ms_client.http_request(method='POST',
url_suffix=url_suffix,
params=params,
json_data=data,
resp_type='json')
return response
def pull_request_update_request(self, project: str, repository_id: str, pull_request_id: str,
title: str = None, description: str = None, status: str = None,
last_merge_source_commit: dict = None) -> dict:
"""
Update a pull request.
Args:
project (str): The name or the ID of the project.
repository_id (str): The repository ID of the pull request's target branch.
pull_request_id (str): The ID of the pull-request.
title (str): The updated pull-request title.
description (str): The updated pull-request description.
status (str): The updated pull-request status.
last_merge_source_commit (dict): Commit object at the head of the source branch
at the time of the last pull request merge.
Returns:
dict: API response from Azure.
"""
params = {'api-version': '6.1-preview.1'}
data = remove_empty_elements({"description": description, "status": status,
"title": title, "LastMergeSourceCommit": last_merge_source_commit})
url_suffix = f'{project}/_apis/git/repositories/{repository_id}/pullrequests/{pull_request_id}'
response = self.ms_client.http_request(method='PATCH',
url_suffix=url_suffix,
params=params,
json_data=data,
resp_type='json')
return response
def pull_requests_get_request(self, project: str, repository_id: str, pull_request_id: str) -> dict:
"""
Retrieve pull request information request.
Args:
project (str): The name or the ID of the project.
repository_id (str): The repository ID of the pull request's target branch.
pull_request_id (str): The ID of the pull-request.
Returns:
dict: API response from Azure.
"""
params = {'api-version': '6.1-preview.1'}
url_suffix = f'{project}/_apis/git/repositories/{repository_id}/pullrequests/{pull_request_id}'
response = self.ms_client.http_request(method='GET',
url_suffix=url_suffix,
params=params,
resp_type='json')
return response
def pull_requests_list_request(self, project: str, repository: str, skip: int = None, limit: int = None) -> dict:
"""
Retrieve pull requests in repository.
Args:
project (str): The name or the ID of the project.
repository (str): The repository name of the pull request's target branch.
skip (int): The number of results to skip.
limit (int): The number of results to retrieve.
Returns:
dict: API response from Azure.
"""
params = remove_empty_elements({'api-version': '6.1-preview.1', "$skip": skip, "$top": limit})
url_suffix = f'{project}/_apis/git/repositories/{repository}/pullrequests/'
response = self.ms_client.http_request(method='GET',
url_suffix=url_suffix,
params=params,
resp_type='json')
return response
def project_list_request(self, skip: int = None, limit: int = None) -> dict:
    """
    Request every project in the organization that the authenticated user can access.

    Args:
        skip (int): The number of results to skip.
        limit (int): The number of results to retrieve.

    Returns:
        dict: API response from Azure.
    """
    query_params = remove_empty_elements(
        {'api-version': '6.1-preview.4', '$skip': skip, '$top': limit})
    return self.ms_client.http_request(
        method='GET',
        url_suffix='_apis/projects',
        params=query_params,
        resp_type='json',
    )
def repository_list_request(self, project: str) -> dict:
    """
    Request the git repositories that belong to a project.

    Args:
        project (str): The name of the project to which the repositories belong to.

    Returns:
        dict: API response from Azure.
    """
    return self.ms_client.http_request(
        method='GET',
        url_suffix=f'{project}/_apis/git/repositories',
        params={'api-version': '6.1-preview.1'},
        resp_type='json',
    )
def users_query_request(self, query: str) -> dict:
    """
    Query users and groups in the organization by a name/e-mail prefix.

    Args:
        query (str): Users or organization query prefix.
            For example, If we want to retrieve information about the user 'Tom'
            we can enter the value of this argument as 'Tom'.

    Returns:
        dict: API response from Azure.
    """
    # Identity properties requested from the Identity Picker endpoint.
    requested_properties = ["DisplayName", "IsMru", "ScopeName", "SamAccountName", "Active",
                            "SubjectDescriptor", "Department", "JobTitle", "Mail", "MailNickname",
                            "PhysicalDeliveryOfficeName", "SignInAddress", "Surname", "Guest",
                            "TelephoneNumber", "Manager", "Description"]
    payload = {
        "query": query,
        "identityTypes": ["user", "group"],
        "operationScopes": ["ims", "source"],
        "properties": requested_properties,
    }
    return self.ms_client.http_request(
        method='POST',
        url_suffix='_apis/IdentityPicker/Identities',
        params={'api-version': '6.1-preview.1'},
        json_data=payload,
        resp_type='json',
    )
def get_pipeline_run_request(self, project: str, pipeline_id: str, run_id: str) -> dict:
    """
    Request the details of a single pipeline run.

    Args:
        project (str): The name of the project.
        pipeline_id (str): The ID of the pipeline to retrieve.
        run_id (str): The ID of the pipeline run to retrieve.

    Returns:
        dict: API response from Azure.
    """
    endpoint = f'{project}/_apis/pipelines/{pipeline_id}/runs/{run_id}'
    return self.ms_client.http_request(
        method='GET',
        url_suffix=endpoint,
        params={'api-version': '6.1-preview.1'},
        resp_type='json',
    )
def pipeline_run_list_request(self, project: str, pipeline_id: str) -> dict:
    """
    Request the list of runs for a project pipeline.

    Args:
        project (str): The name of the project.
        pipeline_id (str): The ID of the pipeline to retrieve.

    Returns:
        dict: API response from Azure.
    """
    endpoint = f'{project}/_apis/pipelines/{pipeline_id}/runs'
    return self.ms_client.http_request(
        method='GET',
        url_suffix=endpoint,
        params={'api-version': '6.1-preview.1'},
        resp_type='json',
    )
def pipeline_list_request(self, project: str, limit: int = None,
                          continuation_token: str = None) -> Response:
    """
    Request the pipelines of a project.

    Args:
        project (str): The name of the project.
        limit (int): The number of results to retrieve.
        continuation_token (str): A continuation token from a previous request, to retrieve the next page of results.

    Returns:
        Response: API response from Azure.
    """
    # The raw Response is returned (not the JSON body) because the caller
    # needs the continuation-token response header for pagination.
    query_params = remove_empty_elements({'api-version': '6.1-preview.1',
                                          '$top': limit,
                                          'continuationToken': continuation_token})
    return self.ms_client.http_request(
        method='GET',
        url_suffix=f'{project}/_apis/pipelines',
        params=query_params,
        resp_type='response',
    )
def branch_list_request(self, project: str, repository: str, limit: int = None,
                        continuation_token: str = None) -> Response:
    """
    Request the branches of a repository.

    Args:
        project (str): The name of the project.
        repository (str): The name of the project repository.
        limit (int): The number of results to retrieve.
        continuation_token (str): A continuation token from a previous request, to retrieve the next page of results.

    Returns:
        Response: API response from Azure.
    """
    # 'filter': 'heads' restricts the refs endpoint to branch refs only.
    # The raw Response is returned so the caller can read the
    # continuation-token header for pagination.
    query_params = remove_empty_elements({'api-version': '6.1-preview.1',
                                          '$top': limit,
                                          'continuationToken': continuation_token,
                                          'filter': 'heads'})
    return self.ms_client.http_request(
        method='GET',
        url_suffix=f'{project}/_apis/git/repositories/{repository}/refs',
        params=query_params,
        resp_type='response',
    )
def generate_pipeline_run_output(response: dict, project: str) -> dict:
    """
    Create XSOAR context output for retrieving pipeline run information.

    Args:
        response (dict): API response from Azure.
        project (str): The name of the pipeline project.

    Returns:
        dict: XSOAR command outputs.
    """
    outputs = copy.deepcopy(response)
    # arg_to_datetime returns None for a missing/unparsable value; guard it so
    # a response without 'createdDate' does not raise AttributeError here.
    created = arg_to_datetime(outputs.get('createdDate'))
    outputs['createdDate'] = created.isoformat() if created else outputs.get('createdDate')
    # Rename 'id' to 'run_id' - the context key field used across the integration.
    outputs['run_id'] = outputs.pop('id')
    outputs['project'] = project
    outputs['result'] = outputs.get('result', 'unknown')
    return outputs
def filter_pipeline_run_table(run: dict) -> dict:
    """
    Filter pipeline-run required information for representing to the user.

    Args:
        run (dict): Pipeline-run information.

    Returns:
        dict: Filtered pipeline-run information.
    """
    summary = {"pipeline_id": dict_safe_get(run, ['pipeline', 'id'])}
    summary["run_state"] = run.get('state')
    summary["creation_date"] = run.get('createdDate')
    summary["run_id"] = run.get('run_id')
    summary["result"] = run.get('result', 'unknown')
    return summary
def generate_pipeline_run_readable_information(outputs: Union[dict, list],
                                               message: str = "Pipeline Run Information:") -> str:
    """
    Create XSOAR readable output for retrieving pipe-line information.

    Args:
        outputs (dict/list): API response from Azure.
        message (str): XSOAR readable outputs table message.

    Returns:
        str: XSOAR readable outputs.
    """
    # Normalize to a list so a single run and a list of runs share one path.
    runs = outputs if isinstance(outputs, list) else [outputs]
    table_rows = [filter_pipeline_run_table(run) for run in runs]
    return tableToMarkdown(
        message,
        table_rows,
        headers=['pipeline_id', 'run_state', 'creation_date', 'run_id', 'result'],
        headerTransform=string_to_table_header
    )
def pipeline_run_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Run a pipeline.

    When 'polling' is true and the new run has not completed yet, this returns
    a scheduled 'azure-devops-pipeline-run-get' command instead of final
    results, so XSOAR keeps polling until the run finishes or times out.

    Args:
        client (Client): Azure DevOps API client.
        args (dict): Command arguments from XSOAR.

    Returns:
        CommandResults: outputs, readable outputs and raw response for XSOAR.
    """
    project = args['project']
    pipeline_id = args['pipeline_id']
    branch_name = args['branch_name']
    should_poll = argToBoolean(args.get('polling', False))
    # create new pipeline-run.
    response = client.pipeline_run_request(project, pipeline_id, branch_name)
    state = response.get('state')
    # Running polling flow
    if should_poll and state != 'completed':
        interval = arg_to_number(args.get('interval', 30))
        timeout = arg_to_number(args.get('timeout', 60))
        run_id = response.get('id')
        # Forward the original arguments plus the identifiers the scheduled
        # get-command needs; 'scheduled': True marks the follow-up invocation.
        polling_args = {
            'run_id': run_id,
            'interval': interval,
            'scheduled': True,
            'timeout': timeout,
            **args
        }
        # Schedule poll for the piplenine status
        scheduled_command = ScheduledCommand(
            command='azure-devops-pipeline-run-get',
            next_run_in_seconds=interval,
            timeout_in_seconds=timeout,
            args=polling_args)
        # Result with scheduled_command only - no update to the war room
        command_results = CommandResults(scheduled_command=scheduled_command)
    # Polling flow is done or user did not trigger the polling flow (should_poll = False)
    else:
        outputs = generate_pipeline_run_output(response, project)
        readable_output = generate_pipeline_run_readable_information(outputs)
        command_results = CommandResults(
            readable_output=readable_output,
            outputs_prefix='AzureDevOps.PipelineRun',
            outputs_key_field='run_id',
            outputs=outputs,
            raw_response=response
        )
    return command_results
def user_add_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Add a user, assign license and extensions and make them a member of a project group in an account.

    Args:
        client (Client): Azure DevOps API client.
        args (dict): Command arguments from XSOAR.

    Returns:
        CommandResults: outputs, readable outputs and raw response for XSOAR.

    Raises:
        ValueError: If the failure response does not carry an 'errors' list.
        DemistoException: With the API's error message when the operation failed.
    """
    user_email = args['user_email']
    account_license_type = args['account_license_type']
    group_type = args['group_type']
    project_id = args['project_id']
    response = client.user_add_request(user_email, account_license_type, group_type, project_id)
    # The API reports failure inside the body rather than via HTTP status:
    # surface the first error message, or a generic error if the shape is unexpected.
    if not dict_safe_get(response, ['operationResult', 'isSuccess']):
        error = dict_safe_get(response, ['operationResult', 'errors'])
        if not isinstance(error, list) or not error or len(error) == 0:
            raise ValueError('Error occurred. API response is not in the appropriate format.')
        error_message = error[0].get('value')
        raise DemistoException(error_message)
    # Only a small subset of the entitlement is shown in the readable table.
    user_information = {
        "id": dict_safe_get(response, ['userEntitlement', 'id']),
        "accountLicenseType": dict_safe_get(response,
                                            ['userEntitlement', 'accessLevel', 'accountLicenseType']),
        "lastAccessedDate": dict_safe_get(response, ['userEntitlement', 'lastAccessedDate']),
    }
    readable_output = tableToMarkdown(
        "User Information:",
        user_information,
        headers=['id', 'accountLicenseType', 'lastAccessedDate'],
        headerTransform=pascalToSpace
    )
    command_results = CommandResults(
        readable_output=readable_output,
        outputs_prefix='AzureDevOps.User',
        outputs_key_field='id',
        outputs=response.get('userEntitlement'),
        raw_response=response
    )
    return command_results
def user_remove_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Remove the user from all project memberships.

    Args:
        client (Client): Azure DevOps API client.
        args (dict): Command arguments from XSOAR.

    Returns:
        CommandResults: outputs, readable outputs and raw response for XSOAR.
    """
    user_id = args['user_id']
    client.user_remove_request(user_id)
    return CommandResults(
        readable_output=f'User {user_id} was successfully removed from the organization.'
    )
def filter_pull_request_table(pull_request: dict) -> dict:
    """
    Filter pull-request required information for representing to the user.

    Args:
        pull_request (dict): Pull-request information.

    Returns:
        dict: Filtered pull-request information.
    """
    summary = {}
    summary["repository_id"] = dict_safe_get(pull_request, ['repository', 'id'])
    summary["repository_name"] = dict_safe_get(pull_request, ['repository', 'name'])
    summary["project_id"] = dict_safe_get(pull_request, ['repository', 'project', 'id'])
    summary["project_name"] = dict_safe_get(pull_request, ['repository', 'project', 'name'])
    summary["pull_request_id"] = pull_request.get('pullRequestId')
    summary["status"] = pull_request.get('status')
    summary["title"] = pull_request.get('title')
    summary["description"] = pull_request.get('description')
    summary["created_by"] = dict_safe_get(pull_request, ['createdBy', 'displayName'])
    summary["creation_date"] = pull_request.get('creationDate')
    return summary
def generate_pull_request_readable_information(response: Union[dict, list],
                                               message: str = "Pull Request Information:") -> str:
    """
    Create XSOAR readable output for retrieving pull-request information.

    Args:
        response (dict/list): API response from Azure.
        message (str): XSOAR readable outputs table message.

    Returns:
        str: XSOAR readable outputs.
    """
    # Accept a single pull request or a list of them uniformly.
    pull_requests = response if isinstance(response, list) else [response]
    table_rows = [filter_pull_request_table(pr) for pr in pull_requests]
    return tableToMarkdown(
        message,
        table_rows,
        headers=['title', 'description', 'created_by', 'pull_request_id',
                 'repository_name', 'repository_id', 'project_name', 'project_id', 'creation_date'],
        headerTransform=string_to_table_header
    )
def pull_request_create_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Create a new pull-request.

    Args:
        client (Client): Azure DevOps API client.
        args (dict): Command arguments from XSOAR.

    Returns:
        CommandResults: outputs, readable outputs and raw response for XSOAR.
    """
    project = args['project']
    repository_id = args['repository_id']
    title = args['title']
    description = args['description']
    reviewers = [{"id": reviewer_id} for reviewer_id in argToList(args['reviewers_ids'])]

    def _as_ref(branch: str) -> str:
        # Azure expects fully-qualified ref names ('refs/heads/<branch>').
        return branch if branch.startswith('refs/') else f'refs/heads/{branch}'

    source_branch = _as_ref(args['source_branch'])
    target_branch = _as_ref(args['target_branch'])
    response = client.pull_request_create_request(
        project, repository_id, source_branch, target_branch, title, description, reviewers)
    outputs = copy.deepcopy(response)
    outputs['creationDate'] = arg_to_datetime(response.get('creationDate')).isoformat()
    return CommandResults(
        readable_output=generate_pull_request_readable_information(outputs),
        outputs_prefix='AzureDevOps.PullRequest',
        outputs_key_field='pullRequestId',
        outputs=outputs,
        raw_response=response
    )
def pull_request_update_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Update a pull request.

    Args:
        client (Client): Azure DevOps API client.
        args (dict): Command arguments from XSOAR.

    Returns:
        CommandResults: outputs, readable outputs and raw response for XSOAR.
    """
    project = args['project']
    repository_id = args['repository_id']
    pull_request_id = args['pull_request_id']
    title = args.get('title')
    description = args.get('description')
    status = args.get('status')
    if not any((title, description, status)):
        raise Exception('At least one of the arguments: title, description, or status must be provided.')
    # Completing a PR requires the commit currently at the head of the source
    # branch, so fetch the PR first in that case.
    last_merge_source_commit = None
    if status == "completed":
        pr_data = client.pull_requests_get_request(project, repository_id, pull_request_id)
        last_merge_source_commit = pr_data.get("lastMergeSourceCommit")
    response = client.pull_request_update_request(
        project, repository_id, pull_request_id, title, description, status, last_merge_source_commit)
    outputs = copy.deepcopy(response)
    outputs['creationDate'] = arg_to_datetime(response.get('creationDate')).isoformat()
    return CommandResults(
        readable_output=generate_pull_request_readable_information(outputs),
        outputs_prefix='AzureDevOps.PullRequest',
        outputs_key_field='pullRequestId',
        outputs=outputs,
        raw_response=response
    )
def pull_request_get_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Retrieve pull-request information.

    Args:
        client (Client): Azure DevOps API client.
        args (dict): Command arguments from XSOAR.

    Returns:
        CommandResults: outputs, readable outputs and raw response for XSOAR.
    """
    response = client.pull_requests_get_request(
        args['project'], args['repository_id'], args['pull_request_id'])
    outputs = copy.deepcopy(response)
    outputs['creationDate'] = arg_to_datetime(response.get('creationDate')).isoformat()
    return CommandResults(
        readable_output=generate_pull_request_readable_information(outputs),
        outputs_prefix='AzureDevOps.PullRequest',
        outputs_key_field='pullRequestId',
        outputs=outputs,
        raw_response=response
    )
def pull_requests_list_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Retrieve pull requests in repository.

    Args:
        client (Client): Azure DevOps API client.
        args (dict): Command arguments from XSOAR.

    Returns:
        CommandResults: outputs, readable outputs and raw response for XSOAR.

    Raises:
        Exception: If 'page' or 'limit' is below 1.
    """
    project = args['project']
    repository = args['repository']
    page = arg_to_number(args.get('page') or '1')
    limit = arg_to_number(args.get('limit') or '50')
    if page < 1 or limit < 1:
        # The check accepts 1, so the message must say ">= 1"
        # (the previous "greater than 1" wording contradicted the check).
        raise Exception('Page and limit arguments must be greater than or equal to 1.')
    offset = (page - 1) * limit
    response = client.pull_requests_list_request(project, repository, offset, limit)
    readable_message = f'Pull Request List:\n Current page size: {limit}\n Showing page {page} out of ' \
                       f'others that may exist.'
    # Default to an empty list so a malformed response does not raise here.
    outputs = copy.deepcopy(response.get('value', []))
    for pr in outputs:
        pr['creationDate'] = arg_to_datetime(pr.get('creationDate')).isoformat()
    readable_output = generate_pull_request_readable_information(outputs, message=readable_message)
    command_results = CommandResults(
        readable_output=readable_output,
        outputs_prefix='AzureDevOps.PullRequest',
        outputs_key_field='pullRequestId',
        outputs=outputs,
        raw_response=response
    )
    return command_results
def project_list_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Retrieve all projects in the organization that the authenticated user has access to.

    Args:
        client (Client): Azure DevOps API client.
        args (dict): Command arguments from XSOAR.

    Returns:
        CommandResults: outputs, readable outputs and raw response for XSOAR.

    Raises:
        Exception: If 'page' or 'limit' is below 1.
    """
    page = arg_to_number(args.get('page') or '1')
    limit = arg_to_number(args.get('limit') or '50')
    if page < 1 or limit < 1:
        # The check accepts 1, so the message must say ">= 1"
        # (the previous "greater than 1" wording contradicted the check).
        raise Exception('Page and limit arguments must be greater than or equal to 1.')
    offset = (page - 1) * limit
    response = client.project_list_request(offset, limit)
    readable_message = f'Project List:\n Current page size: {limit}\n Showing page {page} out others that may exist.'
    outputs = copy.deepcopy(response.get('value', []))
    output_headers = ['name', 'id', 'state', 'revision', 'visibility', 'lastUpdateTime']
    for project in outputs:
        project['lastUpdateTime'] = arg_to_datetime(project.get('lastUpdateTime')).isoformat()
    readable_output = tableToMarkdown(
        readable_message,
        outputs,
        headers=output_headers,
        headerTransform=pascalToSpace
    )
    command_results = CommandResults(
        readable_output=readable_output,
        outputs_prefix='AzureDevOps.Project',
        outputs_key_field='id',
        outputs=outputs,
        raw_response=response
    )
    return command_results
def repository_list_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Retrieve git repositories in the organization project.

    Args:
        client (Client): Azure DevOps API client.
        args (dict): Command arguments from XSOAR.

    Returns:
        CommandResults: outputs, readable outputs and raw response for XSOAR.

    Raises:
        Exception: If 'page' or 'limit' is below 1.
    """
    project = args['project']
    page = arg_to_number(args.get('page') or '1')
    limit = arg_to_number(args.get('limit') or '50')
    if page < 1 or limit < 1:
        # The check accepts 1, so the message must say ">= 1"
        # (the previous "greater than 1" wording contradicted the check).
        raise Exception('Page and limit arguments must be greater than or equal to 1.')
    # The API is not paginated server-side here; slice the full list locally.
    start = (page - 1) * limit
    end = start + limit
    readable_message = f'Repositories List:\n Current page size: {limit}\n Showing page {page} out others that may exist.'
    response = client.repository_list_request(project)
    outputs = []
    if response.get('count') and response.get('count') >= start:
        min_index = min(response.get('count'), end)
        for repo in response.get('value')[start:min_index]:
            outputs.append(repo)
    # Rename 'size' in the readable table only, to make the unit explicit.
    readable_data = copy.deepcopy(outputs)
    for repo in readable_data:
        repo["size (Bytes)"] = repo.pop("size")
    readable_output = tableToMarkdown(
        readable_message,
        readable_data,
        headers=['id', 'name', 'webUrl', 'size (Bytes)'],
        headerTransform=pascalToSpace
    )
    command_results = CommandResults(
        readable_output=readable_output,
        outputs_prefix='AzureDevOps.Repository',
        outputs_key_field='id',
        outputs=outputs,
        raw_response=response
    )
    return command_results
def users_query_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Query users in the organization.

    Args:
        client (Client): Azure DevOps API client.
        args (dict): Command arguments from XSOAR.

    Returns:
        CommandResults: outputs, readable outputs and raw response for XSOAR.

    Raises:
        Exception: If 'page' or 'limit' is below 1.
    """
    query = args['query']
    page = arg_to_number(args.get('page') or '1')
    limit = arg_to_number(args.get('limit') or '50')
    if page < 1 or limit < 1:
        # The check accepts 1, so the message must say ">= 1"
        # (the previous "greater than 1" wording contradicted the check).
        raise Exception('Page and limit arguments must be greater than or equal to 1.')
    # The identities endpoint is not paginated server-side; slice locally.
    start = (page - 1) * limit
    end = start + limit
    readable_message = f'Users List:\n Current page size: {limit}\n Showing page {page} out others that may exist.'
    response = client.users_query_request(query)
    outputs = []
    results = response.get('results')
    readable_user_information = []
    if results and len(results) > 0:
        identities = results[0].get('identities')
        if len(identities) >= start:
            min_index = min(len(identities), end)
            for identity in identities[start:min_index]:
                # Updating the id key as well.
                identity["id"] = identity.get("localId")
                outputs.append(identity)
                # Only AAD-backed ('vsd') identities expose a sign-in address.
                if identity.get("localDirectory") == "vsd":
                    readable_user_information.append(
                        {"entityType": identity.get("entityType"), "id": identity.get("localId"),
                         "email": identity.get("signInAddress")})
    readable_output = tableToMarkdown(
        readable_message,
        readable_user_information,
        headers=['email', 'entityType', 'id'],
        headerTransform=pascalToSpace
    )
    command_results = CommandResults(
        readable_output=readable_output,
        outputs_prefix='AzureDevOps.User',
        outputs_key_field='id',
        outputs=outputs,
        raw_response=response
    )
    return command_results
def pipeline_run_get_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Retrieve pipeline run information.

    When invoked as a scheduled polling step ('scheduled' argument true) and the
    run is still in progress, it re-schedules itself instead of returning results.

    Args:
        client (Client): Azure DevOps API client.
        args (dict): Command arguments from XSOAR.

    Returns:
        CommandResults: outputs, readable outputs and raw response for XSOAR.
    """
    project = args['project']
    pipeline_id = args['pipeline_id']
    run_id = args['run_id']
    scheduled = argToBoolean(args.get('scheduled', False))
    response = client.get_pipeline_run_request(project, pipeline_id, run_id)
    # This is part of a scheduled command run
    state = response.get("state")
    if scheduled and state != 'completed':
        # schedule next poll
        scheduled_command = ScheduledCommand(
            command='azure-devops-pipeline-run-get',
            next_run_in_seconds=arg_to_number(args.get('interval', 30)),
            timeout_in_seconds=arg_to_number(args.get('timeout', 60)),
            args=args,
        )
        # result with scheduled_command only - no update to the war room
        command_results = CommandResults(scheduled_command=scheduled_command)
    else:
        # Run finished (or this is a plain, non-polling invocation) - build outputs.
        outputs = generate_pipeline_run_output(response, project)
        readable_output = generate_pipeline_run_readable_information(outputs)
        command_results = CommandResults(
            readable_output=readable_output,
            outputs_prefix='AzureDevOps.PipelineRun',
            outputs_key_field='run_id',
            outputs=outputs,
            raw_response=response
        )
    return command_results
def pipeline_run_list_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Retrieve project pipeline runs list. The command retrieves up to the top 10000 runs for a particular pipeline.

    Args:
        client (Client): Azure DevOps API client.
        args (dict): Command arguments from XSOAR.

    Returns:
        CommandResults: outputs, readable outputs and raw response for XSOAR.

    Raises:
        Exception: If 'page' or 'limit' is below 1.
    """
    project = args['project']
    pipeline_id = args['pipeline_id']
    page = arg_to_number(args.get('page') or '1')
    limit = arg_to_number(args.get('limit') or '50')
    if page < 1 or limit < 1:
        # The check accepts 1, so the message must say ">= 1"
        # (the previous "greater than 1" wording contradicted the check).
        raise Exception('Page and limit arguments must be greater than or equal to 1.')
    # The runs endpoint is not paginated server-side; slice locally.
    start = (page - 1) * limit
    end = start + limit
    readable_message = f'Pipeline runs List:\n Current page size: {limit}\n Showing page {page} out others that may exist.'
    readable_output = readable_message
    response = client.pipeline_run_list_request(project, pipeline_id)
    outputs = []
    if response.get('count') and response.get('count') >= start:
        min_index = min(response.get('count'), end)
        for run in response.get('value')[start:min_index]:
            data = generate_pipeline_run_output(run, project)
            outputs.append(data)
        readable_output = generate_pipeline_run_readable_information(outputs, message=readable_message)
    command_results = CommandResults(
        readable_output=readable_output,
        outputs_prefix='AzureDevOps.PipelineRun',
        outputs_key_field='run_id',
        outputs=outputs,
        raw_response=response
    )
    return command_results
def get_pagination_continuation_token(limit: int, page: int, client_request: Callable, args: dict) -> str:
    """
    Get next continuation token for request pagination.

    Azure pages these APIs with an opaque continuation token rather than a
    numeric offset; to reach page N we request the first (N-1)*limit results
    and read the next-page token from the response headers.

    Args:
        limit (): Number of elements to retrieve.
        page (): Page number.
        client_request (Callable): Client request function.
        args (dict): Request function arguments.

    Returns:
        str: Continuation token
    """
    skipped_results = limit * (page - 1)
    response = client_request(limit=skipped_results, **args)
    return response.headers.get('x-ms-continuationtoken')
def pipeline_list_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Retrieve project pipelines list.

    Args:
        client (Client): Azure DevOps API client.
        args (dict): Command arguments from XSOAR.

    Returns:
        CommandResults: outputs, readable outputs and raw response for XSOAR.

    Raises:
        Exception: If 'page' or 'limit' is below 1.
    """
    project = args['project']
    page = arg_to_number(args.get('page') or '1')
    limit = arg_to_number(args.get('limit') or '50')
    readable_message = f'Pipelines List:\n Current page size: {limit}\n Showing page {page} out others that may exist.'
    if page < 1 or limit < 1:
        # The check accepts 1, so the message must say ">= 1"
        # (the previous "greater than 1" wording contradicted the check).
        raise Exception('Page and limit arguments must be greater than or equal to 1.')
    # This endpoint paginates with a continuation token; for pages beyond the
    # first, obtain the token pointing at the requested page.
    continuation_token = None
    if page > 1:
        continuation_token = get_pagination_continuation_token(limit=limit, page=page,
                                                               client_request=client.pipeline_list_request,
                                                               args={"project": project})
        if not continuation_token:
            # No token means the requested page is past the end of the data.
            return CommandResults(
                readable_output=readable_message,
                outputs_prefix='AzureDevOps.Pipeline',
                outputs=[],
                raw_response=[]
            )
    response = client.pipeline_list_request(project, limit, continuation_token).json()
    # Default to an empty list so a malformed response does not raise here.
    outputs = copy.deepcopy(response.get("value", []))
    for pipeline in outputs:
        pipeline['project'] = project
    readable_output = tableToMarkdown(
        readable_message,
        outputs,
        headers=['id', 'name', 'revision', 'folder'],
        headerTransform=string_to_table_header
    )
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix='AzureDevOps.Pipeline',
        outputs_key_field='id',
        outputs=outputs,
        raw_response=response
    )
def branch_list_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Retrieve repository branches list.

    Args:
        client (Client): Azure DevOps API client.
        args (dict): Command arguments from XSOAR.

    Returns:
        CommandResults: outputs, readable outputs and raw response for XSOAR.

    Raises:
        Exception: If 'page' or 'limit' is below 1.
    """
    project = args['project']
    repository = args['repository']
    page = arg_to_number(args.get('page') or '1')
    limit = arg_to_number(args.get('limit') or '50')
    readable_message = f'Branches List:\n Current page size: {limit}\n Showing page {page} out others that may exist.'
    if page < 1 or limit < 1:
        # The check accepts 1, so the message must say ">= 1"
        # (the previous "greater than 1" wording contradicted the check).
        raise Exception('Page and limit arguments must be greater than or equal to 1.')
    # This endpoint paginates with a continuation token; for pages beyond the
    # first, obtain the token pointing at the requested page.
    continuation_token = None
    if page > 1:
        continuation_token = get_pagination_continuation_token(limit=limit, page=page,
                                                               client_request=client.branch_list_request,
                                                               args={"project": project, "repository": repository})
        if not continuation_token:
            # No token means the requested page is past the end of the data.
            return CommandResults(
                readable_output=readable_message,
                outputs_prefix='AzureDevOps.Branch',
                outputs_key_field='name',
                outputs=[],
                raw_response=[]
            )
    response = client.branch_list_request(project, repository, limit, continuation_token).json()
    outputs = copy.deepcopy(response.get("value", []))
    for branch in outputs:
        branch['project'] = project
        branch['repository'] = repository
    readable_output = tableToMarkdown(
        readable_message,
        outputs,
        headers=['name'],
        headerTransform=string_to_table_header
    )
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix='AzureDevOps.Branch',
        outputs_key_field='name',
        outputs=outputs,
        raw_response=response
    )
# --Mirroring Commands--
def get_update_args(delta: dict, data: dict) -> dict:
    """
    Change the updated field names to fit the pull-request update command.

    Args:
        delta (dict): Updated fields from XSOAR incident mirroring.
        data (dict): Incident source fields from XSOAR incident mirroring.

    Returns:
        dict: Updated argument information.
    """
    # Identifiers come from the incident data; the mutable fields come from
    # the delta of what actually changed.
    return {
        'project': data.get('project'),
        'repository_id': data.get('repository_id'),
        'pull_request_id': data.get('pull_request_id'),
        'title': delta.get('title'),
        'description': delta.get('description'),
        'status': delta.get('status'),
    }
def update_remote_system_command(client: Client, args: Dict[str, Any]) -> str:
    """
    Pushes local changes to the remote system

    Mirroring is best-effort: any failure is logged and swallowed so the
    mirroring loop never breaks, and the remote incident ID is always returned.

    Args:
        client (Client): Azure DevOps API client.
        args (dict): Command arguments from XSOAR.
            args['data']: the data to send to the remote system
            args['entries']: the entries to send to the remote system
            args['incident_changed']: boolean telling us if the local incident indeed changed or not
            args['remote_incident_id']: the remote incident id

    Returns:
        str: The new ID of the updated incident.
    """
    remote_args = UpdateRemoteSystemArgs(args)
    if remote_args.delta:
        demisto.debug(f'Got the following delta keys {str(list(remote_args.delta.keys()))} to update Azure DevOps '
                      f'incident {remote_args.remote_incident_id}')
    else:
        demisto.debug('There is no delta fields in Azure DevOps\n')
    try:
        if remote_args.incident_changed:
            # Translate the mirrored delta into pull-request-update arguments.
            update_args = get_update_args(remote_args.delta, remote_args.data)
            demisto.debug(f'Sending incident with remote ID [{remote_args.remote_incident_id}] to Azure DevOps\n')
            pull_request_update_command(client, update_args)
        else:
            demisto.debug(f'Skipping updating remote incident fields [{remote_args.remote_incident_id}] '
                          f'as it is not new nor changed')
    except Exception as e:
        # Deliberate best-effort: log the failure but do not break mirroring.
        demisto.info(f"Error in Azure DevOps outgoing mirror for incident {remote_args.remote_incident_id} \n"
                     f"Error message: {str(e)}")
    finally:
        # NOTE(review): 'return' inside 'finally' also suppresses any exception
        # not caught above - intentional here so the incident ID is always returned.
        return remote_args.remote_incident_id
def get_mapping_fields_command() -> GetMappingFieldsResponse:
    """
    Returns the list of fields for an incident type.

    Builds the outgoing-mirroring scheme from OUTGOing_MIRRORED_FIELDS so the
    XSOAR mapping wizard can offer each mirrorable field.
    """
    demisto.debug(f'Collecting incident mapping for incident type - "{INCIDENT_TYPE_NAME}"')
    scheme = SchemeTypeMapping(type_name=INCIDENT_TYPE_NAME)
    for field_name, field_description in OUTGOING_MIRRORED_FIELDS.items():
        scheme.add_field(name=field_name, description=field_description)
    mapping_response = GetMappingFieldsResponse()
    mapping_response.add_scheme_type(scheme)
    return mapping_response
# --Authorization Commands--
def start_auth(client) -> CommandResults:
    """Begin the device-code authorization flow and return the user instructions."""
    instructions = client.ms_client.start_auth('!azure-devops-auth-complete')
    return CommandResults(readable_output=instructions)
def complete_auth(client) -> str:
    """Finish the authorization flow by exchanging the device code for an access token."""
    # Fetching the token validates (and caches) the completed authorization.
    client.ms_client.get_access_token()
    return 'Authorization completed successfully.'
def test_connection(client) -> str:
    """Verify that an access token can be obtained with the configured credentials."""
    try:
        client.ms_client.get_access_token()
        return 'Success!'
    except Exception as err:
        return f'Authorization Error: \n{err}'
def reset_auth() -> CommandResults:
    """Clear the cached authorization state so the auth flow can be restarted."""
    set_integration_context({})
    return CommandResults(readable_output='Authorization was reset successfully. Run **!azure-devops-auth-start** to '
                                          'start the authentication process.')
def parse_incident(pull_request: dict, integration_instance: str) -> dict:
    """
    Parse pull request to XSOAR Incident.

    Args:
        pull_request (dict): Pull-request information.
        integration_instance (str): The name of the integration instance.

    Returns:
        dict: XSOAR Incident.
    """
    incident_data = filter_pull_request_table(pull_request)
    # Mark the incident for outgoing mirroring back to Azure DevOps.
    incident_data['mirror_direction'] = 'Out'
    incident_data['mirror_instance'] = integration_instance
    incident_name = "Azure DevOps - Pull request ID: " + str(incident_data.get('pull_request_id'))
    return {'name': incident_name, 'rawJSON': json.dumps(incident_data)}
def count_active_pull_requests(project: str, repository: str, client: Client, first_fetch: datetime = None) -> int:
    """
    Count the number of active pull-requests in the repository.

    Pages through the active pull requests 100 at a time. When 'first_fetch'
    is given, only pull requests created after it are counted; counting stops
    at the first older one (results arrive newest-first - TODO confirm against
    the API ordering this relies on).

    Args:
        project (str): The name of the project which the pull requests belongs to.
        repository (str): The repository name of the pull request's target branch.
        client (Client): Azure DevOps API client.
        first_fetch (datetime): Indicated the oldest pull-request time.

    Returns:
        int: Pull-requests number.
    """
    count = 0
    limit = 100
    # Safety cap: never issue more than 100 page requests (10,000 PRs).
    max_iterations = 100
    while max_iterations > 0:
        max_iterations -= 1
        # 'count' so far doubles as the skip offset for the next page.
        response = client.pull_requests_list_request(project, repository, skip=count, limit=limit)
        if response.get("count") == 0:
            break
        if first_fetch:
            # Oldest PR in this page; 'Z' is stripped to keep the value naive
            # like 'first_fetch' - presumably both are UTC (verify).
            last_pr_date = arg_to_datetime(
                response.get("value")[response.get("count") - 1].get('creationDate').replace('Z', ''))
            if last_pr_date < first_fetch:  # If the oldest pr in the result is older than 'first_fetch' argument.
                for pr in response.get("value"):
                    if arg_to_datetime(pr.get('creationDate').replace('Z', '')) > first_fetch:
                        count += 1
                    else:  # Stop counting
                        # Sentinel: forces the outer while-loop to terminate too.
                        max_iterations = -1
                        break
            else:
                count += response.get("count")
        else:
            count += response.get("count")
    return count
def get_last_fetch_incident_index(project: str, repository: str, client: Client, last_id: int):
    """
    Retrieve the index of the last fetched pull-request.
    index if the pull request is no active anymore - return -1.
    Args:
        project (str): The name of the project which the pull requests belongs to.
        repository (str): The repository name of the pull request's target branch.
        client (Client): Azure DevOps API client.
        last_id (int): Last fetch pull-request ID.
    Returns:
        int: Last fetched pull-request.
    """
    count = 0
    limit = 100  # page size for the paginated API
    max_iterations = 100  # hard cap on the number of pages scanned
    while max_iterations > 0:
        response = client.pull_requests_list_request(project, repository, skip=count, limit=limit)
        if response.get("count") == 0:
            break
        pr_ids = [pr.get('pullRequestId') for pr in response.get('value')]
        if last_id in pr_ids:
            # Global index = position within this page + number of PRs skipped.
            return pr_ids.index(last_id) + count
        else:
            # NOTE(review): assumes pages are ordered newest-first, so once every ID
            # on a page is smaller than last_id, later pages cannot contain it.
            if max(pr_ids) < last_id:
                break
            count += response.get("count")
        max_iterations -= 1
    return -1  # last_id is no longer among the active pull-requests
def get_closest_index(project: str, repository: str, client: 'Client', last_id: int) -> int:
    """
    This method used for find the closest index to the last fetched pull-request ID.
    This method is used to find the ID of the next pull-request after the last_id.
    The correctness of the method stems from the fact that the pull-request ID is an incremental number,
    and from the way the pull-requests are retrieved from the API (newest-first pages).
    Args:
        project (str): The name of the project which the pull requests belongs to.
        repository (str): The repository name of the pull request's target branch.
        client (Client): Azure DevOps API client.
        last_id (int): Last fetch pull-request ID.
    Returns:
        int: Closest index to the last fetched pull-request ID, or -1 when not found.
    """
    count = 0  # PRs seen so far; also used as the skip offset
    limit = 100  # page size for the paginated API
    max_iterations = 100  # hard cap on the number of pages scanned
    while max_iterations > 0:
        response = client.pull_requests_list_request(project, repository, skip=count, limit=limit)
        if response.get("count") == 0:
            break
        pr_ids = [pr.get('pullRequestId') for pr in response.get('value')]
        min_id = min(pr_ids)
        max_id = max(pr_ids)
        if min_id < last_id < max_id:  # The closest index is in this page.
            # Walk the page (largest ID first) and remember the smallest ID
            # that is still >= last_id — that is the closest newer PR.
            closest_id = -1
            for pr_id in pr_ids:
                if pr_id < last_id:
                    break
                closest_id = pr_id
            return pr_ids.index(closest_id) + count
        if max_id < last_id:  # The closest index is in the previous page.
            return count - 1
        # Whole page is newer than last_id — keep paging.
        count += response.get("count")
        max_iterations -= 1
        # Fix: removed the redundant trailing `if response.get("count") == 0: break`;
        # that condition is already checked (and breaks) at the top of the loop.
    return -1
def is_new_pr(project: str, repository: str, client: 'Client', last_id: int) -> bool:
    """
    Validate if there is new pull-request in the repository.
    Args:
        project (str): The name of the project which the pull requests belongs to.
        repository (str): The repository name of the pull request's target branch.
        client (Client): Azure DevOps API client.
        last_id (int): Last fetch pull-request ID.
    Returns:
        bool: True if there is new pull-request in the repository, otherwise False.
    """
    # Only the newest PR is needed to decide, so request a single item.
    response = client.pull_requests_list_request(project, repository, skip=0, limit=1)
    num_prs = response.get("count", 0)
    values = response.get('value') or []
    last_pr_id = values[0].get('pullRequestId', 0) if len(values) > 0 else None
    # Fix: guard `last_pr_id is None` explicitly — the original compared
    # None <= int, which raises TypeError when the API reports a non-zero
    # count but returns an empty 'value' list.
    if num_prs == 0 or last_pr_id is None or last_pr_id <= last_id:
        demisto.debug(f'Number of PRs is: {num_prs}. Last fetched PR id: {last_pr_id}')
        return False
    return True
def fetch_incidents(client, project: str, repository: str, integration_instance: str, max_fetch: int = 50,
                    first_fetch: str = None) -> None:
    """
    Fetch new active pull-requests from repository.
    Args:
        client (Client): Azure DevOps API client.
        project (str): The name of the project which the pull requests belongs to.
        repository (str): The repository name of the pull request's target branch.
        integration_instance (str): The name of the integration instance.
        max_fetch (int): Maximum incidents for one fetch.
        first_fetch (str): Indicated the date from which to start fetching pull-requests.
    """
    last_run = demisto.getLastRun()
    last_id = last_run.get("last_id", None)
    if last_id:
        if not is_new_pr(project, repository, client, last_id):  # There is no new pr
            demisto.incidents([])
            return
        # Locate where we stopped last time within the paginated PR list.
        last_id_index = get_last_fetch_incident_index(project, repository, client, last_id)
        if last_id_index == -1:  # Last pull-request state is no-active
            last_id_index = get_closest_index(project, repository, client, last_id) + 1
    else:  # In the first iteration of fetch-incident ,
        # we have to find the oldest active pull-request index.
        if first_fetch:
            first_fetch = arg_to_datetime(first_fetch)
        last_id_index = count_active_pull_requests(project, repository, client, first_fetch)
    # Fetch the max_fetch PRs just before the last-fetched index; clamp at page start.
    skip = last_id_index - max_fetch
    if skip <= 0:
        skip = 0
        max_fetch = last_id_index
    response = client.pull_requests_list_request(project, repository, skip=skip, limit=max_fetch)
    # Reverse so incidents are created oldest-first; 'last' ends as the newest PR id.
    pr_data = reversed(response.get("value"))
    last = None
    incidents = []
    for pr in pr_data:
        incidents.append(parse_incident(pr, integration_instance))
        last = pr.get('pullRequestId')
    if last:
        demisto.setLastRun({
            'last_id': last
        })
    demisto.incidents(incidents)
def main() -> None:
    """Entry point: read instance params, build the client, and dispatch the command."""
    params: Dict[str, Any] = demisto.params()
    args: Dict[str, Any] = demisto.args()
    client_id = params['client_id']
    organization = params['organization']
    verify_certificate: bool = not params.get('insecure', False)
    proxy = params.get('proxy', False)
    is_mirroring = params.get('is_mirroring', False)
    command = demisto.command()
    demisto.debug(f'Command being called is {command}')
    try:
        requests.packages.urllib3.disable_warnings()
        client: Client = Client(
            client_id=client_id,
            organization=organization,
            verify=verify_certificate,
            proxy=proxy)
        # Command dispatch: each branch maps an XSOAR command name to its handler.
        if command == 'azure-devops-auth-start':
            return_results(start_auth(client))
        elif command == 'azure-devops-auth-complete':
            return_results(complete_auth(client))
        elif command == 'azure-devops-auth-test':
            return_results(test_connection(client))
        elif command == 'azure-devops-user-add':
            return_results(user_add_command(client, args))
        elif command == 'azure-devops-user-remove':
            return_results(user_remove_command(client, args))
        elif command == 'azure-devops-pull-request-create':
            return_results(pull_request_create_command(client, args))
        elif command == 'azure-devops-pull-request-get':
            return_results(pull_request_get_command(client, args))
        elif command == 'azure-devops-pull-request-update':
            return_results(pull_request_update_command(client, args))
        elif command == 'azure-devops-pull-request-list':
            return_results(pull_requests_list_command(client, args))
        elif command == 'azure-devops-project-list':
            return_results(project_list_command(client, args))
        elif command == 'azure-devops-repository-list':
            return_results(repository_list_command(client, args))
        elif command == 'azure-devops-user-list':
            return_results(users_query_command(client, args))
        elif command == 'azure-devops-pipeline-run-get':
            return_results(pipeline_run_get_command(client, args))
        elif command == 'azure-devops-pipeline-run-list':
            return_results(pipeline_run_list_command(client, args))
        elif command == 'azure-devops-pipeline-list':
            return_results(pipeline_list_command(client, args))
        elif command == 'azure-devops-branch-list':
            return_results(branch_list_command(client, args))
        elif command == 'test-module':
            # OAuth device flow cannot run inside test-module; point the user
            # at the dedicated auth-test command instead.
            return_results(
                'The test module is not functional, '
                'run the azure-devops-auth-test command instead.')
        elif command == 'fetch-incidents':
            integration_instance = demisto.integrationInstance()
            fetch_incidents(client, params.get('project'), params.get('repository'), integration_instance,
                            arg_to_number(params.get('max_fetch', 50)), params.get('first_fetch'))
        elif command == 'azure-devops-auth-reset':
            return_results(reset_auth())
        elif command == 'azure-devops-pipeline-run':
            return_results(pipeline_run_command(client, args))
        elif command == 'get-mapping-fields':
            return_results(get_mapping_fields_command())
        elif command == 'update-remote-system':
            # Only mirror outgoing changes when mirroring is enabled on the instance.
            if is_mirroring:
                return_results(update_remote_system_command(client, args))
        else:
            raise NotImplementedError(f'{command} command is not implemented.')
    except Exception as e:
        # Log the full traceback server-side, surface a short error to the user.
        demisto.error(traceback.format_exc())
        return_error(str(e))
from MicrosoftApiModule import *  # noqa: E402

# XSOAR may execute integrations under '__main__' or the Python 2/3 builtins
# module names, so all three are accepted as the script entry point.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| 35.598198 | 123 | 0.613217 |
cc107882b08662f5008275ceceac29457af32609
| 521 |
py
|
Python
|
codeit/algorithm/greedy_min_fee.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
codeit/algorithm/greedy_min_fee.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
codeit/algorithm/greedy_min_fee.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
def min_fee(pages_to_print):
    """Return the minimum total waiting fee for printing jobs.

    Greedy strategy: run the shortest jobs first. A job that takes m minutes
    while k jobs (itself included) are still waiting contributes k * m.
    """
    ordered = sorted(pages_to_print)
    n = len(ordered)
    # Job at position i keeps (n - i) customers waiting while it runs.
    return sum((n - i) * minutes for i, minutes in enumerate(ordered))
if __name__ == '__main__':
    # 'util.test_value' is a project-local assertion helper (expected vs actual).
    from util import test_value
    test_value(min_fee([6, 11, 4, 1]), 39)
    test_value(min_fee([3, 2, 1]), 10)
    test_value(min_fee([3, 1, 4, 3, 2]), 32)
    test_value(min_fee([8, 4, 2, 3, 9, 23, 6, 8]), 188)
| 27.421053 | 55 | 0.642994 |
cc111a60b1c3286389be46f24d927396c94313b7
| 363 |
py
|
Python
|
Algorithms/Sorting/closest_numbers.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
Algorithms/Sorting/closest_numbers.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
Algorithms/Sorting/closest_numbers.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# HackerRank "Closest Numbers": read N and N integers from stdin, then print
# every pair of values whose absolute difference equals the global minimum,
# in ascending order, on a single space-separated line.
N = int(input().strip())
arr = [int(i) for i in input().strip().split()]
sorted_arr = sorted(arr)
# After sorting, the minimum difference can only occur between adjacent values.
diff_arr = [sorted_arr[i + 1] - sorted_arr[i] for i in range(0, N - 1)]
min_diff = min(diff_arr)
# All adjacent positions that achieve the minimum difference.
idx = [i for i in range(len(diff_arr)) if diff_arr[i] == min_diff]
for i in idx:
    print(sorted_arr[i], sorted_arr[i + 1], end=' ')
print()
| 25.928571 | 71 | 0.636364 |
0bf77cc1ae923cdc3f7bea80df568d4bfe83a4fe
| 191 |
py
|
Python
|
src/d2py/tools/write/release.py
|
xinetzone/d2py
|
657362a0451921ef5a7b05b4a8378f7379063cdf
|
[
"Apache-2.0"
] | 3 |
2022-03-09T14:08:42.000Z
|
2022-03-10T04:17:17.000Z
|
src/d2py/tools/write/release.py
|
xinetzone/d2py
|
657362a0451921ef5a7b05b4a8378f7379063cdf
|
[
"Apache-2.0"
] | 3 |
2021-11-07T13:11:26.000Z
|
2022-03-19T03:28:48.000Z
|
src/d2py/tools/write/release.py
|
xinetzone/d2py
|
657362a0451921ef5a7b05b4a8378f7379063cdf
|
[
"Apache-2.0"
] | 1 |
2022-03-15T14:18:32.000Z
|
2022-03-15T14:18:32.000Z
|
'''Code that may change.'''
from invoke import task


@task
def install(ctx, name='doc'):
    """Install this package with the given extras group (default: 'doc')."""
    # --use-feature=in-tree-build
    ctx.run(f'pip install .[{name}] ')

# namespace = Collection(docs, deploy)
| 14.692308 | 38 | 0.633508 |
045e5f368eabae6bf0f87a44c614e05c252db2b2
| 5,614 |
py
|
Python
|
beispielanwendungen/hallowelt/hallowelt10_eigenedialoge.py
|
pbouda/pyqt-und-pyside-buch
|
a4ec10663ccc8aeda075c9a06b9707ded52382c8
|
[
"CC-BY-4.0"
] | 5 |
2017-03-11T13:27:27.000Z
|
2022-01-09T10:52:05.000Z
|
beispielanwendungen/hallowelt/hallowelt10_eigenedialoge.py
|
pbouda/pyqt-und-pyside-buch
|
a4ec10663ccc8aeda075c9a06b9707ded52382c8
|
[
"CC-BY-4.0"
] | 2 |
2021-02-14T10:59:59.000Z
|
2021-10-30T21:46:32.000Z
|
beispielanwendungen/hallowelt/hallowelt10_eigenedialoge.py
|
pbouda/pyqt-und-pyside-buch
|
a4ec10663ccc8aeda075c9a06b9707ded52382c8
|
[
"CC-BY-4.0"
] | 1 |
2019-08-07T03:08:18.000Z
|
2019-08-07T03:08:18.000Z
|
# -*- coding: utf-8 -*-
import sys
from PyQt4 import QtCore, QtGui
def main(argv):
    """Create the Qt application, show the main window, and block until exit."""
    application = QtGui.QApplication(argv)
    window = MainWindow()
    window.show()
    sys.exit(application.exec_())
class MainWindow(QtGui.QMainWindow):
    """Main window of the 'Hallo Welt' demo: menu bar, label, line edit, button."""

    # Custom signal; declared but not connected anywhere in this class.
    meinSignal = QtCore.pyqtSignal()

    def __init__(self, *args):
        QtGui.QMainWindow.__init__(self, *args)
        self.createMenu()
        self.createComponents()
        self.createLayout()
        self.createConnects()
        self.setWindowTitle(self.tr(u"Hallo Welt"))

    def createMenu(self):
        """Build the 'Datei' and 'Hilfe' menus and their actions."""
        self.actionDateiOeffnen = QtGui.QAction(self.tr(u"Datei öffnen..."), self)
        self.actionDateiSpeichern = QtGui.QAction(self.tr(u"Speichern"), self)
        self.actionBeenden = QtGui.QAction(self.tr(u"Beenden"), self)
        self.actionBeenden.setMenuRole(QtGui.QAction.QuitRole)
        menuDatei = self.menuBar().addMenu(self.tr(u"Datei"))
        menuDatei.addAction(self.actionDateiOeffnen)
        menuDatei.addAction(self.actionDateiSpeichern)
        # The two alignment actions are parented to an action group.
        actiongroupAnsichten = QtGui.QActionGroup(self)
        #self.actionLinksAusrichten = QtGui.QAction(self.tr(u"Links Ausrichten"), self)
        self.actionLinksAusrichten = QtGui.QAction(self.tr(u"Links Ausrichten"), actiongroupAnsichten)
        self.actionLinksAusrichten.setCheckable(True)
        self.actionLinksAusrichten.setChecked(True)
        #self.actionRechtsAusrichten = QtGui.QAction(self.tr(u"Rechts Ausrichten"), self)
        self.actionRechtsAusrichten = QtGui.QAction(self.tr(u"Rechts Ausrichten"), actiongroupAnsichten)
        self.actionRechtsAusrichten.setCheckable(True)
        # NOTE(review): setExclusive(False) allows both alignment actions to be
        # checked at once — confirm this is intended for an alignment toggle.
        actiongroupAnsichten.setExclusive(False)
        menuAnsicht = menuDatei.addMenu(self.tr(u"Ansicht"))
        menuAnsicht.addAction(self.actionLinksAusrichten)
        menuAnsicht.addAction(self.actionRechtsAusrichten)
        menuDatei.addSeparator()
        menuDatei.addAction(self.actionBeenden)
        self.actionUeber = QtGui.QAction(self.tr(u"Über Hallo Welt..."), self)
        menuUeber = self.menuBar().addMenu(self.tr(u"Hilfe"))
        menuUeber.addAction(self.actionUeber)

    def createComponents(self):
        """Create the central widgets: greeting label, update button, text field."""
        self.labelHalloWelt = QtGui.QLabel(self.tr(u"Hallo Welt!"));
        self.buttonTextAktualisieren = QtGui.QPushButton(self.tr(u"Aktualisieren"));
        self.editText = QtGui.QLineEdit()

    def createConnects(self):
        """Wire widget signals to their handlers."""
        self.buttonTextAktualisieren.clicked.connect(self.textAktualisieren)
        # Live update: every keystroke mirrors the line edit into the label.
        self.editText.textChanged.connect(self.labelHalloWelt.setText)
        self.actionUeber.triggered.connect(self.zeigeUeberDialog)

    @QtCore.pyqtSlot()
    def zeigeUeberDialog(self):
        """Show a small modal 'about' dialog built inline."""
        dialog = QtGui.QDialog(self)
        label = QtGui.QLabel(self.tr(u"Klicke den Button"), dialog)
        button = QtGui.QPushButton(
            self.tr(u"Schließe mich"), dialog)
        layoutDialog = QtGui.QHBoxLayout()
        layoutDialog.addWidget(label)
        layoutDialog.addWidget(button)
        #layoutDialog.addWidget(buttonAbbrechen)
        dialog.setLayout(layoutDialog)
        dialog.setWindowTitle(self.tr(u"Hallo Welt Dialog"))
        button.clicked.connect(dialog.accept)
        #buttonAbbrechen.clicked.connect(dialog.reject)
        # Modal execution; the result code is currently unused.
        result = dialog.exec_()
        #if result == QtGui.QDialog.Accepted:
        #    print u"OK, es wurde nicht abgebrochen"
        #dialog.show()
        #dialog = QtGui.QDialog(self)
        #eingabe = QtGui.QLineEdit(dialog)
        #buttonOk = QtGui.QPushButton(
        #    self.tr("OK"), dialog)
        #buttonAbbrechen = QtGui.QPushButton(
        #    self.tr("Abbrechen"), dialog)
        #layoutDialog = QtGui.QHBoxLayout()
        #layoutDialog.addWidget(eingabe)
        #layoutDialog.addWidget(buttonOk)
        #layoutDialog.addWidget(buttonAbbrechen)
        #dialog.setLayout(layoutDialog)
        #buttonOk.clicked.connect(dialog.accept)
        #buttonAbbrechen.clicked.connect(dialog.reject)
        #result = dialog.exec_()
        #if result == QtGui.QDialog.Accepted:
        #    eingabe = unicode(eingabe.text())
        #    print eingabe
        #else:
        #    print "Abgebrochen"
        #dialog = MeinDialog(self)
        #dialog.exec_()
        #print dialog.eingabe

    @QtCore.pyqtSlot()
    def textAktualisieren(self):
        """Copy the line-edit text into the greeting label."""
        self.labelHalloWelt.setText(self.editText.text())

    def createLayout(self):
        """Stack label, line edit and button vertically as the central widget."""
        layoutZentral = QtGui.QVBoxLayout()
        layoutZentral.addWidget(self.labelHalloWelt)
        layoutZentral.addWidget(self.editText)
        layoutZentral.addWidget(self.buttonTextAktualisieren)
        widgetZentral = QtGui.QWidget()
        widgetZentral.setLayout(layoutZentral)
        self.setCentralWidget(widgetZentral)
class MeinDialog(QtGui.QDialog):
    """Small example dialog with a line edit and a close button."""

    def __init__(self, *args):
        QtGui.QDialog.__init__(self, *args)
        self.createComponents()
        self.createLayout()
        self.createConnects()
        self.setWindowTitle(self.tr(u"Mein Dialog"))

    def createComponents(self):
        """Create the input field and the close button."""
        self.lineeditEingabe = QtGui.QLineEdit()
        self.button = QtGui.QPushButton(self.tr(u"Schließe mich"))

    def createLayout(self):
        """Place input field and button side by side."""
        layoutDialog = QtGui.QHBoxLayout()
        layoutDialog.addWidget(self.lineeditEingabe)
        layoutDialog.addWidget(self.button)
        self.setLayout(layoutDialog)

    def createConnects(self):
        """Close the dialog with 'Accepted' when the button is clicked."""
        self.button.clicked.connect(self.accept)

    @property
    def eingabe(self):
        # NOTE: 'unicode' is a Python 2 builtin — this module is Python 2 code.
        eingabe = unicode(self.lineeditEingabe.text())
        return eingabe
# Script entry point: forward command-line arguments to the Qt application.
if __name__ == "__main__":
    main(sys.argv)
| 36.69281 | 104 | 0.666726 |
9bd753870d6f410740e6851ca05dd67f66aeea43
| 243 |
py
|
Python
|
dblp/python/conftest.py
|
DocSeven/spark
|
a88330f554a4afc70696dac8d00bcf4d2f512acf
|
[
"Apache-2.0"
] | null | null | null |
dblp/python/conftest.py
|
DocSeven/spark
|
a88330f554a4afc70696dac8d00bcf4d2f512acf
|
[
"Apache-2.0"
] | null | null | null |
dblp/python/conftest.py
|
DocSeven/spark
|
a88330f554a4afc70696dac8d00bcf4d2f512acf
|
[
"Apache-2.0"
] | 1 |
2019-11-06T11:29:31.000Z
|
2019-11-06T11:29:31.000Z
|
import findspark

# findspark.init() must run before 'pyspark' can be imported outside a
# spark-submit environment — it puts Spark's Python libs on sys.path.
findspark.init()
import logging
import pytest
from pyspark import SparkContext


@pytest.fixture(scope='session')
def with_spark_context():
    """Provide a single shared local SparkContext for the whole test session."""
    spark_context = SparkContext("local", "citationstest")
    return spark_context
| 17.357143 | 58 | 0.781893 |
9bd7782d23b30891ee0402d04c746c8b3c0bea41
| 633 |
py
|
Python
|
7-assets/past-student-repos/Data-Structures-master/python/doubly_linked_list/doubly_linked_list.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
7-assets/past-student-repos/Data-Structures-master/python/doubly_linked_list/doubly_linked_list.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
7-assets/past-student-repos/Data-Structures-master/python/doubly_linked_list/doubly_linked_list.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
class ListNode:
    """A node of a doubly linked list holding a value and prev/next links."""

    def __init__(self, value, prev=None, next=None):
        self.value = value
        self.prev = prev
        self.next = next

    def insert_after(self, value):
        """Insert a new node holding `value` immediately after this node.

        Completes the previously unimplemented stub.
        """
        current_next = self.next
        self.next = ListNode(value, self, current_next)
        if current_next:
            current_next.prev = self.next

    def insert_before(self, value):
        """Insert a new node holding `value` immediately before this node."""
        current_prev = self.prev
        self.prev = ListNode(value, current_prev, self)
        if current_prev:
            current_prev.next = self.prev

    def delete(self):
        """Unlink this node by rewiring its neighbours around it."""
        if self.prev:
            self.prev.next = self.next
        if self.next:
            self.next.prev = self.prev
class DoublyLinkedList:
    """A doubly linked list tracking head and tail nodes.

    Completes the previously unimplemented stub methods. Only direct
    prev/next relinking is used, so this works with a bare ListNode
    (no node helper methods required).
    """

    def __init__(self, node=None):
        self.head = node
        self.tail = node

    def add_to_head(self, value):
        """Prepend a new node holding `value`."""
        new_node = ListNode(value, None, self.head)
        if self.head:
            self.head.prev = new_node
        else:
            self.tail = new_node  # list was empty
        self.head = new_node

    def remove_from_head(self):
        """Remove the head node and return its value (None on empty list)."""
        if self.head is None:
            return None
        value = self.head.value
        self.delete(self.head)
        return value

    def add_to_tail(self, value):
        """Append a new node holding `value`."""
        new_node = ListNode(value, self.tail, None)
        if self.tail:
            self.tail.next = new_node
        else:
            self.head = new_node  # list was empty
        self.tail = new_node

    def remove_from_tail(self):
        """Remove the tail node and return its value (None on empty list)."""
        if self.tail is None:
            return None
        value = self.tail.value
        self.delete(self.tail)
        return value

    def move_to_front(self, node):
        """Relocate an existing node of this list to the head."""
        if node is self.head:
            return
        self.delete(node)
        node.prev = None
        node.next = self.head
        if self.head:
            self.head.prev = node
        else:
            self.tail = node
        self.head = node

    def move_to_end(self, node):
        """Relocate an existing node of this list to the tail."""
        if node is self.tail:
            return
        self.delete(node)
        node.next = None
        node.prev = self.tail
        if self.tail:
            self.tail.next = node
        else:
            self.head = node
        self.tail = node

    def delete(self, node):
        """Unlink `node` from the list, updating head/tail as needed."""
        if node is self.head and node is self.tail:
            self.head = None
            self.tail = None
        elif node is self.head:
            self.head = node.next
            self.head.prev = None
        elif node is self.tail:
            self.tail = node.prev
            self.tail.next = None
        else:
            node.prev.next = node.next
            node.next.prev = node.prev
504ee35d6e2da31ab80aa629e72be7e0904a46ee
| 3,067 |
py
|
Python
|
qmk_firmware/lib/python/qmk/cli/bux.py
|
DanTupi/personal_setup
|
911b4951e4d8b78d6ea8ca335229e2e970fda871
|
[
"MIT"
] | null | null | null |
qmk_firmware/lib/python/qmk/cli/bux.py
|
DanTupi/personal_setup
|
911b4951e4d8b78d6ea8ca335229e2e970fda871
|
[
"MIT"
] | null | null | null |
qmk_firmware/lib/python/qmk/cli/bux.py
|
DanTupi/personal_setup
|
911b4951e4d8b78d6ea8ca335229e2e970fda871
|
[
"MIT"
] | null | null | null |
"""QMK Bux
World domination secret weapon.
"""
from milc import cli
from milc.subcommand import config
@cli.subcommand('QMK Bux miner.', hidden=True)
def bux(cli):
    """QMK bux
    """
    # Read the current balance from user config, defaulting to 0 on first run.
    if not cli.config.user.bux:
        bux = 0
    else:
        bux = cli.config.user.bux
    # Allow writing config, then increment and persist the balance.
    cli.args.read_only = False
    config.set_config('user', 'bux', bux + 1)
    cli.save_config()
    # Easter-egg banner printed verbatim below — do not reformat.
    buck = """
    @@BBBBBBBBBBBBBBBBBBBBK   `vP8#####BE2~   x###g_   `S###q  n##}  -j#Bl.  vBBBBBBBBBBBBBBBBBBBB@@
    @B        `:!:           ^#@#]-    `!t@@&.  7@@B@#^   _Q@Q@@R  y@@l:P@#1'             `!!_        B@
    @B       r@@@B          g@@|          ` N@@u 7@@iv@@u  *#@z"@@R  y@@&@@Q-               l@@@D       B@
    @B      !#@B           ^#@#x-         I@B@@&' 7@@i "B@Q@@r _@@R  y@@l.k#@W:           `:@@D         B@
    @B       B@B             `v3g#####B0N#d.  v##x   'ckk:  -##A  u##i   `lB#I_         @@D           B@
    @B       B@B                                                                        @@D           B@
    @B       B@B                    `._":!!!=~^*|)r^~:'                                 @@D           B@
    @B       ~*~                `,=)]}y2tjIIfKfKfaPsffsWsUyx~.                          **!           B@
    @B               .*r***r=  _*]yzKsqKUfz22IAA3HzzUjtktzHWsHsIz].                                   B@
    @B            )v`   ,  !1- -rysHHUzUzo2jzoI22ztzkyykt2zjzUzIa3qPsl'     !r*****`                  B@
    @B           .*}   @`  .j  `xzqdAfzKWsj2kkcycczqAsk2zHbg&ER5q55SNN5U~  !RBB#d`c#1 f#\\BQ&v         B@
    @B           _y   ]#   ,c  vUWNWWPsfsssN9WyccnckAfUfWb0DR0&R5RRRddq2_  `@D`jr@2U@#c3@1@Qc-        B@
    @B           !7!  .r]` }AE0RdRqNd9dNR9fUIzzosPqqAddNNdER9EE9dPy!      BQ!zy@[email protected]@@y@8x-        B@
    @B            :****>.  '7adddDdR&gRNdRbd&dNNbbRdNdd5NdRRD0RSf}-        .k0&EW`xR .8Q=NRRx         B@
    @B       =**-rx*r}r~}"     ;n2jkzsf3N3zsKsP5dddRddddRddNNqPzy\\"          '~****"                  B@
    @B       :!!~!;=~r>:*_       `:^vxikylulKfHkyjzzozoIoklix|^!-`                                    B@
    @B                               ```'-_""::::!:_-.``                                              B@
    @B        `-                                                                        .`            B@
    @B       r@=                      In source we trust                                @H            B@
    @B       r@=                                                                        @H            B@
    @B      -g@=  `}&###E7  W#g. :#Q  n####~  R###8k  ;#&  `##.7#8-`R#z                 t@H           B@
    @B       r@=  8@R=-=R@g R@@#:!@@  2@&!:`  8@1=@@!*@B  `@@- v@#8@y                   @H            B@
    @B       r@=  :@@-   _@@_R@fB#}@@  2@@@#   8@@#@Q.*@B  `@@-  y@@N                    @H            B@
    @B       `.   g@9=_~D@g R@}`&@@@  2@&__`  8@u_Q@2!@@^-x@@`  Y@QD@z                  .`            B@
    @@BBBBBBBBBBBBBBBBBBB_  `c8@@@81`  S#] `N#B l####v  D###BA. vg@@#0~  i#&' 5#K  RBBBBBBBBBBBBBBBBBB@@
"""  # noqa: Do not care about the ASCII art
    print(f"{buck}\nYou've been blessed by the QMK gods!\nYou have {cli.config.user.bux} QMK bux.")
| 61.34 | 100 | 0.332573 |
c5916359e5d72431df2b5c9e9826c3218944c704
| 253 |
py
|
Python
|
Problems/Dynamic Programming/Easy/MaxGeneratedArray/test_max_generated_array.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | 1 |
2021-08-16T14:52:05.000Z
|
2021-08-16T14:52:05.000Z
|
Problems/Dynamic Programming/Easy/MaxGeneratedArray/test_max_generated_array.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
Problems/Dynamic Programming/Easy/MaxGeneratedArray/test_max_generated_array.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from max_generated_array import getMaximumGenerated
class Test(TestCase):
    """Unit tests for getMaximumGenerated."""

    def test_get_maximum_generated(self):
        # Fix: use assertEqual instead of assertTrue(x == y) so failures
        # report the actual and expected values instead of just 'False is not true'.
        self.assertEqual(getMaximumGenerated(7), 3)
        self.assertEqual(getMaximumGenerated(2), 1)
| 31.625 | 52 | 0.766798 |
767ad2baa5ca07c56d9b73ddc4713ecc4423cc06
| 9,093 |
py
|
Python
|
language/template.py
|
dr-bigfatnoob/quirk
|
f5025d7139adaf06380c429b436ccbf1e7611a16
|
[
"Unlicense"
] | 1 |
2021-03-05T07:44:05.000Z
|
2021-03-05T07:44:05.000Z
|
language/template.py
|
dr-bigfatnoob/quirk
|
f5025d7139adaf06380c429b436ccbf1e7611a16
|
[
"Unlicense"
] | 3 |
2017-06-04T03:01:31.000Z
|
2017-08-04T04:04:37.000Z
|
language/template.py
|
dr-bigfatnoob/quirk
|
f5025d7139adaf06380c429b436ccbf1e7611a16
|
[
"Unlicense"
] | null | null | null |
from __future__ import print_function, division
import sys
import os
sys.path.append(os.path.abspath("."))
sys.dont_write_bytecode = True
from utils.lib import O
import numpy as np
from collections import deque, OrderedDict
from copy import deepcopy
from operator import mul
__author__ = "bigfatnoob"
class Component(O):
    """
    All components of graph
    should extend this class
    """
    # Class-level counter used to hand out sequential instance ids.
    id = 0

    def __init__(self, **kwargs):
        O.__init__(self, **kwargs)
        self.id = Component.id
        Component.id += 1

    def __hash__(self):
        # NOTE(review): the very first instance receives id 0, which is falsy,
        # so it falls into this branch — confirm id 0 is meant to hash to 0.
        if not self.id:
            return 0
        return hash(self.id)

    def __eq__(self, other):
        # NOTE(review): an instance with id 0 compares unequal to everything,
        # including itself — verify this is intentional.
        if not self.id or not other.id:
            return False
        return self.id == other.id
class Node(Component):
    """
    Node for a model: holds cached samples, child nodes, and the operation
    used to combine children when evaluated.
    """

    @staticmethod
    def agg(a, b, op):
        """Combine two equal-length sample sequences element-wise with `op`."""
        assert len(a) == len(b)
        return np.asarray([op(a_i, b_i) for a_i, b_i in zip(a, b)])

    def __init__(self, **kwargs):
        self._samples = []   # cached evaluation results
        self.children = []   # child nodes feeding into this one
        self.operation = None  # combining operation shared by all children
        Component.__init__(self, **kwargs)

    def add_child(self, child, operation):
        """Attach `child`; all children of a node must share one operation."""
        if self.operation is not None and operation != self.operation:
            raise AttributeError("Parent has an existing operation %s but new operation is %s" % (self.operation, operation))
        self.children.append(child)
        self.operation = operation

    def bfs(self):
        """Print (id, name) of this node and descendants in breadth-first order."""
        queue = deque()
        queue.append(self)
        while len(queue) > 0:
            item = queue.popleft()
            print((item.id, item.name))
            for child in item.children:
                queue.append(child)

    def dfs(self):
        """Print (id, name) of this node and descendants in depth-first order."""
        stack = list()
        stack.insert(0, self)
        while len(stack) > 0:
            item = stack.pop(0)
            print((item.id, item.name))
            for child in item.children[::-1]:
                stack.insert(0, child)

    def evaluate(self):
        # Fix: the original raised `NotImplemented(...)`, but NotImplemented is a
        # sentinel value, not an exception class — raising it is itself a TypeError.
        raise NotImplementedError("Method has to be implemented in sub class")

    def clone(self):
        """Return a deep copy of this node (and its subtree)."""
        return deepcopy(self)

    def get_samples(self):
        return self._samples
class Decision(Node):
    """
    A Decision node.
    Can have multiple options in a hash called options
    """

    def __init__(self, **options):
        self.value = None      # currently selected option node
        self.options = options
        self.key = None
        Node.__init__(self)

    def clear(self):
        """Forget the current selection and any cached samples."""
        self.value, self._samples = None, None

    def evaluate(self):
        """Evaluate the selected option and cache its samples."""
        samples = self.value.evaluate()
        self._samples = samples
        return samples
class Objective(Node):
    """
    An objective node: reduces its single child's samples to a scalar
    via the supplied evaluation function.
    """

    def __init__(self, direction, evaluation):
        self.direction = direction  # optimization direction (min / max)
        self.eval = evaluation      # reducer applied to the child's samples
        self.value = None
        Node.__init__(self)

    def clear(self):
        """Reset the cached scalar and samples."""
        self.value, self._samples = None, None

    def evaluate(self):
        """Pull samples from the only child, reduce them, and cache both."""
        samples = self.children[0].evaluate()
        self._samples = samples
        self.value = self.eval(samples)
        return self.value
class Input(Node):
    """
    A Node which is neither a decision or an objective
    but has distribution parameter
    """

    def __init__(self, distribution):
        self.distribution = distribution
        Node.__init__(self)

    def generate(self, size):
        """(Re)sample only when no cache exists or the cached size differs."""
        cached = self._samples
        if not cached or len(cached) != size:
            self._samples = self.distribution.sample(size)
        return self._samples

    def evaluate(self):
        """Return the cached samples produced by generate()."""
        return self._samples
class Variable(Node):
    """Intermediate node combining its children with the shared operation."""

    def __init__(self):
        Node.__init__(self)

    def evaluate(self):
        """Fold children's samples together with self.operation.

        With several children the operation combines them pairwise;
        with a single child the operation is applied per-sample.
        """
        first, rest = self.children[0], self.children[1:]
        acc = first.evaluate()
        if rest:
            for child in rest:
                acc = Node.agg(acc, child.evaluate(), self.operation)
        else:
            acc = [self.operation(sample) for sample in acc]
        self._samples = acc
        return acc
class Edge(Component):
    """A directed edge of the model graph from `source` into `target`."""

    def __init__(self, source, target, operation):
        self.source, self.target, self.operation = source, target, operation
        Component.__init__(self)
def same(node):
    """Default edge operation: pass through the first child's samples unchanged."""
    first_child = node.children[0]
    return first_child.get_samples()
class Model(O):
    """A decision model: a graph of inputs, variables, decisions and objectives.

    NOTE: this module is Python 2 era — it relies on the `reduce` builtin and
    on dict.values() returning a list (see generate / get_max_size).
    """

    def __init__(self, name, sample_size=100):
        self.name = name
        self.sample_size = sample_size  # number of Monte-Carlo samples per input
        self.nodes = OrderedDict()      # all nodes by id
        self.edges = OrderedDict()      # all edges by id
        self.decisions = OrderedDict()
        self.inputs = OrderedDict()
        self.objectives = OrderedDict()
        self.variables = OrderedDict()
        # Optional mapping decision-key -> allowed option names; when set it
        # drives generate()/get_max_size() instead of per-decision options.
        self.decision_map = OrderedDict()
        O.__init__(self)

    def add_edge(self, source, target, operation=same):
        """Link source into target (registering `operation` on the target)."""
        target.add_child(source, operation)
        edge = Edge(source, target, operation)
        self.edges[edge.id] = edge

    def input(self, distribution, name=None):
        """Create and register an Input node sampling from `distribution`."""
        i = Input(distribution)
        i.name = name if name else i.id
        self.inputs[i.id] = i
        self.nodes[i.id] = i
        return i

    def variable(self, name=None):
        """Create and register an intermediate Variable node."""
        v = Variable()
        v.name = name if name else v.id
        self.variables[v.id] = v
        self.nodes[v.id] = v
        return v

    def decision(self, options, name=None, key=None):
        """Create a Decision whose options (name -> node) become its children."""
        d = Decision(**options)
        for value in options.values():
            self.add_edge(value, d, None)
        d.name = name if name else d.id
        if key is not None:
            d.key = key
        self.decisions[d.id] = d
        self.nodes[d.id] = d
        return d

    def objective(self, direction, evaluation=np.mean, name=None):
        """Create and register an Objective node (default reducer: mean)."""
        o = Objective(direction, evaluation)
        o.name = name if name else o.id
        self.objectives[o.id] = o
        self.nodes[o.id] = o
        return o

    def node_edges(self, node):
        """Register Edge records for all existing children of `node`."""
        for child in node.children:
            edge = Edge(child, node, node.operation)
            self.edges[edge.id] = edge

    def generate(self):
        """Return one random solution: decision-id -> chosen option node id."""
        solutions = OrderedDict()
        if self.decision_map:
            # Pick one option name per decision key, then resolve via each
            # decision's options dict.
            ref = {key: np.random.choice(vals) for key, vals in self.decision_map.items()}
            for key, decision in self.decisions.items():
                solutions[key] = decision.options[ref[decision.key]].id
        else:
            for key, decision in self.decisions.items():
                solutions[key] = np.random.choice(decision.options.values()).id
        return solutions

    def get_parameters(self):
        """Return all Input nodes of the model."""
        parameters = []
        for node in self.nodes.values():
            if isinstance(node, Input):
                parameters.append(node)
        return parameters

    def get_max_size(self):
        """Size of the decision space = product of option counts."""
        if self.decision_map:
            lst = [len(vals) for vals in self.decision_map.values()]
        else:
            lst = [len(value.options) for value in self.decisions.values()]
        return reduce(mul, lst, 1)

    def get_decisions(self):
        """Return decision-name -> list of option names."""
        if self.decision_map:
            return self.decision_map
        else:
            return {d.name: [o.name for o in d.options.values()] for d in self.decisions.values()}

    def get_decision_name(self, key):
        """Prefer the decision's key; fall back to its name."""
        name = self.decisions[key].key
        if name is None:
            name = self.decisions[key].name
        return name

    def get_decision_value(self, value):
        """Resolve an option node id to its display label (or name)."""
        value = self.nodes[value].has()["label"]
        # NOTE(review): 'value' has been overwritten with the label above, so
        # indexing self.nodes[value] again when the label is None looks like a
        # bug (likely KeyError) — confirm the intended fallback.
        if value is None:
            value = self.nodes[value].name
        return value

    def print_solution(self, solution):
        """Pretty-print a solution as decision name/value pairs."""
        print("Solution:")
        dic = OrderedDict()
        for key, value in solution.items():
            name = self.get_decision_name(key)
            value = self.get_decision_value(value)
            dic[name] = value
        for name, value in dic.items():
            print("\t name: %s, value: %s" % (name, value))

    def get_solution(self, solution):
        """Return a solution as an OrderedDict of decision name -> value."""
        dic = OrderedDict()
        for key, value in solution.items():
            name = self.get_decision_name(key)
            value = self.get_decision_value(value)
            dic[name] = value
        return dic

    def populate(self, size):
        """Generate up to `size` distinct random solutions (capped by space size)."""
        max_size = self.get_max_size()
        if size > max_size:
            size = max_size
        population = []
        while len(population) < size:
            one = self.generate()
            if one not in population:
                population.append(one)
        return population

    def evaluate(self, solution):
        """Apply a solution to the decisions and evaluate every objective."""
        assert len(solution) == len(self.decisions)
        for key, value in solution.items():
            self.decisions[key].value = self.nodes[value]
        # Reset objective caches before re-evaluating.
        for key in self.objectives.keys():
            self.objectives[key].value = None
        objs = OrderedDict()
        for key, objective in self.objectives.items():
            objective.evaluate()
            objs[key] = O(id=objective.id, value=objective.value, _samples=objective.get_samples()[:])
        return objs

    def evaluate_constraints(self, solution):
        # Base model is unconstrained: always feasible with zero violation.
        return True, 0

    def bdom(self, obj1, obj2):
        """
        Binary Domination
        :param obj1: Objective 1
        :param obj2: Objective 2
        :return: Check objective 1 dominates objective 2
        """
        at_least = False
        for i in self.objectives.keys():
            a, b = obj1[i], obj2[i]
            if self.objectives[i].direction.better(a, b):
                at_least = True
            elif a == b:
                continue
            else:
                return False
        return at_least

    def better(self, obj1, obj2):
        """Return 1 if obj1 dominates, 2 if obj2 dominates, else 0."""
        if self.bdom(obj1, obj2):
            return 1
        elif self.bdom(obj2, obj1):
            return 2
        return 0

    def initialize(self):
        """Draw sample_size samples for every input node."""
        for inp in self.inputs.values():
            inp.generate(self.sample_size)

    def test(self):
        """Smoke test: sample 10 solutions and print their objective values."""
        self.initialize()
        print("Max Size = %d" % self.get_max_size())
        solutions = self.populate(10)
        # print(self.evaluate(solutions[0]))
        for sol in solutions:
            evals = self.evaluate(sol)
            arr = []
            for key, val in evals.items():
                arr.append((self.objectives[key].name, val))
            print(arr)
            # self.print_solution(sol)
4f3d89a119a7ae4f782b13cef9e6699ec2490e03
| 17,143 |
py
|
Python
|
Library/google/cloud/firestore_v1beta1/gapic/firestore_admin_client.py
|
TrojanCrypto/Dashboard
|
3af795980c7aa90505afae684297e43366cb6ee5
|
[
"MIT"
] | 2 |
2018-02-01T06:30:24.000Z
|
2018-04-12T15:39:56.000Z
|
Library/google/cloud/firestore_v1beta1/gapic/firestore_admin_client.py
|
TrojanCrypto/Dashboard
|
3af795980c7aa90505afae684297e43366cb6ee5
|
[
"MIT"
] | 7 |
2020-03-24T15:50:06.000Z
|
2021-06-08T19:57:39.000Z
|
Library/google/cloud/firestore_v1beta1/gapic/firestore_admin_client.py
|
TrojanCrypto/Dashboard
|
3af795980c7aa90505afae684297e43366cb6ee5
|
[
"MIT"
] | 1 |
2018-09-19T05:55:27.000Z
|
2018-09-19T05:55:27.000Z
|
# Copyright 2017, Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# EDITING INSTRUCTIONS
# This file was generated from the file
# https://github.com/google/googleapis/blob/master/google/firestore/admin/v1beta1/firestore_admin.proto,
# and updates to that file get reflected here through a refresh process.
# For the short term, the refresh process will only be runnable by Google engineers.
#
# The only allowed edits are to method and file documentation. A 3-way
# merge preserves those additions if the generated source changes.
"""Accesses the google.firestore.admin.v1beta1 FirestoreAdmin API."""
import collections
import json
import os
import pkg_resources
import platform
from google.gax import api_callable
from google.gax import config
from google.gax import path_template
import google.gax
from google.cloud.firestore_v1beta1.gapic import enums
from google.cloud.firestore_v1beta1.gapic import firestore_admin_client_config
from google.cloud.firestore_v1beta1.proto.admin import firestore_admin_pb2
from google.cloud.firestore_v1beta1.proto.admin import index_pb2
_PageDesc = google.gax.PageDescriptor
class FirestoreAdminClient(object):
"""
The Cloud Firestore Admin API.
This API provides several administrative services for Cloud Firestore.
# Concepts
Project, Database, Namespace, Collection, and Document are used as defined in
the Google Cloud Firestore API.
Operation: An Operation represents work being performed in the background.
# Services
## Index
The index service manages Cloud Firestore indexes.
Index creation is performed asynchronously.
An Operation resource is created for each such asynchronous operation.
The state of the operation (including any errors encountered)
may be queried via the Operation resource.
## Metadata
Provides metadata and statistical information about data in Cloud Firestore.
The data provided as part of this API may be stale.
## Operation
The Operations collection provides a record of actions performed for the
specified Project (including any Operations in progress). Operations are not
created directly but through calls on other collections or resources.
An Operation that is not yet done may be cancelled. The request to cancel is
asynchronous and the Operation may continue to run for some time after the
request to cancel is made.
An Operation that is done may be deleted so that it is no longer listed as
part of the Operation collection.
Operations are created by service ``FirestoreAdmin``, but are accessed via
service ``google.longrunning.Operations``.
"""
SERVICE_ADDRESS = 'firestore.googleapis.com'
"""The default address of the service."""
DEFAULT_SERVICE_PORT = 443
"""The default port of the service."""
_PAGE_DESCRIPTORS = {
'list_indexes': _PageDesc('page_token', 'next_page_token', 'indexes')
}
# The scopes needed to make gRPC calls to all of the methods defined in
# this service
_ALL_SCOPES = (
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/datastore', )
_DATABASE_PATH_TEMPLATE = path_template.PathTemplate(
'projects/{project}/databases/{database}')
_INDEX_PATH_TEMPLATE = path_template.PathTemplate(
'projects/{project}/databases/{database}/indexes/{index}')
@classmethod
def database_path(cls, project, database):
"""Returns a fully-qualified database resource name string."""
return cls._DATABASE_PATH_TEMPLATE.render({
'project': project,
'database': database,
})
@classmethod
def index_path(cls, project, database, index):
"""Returns a fully-qualified index resource name string."""
return cls._INDEX_PATH_TEMPLATE.render({
'project': project,
'database': database,
'index': index,
})
@classmethod
def match_project_from_database_name(cls, database_name):
"""Parses the project from a database resource.
Args:
database_name (str): A fully-qualified path representing a database
resource.
Returns:
A string representing the project.
"""
return cls._DATABASE_PATH_TEMPLATE.match(database_name).get('project')
@classmethod
def match_database_from_database_name(cls, database_name):
"""Parses the database from a database resource.
Args:
database_name (str): A fully-qualified path representing a database
resource.
Returns:
A string representing the database.
"""
return cls._DATABASE_PATH_TEMPLATE.match(database_name).get('database')
@classmethod
def match_project_from_index_name(cls, index_name):
"""Parses the project from a index resource.
Args:
index_name (str): A fully-qualified path representing a index
resource.
Returns:
A string representing the project.
"""
return cls._INDEX_PATH_TEMPLATE.match(index_name).get('project')
@classmethod
def match_database_from_index_name(cls, index_name):
"""Parses the database from a index resource.
Args:
index_name (str): A fully-qualified path representing a index
resource.
Returns:
A string representing the database.
"""
return cls._INDEX_PATH_TEMPLATE.match(index_name).get('database')
@classmethod
def match_index_from_index_name(cls, index_name):
"""Parses the index from a index resource.
Args:
index_name (str): A fully-qualified path representing a index
resource.
Returns:
A string representing the index.
"""
return cls._INDEX_PATH_TEMPLATE.match(index_name).get('index')
def __init__(self,
channel=None,
credentials=None,
ssl_credentials=None,
scopes=None,
client_config=None,
lib_name=None,
lib_version='',
metrics_headers=()):
"""Constructor.
Args:
channel (~grpc.Channel): A ``Channel`` instance through
which to make calls.
credentials (~google.auth.credentials.Credentials): The authorization
credentials to attach to requests. These credentials identify this
application to the service.
ssl_credentials (~grpc.ChannelCredentials): A
``ChannelCredentials`` instance for use with an SSL-enabled
channel.
scopes (Sequence[str]): A list of OAuth2 scopes to attach to requests.
client_config (dict):
A dictionary for call options for each method. See
:func:`google.gax.construct_settings` for the structure of
this data. Falls back to the default config if not specified
or the specified config is missing data points.
lib_name (str): The API library software used for calling
the service. (Unless you are writing an API client itself,
leave this as default.)
lib_version (str): The API library software version used
for calling the service. (Unless you are writing an API client
itself, leave this as default.)
metrics_headers (dict): A dictionary of values for tracking
client library metrics. Ultimately serializes to a string
(e.g. 'foo/1.2.3 bar/3.14.1'). This argument should be
considered private.
"""
# Unless the calling application specifically requested
# OAuth scopes, request everything.
if scopes is None:
scopes = self._ALL_SCOPES
# Initialize an empty client config, if none is set.
if client_config is None:
client_config = {}
# Initialize metrics_headers as an ordered dictionary
# (cuts down on cardinality of the resulting string slightly).
metrics_headers = collections.OrderedDict(metrics_headers)
metrics_headers['gl-python'] = platform.python_version()
# The library may or may not be set, depending on what is
# calling this client. Newer client libraries set the library name
# and version.
if lib_name:
metrics_headers[lib_name] = lib_version
# Finally, track the GAPIC package version.
metrics_headers['gapic'] = pkg_resources.get_distribution(
'google-cloud-firestore', ).version
# Load the configuration defaults.
defaults = api_callable.construct_settings(
'google.firestore.admin.v1beta1.FirestoreAdmin',
firestore_admin_client_config.config,
client_config,
config.STATUS_CODE_NAMES,
metrics_headers=metrics_headers,
page_descriptors=self._PAGE_DESCRIPTORS, )
self.firestore_admin_stub = config.create_stub(
firestore_admin_pb2.FirestoreAdminStub,
channel=channel,
service_path=self.SERVICE_ADDRESS,
service_port=self.DEFAULT_SERVICE_PORT,
credentials=credentials,
scopes=scopes,
ssl_credentials=ssl_credentials)
self._create_index = api_callable.create_api_call(
self.firestore_admin_stub.CreateIndex,
settings=defaults['create_index'])
self._list_indexes = api_callable.create_api_call(
self.firestore_admin_stub.ListIndexes,
settings=defaults['list_indexes'])
self._get_index = api_callable.create_api_call(
self.firestore_admin_stub.GetIndex, settings=defaults['get_index'])
self._delete_index = api_callable.create_api_call(
self.firestore_admin_stub.DeleteIndex,
settings=defaults['delete_index'])
# Service calls
def create_index(self, parent, index, options=None):
"""
Creates the specified index.
A newly created index's initial state is ``CREATING``. On completion of the
returned ``google.longrunning.Operation``, the state will be ``READY``.
If the index already exists, the call will return an ``ALREADY_EXISTS``
status.
During creation, the process could result in an error, in which case the
index will move to the ``ERROR`` state. The process can be recovered by
fixing the data that caused the error, removing the index with
``delete``, then re-creating the index with
``create``.
Indexes with a single field cannot be created.
Example:
>>> from google.cloud import firestore_v1beta1
>>>
>>> client = firestore_v1beta1.FirestoreAdminClient()
>>>
>>> parent = client.database_path('[PROJECT]', '[DATABASE]')
>>> index = {}
>>>
>>> response = client.create_index(parent, index)
Args:
parent (str): The name of the database this index will apply to. For example:
``projects/{project_id}/databases/{database_id}``
index (Union[dict, ~google.cloud.firestore_v1beta1.types.Index]): The index to create. The name and state should not be specified.
Certain single field indexes cannot be created or deleted.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.firestore_v1beta1.types.Index`
options (~google.gax.CallOptions): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`~google.cloud.firestore_v1beta1.types.Operation` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
request = firestore_admin_pb2.CreateIndexRequest(
parent=parent, index=index)
return self._create_index(request, options)
def list_indexes(self, parent, filter_=None, page_size=None, options=None):
"""
Lists the indexes that match the specified filters.
Example:
>>> from google.cloud import firestore_v1beta1
>>> from google.gax import CallOptions, INITIAL_PAGE
>>>
>>> client = firestore_v1beta1.FirestoreAdminClient()
>>>
>>> parent = client.database_path('[PROJECT]', '[DATABASE]')
>>>
>>>
>>> # Iterate over all results
>>> for element in client.list_indexes(parent):
... # process element
... pass
>>>
>>> # Or iterate over results one page at a time
>>> for page in client.list_indexes(parent, options=CallOptions(page_token=INITIAL_PAGE)):
... for element in page:
... # process element
... pass
Args:
parent (str): The database name. For example:
``projects/{project_id}/databases/{database_id}``
filter_ (str)
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
options (~google.gax.CallOptions): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.firestore_v1beta1.types.Index` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
request = firestore_admin_pb2.ListIndexesRequest(
parent=parent, filter=filter_, page_size=page_size)
return self._list_indexes(request, options)
def get_index(self, name, options=None):
"""
Gets an index.
Example:
>>> from google.cloud import firestore_v1beta1
>>>
>>> client = firestore_v1beta1.FirestoreAdminClient()
>>>
>>> name = client.index_path('[PROJECT]', '[DATABASE]', '[INDEX]')
>>>
>>> response = client.get_index(name)
Args:
name (str): The name of the index. For example:
``projects/{project_id}/databases/{database_id}/indexes/{index_id}``
options (~google.gax.CallOptions): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`~google.cloud.firestore_v1beta1.types.Index` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
request = firestore_admin_pb2.GetIndexRequest(name=name)
return self._get_index(request, options)
def delete_index(self, name, options=None):
"""
Deletes an index.
Example:
>>> from google.cloud import firestore_v1beta1
>>>
>>> client = firestore_v1beta1.FirestoreAdminClient()
>>>
>>> name = client.index_path('[PROJECT]', '[DATABASE]', '[INDEX]')
>>>
>>> client.delete_index(name)
Args:
name (str): The index name. For example:
``projects/{project_id}/databases/{database_id}/indexes/{index_id}``
options (~google.gax.CallOptions): Overrides the default
settings for this call, e.g, timeout, retries etc.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
request = firestore_admin_pb2.DeleteIndexRequest(name=name)
self._delete_index(request, options)
| 39.318807 | 142 | 0.638511 |
8c5196a9f7c1556012a57375507be658800819fc
| 1,137 |
py
|
Python
|
aoc2020/day_08/part_1.py
|
en0/aoc2020
|
a2f41b909dffe1f366682b3d03fd5fbdbc924ec0
|
[
"MIT"
] | null | null | null |
aoc2020/day_08/part_1.py
|
en0/aoc2020
|
a2f41b909dffe1f366682b3d03fd5fbdbc924ec0
|
[
"MIT"
] | null | null | null |
aoc2020/day_08/part_1.py
|
en0/aoc2020
|
a2f41b909dffe1f366682b3d03fd5fbdbc924ec0
|
[
"MIT"
] | null | null | null |
from aoc2020 import *
class Solution(SolutionABC):
expected, exit_code, pc, ax = 5, 0, 0, 0
program: list = None
trace: set = None
def solve(self) -> any:
self.load_program()
while self.pc < len(self.program):
if self.pc in self.trace:
self.exit_code = 1
break
self.trace.add(self.pc)
self.execute(*self.program[self.pc])
self.pc += 1
return self.ax
def exec_nop(self, arg):
pass
def exec_jmp(self, arg):
self.pc += (arg - 1)
def exec_acc(self, arg):
self.ax += arg
def execute(self, op: str, arg: int):
return {
'nop': self.exec_nop,
'jmp': self.exec_jmp,
'acc': self.exec_acc,
}[op](arg)
def load_program(self):
self.pc = 0
self.ax = 0
self.exit_code = 0
self.trace = set()
self.program = list(self.resource_lines("input", self.parse_instruction))
@classmethod
def parse_instruction(cls, line: str):
op, arg = line.split(' ')
return op, int(arg)
| 24.191489 | 81 | 0.525066 |
507f62fa5505ee4429b8005646cd139c236b0fb2
| 4,479 |
py
|
Python
|
research/cv/squeezenet1_1/eval.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/squeezenet1_1/eval.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/squeezenet1_1/eval.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""eval squeezenet."""
import os
import ast
import argparse
from mindspore import context
from mindspore.common import set_seed
from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
from mindspore.train.model import Model
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from src.CrossEntropySmooth import CrossEntropySmooth
from src.squeezenet import SqueezeNet as squeezenet
from src.dataset import create_dataset_imagenet as create_dataset
from src.config import config_imagenet as config
local_data_url = '/cache/data'
local_ckpt_url = '/cache/ckpt.ckpt'
parser = argparse.ArgumentParser(description='Image classification')
parser.add_argument('--dataset', type=str, default='imagenet', help='Dataset.')
parser.add_argument('--net', type=str, default='squeezenet', help='Model.')
parser.add_argument('--run_cloudbrain', type=ast.literal_eval, default=False,
help='Whether it is running on CloudBrain platform.')
parser.add_argument('--checkpoint_path', type=str, default=None, help='Checkpoint file path')
parser.add_argument('--dataset_path', type=str, default='', help='Dataset path')
parser.add_argument('--device_target', type=str, default='Ascend', help='Device target')
parser.add_argument('--data_url', type=str, default="None", help='Datapath')
parser.add_argument('--train_url', type=str, default="None", help='Train output path')
args_opt = parser.parse_args()
set_seed(1)
if __name__ == '__main__':
target = args_opt.device_target
if args_opt.device_target != "Ascend":
raise ValueError("Unsupported device target.")
# init context
device_id = os.getenv('DEVICE_ID')
device_id = int(device_id) if device_id else 0
context.set_context(mode=context.GRAPH_MODE,
device_target=target,
device_id=device_id)
# create dataset
if args_opt.run_cloudbrain:
import moxing as mox
mox.file.copy_parallel(args_opt.checkpoint_path, local_ckpt_url)
mox.file.copy_parallel(args_opt.data_url, local_data_url)
dataset = create_dataset(dataset_path=local_data_url,
do_train=False,
repeat_num=1,
batch_size=config.batch_size,
target=target,
run_distribute=False)
else:
dataset = create_dataset(dataset_path=args_opt.dataset_path,
do_train=False,
repeat_num=1,
batch_size=config.batch_size,
target=target,
run_distribute=False)
step_size = dataset.get_dataset_size()
# define net
net = squeezenet(num_classes=config.class_num)
# load checkpoint
if args_opt.run_cloudbrain:
param_dict = load_checkpoint(local_ckpt_url)
else:
param_dict = load_checkpoint(args_opt.checkpoint_path)
load_param_into_net(net, param_dict)
net.set_train(False)
# define loss
if args_opt.dataset == "imagenet":
if not config.use_label_smooth:
config.label_smooth_factor = 0.0
loss = CrossEntropySmooth(sparse=True,
reduction='mean',
smooth_factor=config.label_smooth_factor,
num_classes=config.class_num)
else:
loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
# define model
model = Model(net,
loss_fn=loss,
metrics={'top_1_accuracy', 'top_5_accuracy'})
# eval model
res = model.eval(dataset)
print("result:", res, "ckpt=", local_ckpt_url)
| 40.718182 | 93 | 0.652378 |
fa3b5e7756920b789d762e94147125f340e0c719
| 1,755 |
py
|
Python
|
Packs/HealthCheck/Scripts/HealthCheckCommonIndicators/HealthCheckCommonIndicators.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/HealthCheck/Scripts/HealthCheckCommonIndicators/HealthCheckCommonIndicators.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/HealthCheck/Scripts/HealthCheckCommonIndicators/HealthCheckCommonIndicators.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
THRESHOLDS = {
'relatedIndicatorCount': 1000,
}
def build_body(tenant_name):
query = f"account:{tenant_name}" if tenant_name != "" else ""
body = {
'page': 0,
'size': 10,
'query': f'{query}',
'sort': [{
'field': 'relatedIncCount',
'asc': False,
}],
'period': {
'by': 'day',
'fromValue': 90,
}
}
return body
def main(args):
incident = demisto.incidents()[0]
tenant_name = incident.get('account')
account_name = f"acc_{tenant_name}/" if tenant_name != "" else ""
body = build_body(tenant_name)
indicator_thresholds = args.get('Thresholds', THRESHOLDS)
indicator_res = execute_command('demisto-api-post', {
'uri': f'{account_name}/indicators/search',
'body': body,
})
indicators = indicator_res['response']['iocObjects']
res = []
for indicator in indicators:
if indicator.get('relatedIncCount', 0) > indicator_thresholds['relatedIndicatorCount']:
res.append({
'category': 'Indicators',
'severity': 'Low',
'description': f'The indicator: "{indicator["value"]}" was found {indicator["relatedIncCount"]} times',
'resolution': 'You may consider adding it to the exclusion list',
})
results = CommandResults(
readable_output='HealthCheckCommonIndicators Done',
outputs_prefix='HealthCheck.ActionableItems',
outputs=res)
return results
if __name__ in ('__main__', '__builtin__', 'builtins'): # pragma: no cover
return_results(main(demisto.args()))
| 28.770492 | 119 | 0.590883 |
fa5ebf94623f7791fcb559f412d80a5c09463a06
| 3,550 |
py
|
Python
|
code/sensors/camera/camera_pipelines.py
|
dieterpl/iDogstra
|
62ee246763e107335b9caf0a4f96239fa0953234
|
[
"MIT"
] | null | null | null |
code/sensors/camera/camera_pipelines.py
|
dieterpl/iDogstra
|
62ee246763e107335b9caf0a4f96239fa0953234
|
[
"MIT"
] | null | null | null |
code/sensors/camera/camera_pipelines.py
|
dieterpl/iDogstra
|
62ee246763e107335b9caf0a4f96239fa0953234
|
[
"MIT"
] | null | null | null |
from sensors.camera import camera
from sensors.pipeline import *
def color_filter_pipeline(color="magenta"):
return \
PipelineSequence(
camera.READ_CAMERA_PIPELINE,
camera.ConvertColorspacePipeline(to='hsv'),
camera.ColorThresholdPipeline(color=color),
camera.ErodeDilatePipeline()
)
def color_tracking_pipeline(color="magenta"):
return \
PipelineSequence(
("image", camera.READ_CAMERA_PIPELINE),
ConjunctiveParallelPipeline(
PipelineSequence(
("hsv_image", camera.ConvertColorspacePipeline(to='hsv')),
("threshold", camera.ColorThresholdPipeline(color=color)),
("filtered", camera.ErodeDilatePipeline()),
("contour_bbox", camera.GetLargestContourPipeline())
),
camera.GetImageDimensionsPipeline()
),
("raw_y_deviation", camera.FindYDeviationPipeline()),
("y_deviation", camera.KalmanFilterPipeline())
)
def fast_color_tracking_pipeline(color="magenta"):
return \
PipelineSequence(
("image", camera.READ_CAMERA_PIPELINE),
ConjunctiveParallelPipeline(
PipelineSequence(
camera.ConvertColorspacePipeline(to='hsv'),
camera.ColorThresholdPipeline(color),
("contour_bbox", camera.FastColorDetectionPipeline(color)),
),
camera.GetImageDimensionsPipeline()
),
("y_deviation", camera.FindYDeviationPipeline())
)
def color_tracking_dbscan_pipeline(color="magenta"):
return \
PipelineSequence(
("image", camera.READ_CAMERA_PIPELINE),
ConjunctiveParallelPipeline(
PipelineSequence(
camera.ConvertColorspacePipeline(to='hsv'),
camera.ColorThresholdPipeline(color=color),
camera.ErodeDilatePipeline(),
("contour_bbox", camera.DBSCANPipeline(eps=1.3, min_neighs=5))
),
camera.GetImageDimensionsPipeline()
),
("y_deviation", camera.FindYDeviationPipeline())
)
def box_tracking_pipeline(frame, bbox):
return \
PipelineSequence(
camera.READ_CAMERA_PIPELINE,
ConjunctiveParallelPipeline(
PipelineSequence(
camera.ConvertColorspacePipeline(to='hsv'),
camera.TrackBBOXPipeline(frame, bbox),
),
camera.GetImageDimensionsPipeline()
),
camera.FindYDeviationPipeline()
)
def edge_detection_pipeline(hysteresis_lower=100, hysteresis_upper=200):
return \
PipelineSequence(
("image", camera.READ_CAMERA_PIPELINE),
("edges", camera.EdgeDetectionPipeline(threshold_lower=hysteresis_lower, threshold_upper=hysteresis_upper))
)
def haarcascade_pipeline(haarfile):
return \
PipelineSequence(
("image", camera.READ_CAMERA_PIPELINE),
camera.ConvertColorspacePipeline("grayscale"),
("cascades", camera.HaarcascadePipeline(haarfile))
)
def find_legs_pipeline():
return \
PipelineSequence(
("image", camera.READ_CAMERA_PIPELINE),
("edges", camera.EdgeDetectionPipeline()),
("legs", camera.FindLegsPipeline())
)
| 33.809524 | 119 | 0.590986 |
ad53aa1ec44ea7c115a69718b14b2b0d0993e51c
| 7,357 |
py
|
Python
|
src/Sephrasto/DatenbankEditWaffeWrapper.py
|
qeqar/Sephrasto
|
ce46d46299b2c793f015e25c98908773c39b1dee
|
[
"MIT"
] | null | null | null |
src/Sephrasto/DatenbankEditWaffeWrapper.py
|
qeqar/Sephrasto
|
ce46d46299b2c793f015e25c98908773c39b1dee
|
[
"MIT"
] | null | null | null |
src/Sephrasto/DatenbankEditWaffeWrapper.py
|
qeqar/Sephrasto
|
ce46d46299b2c793f015e25c98908773c39b1dee
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 18 10:52:34 2017
@author: Aeolitus
"""
import Objekte
import UI.DatenbankEditWaffe
from Hilfsmethoden import Hilfsmethoden, WaffeneigenschaftException
from PyQt5 import QtWidgets, QtCore
from TextTagCompleter import TextTagCompleter
from Fertigkeiten import KampffertigkeitTyp
class DatenbankEditWaffeWrapper(object):
def __init__(self, datenbank, waffe=None, readonly=False):
super().__init__()
self.db = datenbank
if waffe is None:
waffe = Objekte.Nahkampfwaffe()
self.waffePicked = waffe
self.readonly = readonly
waffeDialog = QtWidgets.QDialog()
self.ui = UI.DatenbankEditWaffe.Ui_talentDialog()
self.ui.setupUi(waffeDialog)
if not waffe.isUserAdded:
if readonly:
self.ui.warning.setText("Gelöschte Elemente können nicht verändert werden.")
self.ui.warning.setVisible(True)
waffeDialog.setWindowFlags(
QtCore.Qt.Window |
QtCore.Qt.CustomizeWindowHint |
QtCore.Qt.WindowTitleHint |
QtCore.Qt.WindowCloseButtonHint)
self.eigenschaftenValid = True
self.nameValid = True
self.ui.nameEdit.setText(waffe.name)
self.ui.nameEdit.textChanged.connect(self.nameChanged)
self.nameChanged()
if type(waffe) == Objekte.Fernkampfwaffe:
self.ui.comboTyp.setCurrentIndex(1)
else:
self.ui.comboTyp.setCurrentIndex(0)
self.ui.comboTyp.currentIndexChanged[int].connect(self.switchType)
self.eigenschaftenCompleter = TextTagCompleter(self.ui.textEigenschaften, self.db.waffeneigenschaften.keys())
self.ui.textEigenschaften.setPlainText(", ".join(waffe.eigenschaften))
self.ui.textEigenschaften.textChanged.connect(self.eigenschaftenChanged)
self.ui.spinHaerte.setValue(waffe.haerte)
self.ui.spinW6.setValue(waffe.W6)
self.ui.spinPlus.setValue(waffe.plus)
self.ui.spinRW1.setValue(waffe.rw)
if type(waffe) == Objekte.Fernkampfwaffe:
self.switchType(1)
self.ui.spinWMLZ.setValue(waffe.lz)
else:
self.switchType(0)
self.ui.spinWMLZ.setValue(waffe.wm)
for fert in datenbank.fertigkeiten.values():
if fert.kampffertigkeit == KampffertigkeitTyp.Keine:
continue
self.ui.comboFert.addItem(fert.name)
if waffe.fertigkeit != '':
try:
self.ui.comboFert.setCurrentText(waffe.fertigkeit)
fff = waffe.fertigkeit
except:
pass
else:
fff = self.ui.comboFert.currentText()
self.switchTals(fff)
self.ui.comboFert.currentTextChanged.connect(self.switchTals)
if waffe.talent != '':
try:
self.ui.comboTalent.setCurrentText(waffe.talent)
except:
pass
col = 0
row = 0
self.kampfstile = []
for kampfstil in datenbank.findKampfstile():
checkbox = QtWidgets.QCheckBox(kampfstil)
checkbox.stateChanged.connect(lambda state, kampfstil=kampfstil : self.kampfstilChanged(kampfstil, state))
if kampfstil in waffe.kampfstile:
checkbox.setChecked(True)
self.ui.layoutKampfstile.addWidget(checkbox, row, col)
if col == 0:
col +=1
else:
row += 1
col = 0
waffeDialog.adjustSize()
waffeDialog.show()
ret = waffeDialog.exec_()
if ret == QtWidgets.QDialog.Accepted:
if self.ui.comboTyp.currentIndex() == 0:
self.waffe = Objekte.Nahkampfwaffe()
self.waffe.wm = int(self.ui.spinWMLZ.value())
else:
self.waffe = Objekte.Fernkampfwaffe()
self.waffe.lz = int(self.ui.spinWMLZ.value())
self.waffe.rw = int(self.ui.spinRW1.value())
self.waffe.W6 = int(self.ui.spinW6.value())
self.waffe.plus = int(self.ui.spinPlus.value())
self.waffe.haerte = int(self.ui.spinHaerte.value())
eigenschaftStr = self.ui.textEigenschaften.toPlainText()
if eigenschaftStr:
self.waffe.eigenschaften = list(map(str.strip, eigenschaftStr.strip().rstrip(',').split(",")))
self.waffe.name = self.ui.nameEdit.text()
self.waffe.fertigkeit = self.ui.comboFert.currentText()
self.waffe.talent = self.ui.comboTalent.currentText()
self.waffe.kampfstile = self.kampfstile
self.waffe.isUserAdded = False
if self.waffe == self.waffePicked:
self.waffe = None
else:
self.waffe.isUserAdded = True
else:
self.waffe = None
def kampfstilChanged(self, kampfstil, state):
if state == 0:
self.kampfstile.remove(kampfstil)
else:
self.kampfstile.append(kampfstil)
def switchType(self, melee):
if melee == 0:
self.ui.spinWMLZ.setMinimum(-9)
self.ui.labelWMLZ.setText("Waffenmodifikator")
else:
self.ui.spinWMLZ.setMinimum(0)
self.ui.labelWMLZ.setText("Ladezeit")
def switchTals(self, ff):
self.ui.comboTalent.setCurrentIndex(0)
self.ui.comboTalent.clear()
for tal in self.db.talente:
if ff in self.db.talente[tal].fertigkeiten:
self.ui.comboTalent.addItem(tal)
def nameChanged(self):
name = self.ui.nameEdit.text()
self.nameValid = False
if name == "":
self.ui.nameEdit.setToolTip("Name darf nicht leer sein.")
self.ui.nameEdit.setStyleSheet("border: 1px solid red;")
elif name != self.waffePicked.name and name in self.db.waffen:
self.ui.nameEdit.setToolTip("Name existiert bereits.")
self.ui.nameEdit.setStyleSheet("border: 1px solid red;")
else:
self.ui.nameEdit.setToolTip("")
self.ui.nameEdit.setStyleSheet("")
self.nameValid = True
self.updateSaveButtonState()
def eigenschaftenChanged(self):
eigenschaftStr = self.ui.textEigenschaften.toPlainText()
if eigenschaftStr:
eigenschaften = list(map(str.strip, eigenschaftStr.strip().rstrip(',').split(",")))
for el in eigenschaften:
try:
Hilfsmethoden.VerifyWaffeneigenschaft(el, self.db)
except WaffeneigenschaftException as e:
self.ui.textEigenschaften.setToolTip(str(e))
self.ui.textEigenschaften.setStyleSheet("border: 1px solid red;")
self.eigenschaftenValid = False
self.updateSaveButtonState()
return
self.ui.textEigenschaften.setToolTip("")
self.ui.textEigenschaften.setStyleSheet("")
self.eigenschaftenValid = True
self.updateSaveButtonState()
def updateSaveButtonState(self):
self.ui.buttonBox.button(QtWidgets.QDialogButtonBox.Save).setEnabled(not self.readonly and self.nameValid and self.eigenschaftenValid)
| 39.132979 | 142 | 0.605002 |
0f4f0b79465c5333da5571a4e9c514551782cdc4
| 929 |
py
|
Python
|
02 Hardwaredesign/Grundschaltungen/src/led_pwm_blink.py
|
DennisSchulmeister/dhbwka-wwi-iottech-quellcodes
|
58f86907af31187f267a9ea476f061cc59098ebd
|
[
"CC-BY-4.0"
] | null | null | null |
02 Hardwaredesign/Grundschaltungen/src/led_pwm_blink.py
|
DennisSchulmeister/dhbwka-wwi-iottech-quellcodes
|
58f86907af31187f267a9ea476f061cc59098ebd
|
[
"CC-BY-4.0"
] | null | null | null |
02 Hardwaredesign/Grundschaltungen/src/led_pwm_blink.py
|
DennisSchulmeister/dhbwka-wwi-iottech-quellcodes
|
58f86907af31187f267a9ea476f061cc59098ebd
|
[
"CC-BY-4.0"
] | 1 |
2020-10-10T20:24:05.000Z
|
2020-10-10T20:24:05.000Z
|
#! ./env/bin/python3
#encoding=utf-8
# Copyright (C) 2019 Dennis Schulmeister-Zimolong
#
# E-Mail: [email protected]
# Webseite: https://www.wpvs.de
#
# Dieser Quellcode ist lizenziert unter einer
# Creative Commons Namensnennung 4.0 International Lizenz
"""
Minimalbeispiel einer mit Pulsweitenmodulation zum Blinken gebrachten LED.
Die LED muss hierzu über einen Widerstand mit GPIO 12 verbunden werden.
"""
import os, time
import RPi.GPIO as GPIO
GPIO_LED = 12
FREQUENCY = 1 # 1 Hz
DUTY_CYCLE = 50 # 50% => 0.5 Hz
if __name__ == "__main__":
try:
# GPIO-Pin initialisieren
GPIO.setmode(GPIO.BCM)
GPIO.setup(GPIO_LED, GPIO.OUT)
led_pwm = GPIO.PWM(GPIO_LED, FREQUENCY)
led_pwm.start(DUTY_CYCLE)
# Endlosschleife, damit das Programm nicht beendet wird
while True:
time.sleep(10)
except KeyboardInterrupt:
pass
GPIO.cleanup()
| 23.225 | 74 | 0.679225 |
0e36357ea582d0e57dfe9a2dd82e1108f0de7c4e
| 1,096 |
py
|
Python
|
cs/lambda_cs/07_computer_architecture/notes/674/lesson.py
|
tobias-fyi/vela
|
b0b3d3c6dc3fa397c8c7a492098a02cf75e0ff82
|
[
"MIT"
] | null | null | null |
cs/lambda_cs/07_computer_architecture/notes/674/lesson.py
|
tobias-fyi/vela
|
b0b3d3c6dc3fa397c8c7a492098a02cf75e0ff82
|
[
"MIT"
] | 8 |
2020-03-24T17:47:23.000Z
|
2022-03-12T00:33:21.000Z
|
cs/lambda_cs/07_computer_architecture/notes/674/lesson.py
|
tobias-fyi/vela
|
b0b3d3c6dc3fa397c8c7a492098a02cf75e0ff82
|
[
"MIT"
] | null | null | null |
"""
Computer Architecture - Day 4
"""
# === Stack frames === #
# Stack grows downward
# 701:
#
# 700: # return point 1 |
# 699: a = 2 | main()'s stack frame
# 698: b = ?? |
#
# 697: # return point 2 |
# 696: x = 2 |
# 695: y = 7 | mult2()'s stack frame
# 694: z =14 |
#
# When you call, return address gets pushed ont the stack
# When you return, return addr gets popped off the stack and stored in PC
# def mult2(x, y):
# z = x * y
# return z
# def main():
# a = 2
# b = mult2(a, 7)
# # return point 2
# print(b) # 14
# return
# main()
# # return point 2
# print("All done")
# === Recursive example === #
# 701: # return point 1
# 700: n = 4
#
# 699: # return point 2
# 699: n = 3
#
# 697: # return point 2
# 696: n = 2
#
# 695: # return point 2
# 694: n = 1
#
# 693: # return point 2
# 692: n = 0
def count(n):
    """Print n, n-1, ..., 1 via recursion; stop silently when n reaches 0."""
    if n != 0:
        print(n)
        count(n - 1)  # each recursive call pushes a new stack frame


count(4)
# return point 1
# tail call optimization
| 13.530864 | 73 | 0.491788 |
0e4438b4526dd3446ad66b4e0e667874efe862f0
| 4,730 |
py
|
Python
|
practices/practice_2/task4.1.py
|
br4ch1st0chr0n3/robotic_systems_labs
|
23b8b81dc845e00cf02460258b9cec817019957b
|
[
"MIT"
] | null | null | null |
practices/practice_2/task4.1.py
|
br4ch1st0chr0n3/robotic_systems_labs
|
23b8b81dc845e00cf02460258b9cec817019957b
|
[
"MIT"
] | null | null | null |
practices/practice_2/task4.1.py
|
br4ch1st0chr0n3/robotic_systems_labs
|
23b8b81dc845e00cf02460258b9cec817019957b
|
[
"MIT"
] | null | null | null |
from libs.can import CANSocket
from libs.myactuator import MyActuator
from time import perf_counter, sleep
import numpy as np
# import getpass
# password = getpass.getpass()
# the serial port of device
# you may find one by examing /dev/ folder,
# this is usually devices ttyACM
# os.system(f"sudo slcand -o -c -s8 /dev/serial/by-id/usb-Protofusion_Labs_CANable_8c005eb_https\:__github.com_normaldotcom_cantact-fw.git_001D00335734570920343135-if00 can0")
# serial device name under /dev for the CAN adapter
serial_device = "ttyACM1"
# Initiate the can bus socket
can_bus = CANSocket(serial_port=serial_device)
# Initiate motor
motor = MyActuator(can_bus=can_bus)
# Set the control loop timings
frequency = 500  # control-loop rate in Hz
sampling_time = 1 / frequency  # seconds between control updates
def stop_motor(motor):
    """Command zero current repeatedly so the drive reliably receives the stop."""
    repeats = 100
    sent = 0
    while sent < repeats:
        motor.set_current(0)
        sent += 1
# total working time
T = 6  # seconds per gain experiment
N = T * frequency  # number of control samples logged per experiment
gains = [20, 50, 100]  # proportional gains to compare
g_n = len(gains)
angles = np.zeros((g_n, N))  # logged angle per gain / per sample
velocities = np.zeros((g_n, N))  # logged angular speed per gain / per sample
times = np.zeros(N)  # sample timestamps (seconds since loop start)
angle_initial = 2  # offset added to the motor's start angle - presumably rad, TODO confirm
velocity_desired = 0
period = 2  # seconds each random setpoint is held
import random
left, right = -4*np.pi, 4*np.pi  # setpoint range: +/- 2 full turns
def get_desired(period):
    """Build a piecewise-constant random reference trajectory of length N.

    A fresh setpoint uniform in [left, right] is drawn once per `period`
    seconds of control-loop time (at rate `frequency`); samples in between
    hold the previous value.
    """
    trajectory = np.zeros(N)
    setpoint = random.uniform(left, right)
    last_segment = 0
    samples_per_segment = period * frequency
    for idx in range(N):
        segment = int(idx / samples_per_segment)
        if segment > last_segment:
            # crossed a segment boundary: draw the next random setpoint
            setpoint = random.uniform(left, right)
            last_segment = segment
        trajectory[idx] = setpoint
    return trajectory
# piecewise-constant random reference shared by all gain runs
angle_desired = get_desired(period)
motor.set_current(0)
try:
    # one experiment per proportional gain
    # for k in range(0):
    for k in range(g_n):
        i = 0
        last_execution = 0
        control = 0
        # find the global time before entering control loop
        initial_time = perf_counter()
        # motor.set_zero()
        # work in coordinates relative to the start pose (shifted by angle_initial)
        initial_angle = motor.state["angle"] + angle_initial
        while True:
            time = perf_counter() - initial_time # get actual time in secs
            # /////////////////////////
            # Get and parse motor state
            # /////////////////////////
            state = motor.state
            theta = state["angle"] - initial_angle
            dtheta = state["speed"]
            current = state["current"]  # NOTE(review): read but unused below
            # ///////////////////////////////////////////
            # Update the control only on specific timings
            # ///////////////////////////////////////////
            # P-control
            if (time - last_execution) >= sampling_time:
                if i < N:
                    angles[k, i] = theta
                    velocities[k, i] = dtheta
                    times[i] = time
                else:
                    # all N samples collected: end this gain's run
                    break
                control = -gains[k] * (theta - angle_desired[i])
                i += 1
                last_execution = time
            # YOUR CONTROLLER GOES HERE
            # the command is re-sent every iteration, not only on update ticks
            motor.set_current(control)
        stop_motor(motor)
        sleep(1)
except KeyboardInterrupt:
    # allow a clean manual abort: zero the current before leaving
    stop_motor(motor)
    print("Disabled by interrupt")
    motor = None
import matplotlib.pyplot as plt
# side-by-side axes: angle on the left, angular velocity on the right
fig, ax = plt.subplots(1, 2, figsize=(18, 9))
ax[0].set_xlabel("t [s]")
ax[0].set_ylabel("$\\theta$ [$rad$]")
ax[1].set_xlabel("t [s]")
ax[1].set_ylabel("$\\dot{\\theta}$ [$\\frac{rad}{s}$]")
bound = 1  # settling band half-width used by get_settling_time - presumably rad
last_n = 10  # number of trailing samples that must sit inside the band
def get_settling_time(x, ts):
    """Return the timestamp at which x enters (and stays in) the settling band.

    A signal counts as settled only if its last `last_n` samples all lie
    within `bound` of the final sample x[-1]; otherwise -1 is returned.
    The banded mask is scanned from the end: the first sample found outside
    the band marks where settling began.
    """
    x_f = x[-1]
    is_setttled = np.all(np.abs(x[-last_n:] - x_f) <= bound)
    if not is_setttled:
        return -1
    # boolean "within band" mask, reversed so index 0 is the last sample
    diffs = (np.abs(x - x_f) <= bound)[::-1]
    j = 0
    n = len(diffs)
    for i in range(len(diffs)):
        if diffs[i] == False:
            j = i
            break
    # NOTE(review): if the whole signal is inside the band, j stays 0 and this
    # returns ts[n - 1] (the LAST timestamp) - confirm that is the intent.
    return ts[n - j - 1]
def get_overshoot(x):
    """Return the largest sample that is at or above |x[-1]|.

    NOTE(review): despite the name this returns an absolute level, not the
    difference from the final value - confirm the intended metric.
    """
    settle_level = np.abs(x[-1])
    above = x[x >= settle_level]
    return np.max(above)
def get_steady_state_error(x, x_d):
    """Absolute difference between the last sample of x and the target x_d."""
    final_sample = x[-1]
    return np.abs(final_sample - x_d)
def get_characteristics(x, x_d, ts):
    """Bundle [settling time, overshoot, steady-state error] for one response.

    Returns the sentinel [-100., -100., -100.] when the signal never settles
    (get_settling_time reports -1).
    """
    t_settle = get_settling_time(x, ts)
    if t_settle != -1:
        return [t_settle, get_overshoot(x), get_steady_state_error(x, x_d)]
    return [-100., -100., -100.]
def add_plot(ax, x, x_d, ts, gain, for_angle):
    """Plot signal x against time ts on ax, labelled with the controller gain.

    x_d and for_angle are currently unused; kept so the call sites stay valid.
    """
    series_label = f"gain: {gain}"
    ax.plot(ts, x, label=series_label)
# one angle curve and one velocity curve per tested gain
for i in range(g_n):
    add_plot(
        ax=ax[0],
        x=angles[i],
        x_d=angle_desired[i],
        ts=times,
        gain=gains[i],
        for_angle=True,
    )
    add_plot(
        ax=ax[1],
        x=velocities[i],
        x_d=velocity_desired,
        ts=times,
        gain=gains[i],
        for_angle=False,
    )
# overlay the reference trajectory on the angle axis
ax[0].plot(times, angle_desired, label="$\\theta_{{desired}}$")
ax[0].legend()
ax[1].legend()
fig.suptitle(f"control loop frequency = {frequency} Hz", fontsize="13")
fig.tight_layout(pad=3.0)
plt.savefig("./plots/4.1.png")
plt.show()
| 22.631579 | 175 | 0.569979 |
bc06d3542bbc4ea78df37d43aa38b90c5d777f21
| 26,340 |
py
|
Python
|
research/cvtmodel/regnet/src/regnet_x_1_6gf.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cvtmodel/regnet/src/regnet_x_1_6gf.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cvtmodel/regnet/src/regnet_x_1_6gf.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import mindspore.ops as P
from mindspore import nn
class Module1(nn.Cell):
    """Generated building block: one biased Conv2d followed by ReLU."""

    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_0_kernel_size, conv2d_0_stride,
                 conv2d_0_padding, conv2d_0_pad_mode, conv2d_0_group):
        super(Module1, self).__init__()
        conv_cfg = {
            "in_channels": conv2d_0_in_channels,
            "out_channels": conv2d_0_out_channels,
            "kernel_size": conv2d_0_kernel_size,
            "stride": conv2d_0_stride,
            "padding": conv2d_0_padding,
            "pad_mode": conv2d_0_pad_mode,
            "dilation": (1, 1),
            "group": conv2d_0_group,
            "has_bias": True,
        }
        self.conv2d_0 = nn.Conv2d(**conv_cfg)
        self.relu_1 = nn.ReLU()

    def construct(self, x):
        # conv -> relu
        return self.relu_1(self.conv2d_0(x))
return opt_relu_1
class Module6(nn.Cell):
    """Generated downsampling branch: two Module1 conv-relu blocks, then a 1x1 conv.

    Used as the residual branch at the start of each stage; the matching
    strided 1x1 projection shortcut lives in the caller.
    """

    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, module1_0_conv2d_0_in_channels,
                 module1_0_conv2d_0_out_channels, module1_0_conv2d_0_kernel_size, module1_0_conv2d_0_stride,
                 module1_0_conv2d_0_padding, module1_0_conv2d_0_pad_mode, module1_0_conv2d_0_group,
                 module1_1_conv2d_0_in_channels, module1_1_conv2d_0_out_channels, module1_1_conv2d_0_kernel_size,
                 module1_1_conv2d_0_stride, module1_1_conv2d_0_padding, module1_1_conv2d_0_pad_mode,
                 module1_1_conv2d_0_group):
        super(Module6, self).__init__()
        self.module1_0 = Module1(conv2d_0_in_channels=module1_0_conv2d_0_in_channels,
                                 conv2d_0_out_channels=module1_0_conv2d_0_out_channels,
                                 conv2d_0_kernel_size=module1_0_conv2d_0_kernel_size,
                                 conv2d_0_stride=module1_0_conv2d_0_stride,
                                 conv2d_0_padding=module1_0_conv2d_0_padding,
                                 conv2d_0_pad_mode=module1_0_conv2d_0_pad_mode,
                                 conv2d_0_group=module1_0_conv2d_0_group)
        self.module1_1 = Module1(conv2d_0_in_channels=module1_1_conv2d_0_in_channels,
                                 conv2d_0_out_channels=module1_1_conv2d_0_out_channels,
                                 conv2d_0_kernel_size=module1_1_conv2d_0_kernel_size,
                                 conv2d_0_stride=module1_1_conv2d_0_stride,
                                 conv2d_0_padding=module1_1_conv2d_0_padding,
                                 conv2d_0_pad_mode=module1_1_conv2d_0_pad_mode,
                                 conv2d_0_group=module1_1_conv2d_0_group)
        # final pointwise conv (no activation after it)
        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
                                  out_channels=conv2d_0_out_channels,
                                  kernel_size=(1, 1),
                                  stride=(1, 1),
                                  padding=0,
                                  pad_mode="valid",
                                  dilation=(1, 1),
                                  group=1,
                                  has_bias=True)

    def construct(self, x):
        """Apply module1_0 -> module1_1 -> 1x1 conv."""
        module1_0_opt = self.module1_0(x)
        module1_1_opt = self.module1_1(module1_0_opt)
        opt_conv2d_0 = self.conv2d_0(module1_1_opt)
        return opt_conv2d_0
class Module0(nn.Cell):
    """Residual bottleneck: 1x1 conv -> grouped 3x3 conv -> 1x1 conv, plus identity skip."""

    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_2_in_channels, conv2d_2_out_channels,
                 conv2d_2_group, conv2d_4_in_channels, conv2d_4_out_channels):
        super(Module0, self).__init__()
        # settings shared by the two pointwise (1x1) convolutions
        pointwise = {"kernel_size": (1, 1), "stride": (1, 1), "padding": 0, "pad_mode": "valid",
                     "dilation": (1, 1), "group": 1, "has_bias": True}
        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
                                  out_channels=conv2d_0_out_channels, **pointwise)
        self.relu_1 = nn.ReLU()
        # grouped spatial convolution in the middle of the bottleneck
        self.conv2d_2 = nn.Conv2d(in_channels=conv2d_2_in_channels,
                                  out_channels=conv2d_2_out_channels,
                                  kernel_size=(3, 3),
                                  stride=(1, 1),
                                  padding=(1, 1, 1, 1),
                                  pad_mode="pad",
                                  dilation=(1, 1),
                                  group=conv2d_2_group,
                                  has_bias=True)
        self.relu_3 = nn.ReLU()
        self.conv2d_4 = nn.Conv2d(in_channels=conv2d_4_in_channels,
                                  out_channels=conv2d_4_out_channels, **pointwise)
        self.relu_6 = nn.ReLU()

    def construct(self, x):
        branch = self.relu_1(self.conv2d_0(x))
        branch = self.relu_3(self.conv2d_2(branch))
        branch = self.conv2d_4(branch)
        # residual add followed by the final activation
        return self.relu_6(P.Add()(x, branch))
class Module11(nn.Cell):
    """Three residual bottleneck blocks (Module0) applied in sequence."""

    def __init__(self, module0_0_conv2d_0_in_channels, module0_0_conv2d_0_out_channels, module0_0_conv2d_2_in_channels,
                 module0_0_conv2d_2_out_channels, module0_0_conv2d_2_group, module0_0_conv2d_4_in_channels,
                 module0_0_conv2d_4_out_channels, module0_1_conv2d_0_in_channels, module0_1_conv2d_0_out_channels,
                 module0_1_conv2d_2_in_channels, module0_1_conv2d_2_out_channels, module0_1_conv2d_2_group,
                 module0_1_conv2d_4_in_channels, module0_1_conv2d_4_out_channels, module0_2_conv2d_0_in_channels,
                 module0_2_conv2d_0_out_channels, module0_2_conv2d_2_in_channels, module0_2_conv2d_2_out_channels,
                 module0_2_conv2d_2_group, module0_2_conv2d_4_in_channels, module0_2_conv2d_4_out_channels):
        super(Module11, self).__init__()
        self.module0_0 = Module0(conv2d_0_in_channels=module0_0_conv2d_0_in_channels,
                                 conv2d_0_out_channels=module0_0_conv2d_0_out_channels,
                                 conv2d_2_in_channels=module0_0_conv2d_2_in_channels,
                                 conv2d_2_out_channels=module0_0_conv2d_2_out_channels,
                                 conv2d_2_group=module0_0_conv2d_2_group,
                                 conv2d_4_in_channels=module0_0_conv2d_4_in_channels,
                                 conv2d_4_out_channels=module0_0_conv2d_4_out_channels)
        self.module0_1 = Module0(conv2d_0_in_channels=module0_1_conv2d_0_in_channels,
                                 conv2d_0_out_channels=module0_1_conv2d_0_out_channels,
                                 conv2d_2_in_channels=module0_1_conv2d_2_in_channels,
                                 conv2d_2_out_channels=module0_1_conv2d_2_out_channels,
                                 conv2d_2_group=module0_1_conv2d_2_group,
                                 conv2d_4_in_channels=module0_1_conv2d_4_in_channels,
                                 conv2d_4_out_channels=module0_1_conv2d_4_out_channels)
        self.module0_2 = Module0(conv2d_0_in_channels=module0_2_conv2d_0_in_channels,
                                 conv2d_0_out_channels=module0_2_conv2d_0_out_channels,
                                 conv2d_2_in_channels=module0_2_conv2d_2_in_channels,
                                 conv2d_2_out_channels=module0_2_conv2d_2_out_channels,
                                 conv2d_2_group=module0_2_conv2d_2_group,
                                 conv2d_4_in_channels=module0_2_conv2d_4_in_channels,
                                 conv2d_4_out_channels=module0_2_conv2d_4_out_channels)

    def construct(self, x):
        """Chain the three residual blocks."""
        module0_0_opt = self.module0_0(x)
        module0_1_opt = self.module0_1(module0_0_opt)
        module0_2_opt = self.module0_2(module0_1_opt)
        return module0_2_opt
class Module8(nn.Cell):
    """Two residual bottleneck blocks (Module0) applied in sequence."""

    def __init__(self, module0_0_conv2d_0_in_channels, module0_0_conv2d_0_out_channels, module0_0_conv2d_2_in_channels,
                 module0_0_conv2d_2_out_channels, module0_0_conv2d_2_group, module0_0_conv2d_4_in_channels,
                 module0_0_conv2d_4_out_channels, module0_1_conv2d_0_in_channels, module0_1_conv2d_0_out_channels,
                 module0_1_conv2d_2_in_channels, module0_1_conv2d_2_out_channels, module0_1_conv2d_2_group,
                 module0_1_conv2d_4_in_channels, module0_1_conv2d_4_out_channels):
        super(Module8, self).__init__()
        self.module0_0 = Module0(
            conv2d_0_in_channels=module0_0_conv2d_0_in_channels,
            conv2d_0_out_channels=module0_0_conv2d_0_out_channels,
            conv2d_2_in_channels=module0_0_conv2d_2_in_channels,
            conv2d_2_out_channels=module0_0_conv2d_2_out_channels,
            conv2d_2_group=module0_0_conv2d_2_group,
            conv2d_4_in_channels=module0_0_conv2d_4_in_channels,
            conv2d_4_out_channels=module0_0_conv2d_4_out_channels,
        )
        self.module0_1 = Module0(
            conv2d_0_in_channels=module0_1_conv2d_0_in_channels,
            conv2d_0_out_channels=module0_1_conv2d_0_out_channels,
            conv2d_2_in_channels=module0_1_conv2d_2_in_channels,
            conv2d_2_out_channels=module0_1_conv2d_2_out_channels,
            conv2d_2_group=module0_1_conv2d_2_group,
            conv2d_4_in_channels=module0_1_conv2d_4_in_channels,
            conv2d_4_out_channels=module0_1_conv2d_4_out_channels,
        )

    def construct(self, x):
        # chain the two residual blocks
        return self.module0_1(self.module0_0(x))
class MindSporeModel(nn.Cell):
    """RegNetX-1.6GF image classifier (generated code).

    Stem conv (3->32, stride 2), then four stages of grouped residual
    bottlenecks at 72, 168, 408 and 912 channels. Each stage opens with a
    strided Module6 branch added to a strided 1x1 projection shortcut,
    followed by plain residual blocks (Module0/Module8/Module11). The head
    is 7x7 average pooling, flatten, and a 1000-way dense layer.
    """

    def __init__(self):
        super(MindSporeModel, self).__init__()
        # --- stem: 3 -> 32 channels, stride 2 ---
        self.conv2d_0 = nn.Conv2d(in_channels=3,
                                  out_channels=32,
                                  kernel_size=(3, 3),
                                  stride=(2, 2),
                                  padding=(1, 1, 1, 1),
                                  pad_mode="pad",
                                  dilation=(1, 1),
                                  group=1,
                                  has_bias=True)
        self.relu_1 = nn.ReLU()
        # --- stage 1 (72 channels): strided projection shortcut + Module6 branch ---
        self.conv2d_2 = nn.Conv2d(in_channels=32,
                                  out_channels=72,
                                  kernel_size=(1, 1),
                                  stride=(2, 2),
                                  padding=0,
                                  pad_mode="valid",
                                  dilation=(1, 1),
                                  group=1,
                                  has_bias=True)
        self.module6_0 = Module6(conv2d_0_in_channels=72,
                                 conv2d_0_out_channels=72,
                                 module1_0_conv2d_0_in_channels=32,
                                 module1_0_conv2d_0_out_channels=72,
                                 module1_0_conv2d_0_kernel_size=(1, 1),
                                 module1_0_conv2d_0_stride=(1, 1),
                                 module1_0_conv2d_0_padding=0,
                                 module1_0_conv2d_0_pad_mode="valid",
                                 module1_0_conv2d_0_group=1,
                                 module1_1_conv2d_0_in_channels=72,
                                 module1_1_conv2d_0_out_channels=72,
                                 module1_1_conv2d_0_kernel_size=(3, 3),
                                 module1_1_conv2d_0_stride=(2, 2),
                                 module1_1_conv2d_0_padding=(1, 1, 1, 1),
                                 module1_1_conv2d_0_pad_mode="pad",
                                 module1_1_conv2d_0_group=3)
        self.relu_9 = nn.ReLU()
        self.module0_0 = Module0(conv2d_0_in_channels=72,
                                 conv2d_0_out_channels=72,
                                 conv2d_2_in_channels=72,
                                 conv2d_2_out_channels=72,
                                 conv2d_2_group=3,
                                 conv2d_4_in_channels=72,
                                 conv2d_4_out_channels=72)
        # --- stage 2 (168 channels) ---
        self.conv2d_17 = nn.Conv2d(in_channels=72,
                                   out_channels=168,
                                   kernel_size=(1, 1),
                                   stride=(2, 2),
                                   padding=0,
                                   pad_mode="valid",
                                   dilation=(1, 1),
                                   group=1,
                                   has_bias=True)
        self.module6_1 = Module6(conv2d_0_in_channels=168,
                                 conv2d_0_out_channels=168,
                                 module1_0_conv2d_0_in_channels=72,
                                 module1_0_conv2d_0_out_channels=168,
                                 module1_0_conv2d_0_kernel_size=(1, 1),
                                 module1_0_conv2d_0_stride=(1, 1),
                                 module1_0_conv2d_0_padding=0,
                                 module1_0_conv2d_0_pad_mode="valid",
                                 module1_0_conv2d_0_group=1,
                                 module1_1_conv2d_0_in_channels=168,
                                 module1_1_conv2d_0_out_channels=168,
                                 module1_1_conv2d_0_kernel_size=(3, 3),
                                 module1_1_conv2d_0_stride=(2, 2),
                                 module1_1_conv2d_0_padding=(1, 1, 1, 1),
                                 module1_1_conv2d_0_pad_mode="pad",
                                 module1_1_conv2d_0_group=7)
        self.relu_24 = nn.ReLU()
        self.module11_0 = Module11(module0_0_conv2d_0_in_channels=168,
                                   module0_0_conv2d_0_out_channels=168,
                                   module0_0_conv2d_2_in_channels=168,
                                   module0_0_conv2d_2_out_channels=168,
                                   module0_0_conv2d_2_group=7,
                                   module0_0_conv2d_4_in_channels=168,
                                   module0_0_conv2d_4_out_channels=168,
                                   module0_1_conv2d_0_in_channels=168,
                                   module0_1_conv2d_0_out_channels=168,
                                   module0_1_conv2d_2_in_channels=168,
                                   module0_1_conv2d_2_out_channels=168,
                                   module0_1_conv2d_2_group=7,
                                   module0_1_conv2d_4_in_channels=168,
                                   module0_1_conv2d_4_out_channels=168,
                                   module0_2_conv2d_0_in_channels=168,
                                   module0_2_conv2d_0_out_channels=168,
                                   module0_2_conv2d_2_in_channels=168,
                                   module0_2_conv2d_2_out_channels=168,
                                   module0_2_conv2d_2_group=7,
                                   module0_2_conv2d_4_in_channels=168,
                                   module0_2_conv2d_4_out_channels=168)
        # --- stage 3 (408 channels) ---
        self.conv2d_46 = nn.Conv2d(in_channels=168,
                                   out_channels=408,
                                   kernel_size=(1, 1),
                                   stride=(2, 2),
                                   padding=0,
                                   pad_mode="valid",
                                   dilation=(1, 1),
                                   group=1,
                                   has_bias=True)
        self.module6_2 = Module6(conv2d_0_in_channels=408,
                                 conv2d_0_out_channels=408,
                                 module1_0_conv2d_0_in_channels=168,
                                 module1_0_conv2d_0_out_channels=408,
                                 module1_0_conv2d_0_kernel_size=(1, 1),
                                 module1_0_conv2d_0_stride=(1, 1),
                                 module1_0_conv2d_0_padding=0,
                                 module1_0_conv2d_0_pad_mode="valid",
                                 module1_0_conv2d_0_group=1,
                                 module1_1_conv2d_0_in_channels=408,
                                 module1_1_conv2d_0_out_channels=408,
                                 module1_1_conv2d_0_kernel_size=(3, 3),
                                 module1_1_conv2d_0_stride=(2, 2),
                                 module1_1_conv2d_0_padding=(1, 1, 1, 1),
                                 module1_1_conv2d_0_pad_mode="pad",
                                 module1_1_conv2d_0_group=17)
        self.relu_53 = nn.ReLU()
        self.module8_0 = Module8(module0_0_conv2d_0_in_channels=408,
                                 module0_0_conv2d_0_out_channels=408,
                                 module0_0_conv2d_2_in_channels=408,
                                 module0_0_conv2d_2_out_channels=408,
                                 module0_0_conv2d_2_group=17,
                                 module0_0_conv2d_4_in_channels=408,
                                 module0_0_conv2d_4_out_channels=408,
                                 module0_1_conv2d_0_in_channels=408,
                                 module0_1_conv2d_0_out_channels=408,
                                 module0_1_conv2d_2_in_channels=408,
                                 module0_1_conv2d_2_out_channels=408,
                                 module0_1_conv2d_2_group=17,
                                 module0_1_conv2d_4_in_channels=408,
                                 module0_1_conv2d_4_out_channels=408)
        self.module8_1 = Module8(module0_0_conv2d_0_in_channels=408,
                                 module0_0_conv2d_0_out_channels=408,
                                 module0_0_conv2d_2_in_channels=408,
                                 module0_0_conv2d_2_out_channels=408,
                                 module0_0_conv2d_2_group=17,
                                 module0_0_conv2d_4_in_channels=408,
                                 module0_0_conv2d_4_out_channels=408,
                                 module0_1_conv2d_0_in_channels=408,
                                 module0_1_conv2d_0_out_channels=408,
                                 module0_1_conv2d_2_in_channels=408,
                                 module0_1_conv2d_2_out_channels=408,
                                 module0_1_conv2d_2_group=17,
                                 module0_1_conv2d_4_in_channels=408,
                                 module0_1_conv2d_4_out_channels=408)
        self.module8_2 = Module8(module0_0_conv2d_0_in_channels=408,
                                 module0_0_conv2d_0_out_channels=408,
                                 module0_0_conv2d_2_in_channels=408,
                                 module0_0_conv2d_2_out_channels=408,
                                 module0_0_conv2d_2_group=17,
                                 module0_0_conv2d_4_in_channels=408,
                                 module0_0_conv2d_4_out_channels=408,
                                 module0_1_conv2d_0_in_channels=408,
                                 module0_1_conv2d_0_out_channels=408,
                                 module0_1_conv2d_2_in_channels=408,
                                 module0_1_conv2d_2_out_channels=408,
                                 module0_1_conv2d_2_group=17,
                                 module0_1_conv2d_4_in_channels=408,
                                 module0_1_conv2d_4_out_channels=408)
        self.module11_1 = Module11(module0_0_conv2d_0_in_channels=408,
                                   module0_0_conv2d_0_out_channels=408,
                                   module0_0_conv2d_2_in_channels=408,
                                   module0_0_conv2d_2_out_channels=408,
                                   module0_0_conv2d_2_group=17,
                                   module0_0_conv2d_4_in_channels=408,
                                   module0_0_conv2d_4_out_channels=408,
                                   module0_1_conv2d_0_in_channels=408,
                                   module0_1_conv2d_0_out_channels=408,
                                   module0_1_conv2d_2_in_channels=408,
                                   module0_1_conv2d_2_out_channels=408,
                                   module0_1_conv2d_2_group=17,
                                   module0_1_conv2d_4_in_channels=408,
                                   module0_1_conv2d_4_out_channels=408,
                                   module0_2_conv2d_0_in_channels=408,
                                   module0_2_conv2d_0_out_channels=408,
                                   module0_2_conv2d_2_in_channels=408,
                                   module0_2_conv2d_2_out_channels=408,
                                   module0_2_conv2d_2_group=17,
                                   module0_2_conv2d_4_in_channels=408,
                                   module0_2_conv2d_4_out_channels=408)
        # --- stage 4 (912 channels) ---
        self.conv2d_117 = nn.Conv2d(in_channels=408,
                                    out_channels=912,
                                    kernel_size=(1, 1),
                                    stride=(2, 2),
                                    padding=0,
                                    pad_mode="valid",
                                    dilation=(1, 1),
                                    group=1,
                                    has_bias=True)
        self.module6_3 = Module6(conv2d_0_in_channels=912,
                                 conv2d_0_out_channels=912,
                                 module1_0_conv2d_0_in_channels=408,
                                 module1_0_conv2d_0_out_channels=912,
                                 module1_0_conv2d_0_kernel_size=(1, 1),
                                 module1_0_conv2d_0_stride=(1, 1),
                                 module1_0_conv2d_0_padding=0,
                                 module1_0_conv2d_0_pad_mode="valid",
                                 module1_0_conv2d_0_group=1,
                                 module1_1_conv2d_0_in_channels=912,
                                 module1_1_conv2d_0_out_channels=912,
                                 module1_1_conv2d_0_kernel_size=(3, 3),
                                 module1_1_conv2d_0_stride=(2, 2),
                                 module1_1_conv2d_0_padding=(1, 1, 1, 1),
                                 module1_1_conv2d_0_pad_mode="pad",
                                 module1_1_conv2d_0_group=38)
        self.relu_124 = nn.ReLU()
        self.module0_1 = Module0(conv2d_0_in_channels=912,
                                 conv2d_0_out_channels=912,
                                 conv2d_2_in_channels=912,
                                 conv2d_2_out_channels=912,
                                 conv2d_2_group=38,
                                 conv2d_4_in_channels=912,
                                 conv2d_4_out_channels=912)
        # --- classification head ---
        self.avgpool2d_132 = nn.AvgPool2d(kernel_size=(7, 7))
        self.flatten_133 = nn.Flatten()
        self.dense_134 = nn.Dense(in_channels=912, out_channels=1000, has_bias=True)

    def construct(self, input_1):
        """Run the full forward pass; returns 1000-way logits."""
        opt_conv2d_0 = self.conv2d_0(input_1)
        opt_relu_1 = self.relu_1(opt_conv2d_0)
        # stage 1: projection shortcut + downsampling branch
        opt_conv2d_2 = self.conv2d_2(opt_relu_1)
        module6_0_opt = self.module6_0(opt_relu_1)
        opt_add_8 = P.Add()(opt_conv2d_2, module6_0_opt)
        opt_relu_9 = self.relu_9(opt_add_8)
        module0_0_opt = self.module0_0(opt_relu_9)
        # stage 2
        opt_conv2d_17 = self.conv2d_17(module0_0_opt)
        module6_1_opt = self.module6_1(module0_0_opt)
        opt_add_23 = P.Add()(opt_conv2d_17, module6_1_opt)
        opt_relu_24 = self.relu_24(opt_add_23)
        module11_0_opt = self.module11_0(opt_relu_24)
        # stage 3
        opt_conv2d_46 = self.conv2d_46(module11_0_opt)
        module6_2_opt = self.module6_2(module11_0_opt)
        opt_add_52 = P.Add()(opt_conv2d_46, module6_2_opt)
        opt_relu_53 = self.relu_53(opt_add_52)
        module8_0_opt = self.module8_0(opt_relu_53)
        module8_1_opt = self.module8_1(module8_0_opt)
        module8_2_opt = self.module8_2(module8_1_opt)
        module11_1_opt = self.module11_1(module8_2_opt)
        # stage 4
        opt_conv2d_117 = self.conv2d_117(module11_1_opt)
        module6_3_opt = self.module6_3(module11_1_opt)
        opt_add_123 = P.Add()(opt_conv2d_117, module6_3_opt)
        opt_relu_124 = self.relu_124(opt_add_123)
        module0_1_opt = self.module0_1(opt_relu_124)
        # head
        opt_avgpool2d_132 = self.avgpool2d_132(module0_1_opt)
        opt_flatten_133 = self.flatten_133(opt_avgpool2d_132)
        opt_dense_134 = self.dense_134(opt_flatten_133)
        return opt_dense_134
| 59.863636 | 119 | 0.521336 |
1dd5096658f15855ad9f07df912a3069bdb43a5c
| 9,338 |
py
|
Python
|
tests/test_connectors/test_confluent.py
|
shipt/py-volley
|
0114651478c8df7304d3fe3cb9f72998901bb3fe
|
[
"MIT"
] | 8 |
2022-02-24T14:59:24.000Z
|
2022-03-31T04:37:55.000Z
|
tests/test_connectors/test_confluent.py
|
shipt/py-volley
|
0114651478c8df7304d3fe3cb9f72998901bb3fe
|
[
"MIT"
] | 3 |
2022-02-27T17:08:52.000Z
|
2022-03-18T13:11:01.000Z
|
tests/test_connectors/test_confluent.py
|
shipt/py-volley
|
0114651478c8df7304d3fe3cb9f72998901bb3fe
|
[
"MIT"
] | 2 |
2022-02-24T15:03:07.000Z
|
2022-03-15T03:12:00.000Z
|
import json
import logging
from random import randint
from typing import Any, List, Tuple
from unittest.mock import MagicMock, patch
from uuid import uuid4
import pytest
from pytest import LogCaptureFixture, MonkeyPatch
from example.data_models import InputMessage, OutputMessage
from tests.conftest import KafkaMessage
from volley import Engine
from volley.connectors.confluent import (
ConfluentKafkaConsumer,
ConfluentKafkaProducer,
handle_creds,
)
from volley.data_models import QueueMessage
def test_confluent_producer(mock_confluent_producer: ConfluentKafkaProducer) -> None:
    """A mocked producer must report success from produce() and shut down cleanly."""
    assert mock_confluent_producer.produce(
        queue_name="test-topic", message=b"{'foo':'bar'}", message_context="consumed_message_id"
    )
    mock_confluent_producer.shutdown()
def test_handle_creds(monkeypatch: MonkeyPatch) -> None:
    """Without KAFKA_BROKERS in the environment, handle_creds must raise KeyError."""
    monkeypatch.delenv("KAFKA_BROKERS")
    with pytest.raises(KeyError):
        handle_creds(config_dict={})
def test_handle_creds_config_dict(monkeypatch: MonkeyPatch) -> None:
    """handle_creds must wire SASL credentials from the environment into the config."""
    monkeypatch.setenv("KAFKA_KEY", "get")
    monkeypatch.setenv("KAFKA_SECRET", "them")
    result = handle_creds(config_dict={})
    expected = {
        "sasl.username": "get",
        "sasl.password": "them",
        "security.protocol": "SASL_SSL",
        "sasl.mechanism": "PLAIN",
    }
    for key, value in expected.items():
        assert result[key] == value
@patch("volley.connectors.confluent.Consumer", MagicMock())
def test_confluent_consumer_no_consumer_group(monkeypatch: MonkeyPatch) -> None:
    """Consumer construction must fail when KAFKA_CONSUMER_GROUP is absent."""
    monkeypatch.delenv("KAFKA_CONSUMER_GROUP")
    with pytest.raises(Exception):
        ConfluentKafkaConsumer(queue_name="input-topic")
@patch("volley.connectors.confluent.Consumer", MagicMock())
def test_kafka_consumer_creds(monkeypatch: MonkeyPatch) -> None:
    """SASL credentials passed via config must survive into consumer.config."""
    monkeypatch.setenv("KAFKA_CONSUMER_GROUP", "test-group")
    config = {"sasl.username": "test-user", "sasl.password": "test-password"}
    c = ConfluentKafkaConsumer(config=config, queue_name="input-topic")
    assert "sasl.username" in c.config
    assert "sasl.password" in c.config
@patch("volley.connectors.confluent.Producer", MagicMock())
def test_kafka_producer_creds() -> None:
    """SASL credentials passed via config must survive into producer.config."""
    creds = {"sasl.username": "test-user", "sasl.password": "test-password"}
    producer = ConfluentKafkaProducer(config=creds, queue_name="input-topic")
    for field in ("sasl.username", "sasl.password"):
        assert field in producer.config
@patch("volley.connectors.confluent.Consumer")
def test_consumer(mock_consumer: MagicMock, monkeypatch: MonkeyPatch) -> None:
    """consume() wraps a polled Kafka message in a QueueMessage; on_fail must not raise."""
    monkeypatch.setenv("KAFKA_CONSUMER_GROUP", "test-group")
    kmsg = KafkaMessage(msg=b'{"random": "message"}')
    mock_consumer.return_value.poll = lambda x: kmsg
    b = ConfluentKafkaConsumer(host="localhost", queue_name="input-topic")
    q_message = b.consume()
    assert isinstance(q_message, QueueMessage)
    b.on_fail(kmsg)
@patch("volley.connectors.confluent.Consumer")
def test_consume_none(mock_consumer: MagicMock, monkeypatch: MonkeyPatch) -> None:
    """A poll that yields nothing must surface as None from consume()."""
    monkeypatch.setenv("KAFKA_CONSUMER_GROUP", "test-group")
    mock_consumer.return_value.poll = lambda x: None
    consumer = ConfluentKafkaConsumer(host="localhost", queue_name="input-topic")
    assert consumer.consume() is None
@patch("volley.connectors.confluent.Consumer")
def test_consume_error(mock_consumer: MagicMock, monkeypatch: MonkeyPatch) -> None:
    """A polled message flagged as an error must be swallowed and yield None."""
    monkeypatch.setenv("KAFKA_CONSUMER_GROUP", "test-group")
    mock_consumer.return_value.poll = lambda x: KafkaMessage(error=True)
    b = ConfluentKafkaConsumer(host="localhost", queue_name="input-topic")
    q_message = b.consume()
    assert q_message is None
@patch("volley.connectors.confluent.Consumer")
def test_consumer_group_init(mock_consumer: MagicMock, monkeypatch: MonkeyPatch) -> None: # pylint: disable=W0613
    """The consumer group id must be taken from the KAFKA_CONSUMER_GROUP env var."""
    with monkeypatch.context() as m:
        random_consumer_group = str(uuid4())
        m.setenv("KAFKA_CONSUMER_GROUP", random_consumer_group)
        m.setenv("KAFKA_BROKERS", "rando_kafka:9092")
        consumer = ConfluentKafkaConsumer(queue_name="input-topic")
        assert consumer.config["group.id"] == random_consumer_group
@patch("volley.connectors.confluent.Producer", MagicMock())
def test_callback(mock_confluent_producer: ConfluentKafkaProducer, caplog: LogCaptureFixture) -> None:
    """acked() must log a failure on delivery error and log success with the topic name."""
    mock_confluent_producer.on_fail = MagicMock()
    mock_confluent_producer.on_success = MagicMock()
    caplog.set_level(logging.DEBUG)
    # error path: a non-None err means the broker rejected the message
    m = KafkaMessage()
    expected_log_error = "my-logged-error"
    mock_confluent_producer.acked(err=expected_log_error, msg=m, consumer_context="consumed_message_id")
    assert "failed delivery" in caplog.messages[0].lower()
    # success path: err=None logs delivery with the destination topic
    m = KafkaMessage(topic="test-topic")
    mock_confluent_producer.acked(err=None, msg=m, consumer_context="consumed_message_id")
    assert "test-topic" in caplog.messages[1]
    assert "successful delivery" in caplog.messages[1].lower()
    mock_confluent_producer.shutdown()
@patch("volley.connectors.confluent.Consumer", MagicMock())
def test_consumer_init_configs() -> None:
    """User-supplied config keys must override the consumer defaults."""
    rand_interval = randint(0, 100)
    config = {"poll_interval": rand_interval, "auto.offset.reset": "latest"}
    con = ConfluentKafkaConsumer(queue_name="test", config=config)
    assert con.poll_interval == rand_interval
    assert con.config["auto.offset.reset"] == "latest"
@patch("volley.connectors.confluent.Producer", MagicMock())
def test_producer_init_configs() -> None:
    """User-supplied config keys must be forwarded to the producer config."""
    cfg = {"compression.type": "snappy", "poll_thread_timeout": 1}
    producer = ConfluentKafkaProducer(queue_name="test", config=cfg)
    assert producer.config["compression.type"] == "snappy"
    producer.shutdown()
@pytest.mark.parametrize(
    "queue",
    [("topic0"), ("topic0,topic1,topic2")],
)
@patch("volley.connectors.confluent.Consumer", MagicMock())
def test_callback_consumer(queue: str) -> None:
    """on_success must track the highest committed offset per topic and partition."""
    topics = queue.split(",")
    ackc = ConfluentKafkaConsumer(queue_name=queue)
    # first commit
    for topic in topics:
        m = KafkaMessage(topic=topic, partition=24, offset=42)
        ackc.on_success(m)
        assert ackc.last_offset[topic][24] == 42
    # commit a higher offset, same partition
    for topic in topics:
        m = KafkaMessage(topic=topic, partition=24, offset=43)
        ackc.on_success(m)
        assert ackc.last_offset[topic][24] == 43
    # commit a lower offset, same partition
    # should not change the last commit
    for topic in topics:
        m = KafkaMessage(topic=topic, partition=24, offset=1)
        ackc.on_success(m)
        assert ackc.last_offset[topic][24] == 43
    # commit to different partition
    for topic in topics:
        m = KafkaMessage(topic=topic, partition=1, offset=100)
        ackc.on_success(m)
        assert ackc.last_offset[topic][1] == 100
        assert ackc.last_offset[topic][24] == 43
    # repeatedly commit same data
    for topic in topics:
        m = KafkaMessage(topic=topic, partition=1, offset=100)
        for _ in range(10):
            ackc.on_success(m)
            assert ackc.last_offset[topic][1] == 100
            assert ackc.last_offset[topic][24] == 43
@patch("volley.connectors.confluent.os", MagicMock())
@patch("volley.connectors.confluent.Consumer", MagicMock())
def test_downstream_failure(caplog: LogCaptureFixture) -> None:
    """validate a downstream failure triggers the expected connector's shutdown procedure"""
    # stop_on_failure=True: connector logs an error and stops the application
    config = {"stop_on_failure": True}
    p = ConfluentKafkaConsumer(queue_name="test", config=config)
    with caplog.at_level(logging.ERROR):
        p.on_fail(message_context=KafkaMessage())
        assert "stopping application" in caplog.text.lower()
    # stop_on_failure=False: connector only logs a critical warning and carries on
    config = {"stop_on_failure": False}
    p = ConfluentKafkaConsumer(queue_name="test", config=config)
    with caplog.at_level(logging.WARNING):
        p.on_fail(message_context=KafkaMessage())
        assert "critical" in caplog.text.lower()
@patch("volley.connectors.rsmq.RSMQProducer")
@patch("volley.connectors.confluent.Consumer")
def test_downstream_failure_shutdown(
    mock_consumer: MagicMock, mock_producer: MagicMock, caplog: LogCaptureFixture
) -> None:
    """Validate a downstream failure triggers all the expected graceful shutdown procedures"""
    eng = Engine(
        input_queue="input-topic",
        output_queues=["redis_queue"],
        yaml_config_path="./example/volley_config.yml",
        metrics_port=None,
    )
    input_msg = json.dumps(InputMessage.schema()["examples"][0]).encode("utf-8")
    mock_consumer.return_value.poll = lambda x: KafkaMessage(topic="localhost.kafka.input", msg=input_msg)
    # for simplicity, make it a synchronous producer
    mock_producer.return_value.callback_delivery = False
    # mock the failure to produce
    mock_producer.return_value.produce = lambda *args, **kwargs: False
    # dummy output object to try to produce
    output_msg = OutputMessage.parse_obj(OutputMessage.schema()["examples"][0])
    @eng.stream_app
    def func(msg: Any) -> List[Tuple[str, OutputMessage]]:
        # hand the dummy output to the (failing) producer on every message
        print(msg.json())
        return [("redis_queue", output_msg)]
    # function should run then exit
    with caplog.at_level(logging.INFO):
        func()
    # make sure volley's graceful killer is set to shutdown
    assert eng.killer.kill_now is True
    assert "downstream failure" in caplog.text.lower()
    assert "shutdown volley complete" in caplog.text.lower()
| 38.427984 | 114 | 0.72553 |
699b0219239116848d7da3660b2b986ed38bb28c
| 567 |
py
|
Python
|
Persistence/log.py
|
simonbredemeier/ds100bot
|
1318b32b818891f4bc6d24f12fcf0ceae898f8bd
|
[
"Apache-2.0"
] | 15 |
2019-12-20T08:24:31.000Z
|
2022-03-18T09:24:25.000Z
|
Persistence/log.py
|
simonbredemeier/ds100bot
|
1318b32b818891f4bc6d24f12fcf0ceae898f8bd
|
[
"Apache-2.0"
] | 124 |
2020-04-20T04:36:49.000Z
|
2022-01-29T11:08:09.000Z
|
Persistence/log.py
|
simonbredemeier/ds100bot
|
1318b32b818891f4bc6d24f12fcf0ceae898f8bd
|
[
"Apache-2.0"
] | 12 |
2020-07-08T22:19:39.000Z
|
2022-03-19T09:13:11.000Z
|
# pylint: disable=C0114
import logging
#pylint: disable=W0611
from logging import DEBUG, INFO, WARNING, ERROR, CRITICAL
#pylint: enable=W0611
# we wrap logging because I want more control in getLogger.
def getLogger(name, fmt=None):
    """Return a logger; when `fmt` is given, attach exactly one stream handler.

    Wraps logging.getLogger so callers get a logger preconfigured with a
    '{'-style formatter. Guards against stacking duplicate handlers when the
    same named logger is requested more than once (the previous version added
    a new StreamHandler on every call, duplicating each log record).

    Args:
        name: logger name, as accepted by logging.getLogger.
        fmt: optional '{'-style format string; when given, the logger writes
            through its own StreamHandler and does not propagate to the root.

    Returns:
        logging.Logger: the (possibly newly configured) named logger.
    """
    log_ = logging.getLogger(name)
    if fmt is not None and not log_.handlers:
        # configure only once per named logger
        fm_ = logging.Formatter(fmt=fmt, style='{')
        lh_ = logging.StreamHandler()
        lh_.setFormatter(fm_)
        log_.addHandler(lh_)
        log_.propagate = False
    return log_
def basicConfig(*args, **kwargs):
    """Thin pass-through to logging.basicConfig, kept so callers only import this module."""
    return logging.basicConfig(*args, **kwargs)
| 25.772727 | 59 | 0.684303 |
38b2df9c2515dcc0e10d27b97ffc352f3cb3936a
| 596 |
py
|
Python
|
tests/mock/test_base.py
|
pcrete/skil-python
|
672a1aa9e8af020c960ab9ee280cbb6b194afc3f
|
[
"Apache-2.0"
] | 23 |
2018-09-19T13:34:27.000Z
|
2022-02-14T09:49:35.000Z
|
tests/mock/test_base.py
|
pcrete/skil-python
|
672a1aa9e8af020c960ab9ee280cbb6b194afc3f
|
[
"Apache-2.0"
] | 33 |
2018-10-18T07:58:05.000Z
|
2019-05-16T08:24:12.000Z
|
tests/mock/test_base.py
|
pcrete/skil-python
|
672a1aa9e8af020c960ab9ee280cbb6b194afc3f
|
[
"Apache-2.0"
] | 11 |
2018-10-21T18:58:57.000Z
|
2022-02-14T09:49:36.000Z
|
import skil
import sys
import pytest
if sys.version_info >= (3, 3):
import unittest.mock as mock
else:
import mock as mock
@mock.patch('skil.Skil')
def test_skil_mock(Skil):
    """Patching skil.Skil replaces the class; instantiation and a mocked
    method call should both succeed."""
    assert Skil is skil.Skil
    server = Skil()
    assert Skil.called
    server.get_default_server_id = mock.MagicMock(return_value=1337)
    server.get_default_server_id()
@mock.patch('skil.Skil')
def test_model_upload(Skil):
    """Uploading a model file on a mocked Skil server must not raise."""
    server = Skil()
    server.upload_model('./dummy.pb')
# Allow running this test module directly, outside a pytest invocation.
if __name__ == '__main__':
    pytest.main([__file__])
| 19.866667 | 73 | 0.718121 |
38b55e66c43a66681ef4587b0c1f30a1bca82e19
| 1,164 |
py
|
Python
|
python/en/archive/topics/temp/newspaper_url2txt.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
python/en/archive/topics/temp/newspaper_url2txt.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
python/en/archive/topics/temp/newspaper_url2txt.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
newspaper_url2txt.py
Package Newspaper3k
Reference
- Newspaper On Python, DANIEL HOADLEY, JANUARY 5, 2017
http://carrefax.com/new-blog/2017/1/5/newspaper-on-python
import Newspaper
"""
from newspaper import Article
import sys
# Configure
script_name = sys.argv[0]
argc = len( sys.argv )
arguments = str( sys.argv )  # NOTE(review): str() of the whole argv list, not a single URL argument
#print(script_name, argc, arguments)
# Assume a single argument
url = arguments  # NOTE(review): dead assignment — immediately overwritten by the demo URL below
url = 'https://news.v.daum.net/v/20181107151357298'
language='ko'
print('lang=',language, 'url=',url)
# Prerequisite:
# from newspaper import Article # pip install newspaper3k
# "get_news_article" gets a newspaper article from an url
def get_news_article( url, lang='ko' ):
    """Download and parse the news article at *url*.

    Returns a tuple ``(article, filename)`` where *filename* is the first 30
    characters of the article title plus a '.txt' suffix. The title is also
    printed as progress feedback.
    """
    article = Article( url, language=lang )
    article.download()
    article.parse()
    print( article.title )
    suggested_name = article.title[:30] + '.txt'
    return article, suggested_name
def url2txt( url, lang='ko' ):
    """Fetch the article at *url* and save its body text to '<title[:30]>.txt'.

    Parameters:
        url:  address of the news article to download.
        lang: language code passed through to the parser (default 'ko').
    """
    doc, filename = get_news_article( url, lang )
    # Write the body of the document to a file
    print("Saving the URL to", filename, "...")
    # Fix: the original opened the file without ever closing it; a context
    # manager guarantees the handle is flushed and closed, even on error.
    with open( filename, 'w' ) as f:
        f.write( doc.text )
# Entry point: fetch the (hard-coded) demo URL and dump its body to a text file.
url2txt( url,language )
| 23.28 | 59 | 0.681271 |
38c31eeb66fd0e4f3a4e59c3019726f4bc49e155
| 13,340 |
py
|
Python
|
examples/few_shot/pet/pet.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
examples/few_shot/pet/pet.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
examples/few_shot/pet/pet.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import os
import sys
import random
import time
import json
from functools import partial
import numpy as np
import paddle
import paddle.nn.functional as F
import paddlenlp as ppnlp
from paddlenlp.data import Stack, Tuple, Pad
from paddlenlp.datasets import load_dataset
from paddlenlp.transformers import LinearDecayWithWarmup
from model import ErnieForPretraining, ErnieMLMCriterion
from data import create_dataloader, transform_fn_dict
from data import convert_example, convert_chid_example
from evaluate import do_evaluate, do_evaluate_chid
from predict import do_predict, do_predict_chid, predict_file, write_fn
def set_seed(seed):
    """Seed the Python, NumPy and Paddle RNGs for reproducible training runs."""
    for seeder in (random.seed, np.random.seed, paddle.seed):
        seeder(seed)
def do_train(args):
    """Fine-tune an ERNIE masked-LM head with PET on a FewCLUE task.

    Builds task-specific dataloaders, trains with an optional R-Drop
    regularizer, and (on rank 0) saves a checkpoint and reports accuracy on
    the public test split after every epoch.

    Args:
        args: parsed command-line namespace (see the argparse setup in
            ``__main__``).
    """
    paddle.set_device(args.device)
    rank = paddle.distributed.get_rank()
    if paddle.distributed.get_world_size() > 1:
        paddle.distributed.init_parallel_env()
    set_seed(args.seed)
    label_normalize_json = os.path.join("./label_normalized",
                                        args.task_name + ".json")
    # Ernie Model
    model = ErnieForPretraining.from_pretrained(args.language_model)
    tokenizer = ppnlp.transformers.ErnieTokenizer.from_pretrained(
        args.language_model)
    # map y
    label_norm_dict = None
    with open(label_normalize_json, 'r', encoding="utf-8") as f:
        label_norm_dict = json.load(f)
    # The "chid" (idiom cloze) task has its own example/eval/predict helpers.
    convert_example_fn = convert_example if args.task_name != "chid" else convert_chid_example
    evaluate_fn = do_evaluate if args.task_name != "chid" else do_evaluate_chid
    predict_fn = do_predict if args.task_name != "chid" else do_predict_chid  # NOTE(review): assigned but never used in this function
    # load dataset
    train_ds, public_test_ds, test_ds = load_dataset("fewclue",
                                                     name=args.task_name,
                                                     splits=("train_0",
                                                             "test_public",
                                                             "test"))
    # Task related transform operations, eg: numbert label -> text_label, english -> chinese
    transform_fn = partial(transform_fn_dict[args.task_name],
                           label_normalize_dict=label_norm_dict,
                           pattern_id=args.pattern_id)
    # Task related transform operations, eg: numbert label -> text_label, english -> chinese
    transform_test_fn = partial(transform_fn_dict[args.task_name],
                                label_normalize_dict=label_norm_dict,
                                is_test=True,
                                pattern_id=args.pattern_id)
    # Some fewshot_learning strategy is defined by transform_fn
    # Note: Set lazy=True to transform example inplace immediately,
    # because transform_fn should only be executed only once when
    # iterate multi-times for train_ds
    train_ds = train_ds.map(transform_fn, lazy=False)
    public_test_ds = public_test_ds.map(transform_fn, lazy=False)
    test_ds = test_ds.map(transform_test_fn, lazy=False)
    # dataloader
    if args.task_name == "chid":
        # [src_ids, token_type_ids, masked_positions, masked_lm_labels, candidate_labels_ids]
        batchify_fn = lambda samples, fn=Tuple(
            Pad(axis=0, pad_val=tokenizer.pad_token_id), # src_ids
            Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # token_type_ids
            Stack(dtype="int64"), # masked_positions
            Stack(dtype="int64"), # masked_lm_labels
            Stack(dtype="int64"
                  ), # candidate_labels_ids [candidate_num, label_length]
        ): [data for data in fn(samples)]
        batchify_test_fn = lambda samples, fn=Tuple(
            Pad(axis=0, pad_val=tokenizer.pad_token_id), # src_ids
            Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # token_type_ids
            Stack(dtype="int64"), # masked_positions
            Stack(dtype="int64"
                  ), # candidate_labels_ids [candidate_num, label_length]
        ): [data for data in fn(samples)]
    else:
        # [src_ids, token_type_ids, masked_positions, masked_lm_labels]
        batchify_fn = lambda samples, fn=Tuple(
            Pad(axis=0, pad_val=tokenizer.pad_token_id), # src_ids
            Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # token_type_ids
            Stack(dtype="int64"), # masked_positions
            Stack(dtype="int64"), # masked_lm_labels
        ): [data for data in fn(samples)]
        batchify_test_fn = lambda samples, fn=Tuple(
            Pad(axis=0, pad_val=tokenizer.pad_token_id), # src_ids
            Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # token_type_ids
            Stack(dtype="int64"), # masked_positions
        ): [data for data in fn(samples)]
    trans_func = partial(convert_example_fn,
                         tokenizer=tokenizer,
                         max_seq_length=args.max_seq_length)
    trans_test_func = partial(convert_example_fn,
                              tokenizer=tokenizer,
                              max_seq_length=args.max_seq_length,
                              is_test=True)
    train_data_loader = create_dataloader(train_ds,
                                          mode='train',
                                          batch_size=args.batch_size,
                                          batchify_fn=batchify_fn,
                                          trans_fn=trans_func)
    public_test_data_loader = create_dataloader(public_test_ds,
                                                mode='eval',
                                                batch_size=args.batch_size,
                                                batchify_fn=batchify_fn,
                                                trans_fn=trans_func)
    test_data_loader = create_dataloader(test_ds,
                                         mode='eval',
                                         batch_size=args.batch_size,
                                         batchify_fn=batchify_test_fn,
                                         trans_fn=trans_test_func)
    num_training_steps = len(train_data_loader) * args.epochs
    lr_scheduler = LinearDecayWithWarmup(args.learning_rate, num_training_steps,
                                         args.warmup_proportion)
    # Exclude bias and normalization parameters from weight decay.
    decay_params = [
        p.name for n, p in model.named_parameters()
        if not any(nd in n for nd in ["bias", "norm"])
    ]
    optimizer = paddle.optimizer.AdamW(
        learning_rate=lr_scheduler,
        parameters=model.parameters(),
        weight_decay=args.weight_decay,
        apply_decay_param_fun=lambda x: x in decay_params)
    # load model if there is
    if args.init_from_ckpt and os.path.isfile(args.init_from_ckpt):
        state_dict = paddle.load(args.init_from_ckpt)
        model.set_dict(state_dict)
        print("warmup from:{}".format(args.init_from_ckpt))
    mlm_loss_fn = ErnieMLMCriterion()
    rdrop_loss = ppnlp.losses.RDropLoss()
    max_test_acc = 0.0  # NOTE(review): never updated below; appears vestigial
    global_step = 0
    tic_train = time.time()
    for epoch in range(1, args.epochs + 1):
        model.train()
        # Generate parameter names needed to perform weight decay.
        # All bias and LayerNorm parameters are excluded.
        for step, batch in enumerate(train_data_loader, start=1):
            src_ids = batch[0]
            token_type_ids = batch[1]
            masked_positions = batch[2]
            masked_lm_labels = batch[3]
            # Flatten per-sample (row, position) mask indices into offsets over
            # the flattened (batch * seq_len) token axis.
            max_len = src_ids.shape[1]
            new_masked_positions = []
            for bs_index, mask_pos in enumerate(masked_positions.numpy()):
                for pos in mask_pos:
                    new_masked_positions.append(bs_index * max_len + pos)
            new_masked_positions = paddle.to_tensor(
                np.array(new_masked_positions).astype('int32'))
            prediction_scores = model(input_ids=src_ids,
                                      token_type_ids=token_type_ids,
                                      masked_positions=new_masked_positions)
            # R-Drop: a second stochastic forward pass, regularized by a
            # symmetric KL term between the two prediction distributions.
            if args.rdrop_coef > 0:
                prediction_scores_2 = model(
                    input_ids=src_ids,
                    token_type_ids=token_type_ids,
                    masked_positions=new_masked_positions)
                ce_loss = (
                    mlm_loss_fn(prediction_scores, masked_lm_labels) +
                    mlm_loss_fn(prediction_scores_2, masked_lm_labels)) * 0.5
                kl_loss = rdrop_loss(prediction_scores, prediction_scores_2)
                loss = ce_loss + kl_loss * args.rdrop_coef
            else:
                loss = mlm_loss_fn(prediction_scores, masked_lm_labels)
            global_step += 1
            if global_step % 10 == 0 and rank == 0:
                print(
                    "global step %d, epoch: %d, batch: %d, loss: %.5f, speed: %.2f step/s"
                    % (global_step, epoch, step, loss, 10 /
                       (time.time() - tic_train)))
                tic_train = time.time()
            loss.backward()
            optimizer.step()
            lr_scheduler.step()
            optimizer.clear_grad()
        # Rank 0 checkpoints and evaluates on the public test split each epoch.
        if rank == 0:
            save_dir = os.path.join(args.save_dir, "model_%d" % global_step)
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)
            save_param_path = os.path.join(save_dir, 'model_state.pdparams')
            paddle.save(model.state_dict(), save_param_path)
            tokenizer.save_pretrained(save_dir)
            test_accuracy, total_num = evaluate_fn(model, tokenizer,
                                                   public_test_data_loader,
                                                   label_norm_dict)
            print("epoch:{}, test_accuracy:{:.3f}, total_num:{}".format(
                epoch, test_accuracy, total_num))
# Command-line interface: hyper-parameters and task selection for do_train.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--task_name",
                        required=True,
                        type=str,
                        help="The task_name to be evaluated")
    parser.add_argument("--batch_size",
                        default=32,
                        type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument("--learning_rate",
                        default=1e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument(
        "--save_dir",
        default='./checkpoint',
        type=str,
        help="The output directory where the model checkpoints will be written."
    )
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help="The maximum total input sequence length after tokenization. "
        "Sequences longer than this will be truncated, sequences shorter will be padded."
    )
    parser.add_argument("--weight_decay",
                        default=0.0,
                        type=float,
                        help="Weight decay if we apply some.")
    parser.add_argument("--epochs",
                        default=10,
                        type=int,
                        help="Total number of training epochs to perform.")
    parser.add_argument(
        "--warmup_proportion",
        default=0.0,
        type=float,
        help="Linear warmup proption over the training process.")
    parser.add_argument("--pattern_id",
                        default=0,
                        type=int,
                        help="pattern id of pet")
    parser.add_argument("--init_from_ckpt",
                        type=str,
                        default=None,
                        help="The path of checkpoint to be loaded.")
    parser.add_argument("--seed",
                        type=int,
                        default=1000,
                        help="random seeds for initialization")
    parser.add_argument("--output_dir",
                        default='./output',
                        type=str,
                        help="The output directory where to save output")
    parser.add_argument(
        '--device',
        choices=['cpu', 'gpu'],
        default="gpu",
        help="Select which device to train model, defaults to gpu.")
    # NOTE(review): --save_steps and --if_save_checkpoints are parsed but not
    # read by do_train above — possibly consumed elsewhere; confirm.
    parser.add_argument('--save_steps',
                        type=int,
                        default=10000,
                        help="Inteval steps to save checkpoint")
    parser.add_argument("--if_save_checkpoints", action='store_true')
    parser.add_argument("--index",
                        required=True,
                        type=str,
                        default="0",
                        help="must be in [0, 1, 2, 3, 4, all]")
    parser.add_argument('--language_model',
                        type=str,
                        default='ernie-1.0',
                        choices=['ernie-1.0'],
                        help="Language model")
    parser.add_argument(
        "--rdrop_coef",
        default=0.0,
        type=float,
        help=
        "The coefficient of KL-Divergence loss in R-Drop paper, for more detail please refer to https://arxiv.org/abs/2106.14448), if rdrop_coef > 0 then R-Drop works"
    )
    args = parser.parse_args()
    do_train(args)
| 41.818182 | 167 | 0.563643 |
38c6dd0e646f6fdbf44249d6ec3b85fde888692b
| 864 |
py
|
Python
|
kts/cli/scripts.py
|
konodyuk/kts
|
3af5ccbf1d2089cb41d171626fcde4b0ba5aa8a7
|
[
"MIT"
] | 18 |
2019-02-14T13:10:07.000Z
|
2021-11-26T07:10:13.000Z
|
kts/cli/scripts.py
|
konodyuk/kts
|
3af5ccbf1d2089cb41d171626fcde4b0ba5aa8a7
|
[
"MIT"
] | 2 |
2019-02-17T14:06:42.000Z
|
2019-09-15T18:05:54.000Z
|
kts/cli/scripts.py
|
konodyuk/kts
|
3af5ccbf1d2089cb41d171626fcde4b0ba5aa8a7
|
[
"MIT"
] | 2 |
2019-09-15T13:12:42.000Z
|
2020-04-15T14:05:54.000Z
|
import os
import click
from kts.cli.utils import check_existence, clear_all, list_files, create_config
KTS_PROJECT_PATHS = ['./input', './notebooks', './storage', './output', './submissions']
@click.group()
def cli():
pass
@cli.command()
def init():
"""Initialize empty KTS project.
Create ./input, ./notebooks, ./kts_config.toml, etc.
"""
if check_existence(KTS_PROJECT_PATHS):
list_files('./')
if click.confirm('Do you want to clear existing kts file system?'):
clear_all()
click.confirm('Do you want to build the file system?', abort=True)
for path in KTS_PROJECT_PATHS:
if not os.path.isdir(path):
os.makedirs(path)
if os.path.exists('./kts_config.py'):
if click.confirm('Config found. Overwrite?'):
create_config()
else:
create_config()
| 24 | 88 | 0.630787 |
2a191dc86bd7b7063e6d80308ffb6904d766c4e7
| 12,550 |
py
|
Python
|
py/models/inception_resnet_v1.py
|
zjZSTU/GoogLeNet
|
a0801e45006d34b4901a8834397961ce17f24e2e
|
[
"Apache-2.0"
] | 1 |
2021-04-18T15:36:33.000Z
|
2021-04-18T15:36:33.000Z
|
py/models/inception_resnet_v1.py
|
zjZSTU/GoogLeNet
|
a0801e45006d34b4901a8834397961ce17f24e2e
|
[
"Apache-2.0"
] | null | null | null |
py/models/inception_resnet_v1.py
|
zjZSTU/GoogLeNet
|
a0801e45006d34b4901a8834397961ce17f24e2e
|
[
"Apache-2.0"
] | 3 |
2020-07-10T11:45:52.000Z
|
2022-01-15T08:46:14.000Z
|
# -*- coding: utf-8 -*-
"""
@date: 2020/4/12 下午3:49
@file: inception_resnet_v1.py
@author: zj
@description:
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class Inception_ResNet_v1(nn.Module):
    """Inception-ResNet-v1 classifier (Szegedy et al., 2016).

    Takes N x 3 x 299 x 299 input and produces N x num_classes logits.
    Attribute names are load-bearing: they define the state_dict layout.
    """
    __constants__ = ['transform_input']
    def __init__(self, num_classes=1000, transform_input=False, init_weights=True):
        super(Inception_ResNet_v1, self).__init__()
        self.transform_input = transform_input
        self.stem = Stem(3)
        # 5 x Inception-ResNet-A at 35x35 / 256 channels.
        self.inception_a1 = Inception_ResNet_A(256)
        self.inception_a2 = Inception_ResNet_A(256)
        self.inception_a3 = Inception_ResNet_A(256)
        self.inception_a4 = Inception_ResNet_A(256)
        self.inception_a5 = Inception_ResNet_A(256)
        self.reduction_a = ReductionA(256)
        # 10 x Inception-ResNet-B at 17x17 / 896 channels.
        self.inception_b1 = Inception_ResNet_B(896)
        self.inception_b2 = Inception_ResNet_B(896)
        self.inception_b3 = Inception_ResNet_B(896)
        self.inception_b4 = Inception_ResNet_B(896)
        self.inception_b5 = Inception_ResNet_B(896)
        self.inception_b6 = Inception_ResNet_B(896)
        self.inception_b7 = Inception_ResNet_B(896)
        self.inception_b8 = Inception_ResNet_B(896)
        self.inception_b9 = Inception_ResNet_B(896)
        self.inception_b10 = Inception_ResNet_B(896)
        self.reduction_b = ReductionB(896)
        # 5 x Inception-ResNet-C at 8x8 / 1792 channels.
        self.inception_c1 = Inception_ResNet_C(1792)
        self.inception_c2 = Inception_ResNet_C(1792)
        self.inception_c3 = Inception_ResNet_C(1792)
        self.inception_c4 = Inception_ResNet_C(1792)
        self.inception_c5 = Inception_ResNet_C(1792)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # NOTE(review): nn.Dropout(0.8) DROPS 80% of activations; the paper's
        # "keep 0.8" would be nn.Dropout(0.2) — confirm intent.
        self.dropout = nn.Dropout(0.8)
        self.fc = nn.Linear(1792, num_classes)
        if init_weights:
            self._initialize_weights()
    def _initialize_weights(self):
        # Truncated-normal init (scale 0.01) for conv/linear; BN to identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                import scipy.stats as stats
                X = stats.truncnorm(-2, 2, scale=0.01)
                values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype)
                values = values.view(m.weight.size())
                with torch.no_grad():
                    m.weight.copy_(values)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def _transform_input(self, x):
        # type: (Tensor) -> Tensor
        # Re-normalize from ImageNet statistics to [-1, 1]-style scaling.
        if self.transform_input:
            x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
            x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
            x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
            x = torch.cat((x_ch0, x_ch1, x_ch2), 1)
        return x
    def _forward(self, x):
        # N x 3 x 299 x 299
        x = self.stem(x)
        # N x 256 x 35 x 35
        x = self.inception_a1(x)
        x = self.inception_a2(x)
        x = self.inception_a3(x)
        x = self.inception_a4(x)
        x = self.inception_a5(x)
        # N x 256 x 35 x 35
        x = self.reduction_a(x)
        # N x 896 x 17 x17
        x = self.inception_b1(x)
        x = self.inception_b2(x)
        x = self.inception_b3(x)
        x = self.inception_b4(x)
        x = self.inception_b5(x)
        x = self.inception_b6(x)
        x = self.inception_b7(x)
        x = self.inception_b8(x)
        x = self.inception_b9(x)
        x = self.inception_b10(x)
        # N x 896 x 17 x 17
        x = self.reduction_b(x)
        # N x 1792 x 8 x 8
        x = self.inception_c1(x)
        x = self.inception_c2(x)
        x = self.inception_c3(x)
        x = self.inception_c4(x)
        x = self.inception_c5(x)
        # N x 1792 x 8 x 8
        x = self.avgpool(x)
        # N x 1792 x 1 x 1
        x = torch.flatten(x, 1)
        # N x 1792
        x = self.dropout(x)
        x = self.fc(x)
        # N x 1000 (num_classes)
        return x
    def forward(self, x):
        x = self._transform_input(x)
        x = self._forward(x)
        return x
class Stem(nn.Module):
    """Inception-ResNet-v1 stem: 3 x 299 x 299 input -> 256 x 35 x 35 features."""

    def __init__(self, in_channels, conv_block=None):
        super(Stem, self).__init__()
        conv = conv_block if conv_block is not None else BasicConv2d
        self.conv1 = conv(in_channels, 32, kernel_size=3, stride=2)
        self.conv2 = conv(32, 32, kernel_size=3)
        self.conv3 = conv(32, 64, kernel_size=3, padding=1)
        self.pool = nn.MaxPool2d(3, stride=2)
        self.conv4 = conv(64, 80, kernel_size=1)
        self.conv5 = conv(80, 192, kernel_size=3)
        self.conv6 = conv(192, 256, kernel_size=3, stride=2)

    def _forward(self, x):
        # Apply the stages strictly in their original order.
        stages = (self.conv1, self.conv2, self.conv3, self.pool,
                  self.conv4, self.conv5, self.conv6)
        for stage in stages:
            x = stage(x)
        return x

    def forward(self, x):
        return self._forward(x)
class Inception_ResNet_A(nn.Module):
    """35x35 Inception-ResNet block: three parallel branches plus a scaled residual."""

    def __init__(self, in_channels, conv_block=None):
        super(Inception_ResNet_A, self).__init__()
        conv = conv_block if conv_block is not None else BasicConv2d
        self.branch1 = conv(in_channels, 32, kernel_size=1)
        self.branch2 = nn.Sequential(
            conv(in_channels, 32, kernel_size=1),
            conv(32, 32, kernel_size=3, padding=1),
        )
        self.branch3 = nn.Sequential(
            conv(in_channels, 32, kernel_size=1),
            conv(32, 32, kernel_size=3, padding=1),
            conv(32, 32, kernel_size=3, padding=1),
        )
        self.branch = conv(96, 256, kernel_size=1)
        self.relu = nn.ReLU(inplace=True)

    def _forward(self, x):
        mixed = torch.cat([self.branch1(x), self.branch2(x), self.branch3(x)], 1)
        # 1x1 projection (linear activation); residual scaled by 0.1 as in the paper.
        out = self.branch(mixed) * 0.1
        out += x
        return self.relu(out)

    def forward(self, x):
        return self._forward(x)
class ReductionA(nn.Module):
    """Downsampling block: 35x35/256ch -> 17x17/896ch via two conv branches + max-pool."""

    def __init__(self, in_channels, conv_block=None):
        super(ReductionA, self).__init__()
        conv = conv_block if conv_block is not None else BasicConv2d
        self.branch1 = conv(in_channels, 384, kernel_size=3, stride=2)
        self.branch2 = nn.Sequential(
            conv(in_channels, 192, kernel_size=1),
            conv(192, 192, kernel_size=3, stride=1, padding=1),
            conv(192, 256, kernel_size=3, stride=2),
        )
        self.branch3 = nn.MaxPool2d(3, stride=2)

    def _forward(self, x):
        # 384 + 256 + in_channels output channels, all at the halved resolution.
        return [branch(x) for branch in (self.branch1, self.branch2, self.branch3)]

    def forward(self, x):
        return torch.cat(self._forward(x), 1)
class Inception_ResNet_B(nn.Module):
    """17x17 Inception-ResNet block: 1x1 branch + factorized 7x7 branch, scaled residual."""

    def __init__(self, in_channels, conv_block=None):
        super(Inception_ResNet_B, self).__init__()
        conv = conv_block if conv_block is not None else BasicConv2d
        self.branch1 = conv(in_channels, 128, kernel_size=1)
        self.branch2 = nn.Sequential(
            conv(in_channels, 128, kernel_size=1),
            conv(128, 128, kernel_size=(1, 7), padding=(0, 3)),
            conv(128, 128, kernel_size=(7, 1), padding=(3, 0)),
        )
        self.branch = conv(256, 896, kernel_size=1)
        self.relu = nn.ReLU(inplace=True)

    def _forward(self, x):
        mixed = torch.cat([self.branch1(x), self.branch2(x)], 1)
        # Linear 1x1 projection back to 896 channels; residual scaled by 0.1.
        out = self.branch(mixed) * 0.1
        out += x
        return self.relu(out)

    def forward(self, x):
        return self._forward(x)
class ReductionB(nn.Module):
    """Downsampling block: 17x17/896ch -> 8x8/1792ch via three conv branches + max-pool."""

    def __init__(self, in_channels, conv_block=None):
        super(ReductionB, self).__init__()
        conv = conv_block if conv_block is not None else BasicConv2d
        self.branch1 = nn.Sequential(
            conv(in_channels, 256, kernel_size=1),
            conv(256, 384, kernel_size=3, stride=2),
        )
        self.branch2 = nn.Sequential(
            conv(in_channels, 256, kernel_size=1),
            conv(256, 256, kernel_size=3, stride=2),
        )
        self.branch3 = nn.Sequential(
            conv(in_channels, 256, kernel_size=1),
            conv(256, 256, kernel_size=3, padding=1),
            conv(256, 256, kernel_size=3, stride=2),
        )
        self.branch4 = nn.MaxPool2d(3, stride=2)

    def _forward(self, x):
        # 384 + 256 + 256 + in_channels output channels at halved resolution.
        branches = (self.branch1, self.branch2, self.branch3, self.branch4)
        return [branch(x) for branch in branches]

    def forward(self, x):
        return torch.cat(self._forward(x), 1)
class Inception_ResNet_C(nn.Module):
    """8x8 Inception-ResNet block: 1x1 branch + factorized 3x3 branch, scaled residual."""

    def __init__(self, in_channels, conv_block=None):
        super(Inception_ResNet_C, self).__init__()
        conv = conv_block if conv_block is not None else BasicConv2d
        self.branch1 = conv(in_channels, 192, kernel_size=1)
        self.branch2 = nn.Sequential(
            conv(in_channels, 192, kernel_size=1),
            conv(192, 192, kernel_size=(1, 3), padding=(0, 1)),
            conv(192, 192, kernel_size=(3, 1), padding=(1, 0)),
        )
        self.branch = conv(384, 1792, kernel_size=1)
        self.relu = nn.ReLU(inplace=True)

    def _forward(self, x):
        mixed = torch.cat([self.branch1(x), self.branch2(x)], 1)
        # Linear 1x1 projection back to 1792 channels; residual scaled by 0.1.
        out = self.branch(mixed) * 0.1
        out += x
        return self.relu(out)

    def forward(self, x):
        return self._forward(x)
class BasicConv2d(nn.Module):
    """Conv2d (no bias) -> BatchNorm -> ReLU: the basic unit used by every block."""

    def __init__(self, in_channels, out_channels, **kwargs):
        super(BasicConv2d, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.bn = nn.BatchNorm2d(out_channels, eps=0.001)

    def forward(self, x):
        return F.relu(self.bn(self.conv(x)), inplace=True)
# Smoke test: build the full classifier and check the output logits shape.
# The commented-out variants exercise each sub-module in isolation.
if __name__ == '__main__':
    # model = Stem(3)
    # data = torch.randn((1, 3, 299, 299))
    # outputs = model(data)
    # print(outputs.shape)
    # model = Inception_ResNet_A(256)
    # data = torch.randn((1, 256, 35, 35))
    # outputs = model(data)
    # print(outputs.shape)
    # model = ReductionA(256)
    # data = torch.randn((1, 256, 35, 35))
    # outputs = model(data)
    # print(outputs.shape)
    # model = Inception_ResNet_B(896)
    # data = torch.randn((1, 896, 17, 17))
    # outputs = model(data)
    # print(outputs.shape)
    # model = ReductionB(896)
    # data = torch.randn((1, 896, 17, 17))
    # outputs = model(data)
    # print(outputs.shape)
    # model = Inception_ResNet_C(1792)
    # data = torch.randn((1, 1792, 8, 8))
    # outputs = model(data)
    # print(outputs.shape)
    model = Inception_ResNet_v1(num_classes=1000)
    data = torch.randn((1, 3, 299, 299))
    outputs = model(data)
    print(outputs.shape)
| 29.880952 | 87 | 0.575139 |
2a3c961206b22fc59fa5f02c0fe296322570e925
| 1,739 |
py
|
Python
|
books/PythonCleanCode/ch4_solid/openclosed_1.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
books/PythonCleanCode/ch4_solid/openclosed_1.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
books/PythonCleanCode/ch4_solid/openclosed_1.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
"""Clean Code in Python - Chater 4
The open/closed principle
Counter-example of the open/closed principle.
An example that does comply with this principle and should be refactored.
"""
class Event:
    """Base class for system events; keeps the raw payload it was built from."""

    def __init__(self, raw_data):
        # Stored verbatim so concrete event types can inspect it later.
        self.raw_data = raw_data
class UnknownEvent(Event):
    """Fallback event: the session data matched no known transition."""
class LoginEvent(Event):
    """Event emitted when a user has just entered the system."""
class LogoutEvent(Event):
    """Event emitted when a user has just left the system."""
class SystemMonitor:
    """Identify events that occurred in the system
    >>> l1 = SystemMonitor({"before": {"session": 0}, "after": {"session": 1}})
    >>> l1.identify_event().__class__.__name__
    'LoginEvent'
    >>> l2 = SystemMonitor({"before": {"session": 1}, "after": {"session": 0}})
    >>> l2.identify_event().__class__.__name__
    'LogoutEvent'
    >>> l3 = SystemMonitor({"before": {"session": 1}, "after": {"session": 1}})
    >>> l3.identify_event().__class__.__name__
    'UnknownEvent'
    """

    # (session before, session after) -> event type for that transition.
    _TRANSITIONS = {(0, 1): LoginEvent, (1, 0): LogoutEvent}

    def __init__(self, event_data):
        self.event_data = event_data

    def identify_event(self):
        before = self.event_data["before"]["session"]
        after = self.event_data["after"]["session"]
        event_cls = self._TRANSITIONS.get((before, after), UnknownEvent)
        return event_cls(self.event_data)
# Run the embedded doctests when this module is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 25.955224 | 80 | 0.60207 |
930e25677fea5ad0eade9badb8cdf47a5077dc8c
| 312 |
py
|
Python
|
USACOClassLiveTest/src/sortfile2.py
|
javaarchive/USACOClass2020
|
4ae563014b9b2da3e1361e175d38e72308a8da89
|
[
"MIT"
] | null | null | null |
USACOClassLiveTest/src/sortfile2.py
|
javaarchive/USACOClass2020
|
4ae563014b9b2da3e1361e175d38e72308a8da89
|
[
"MIT"
] | null | null | null |
USACOClassLiveTest/src/sortfile2.py
|
javaarchive/USACOClass2020
|
4ae563014b9b2da3e1361e175d38e72308a8da89
|
[
"MIT"
] | null | null | null |
import sys
# Command-line arguments: input path and output path.
filename = sys.argv[1]
outfile = sys.argv[2]
# NOTE(review): the input handle is never closed explicitly; it is only
# released when the anonymous file object is garbage-collected.
lines = open(filename, "r").readlines()
def sortByFirstNumber(line):
    """Sort key: the integer value of the first space-delimited token of *line*."""
    first_token = line.split(" ")[0]
    return int(first_token)
# Sort lines numerically by their leading number (Timsort is stable, so ties
# keep their original relative order).
lines = sorted(lines, key = sortByFirstNumber)
# NOTE(review): the output handle is never closed/flushed explicitly; a
# 'with' block would be safer. Left unchanged here.
ofile = open(outfile, "w")
for num in lines:
    # 'num' is actually a whole line (str); str() is a no-op kept as-is.
    ofile.write(str(num))
| 26 | 46 | 0.660256 |
fabceebdc0a61f6c8fb053a5cf417962901374d5
| 855 |
py
|
Python
|
Python/zzz_training_challenge/Python_Challenge/solutions/tests/ch04_strings/ex12_pattern_checker_test.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
Python/zzz_training_challenge/Python_Challenge/solutions/tests/ch04_strings/ex12_pattern_checker_test.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
Python/zzz_training_challenge/Python_Challenge/solutions/tests/ch04_strings/ex12_pattern_checker_test.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
# Beispielprogramm für das Buch "Python Challenge"
#
# Copyright 2020 by Michael Inden
import pytest
from ch04_strings.solutions.ex12_pattern_checker import matches_pattern
@pytest.mark.parametrize("pattern, text, expected",
                         [("x", "", False),
                          ("", "x", False)])
def test_matches_pattern_special_cases(pattern, text, expected):
    """A non-empty pattern with empty input (or vice versa) never matches."""
    assert matches_pattern(pattern, text) == expected
@pytest.mark.parametrize("pattern, text, expected",
                         [("xyyx", "tim mike mike tim", True),
                          ("xyyx", "time mike tom tim", False),
                          ("xyxx", "tim mike mike tim", False),
                          ("xxxx", "tim tim tim tim", True)])
def test_matches_pattern(pattern, text, expected):
    """Word sequences must follow the letter pattern (same letter = same word)."""
    assert matches_pattern(pattern, text) == expected
| 35.625 | 71 | 0.596491 |
fad49a3f30e9c3916c16f81c290ebafc15ca26da
| 465 |
py
|
Python
|
pyconuk/migrations/0008_scheduleslot_chair.py
|
OddBloke/2016.pyconuk.org
|
831d510a83c9cab8541198005c8024003b5049f5
|
[
"MIT"
] | null | null | null |
pyconuk/migrations/0008_scheduleslot_chair.py
|
OddBloke/2016.pyconuk.org
|
831d510a83c9cab8541198005c8024003b5049f5
|
[
"MIT"
] | null | null | null |
pyconuk/migrations/0008_scheduleslot_chair.py
|
OddBloke/2016.pyconuk.org
|
831d510a83c9cab8541198005c8024003b5049f5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-12 15:18
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds a nullable 'chair' CharField to
    # the ScheduleSlot model. Generated files are normally left unedited.
    dependencies = [
        ('pyconuk', '0007_page_show_sponsors'),
    ]
    operations = [
        migrations.AddField(
            model_name='scheduleslot',
            name='chair',
            field=models.CharField(max_length=255, null=True),
        ),
    ]
| 22.142857 | 62 | 0.621505 |
ea6714aa93841d6d9632b3382d84543727cf4ae0
| 259 |
py
|
Python
|
python/cycle_detection.py
|
higoress/HackerRank
|
ac13fc378734e70eef08b357bb36c47655d7d0be
|
[
"MIT"
] | null | null | null |
python/cycle_detection.py
|
higoress/HackerRank
|
ac13fc378734e70eef08b357bb36c47655d7d0be
|
[
"MIT"
] | null | null | null |
python/cycle_detection.py
|
higoress/HackerRank
|
ac13fc378734e70eef08b357bb36c47655d7d0be
|
[
"MIT"
] | null | null | null |
# Identify cycles in a linked list.
def has_cycle(head):
    """Return 1 if the linked list starting at *head* contains a cycle, else 0.

    Uses Floyd's tortoise-and-hare: O(n) time, O(1) extra space, and no
    requirement that nodes be hashable. The original kept every visited node
    in a list and scanned it per step, which is O(n^2) time and O(n) space.
    An empty list (head is None) has no cycle.
    """
    slow = fast = head
    while fast and fast.next:
        slow = slow.next
        fast = fast.next.next
        # The fast pointer can only meet the slow one inside a cycle.
        if slow is fast:
            return 1
    return 0
| 21.583333 | 35 | 0.559846 |
579e677fbc15325687e4f3b96043b6d0218adb1f
| 8,453 |
py
|
Python
|
src/aijack/defense/dp/manager/rdp.py
|
luoshenseeker/AIJack
|
4e871a5b3beb4b7c976d38060d6956efcebf880d
|
[
"MIT"
] | 24 |
2021-11-17T02:16:47.000Z
|
2022-03-27T01:04:08.000Z
|
src/aijack/defense/dp/manager/rdp.py
|
luoshenseeker/AIJack
|
4e871a5b3beb4b7c976d38060d6956efcebf880d
|
[
"MIT"
] | 9 |
2021-12-03T06:09:27.000Z
|
2022-03-29T06:33:53.000Z
|
src/aijack/defense/dp/manager/rdp.py
|
luoshenseeker/AIJack
|
4e871a5b3beb4b7c976d38060d6956efcebf880d
|
[
"MIT"
] | 5 |
2022-01-12T09:58:04.000Z
|
2022-03-17T09:29:04.000Z
|
import math
import numpy as np
from scipy import special
from scipy.special import logsumexp
from .utils import _log_add, _log_erfc, _log_sub
def eps_gaussian(alpha, params):
    """RDP epsilon of the Gaussian mechanism (noise sigma) at order *alpha*."""
    if alpha != math.inf:
        return alpha / (2 * (params["sigma"] ** 2))
    # alpha = inf: bound derived from the order-2 epsilon.
    eps_two = eps_gaussian(2, params)
    return min(4 * np.exp(eps_two - 1), 2 * np.exp(eps_two))
def eps_laplace(alpha, params):
    """RDP epsilon of the Laplace mechanism (scale b) at order *alpha*."""
    scale = params["b"]
    if alpha <= 1:
        # KL-divergence limit as alpha -> 1.
        return 1 / scale + np.exp(-1 / scale) - 1
    if alpha == math.inf:
        return 1 / scale
    # log-space sum of the two closed-form terms for finite alpha > 1.
    terms = [
        np.log(alpha / (2 * alpha - 1)) + (alpha - 1) / scale,
        np.log((alpha - 1) / (2 * alpha - 1)) + (-alpha) / scale,
    ]
    return (1 / (alpha - 1)) * logsumexp(terms, b=[1, 1])
def eps_randresp(alpha, params):
    """RDP epsilon of randomized response with truth probability p at order *alpha*.

    For 1 < alpha < inf this is
        (1/(alpha-1)) * log( p^alpha (1-p)^(1-alpha) + (1-p)^alpha p^(1-alpha) ),
    evaluated in log-space for numerical stability. p in {0, 1} is
    deterministic, giving infinite epsilon.

    Bug fixed: the second logsumexp term previously used
    ``(1 - alpha) * params["p"]`` instead of
    ``(1 - alpha) * np.log(params["p"])``, which made the result wrong for
    every finite alpha > 1.
    """
    if params["p"] == 1 or params["p"] == 0:
        return math.inf
    if alpha <= 1:
        # KL-divergence limit as alpha -> 1.
        return (2 * params["p"] - 1) * np.log(params["p"] / (1 - params["p"]))
    elif alpha == math.inf:
        return np.abs(np.log((1.0 * params["p"] / (1 - params["p"]))))
    return (1 / (alpha - 1)) * logsumexp(
        [
            alpha * np.log(params["p"]) + (1 - alpha) * np.log(1 - params["p"]),
            alpha * np.log(1 - params["p"]) + (1 - alpha) * np.log(params["p"]),
        ],
        b=[1, 1],
    )
def culc_upperbound_of_rdp_with_Sampled_Gaussian_Mechanism(
    alpha, params, sampling_rate, _eps
):
    """Compute log(A_alpha) for any positive finite alpha, dispatching on
    whether alpha is an integer (stable closed sum) or fractional (series)."""
    if not float(alpha).is_integer():
        return culc_upperbound_of_rdp_with_Sampled_Gaussian_Mechanism_float(
            alpha, params, sampling_rate
        )
    return culc_upperbound_of_rdp_with_Sampled_Gaussian_Mechanism_int(
        int(alpha), params, sampling_rate
    )
def culc_upperbound_of_rdp_with_Sampled_Gaussian_Mechanism_int(
    alpha, params, sampling_rate
):
    """Renyi DP of the Sampled Gaussian Mechanism for integer order ``alpha``.

    Implements the numerically stable computation of Sec. 3.3 in Mironov,
    Talwar & Zhang, "Renyi Differential Privacy of the Sampled Gaussian
    Mechanism": log(A_alpha) is accumulated term by term in log space over
    the binomial expansion, then divided by (alpha - 1).
    """
    log_accum = -np.inf
    for k in range(alpha + 1):
        # log of C(alpha, k) * q^k * (1-q)^(alpha-k).
        log_binom_weight = (
            np.log(special.binom(alpha, k))
            + k * np.log(sampling_rate)
            + (alpha - k) * np.log(1 - sampling_rate)
        )
        # Gaussian moment factor e^{(k^2 - k) / (2 sigma^2)}, folded in log space.
        log_term = log_binom_weight + (k * k - k) / (2 * (params["sigma"] ** 2))
        log_accum = _log_add(log_accum, log_term)
    return float(log_accum) / (alpha - 1)
def culc_upperbound_of_rdp_with_Sampled_Gaussian_Mechanism_float(
    alpha, params, sampling_rate
):
    """Compute log(A_alpha) for fractional alpha. 0 < q < 1.

    Numerically stable evaluation for non-integer Renyi orders (Mironov,
    Talwar & Zhang 2019, Sec. 3.3): the generalized binomial series is
    split at the crossover point z0 of the two shifted Gaussians, each
    half is accumulated in log space, and the loop stops once both series
    terms fall below e^-30.

    Args:
        alpha: fractional Renyi order (expected > 1).
        params: dict with key "sigma", the Gaussian noise multiplier.
        sampling_rate: Poisson subsampling probability q with 0 < q < 1.

    Returns:
        float: log(A_alpha) / (alpha - 1), the RDP value at order alpha.
    """
    # The two parts of A_alpha, integrals over (-inf,z0] and [z0, +inf), are
    # initialized to 0 in the log space:
    log_a0, log_a1 = -np.inf, -np.inf
    i = 0
    # z0: point where the densities N(0, sigma^2) and N(1, sigma^2),
    # mixed with weights (1-q) and q, cross.
    z0 = params["sigma"] ** 2 * np.log(1 / sampling_rate - 1) + 0.5
    while True:  # do ... until loop
        # Generalized binomial coefficient C(alpha, i); turns negative
        # (and alternates sign) once i exceeds alpha.
        coef = special.binom(alpha, i)
        log_coef = np.log(abs(coef))
        j = alpha - i
        # log of the q^i (1-q)^j and q^j (1-q)^i mixture weights.
        log_t0 = log_coef + i * np.log(sampling_rate) + j * np.log(1 - sampling_rate)
        log_t1 = log_coef + j * np.log(sampling_rate) + i * np.log(1 - sampling_rate)
        # Gaussian tail masses on each side of z0, via log(erfc) to avoid
        # underflow for large arguments.
        log_e0 = np.log(0.5) + _log_erfc((i - z0) / (math.sqrt(2) * params["sigma"]))
        log_e1 = np.log(0.5) + _log_erfc((z0 - j) / (math.sqrt(2) * params["sigma"]))
        # Full series terms: weight * e^{(x^2 - x)/(2 sigma^2)} * tail mass.
        log_s0 = log_t0 + (i * i - i) / (2 * (params["sigma"] ** 2)) + log_e0
        log_s1 = log_t1 + (j * j - j) / (2 * (params["sigma"] ** 2)) + log_e1
        # Accumulate with the sign of the binomial coefficient.
        if coef > 0:
            log_a0 = _log_add(log_a0, log_s0)
            log_a1 = _log_add(log_a1, log_s1)
        else:
            log_a0 = _log_sub(log_a0, log_s0)
            log_a1 = _log_sub(log_a1, log_s1)
        i += 1
        # Terminate once both halves' terms are negligible (< e^-30).
        if max(log_s0, log_s1) < -30:
            break
    return _log_add(log_a0, log_a1) / (alpha - 1)
def culc_upperbound_of_rdp_with_theorem27_of_wang_2019(
    alpha, params, sampling_rate, _eps
):
    """Upper bound on the RDP of a subsampled mechanism via Theorem 27 of
    Wang et al. 2019 ("Subsampled Renyi Differential Privacy and Analytical
    Moments Accountant").

    Args:
        alpha: integer Renyi order (>= 2).
        params: parameter dict forwarded to ``_eps``.
        sampling_rate: subsampling probability q.
        _eps: callable ``(order, params) -> RDP`` of the base mechanism;
            queried at orders 0..alpha, 2, and ``math.inf``.

    Returns:
        float: (1/(alpha-1)) * log of the ternary-expansion bound.
    """

    def _abs_moment(el):
        # |B(el)| = |sum_i (-1)^i C(el, i) e^{(i-1) eps(i)}|, the absolute
        # binomial moment from the theorem, accumulated directly.
        total = 0
        for i in range(el + 1):
            total += (
                ((-1) ** (i)) * special.binom(el, i) * np.exp((i - 1) * _eps(i, params))
            )
        return abs(total)

    log_terms = []
    coeff_signs = []
    # Zeroth/first-order contribution is exactly 1.
    log_terms.append(np.log(1))
    coeff_signs.append(1)
    # Second-order term: q^2 C(alpha,2) * min( 4 (e^{eps(2)} - 1),
    #   e^{eps(2)} * min(2, (e^{eps(inf)} - 1)^2) ), all in log space.
    second_order = (
        2 * np.log(sampling_rate)
        + np.log(special.binom(alpha, 2))
        + min(
            np.log(4) + logsumexp([_eps(2, params), np.log(1)], b=[1, -1]),
            _eps(2, params)
            + min(
                np.log(2),
                2 * logsumexp([_eps(math.inf, params), np.log(1)], b=[1, -1]),
            ),
        )
    )
    log_terms.append(second_order)
    coeff_signs.append(1)
    # Orders j = 3..alpha, bounded through Cauchy-Schwarz on |B(.)|.
    for j in range(3, alpha + 1):
        log_terms.append(
            np.log(4)
            + j * np.log(sampling_rate)
            + np.log(special.binom(alpha, j))
            + (1 / 2)
            * (
                np.log(_abs_moment(2 * math.floor(j / 2)))
                + np.log(_abs_moment(2 * math.ceil(j / 2)))
            )
        )
        coeff_signs.append(1)
    return (1 / (alpha - 1)) * logsumexp(log_terms, b=coeff_signs)
def culc_general_upperbound_of_rdp_with_theorem5_of_zhu_2019(
    alpha, params, sampling_rate, _eps
):
    """General RDP upper bound for Poisson-subsampled mechanisms,
    Theorem 5 of Zhu & Wang 2019 ("Poission Subsampled Renyi Differential
    Privacy"), for integer ``alpha >= 1``.

    Args:
        alpha: integer Renyi order.
        params: parameter dict forwarded to ``_eps``.
        sampling_rate: Poisson subsampling probability q.
        _eps: callable ``(order, params) -> RDP`` of the base mechanism.

    Returns:
        float: (1/(alpha-1)) * log of the binomial-expansion bound.
    """
    q = sampling_rate
    log_terms = []
    # Orders l = 0, 1 collapse to (1-q)^{alpha-1} * (alpha*q - q + 1).
    log_terms.append(((alpha - 1) * np.log(1 - q)) + np.log(alpha * q - q + 1))
    if alpha >= 2:
        # Order l = 2: C(alpha,2) q^2 (1-q)^{alpha-2} e^{eps(2)}.
        log_terms.append(
            np.log(special.binom(alpha, 2))
            + (2 * np.log(q))
            + ((alpha - 2) * np.log(1 - q))
            + _eps(2, params)
        )
    # Orders l >= 3 carry the theorem's extra factor of 3; the range is
    # empty when alpha < 3.
    for el in range(3, alpha + 1):
        log_terms.append(
            np.log(3)
            + (
                np.log(special.binom(alpha, el))
                + (alpha - el) * np.log(1 - q)
                + (el * np.log(q))
                + ((el - 1) * _eps(el, params))
            )
        )
    return (1 / (alpha - 1)) * logsumexp(log_terms, b=[1] * len(log_terms))
def culc_tightupperbound_lowerbound_of_rdp_with_theorem6and8_of_zhu_2019(
    alpha, params, sampling_rate, _eps
):
    """Tight RDP bound for Poisson-subsampled mechanisms, Theorems 6 and 8
    of Zhu & Wang 2019, for integer ``alpha >= 1``.

    Every binomial order l = 2..alpha contributes its exact term
    C(alpha,l) q^l (1-q)^{alpha-l} e^{(l-1) eps(l)}; orders 0 and 1
    collapse into the leading (1-q)^{alpha-1} (alpha*q - q + 1) factor.

    Args:
        alpha: integer Renyi order.
        params: parameter dict forwarded to ``_eps``.
        sampling_rate: Poisson subsampling probability q.
        _eps: callable ``(order, params) -> RDP`` of the base mechanism.

    Returns:
        float: (1/(alpha-1)) * log of the expansion.
    """
    q = sampling_rate
    log_terms = [((alpha - 1) * np.log(1 - q)) + np.log(alpha * q - q + 1)]
    # Exact higher-order terms; the range is empty when alpha < 2.
    for el in range(2, alpha + 1):
        log_terms.append(
            np.log(special.binom(alpha, el))
            + (el * np.log(q))
            + ((alpha - el) * np.log(1 - q))
            + ((el - 1) * _eps(el, params))
        )
    return (1 / (alpha - 1)) * logsumexp(log_terms, b=[1] * len(log_terms))
def culc_tightupperbound_lowerbound_of_rdp_with_theorem6and8_of_zhu_2019_with_tau_estimation(
    alpha, params, sampling_rate, _eps, tau=10
):
    """Approximate the tight subsampled-RDP bound (Zhu & Wang 2019,
    Theorems 6/8) while evaluating ``_eps`` only O(tau) times.

    The middle binomial orders are bounded through the single reference
    value eps(alpha - tau); only the lowest and highest ``tau`` orders use
    their exact per-order epsilons, added or subtracted as corrections.

    Args:
        alpha: integer Renyi order.  NOTE(review): the two correction loops
            (el < tau and el > alpha - tau) appear to assume
            alpha > 2 * tau so the ranges do not overlap -- confirm
            against callers before relying on small alpha.
        params: parameter dict forwarded to ``_eps``.
        sampling_rate: Poisson subsampling probability q.
        _eps: callable ``(order, params) -> RDP`` of the base mechanism.
        tau: number of exact terms kept at each end of the expansion.

    Returns:
        float: (1/(alpha-1)) * log of the signed sum of terms.
        NOTE(review): the final logsumexp mixes signs via ``b`` without
        ``return_sign``; if the signed sum were non-positive the result
        would be NaN -- presumably the bound guarantees positivity, verify.
    """
    terms = []
    signs = []
    # Reference epsilon used for all "middle" orders.
    eps_alpha_minus_tau = _eps(alpha - tau, params)
    # (1-q)^alpha * (1 - e^{-eps(alpha-tau)}), in log space.
    first = alpha * np.log(1 - sampling_rate) + logsumexp(
        [np.log(1), -eps_alpha_minus_tau], b=[1, -1]
    )
    terms.append(first)
    signs.append(1)
    # e^{-eps(alpha-tau)} * (1 - q + q e^{eps(alpha-tau)})^alpha, in log space.
    second = -eps_alpha_minus_tau + alpha * logsumexp(
        [
            np.log(1),
            np.log(sampling_rate),
            np.log(sampling_rate) + eps_alpha_minus_tau,
        ],
        b=[1, -1, 1],
    )
    terms.append(second)
    signs.append(1)
    # Low-order corrections (el < tau): subtract the gap between the
    # reference epsilon and the exact eps(el); note sign -1.
    for el in range(2, tau):
        third = (
            np.log(special.binom(alpha, el))
            + (alpha - el) * np.log(1 - sampling_rate)
            + el * np.log(sampling_rate)
            + logsumexp(
                [(el - 1) * eps_alpha_minus_tau, (el - 1) * _eps(el, params)], b=[1, -1]
            )
        )
        terms.append(third)
        signs.append(-1)
    # High-order corrections (el > alpha - tau): add back the gap between
    # the exact eps(el) and the reference epsilon; sign +1.
    for el in range(alpha - tau + 1, alpha + 1):
        fourth = (
            np.log(special.binom(alpha, el))
            + (alpha - el) * np.log(1 - sampling_rate)
            + el * np.log(sampling_rate)
            + logsumexp(
                [(el - 1) * _eps(el, params), (el - 1) * eps_alpha_minus_tau],
                b=[1, -1],
            )
        )
        terms.append(fourth)
        signs.append(1)
    return (1 / (alpha - 1)) * logsumexp(terms, b=signs)
| 28.751701 | 93 | 0.519342 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.