max_stars_repo_path (string, 4-182 chars) | max_stars_repo_name (string, 6-116 chars) | max_stars_count (int64, 0-191k) | id (string, 7 chars) | content (string, 100-10k chars) | size (int64, 100-10k) |
---|---|---|---|---|---
src/cv2r.py
|
BarqueroGerman/metrabs
| 208 |
2024002
|
"""Convenience wrappers over OpenCV functions."""
import os
import cv2
import numpy as np
def undistortPoints(src, cameraMatrix, distCoeffs, R=None, P=None):
src = np.expand_dims(np.asarray(src, dtype=np.float32), 0)
return cv2.undistortPoints(src, cameraMatrix, distCoeffs, None, R, P)[0]
def convertPointsToHomogeneous(src):
return np.squeeze(cv2.convertPointsToHomogeneous(src), axis=1)
def convertPointsFromHomogeneous(src):
return np.squeeze(cv2.convertPointsFromHomogeneous(src), axis=1)
def warpPerspective(src, M, dsize, dst=None, flags=None, borderMode=None, borderValue=None):
fun = cv2.cuda.warpPerspective if isinstance(src, cv2.cuda_GpuMat) else cv2.warpPerspective
return fun(src, M, dsize, dst, flags, borderMode, borderValue)
def remap(src, map1, map2, interpolation, dst=None, borderMode=None, borderValue=None):
fun = cv2.cuda.remap if isinstance(src, cv2.cuda_GpuMat) else cv2.remap
return fun(src, map1, map2, interpolation, dst, borderMode, borderValue)
def warpAffine(src, M, dsize, dst=None, flags=None, borderMode=None, borderValue=None):
fun = cv2.cuda.warpAffine if isinstance(src, cv2.cuda_GpuMat) else cv2.warpAffine
return fun(src, M, dsize, dst, flags, borderMode, borderValue)
def resize(src, dsize, dst=None, fx=None, fy=None, interpolation=None):
fun = cv2.cuda.resize if isinstance(src, cv2.cuda_GpuMat) else cv2.resize
return fun(src, dsize, dst, fx, fy, interpolation)
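# Note (added comment, not part of the original module): each wrapper mirrors the corresponding
# cv2 function but dispatches to the cv2.cuda.* implementation when src is a cv2.cuda_GpuMat
# (e.g. an image uploaded via cv2.cuda_GpuMat().upload(...)), so the same calling code can run
# on either CPU images (numpy arrays) or GPU images.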
| 1,469 |
cs15211/SelfCrossing.py
|
JulyKikuAkita/PythonPrac
| 1 |
2025784
|
__source__ = 'https://leetcode.com/problems/self-crossing/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/self-crossing.py
# Time: O(n)
# Space: O(1)
#
# Description: Leetcode # 335. Self Crossing
#
# You are given an array x of n positive numbers.
# You start at point (0,0) and moves x[0] metres to the north,
# then x[1] metres to the west, x[2] metres to the south,
# x[3] metres to the east and so on. In other words,
# after each move your direction changes counter-clockwise.
#
# Write a one-pass algorithm with O(1) extra space to determine,
# if your path crosses itself, or not.
#
# Example 1:
# Given x = [2, 1, 1, 2],
# ┌───┐
# │ │
# └───┼──>
# │
#
# Return true (self crossing)
# Example 2:
# Given x = [1, 2, 3, 4],
# ┌──────┐
# │ │
# │
# │
# └────────────>
#
# Return false (not self crossing)
# Example 3:
# Given x = [1, 1, 1, 1],
# ┌───┐
# │ │
# └───┼>
#
# Return true (self crossing)
#
# Related Topics
# Math
#
import unittest
class Solution(object):
def isSelfCrossing(self, x):
"""
:type x: List[int]
:rtype: bool
"""
if len(x) >= 5 and x[3] == x[1] and x[4] + x[0] >= x[2]:
# Crossing in a loop:
# 2
# 3 ┌────┐
# └─══>┘1
# 4 0 (overlapped)
return True
for i in range(3, len(x)):
if x[i] >= x[i - 2] and x[i - 3] >= x[i - 1]:
# Case 1:
# i-2
# i-1┌─┐
# └─┼─>i
# i-3
return True
elif i >= 5 and x[i - 4] <= x[i - 2] and x[i] + x[i - 4] >= x[i - 2] and \
x[i - 1] <= x[i - 3] and x[i - 5] + x[i - 1] >= x[i - 3]:
# Case 2:
# i-4
# ┌──┐
# │i<┼─┐
# i-3│ i-5│i-1
# └────┘
# i-2
return True
return False
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought:
#
# Categorize the self-crossing scenarios; there are 3 of them:
# 1. The fourth line crosses the first line (and likewise the fifth crosses the second, and so on).
# 2. The fifth line meets the first line (and likewise for the lines after).
# 3. The sixth line crosses the first line (and likewise for the lines after).
# Suppose i is the current line; then:
#
# i and i-3 can cross
# i and i-4 can cross
# i and i-5 can cross
# no more and no fewer, exactly these combinations.
#
# Now restrict the conditions so that each case is detected exactly when it happens.
#
# i and i-3
#
# i>=i-2 && i-1<=i-3
# i and i-4
#
# i+i-4>=i-2 && i-1==i-3
# i and i-5
#
# i+i-4>=i-2 && i-2>=i-4 && i-1+i-5>=i-3 && i-1<=i-3
#
# 0ms 100%
class Solution {
public boolean isSelfCrossing(int[] x) {
int len = x.length;
if (len < 4) {
return false;
}
for (int i = 3; i < len; i++) {
if (check4(x, i) || (i > 3 && check5(x, i)) || (i > 4 && check6(x, i))) {
return true;
}
}
return false;
}
private boolean check4(int[] x, int i) {
return x[i] >= x[i - 2] && x[i - 1] <= x[i - 3];
}
private boolean check5(int[] x, int i) {
return x[i - 1] == x[i - 3] && x[i] + x[i - 4] >= x[i - 2];
}
private boolean check6(int[] x, int i) {
return x[i - 5] - x[i - 3] + x[i - 1] >= 0 && x[i - 4] <= x[i - 2] && x[i - 3] >= x[i - 1] && x[i - 4] - x[i - 2] + x[i] >= 0;
}
}
# 0ms 100%
class Solution {
public boolean isSelfCrossing(int[] x) {
int n = x.length;
if (n <= 3) return false;
for (int i = 3; i < n; i++) {
//4th line cross 1st
if (x[i] >= x[i-2] && x[i-1] <= x[i-3]) return true;
//5th cross 1st
if (i >= 4) {
if (x[i] + x[i-4] >= x[i-2] && x[i-1] == x[i-3]) return true;
}
//6th cross 1st
if (i >= 5) { //ex: [3,3,3,2,1,1]
if (x[i] + x[i-4] >= x[i-2] && x[i-1] + x[i-5] >= x[i-3] && x[i-1] <= x[i-3] && x[i - 4] < x[i-2])
return true;
}
}
return false;
}
}
'''
| 4,323 |
run.py
|
pjmedina/client-insight-wealth-management
| 8 |
2024205
|
# Copyright 2015 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask, jsonify, render_template, json, Response, request
import os
import client
app = Flask(__name__)
### Build the Client Profile
# Feel free to try any of the following customer_ids to see different results.
#customer_ids
customer_ids = ['1489','1285','1015','1725','1880']
#get desc for life events
with open('values_desc/life_events_desc.json') as json_file:
life_events_desc = json.load(json_file)
json_file.close();
#get desc for attrition features, customer segments
with open('values_desc/feature_desc.json') as json_file:
features_desc = json.load(json_file)
json_file.close();
@app.route('/')
def run():
return render_template('index.html')
@app.route('/api/getcustomerids',methods=['GET'])
def get_customer_ids():
return json.dumps(customer_ids)
@app.route('/api/retrieve', methods =['GET','POST'])
def retrieve():
output = {}
#retrieve the json from the ajax call
json_file = ''
if request.method == 'POST':
json_file = request.json
print ("post request")
#if json_file successfully posted..
if json_file != '':
# check all required arguments are present:
if not all(arg in json_file for arg in ["customerId"]):
print("Missing arguments in post request")
return json.dumps({"error":"Missing arguments"})
inputCustomerId = json_file["customerId"]
print("retreived data: " + str(inputCustomerId) )
data_array = []
client_info_obj = {}
client_attrition_score_obj = {}
client_examine_segement_obj = {}
client_profile_all = client.retrieve_entire_client_profile(inputCustomerId)
client_profile = client_profile_all[0]
if ("error" in client_profile):
return json.dumps({"error": client_profile["error"]})
#profile = json.dumps(client_profile, indent=4, sort_keys=True)
#print(profile)
#get client info, returns a list with first element containing client info
client_info_obj = client_profile["customer"]
if ("error" in client_info_obj):
return json.dumps({"error": client_info_obj["error"]})
#get client life events
client_life_events = client_profile["event_scores"]
if (len(client_life_events) > 0):
if ("error" in client_life_events[0]):
return json.dumps({"error": client_life_events["error"]})
#get client_attrition_score, returns a list with first element containing info
client_attrition_scores = client_profile["scores"]
if (len(client_attrition_scores) > 0):
if ("error" in client_attrition_scores[0]):
return json.dumps({"error": client_attrition_scores[0]["error"]})
for i in range(len(client_attrition_scores)):
if (client_attrition_scores[i]["score_code"] == "ATTRITION" and client_attrition_scores[i]["model_scope_forecast_horizon"] == 1 ):
client_attrition_score_obj = client_attrition_scores[i]
if (client_attrition_scores[i]["score_code"] == "DYNAMIC_SEGMENTATION"):
client_examine_segement_obj = client_attrition_scores[i]
#get segment description, returns a list
segment_description = client.segment_description()
if (len(segment_description) > 0):
if ("error" in segment_description[0]):
return json.dumps({"error": segment_description[0]["error"]})
#create the output json
output = {"clientInfo": client_info_obj, "clientAttritionScore": client_attrition_score_obj, "clientLifeEvents": client_life_events, "clientExamineSegment": client_examine_segement_obj, "segmentDescription": segment_description, "customerId": inputCustomerId, "lifeEventsDescription": life_events_desc, "featuresDescription": features_desc}
#return output json
return json.dumps(output)
port = int(os.getenv('VCAP_APP_PORT', 8080))
host='0.0.0.0'
if __name__ == "__main__":
app.run(host='0.0.0.0', port=int(port))
| 4,513 |
Mesh/System/Entity/Concrete/Switch.py
|
ys-warble/Mesh
| 0 |
2023257
|
import numpy as np
from Mesh.System.Entity.Concrete import Concrete
from Mesh.System.Entity.Function import Function
from Mesh.System.Entity.Function.Powered import PowerInput, PowerOutput, Powered, ElectricPower
from Mesh.System.Entity.Function.Tasked import Tasked, TaskLevel, TaskResponse, Status, TaskName
from Mesh.System.SpaceFactor import MatterType
class Switch(Concrete):
identifier = 'switch'
default_dimension = (1, 1, 2)
default_orientation = (0, 1, 0)
default_consume_power_ratings = [ElectricPower(110)]
def __init__(self, uuid, dimension_x=(1, 1, 1), selected_functions=(Function.POWERED, Function.TASKED)):
super().__init__(uuid=uuid, dimension_x=dimension_x, matter_type=MatterType.PLASTIC,
selected_functions=selected_functions)
self.active = False
def get_default_shape(self):
i = self.matter_type.value
shape = np.array([
[[i, i]]
])
return shape
def validate_functions(self, selected_functions):
if Function.POWERED in selected_functions and Function.TASKED in selected_functions:
return True
else:
return False
def define_functions(self, selected_functions):
if Function.POWERED in selected_functions:
powered = Powered(self)
powered.power_inputs.append(PowerInput(self))
powered.power_outputs.append(PowerOutput(self))
powered.input_power_ratings.extend(Switch.default_consume_power_ratings)
self.functions[Function.POWERED] = powered
if Function.TASKED in selected_functions:
self.functions[Function.TASKED] = SwitchTasked(self)
class SwitchTasked(Tasked):
tasks = [
TaskName.GET_SYSTEM_INFO,
TaskName.ACTIVE,
TaskName.DEACTIVATE,
]
def handle(self, task):
def get_info():
return {
'uuid': str(self.entity.uuid),
'identifier': type(self.entity).identifier,
'type': {
'actuator': [
'LUMINOSITY'
],
'sensor': [],
'accessor': []
}
}
if self.entity.active and task.level == TaskLevel.ENTITY:
task_response = TaskResponse(Status.ERROR, {'error': 'Not Implemented'})
elif task.level == TaskLevel.SYSTEM:
if task.name == TaskName.GET_SYSTEM_INFO:
system_info = get_info()
system_info['active'] = self.entity.active
task_response = TaskResponse(status=Status.OK, value={'system_info': system_info})
elif task.name == TaskName.ACTIVE:
self.entity.active = True
if self.entity.has_function(Function.POWERED):
powered = self.entity.get_function(Function.POWERED)
for i in powered.power_outputs:
i.set_power(powered.power_inputs[0].get_power())
task_response = TaskResponse(status=Status.OK, value=None)
elif task.name == TaskName.DEACTIVATE:
self.entity.active = False
if self.entity.has_function(Function.POWERED):
powered = self.entity.get_function(Function.POWERED)
for i in powered.power_outputs:
i.set_power()
task_response = TaskResponse(status=Status.OK, value=None)
else:
task_response = TaskResponse(Status.ERROR, {'error': 'Not Implemented'})
elif task.level == TaskLevel.PROGRAM:
task_response = TaskResponse(Status.ERROR, {'error': 'Not Implemented'})
else:
task_response = TaskResponse(Status.ERROR, {'error': 'Not Implemented'})
return task_response
| 3,848 |
language_apps/expr2/expr2listener.py
|
SadraGoudarzdashti/IUSTCompiler
| 3 |
2025198
|
"""
Example of three-address code generator with listener
"""
__version__ = '0.1.0'
__author__ = 'Morteza'
from language_apps.expr2.gen.Expr2Parser import Expr2Parser
from language_apps.expr2.gen.Expr2Listener import Expr2Listener
class DummyListener(Expr2Listener):
def exitExpr1(self, ctx: Expr2Parser.Expr1Context):
print('Exiting plus with first operand {0} and second operand {1}:'.format(ctx.expr().getText(),
ctx.term().getText()))
def exitFact2(self, ctx:Expr2Parser.Fact2Context):
print('Exiting fact2 with number', ctx.Number())
class TreeAddressCode(Expr2Listener):
"""
Generate three-address code for part of the expression rule of the CPP14 grammar
"""
def __init__(self):
self.temp_counter = 0
# self.value = 0
def create_temp(self):
self.temp_counter += 1
return 'T' + str(self.temp_counter)
def remove_temp(self):
self.temp_counter -= 1
def get_temp(self):
return 'T' + str(self.temp_counter)
# Rule: #fact1
def exitFact1(self, ctx: Expr2Parser.Fact1Context):
ctx.code = ctx.getText()
def exitFact2(self, ctx: Expr2Parser.Fact2Context):
ctx.code = ctx.getText()
def exitFact3(self, ctx: Expr2Parser.Fact3Context):
ctx.code = ctx.expr().code
def exitTerm1(self, ctx: Expr2Parser.Term1Context):
temp = self.create_temp()
print(temp, '=', ctx.term().code, '*', ctx.fact().code)
ctx.code = temp
# self.value = int(ctx.term().code) * int(ctx.fact().code)
# ctx.code = self.value
def exitTerm2(self, ctx: Expr2Parser.Term2Context):
temp = self.create_temp()
print(temp, '=', ctx.term().code, '/', ctx.fact().code)
ctx.code = temp
# self.value = int(ctx.term().code) / int(ctx.fact().code)
# ctx.code = self.value
def exitTerm3(self, ctx: Expr2Parser.Term3Context):
ctx.code = ctx.fact().code
def exitExpr1(self, ctx: Expr2Parser.Expr1Context):
temp = self.create_temp()
print(temp, '=', ctx.expr().code, '+', ctx.term().code)
ctx.code = temp
# self.value = int(ctx.expr().code) + int(ctx.term().code)
# ctx.code = self.value
def exitExpr2(self, ctx: Expr2Parser.Expr2Context):
temp = self.create_temp()
print(temp, '=', ctx.expr().code, '-', ctx.term().code)
ctx.code = temp
# self.value = int(ctx.expr().code) - int(ctx.term().code)
# ctx.code = self.value
def exitExpr3(self, ctx: Expr2Parser.Expr3Context):
ctx.code = ctx.term().code
def exitStart(self, ctx: Expr2Parser.StartContext):
# pass
print(ctx.Id(), '=', ctx.expr().code)
# print(ctx.Id(), '=', self.value)
| 2,962 |
hydra_client/codes.py
|
Project-Dream-Weaver/Hydra
| 3 |
2025931
|
from dataclasses import dataclass
@dataclass(frozen=True)
class OpCodes:
IDENTIFY = 0
HTTP_REQUEST = 1
MESSAGE = 2
| 129 |
exercises/exercise85.py
|
djangojeng-e/TIL
| 0 |
2024557
|
def numbers(n):
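# Added comment: yields every number in [0, n] that is divisible by both 5 and 7, i.e. the multiples of 35.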
for number in range(0, n + 1):
if number % 5 == 0 and number % 7 == 0:
yield number
n = int(input("Please enter your number"))
values = []
for i in numbers(n):
values.append(str(i))
print(",".join(values))
| 256 |
myvenv/Lib/site-packages/graphene/utils/tests/test_annotate.py
|
Fa67/saleor-shop
| 1 |
2023731
|
import pytest
from ..annotate import annotate
def func(a, b, *c, **d):
pass
annotations = {
'a': int,
'b': str,
'c': list,
'd': dict
}
def func_with_annotations(a, b, *c, **d):
pass
func_with_annotations.__annotations__ = annotations
def test_annotate_with_no_params():
annotated_func = annotate(func, _trigger_warning=False)
assert annotated_func.__annotations__ == {}
def test_annotate_with_params():
annotated_func = annotate(_trigger_warning=False, **annotations)(func)
assert annotated_func.__annotations__ == annotations
def test_annotate_with_wrong_params():
with pytest.raises(Exception) as exc_info:
annotated_func = annotate(p=int, _trigger_warning=False)(func)
assert str(exc_info.value) == 'The key p is not a function parameter in the function "func".'
| 832 |
DynamicGestures/dlib-18.5/python_examples/max_cost_assignment.py
|
uiuyuty/vsfh
| 36 |
2025792
|
#!/usr/bin/python
# The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
#
#
# This simple example shows how to call dlib's optimal linear assignment problem solver.
# It is an implementation of the famous Hungarian algorithm and is quite fast, operating in
# O(N^3) time.
#
# COMPILING THE DLIB PYTHON INTERFACE
# Dlib comes with a compiled python interface for python 2.7 on MS Windows. If
# you are using another python version or operating system then you need to
# compile the dlib python interface before you can use this file. To do this,
# run compile_dlib_python_module.bat. This should work on any operating system
# so long as you have CMake and boost-python installed. On Ubuntu, this can be
# done easily by running the command: sudo apt-get install libboost-python-dev cmake
import dlib
# Let's imagine you need to assign N people to N jobs. Additionally, each person will make
# your company a certain amount of money at each job, but each person has different skills
# so they are better at some jobs and worse at others. You would like to find the best way
# to assign people to these jobs. In particular, you would like to maximize the amount of
# money the group makes as a whole. This is an example of an assignment problem and is
# what is solved by the dlib.max_cost_assignment() routine.
# So in this example, let's imagine we have 3 people and 3 jobs. We represent the amount of
# money each person will produce at each job with a cost matrix. Each row corresponds to a
# person and each column corresponds to a job. So for example, below we are saying that
# person 0 will make $1 at job 0, $2 at job 1, and $6 at job 2.
cost = dlib.matrix([[1, 2, 6],
[5, 3, 6],
[4, 5, 0]])
# To find out the best assignment of people to jobs we just need to call this function.
assignment = dlib.max_cost_assignment(cost)
# This prints optimal assignments: [2, 0, 1]
# which indicates that we should assign the person from the first row of the cost matrix to
# job 2, the middle row person to job 0, and the bottom row person to job 1.
print "optimal assignments: ", assignment
# This prints optimal cost: 16.0
# which is correct since our optimal assignment is 6+5+5.
print "optimal cost: ", dlib.assignment_cost(cost, assignment)
| 2,357 |
figures.py
|
ejnnr/steerable_pdo_experiments
| 0 |
2026020
|
# The # %% lines denote cells, allowing this file to be run using
# the interactive mode of the Python VS Code extension. But you
# can also run it simply as a normal python script.
# %%
import matplotlib.pyplot as plt
import numpy as np
import os
import seaborn as sns
from scipy.signal import correlate2d
from e2cnn.diffops.utils import discretize_homogeneous_polynomial as dp
from e2cnn import nn
from e2cnn import gspaces
# %%
sns.set_theme(context="paper")
os.makedirs("fig", exist_ok=True)
# %%
x = np.linspace(0, np.pi, 128)
xx, yy = np.meshgrid(x[32:], x[:64])
zz_x = np.sin(xx + yy) + 0.5 * (xx - 2)
zz_y = np.cos(xx - yy) + 0.5 * (yy - 2)
zz_mag = np.sqrt(zz_x**2 + zz_y**2)
plt.figure(figsize=(6, 4))
plt.gcf().set_facecolor((0.95, 0.95, 0.95))
plt.quiver(zz_x[::8, ::8], zz_y[::8, ::8], zz_mag[::8, ::8])
plt.axis("equal")
plt.axis("off")
plt.gcf().tight_layout()
plt.savefig("fig/vector_input.pdf", bbox_inches="tight", facecolor=plt.gcf().get_facecolor())
# %%
# the order is like that ... for reasons
# y_grad, x_grad = np.gradient(zz)
# gradient_mag = np.sqrt(x_grad**2 + y_grad**2)
# Laplacian of the divergence:
x_filter = dp([-2, -1, 0, 1, 2], np.array([0, 1, 0, 1])).reshape(5, 5)
# curl:
x_filter += dp([-2, -1, 0, 1, 2], np.array([1, 0])).reshape(5, 5)
y_filter = dp([-2, -1, 0, 1, 2], np.array([1, 0, 1, 0])).reshape(5, 5)
y_filter += dp([-2, -1, 0, 1, 2], np.array([0, -1])).reshape(5, 5)
out = correlate2d(zz_x, x_filter, mode="valid") + correlate2d(zz_y, y_filter, mode="valid")
plt.figure(figsize=(6, 4))
plt.imshow(out, origin="lower")
plt.axis("off")
plt.axis("equal")
plt.gcf().tight_layout()
plt.savefig("fig/scalar_output.pdf", bbox_inches="tight")
# %%
plt.imshow(x_filter, cmap="gray")
plt.axis("off")
plt.gcf().tight_layout()
plt.savefig("fig/laplacian_divergence_filter_x.pdf", bbox_inches="tight")
plt.imshow(y_filter, cmap="gray")
plt.axis("off")
plt.gcf().tight_layout()
plt.savefig("fig/laplacian_divergence_filter_y.pdf", bbox_inches="tight")
# %%
gs = gspaces.Rot2dOnR2(8)
in_type = nn.FieldType(gs, [gs.trivial_repr])
out_type = nn.FieldType(gs, [gs.regular_repr])
# %%
models = {}
for kernel_size in [3, 5]:
models[("Kernel", kernel_size)] = nn.R2Conv(in_type, out_type, kernel_size)
max_order = 2 if kernel_size == 3 else 3
models[("FD", kernel_size)] = nn.R2Diffop(in_type, out_type, kernel_size, maximum_order=max_order)
models[("RBF-FD", kernel_size)] = nn.R2Diffop(in_type, out_type, kernel_size, rbffd=True, maximum_order=max_order)
smoothing = 1 if kernel_size == 3 else 1.3
models[("Gauss", kernel_size)] = nn.R2Diffop(in_type, out_type, kernel_size, smoothing=smoothing, maximum_order=max_order)
# %%
filters = {}
for k, model in models.items():
exp = getattr(model.basisexpansion, "block_expansion_('irrep_0', 'regular')")
size = k[1]
filters[k] = exp.sampled_basis.numpy().reshape(-1, 8, size, size)
# %%
methods = ["Kernel", "FD", "RBF-FD", "Gauss"]
for size in [3, 5]:
fig, ax = plt.subplots(4, 6)
vmin = min(np.min(filters[(method, size)]) for method in methods)
vmax = max(np.max(filters[(method, size)]) for method in methods)
for i, method in enumerate(methods):
ax[i, 0].set_ylabel(method, size="large")
for j in range(6):
ax[i, j].imshow(
filters[(method, size)][-j-1, 1],
cmap="bwr",
vmin=vmin,
vmax=vmax,
)
ax[i, j].axis("equal")
# ax[i, j].axis("off")
ax[i, j].get_xaxis().set_ticks([])
ax[i, j].get_yaxis().set_ticks([])
# fig.tight_layout()
fig.subplots_adjust(hspace=.5)
fig.subplots_adjust(wspace=.5)
fig.savefig(f"fig/stencils_{size}.pdf")
| 3,738 |
scripts/performance/perf_load/perf_client_runner.py
|
Rob-S/indy-node
| 627 |
2025880
|
import logging
from perf_load.perf_client_msgs import ClientRun, ClientGetStat
class ClientRunner:
ClientError = 0
ClientCreated = 1
ClientReady = 2
ClientRun = 3
ClientStopped = 4
def __init__(self, name, conn, out_file):
self.status = ClientRunner.ClientCreated
self.name = name
self.conn = conn
self.total_sent = 0
self.total_succ = 0
self.total_failed = 0
self.total_nack = 0
self.total_reject = 0
self._out_file = out_file
self._logger = logging.getLogger(name)
def stop_client(self):
self._logger.debug("stop_client")
self.status = ClientRunner.ClientStopped
def is_finished(self):
return self.status == ClientRunner.ClientStopped
def refresh_stat(self, stat):
if not isinstance(stat, dict):
return
self.total_sent = stat.get("total_sent", self.total_sent)
self.total_succ = stat.get("total_succ", self.total_succ)
self.total_failed = stat.get("total_fail", self.total_failed)
self.total_nack = stat.get("total_nacked", self.total_nack)
self.total_reject = stat.get("total_rejected", self.total_reject)
def run_client(self):
self._logger.debug("run_client {}".format(self))
try:
if self.conn and self.status == ClientRunner.ClientReady:
self.conn.send(ClientRun())
self.status = ClientRunner.ClientRun
except Exception as e:
self._logger.exception("Sent Run to client {} error {}".format(self.name, e))
self.status = ClientRunner.ClientError
def req_stats(self):
self._logger.debug("req_stats {}".format(self))
try:
if self.conn and self.status == ClientRunner.ClientRun:
self.conn.send(ClientGetStat())
except Exception as e:
self._logger.exception("Sent ClientGetStat to client {} error {}".format(self.name, e), file=self._out_file)
self.status = ClientRunner.ClientError
| 2,065 |
twoway_example.py
|
ContinuumBridge/spur_twoway_example
| 0 |
2025776
|
#!/usr/bin/env python
# twoway_example.py
# Copyright (C) ContinuumBridge Limited, 2017
# Written by <NAME>
#
import json
import os.path
import signal
import ibmiotf.application
HOME = os.getcwd()
#api watson={"org": "adqdih", "auth-key": "<KEY>", "auth-token": <KEY>"}
config = {
"org": "adqdih",
"auth-key": "<KEY>",
"auth-token": "<KEY>",
"screensetName": "Watson_Clean",
"listName": "Test",
"buttonName": "Test_Button_703"
}
deviceTypeId = config["screensetName"]
deviceId = config["listName"] + "-" + config["buttonName"]
print("deviceTypeId: {}, deviceId: {}".format(deviceTypeId, deviceId))
def watsonCallback(event):
data = json.dumps(event.data)
print("{} event received from button {}, data: {}".format(event.event, event.device, data))
commandData={"test_response" : "test"}
watsonClient.publishCommand(deviceTypeId, deviceId, "test", "json", commandData)
print("Successfully publishted update")
try:
options = {
"org": config["org"],
"id": "spur",
"auth-method": "apikey",
"auth-key": config["auth-key"],
"auth-token": config["auth-token"]
}
watsonClient = ibmiotf.application.Client(options)
print("Created Watson client")
except ibmiotf.ConnectionException as e:
print("Watson Client creation exception: {}, organsiation: {}".format(e, watsonParams))
exit()
deviceTypeInfo = watsonClient.api.getDeviceType(deviceTypeId)
print("Got deviceTypeInfo")
print("devieTupeInfo: {}".format(deviceTypeInfo))
try:
watsonClient.connect()
print("Watson client connected")
except ibmiotf.ConnectionException as e:
print("Unable to connect to Watson, exception: {}".format(e))
exit()
watsonClient.deviceEventCallback = watsonCallback
print("Watson client callback registered")
watsonClient.subscribeToDeviceEvents(deviceType=deviceTypeId)
print("Watson client subscribed to events")
signal.pause()
| 1,955 |
src/SCTA/System/LocalOscillator.py
|
DougMHu/SCTA_repo
| 0 |
2025557
|
import logging
logger = logging.getLogger(__name__)
class LocalOscillator(object):
def __init__(self, id='LO', freq=0):
"""Constructor.
~~~~~ Possibilities ~~~~~
id: string
freq: positive (float) [MHz]
"""
self.id = id # identifier string
self.freq = freq # LO freq
| 350 |
configtamer/compat.py
|
rbp/configtamer
| 8 |
2025432
|
"""Python 2/3 compatibility utils"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import sys
python_version = sys.version_info[0]
def raise_(exception_type, message=None, traceback=None):
if python_version == 2:
# Trying to mimic "raise exception_type, message, traceback"
if traceback is not None:
from traceback import print_tb
print_tb(traceback)
raise exception_type(message)
else:
raise exception_type(message).with_traceback(traceback)
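# Example (added, illustrative): raise_(ValueError, "bad value") raises ValueError("bad value")
# on both Python 2 and 3; on Python 3 an optional traceback is attached via with_traceback().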
| 616 |
Hackerrank_codes/find_runner_up_score.py
|
Vyshnavmt94/HackerRankTasks
| 0 |
2024794
|
"""
Given the participants' score sheet for your University Sports Day, you are required to find the runner-up score. You are given n scores. Store them in a list and find the score of the runner-up.
Input Format
The first line contains n. The second line contains an array A[] of n integers each separated by a space.
Constraints
2 <= n <= 10
-100 <= A[i] <= 100
Output Format
Print the runner-up score.
Sample Input 0
5
2 3 6 6 5
Sample Output 0
5
Explanation 0
Given list is [2, 3, 6, 6, 5]. The maximum score is 6, second maximum is 5. Hence, we print 5 as the runner-up score.
"""
def check_number(num):
if num >= 2 and num <= 10:
return True
def check_array(arr):
if not False in [True if a >= -100 and a <= 100 else False for a in arr]:
return True
#k = kth largest number to be found
def runner_up(arr,k):
for i in range(k):
m = max(arr)
arr = [a for a in arr if a != m]
return m
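# Minimal usage sketch (an assumption; the HackerRank-style I/O below is not part of the original file):
# n = int(input())
# arr = list(map(int, input().split()))
# if check_number(n) and check_array(arr):
#     print(runner_up(arr, 2))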
| 892 |
tcp_server.py
|
26huitailang/socket-service
| 0 |
2023070
|
#!/usr/bin/env python3
# coding=utf-8
import socketserver
import logging
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
filename='/tmp/myapp.log',
filemode='w')
logger = logging.getLogger(__file__)
class MyServer(socketserver.BaseRequestHandler):
def handle(self):
# Get the request connection; this handler inherits from socketserver.BaseRequestHandler
conn = self.request
# Send a login prompt
conn.sendall(b"Welcome to login...")
logger.info("Client connect...")
while True:
logger.info("Waitting for recving message...")
# 接收消息
message = conn.recv(1024)
# print(message.decode('utf-8'))
logger.info(message.decode('utf-8'))
# Quit when "exit" is received
if message.decode('utf-8') == "exit":
break
# Build the reply
data = (message.decode('utf-8') + ' Done!')
# Send the reply
conn.sendall(data.encode('utf-8'))
if __name__ == "__main__":
# Instantiate the server
# server = socketserver.ThreadingTCPServer(('0.0.0.0', 9999,), MyServer) # thread per connection
server = socketserver.ForkingTCPServer(('0.0.0.0', 9999,), MyServer) # process per connection
# Call the serve_forever method
server.serve_forever()
| 1,320 |
Image Classification/CGIAR Wheat Growth Stage Challenge/neurofitting/zindi_cgiar_wheat_growth_stage_challenge/src_ensemble/dataset.py
|
ZindiAfrica/Computer-Vision
| 0 |
2026033
|
from torch.utils.data import Dataset
import albumentations as A
from albumentations.pytorch.transforms import ToTensorV2
import numpy as np
class ZCTESTDataset(Dataset):
def __init__(self, imgs):
super(ZCTESTDataset).__init__()
self.imgs = imgs
self.transform = A.Compose([ToTensorV2(always_apply=True, p=1.0)], p=1.0)
self.transform_hflip = A.Compose([
A.HorizontalFlip(p=1.0),
ToTensorV2(always_apply=True, p=1.0)
], p=1.0)
def __len__(self):
return self.imgs.shape[0]
def __getitem__(self, index):
img = self.imgs[index].copy()
img = (img/255).astype(np.float32)
img_hflip = self.transform_hflip(**{'image': img})['image']
img = self.transform(**{'image': img})['image']
return img.float(), img_hflip.float()
| 949 |
git.py
|
vikian050194/py
| 0 |
2025158
|
#!/usr/bin/env python3
"""Interactive tool for repositories cloning from GitHub"""
import os
import sys
import requests
import subprocess
def get_repos_metadata(user = "vikian050194"):
"""Info about get_repos_metadata.
Args:
user: GitHub user name
Returns:
List of repositories metadata
"""
url = f"https://api.github.com/users/{user}/repos"
r = requests.get(url = url)
return r.json()
def repo_name_to_dir(repo_name: str):
return repo_name.lower().replace('-', '/', 1)
def do_interactive_clone(repos, url_type):
"""Go through list of repositories and clone some of them according to user response
Args:
repos: List of repositories metadata
"""
url = None
if url_type == "ssh":
url = "ssh_url"
if url_type == "http":
url = "clone_url"
for repo in repos:
print("%s: %s"%(repo["name"], repo["language"]))
target_dir = repo_name_to_dir(repo["name"])
if os.path.exists(target_dir):
print("This repo is already cloned")
continue
print("Clone this repo? (y/n/exit)")
response = input()
if response == "y":
bashCommand = ["git", "clone" , repo[url], target_dir]
process = subprocess.Popen(bashCommand, stdout=subprocess.PIPE)
output, error = process.communicate()
print(output)
if response == "exit":
break
def clone_repos(url_type):
"""Entry point"""
try:
repos = get_repos_metadata()
reposCount = len(repos)
print(f"{reposCount} repositories are discovered")
if len(repos) != 0:
do_interactive_clone(repos, url_type)
except Exception as e:
print(e)
if __name__ == "__main__":
url_type = "ssh"
if len(sys.argv) == 2:
url_type = sys.argv[1];
clone_repos(url_type)
| 1,908 |
run_model.py
|
mkneierV/kaggle_avazu_benchmark
| 39 |
2025698
|
import logging
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from sklearn.linear_model import SGDClassifier
import lib.ml as ml
import lib.preprocessing as pp
def main(neg_rate, submission_num, n_iter, train_path):
ids = [x for x in pp.get_int_field('id', 'original_data/test')]
clicks = pp.get_int_field('click', train_path)
# Get Data Generators
train = pp.data_generator(pp.clean_parse_row, train_path)
test = pp.data_generator(pp.clean_parse_row, 'original_data/test')
# Define estimators
fh = FeatureHasher(n_features=2 ** 20, input_type='pair')
sgd = SGDClassifier(loss='log', n_iter=1, alpha=.003, penalty='l2')
#Fit pipeline
pipeline = ml.PartialFitter([fh, sgd],
batch_size=10000,
logging=True,
n_iter=n_iter,
neg_rate=neg_rate)
pipeline.partial_fit(X=train, y=clicks)
# Correct Intercept
pipeline.steps[-1].intercept_[0] += np.log(neg_rate)
preds = pipeline.predict_proba(newX=test)[:, 1]
pp.write_submission(number=submission_num, ids=ids, preds=preds)
if __name__ == '__main__':
logging.basicConfig(filename='train_errors.log', level=logging.WARNING)
from docopt import docopt
import sys
usage = '''Train SGD model and create entry for Kaggle Avazu Competition.
Usage:
%(program_name)s --neg_rate=<r> --submission_num=<s> --n_iter=<n> --train_path=<p>
%(program_name)s (-h | --help)
Options:
-h --help Show this screen.
--neg_rate=<r> Rate at which to sample negative cases
--submission_num=<s> Submission number.
--n_iter=<n> Number of fitting iterations over training data
--train_path=<p> Path to training dataset
''' % {'program_name': sys.argv[0]}
arguments = docopt(usage)
main(float(arguments['--neg_rate']),
arguments['--submission_num'],
int(arguments['--n_iter']),
arguments['--train_path']
)
| 2,124 |
bingads/v12/bulk/entities/audiences/bulk_ad_group_audience_association.py
|
channable/BingAds-Python-SDK
| 0 |
2024938
|
from bingads.v12.bulk.entities import *
from bingads.service_client import _CAMPAIGN_OBJECT_FACTORY_V12
from bingads.v12.internal.bulk.entities.single_record_bulk_entity import _SingleRecordBulkEntity
from bingads.v12.internal.bulk.mappings import _SimpleBulkMapping
from bingads.v12.internal.bulk.string_table import _StringTable
from bingads.v12.internal.extensions import *
class BulkAdGroupAudienceAssociation(_SingleRecordBulkEntity):
""" Base class for all Ad Group Audience Association subclasses that can be read or written in a bulk file.
*See also:*
* :class:`.BulkAdGroupCustomAudienceAssociation`
* :class:`.BulkAdGroupInMarketAudienceAssociation`
* :class:`.BulkAdGroupProductAudienceAssociation`
* :class:`.BulkAdGroupRemarketingListAssociation`
* :class:`.BulkAdGroupSimilarRemarketingListAssociation`
"""
def __init__(self,
biddable_ad_group_criterion=None,
campaign_name=None,
ad_group_name=None,
audience_name=None):
super(BulkAdGroupAudienceAssociation, self).__init__()
self._biddable_ad_group_criterion = biddable_ad_group_criterion
self._campaign_name = campaign_name
self._ad_group_name = ad_group_name
self._audience_name = audience_name
self._performance_data = None
_MAPPINGS = [
_SimpleBulkMapping(
_StringTable.Status,
field_to_csv=lambda c: bulk_str(c.biddable_ad_group_criterion.Status),
csv_to_field=lambda c, v: setattr(c.biddable_ad_group_criterion, 'Status', v if v else None)
),
_SimpleBulkMapping(
_StringTable.Id,
field_to_csv=lambda c: bulk_str(c.biddable_ad_group_criterion.Id),
csv_to_field=lambda c, v: setattr(c.biddable_ad_group_criterion, 'Id', int(v) if v else None)
),
_SimpleBulkMapping(
_StringTable.ParentId,
field_to_csv=lambda c: bulk_str(c.biddable_ad_group_criterion.AdGroupId),
csv_to_field=lambda c, v: setattr(c.biddable_ad_group_criterion, 'AdGroupId', int(v) if v else None)
),
_SimpleBulkMapping(
_StringTable.Campaign,
field_to_csv=lambda c: c.campaign_name,
csv_to_field=lambda c, v: setattr(c, 'campaign_name', v)
),
_SimpleBulkMapping(
_StringTable.AdGroup,
field_to_csv=lambda c: c.ad_group_name,
csv_to_field=lambda c, v: setattr(c, 'ad_group_name', v)
),
_SimpleBulkMapping(
_StringTable.Audience,
field_to_csv=lambda c: c.audience_name,
csv_to_field=lambda c, v: setattr(c, 'audience_name', v)
),
_SimpleBulkMapping(
_StringTable.BidAdjustment,
field_to_csv=lambda c: field_to_csv_BidAdjustment(c.biddable_ad_group_criterion),
csv_to_field=lambda c, v: csv_to_field_BidAdjustment(c.biddable_ad_group_criterion, float(v) if v else None)
),
_SimpleBulkMapping(
_StringTable.AudienceId,
field_to_csv=lambda c: field_to_csv_CriterionAudienceId(c.biddable_ad_group_criterion),
csv_to_field=lambda c, v: csv_to_field_CriterionAudienceId(c.biddable_ad_group_criterion, int(v) if v else None)
),
]
@property
def biddable_ad_group_criterion(self):
""" Defines a Biddable Ad Group Criterion """
return self._biddable_ad_group_criterion
@biddable_ad_group_criterion.setter
def biddable_ad_group_criterion(self, biddable_ad_group_criterion):
self._biddable_ad_group_criterion = biddable_ad_group_criterion
@property
def campaign_name(self):
""" Defines the name of the Campaign.
:rtype: str
"""
return self._campaign_name
@campaign_name.setter
def campaign_name(self, campaign_name):
self._campaign_name = campaign_name
@property
def ad_group_name(self):
""" Defines the name of the Ad Group
:rtype: str
"""
return self._ad_group_name
@ad_group_name.setter
def ad_group_name(self, ad_group_name):
self._ad_group_name = ad_group_name
@property
def audience_name(self):
""" Defines the name of the Audience
:rtype: str
"""
return self._audience_name
@audience_name.setter
def audience_name(self, audience_name):
self._audience_name = audience_name
@property
def performance_data(self):
return self._performance_data
def process_mappings_from_row_values(self, row_values):
self._biddable_ad_group_criterion = _CAMPAIGN_OBJECT_FACTORY_V12.create('BiddableAdGroupCriterion')
self._biddable_ad_group_criterion.Type = 'BiddableAdGroupCriterion'
self._biddable_ad_group_criterion.Criterion = _CAMPAIGN_OBJECT_FACTORY_V12.create('AudienceCriterion')
self._biddable_ad_group_criterion.Criterion.Type = 'AudienceCriterion'
self._biddable_ad_group_criterion.CriterionBid = _CAMPAIGN_OBJECT_FACTORY_V12.create('BidMultiplier')
self._biddable_ad_group_criterion.CriterionBid.Type = 'BidMultiplier'
row_values.convert_to_entity(self, BulkAdGroupAudienceAssociation._MAPPINGS)
self._performance_data = PerformanceData.read_from_row_values_or_null(row_values)
def process_mappings_to_row_values(self, row_values, exclude_readonly_data):
self._validate_property_not_null(self.biddable_ad_group_criterion, 'biddable_ad_group_criterion')
self.convert_to_values(row_values, BulkAdGroupAudienceAssociation._MAPPINGS)
PerformanceData.write_to_row_values_if_not_null(self._performance_data, row_values)
def read_additional_data(self, stream_reader):
super(BulkAdGroupAudienceAssociation, self).read_additional_data(stream_reader)
| 5,901 |
cfnlp/tools/connector/es_connector.py
|
invoker4zoo/cf-nlp-py
| 0 |
2023105
|
# coding=utf-8
"""
@ license: Apache Licence
@ github: invoker4zoo
@ author: invoker/cc
@ wechart: whatshowlove
@ software: PyCharm
@ file: es_connector.py
@ time: $19-2-21 9:29 AM
"""
import sys
from elasticsearch import Elasticsearch
from cfnlp.tools.logger import logger
reload(sys)
sys.setdefaultencoding('utf-8')
class esConnector(object):
def __init__(self, url, index, doc_type):
"""
No distributed setup for now; url is a single endpoint
:param url:
Elasticsearch fun
es.index
"""
self.es = Elasticsearch([url])
self.index = index
self.doc_type = doc_type
self.re_connect = 3
def search_all(self, size=1000):
"""
This method can be overridden
Example query
:return:
"""
try:
dsl_query = {
'query':{
'match_all':{}
},
'size':size
}
result = self.es.search(self.index, self.doc_type, body=dsl_query)
return result
except Exception, e:
logger.error('search all doc failed for %s' % str(e))
return None
def search_doc_by_id(self, id):
"""
This method can be overridden
Example query
search doc by id
:param id:
:return:
"""
try:
dsl_query = {
'query': {
'match': {
'_id': id
}
}
}
result = self.es.search(self.index, self.doc_type, body=dsl_query)
if len(result.get('hits', {}).get('hits', [])):
return result.get('hits', {}).get('hits', [])[0]
else:
return []
except Exception, e:
logger.error('search doc by id failed for %s' % str(e))
return None
def insert_single_info(self, info):
"""
This method can be overridden
Example query
:param info:
:return:
"""
try:
result = self.es.index(self.index, self.doc_type, body=info)
return result
except Exception, e:
logger.error('insert single info failed for %s' % str(e))
return None
def check_info_exist(self, title):
"""
This method can be overridden
Example query
Since no id is specified for insert operations, the title is used to check whether the document already exists
:param title:
:return:
"""
try:
# Exact phrase matching on a string field in elasticsearch
# 参考 https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-match-query.html
dsl_query = {
'query': {
'match_phrase': {
'title': title
}
}
}
result = self.es.search(self.index, self.doc_type, body=dsl_query)
if len(result.get('hits', {}).get('hits', [])):
return True
else:
return False
except Exception, e:
logger.error('check info existed failed for %s' % str(e))
return None
if __name__ == '__main__':
es_db = esConnector(url='localhost:9200', index='test', doc_type='finace')
| 3,157 |
__database__/config.py
|
Zxayler/WeVibinLove
| 0 |
2025779
|
import json
from PIL.ImageColor import getrgb
from discord import Embed, Color
with open("./__database__/config.json") as f:
data = json.load(f)
color = 0xE65C9C
token = data['token']
invite = "https://discord.com/api/oauth2/authorize?client_id=849177419520147496&permissions=17826832&scope=bot"
vote = "https://top.gg/bot/849177419520147496/vote"
def get_color(color):
r,g,b = getrgb(color)
return Color.from_rgb(r,g,b)
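# Example (added, illustrative): get_color("#E65C9C") parses the hex string with PIL's getrgb
# and returns the matching discord.Color via Color.from_rgb.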
| 427 |
packages/google/src/RPA/Cloud/Google/keywords/natural_language.py
|
pavelee/rpaframework
| 0 |
2025658
|
from typing import Dict, Optional
from google.cloud import language_v1
from . import LibraryContext, keyword, TextType, to_texttype
class NaturalLanguageKeywords(LibraryContext):
"""Keywords for Google Cloud Natural Language API"""
def __init__(self, ctx):
super().__init__(ctx)
self.service = None
@keyword(tags=["init", "natural language"])
def init_natural_language(
self,
service_account: str = None,
use_robocorp_vault: Optional[bool] = None,
token_file: str = None,
) -> None:
"""Initialize Google Cloud Natural Language client
:param service_account: file path to service account file
:param use_robocorp_vault: use credentials in `Robocorp Vault`
:param token_file: file path to token file
"""
self.service = self.init_service_with_object(
language_v1.LanguageServiceClient,
service_account,
use_robocorp_vault,
token_file,
)
@keyword(tags=["natural language"])
def analyze_sentiment(
self,
text: str = None,
text_file: str = None,
file_type: TextType = TextType.TEXT,
json_file: str = None,
lang: str = None,
) -> Dict:
"""Analyze sentiment in a text file
:param text: source text
:param text_file: source text file
:param file_type: type of text, PLAIN_TEXT (default) or HTML
:param json_file: json target to save result, defaults to None
:param lang: language code of the source, defaults to None
:return: analysis response
# For list of supported languages:
# https://cloud.google.com/natural-language/docs/languages
**Examples**
**Robot Framework**
.. code-block:: robotframework
${result}= Analyze Sentiment ${text}
${result}= Analyze Sentiment text_file=${CURDIR}${/}test.txt
"""
return self._analyze_handler(
text, text_file, file_type, json_file, lang, "sentiment"
)
@keyword(tags=["natural language"])
def classify_text(
self,
text: str = None,
text_file: str = None,
file_type: TextType = TextType.TEXT,
json_file: str = None,
lang: str = None,
) -> Dict:
"""Classify text
:param text: source text
:param text_file: source text file
:param file_type: type of text, PLAIN_TEXT (default) or HTML
:param json_file: json target to save result, defaults to None
:param lang: language code of the source, defaults to None
:return: classify response
# For list of supported languages:
# https://cloud.google.com/natural-language/docs/languages
**Examples**
**Robot Framework**
.. code-block:: robotframework
${result}= Classify Text ${text}
${result}= Classify Text text_file=${CURDIR}${/}test.txt
"""
return self._analyze_handler(
text, text_file, file_type, json_file, lang, "classify"
)
def _analyze_handler(
self, text, text_file, file_type, json_file, lang, analyze_method
):
file_type = to_texttype(file_type)
parameters = {"type_": file_type}
if text:
parameters["content"] = text
elif text_file:
with open(text_file, "r") as f:
parameters["content"] = f.read()
else:
raise AttributeError("Either 'text' or 'text_file' must be given")
if lang is not None:
parameters["language"] = lang
document = language_v1.Document(**parameters)
if analyze_method == "classify":
response = self.service.classify_text(document=document)
elif analyze_method == "sentiment":
# Available values: NONE, UTF8, UTF16, UTF32
# encoding_type = enums.EncodingType.UTF8
response = self.service.analyze_sentiment(
document=document, encoding_type="UTF8"
)
self.write_json(json_file, response)
return response
| 4,186 |
lemur/certificates/utils.py
|
jramosf/lemur
| 0 |
2024750
|
"""
Utils to parse certificate data.
.. module: lemur.certificates.hooks
:platform: Unix
:copyright: (c) 2019 by <NAME>, see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from marshmallow.exceptions import ValidationError
def get_sans_from_csr(data):
"""
Fetches SubjectAlternativeNames from CSR.
Works with any kind of SubjectAlternativeName
:param data: PEM-encoded string with CSR
:return: List of LemurAPI-compatible subAltNames
"""
sub_alt_names = []
try:
request = x509.load_pem_x509_csr(data.encode('utf-8'), default_backend())
except Exception:
raise ValidationError('CSR presented is not valid.')
try:
alt_names = request.extensions.get_extension_for_class(x509.SubjectAlternativeName)
for alt_name in alt_names.value:
sub_alt_names.append({
'nameType': type(alt_name).__name__,
'value': alt_name.value
})
except x509.ExtensionNotFound:
pass
return sub_alt_names
| 1,180 |
laundrymeter/telegram_bot.py
|
M4a1x/laundrymeter
| 0 |
2025997
|
# -*- coding: utf-8 -*-
"""Telegram Bot responding to commands different commands.
Defines callbacks to Telegram commands, providing a subset of the REST Api.
Authentication is provided via a start token that has to be obtained via
the REST API first.
"""
from __future__ import annotations
from flask import g
from sqlalchemy import desc
from telegram.ext import Updater, CommandHandler
from functools import wraps
import atexit
from .models import User, WashingMachine, WashingMachineSchema
def telegram_auth_required(func):
"""Function wrapper to provide authentication on telegram commands."""
@wraps(func)
def wrapped(bot, update, *args, **kwargs):
with app.app_context():
user = User.query.filter_by(telegram_chat_id=update.message.chat_id).one()
if not user:
app.logger.info('Unauthorized Telegram message received from %d', update.message.chat_id)
update.message.reply_text("Unauthorized. Please authenticate first.")
return
g.user = user
app.logger.debug('User %s (%s) successfully authenticated command call', user.username, user.name)
# Return inside app_context() to use same app context in called function. (to be able to use g)
return func(bot, update, *args, **kwargs)
return wrapped
def start(bot, update, args):
"""Telegram callback for the '/start' command send as first command in every chat."""
with app.app_context():
if not args:
app.logger.debug("A user %s (%s) tried to start a chat with the telegram bot, without providing a token.",
update.message.from_user.full_name,
update.message.from_user.username)
update.message.reply_text("Missing token. Please call `/start \{token\}` with the correct token to authenticate.")
return
update.message
user = User.verify_telegram_token(token=args[0], chat_id=update.message.chat_id)
if not user:
app.logger.debug("A user %s (%s) provided an invalid token for authentication when calling /start",
update.message.from_user.full_name,
update.message.from_user.username)
update.message.reply_text("Invalid token. Please call `/start \{token\}` with the correct token to authenticate.")
return
app.logger.debug('User %s (%s) successfully authenticated via telegram.', user.username, user.name)
update.message.reply_text("Successfully authenticated!")
@telegram_auth_required
def notify(bot, update):
"""Telegram callback for `/notify` to regsiter a user to be notified when the laundry is ready."""
try:
g.user.register_notification(telegram=True)
app.logger.debug('User %s (%s) successfully called notify(). He will be notified when the laundry is ready.', g.user.username, g.user.name)
update.message.reply_text("You will be notified as soon as the laundry is ready.")
except Exception as e:
update.message.reply_text("There was an error registering you.")
app.logger.exception("User %s (%s) raised an error on notify(). He couldn't be added for notification.", g.user.username, g.user.name)
@telegram_auth_required
def status(bot, update):
"""Telegram callback for `/status` to query the current simple status of the washing machine."""
try:
washing_machine = WashingMachine.query.order_by(desc('timestamp')).first()
app.logger.debug('User %s (%s) successfully called status(). Current Washing Machine status was returned: %s', g.user.username, g.user.name, washing_machine.running)
update.message.reply_text("The Washing Machine is currently " + ("Running" if washing_machine.running else "Stopped"))
except Exception as e:
app.logger.exception("User %s (%s) raised an exception on status(). Couldn't retrieve it from the Database.", g.user.username, g.user.name)
update.message.reply_text("Couldn't retrieve the current machine status.")
@telegram_auth_required
def debug(bot, update):
"""Telegram callback for `/debug` to query the current extended status of the washing machine."""
try:
washing_machine = WashingMachine.query.order_by(desc('timestamp')).first()
wm_debug_schema = WashingMachineSchema()
app.logger.debug('User %s (%s) successfully called debug(). Current Washing Machine status was returned: %s', g.user.username, g.user.name, wm_debug_schema)
update.message.reply_text(wm_debug_schema.dumps(washing_machine))
except Exception as e:
app.logger.exception("User %s (%s) raised an exception on debug(). Couldn't retrieve it from the Database.", g.user.username, g.user.name)
update.message.reply_text("Couldn't retrieve the current machine status.")
def init_app(flask_app: Flask) -> None:
"""Initializing the telegram app with app context and registering callbacks"""
flask_app.logger.debug('Initializing Telegram Bot...')
global updater
global app
updater = Updater(flask_app.config['TELEGRAM_BOT_TOKEN'])
app = flask_app
updater.dispatcher.add_handler(CommandHandler('notify', notify))
updater.dispatcher.add_handler(CommandHandler('start', start, pass_args=True))
updater.dispatcher.add_handler(CommandHandler('status', status))
updater.dispatcher.add_handler(CommandHandler('debug', debug))
flask_app.logger.debug('Starting Telegram Message Poller...')
updater.start_polling()
atexit.register(lambda: updater.stop())
flask_app.logger.debug('Finished setting up Telegram Bot.')
| 5,690 |
research/experiment1/log/teacher-fmeasure.py
|
rusucosmin/bsc
| 1 |
2024795
|
#!/usr/local/bin/python3
import json
with open("2.txt") as f:
lines = f.readlines()
mat = "".join(lines[:-1])
mat += lines[-1][:-1]
mat = mat[:-1]
a = json.loads(mat)
prec = [0] * 10
rec = [0] * 10
for c in range(10):
prec[c] = a[c][c] / sum(a[c])
rec[c] = a[c][c] / sum([a[x][c] for x in range(10)])
acc = sum([a[c][c] for c in range(10)]) / sum([sum(a[i]) for i in range(10)])
total_prec = sum(prec) / 10
total_rec = sum(rec) / 10
fmeasure = 2 * total_prec * total_rec / (total_prec + total_rec)
# print(acc)
# print(total_prec)
# print(total_rec)
print(fmeasure)
| 608 |
WLANderlust/captiveportals/impl/Waveloc.py
|
rrooggiieerr/WLANderlust.py
| 1 |
2025914
|
from WLANderlust.captiveportals import CaptivePortalSolverImpl
class Waveloc(CaptivePortalSolverImpl):
name = "Waveloc"
def detect(self, bssid, ssid, location = None, body = None):
# Placeholder for future implementation
return False
def solve(self, bssid, ssid, location, body):
# Placeholder for future implementation
return False
| 358 |
gollyx_python/counters.py
|
golly-splorts/gollyx-python
| 0 |
2026012
|
class XCounterStore(object):
"""
Small utilty class used by XYCounterStore
to store counters for a set of x values.
Wraps a hash map of x values to integer counts.
"""
__slots__ = ['mapp']
def __init__(self):
self.mapp = dict()
def __repr__(self):
s = "["
s += ", ".join([f"{j}: {self.mapp[j]}" for j in self.sorted_values()])
s += "]"
return s
def accumulate(self, x):
if x not in self.mapp:
self.mapp[x] = 1
else:
self.mapp[x] += 1
def filter_lohi(self, lo, hi):
for x in list(self.mapp.keys()):
if not (self.mapp[x] >= lo and self.mapp[x] <= hi):
del self.mapp[x]
def filter_values(self, bag):
for x in list(self.mapp.keys()):
if not (self.mapp[x] in bag):
del self.mapp[x]
def count(self, x):
if x not in self.mapp:
return 0
else:
return self.mapp[x]
def sorted_values(self):
return sorted(list(self.mapp.keys()))
class XYCounterStore(object):
"""
Small utility class storing counters for (x,y) values.
Wraps a hash map of y values to x counter stores.
"""
__slots__ = ['mapp']
def __init__(self):
self.mapp = dict()
def __repr__(self):
return str(self.mapp)
def accumulate(self, x, y):
if y not in self.mapp:
self.mapp[y] = XCounterStore()
self.mapp[y].accumulate(x)
def count(self, x, y):
if y not in self.mapp:
return 0
xstore = self.mapp[y]
if x not in xstore.mapp:
return 0
count = xstore.mapp[x]
return count
def sorted_values(self):
return sorted(list(self.mapp.keys()))
def sorted_xvalues(self, y):
"""Return a list of sorted x values for the given y"""
if y not in self.mapp:
return None
xstore = self.mapp[y]
return xstore.sorted_values()
def filter_lohi(self, lo, hi):
"""Filter all counts in this XYCounterStore to values that are between lo and hi"""
for y in list(self.mapp.keys()):
self.mapp[y].filter_lohi(lo, hi)
x_values = self.mapp[y].mapp
if len(x_values) == 0:
del self.mapp[y]
def filter_values(self, bag):
"""Filter all counts in this XYCounterStore to values that are between lo and hi"""
for y in list(self.mapp.keys()):
self.mapp[y].filter_values(bag)
x_values = self.mapp[y].mapp
if len(x_values) == 0:
del self.mapp[y]
class DeadNeighborCounter(XYCounterStore):
"""
Used to count the number of times a dead cell has been a neighbor
of a live cell (used in determining which cells come alive).
Wraps a hash map of (x, y) locations to integer counters.
"""
pass
class AliveNeighborCounter(XYCounterStore):
"""
Used to count the number of live neighbors of a given live cell.
"""
pass
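# Minimal usage sketch (added, illustrative; not part of the original module):
# dead = DeadNeighborCounter()
# dead.accumulate(3, 4)
# dead.accumulate(3, 4)
# dead.count(3, 4)        # -> 2
# dead.filter_lohi(2, 8)  # keep only (x, y) cells whose count is between 2 and 8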
| 3,082 |
events/scheduler/tests.py
|
totaki/learn-python-pnz-00
| 0 |
2026059
|
from django.test import TestCase
from scheduler.task import Task
from fetcher.fetcher import Fetcher
from scheduler.save_event import save_event
from parsers.bar60 import BarParser
from parsers.rostokhall_parser import RostokhallParser
from datetime import datetime
PARSER_MAP = {
'parsers': [RostokhallParser(), BarParser()]
}
class BaseTaskTest(TestCase):
def setUp(self):
self.task = Task(30, Fetcher(PARSER_MAP['parsers']), save_event)
self.datetime = datetime.utcnow()
def test_need_run(self):
"""Тест проверки работы функции need_run, если текущее время больше записанного
в self.task.next - возвращается True"""
self.assertEqual(self.task.need_run(self.datetime), True)
def test_update_next(self):
"""Тест проверки работы функции update_next, всё верно, если в результате значение,
self.task.next увеличивается на self.timedelta"""
seconds = 0
self.task.next = datetime.strptime(
f'2019-12-25 12:20:{seconds}', '%Y-%m-%d %H:%M:%S'
)
self.task.update_next()
plus_delta = self.task.next
result = datetime.strptime(
f'2019-12-25 12:20:{seconds + self.task.timeout}', '%Y-%m-%d %H:%M:%S'
)
self.assertEqual(plus_delta, result)
| 1,297 |
master/tutorials/linkedin-placements/linkedin-practice-bitwise-and/solution.py
|
bitnot/hackerrank-solutions
| 0 |
2025154
|
#!/bin/python3
import sys
t = int(input().strip())
for t0 in range(t):
n, k = [int(tmp) for tmp in input().strip().split(' ')]
a = k-1
b = ~a & -~a #least significant 0 bit to set
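# Added note (not in the original): with a = k-1, ~a & -~a (i.e. ~a & (a+1)) isolates the lowest
# zero bit of a, e.g. a = 0b1011 gives 0b0100. Then a | b is the smallest number greater than a
# that still contains a as a submask, i.e. the smallest possible partner whose AND with a is a.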
if a | b > n:
print(a - 1)
else:
print(a)
| 265 |
duro/scheduler/checks.py
|
TargetProcess/duro
| 4 |
2025728
|
from abc import ABC, abstractmethod
from typing import Optional, Iterable
from utils.file_utils import (
load_query,
list_processors,
load_select_query,
list_tests,
test_postfix,
load_ddl_query,
)
class Check(ABC):
def __init__(self, views_path: str):
self.views_path = views_path
@abstractmethod
def check(self, table):
pass
@abstractmethod
def _list_tables(self) -> Iterable[str]:
pass
@property
@abstractmethod
def message(self) -> str:
pass
def run(self) -> Optional[str]:
tables = self._list_tables()
failures = []
for table in tables:
try:
check_result = self.check(table)
except (OSError, ValueError):
failures.append(table)
continue
if not check_result:
failures.append(table)
if not failures:
return None
return f'{self.message}: {", ".join(failures)}.'
class TestsWithoutQuery(Check):
def _list_tables(self):
tests = list_tests(self.views_path)
return (t.replace(f"{test_postfix}", "") for t in tests)
def check(self, table):
return load_query(self.views_path, table)
@property
def message(self) -> str:
return "Some tables have tests, but not a SELECT query"
class ProcessorsWithoutSelect(Check):
def _list_tables(self) -> Iterable[str]:
return list_processors(self.views_path)
def check(self, table):
return load_select_query(self.views_path, table)
@property
def message(self) -> str:
return "Some processors don’t have a SELECT query"
class ProcessorsWithoutDDL(Check):
def _list_tables(self) -> Iterable[str]:
return list_processors(self.views_path)
def check(self, table):
return load_ddl_query(self.views_path, table)
@property
def message(self) -> str:
return "Some processors don’t have a CREATE TABLE query"
enabled_checks = (TestsWithoutQuery, ProcessorsWithoutSelect, ProcessorsWithoutDDL)
def find_tables_with_missing_files(views_path: str) -> Optional[str]:
check_results = [check(views_path).run() for check in enabled_checks]
failed = (result for result in check_results if result)
return "\n".join(failed)
| 2,345 |
libcardet/__init__.py
|
0x0is1/cardet
| 1 |
2026062
|
# Author: https://github.com/0x0is1
import urllib3
# Disable the SSL warning, as the host does not support SSL
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
| 170 |
tools/variables.py
|
Open-Digital-Twin/scholar-fetch
| 0 |
2026064
|
# The ID and range of a sample spreadsheet.
SPREADSHEET_ID = ''
UPLOAD_FOLDER_ID = ''
RANGE_NAME = '\'pagename\'!$B2:O'
SCRAPER_API_KEY = ''
| 146 |
src/utils/errors.py
|
cgDeepLearn/pyserver
| 0 |
2024637
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : errors.py
# @Author : cgDeepLearn
# @Create Date : 2020/11/12 2:53 PM
class gServerError(Exception):
pass
class PostError(Exception):
'''
    Not a POST request, or post_data is empty
'''
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class ParameterError(gServerError):
'''
    Parameter error
'''
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class HttpPathError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class ApplyDetailLack(gServerError):
'''
    A required parameter is missing when fetching apply detail
'''
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class ServerProcessError(gServerError):
def __init__(self, desc):
self.desc = desc
def __str__(self):
return "错误: [%s] " % (self.desc)
| 1,040 |
2016/day_04/main.py
|
lbellomo/adventure_of_code
| 0 |
2025891
|
from typing import List, Iterator, Tuple, Optional
from itertools import cycle
from functools import partial
from collections import Counter
from string import ascii_lowercase
RawData = List[str]
def read_data(path: str) -> RawData:
with open(path) as f:
return [line.strip() for line in f.readlines()]
def process_data(
raw_data: RawData, remove_dash: bool = True
) -> Iterator[Tuple[str, int, str]]:
for line in raw_data:
letters, other = line.rsplit("-", maxsplit=1)
if remove_dash:
letters = letters.replace("-", "")
ids, checksum = other[:-1].split("[")
yield letters, int(ids), checksum
def check_checksum(letters: str, checksum: str) -> bool:
counter = Counter(letters)
    # sort alphabetically first, then by count in descending order (the sort is stable)
sorted_counter = sorted(
sorted(counter.items(), key=lambda x: x[0]), key=lambda x: x[1], reverse=True
)
    # keep only the first 5 (the checksum length)
most_common = "".join(i[0] for i in sorted_counter[:5])
if most_common == checksum:
return True
return False
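# Example (matches the first test case at the bottom of this file): for
# "aaaaabbbzyx" the counts are a:5, b:3, x:1, y:1, z:1; sorting alphabetically
# and then by count with a stable sort gives "abxyz", which equals the
# checksum, so that room is real.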
def solve_a(raw_data: RawData) -> int:
sum_ids = 0
for letters, ids, checksum in process_data(raw_data):
if check_checksum(letters, checksum):
sum_ids += ids
return sum_ids
def decode_ch(ch_input: str, ids: int) -> str:
ch = ""
if ch_input == "-":
cycle_letters = cycle("- ")
else:
cycle_letters = cycle(ascii_lowercase)
while ch != ch_input:
ch = next(cycle_letters)
for _ in range(ids):
ch = next(cycle_letters)
return ch
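# Example: decode_ch('q', 343) steps the alphabet 343 places past 'q'; since
# 343 % 26 == 5, 'q' decodes to 'v' (the first letter of "very encrypted name"
# in the test below). A '-' cycles over "- ", so it becomes a space when the
# sector id is odd and stays a dash when it is even.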
def decode_letters(letters: str, ids: int) -> str:
partial_decode_ch = partial(decode_ch, ids=ids)
return "".join(map(partial_decode_ch, letters))
def solve_b(raw_data: RawData) -> Optional[int]:
for letters, ids, _ in process_data(raw_data, remove_dash=False):
real_name = decode_letters(letters, ids)
if real_name == "northpole-object-storage":
return ids
return None
if __name__ == "__main__":
raw_data = read_data("input.txt")
sol_a = solve_a(raw_data)
print(f"sol a: {sol_a}")
sol_b = solve_b(raw_data)
print(f"sol b: {sol_b}")
def test_check_checksum():
test_raw_data = [
"aaaaa-bbb-z-y-x-123[abxyz]",
"a-b-c-d-e-f-g-h-987[abcde]",
"not-a-real-room-404[oarel]",
"totally-real-room-200[decoy]",
]
test_result = [True, True, True, False]
for raw_data, result in zip(test_raw_data, test_result):
letters, _, checksum = next(process_data([raw_data]))
assert check_checksum(letters, checksum) == result
def test_decode_letters():
letters = "qzmt-zixmtkozy-ivhz"
ids = 343
assert decode_letters(letters, ids) == "very encrypted name"
| 2,808 |
Asymptotic Properties of cLD/genedistance.py
|
QingrunZhangLab/cLD
| 0 |
2024782
|
import time
import os
import pandas as pd
#This code is to calculate the gene distance based on the cLD file.
openfile = open(r'/PATH/namedcld.txt','r') #named cLD file
openfile2 = open(r'/PATH/genemid.txt','r') #gene mid point file
outfile = '/PATH/genedistance.txt' #gene distance file
outfile2 = '/PATH/distseq.txt' #distance sequence file, a string
with open(outfile,'w',newline='') as fdo:
fdo.write('')
with open(outfile2,'w',newline='') as fdo:
fdo.write('')
line = openfile2.readline()
print(line)
print('process start')
mid = []
while line:
mid.append(float(line))
line = openfile2.readline()
np = len(mid)
openfile.seek(0,0)
totaldist = []
line = openfile.readline()
line_list = line.split(',')
num = 0
while line:
newline = [line_list[0],line_list[1],line_list[2],line_list[3],line_list[4]]
for i in range(num+1,np):
newline.append(str(float(mid[i])-float(mid[num])))
totaldist.append(str(float(mid[i])-float(mid[num])))
str_cur = ','.join(newline)
with open(outfile,'a',newline='') as fdo:
fdo.write(str_cur+'\n')
num = num + 1
line = openfile.readline()
line_list = line.split(',')
str_cur = ','.join(totaldist)
with open(outfile2,'a',newline='') as fdo:
fdo.write(str_cur+'\n')
print('process over')
openfile.close()
openfile2.close()
| 1,378 |
api_site/src/api_x/zyt/user_mapping/auth.py
|
webee/pay
| 1 |
2025258
|
# coding=utf-8
from api_x.config import etc as config
from api_x.zyt.user_mapping import get_channel_by_name
from pytoolbox.util.sign import SignType, Signer
from pytoolbox.util import public_key, aes
from pytoolbox.util.strings import gen_rand_str
from api_x.constant import TransactionType
def add_sign_for_params(channel_name, params, sign_type=SignType.RSA):
if params is None:
return params
channel = get_channel_by_name(channel_name)
    # The main purpose here is signing, so only lvye_pri_key or md5_key is needed
signer = Signer('key', 'sign', channel.md5_key, config.LVYE_PRI_KEY, None)
    # Used to encrypt lvye_pub_key
channel_pub_key = public_key.loads_b64encoded_key(channel.public_key)
params['channel_name'] = channel_name
params['sign_type'] = sign_type
params['sign'] = signer.sign(params, sign_type)
    # This AES key is generated dynamically for every request
lvye_aes_key = gen_rand_str(16)
params['_lvye_pub_key'] = aes.encrypt_to_base64(config.LVYE_PUB_KEY, lvye_aes_key)
params['_lvye_aes_key'] = channel_pub_key.encrypt_to_base64(lvye_aes_key)
return params
def vas_payment_is_enabled(payment_entity, vas_name):
from api_x.zyt.vas import NAME
if NAME == vas_name:
channel = get_channel_by_name(payment_entity.channel_name)
        # Only payment transactions are supported
return channel.zyt_pay_enabled and payment_entity.source in [TransactionType.PAYMENT]
return True
| 1,352 |
numba/core/targetconfig.py
|
ashrielbrian/numba
| 3 |
2026051
|
"""
This module contains utils for manipulating target configurations such as
compiler flags.
"""
from types import MappingProxyType
from numba.core import utils
class Option:
"""An option to be used in ``TargetConfig``.
"""
__slots__ = "_type", "_default", "_doc"
def __init__(self, type, *, default, doc):
"""
Parameters
----------
type :
Type of the option value. It can be a callable.
The setter always calls ``self._type(value)``.
default :
The default value for the option.
doc : str
Docstring for the option.
"""
self._type = type
self._default = default
self._doc = doc
@property
def type(self):
return self._type
@property
def default(self):
return self._default
@property
def doc(self):
return self._doc
class _MetaTargetConfig(type):
"""Metaclass for ``TargetConfig``.
When a subclass of ``TargetConfig`` is created, all ``Option`` defined
as class members will be parsed and corresponding getters, setters, and
    deleters will be inserted.
"""
def __init__(cls, name, bases, dct):
"""Invoked when subclass is created.
Insert properties for each ``Option`` that are class members.
All the options will be grouped inside the ``.options`` class
attribute.
"""
# Gather options from base classes and class dict
opts = {}
# Reversed scan into the base classes to follow MRO ordering such that
# the closest base class is overriding
for base_cls in reversed(bases):
opts.update(base_cls.options)
opts.update(cls.find_options(dct))
        # Store the options into class attribute as a read-only mapping.
cls.options = MappingProxyType(opts)
# Make properties for each of the options
def make_prop(name, option):
def getter(self):
return self._values.get(name, option.default)
def setter(self, val):
self._values[name] = option.type(val)
def delter(self):
del self._values[name]
return property(getter, setter, delter, option.doc)
for name, option in cls.options.items():
setattr(cls, name, make_prop(name, option))
def find_options(cls, dct):
"""Returns a new dict with all the items that are a mapping to an
``Option``.
"""
return {k: v for k, v in dct.items() if isinstance(v, Option)}
class _NotSetType:
def __repr__(self):
return "<NotSet>"
_NotSet = _NotSetType()
class TargetConfig(metaclass=_MetaTargetConfig):
"""Base class for ``TargetConfig``.
Subclass should fill class members with ``Option``. For example:
>>> class MyTargetConfig(TargetConfig):
>>> a_bool_option = Option(type=bool, default=False, doc="a bool")
>>> an_int_option = Option(type=int, default=0, doc="an int")
    The metaclass will insert properties for each ``Option``. For example:
>>> tc = MyTargetConfig()
>>> tc.a_bool_option = True # invokes the setter
>>> print(tc.an_int_option) # print the default
"""
def __init__(self, copy_from=None):
"""
Parameters
----------
copy_from : TargetConfig or None
if None, creates an empty ``TargetConfig``.
Otherwise, creates a copy.
"""
self._values = {}
if copy_from is not None:
assert isinstance(copy_from, TargetConfig)
self._values.update(copy_from._values)
def __repr__(self):
# NOTE: default options will be placed at the end and grouped inside
# a square bracket; i.e. [optname=optval, ...]
args = []
defs = []
for k in self.options:
msg = f"{k}={getattr(self, k)}"
if not self.is_set(k):
defs.append(msg)
else:
args.append(msg)
clsname = self.__class__.__name__
return f"{clsname}({', '.join(args)}, [{', '.join(defs)}])"
def __hash__(self):
return hash(tuple(sorted(self.values())))
def __eq__(self, other):
if isinstance(other, TargetConfig):
return self.values() == other.values()
else:
return NotImplemented
def __ne__(self, other):
eq = self.__eq__(other)
if eq is NotImplemented:
return NotImplemented
else:
return eq
def values(self):
"""Returns a dict of all the values
"""
return {k: getattr(self, k) for k in self.options}
def is_set(self, name):
"""Is the option set?
"""
self._guard_option(name)
return name in self._values
def discard(self, name):
"""Remove the option by name if it is defined.
After this, the value for the option will be set to its default value.
"""
self._guard_option(name)
self._values.pop(name, None)
def inherit_if_not_set(self, name, default=_NotSet):
"""Inherit flag from ``ConfigStack``.
Parameters
----------
name : str
Option name.
default : optional
When given, it overrides the default value.
It is only used when the flag is not defined locally and there is
no entry in the ``ConfigStack``.
"""
self._guard_option(name)
if not self.is_set(name):
cstk = utils.ConfigStack()
if cstk:
# inherit
top = cstk.top()
setattr(self, name, getattr(top, name))
elif default is not _NotSet:
setattr(self, name, default)
def copy(self):
"""Clone this instance.
"""
return type(self)(self)
def summary(self):
"""Returns a ``str`` that summarizes this instance.
In contrast to ``__repr__``, only options that are explicitly set will
be shown.
"""
args = []
for k in self.options:
msg = f"{k}={getattr(self, k)}"
if self.is_set(k):
args.append(msg)
clsname = self.__class__.__name__
return f"{clsname}({', '.join(args)})"
def _guard_option(self, name):
if name not in self.options:
msg = f"{name!r} is not a valid option for {type(self)}"
raise ValueError(msg)
| 6,562 |
code/seir_model.py
|
ion-g-ion/paper-cme-tt
| 0 |
2025364
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 2 22:31:30 2020
@author: ion
"""
import tensorflow as tf
import t3f
import numpy as np
import matplotlib.pyplot as plt
import tikzplotlib  # needed for the tikzplotlib.save(...) calls in the plots section below
from CME import CME,Gillespie
import timeit
import scipy.integrate
import numba
import scipy.sparse
from tt_extra import mat_to_tt
import tt
import tt.amen
import tt.eigb
from ttInt import ttInt
#%% Reaction coefficients
# define reaction
rates = np.array([0.1,0.5,1.0,0.01,0.01,0.01,0.4])
Pre =np.array( [[1,0,1,0],[0,1,0,0],[0,0,1,0],[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,0]])
Post = np.array([[0,1,1,0],[0,0,1,0],[1,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,1],[1,0,0,0]])
Props = [ lambda x: x[:,0]*x[:,2] , lambda x: x[:,1] , lambda x: x[:,2] , lambda x: x[:,0] , lambda x: x[:,1] , lambda x: x[:,2] , lambda x: x[:,0]*0+1 ]
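# Reading of the reaction channels encoded by (rates, Pre, Post, Props) above,
# with state ordering (S, E, I, R) as in the plot legends below (an
# interpretation added for readability, not part of the original script):
# S + I -> E + I (0.1*S*I), E -> I (0.5*E), I -> S (1.0*I),
# removal of S and of E (0.01 each), I -> R (0.01*I), constant influx of S (0.4).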
# construct the model and the CME operator
N = 4*[80] # state truncation
mdl = CME(N, Pre,Post,rates,Props) # model
# mdl = CME(N, np.array( [[1,0,1,0]]),np.array([[0,1,1,0]]), np.array([0.1]),[ lambda x: x[:,0]*x[:,2]])
Ns = 20000
x0 = np.array([50,4,0,0])
sigma = 1.0
p1 = np.exp(-0.5*(x0[0]-np.arange(N[0]))**2/sigma)
ch1 = np.random.choice(np.arange(N[0]),(Ns,1),p=p1/np.sum(p1))
p2 = np.exp(-0.5*(x0[1]-np.arange(N[1]))**2/sigma)
ch2 = np.random.choice(np.arange(N[1]),(Ns,1),p=p2/np.sum(p2))
p3 = np.exp(-0.5*(x0[2]-np.arange(N[2]))**2/sigma)
ch3 = np.random.choice(np.arange(N[2]),(Ns,1),p=p3/np.sum(p3))
p4 = np.exp(-0.5*(x0[3]-np.arange(N[3]))**2/sigma)
ch4 = np.random.choice(np.arange(N[3]),(Ns,1),p=p4/np.sum(p4))
#%% Monte Carlo
# time scale
Nt = 70
dT = 10.0/100
time_sample = np.arange(Nt+1) * dT
# draw sample
sample = mdl.ssa(np.concatenate((ch1,ch2,ch3,ch4),axis=1), time_sample,Ns )
# plot sample
plt.figure()
plt.title('Sample')
plt.plot(time_sample,sample[:,0,0],'b')
plt.plot(time_sample,sample[:,1,0],'orange')
plt.plot(time_sample,sample[:,2,0],'r')
plt.plot(time_sample,sample[:,3,0],'g')
plt.legend(['Susceptible','Exposed','Infected','Recovered'])
plt.ylabel(r'#individuals')
plt.xlabel(r'$t$ [d]')
# plot sample
plt.figure()
plt.title('Means')
plt.plot(time_sample,np.mean(sample[:,0,:],1),'b')
plt.plot(time_sample,np.mean(sample[:,1,:],1),'orange')
plt.plot(time_sample,np.mean(sample[:,2,:],1),'r')
plt.plot(time_sample,np.mean(sample[:,3,:],1),'g')
plt.legend(['Susceptible','Exposed','Infected','Recovered'])
plt.ylabel(r'#individuals')
plt.xlabel(r'$t$ [d]')
#%% Integrate ODE
A_tt = mdl.construct_generator_tt()
# A_tt = tt.reshape(A_tt,np.array(2*[[15]*8]).transpose())
P = tt.kron(tt.kron(tt.tensor(p1),tt.tensor(p2)),tt.kron(tt.tensor(p3),tt.tensor(p4)))
P = P * (1/tt.sum(P))
P0 = P
# P = tt.reshape(P,[15]*8)
x_S = tt.kron(tt.tensor(np.arange(N[0])),tt.ones(N[1:]))
x_E = tt.kron(tt.ones([N[0]]),tt.kron(tt.tensor(np.arange(N[1])),tt.ones(N[2:])))
x_I = tt.kron(tt.ones(N[:2]),tt.kron(tt.tensor(np.arange(N[2])),tt.ones([N[3]])))
x_R = tt.kron(tt.ones(N[:3]),tt.tensor(np.arange(N[3])))
epsilon = 1e-10
rmax = 30
#%% reference ode solution
# print('Reference...')
# tme_ode45 = timeit.time.time()
# mdl.construct_generator2(to_tf=False)
# Gen = mdl.gen
# def func(t,y):
# print(t)
# return Gen.dot(y)
# # solve CME
# print('ODE solver...')
# res = scipy.integrate.solve_ivp(func,[0,Nt*dT],P0.full().flatten(),t_eval=[0,Nt*dT])
# Pt = res.y.reshape(N+[-1])
# tme_ode45 = timeit.time.time() - tme_ode45
# P_ref = Pt[:,:,:,:,-1]
print('Loading reference....')
P_mc = np.load('./reference_ode.dat',allow_pickle = True)
P_ref = P_mc
#%% TT
print('TT integration...')
fwd_int = ttInt(A_tt, epsilon = 1e-6, N_max = 64, dt_max = 1e-1,method='crank–nicolson')
time = 0.0
tme_total = timeit.time.time()
Pms_SE = []
Pms_EI = []
for i in range(Nt):
tme = timeit.time.time()
P = fwd_int.solve(P, dT, intervals = 4)
tme = timeit.time.time() - tme
P = P.round(1e-10,100)
P = P * (1/tt.sum(P))
time += dT
Pms_SE.append(tt.sum(tt.sum(P,3),2).full())
Pms_EI.append(tt.sum(tt.sum(P,0),2).full())
print('k = ',i,'/',Nt,' at time ',time, ' rank ',P.r,' time ',tme)
tme_total = timeit.time.time()-tme_total
# print('TT time ',tme_total,' vs ODE solver time ',tme_ode45)
Pend = P.full()
residual = (Pend-P_ref)[:60,:60,:60,:60]
# residual = residual[:40,:40,:40,:40]
print('Mean rel error ',np.mean(np.abs(residual))/np.max(np.abs(Pend)))
print('Max rel error ',np.max(np.abs(residual))/np.max(np.abs(Pend)))
P_ref[66:,:,:,:] = 0
P_ref[:,66:,:,:] = 0
P_ref[:,:,66:,:] = 0
P_ref[:,:,:,66:] = 0
# P = tt.reshape(P,N)
# P1_end = np.zeros((N[0]))
# P2_end = np.zeros((N[1]))
# P3_end = np.zeros((N[2]))
# P4_end = np.zeros((N[3]))
# for i in range(Ns):
# P1_end[sample[-1,0,i]] += 1
# P2_end[sample[-1,1,i]] += 1
# P3_end[sample[-1,2,i]] += 1
# P4_end[sample[-1,3,i]] += 1
# P1_end = P1_end / np.sum(P1_end)
# P2_end = P2_end / np.sum(P2_end)
# P3_end = P3_end / np.sum(P3_end)
# P4_end = P4_end / np.sum(P4_end)
# P1_end_tt = tt.sum(tt.sum(tt.sum(P,1),1),1)
# P2_end_tt = tt.sum(tt.sum(tt.sum(P,0),1),1)
# P3_end_tt = tt.sum(tt.sum(tt.sum(P,0),0),1)
# P4_end_tt = tt.sum(tt.sum(tt.sum(P,0),0),0)
# plt.figure()
# plt.plot(np.arange(N[0]),P1_end)
# plt.plot(np.arange(N[0]),P1_end_tt.full())
# plt.title('Marginal PMF for S')
# plt.figure()
# plt.plot(np.arange(N[1]),P2_end)
# plt.plot(np.arange(N[1]),P2_end_tt.full())
# plt.title('Marginal PMF for E')
# plt.figure()
# plt.plot(np.arange(N[2]),P3_end)
# plt.plot(np.arange(N[2]),P3_end_tt.full())
# plt.title('Marginal PMF for I')
# plt.figure()
# plt.plot(np.arange(N[3]),P4_end)
# plt.plot(np.arange(N[3]),P4_end_tt.full())
# plt.title('Marginal PMF for R')
# plt.figure()
# time = 0.0
# for img in Pms_SE:
# plt.clf()
# plt.imshow(img,origin='lower')
# plt.axis('equal')
# plt.xlabel('Susceptible')
# plt.ylabel('Exposed')
# plt.colorbar()
# plt.pause(0.1)
# time+=dT
# print(time)
# # Pref = P.full().reshape([-1,1])
# time_total = 0
# Exp = x0.reshape([1,-1])
# import sys
# sys.exit()
# tme = timeit.time.time()
# h = time[i] - time[i-1]
# tme = timeit.time.time()
# k1 = tt.matvec(Att,P).round(epsilon,rmax)
# k2 = tt.matvec(Att,( P + 0.5 * h * k1 ).round(epsilon,rmax)).round(epsilon,rmax)
# k3 = tt.matvec(Att,( P + 0.5 * h * k2 ).round(epsilon,rmax)).round(epsilon,rmax)
# k4 = tt.matvec(Att,( P + h * k3 ).round(epsilon,rmax)).round(epsilon,rmax)
# P = (P + (1/6)*k1 + (1/3)*k2 + (1/3)*k3 + (1/6)*k4).round(epsilon,rmax)
# P = P*(1/tt.sum(P))
# time_total += h
# tme = timeit.time.time() - tme
# E = np.array([[tt.sum(P*x_S),tt.sum(P*x_E),tt.sum(P*x_I),tt.sum(P*x_R)]])
# Exp = np.concatenate((Exp,E))
# print('%4d/%4d time %5.3f s' %(i+1,Nt,tme),' rank ',P.r,flush = True)
# plt.figure()
# plt.title('Means')
# plt.plot(time[:100],Exp)
# plt.legend(['S','E','I','R'])
#%% MC
# print('Monte Carlo....')
# N1 = 100000
# N2 = 100
# P_mc = np.zeros(N)
# for i in range(N2):
# print(i)
# sample = mdl.ssa(np.concatenate((ch1,ch2,ch3,ch4),axis=1), np.arrat([0,Nt*dT]),N1 )
# for k in range(N1):
# P_mc[sample[-1,0,k],sample[-1,1,k],sample[-1,2,k],sample[-1,3,k]] += 1
# P_mc = P_mc / np.sum(P_mc)
#%% plots
plt.figure()
plt.imshow(Pend.sum(2).sum(2).transpose(),origin='lower')
plt.colorbar()
plt.xlabel(r'$x_1$')
plt.ylabel(r'$x_2$')
tikzplotlib.save('./plots/SE_marginal.tex')
plt.figure()
plt.imshow((Pend-P_ref).sum(2).sum(2).transpose(),origin='lower')
plt.colorbar()
plt.xlabel(r'$x_1$')
plt.ylabel(r'$x_2$')
tikzplotlib.save('./plots/SE_marginal_err.tex')
plt.figure()
plt.imshow(Pend.sum(0).sum(2).transpose(),origin='lower')
plt.colorbar()
plt.xlabel(r'$x_2$')
plt.ylabel(r'$x_3$')
tikzplotlib.save('./plots/EI_marginal.tex')
plt.figure()
plt.imshow((Pend-P_ref).sum(0).sum(2).transpose(),origin='lower')
plt.colorbar()
plt.xlabel(r'$x_2$')
plt.ylabel(r'$x_3$')
tikzplotlib.save('./plots/EI_marginal_err.tex')
plt.figure()
plt.imshow(Pend.sum(0).sum(0).transpose(),origin='lower')
plt.colorbar()
plt.xlabel(r'$x_3$')
plt.ylabel(r'$x_4$')
tikzplotlib.save('./plots/IR_marginal.tex')
plt.figure()
plt.imshow((Pend-P_ref).sum(0).sum(0).transpose(),origin='lower')
plt.colorbar()
plt.xlabel(r'$x_3$')
plt.ylabel(r'$x_4$')
tikzplotlib.save('./plots/IR_marginal_err.tex')
plt.figure()
plt.imshow(Pend.sum(1).sum(1).transpose(),origin='lower')
plt.colorbar()
plt.xlabel(r'$x_1$')
plt.ylabel(r'$x_4$')
tikzplotlib.save('./plots/SR_marginal.tex')
plt.figure()
plt.imshow((Pend-P_ref).sum(1).sum(1).transpose(),origin='lower')
plt.colorbar()
plt.xlabel(r'$x_1$')
plt.ylabel(r'$x_4$')
tikzplotlib.save('./plots/SR_marginal_err.tex')
| 8,773 |
amulet/world_interface/chunk/translators/java/java_numerical/java_numerical_translator.py
|
Podshot/Amulet-Core
| 0 |
2024485
|
from __future__ import annotations
from typing import Tuple, Union
import numpy
from amulet.world_interface.chunk.translators import Translator
from PyMCTranslate.py3.translation_manager import Version
class JavaNumericalTranslator(Translator):
def _translator_key(
self, version_number: int
) -> Tuple[str, Union[int, Tuple[int, int, int]]]:
return "java", version_number
def _unpack_palette(self, version: Version, palette: numpy.ndarray):
"""
Unpacks an int array of block ids and block data values [[1, 0], [2, 0]] into a numpy array of Block objects.
:param version:
:param palette:
:return:
"""
palette = numpy.array([version.ints_to_block(*entry) for entry in palette])
return palette
def _pack_palette(self, version: Version, palette: numpy.ndarray) -> numpy.ndarray:
"""
Packs a numpy array of Block objects into an int array of block ids and block data values [[1, 0], [2, 0]].
:param version:
:param palette:
:return:
"""
palette = [version.block_to_ints(entry) for entry in palette]
for index, value in enumerate(palette):
if value is None:
palette[index] = (
0,
0,
) # TODO: find some way for the user to specify this
return numpy.array(palette)
@staticmethod
def is_valid(key):
if key[0] != "anvil":
return False
if key[1] > 1343:
return False
return True
TRANSLATOR_CLASS = JavaNumericalTranslator
| 1,632 |
day_7/part_2.py
|
ITachiLab/adventofcode2017
| 0 |
2025837
|
import re
class Disk():
def __init__(self, name):
self.name = name
self.parents = []
self.subnodes = []
self.weight = 0
self.__total_weight = None
@property
def total_weight(self):
if self.__total_weight is None:
self.__total_weight = sum([sub.total_weight for sub in self.subnodes]) + self.weight
return self.__total_weight
@property
def child_count(self):
return len(self.subnodes)
suspicious = None
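# search() descends from the root and, on each level with more than one child,
# picks the child whose total_weight disagrees with its siblings; `suspicious`
# ends up holding (unbalanced_node, weight_it_should_have), printed in main().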
def search(node):
global suspicious
if node.child_count > 1:
weight_pairs = [(sub, sub.total_weight) for sub in node.subnodes]
weight_pairs.sort(key=lambda x: x[1])
weights = [x[1] for x in weight_pairs]
if len(set(weights)) != 1:
first_node = weight_pairs[0]
second_node = weight_pairs[-1]
if weights.count(first_node[1]) > weights.count(second_node[1]):
selected = second_node
other = first_node
else:
selected = first_node
other = second_node
if selected[0].child_count > 0:
suspicious = (selected[0], (other[0].total_weight - selected[0].total_weight) + selected[0].weight)
search(suspicious[0])
else:
return
else:
return
def main():
pattern = '^(?P<name>\\w+)\\s\\((?P<weight>\\d+)\\)$|^(?P<name2>\\w+)\\s\\((?P<weight2>\\d+)\\)\\s->\\s(?P<sub>.*)$'
entries = {}
with open('day_7.in', 'r') as f:
for l in f.readlines():
match = re.match(pattern, l)
groups = match.groupdict()
name = groups['name'] or groups['name2']
weight = groups['weight'] or groups['weight2']
sub = groups['sub'].replace(' ', '').split(',') if groups['sub'] is not None else []
disk = Disk(name) if name not in entries else entries[name]
disk.weight = int(weight)
entries[name] = disk
for node in sub:
if node in entries:
disk.subnodes.append(entries[node])
entries[node].parents.append(disk)
else:
new_disk = Disk(node)
entries[node] = new_disk
disk.subnodes.append(new_disk)
new_disk.parents.append(disk)
base = next(d for d in entries.values() if len(d.parents) == 0)
search(base)
print(f'{suspicious[0].name} -> {suspicious[1]}')
if __name__ == '__main__':
main()
| 2,621 |
tests/conftest.py
|
ludwigschubert/flow-simulator
| 0 |
2025178
|
import pytest
# Paths
from flow.path import RelativePath, AbsolutePath, AbsoluteURL
@pytest.fixture
def relative_path():
return RelativePath("a/relative/path.ext")
@pytest.fixture
def absolute_path():
return AbsolutePath("/an/absolute/path.ext")
@pytest.fixture
def absolute_url():
return AbsoluteURL("gs://bucket/an/absolute/path.ext")
# PathTemplate
from flow.task_spec import PathTemplate
PathTemplate.path_template_prefix = ""
@pytest.fixture
def path_template():
return PathTemplate("/data/{group_id}/names/{name_id}.txt")
# InputSpec
from flow.task_spec import (
IterableInputSpec,
PathTemplateInputSpec,
AggregatingInputSpec,
DependentInputSpec,
)
@pytest.fixture
def iterable_input_spec():
return IterableInputSpec("iterable_input_spec", range(10))
@pytest.fixture
def path_template_input_spec(path_template):
return PathTemplateInputSpec("path_template_input_spec", path_template)
@pytest.fixture
def aggregating_input_spec():
name = "aggregating_input_spec"
dictionary = {"{neuron}": "/data/{layer}/{neuron}.jpg"}
return AggregatingInputSpec(name, dictionary)
@pytest.fixture
def dependent_input_spec():
name = "dependent_input_spec"
function = lambda model_name: [model_name + str(i) for i in range(10)]
return DependentInputSpec(name, function)
# OutputSpec
from flow.task_spec import OutputSpec, PathTemplateOutputSpec
@pytest.fixture
def simple_output():
return "/some/file/path/{glob}/test.txt"
@pytest.fixture
def output_spec(simple_output):
return OutputSpec.build(simple_output)
# JobSpec
from flow.job_spec import JobSpec
@pytest.fixture
def noop_job_spec():
return JobSpec({"unity": 1}, "/data/noop", "/tasks/noop.py")
# TaskSpec
from flow.task_spec import TaskSpec
@pytest.fixture
def trivial_task_spec():
iis1 = IterableInputSpec("iis1", [0, 1])
iis2 = IterableInputSpec("iis2", ["a", "b"])
output_spec = OutputSpec.build("/{iis1}/{iis2}.txt")
a_path = "/tasks/trivial_task_spec.py"
name = "trivial_task_spec.py"
return TaskSpec([iis1, iis2], output_spec, a_path, name)
@pytest.fixture
def task_spec_repetetive():
iis1 = IterableInputSpec("iis1", [0, 1])
iis2 = IterableInputSpec("iis2", ["a", "b"])
output_spec = OutputSpec.build("/{iis1}/{iis2}.txt")
a_path = "/tasks/trivial_task_spec.py"
name = "trivial_task_spec.py"
return TaskSpec([iis1, iis2], output_spec, a_path, name)
@pytest.fixture
def full_task_spec(
iterable_input_spec, aggregating_input_spec, dependent_input_spec, output_spec
):
path_template = PathTemplate(
"/data/models/{model_name}/checkpoints/{checkpoint_folder}"
)
path_template_input_spec = PathTemplateInputSpec("checkpoint_path", path_template)
inputs = [
iterable_input_spec,
path_template_input_spec,
aggregating_input_spec,
dependent_input_spec,
]
output = output_spec
a_path = "/tasks/full_task_spec.py"
name = "full_task_spec.py"
return TaskSpec(inputs, output, a_path, name)
| 3,076 |
policies.py
|
hari-sikchi/LOOP
| 20 |
2025733
|
# Policy definitions for Online, Offline and Safe RL
from controllers import arc, arc_offline, arc_safety
import numpy as np
import gym
from models.model_PETS import EnsembleDynamicsModel
from models.predict_env_pets import PredictEnv as PredictEnvPETS
import sac
import torch
# Default termination function that outputs done=False
def default_termination_function(state,action,next_state):
if (torch.is_tensor(next_state)):
done = torch.zeros((next_state.shape[0],1))
else:
done = np.zeros((next_state.shape[0],1))
return done
def get_policy(all_args, env, replay_buffer, config, policy_name='LOOP_SAC',env_fn=None):
policy,sac_policy = None, None
dynamics_config = config['dynamics_config']
mpc_config = config['mpc_config']
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
hidden_dim= dynamics_config['hidden_dim']
if all_args.policy == 'LOOP_SAC_ARC':
env_model = EnsembleDynamicsModel(7, 5, state_dim, action_dim, 1, hidden_dim,
use_decay=True)
dynamics = PredictEnvPETS(env_model,replay_buffer, all_args.env, 'pytorch')
sac_policy = sac.SAC(lambda:gym.make(all_args.env), dynamics, replay_buffer, env.termination_function)
sac_policy.update_every=50
sac_policy.update_after=1000
policy = arc.ARC(
env,
dynamics,
sac_policy,env.termination_function)
elif all_args.policy == 'LOOP_OFFLINE_ARC':
env_model = EnsembleDynamicsModel(7, 5, state_dim, action_dim, 1, hidden_dim,
use_decay=True)
dynamics = PredictEnvPETS(env_model,replay_buffer, all_args.env, 'pytorch')
sac_policy = sac.SAC(lambda:gym.make(all_args.env),replay_buffer)
sac_policy.update_every=50
sac_policy.update_after=1000
        if hasattr(env, 'termination_function'):
policy = arc_offline.ARC_offline(
env,
dynamics,
sac_policy,env.termination_function)
else:
policy = arc_offline.ARC_offline(
env,
dynamics,
sac_policy,default_termination_function)
elif all_args.policy == 'safeLOOP_CEM':
env_model = EnsembleDynamicsModel(7, 5, state_dim, action_dim, 1, hidden_dim,
use_decay=True)
dynamics = PredictEnvPETS(env_model,replay_buffer, all_args.env, 'pytorch')
sac_policy = sac.SAC(env_fn,dynamics,replay_buffer,default_termination_function)
sac_policy.update_every=50
sac_policy.update_after=1000
policy = arc_safety.safeCEM(
env,
dynamics,
sac_policy,default_termination_function)
elif all_args.policy == 'safeLOOP_ARC':
env_model = EnsembleDynamicsModel(7, 5, state_dim, action_dim, 1, hidden_dim,
use_decay=True)
dynamics = PredictEnvPETS(env_model,replay_buffer, all_args.env, 'pytorch')
sac_policy = sac.SAC(env_fn,dynamics,replay_buffer,default_termination_function)
sac_policy.update_every=50
sac_policy.update_after=1000
policy = arc_safety.safeARC(
env,
dynamics,
sac_policy,default_termination_function)
if 'OFFLINE' not in all_args.policy and 'CEM' in all_args.policy:
policy.horizon = mpc_config['horizon']
policy.sol_dim = env.action_space.shape[0] * mpc_config['horizon']
policy.ub = np.repeat(env.action_space.high,mpc_config['horizon'],axis=0)
policy.lb = np.repeat(env.action_space.low,mpc_config['horizon'],axis=0)
policy.mean = np.zeros((policy.sol_dim,))
policy.N = mpc_config['CEM']['popsize']
policy.mixture_coefficient = mpc_config['CEM']['mixture_coefficient']
policy.particles = mpc_config['CEM']['particles']
policy.max_iters = mpc_config['CEM']['max_iters']
policy.num_elites = mpc_config['CEM']['num_elites']
policy.alpha = mpc_config['CEM']['alpha']
if 'reward_horizon' in mpc_config['CEM'].keys():
policy.reward_horizon = mpc_config['CEM']['reward_horizon']
elif 'OFFLINE' not in all_args.policy and 'ARC' in all_args.policy:
policy.horizon = mpc_config['horizon']
policy.sol_dim = env.action_space.shape[0] * mpc_config['horizon']
policy.ub = np.repeat(env.action_space.high,mpc_config['horizon'],axis=0)
policy.lb = np.repeat(env.action_space.low,mpc_config['horizon'],axis=0)
policy.mean = np.zeros((policy.sol_dim,))
policy.N = mpc_config['ARC']['popsize']
policy.mixture_coefficient = mpc_config['ARC']['mixture_coefficient']
policy.particles = mpc_config['ARC']['particles']
policy.max_iters = mpc_config['ARC']['max_iters']
policy.alpha = mpc_config['ARC']['alpha']
policy.kappa = mpc_config['ARC']['kappa']
if 'reward_horizon' in mpc_config['ARC'].keys():
policy.reward_horizon = mpc_config['ARC']['reward_horizon']
lookahead_policies=['LOOP_SAC_ARC','LOOP_OFFLINE_ARC','safeLOOP_CEM','safeLOOP_ARC']
return policy,sac_policy, dynamics, lookahead_policies
| 5,319 |
counter/tests.py
|
arpith/warmups
| 0 |
2026086
|
import unittest
from counter import counter
class TestCounter(unittest.TestCase):
def test_remove_spaces(self):
counts = counter([' '])
self.assertEqual(len(counts.keys()), 0)
def test_total_count(self):
total = 0
s = 'someTextWithoutSpaces'
counts = counter(list(s))
for k, v in counts.iteritems():
total += v
self.assertEqual(len(s), total)
if __name__ == '__main__':
unittest.main()
| 469 |
compute_volume.py
|
applied-systems-biology/python2-custom-segment-glomeruli
| 0 |
2024370
|
# -*- coding: utf-8 -*-
'''
Counting glomeruli in Light-Sheet microscopy images of kidney.
Full details of the algorithm can be found in the paper
Klingberg et al. (2017) Fully Automated Evaluation of Total Glomerular Number and
Capillary Tuft Size in Nephritic Kidneys Using Lightsheet Microscopy,
J. Am. Soc. Nephrol., 28: 452-459.
For running in command line: ``python compute_volume.py -i settings.csv``
:Author:
`<NAME>`_
email: <EMAIL> or <EMAIL>
:Organization:
Applied Systems Biology Group, Leibniz Institute for Natural Product Research and Infection Biology - Hans Knöll Institute (HKI)
:Version: 2015.11.12
Copyright (c) 2014-2015,
Leibniz Institute for Natural Product Research and Infection Biology –
Hans Knöll Institute (HKI)
Licence: BSD-3-Clause, see ./LICENSE or
https://opensource.org/licenses/BSD-3-Clause for full details
Requirements
------------
* `Python 2.7.3 <http://www.python.org>`_
* `Numpy 1.9.1 <http://www.numpy.org>`_
* `Scipy.ndimage 2.0 <http://www.scipy.org>`_
* `Mahotas 1.0.3 `_
* `argparse 1.1 `_
* `pandas 0.15.2 <http://pandas.pydata.org>`_
Reference
---------
Klingberg et al. (2017) Fully Automated Evaluation of Total Glomerular Number and
Capillary Tuft Size in Nephritic Kidneys Using Lightsheet Microscopy,
J. Am. Soc. Nephrol., 28: 452-459.
'''
import sys
sys.path.append('include')
import re, os
import pandas as pd
import numpy as np
import mahotas
from scipy import ndimage
import time
import filelib
import boost
import tifffile
def list_subfolders(inputfolder, subfolders = []):
'''
list folders, each containing layers of one stack
'''
files = filelib.list_subfolders(inputfolder, subfolders = subfolders)
files.sort()
folders = []
for f in files:
folders.append(filelib.return_path(f))
folders = np.unique(folders)
return folders
def extract_zoom(folder):
'''
Extract zoom data from image name
'''
parts = folder.split('zoom')
p = re.compile('\d+')
if len(parts) > 1:
zoom = p.findall(parts[1])[0]
else:
zoom = '063'
zsize = 5.
if zoom == '063':
xsize = 5.159
if zoom == '08':
xsize = 4.063
return xsize, zsize
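# Example: a folder name containing "zoom08" gives (xsize, zsize) = (4.063, 5.0),
# while a name without a "zoom" tag falls back to the default "063" pair
# (5.159, 5.0). The pixel sizes are presumably in micrometres, although the
# script itself does not state the unit.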
def normalize(img, per = 100, min_signal_value = 0):
ph = np.percentile(img, per)
pl = np.percentile(img, 100-per)
if ph > min_signal_value:
img = np.where(img>ph, ph, img)
img = np.where(img<pl, pl, img)
img = img - img.min()
img = img*255./img.max()
else:
img = np.zeros_like(img)
return img
def overlay(mask, img, color, borders = True, normalize = True):
if borders:
borders = mahotas.borders((mask).astype(np.uint8))
else:
borders = mask
ind = np.where(borders)
if normalize and img.max() > 0:
output = img*255./img.max()
else:
output = np.zeros_like(img)
output[ind] = color
return output
################################################################################
#Segmentation
def segment(folder, params):
'''
Segment all layers in a folder
'''
#create folders for the output
filelib.make_folders([params.inputfolder + '../segmented/outlines/' + folder, params.inputfolder + '../segmented/masks/' + folder])
#list all files in the folder
files = filelib.list_image_files(params.inputfolder + folder)
files.sort()
ind = np.int_(np.arange(0, len(files), 10))
files = np.array(files)[ind]
if not len(filelib.list_image_files(params.inputfolder + '../segmented/masks/' + folder)) == len(files):
params.folder = folder
#segment all layers in parallel
boost.run_parallel(process = segment_layer, files = files, params = params, procname = 'Segmentation of glomeruli')
def segment_layer(filename, params):
'''
Segment one layer in a stack
'''
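    # Overview of the steps below (comment added for readability): median-filter
    # and percentile-clip the raw layer, blur with a gaussian of ~30 physical
    # units (30./xsize pixels), threshold with Otsu, morphologically close the
    # mask at 1/10 scale with a disk of ~300 physical units to fill the kidney
    # outline, rescale and re-threshold, and finally discard the mask if the
    # foreground is not clearly brighter than the background.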
#extract pixel size in xy and z
xsize, zsize = extract_zoom(params.folder)
#load image
img = tifffile.imread(params.inputfolder + params.folder + filename)
#normalize image
img = ndimage.median_filter(img, 3)
per_low = np.percentile(img, 5)
img[img < per_low] = per_low
img = img - img.min()
per_high = np.percentile(img, 99)
img[img > per_high] = per_high
img = img*255./img.max()
imgf = ndimage.gaussian_filter(img*1., 30./xsize).astype(np.uint8)
kmask = (imgf > mahotas.otsu(imgf.astype(np.uint8)))*255.
sizefactor = 10
small = ndimage.interpolation.zoom(kmask, 1./sizefactor) #scale the image to a smaller size
rad = int(300./xsize)
small_ext = np.zeros([small.shape[0] + 4*rad, small.shape[1] + 4*rad])
small_ext[2*rad : 2*rad + small.shape[0], 2*rad : 2*rad + small.shape[1]] = small
small_ext = mahotas.close(small_ext.astype(np.uint8), mahotas.disk(rad))
small = small_ext[2*rad : 2*rad + small.shape[0], 2*rad : 2*rad + small.shape[1]]
small = mahotas.close_holes(small)*1.
small = small*255./small.max()
kmask = ndimage.interpolation.zoom(small, sizefactor) #scale back to normal size
kmask = normalize(kmask)
kmask = (kmask > mahotas.otsu(kmask.astype(np.uint8)))*255. #remove artifacts of interpolation
if np.median(imgf[np.where(kmask > 0)]) < (np.median(imgf[np.where(kmask == 0)]) + 1)*3:
kmask = np.zeros_like(kmask)
#save indices of the kidney mask
# ind = np.where(kmask > 0)
# ind = np.array(ind)
# np.save(params.inputfolder + '../segmented/masks/' + params.folder + filename[:-4] + '.npy', ind)
#save outlines
im = np.zeros([img.shape[0], img.shape[1], 3])
img = tifffile.imread(params.inputfolder + params.folder + filename)
im[:,:,0] = im[:,:,1] = im[:,:,2] = np.array(img)
output = overlay(kmask, im, (255,0,0), borders = True)
tifffile.imsave(params.inputfolder + '../segmented/outlines/' + params.folder + filename[:-4] + '.tif', (output).astype(np.uint8))
#############################################################################
#Quantification
def quantify(folder, params):
'''
Quantify a stack
'''
if not os.path.exists(params.inputfolder + '../statistics/' + folder[:-1] + '.csv'):
#list files in the folder
files = filelib.list_image_files(params.inputfolder + folder)
files.sort()
#create a folder for statistics
filelib.make_folders([params.inputfolder + '../statistics/' + filelib.return_path(folder[:-1] + '.csv')])
#extract voxel size
xsize, zsize = extract_zoom(folder)
#compute volume of the kidney
kidney_volume = 0
for i in range(len(files)):
ind = np.load(params.inputfolder + '../segmented/masks/' + folder + files[i][:-4] + '.npy')
kidney_volume = kidney_volume + len(ind[0])
stat = pd.DataFrame()
stat['Kidney_volume'] = [kidney_volume*xsize**2*zsize]
stat['Image_name'] = folder[:-1]
stat.to_csv(params.inputfolder + '../statistics/' + folder[:-1] + '.csv', sep = '\t')
####################################################################
#read parameters from settings file
try:
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("-i","--input", required = True, help = "File with settings")
args = ap.parse_args()
settingsfile = args.input
except:
settingsfile = 'settings.csv'
params = pd.Series.from_csv(settingsfile, sep = '\t')
#list folders with stacks to be analyzed
folders = list_subfolders(params.inputfolder, subfolders = [])
#segment each stack
for folder in folders:
print folder
segment(folder, params)
#quantify the segmented data
boost.run_parallel(process = quantify, files = folders, params = params, procname = 'Quantification')
filelib.combine_statistics(params.inputfolder + '../statistics/', params.inputfolder + '../statistics_combined.csv')
| 7,450 |
tests/test_has_overlaps.py
|
thomasschus/segcheck
| 0 |
2022912
|
import pytest
from .context import has_overlaps
from .context import InvalidSegmentError
testdata = [
([], False),
([[1, 2], [3, 4]], False),
([[3, 4], [1, 2]], False),
([[-1, 0], [-2, 1]], True),
([[-1, 0], [-2, 0]], True),
]
@pytest.mark.parametrize("segmentation,expected", testdata)
def test_has_overlaps(segmentation, expected):
result = has_overlaps(segmentation)
assert result == expected
def test_has_overlaps_exception():
with pytest.raises(InvalidSegmentError):
assert has_overlaps([[0, 1], [5, 4]])
| 555 |
mapper.py
|
rafpach16/simple_Hadoop_MapReduce_example
| 0 |
2025696
|
#!/usr/bin/env python
import string, sys
from sklearn.feature_extraction import stop_words
stops = set(stop_words.ENGLISH_STOP_WORDS)
for line in sys.stdin:
## removes the whitespace
line = line.strip()
## removes punctuation
line = line.translate( string.maketrans(string.punctuation, ' ' * len(string.punctuation)) )
    ## converts the text to lowercase
    line = line.lower()
## this splits words at all whitespace
words = line.split()
    ## emits each word that is not a stop word with a count of 1
for w in words:
if w not in stops:
print '%s\t%s' % (w, "1")
| 717 |
tasks/gallery/gallery/constants.py
|
HackerDom/qctf-starter-2016
| 6 |
2025236
|
import datetime
import os
import pickle
import re
SERVER_PORT = 4567
SERVER_DEBUG = False
with open('secrets/password_secret.txt') as f:
PASSWORD_SECRET = f.read().strip()
with open('secrets/jwt_secret.txt') as f:
JWT_SECRET = f.read().strip()
SESSION_LENGTH = datetime.timedelta(hours=24)
MAX_PHOTO_SIZE = 2 * 1024 * 1024
WAIT_AFTER_UPLOAD_GLOBAL = datetime.timedelta(milliseconds=300)
WAIT_AFTER_UPLOAD_FOR_USER = datetime.timedelta(seconds=5)
MEMCACHED_HOST = 'memcached'
MEMCACHED_PORT = '11211'
MEMCACHED_PARAMS = [MEMCACHED_HOST, MEMCACHED_PORT], False, False, pickle.Pickler, pickle.Unpickler, None, None, 250, 1024 * 1024, 30, 3, False, False, False
MEMCACHED_EXPIRATION_TIME = 60
MEMCACHED_MIN_COMPRESS_LEN = 1000
DB_HOST = 'mysql'
DB_PORT = 3306
DB_USER = os.environ['MYSQL_USER']
DB_PASSWORD = os.environ['MYSQL_PASSWORD']
DB_DATABASE = os.environ['MYSQL_DATABASE']
IP_TO_CITY_PATH = 'ips.txt'
CITY_TO_COORDS_PATH = 'cities.txt'
USERNAME_RE = re.compile(r'[-\w]{4,}')
PHOTO_DIR = 'static/photos/'
| 1,018 |
lizard/events.py
|
Magical-Chicken/lizard-slayer
| 0 |
2025012
|
import enum
import time
from lizard import LOG
from lizard import util
class EventStatus(enum.Enum):
PENDING = 'pending'
RUNNING = 'running'
SUCCESS = 'success'
FAILURE = 'failure'
FINAL_STATES = (EventStatus.SUCCESS.value, EventStatus.FAILURE.value)
class BaseEvent(object):
"""Base event type"""
event_map = None
event_map_lock = None
event_handler_map = None
def __init__(self, event_type, data, register_event=True):
"""
Init for Event
:event_type: event type
:data: event data
:register_event: if true add event to event result map
"""
self.event_type = event_type
self.event_id = util.hex_uuid()
self.status = EventStatus.PENDING
self.result = None
self.data = data
self.completion_time = 0
self._register_event()
def handle(self):
"""
Handle event using handler defined in event handler map and set result
"""
if self.event_handler_map is None:
raise NotImplementedError("Cannot handle BaseEvent")
handler = self.event_handler_map.get(
self.event_type, handler_not_implemented)
start_time = time.time()
try:
self.status = EventStatus.RUNNING
self.result = handler(self)
self.status = EventStatus.SUCCESS
except Exception as e:
msg = repr(e)
LOG.warning("Failed to complete event: %s error: %s", self, msg)
self.status = EventStatus.FAILURE
self.result = {'error': msg}
end_time = time.time()
self.completion_time = end_time - start_time
def _register_event(self):
"""
Register event in event map
"""
if self.event_map is None or self.event_map_lock is None:
raise NotImplementedError("Cannot set result on BaseEvent")
else:
with self.event_map_lock:
self.event_map[self.event_id] = self
@property
def properties(self):
"""event properties, not including data which may be very large"""
return {
'event_id': self.event_id,
'type': self.event_type.value,
'status': self.status.value,
'result': self.result,
'completion_time': self.completion_time,
}
@property
def full_properties(self):
"""full event properties including data"""
return {
'event_id': self.event_id,
'type': self.event_type.value,
'status': self.status.value,
'data': self.data,
'result': self.result,
}
def __str__(self):
"""str repr for event"""
return "EventType: '{}' ID: {}".format(self.event_type, self.event_id)
def get_event_type_by_name(event_type_name, event_type_class):
"""
get the event type object for the specified event name
:event_type_name: event type name string
:returns: instance of ClientEventType or ServerEventType
"""
result = event_type_class.INVALID_TYPE
if event_type_name in (e.value for e in event_type_class):
result = event_type_class(event_type_name)
return result
def handler_not_implemented(event):
"""
placeholder event handler
:event: event to handle
    :returns: event result data if event successfully handled
:raises: Exception: if error occurs handling event
"""
raise NotImplementedError("No event handler for event: {}".format(event))
| 4,113 |
tests/serialize/runstate/shelvestore_test.py
|
Yelp/Tron
| 190 |
2025226
|
import os
import shutil
import tempfile
from testifycompat import assert_equal
from testifycompat import run
from testifycompat import setup
from testifycompat import teardown
from testifycompat import TestCase
from tron.serialize.runstate.shelvestore import Py2Shelf
from tron.serialize.runstate.shelvestore import ShelveKey
from tron.serialize.runstate.shelvestore import ShelveStateStore
class TestShelveStateStore(TestCase):
@setup
def setup_store(self):
self.tmpdir = tempfile.mkdtemp()
self.filename = os.path.join(self.tmpdir, "state")
self.store = ShelveStateStore(self.filename)
@teardown
def teardown_store(self):
shutil.rmtree(self.tmpdir)
def test__init__(self):
assert_equal(self.filename, self.store.filename)
def test_save(self):
key_value_pairs = [
(ShelveKey("one", "two"), {"this": "data",},),
(ShelveKey("three", "four"), {"this": "data2",},),
]
self.store.save(key_value_pairs)
self.store.cleanup()
stored_data = Py2Shelf(self.filename)
for key, value in key_value_pairs:
assert_equal(stored_data[str(key.key)], value)
stored_data.close()
def test_delete(self):
key_value_pairs = [
(ShelveKey("one", "two"), {"this": "data",},),
(ShelveKey("three", "four"), {"this": "data2",},),
# Delete first key
(ShelveKey("one", "two"), None,),
]
self.store.save(key_value_pairs)
self.store.cleanup()
stored_data = Py2Shelf(self.filename)
assert stored_data == {
str(ShelveKey("three", "four").key): {"this": "data2"},
}
stored_data.close()
def test_restore(self):
self.store.cleanup()
keys = [ShelveKey("thing", i) for i in range(5)]
value = {"this": "data"}
store = Py2Shelf(self.filename)
for key in keys:
store[str(key.key)] = value
store.close()
self.store.shelve = Py2Shelf(self.filename)
retrieved_data = self.store.restore(keys)
for key in keys:
assert_equal(retrieved_data[key], value)
if __name__ == "__main__":
run()
| 2,237 |
helper/downloader/urlDL.py
|
REX-BOTZ/MegaUploaderbot-1
| 2 |
2024880
|
#!/usr/bin/env python3
"""Importing"""
# Importing External Packages
from pySmartDL import SmartDL
from pyrogram.errors import exceptions
# Importing Required developer defined data
from helper.downloader.downloadingData import *
fileName = 'urlDL'
class UrlDown:
def __init__(self, bot, msg, process_msg, Downloadfolder, url):
self.bot = bot
self.msg = msg
self.process_msg = process_msg
self.Downloadfolder = Downloadfolder
self.url = url
async def start(self):
len_file = await length_of_file(self.bot, self.url)
if len_file == 'Valid':
try:
self.process_msg = await self.process_msg.edit_text(BotMessage.starting_to_download, parse_mode = 'html')
except exceptions.bad_request_400.MessageNotModified:
pass
downObj = SmartDL(self.url, dest = self.Downloadfolder)
downObj.start(blocking = False)
while not downObj.isFinished():
progress_bar = downObj.get_progress_bar().replace('#', '■').replace('-', '□')
completed = downObj.get_dl_size(human=True)
speed = downObj.get_speed(human=True)
remaining = downObj.get_eta(human=True)
percentage = int(downObj.get_progress()*100)
try:
self.process_msg = await self.process_msg.edit_text(f"<b>Downloading... !! Keep patience...\n {progress_bar}\n📊Percentage: {percentage}%\n✅Completed: {completed}\n🚀Speed: {speed}\n⌚️Remaining Time: {remaining}</b>", parse_mode = 'html')
except exceptions.bad_request_400.MessageNotModified:
pass
finally:
sleep(1)
if downObj.isSuccessful():
try:
n_msg = await self.process_msg.edit_text(BotMessage.uploading_msg, parse_mode = 'html')
except exceptions.bad_request_400.MessageNotModified:
pass
self.n_msg = n_msg
self.filename = path.basename(downObj.get_dest())
return True
else:
try:
rmtree(self.Downloadfolder)
except Exception as e:
await self.process_msg.delete()
await self.msg.reply_text(BotMessage.unsuccessful_upload, parse_mode = 'html')
for e in downObj.get_errors():
await self.bot.send_message(OwnerID, f'{line_number(fileName, e)}\n\n{self.url}')
else:
await self.bot.send_message(OwnerID, f'{line_number(fileName)}\n\n{self.url}')
elif len_file == 'Not Valid':
try:
await self.process_msg.edit_text(BotMessage.unsuccessful_upload, parse_mode = 'html')
except exceptions.bad_request_400.MessageNotModified:
pass
else:
try:
await self.process_msg.edit_text(f'Filesize Detected: <code>{len_file}MB</code>.\n{BotMessage.file_limit}', parse_mode = 'html')
except exceptions.bad_request_400.MessageNotModified:
pass
self.filename = None
| 3,259 |
main/user/forms.py
|
MahanBi/Back-End
| 0 |
2025353
|
from django import forms
from django.core.exceptions import ValidationError
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from .models import User
class CreateUserForm(forms.ModelForm):
_password = forms.CharField(label='Pass Conf', widget=forms.PasswordInput)
password = forms.CharField(label='Pass', widget=forms.PasswordInput)
class Meta:
model = User
fields = ('username', 'email', 'friend_code', 'full_name', 'role')
def clean_password(self):
password = self.cleaned_data.get("password")
_password = self.cleaned_data.get("_password")
if password != _password:
raise ValidationError("Passwords don't match")
return password
def save(self, commit=True):
user = super().save(commit=False)
user.set_password(self.cleaned_data["password"])
if commit:
user.save()
return user
class ChangeUserForm(forms.ModelForm):
    password = ReadOnlyPasswordHashField()
class Meta:
model = User
fields = ('username', 'email', 'friend_code', 'full_name', 'role')
| 1,105 |
tests/server/test_api.py
|
davidkartchner/rubrix
| 1 |
2025772
|
# coding=utf-8
# Copyright 2021-present, the Recognai S.L. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from rubrix.server.tasks.commons import TaskStatus
from rubrix.server.tasks.text_classification.api import (
TaskType,
TextClassificationBulkData,
TextClassificationRecord,
)
def create_some_data_for_text_classification(client, name: str, n: int):
records = [
TextClassificationRecord(**data)
for idx in range(0, n or 10, 2)
for data in [
{
"id": idx,
"inputs": {"data": "my data"},
"multi_label": True,
"metadata": {"field_one": "value one", "field_two": "value 2"},
"status": TaskStatus.validated,
"annotation": {
"agent": "test",
"labels": [
{"class": "Test"},
{"class": "Mocking"},
],
},
},
{
"id": idx + 1,
"inputs": {"data": "my data"},
"multi_label": True,
"metadata": {"field_one": "another value one", "field_two": "value 2"},
"status": TaskStatus.validated,
"prediction": {
"agent": "test",
"labels": [
{"class": "NoClass"},
],
},
"annotation": {
"agent": "test",
"labels": [
{"class": "Test"},
],
},
},
]
]
client.post(
f"/api/datasets/{name}/{TaskType.text_classification}:bulk",
json=TextClassificationBulkData(
tags={"env": "test", "class": "text classification"},
metadata={"config": {"the": "config"}},
records=records,
).dict(by_alias=True),
)
def uri_2_path(uri: str):
from urllib.parse import urlparse
p = urlparse(uri)
return os.path.abspath(os.path.join(p.netloc, p.path))
| 2,645 |
src/pages/resources.py
|
Ben0mmen/.-MfG-ThC-.
| 1 |
2026029
|
"""This page is for searching and viewing the list of awesome resources"""
import logging
from collections import defaultdict
import streamlit as st
import awesome_streamlit as ast
logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.INFO)
@st.cache
def filter_by_tags(resources, tags):
"""The resources having one of the specified Tags
If tags is the empty list all resources are returned
Arguments:
resources {[type]} -- A list of Resources
tags {[type]} -- A list of Tags
Returns:
[type] -- A list of Resources
"""
if tags:
resources_ = []
for resource in resources:
if set(resource.tags).intersection(tags):
resources_.append(resource)
return resources_
return resources
@st.cache
def filter_by_is_awesome(resources):
"""The resources being that is_awesome
Arguments:
resources {[type]} -- A list of resources
"""
return [resource for resource in resources if resource.is_awesome]
@st.cache
def to_markdown(resources):
"""Converts the specified resources to MarkDown
Arguments:
resources {[type]} -- [description]
Returns:
[type] -- [description]
"""
resources_dict = defaultdict(list)
for resource in resources:
resources_dict[resource.tags[0]].append(resource)
markdown_bullets = []
for tag in sorted(resources_dict.keys(), key=lambda x: x.name):
markdown_bullets.append(f"\n### {tag.name}\n")
for resource in resources_dict[tag]:
markdown_bullets.append(resource.to_markdown_bullet())
markdown = "\n".join(markdown_bullets)
return markdown
@st.cache
def get_sorted_resources(awesome_resources_only: bool = True):
"""The list of resources sorted by name
Keyword Arguments:
awesome_resources_only {bool} -- If True only awesome resources
will be included in the list (default: {True})
Returns:
[type] -- The list of resources sorted by name
"""
resources = sorted(ast.database.RESOURCES, key=lambda x: x.name)
if awesome_resources_only:
resources = filter_by_is_awesome(resources)
return resources
@st.cache
def get_resources_markdown(tags, awesome_resources_only=True) -> str:
"""A bulleted Markdown list of resources filtered as specified
Arguments:
tags {[type]} -- A list of tags to filter to. If the list is empty [] then we
do no filtering on Tags
Keyword Arguments:
awesome_resources_only {bool} -- [description] (default: {True})
Returns:
str -- A bulleted Markdown list of resources filtered as specified
"""
resources = get_sorted_resources(awesome_resources_only)
resources = filter_by_tags(resources, tags)
return to_markdown(resources)
def write():
"""Writes content to the app"""
ast.shared.components.title_awesome("Resources")
st.sidebar.title("Resources")
show_awesome_resources_only = st.sidebar.checkbox(
"Show Awesome Resources Only", value=True
)
tags = ast.shared.components.multiselect(
"Select Tag(s)", options=ast.database.TAGS, default=[]
)
st.info(
"""Please note that resources can have multiple tags!
We list each resource under **a most important tag only!**"""
)
resource_section = st.empty()
with st.spinner("Loading resources ..."):
markdown = get_resources_markdown(tags, show_awesome_resources_only)
resource_section.markdown(markdown)
if st.sidebar.checkbox("Show Resource JSON"):
st.subheader("Source JSON")
st.write(ast.database.RESOURCES)
tags = None
if __name__ == "__main-_":
write()
| 3,726 |
src/solutions/part1/q207_course_schedule.py
|
hychrisli/PyAlgorithms
| 0 |
2025124
|
from src.base.solution import Solution
from src.tests.part1.q207_test_course_schedule import CourseScheduleTestCases
"""
There are a total of n courses you have to take, labeled from 0 to n - 1.
Some courses may have prerequisites, for example to take course 0 you have to first take course 1, which is expressed as a pair: [0,1]
Given the total number of courses and a list of prerequisite pairs, is it possible for you to finish all courses?
For example:
2, [[1,0]]
There are a total of 2 courses to take.
To take course 1 you should have finished course 0. So it is possible.
2, [[1,0],[0,1]]
There are a total of 2 courses to take.
To take course 1 you should have finished course 0, and to take course 0 you should also have finished course 1.
So it is impossible.
"""
class CourseSchedule(Solution):
def verify_output(self, test_output, output):
return test_output == output
def print_output(self, output):
super(CourseSchedule, self).print_output(output)
def gen_test_cases(self):
return CourseScheduleTestCases()
def run_test(self, input):
return self.canFinish(input[0], input[1])
def canFinish(self, numCourses, prerequisites):
"""
:type numCourses: int
:type prerequisites: List[List[int]]
:rtype: bool
"""
WHITE = 0
GRAY = 1
BLACK = 2
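        # Node colors for the iterative DFS: WHITE = not visited yet,
        # GRAY = on the current DFS path, BLACK = fully explored. Meeting a
        # GRAY child again means the prerequisite graph contains a cycle,
        # so the courses cannot all be finished.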
class Node:
def __init__(self, val):
self.val = val
self.color = WHITE
self.children = []
stack = []
graph = dict()
for i in range(numCourses):
graph[i] = Node(i)
for pre in prerequisites:
graph[pre[0]].children.append(pre[1])
for node in graph.values():
# print(type(node))
if node.color == WHITE:
stack.append(node)
while stack:
# print(len(stack))
cur_node = stack[-1]
if cur_node.color == GRAY:
cur_node.color = BLACK
stack.pop()
else:
"""print(cur_node.val)
for node in graph.values():
print((node.val, node.is_visited, node.children))"""
cur_node.color = GRAY
for child in cur_node.children:
if graph[child].color == WHITE:
stack.append(graph[child])
elif graph[child].color == GRAY:
return False
return True
if __name__ == '__main__':
sol = CourseSchedule()
sol.run_tests()
| 2,680 |
python_webex/v1/Webhook.py
|
Paul-weqe/python-webex-bot
| 10 |
2023946
|
import requests
import sys
class Webhook:
def get_all_webhooks(self):
"""
GETS A LIST OF ALL THE WEBHOOKS CURRENTLY CONNECTED TO YOUR BOT
uses the https://api.ciscospark.com/v1/webhooks - GET request
details on the list webhooks URL can be found in https://developer.webex.com/docs/api/v1/webhooks/list-webhooks
"""
url_route = "webhooks"
data = requests.get(self.URL + url_route, headers=self.headers)
return data
def create_webhook(self, name=None, target_url=None, resource=None, event=None):
"""
Enables one to create a webhook that will be listening to events sent to the bot
uses the https://api.ciscospark.com/v1/webhooks - POST request
details on create webhooks URL can be found in https://developer.webex.com/docs/api/v1/webhooks/create-a-webhook
"""
url_route = "webhooks"
if name is None:
sys.exit("'name' is a required field")
elif target_url is None:
sys.exit("'targetUrl' is a required field")
elif resource is None:
sys.exit("'resource' is a required field")
elif event is None:
sys.exit("'event' is a required field")
# check for if a webhook with this URL already exists for this particular bot
# cause apparently Cisco does not do that for us when creating webhooks. But tis all good :)
existing_webhooks = self.get_all_webhooks().json()
for webhook in existing_webhooks['items']:
if webhook['targetUrl'] == target_url:
return self.get_webhook_details(webhook_id=webhook['id'])
json = {
"name": name, "targetUrl": target_url, "resource": resource, "event": event
}
data = requests.post(self.URL + url_route, headers=self.headers, json=json)
return data
def delete_webhook(self, webhook_id=None):
"""
Deletes a webhook that has ID webhookId
uses the https://api.ciscospark.com/webhooks - DELETE request
details on delete webhooks URL can be found in https://developer.webex.com/docs/api/v1/webhooks/delete-a-webhook
"""
url_route = "webhooks"
if webhook_id is None:
sys.exit("'webhookId' is a required field")
data = requests.delete(self.URL + url_route + "/" + webhook_id, headers=self.headers)
return data
def update_webhook(self, webhook_id=None, name=None, target_url=None):
"""
'name' is the updated name of the webhook
'targetUrl' is the updated targetUrl of the webhook
Edit a webhook with ID of webhookId
uses the https://api.ciscospark.com/webhooks - PUT request
details on edit webhook URL can be found in https://developer.webex.com/docs/api/v1/webhooks/update-a-webhook
"""
url_route = "webhooks"
if webhook_id is None:
sys.exit("'webhookId' is a required field")
elif name is None:
sys.exit("'name' is a required field")
elif target_url is None:
sys.exit("'targetUrl' is a required field")
json = {
"name": name, "targetUrl": target_url
}
data = requests.put(self.URL + url_route + "/" + webhook_id, json=json, headers=self.headers)
return data
def get_webhook_details(self, webhook_id=None):
"""
Get the details of a single webhook with id of webhookId
        uses https://api.ciscospark.com/webhooks/{webhookId} - GET request
details on get webhook details URL can be found in https://developer.webex.com/docs/api/v1/webhooks/get-webhook-details
"""
url_route = "webhooks"
if webhook_id is None:
sys.exit("'webhookId' is a required field")
        data = requests.get(self.URL + url_route + "/" + webhook_id, headers=self.headers)
return data
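# Illustrative usage sketch (assumption: the bot class mixing this in defines
# self.URL = "https://api.ciscospark.com/v1/" and self.headers with the bearer
# token; the webhook name and target URL below are placeholders):
#   bot.create_webhook(name="messages-hook",
#                      target_url="https://example.com/webhook",
#                      resource="messages", event="created")
#   print(bot.get_all_webhooks().json())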
| 3,924 |
core/views_no_rbac.py
|
Raybeam/rb_status_plugin
| 12 |
2024301
|
from flask_admin import BaseView, expose
from flask_admin.form import rules
from flask import flash, redirect, url_for, request
from rb_status_plugin.core.report_model import ReportModel
from rb_status_plugin.core.report_repo import VariablesReportRepo
from rb_status_plugin.core.report import Report
from rb_status_plugin.core.views import (
StatusView,
ReportsView,
)
status_view_rbac = StatusView()
reports_view_rbac = ReportsView()
class StatusViewAdmin(BaseView):
@expose("/")
def test(self):
return self.render(
"no_rbac/status.html", content=status_view_rbac.reports_data()
)
class ReportsViewAdmin(BaseView):
@expose("/")
def list(self):
return self.render("no_rbac/reports.html", content=VariablesReportRepo.list())
@expose("/<string:report_name>/trigger/", methods=["GET"])
def trigger(self, report_name):
r = Report(report_name)
r.trigger_dag()
flash(f"Triggered report: {report_name}", "info")
return redirect(url_for("rb/reports.list"))
@expose("/<string:report_name>/delete/", methods=["POST"])
def delete(self, report_name):
r = Report(report_name)
r.delete_report_variable(VariablesReportRepo.report_prefix)
r.delete_dag()
flash(f"Deleted report: {report_name}", "info")
return redirect(url_for("rb/reports.list"))
@expose("/paused", methods=["POST"])
def pause_dag(self):
r_args = request.args
report_name = r_args.get("report_name")
r = Report(report_name)
if r_args.get("is_paused") == "true":
r.activate_dag()
else:
r.pause_dag()
return "OK"
class ReportMgmtViewAdmin(ReportModel):
can_delete = False
create_template = "no_rbac/report_create_form.html"
edit_template = "no_rbac/report_edit_form.html"
form_rules = [
rules.FieldSet(("report_id", "schedule_timezone"), ""),
rules.FieldSet(
("report_title", "description", "owner_name", "owner_email", "subscribers"),
"General",
),
rules.FieldSet(
("schedule_type", "schedule_time", "schedule_week_day", "schedule_custom"),
"Schedule",
),
rules.FieldSet(("tests",), "Tests"),
]
# We're doing this to hide the view from the main
# menu and keep access in the /reports/ endpoint
def is_visible(self):
return False
| 2,456 |
collatzTestOverflow.py
|
hellpig/collatz
| 2 |
2022766
|
#!/usr/bin/env python3.7
# This code is for checking numbers that overflowed when using 128-bit integers.
# Integers in Python 3 are of unlimited size!
nStart = 340282366920938463463374607431768211455 # 2^128 - 1
# the following is the first to require 129 bits...
nStart = 55247846101001863167
# source:
# http://pcbarina.fit.vutbr.cz/path-records.htm
#
# The following is not a great source
# since given Mx must be divided by 2 before doing B(Mx)...
# http://www.ericr.nl/wondrous/pathrecs.html
nStart = 274133054632352106267
nStart = 71149323674102624415
nStart = 55247846101001863167
# Optionally find A and B for nStart = A * 2**k + B
k = 51
A = nStart >> k
B = nStart - (A << k)
print("For k =", k)
print(" A =", A)
print(" B =", B)
print("")
n = nStart
steps = 0
while True:
if (n >> 128):
print(" ", n) # to have the overflowing parts stand out
else:
print(n)
if n == 1:
#if n < nStart:
break
if n%2 == 1: # odd
n = (3*n+1) // 2
else:
n = n // 2
steps += 1
print("steps =", steps)
| 1,044 |
app/models/books.py
|
andyjohn23/booky
| 1 |
2024872
|
from .search import google_book_search
from .https import convert_to_https
class Books():
def __init__(self, query="", start=0, testing=False, https=False):
if not testing:
self.json = google_book_search(query, start)
else:
self.json = testing
self.https = True if https else False
def parse(self):
"""
Takes the google books api and displays the results to the screen and returns
an empty payload if there is an error
"""
if self.json["status"] != 200:
return empty_payload()
body = self.json["body"]
book_list = []
try:
for i in body["items"]:
book = {}
# checks the API responses for an existing data - and
# adds an empty string if no data is returned from the API
for field in ["authors", "title", "publisher", "imageLinks", "infoLink"]:
try:
book[field] = i["volumeInfo"][field]
except KeyError:
book[field] = ""
book["authors"] = self._parse_authors(book["authors"])
book["imageLinks"] = self._parse_thumbnail(book["imageLinks"])
book_list.append(book)
except KeyError:
return empty_payload()
return {"total": body["totalItems"], "items": book_list}
def _parse_authors(self, authors_list=""):
"""
Joins array of authors into comma-separated string
"""
return ", ".join(authors_list)
def _parse_thumbnail(self, imageLinks=""):
"""
Adding own image if there is no image for book covers
"""
if not imageLinks:
return {"thumbnail": "static/images/booky-book.jpg"}
else:
if self.https:
for link in imageLinks:
imageLinks[link] = convert_to_https(imageLinks[link])
return imageLinks
def payload(items, total):
return {"total": total, "items": items}
def empty_payload():
return payload([], 0)
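# Illustrative usage sketch (run from application code, since this module uses
# relative imports; the query string is arbitrary):
#   books = Books(query="python testing", start=0)
#   results = books.parse()
#   for item in results["items"]:
#       print(item["title"], "-", item["authors"])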
| 2,144 |
scripts/script_aes_multi_model.py
|
AISyLab/AISY_Framework
| 12 |
2025456
|
import aisy_sca
from app import *
from custom.custom_models.neural_networks import *
aisy = aisy_sca.Aisy()
aisy.set_resources_root_folder(resources_root_folder)
aisy.set_database_root_folder(databases_root_folder)
aisy.set_datasets_root_folder(datasets_root_folder)
aisy.set_database_name("database_ascad.sqlite")
aisy.set_dataset(datasets_dict["ASCAD.h5"])
aisy.set_aes_leakage_model(leakage_model="HW", byte=2)
aisy.set_batch_size(400)
aisy.set_epochs(50)
aisy.add_neural_network(cnn_architecture, name="model_0")
aisy.add_neural_network(mlp, name="model_3")
aisy.add_neural_network(noConv1_ascad_desync_0, name="model_1")
aisy.add_neural_network(methodology_cnn_ascad, name="model_2")
aisy.add_neural_network(cnn, name="model_4")
aisy.run()
| 746 |
algoritms/ExponentialSmoothing.py
|
kaantecik/covid-prediction
| 1 |
2025632
|
"""
170201069
<NAME>
"""
import warnings
from statsmodels.tsa.api import Holt
from statsmodels.tools.sm_exceptions import ConvergenceWarning
warnings.simplefilter('ignore', ConvergenceWarning)
def exponential_smoothing(df, smoothing_trend=0.6, initialization_method="estimated"):
train_data = df[:370]
check_data = df[370:462]
day_of_forecast = 92
model = Holt(train_data, initialization_method=initialization_method)
fit2 = model.fit(smoothing_trend=smoothing_trend)
forecast = fit2.forecast(day_of_forecast).astype(int)
return check_data, forecast
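if __name__ == "__main__":
    # Illustrative sketch with synthetic data (the real input is a case-count
    # series of at least 462 points; the values below are made up).
    import numpy as np
    import pandas as pd
    series = pd.Series(np.linspace(0, 1000, 500) + np.random.normal(0, 5, 500))
    check_data, forecast = exponential_smoothing(series)
    print(list(forecast)[:5])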
| 581 |
ocr_train/ensemble_checkpoints.py
|
Sand0001/OCR_textrender_jap_chn_eng
| 0 |
2024862
|
#-*- coding:utf-8 -*-
import os
import sys
import numpy as np
from imp import reload
from PIL import Image, ImageOps
from keras.layers import Input
from keras.models import Model
# import keras.backend as K
from keras.utils import multi_gpu_model
import dl_resnet_crnn as densenet
#import densenet
os.environ["CUDA_VISIBLE_DEVICES"] = "4,5"
GPU_NUM = 2
#reload(densenet)
encode_dct = {}
char_set = open('chn.txt', 'r', encoding='utf-8').readlines()
#char_set = open('japchn.txt', 'r', encoding='utf-8').readlines()
for i in range (0, len(char_set)):
c = char_set[i].strip('\n')
encode_dct[c] = i
char_set.append('卍')
#char_set = ''.join([ch.strip('\n') for ch in char_set] + ['卍'])
#characters = ''.join([chr(i) for i in range(32, 127)] + ['卍'])
nclass = len(char_set)
mult_model, basemodel = densenet.get_model(False, 32, nclass)
#input = Input(shape=(32, None, 1), name='the_input')
#y_pred= densenet.dense_cnn(input, nclass)
#basemodel = Model(inputs=input, outputs=y_pred)
#model = Model(inputs=[input, labels, input_length, label_length], outputs=loss_out)
mp = 'weights_densenet-02-1.29.h5'
modelPath = os.path.join(os.getcwd(), './models/' + mp)
#modelPath = sys.argv[1]
'''
load models
'''
def load_model_weights(modelPath):
if not os.path.exists(modelPath):
print ("ERROR Load Model : ", modelPath)
return None
mult_model, basemodel = densenet.get_model(False, 32, nclass)
multi_model = multi_gpu_model(basemodel, gpus=GPU_NUM)
multi_model.load_weights(modelPath)
weights = basemodel.get_weights()
return weights
'''
modelPaths = [
'weights_chn_eng_eroded_v3_resnet-08-1.13.h5',
'weights_chn_eng_eroded_v3_resnet-09-1.21.h5',
'weights_chn_eng_eroded_v3_resnet-10-1.16.h5',
'weights_chn_eng_eroded_v3_resnet-11-1.14.h5',
'weights_chn_eng_eroded_v3_resnet-12-1.20.h5',
'weights_chn_eng_eroded_v3_resnet-13-1.11.h5'
]
'''
modelPaths = [
'weights_chn_eng_eroded_v7_resnet-03-1.14.h5',
'weights_chn_eng_eroded_v7_resnet-04-1.15.h5'
]
weights_list = []
for modelPath in modelPaths:
weights_list.append(load_model_weights("./models/" + modelPath) )
print (weights_list[0])
new_weights = list()
for weights_list_tuple in zip(*weights_list):
new_weights.append(
[np.array(weights_).mean(axis=0)\
for weights_ in zip(*weights_list_tuple)])
multi_model, basemodel = densenet.get_model(False, 32, nclass)
basemodel.set_weights(new_weights)
basemodel.save("avg_model.h5")
#basemodel.save(sys.argv[2])
#basemodel.save("./new_model.h5")
#basemodel = multi_model
#model.load_weights(modelPath)
| 2,535 |
app_challenges/admin.py
|
Audiotuete/backend_challenge_api
| 0 |
2025917
|
from django.contrib import admin
from django.contrib.auth import get_user_model
from ordered_model.admin import OrderedModelAdmin
from .models import Challenge, ChallengeDate
User = get_user_model()
class ChallengeDateAdmin(OrderedModelAdmin):
model = ChallengeDate
list_display = ('challenge', 'event_name', 'event_location', 'move_up_down_links', 'order')
search_fields = ('challenge', 'order',)
class ChallengeDateInline(admin.TabularInline):
model = ChallengeDate
class ChallengeAdmin(admin.ModelAdmin):
model = Challenge
# readonly_fields = ['challenge_code',]
inlines = [ChallengeDateInline]
fieldsets = (('Challenge', {'fields': ('context', 'city', 'year', 'start_date', 'end_date', 'challenge_code', 'contact_info' )}),)
filter_horizontal = ('contact_info',)
actions = None
def formfield_for_manytomany(self, db_field, request, **kwargs):
if db_field.name == 'contact_info':
kwargs['queryset'] = User.objects.filter(is_challenge_contact = True)
return super().formfield_for_manytomany(db_field, request, **kwargs)
# def has_add_permission(self, request):
# return False
def has_delete_permission(self, request, obj=None):
return False
admin.site.register(Challenge, ChallengeAdmin)
admin.site.register(ChallengeDate, ChallengeDateAdmin)
| 1,305 |
rxbp/multicast/mixins/multicastsubscribermixin.py
|
MichaelSchneeberger/rx_backpressure
| 24 |
2025899
|
from abc import ABC, abstractmethod
from typing import Tuple, Callable, Optional
import rx
from rxbp.schedulers.trampolinescheduler import TrampolineScheduler
class MultiCastSubscriberMixin(ABC):
@property
@abstractmethod
def subscribe_schedulers(self) -> Tuple[TrampolineScheduler]:
...
def schedule_action(
self,
action: Callable[[], Optional[rx.typing.Disposable]],
index: int = None,
) -> rx.typing.Disposable:
if index is None:
index = len(self.subscribe_schedulers) - 1
else:
assert index < len(self.subscribe_schedulers), f'index "{index}" is out of range of "{len(self.subscribe_schedulers)}"'
def inner_schedule_action(
action: Callable[[], Optional[rx.typing.Disposable]],
current_index: int,
):
if current_index == index:
def inner_action(_, __):
return action()
else:
def inner_action(_, __):
return inner_schedule_action(
current_index=current_index + 1,
action=action,
)
with self.subscribe_schedulers[current_index].lock:
if self.subscribe_schedulers[current_index].idle:
disposable = self.subscribe_schedulers[current_index].schedule(inner_action)
return disposable
else:
return inner_action(None, None)
return inner_schedule_action(
current_index=0,
action=action,
)
| 1,655 |
pidevices/sensors/ads1x15.py
|
robotics-4-all/tektrain-robot-sw
| 0 |
2024237
|
from ..devices import Sensor
import Adafruit_ADS1x15
from time import sleep
import threading
from collections import deque
import numpy
class ADS1X15(Sensor):
_MAX_VALUE = 32767
_CHANNELS = 4
GAINS = numpy.array([[2/3, 6.144],
[1, 4.096],
[2, 2.048],
[4, 1.024],
[8, 0.512],
[16, 0.128]])
def __init__(self,
bus=1,
address=0x48,
v_ref=3.3,
averages=10,
max_data_length=100,
name=""):
"""Constructor"""
        self._bus = bus
        self._address = address
        self.v_ref = v_ref
        self._gain = self._find_gain()
        self._averages = averages
self._measurements = []
self._results = [4000] * self._CHANNELS
# threading stuff
self._lock = threading.Lock()
self._thread = None
self._thread_alive = False
for channel in range(0,4):
self._measurements.append(deque(maxlen=self._averages))
super(ADS1X15, self).__init__(name, max_data_length)
self.start()
def _find_gain(self):
"""Find the correct gain according to the given vref"""
gain = 2/3
for i in range(1, self.GAINS.shape[0]):
if self.GAINS[-i][1] > self.v_ref:
gain = int(self.GAINS[-i][0])
self.v_ref = self.GAINS[-i][1]
break
return gain
def start(self):
"""Initialize hardware and os resources."""
self.adc = Adafruit_ADS1x15.ADS1115(address=self._address,busnum=self._bus)
if not self._thread_alive:
self._thread_alive = True
self._thread = threading.Thread(target=self._update_channels, args=(), daemon=True)
self._thread.start()
def stop(self):
"""Free hardware and os resources."""
self._thread_alive = False
self._thread.join()
self.adc.stop_adc()
def _update_channels(self):
"""Periodically aquires the moving average of all adc channels"""
while self._thread_alive:
for channel in range(0, 4):
self._measurements[channel].append(self._read_channel(channel))
# to add lock
if len(self._measurements[channel]) == self._averages:
with self._lock:
self._results[channel] = sum(self._measurements[channel]) / self._averages
sleep(0.05)
print("ADC thread terminating...")
def _read_channel(self, channel):
"""Read a sigle's channel value"""
if 0 <= channel and channel < 4:
return self.adc.read_adc(channel, gain=self._gain)
def read(self, channel, SAVE=False):
"""Read result and transform it to voltage"""
with self._lock:
value = float(self._results[channel]) / self._MAX_VALUE * self.v_ref
if SAVE:
self.update_data(value)
return value
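# Illustrative usage sketch (requires the ADS1x15 hardware and the package's
# Sensor base class; the channel and polling interval are arbitrary):
#   adc = ADS1X15(v_ref=3.3)
#   for _ in range(5):
#       print(adc.read(channel=0))
#       sleep(0.2)
#   adc.stop()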
| 3,146 |
project/editorial/migrations/0058_auto_20171202_1140.py
|
cojennin/facet
| 25 |
2025249
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('editorial', '0057_auto_20171130_2247'),
]
operations = [
migrations.AlterField(
model_name='call',
name='expiration_date',
field=models.DateTimeField(help_text=b'Day/Time call ends.', null=True, blank=True),
),
]
| 457 |
posts/migrations/0001_initial.py
|
davidfactorial/ingroupjobboard
| 0 |
2025078
|
# Generated by Django 3.2.8 on 2021-11-01 03:04
from django.db import migrations, models
import django.db.models.deletion
import posts.models
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('display_name', models.CharField(max_length=255)),
('bio', models.TextField(blank=True, max_length=8191)),
('twitter', models.URLField(blank=True, max_length=1023)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False)),
('timestamp', models.DateTimeField(default=posts.models.current_time)),
('title', models.CharField(blank=True, max_length=255)),
('content', models.TextField(max_length=8191)),
('profile', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='posts', to='posts.profile')),
],
),
]
| 1,396 |
examples/collect_config_files.py
|
Trinity-College/py-space-platform
| 27 |
2026121
|
"""
DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER
Copyright (c) 2015 Juniper Networks, Inc.
All rights reserved.
Use is subject to license terms.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of the
License at http://www.apache.org/licenses/LICENSE-2.0.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations
under the License.
"""
from __future__ import print_function
import os
import errno
import logging.config
import argparse
from jnpr.space import rest
def main(args):
spc = rest.Space(args.space_URL, args.user, args.passwd)
try:
# Create the output directory
os.makedirs(os.path.abspath(args.out))
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
collect_config_files(spc, args.out)
def collect_config_files(spc, output_dir):
"""
Get all device config files from Space in batches of 500 each.
Then collect all versions of each file.
"""
start = 0
config_files = []
while True:
page = spc.config_file_management.config_files.get(
paging={'start': start, 'limit': 500})
config_files.extend(page)
start += len(page)
if len(page) < 500:
break
print("There are %d config files to process" % len(config_files))
for cf in config_files:
collect_config_file_versions(spc, cf, output_dir)
print("\nAll Over!!!")
def collect_config_file_versions(spc, cf, output_dir):
"""
Collect all versions for a given file
"""
print("Collecting file versions for device: ", cf.deviceName)
device_path_name = '/'.join([output_dir, str(cf.deviceName)])
versions = cf.config_file_versions.get()
for v in versions:
version_path_name = '/'.join([device_path_name,
str(v.versionId)])
if os.path.exists(os.path.abspath(version_path_name)):
continue # We already have stored this version
cfv = v.get()
store_version(version_path_name, cfv)
return cf.deviceName
def store_version(version_path_name, config_file_version):
"""
Store the given config file version into the local filesystem.
Each version goes under a separate directory and it contains a MANIFEST.mf
file in addition to the file which has the configuration text.
"""
try:
# Create the directory for this version
os.makedirs(os.path.abspath(version_path_name))
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
#
# Create a MANIFEST.mf file with details about this version
#
mf_name = '/'.join([version_path_name, 'MANIFEST.mf'])
with open(os.path.abspath(mf_name), 'w') as f:
f.write('Version Id: %s\n' % str(config_file_version.versionId))
f.write('Config File Size: %s\n' % \
str(config_file_version.configFileSize))
f.write('MD5: %s\n' % str(config_file_version.latestMD5))
f.write('Creation Time: %s\n' % str(config_file_version.creationTime))
f.write('Comment: %s\n' % str(config_file_version.comment))
#
# Store the configuration text contents for this version into a file.
# The name of the file is as given by Space and consists of the device
# name, version id, and creation time stamp.
#
contents_file_name = '/'.join([version_path_name,
str(config_file_version.fileName)])
with open(os.path.abspath(contents_file_name), 'w') as f:
f.write(config_file_version.content.text)
if __name__ == '__main__':
logging.config.fileConfig('../test/logging.conf')
parser = argparse.ArgumentParser()
parser.add_argument("space_URL", help="URL of the Space instance")
parser.add_argument("-u", "--user", help="Userid")
parser.add_argument("-p", "--passwd", help="Password")
parser.add_argument("-o", "--out", help="Output directory")
args = parser.parse_args()
main(args)
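# Example invocation (illustrative values):
#   python collect_config_files.py https://space.example.net -u admin -p secret -o ./config_backup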
| 4,352 |
inference/src/__MACRO__.py
|
Sergio0694/sepconv-gan
| 1 |
2025463
|
from colorama import init, Fore
# initializes colorama
init(autoreset=True)
def LOG(text):
'''Displays an extended info message to the screen, prefixed by the [INFO] tag.
text(str) -- the message to display
'''
print('{}[INFO]'.format(Fore.LIGHTBLUE_EX), end=' ')
print('{}{}'.format(Fore.LIGHTWHITE_EX, text), flush=True)
def INFO(text):
'''Displays an info message, with the >> prefix.
text(str) -- the message to display
'''
print('{} >>'.format(Fore.LIGHTCYAN_EX), end=' ')
print(text, flush=True)
def ERROR(text):
'''Displays an error message and exits the program automatically.
text(str) -- the message to display'''
print('{}[ERROR]'.format(Fore.LIGHTRED_EX), end=' ')
print('{}{}'.format(Fore.LIGHTWHITE_EX, text), flush=True)
exit(-1)
def BAR(x, y, info=''):
'''Displays a simple progress bar with some additional info.
x(int) -- the number of completed steps
y(int) -- the total number of steps to perform
int(str) -- additional info to print after the progress bar (optional)
'''
assert x >= 0 and x <= y
if x < y:
print('\r{}[{}{}]{}{}'.format(Fore.LIGHTYELLOW_EX, '=' * x, ' ' * (y - x), Fore.WHITE, info), end='', flush=True)
else:
print('', end='\r', flush=True) # reset the current line
def RESET_LINE(clean=False):
'''Resets the current line by writing a carriage return character.
clean(bool) -- indicates whether or not to overwrite the current line to clean it up
'''
if clean:
print('\r{}\r'.format(' ' * 100), end='', flush=True)
else:
print('\r', end='', flush=True)
| 1,656 |
pywxwork/base.py
|
renqiukai/pywxwork
| 2 |
2025020
|
import requests
import datetime
from loguru import logger
class base:
host_name = "https://qyapi.weixin.qq.com/cgi-bin"
token = None
def __init__(
self,
token: str
):
"""init
Args:
            token (str): access token that is appended to every API request.
"""
self.token = token
def request(self, api_name, method="GET", **kwargs):
url = f"{self.host_name}/{api_name}"
# headers = kwargs.get("headers", {})
# headers["content-type"] = "application/x-www-form-urlencoded"
# headers["accept-type"] = "application/json, text/javascript, */*; q=0.01"
# kwargs["headers"] = headers
params = kwargs.get("params", {})
if self.token:
params["access_token"] = self.token
kwargs["params"] = params
logger.debug(url)
logger.debug(kwargs)
response = requests.request(
method=method,
url=url,
**kwargs,
)
if response.status_code == 200:
return self.response(response.json())
logger.error({
"msg": "请求错误",
"data": response.json(),
})
def response(self, data):
return data
def get_api_domain_ip(self):
api_name = "get_api_domain_ip"
response = self.request(api_name=api_name,)
logger.debug(response)
return response
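# Illustrative usage (the token value is a placeholder):
#   client = base(token="<ACCESS_TOKEN>")
#   client.get_api_domain_ip()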
| 1,412 |
tools/pubsub2inbox/output/scc.py
|
y4nben/professional-services
| 0 |
2025972
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base import Output, NotConfiguredException
import json
from googleapiclient import discovery, errors
class SccOutput(Output):
def output(self):
if 'vars' in self.output_config:
additional_vars = self._jinja_expand_dict(
self.output_config['vars'], 'vars')
self.jinja_environment.globals = {
**additional_vars,
**self.jinja_environment.globals
}
if 'source' not in self.output_config:
raise NotConfiguredException(
'No Security Command Center source defined in configuration.')
source = self._jinja_expand_string(self.output_config['source'],
'source')
if 'finding_id' not in self.output_config:
raise NotConfiguredException(
'No Security Command Center finding ID defined in configuration.'
)
finding_id = self._jinja_expand_string(
str(self.output_config['finding_id']), 'finding_id')
if 'finding' not in self.output_config:
raise NotConfiguredException(
'No Security Command center finding content defined in configuration.'
)
finding = self._jinja_expand_dict(self.output_config['finding'],
'finding')
finding['name'] = '%s/findings/%s' % (source, finding_id)
if 'sourceProperties' in finding:
if not isinstance(finding['sourceProperties'], dict):
try:
props = json.loads(finding['sourceProperties'])
finding['sourceProperties'] = props
except Exception:
pass
scc_service = discovery.build('securitycenter',
'v1',
http=self._get_branded_http())
request = scc_service.organizations().sources().findings().create(
parent=source, findingId=finding_id, body=finding)
try:
request.execute()
except errors.HttpError as exc:
if exc.resp.status == 409:
self.logger.warn('Finding already in Security Command Center.',
extra={
'source': source,
'finding_id': finding_id
})
return
else:
raise (exc)
self.logger.info('Finding sent to Security Command Center!',
extra={
'source': source,
'finding_id': finding_id,
'finding': finding
})
| 3,393 |
routes/index.py
|
eugman/eugeneQuest
| 1 |
2024845
|
from app import app, db
from app.models import *
from app.config import *
import re
from typing import List
from flask import render_template, request, Response
from flask_sqlalchemy import SQLAlchemy
@app.route('/add', methods=['GET', 'POST'])
def add():
player = db.session.query(Player).get(1)
result = request.form
if result.get("new_daily"):
db.session.add(Daily(name=result.get("new_daily")))
db.session.commit()
return render_template("add.html", player = player)
@app.route('/', methods=['GET', 'POST'])
def index():
player = db.session.query(Player).get(1)
player.messages = ""
hour = datetime.datetime.now().hour
result = request.form
if result.get("bookid") and result.get("page"):
book_id = result.get("bookid")
book = db.session.query(Book).get(book_id)
page = int(result.get("page"))
book.current = page
if page >= book.pages:
book.completed = True
db.session.commit()
if result.get("complete"):
daily_id = result.get("daily_id")
daily = db.session.query(Daily).get(daily_id)
daily.completed = True
daily.completedLast = datetime.datetime.now()
daily.rest = daily.restDuration
db.session.commit()
player.messages += addPoints(db, daily.totalPoints())
if result.get("bg"):
if 80 < int(result.get("bg")) < 140:
player.messages += addPoints(db, 5)
db.session.add(BG(BG=result.get("bg"), insulin=result.get("insulin")))
db.session.commit()
if result.get("bookid") and result.get("page"):
book_id = result.get("bookid")
book = db.session.query(Book).get(book_id)
book.page = int(result.get("page"))
db.session.commit()
if result.get("snooze_daily"):
daily_id = result.get("daily_id")
daily = db.session.query(Daily).get(daily_id)
daily.snooze = hour + int(result.get("snooze_daily"))
db.session.commit()
print(hour)
if result.get("reset_dailies"):
Daily.query.update({Daily.completed: False})
db.session.commit()
books = Book.query.all()
vacation = player.vacation
print(vacation)
allDailies = getQuests(vacation, "Main", "All")
stats = DailyStats(allDailies)
openDailies = getQuests(vacation, "Main", "Open")
openSideQuests = getQuests(vacation, "Side", "Open", 0)
if len(openSideQuests) == 0:
openSideQuests = getQuests(vacation, "Side", "Open", 1) + getQuests(vacation, "Bonus","Open", 0)
if len(openSideQuests) == 0:
openSideQuests = getQuests(vacation, "Bonus", "Open", 1)
completedDailies = getQuests(vacation, "Main", "Completed")
missedDailies = getQuests(vacation, "Main", "Missed")
return render_template("index.html", dailies = openDailies, completed = completedDailies, missed = missedDailies, sideQuests = openSideQuests, stats = stats, player = player, books = books)
def getQuests(vacation:int, subtype:str = "Main", status:str = "Open", sideQuestRest:int = 0) -> List[Daily]:
"""Takes in types of quests and returns a list of dailies."""
hour = datetime.datetime.now().hour
isWork = 1 if datetime.datetime.today().weekday() in (0, 1, 2, 3, 4) and 9 <= hour < 18 and hour != 12 else -1
query = Daily.query
#Filter based on the category of quest
if subtype == "Main":
query = query.filter(Daily.subtype != "Side", Daily.subtype != "Bonus")
elif subtype == "Side":
query = query.filter(Daily.subtype == "Side", Daily.rest <= sideQuestRest)
else:
query = query.filter(Daily.subtype == "Bonus", Daily.rest <= sideQuestRest)
#Filter based on the Status
if status == "Open":
query = query.filter_by(completed = False).filter(Daily.availableAfter <= hour, Daily.availableUntil > hour, Daily.snooze < hour)
if subtype == "Main":
query = query.order_by(Daily.points.desc(), "availableAfter", "availableUntil")
else:
query = query.order_by("rest", Daily.points.desc(), "completedLast")
elif status == "Missed":
query = query.filter_by(completed = False).filter(hour >= Daily.availableUntil)
query = query.order_by("availableAfter", "availableUntil")
elif status == "Completed":
query = query.filter_by(completed = True)
query = query.order_by("availableAfter", "availableUntil")
else:
pass
dailies = query.all()
dailies = list(filter(lambda x: x.isWork == 0 or x.isWork == isWork, dailies))
dailies = list(filter(lambda x: x.vacation == 0 or x.vacation == vacation, dailies))
return list(dailies)
| 4,791 |
tests/test_asynchronous_get.py
|
aliel/aleph-client
| 0 |
2025524
|
import pytest
from aleph_client.asynchronous import get_messages, fetch_aggregates, get_fallback_session, \
fetch_aggregate
@pytest.mark.asyncio
async def test_fetch_aggregate():
get_fallback_session.cache_clear()
response = await fetch_aggregate(
address="0xa1B3bb7d2332383D96b7796B908fB7f7F3c2Be10",
key="corechannel"
)
assert response.keys() == {"nodes"}
@pytest.mark.asyncio
async def test_fetch_aggregates():
get_fallback_session.cache_clear()
response = await fetch_aggregates(
address="0xa1B3bb7d2332383D96b7796B908fB7f7F3c2Be10"
)
assert response.keys() == {"corechannel"}
assert response["corechannel"].keys() == {"nodes"}
@pytest.mark.asyncio
async def test_get_posts():
get_fallback_session.cache_clear()
response = await get_messages(
pagination=2,
)
assert response.keys() == {
'messages',
'pagination_page',
'pagination_total',
'pagination_per_page',
'pagination_item'
}
messages = response['messages']
assert set(messages[0].keys()).issuperset({
'_id',
'chain',
'item_hash',
'sender',
'type',
'channel',
'confirmed',
'content',
'item_content',
'item_type',
'signature',
'size',
'time',
# 'confirmations',
})
@pytest.mark.asyncio
async def test_get_messages():
get_fallback_session.cache_clear()
response = await get_messages(
pagination=2,
)
assert response.keys() == {
'messages',
'pagination_page',
'pagination_total',
'pagination_per_page',
'pagination_item'
}
messages = response['messages']
assert set(messages[0].keys()).issuperset({
'_id',
'chain',
'item_hash',
'sender',
'type',
'channel',
'confirmed',
'content',
'item_content',
'item_type',
'signature',
'size',
'time',
# 'confirmations',
})
| 2,082 |
src/rez/data/tests/builds/packages/foo/1.1.0/foo/__init__.py
|
alexey-pelykh/rez
| 0 |
2026000
|
import os
__version__ = os.getenv("REZ_FOO_VERSION")
def report():
return "hello from foo-%s" % __version__
| 114 |
PyTorch/dist_matrix.py
|
TSLNIHAOGIT/VRP_DRL_MHA
| 55 |
2025094
|
import torch
import math
import numpy as np
from data import generate_data
def get_dist(n1, n2):
x1,y1,x2,y2 = n1[0],n1[1],n2[0],n2[1]
if isinstance(n1, torch.Tensor):
return torch.sqrt((x2-x1).pow(2)+(y2-y1).pow(2))
elif isinstance(n1, (list, np.ndarray)):
return math.sqrt(pow(x2-x1,2)+pow(y2-y1,2))
else:
raise TypeError
def get_dist_matrix(points, digit = 2):
n = len(points)
dist = [[0 for i in range(n)] for i in range(n)]
for i in range(n):
for j in range(i, n):
two = get_dist(points[i], points[j])
dist[i][j] = dist[j][i] = round(float(two), digit)
return dist
if __name__ == '__main__':
""" x[0] -- depot_xy: (batch, 2)
x[1] -- customer_xy: (batch, n_nodes-1, 2)
xy: (batch, n_nodes, 2)
"""
batch = 0
x = generate_data()
xy = torch.cat([x[0][:,None,:], x[1]], dim = 1)
print(xy.size())
dist = get_dist_matrix(xy[batch])
print(dist*20)
| 885 |
msg_handler.py
|
vapehacker/bili-auth
| 8 |
2023313
|
#!/bin/python3
import time
import re
import requests
from random import random
import bili_utils
import auth_handler
sendCD = 1
patt = re.compile(r'^\s*?"?(\S+?)\((\S+?)\)"?\s*?$', re.IGNORECASE)
ackMts = int(time.time() * 1000)
lastSendTs = 0
def checkMsg():
global ackMts
msgList = bili_utils.getNewMsg(ackMts)
for m in msgList:
uid = m['uid']
content = m['content']
ts = m['ts']
result = patt.search(content)
if result:
action = result.group(1).lower()
arg = result.group(2).lower()
if action in ('auth', 'revoke'):
cmdHandler(uid, action, arg)
ackMts = max(ts * 1000, ackMts)
def cmdHandler(uid, action, arg):
userInfo = bili_utils.getUserInfo(uid)
if action == 'auth':
if auth_handler.checkVerify(arg, **userInfo):
info = auth_handler.getVerifyInfo(arg)
reply = '验证完成。 主体: {} 。如果此次验证不是由您发起, 请回复"revoke({})"以撤销此次验证。此消息为自动发出, 请勿发送闲聊信息。'
reply = reply.format(info['subject'], arg)
else:
reply = '未找到此验证信息, 可能是此验证信息已过期。'
sendText(uid, reply)
if action == 'revoke':
if auth_handler.revokeVerify(arg, uid):
reply = '撤销成功。验证id: {}。'
reply = reply.format(arg)
else:
reply = '未找到此id对应的与您相关的可撤销验证信息。'
sendText(uid, reply)
def sendText(uid, content):
print('[send]', uid, content)
sleepTime = lastSendTs + sendCD - time.time()
if sleepTime > 0:
time.sleep(sleepTime)
print(bili_utils.sendMsg(uid, content))
def mainLoop():
while True:
try:
checkMsg()
except requests.exceptions.ConnectionError as e:
print(e)
time.sleep(4 + random() * 2)
| 1,759 |
scripts/nmf/reorder_factors_manual.py
|
morrislab/plos-medicine-joint-patterns
| 0 |
2024577
|
"""
Reorders factors in a manual fashion.
"""
import numpy as np
import pandas as pd
from click import *
from logging import *
from sklearn.decomposition import NMF
from sklearn.externals import joblib
@command()
@option(
'--model-input',
required=True,
help='the Pickle file to read the model from')
@option(
'--basis-input',
required=True,
help='the CSV file to read the basis matrix from')
@option(
'--score-input', required=True, help='the CSV file to read scores from')
@option(
'--model-output',
required=True,
help='the Pickle file to write the model to')
@option(
'--basis-output',
required=True,
help='the CSV file to write the basis matrix to')
@option(
'--score-output', required=True, help='the CSV file to write scores to')
@option(
'--at-end',
type=int,
multiple=True,
help='the factors to move to the end (multiple permitted)')
def main(model_input, basis_input, score_input, model_output, basis_output,
score_output, at_end):
basicConfig(
level=INFO,
handlers=[
StreamHandler(), FileHandler(
'{}.log'.format(model_output), mode='w')
])
# Load the model.
info('Loading model')
model = joblib.load(model_input)
info('Loading basis matrix')
basis = pd.read_csv(basis_input, index_col=0)
info('Result: {}'.format(basis.shape))
info('Loading scores')
scores = pd.read_csv(score_input, index_col=0)
info('Result: {}'.format(scores.shape))
# Generate a new ordering for the factors.
info('Reordering factors')
factor_order = np.arange(model.components_.shape[0], dtype=int) + 1
if at_end:
at_end = np.array(at_end, dtype=int)
factor_order = np.concatenate(
[np.setdiff1d(factor_order, at_end), at_end])
model.components_ = model.components_[factor_order - 1]
basis = basis.iloc[:, factor_order - 1]
basis.columns = factor_order
scores = scores.iloc[:, factor_order - 1]
scores.columns = factor_order
# Write the output.
info('Writing output')
joblib.dump(model, model_output)
basis.to_csv(basis_output)
scores.to_csv(score_output)
if __name__ == '__main__':
main()
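# Example invocation (file names are hypothetical):
#   python reorder_factors_manual.py --model-input nmf.pkl --basis-input basis.csv \
#       --score-input scores.csv --model-output nmf_reordered.pkl \
#       --basis-output basis_reordered.csv --score-output scores_reordered.csv \
#       --at-end 3 --at-end 5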
| 2,266 |
Doc/tools/docutils/languages/es.py
|
cocoatomo/Python3.2_C_API_Tutorial
| 2 |
2025618
|
# -*- coding: utf-8 -*-
# $Id: es.py 78909 2010-03-13 10:49:23Z georg.brandl $
# Author: <NAME> <<EMAIL>>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Spanish-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
labels = {
'author': u'Autor',
'authors': u'Autores',
'organization': u'Organizaci\u00f3n',
'address': u'Direcci\u00f3n',
'contact': u'Contacto',
'version': u'Versi\u00f3n',
'revision': u'Revisi\u00f3n',
'status': u'Estado',
'date': u'Fecha',
'copyright': u'Copyright',
'dedication': u'Dedicatoria',
'abstract': u'Resumen',
'attention': u'\u00a1Atenci\u00f3n!',
'caution': u'\u00a1Precauci\u00f3n!',
'danger': u'\u00a1PELIGRO!',
'error': u'Error',
'hint': u'Sugerencia',
'important': u'Importante',
'note': u'Nota',
'tip': u'Consejo',
'warning': u'Advertencia',
'contents': u'Contenido'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
u'autor': 'author',
u'autores': 'authors',
u'organizaci\u00f3n': 'organization',
u'direcci\u00f3n': 'address',
u'contacto': 'contact',
u'versi\u00f3n': 'version',
u'revisi\u00f3n': 'revision',
u'estado': 'status',
u'fecha': 'date',
u'copyright': 'copyright',
u'dedicatoria': 'dedication',
u'resumen': 'abstract'}
"""Spanish (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
| 1,918 |
chals/web/docs/run.py
|
adamyi/Geegle3
| 20 |
2025607
|
import re
import sys
import os
from gunicorn.app.wsgiapp import run
os.system("python -m compileall /app/chals/web/docs/image.binary.runfiles/")
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.argv.append("--workers=4")
sys.argv.append("app:app")
sys.argv.append("-b")
sys.argv.append("0.0.0.0:80")
sys.exit(run())
| 339 |
Lab_4/daj.py
|
gradampl/MONTY
| 0 |
2023159
|
def count_calls():
count_calls.counter += 1
print("I am called for the " + str(count_calls.counter) + " time.")
count_calls.counter = 0
def count_calls2(i=[0]):
i[0] += 1
print("And I am called for the " + str(i[0]) + " time.")
for i in range(10):
count_calls()
count_calls2()
# Result:
# I am called for the 1 time.
# And I am called for the 1 time.
# I am called for the 2 time.
# And I am called for the 2 time.
# I am called for the 3 time.
# And I am called for the 3 time.
# I am called for the 4 time.
# And I am called for the 4 time.
# I am called for the 5 time.
# And I am called for the 5 time.
# I am called for the 6 time.
# And I am called for the 6 time.
# I am called for the 7 time.
# And I am called for the 7 time.
# I am called for the 8 time.
# And I am called for the 8 time.
# I am called for the 9 time.
# And I am called for the 9 time.
# I am called for the 10 time.
# And I am called for the 10 time.
#
# Process finished with exit code 0
| 999 |
object_detection_pixell/test.py
|
Arshad9544/object_detection_pixell
| 7 |
2024684
|
from object_detection_pixell.dataloader import LeddartechDataset
from object_detection_pixell import metrics
from object_detection_pixell import models
from object_detection_pixell.utils import get_state_dict
from ignite.contrib.handlers import tqdm_logger
from ignite.engine import create_supervised_evaluator
import torch
from torch.utils.data import DataLoader
import argparse
import os
from ruamel import yaml
FILEPATH = os.path.dirname(os.path.abspath(__file__))
def main(cfg, state, plot=False):
# Dataloaders
dataset = LeddartechDataset(cfg, use_test_set=True)
test_loader = DataLoader(dataset, batch_size=cfg['TRAINING']['BATCH_SIZE'], num_workers=cfg['TRAINING']['NUM_WORKERS'])
print(f"Dataset size: {len(dataset)}")
# Model
in_channels = dataset.check_number_channels()
model = getattr(models, cfg['NEURAL_NET']['NAME'])(cfg, in_channels)
print(f"Model size: {model.size_of_net}")
if cfg['TRAINING']['DEVICE'] == 'cuda' and torch.cuda.device_count() > 1: #Multi GPUs
model = torch.nn.DataParallel(model)
model.to(cfg['TRAINING']['DEVICE'])
print(f"Device set to: {cfg['TRAINING']['DEVICE']}")
# Load model state
state_dict = get_state_dict(state, device=cfg['TRAINING']['DEVICE'])
model.load_state_dict(state_dict)
model.eval()
# Evaluator engine
eval_metrics = {}
for metric in cfg['TRAINING']['METRICS']:
eval_metrics[metric] = getattr(metrics, metric)(cfg, **cfg['TRAINING']['METRICS'][metric])
evaluator = create_supervised_evaluator(model, metrics=eval_metrics, device=cfg['TRAINING']['DEVICE'])
pbar2 = tqdm_logger.ProgressBar(persist=True, desc='Testing')
pbar2.attach(evaluator)
# Start testing
evaluator.run(test_loader)
print('Test results: ', evaluator.state.metrics)
if plot:
for metric in cfg['TRAINING']['METRICS']:
if hasattr(eval_metrics[metric], 'make_plot'):
eval_metrics[metric].make_plot(evaluator.state.metrics)
return evaluator.state.metrics
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--cfg')
parser.add_argument('--state')
parser.add_argument('--plot', type=bool, default=False)
args = parser.parse_args()
with open(args.cfg, 'r') as f:
cfg = yaml.safe_load(f)
main(cfg, state=args.state, plot=args.plot)
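# Example invocation (paths are hypothetical):
#   python test.py --cfg config.yml --state results/model_state.pth --plot True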
| 2,393 |
pages/themes/beginners/iteratorsGenerators/labs/demo.py
|
ProgressBG-Python-Course/ProgressBG-VC2-Python
| 0 |
2025786
|
class EvensRange:
    def __init__(self, min, max):
        self.num = min if min % 2 == 0 else min + 1  # first even number >= min
        self.max = max
    def __next__(self):
        if self.num > self.max:
            raise StopIteration
        value = self.num
        self.num += 2
        return value
    def __iter__(self):
        return self
evens = EvensRange(1, 9)
for i in evens:
print(i)
# 2, 4, 6, 8
| 233 |
cs20/2_basic_constant.py
|
wdxtub/deep-learning-note
| 37 |
2024121
|
import tensorflow as tf
a = tf.constant([2, 2], name='a')
b = tf.constant([[0, 1], [2, 3]], name='b')
x = tf.multiply(a, b, name='mul')  # computed with NumPy-style broadcasting
zeros = tf.zeros([2, 3], tf.int32)
ones = tf.ones([3, 2], dtype=tf.int32)
zeros_like = tf.zeros_like(zeros)
ones_like = tf.ones_like(ones)
fill_six = tf.fill([2, 3], 6)
lin_space = tf.lin_space(1.0, 8.0, 4)
six_range = tf.range(6)
limit_range = tf.range(3, 18, 3)
# draw random numbers from a normal distribution (standard normal by default)
random_normal = tf.random_normal([3, 3])
# values more than two standard deviations away from the mean are re-drawn
truncated_normal = tf.truncated_normal([3, 3])
# draw random values from a uniform distribution, floats in [0, 1) by default
random_uniform = tf.random_uniform([3, 3])
# randomly permutes (or keeps) some of the rows on each run
random_shuffle = tf.random_shuffle([3, 3])
# random_crop is mainly used for cropping images, not shown here
# draw samples from a multinomial distribution: returns column indices according to the probabilities
multinomial = tf.multinomial(random_normal, 5)
# sample from gamma distributions; each yields values according to the given shape parameters
random_gamma = tf.random_gamma([3, 3], 1, 2)
# set the random seed
tf.set_random_seed(314)
with tf.Session() as sess:
print('x')
print(sess.run(x))
print('zeros')
print(sess.run(zeros))
print('ones')
print(sess.run(ones))
print('zeros_like')
print(sess.run(zeros_like))
print('ones_like')
print(sess.run(ones_like))
print('fill_six')
print(sess.run(fill_six))
print('lin_space')
print(sess.run(lin_space))
print('six_range')
print(sess.run(six_range))
print('limit_range')
print(sess.run(limit_range))
print('random_normal')
print(sess.run(random_normal))
print('truncated_normal')
print(sess.run(truncated_normal))
print('random_uniform')
print(sess.run(random_uniform))
print('random_shuffle')
print(sess.run(random_shuffle))
print('multinomial')
print(sess.run(multinomial))
print('random_gamma')
print(sess.run(random_gamma))
print('--------------')
print('a')
print(sess.run(a))
print('b')
print(sess.run(b))
print('div(b,a)')
print(sess.run(tf.div(b, a)))
print('divide(b, a)')
print(sess.run(tf.divide(b, a)))
print('truediv(b, a)')
print(sess.run(tf.truediv(b, a)))
print('floordiv(b, a)')
print(sess.run(tf.floordiv(b, a)))
    print('realdiv(b, a)')  # requires real (floating-point) inputs
#print(sess.run(tf.realdiv(b, a)))
print('truncatediv(b, a)')
print(sess.run(tf.truncatediv(b, a)))
print('floor_div(b, a)')
print(sess.run(tf.floor_div(b, a)))
| 2,362 |
src/config/api.py
|
COAStatistics/alss-dev
| 0 |
2024925
|
from rest_framework.routers import DefaultRouter
from .views import ExportViewSet
api = DefaultRouter()
api.trailing_slash = "/?"
api.register(r"export", ExportViewSet, basename='export')
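# Illustrative wiring in a project urls.py (assumption, not part of this module):
#   from django.urls import include, path
#   urlpatterns = [path("api/", include(api.urls))]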
| 190 |
tinynet/rnn1l.py
|
giuse/tinynet
| 5 |
2025771
|
import numpy as np
class RNN1L:
"""Recurrent neural network, single layer
- `self.state` represents: last inputs + fixed 1 (bias) + last activation
- weights are thus order the same way in each row of the weights matrix
- rows in the weight matrix correspond to weights of connections entering a same neuron
- cols in the weight matrix correspond to connections from the same input
"""
def __init__(self, ninputs, nneurs, act_fn=np.tanh, init_weights=None):
self.ninputs = ninputs
self.nneurs = nneurs
self.act_fn = act_fn # make sure it applies element-wise to a np array
self.state_size = self.ninputs + 1 + self.nneurs
self.reset_state()
self.weights_matrix = self.init_weights()
        if init_weights is not None: self.set_weights(init_weights)
# state index accessors
self.input_idxs = range(0, ninputs)
        self.bias_idxs = range(ninputs, ninputs + 1)  # the fixed bias element
self.act_idxs = range(ninputs + 1, ninputs + 1 + nneurs)
def init_weights(self):
return np.random.randn(self.nneurs, self.state_size)
def set_weights(self, weights):
assert weights.size == self.weights_matrix.size, "Wrong number of weights"
self.weights_matrix = weights.reshape(self.weights_matrix.shape)
self.reset_state()
def reset_state(self):
self.state = np.zeros(self.state_size)
self.state[self.ninputs] = 1 # bias -- should never be changed!
def activate(self, inputs):
"""Activate the neural network
- Overwrite the new inputs in the initial part of the state
- Execute dot product with weight matrix
- Pass result to activation function
"""
self.state[self.input_idxs] = inputs
net = np.dot(self.weights_matrix, self.state)
self.state[self.act_idxs] = self.act_fn(net)
return self.get_act()
def last_input(self):
return self.state[self.input_idxs]
def get_act(self):
return self.state[self.act_idxs]
def nweights(self):
return self.weights_matrix.size
def nweights_per_neur(self):
return self.weights_matrix.shape[1]
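if __name__ == '__main__':
    # Minimal usage sketch (layer sizes and step count are arbitrary): drive a
    # 3-input, 2-neuron recurrent layer for a few steps with random inputs.
    net = RNN1L(ninputs=3, nneurs=2)
    for _ in range(5):
        out = net.activate(np.random.randn(3))
    print(out)  # activations of the 2 neurons after the last step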
| 2,180 |
iris_sdk/models/data/note.py
|
NumberAI/python-bandwidth-iris
| 2 |
2025675
|
#!/usr/bin/env python
from iris_sdk.models.base_resource import BaseData
from iris_sdk.models.maps.note import NoteMap
class NoteData(NoteMap, BaseData):
pass
| 164 |
core/categories.py
|
marakmuna/dotfiles
| 0 |
2024427
|
#!/usr/bin/env python
categories = {
1:['information_gathering',
['acccheck', 'ace-voip', 'amap', 'automater', 'braa', 'casefile', 'cdpsnarf', 'cisco-torch',
'cookie-cadger', 'copy-router-config', 'dmitry', 'dnmap', 'dnsenum', 'dnsmap', 'dnsrecon',
'dnstracer', 'dnswalk', 'dotdotpwn', 'enum4linux', 'enumiax', 'fierce', 'firewalk', 'fragroute',
'fragrouter', 'ghost-phisher', 'golismero', 'goofile', 'xplico', 'hping3', 'intrace', 'ismtp',
'lbd', 'maltego-teeth', 'masscan', 'metagoofil', 'miranda', 'nbtscan-unixwiz', 'nmap', 'p0f',
'parsero', 'recon-ng', 'set', 'smtp-user-enum', 'snmpcheck', 'sslcaudit', 'sslsplit', 'sslstrip',
'sslyze', 'thc-ipv6', 'theharvester', 'tlssled', 'twofi', 'urlcrazy','wireshark', 'wol-e']
],
2:['vulnerability_analysis',
['bbqsql', 'bed', 'cisco-auditing-tool', 'cisco-global-exploiter', 'cisco-ocs', 'cisco-torch',
'copy-router-config', 'doona', 'dotdotpwn', 'greenbone-security-assistant', 'hexorbase', 'jsql',
'lynis', 'nmap', 'ohrwurm', 'openvas-administrator', 'openvas-cli', 'openvas-manager', 'openvas-scanner',
'oscanner', 'powerfuzzer', 'sfuzz', 'sidguesser', 'siparmyknife', 'sqlmap', 'sqlninja', 'sqlsus',
'thc-ipv6', 'tnscmd10g', 'unix-privesc-check', 'yersinia']
],
3:['wireless_attacks',
['aircrack-ng', 'asleap', 'bluelog', 'blueranger', 'bluesnarfer', 'bully', 'cowpatty', 'crackle',
'eapmd5pass', 'fern-wifi-cracker', 'ghost-phisher', 'giskismet', 'gqrx', 'hostapd-wpe', 'kalibrate-rtl',
'killerbee', 'kismet', 'mdk3', 'mfcuk', 'mfoc', 'mfterm', 'multimon-ng', 'pixiewps', 'reaver', 'redfang',
'rtlsdr-scanner', 'spooftooph', 'wifi-honey', 'wifiphisher', 'wifitap', 'wifite']
],
4:['web_applications',
['apache-users', 'arachni', 'bbqsql', 'blindelephant', 'burpsuite', 'cutycapt', 'davtest', 'deblaze',
'dirb', 'dirbuster', 'fimap', 'funkload', 'gobuster', 'grabber', 'jboss-autopwn', 'joomscan', 'jsql',
'maltego-teeth', 'padbuster', 'paros', 'parsero', 'plecost', 'powerfuzzer', 'proxystrike', 'recon-ng',
'skipfish', 'sqlmap', 'sqlninja', 'sqlsus', 'ua-tester', 'uniscan', 'vega', 'w3af', 'webscarab',
'websploit', 'wfuzz', 'wpscan', 'xsser', 'zaproxy']
],
5:['sniffing_spoofing',
['burpsuite', 'dnschef', 'fiked', 'hamster-sidejack', 'hexinject', 'iaxflood', 'inviteflood', 'ismtp',
'isr-evilgrade', 'mitmproxy', 'ohrwurm', 'protos-sip', 'rebind', 'responder', 'rtpbreak', 'rtpinsertsound',
'rtpmixsound', 'sctpscan', 'siparmyknife', 'sipp', 'sipvicious', 'sniffjoke', 'sslsplit', 'sslstrip',
'thc-ipv6', 'voiphopper', 'webscarab', 'wifi-honey', 'wireshark', 'xspy', 'yersinia', 'zaproxy']
],
6:['maintaining_access',
['cryptcat', 'cymothoa', 'dbd', 'dns2tcp', 'http-tunnel', 'httptunnel', 'intersect', 'nishang', 'polenum',
'powersploit', 'pwnat', 'ridenum', 'sbd', 'u3-pwn', 'webshells', 'weevely', 'winexe']
],
7:['reporting_tools',
['casefile', 'cutycapt', 'dos2unix', 'dradis', 'keepnote', 'magictree', 'metagoofil', 'nipper-ng', 'pipal']
],
8:['exploitation_tools',
['armitage', 'backdoor-factory', 'beef-xss', 'cisco-auditing-tool', 'cisco-global-exploiter', 'cisco-ocs',
'cisco-torch', 'crackle', 'exploitdb', 'jboss-autopwn', 'linux-exploit-suggester', 'maltego-teeth', 'set',
'shellnoob', 'sqlmap', 'thc-ipv6', 'yersinia']
],
9:['forensics_tools',
['binwalk', 'bulk-extractor', 'chntpw', 'cuckoo', 'dc3dd', 'ddrescue', 'python-distorm3', 'dumpzilla',
'volatility', 'xplico', 'foremost', 'galleta', 'guymager', 'iphone-backup-analyzer', 'p0f', 'pdf-parser',
'pdfid', 'pdgmail', 'peepdf', 'extundelete']
],
10:['stress_testing',
['dhcpig', 'funkload', 'iaxflood', 'inviteflood', 'ipv6-toolkit', 'mdk3', 'reaver', 'rtpflood',
'slowhttptest', 't50', 'termineter', 'thc-ipv6', 'thc-ssl-dos']
],
11:['password_attacks',
['acccheck', 'burpsuite', 'cewl', 'chntpw', 'cisco-auditing-tool', 'cmospwd', 'creddump', 'crunch',
'findmyhash', 'gpp-decrypt', 'hash-identifier', 'hexorbase', 'hydra', 'john', 'johnny', 'keimpx',
'maltego-teeth', 'maskprocessor', 'multiforcer', 'ncrack', 'oclgausscrack', 'pack', 'patator', 'polenum',
'rainbowcrack', 'rcracki-mt', 'rsmangler', 'statsprocessor', 'thc-pptp-bruter', 'truecrack', 'webscarab',
'wordlists', 'zaproxy']
],
12:['reverse_engineering',
['apktool', 'dex2jar', 'python-distorm3', 'edb-debugger', 'jad', 'javasnoop', 'smali', 'valgrind', 'yara']
],
13:['hardware_hacking',
[ 'android-sdk', 'apktool', 'arduino', 'dex2jar', 'sakis3g', 'smali']
],
14:['extra',
['kali-linux', 'kali-linux-full', 'kali-linux-all', 'kali-linux-top10', 'kali-linux-forensic',
'kali-linux-gpu', 'kali-linux-pwtools', 'kali-linux-rfid', 'kali-linux-sdr', 'kali-linux-voip',
'kali-linux-web', 'kali-linux-wireless', 'squid3']
]
}
| 4,715 |
promoterz/representation/oldschool.py
|
emillj/gekkoJaponicus
| 0 |
2026105
|
#!/bin/python
import random
import json
import os
from copy import deepcopy
from deap import base
from deap import creator
from deap import tools
from deap import algorithms
import numpy as np
from .. import functions
def constructPhenotype(stratSettings, individue):
# THIS FUNCTION IS UGLYLY WRITTEN; USE WITH CAUTION;
# (still works :})
Strategy = individue.Strategy
R = lambda V, lim: ((lim[1]-lim[0])/100) * V + lim[0]
AttributeNames = sorted(list(stratSettings.keys()))
Phenotype = {}
for K in range(len(AttributeNames)):
Value = R(individue[K], stratSettings[AttributeNames[K]])
Phenotype[AttributeNames[K]] = Value
Phenotype = functions.expandNestedParameters(Phenotype)
return Phenotype
def createRandomVarList(IndSize):
VAR_LIST = [random.randrange(0,100) for x in range(IndSize)]
return VAR_LIST
def initInd(Criterion, Attributes):
w = Criterion()
IndSize =len(list(Attributes.keys()))
w[:] = createRandomVarList(IndSize)
return w
def getToolbox(Strategy, genconf, Attributes):
toolbox = base.Toolbox()
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list,
fitness=creator.FitnessMax, Strategy=Strategy)
toolbox.register("newind", initInd, creator.Individual, Attributes)
toolbox.register("population", tools.initRepeat, list, toolbox.newind)
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutUniformInt, low=10, up=10, indpb=0.2)
toolbox.register("constructPhenotype", constructPhenotype, Attributes)
return toolbox
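# Illustrative usage sketch (the attribute limits below are hypothetical; in the
# real project the toolbox is driven by the promoterz evolution loop):
#   attrs = {"short": (5, 20), "long": (20, 80)}
#   toolbox = getToolbox("MACD", genconf={}, Attributes=attrs)
#   population = toolbox.population(n=30)
#   settings = toolbox.constructPhenotype(population[0])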
| 1,646 |
girlfriend/util/file_template.py
|
chihongze/girlfriend
| 83 |
2025992
|
# coding: utf-8
"""We often need to define templates for generating directories and files,
for example a template for a configuration file or a whole project directory
structure. This module helps to implement that kind of work easily.
"""
import os
import os.path
from abc import (
ABCMeta,
abstractproperty,
abstractmethod
)
from girlfriend.exception import GirlFriendBizException
class TemplateUnit(object):
    """Abstract template unit.
    """
__metaclass__ = ABCMeta
TYPE_FILE = "file"
TYPE_DIR = "dir"
    def __init__(self, name, access=None):
        """
        :param name directory or file name
        :param access file permissions; defaults to following the umask,
               accepts an octal number such as 0666
        """
self._name = name
self._access = access
@property
    def name(self):
        """Unit name.
        """
return self._name
@property
def access(self):
return self._access
@abstractproperty
    def unittype(self):
        """Unit type.
        """
pass
@abstractmethod
    def makeme(self, base_dir=os.getcwd()):
        """Create this unit and its child elements.
        """
pass
class Dir(TemplateUnit):
def __init__(self, name, access=None, elements=None):
super(Dir, self).__init__(name, access)
if elements:
self.elements = elements
else:
self.elements = []
@property
def unittype(self):
return TemplateUnit.TYPE_DIR
def append(self, element):
self.elements.append(element)
def makeme(self, base_dir=os.getcwd()):
        # create this directory itself first
myself_dir = os.path.join(base_dir, self.name)
if os.path.exists(myself_dir):
            raise DirAlreadyExistException(
                u"Directory '{}' already exists and cannot be created again".format(myself_dir))
os.mkdir(myself_dir)
if self.access:
os.chmod(myself_dir, self.access)
        # create the child elements
for element in self.elements:
element.makeme(myself_dir)
class File(TemplateUnit):
def __init__(self, name, access=None, content=""):
super(File, self).__init__(name, access)
self._content = content
@property
def unittype(self):
return TemplateUnit.TYPE_FILE
def makeme(self, base_dir):
myself_path = os.path.join(base_dir, self.name)
if os.path.exists(myself_path):
            raise FileAlreadyExistException(
                u"File '{}' already exists and cannot be created again".format(myself_path))
with open(myself_path, "w") as f:
f.write(self._content)
if self.access:
os.chmod(myself_path, self.access)
class DirAlreadyExistException(GirlFriendBizException):
    """Raised when the directory already exists.
    """
pass
class FileAlreadyExistException(GirlFriendBizException):
    """Raised when the file already exists.
    """
pass
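# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative only, not part of the original module).
# It builds a tiny project skeleton under the current directory; all names
# below are made up and the target directory must not already exist.
def _example_usage():
    skeleton = Dir("demo_project", elements=[
        Dir("conf", access=0o755, elements=[
            File("app.cfg", access=0o644, content="# demo config\n"),
        ]),
        File("README.txt", content="demo project\n"),
    ])
    skeleton.makeme()  # raises DirAlreadyExistException if 'demo_project' already exists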
| 2,596 |
app/page/main.py
|
wisedu1/Testing
| 0 |
2026151
|
from appium.webdriver.common.mobileby import MobileBy
from app.page.addresslist_page import AddressList
from app.page.base_page import BasePage
class Main(BasePage):
def goto_message(self):
pass
def goto_addresslist(self):
self._driver.find_element(MobileBy.XPATH, "//*[@text='通讯录']").click()
return AddressList(self._driver)
def goto_workbench(self):
pass
def goto_profile(self):
pass
| 449 |
buver/csemaphore.py
|
kenlowrie/buver
| 0 |
2025908
|
import os
import time
"""
This module implements the semaphore class which is used by buver.py.
These semaphores are implemented via the operating system's well known
file IO interfaces. In our case, we use the OS open() API along with
the O_CREAT flag, which guarantees that only one process or thread will
ever succeed.
The file is kept open during the entire duration of the process, and
is closed and then removed when the application is ready to give up the
semaphore.
wait() - create a new file in the target directory. keep it open ...
signal() - close the file created during wait(), then remove it
got_sem() - this API is used to return the current lock state
NOTE:
This implementation could cause starvation amongst processes, if
enough processes are simultaneously trying to access the same target
directory. However, this should not be the case in our situation, so
we are not handling that case at this time.
"""
class C_semaphore:
def __init__(self,path,filename='csemaphore.lock'):
"""Initialize the object by establishing a name for the mutex,
the path we want to apply the mutex to, and a flag that is
used to provide a quick state of the object."""
self.sem_name = os.path.abspath(os.path.join(path,filename))
self.sem_path = path
self.sem_locked = False
def got_sem(self):
"""This will return the state of the sem_locked flag."""
return self.sem_locked
def wait(self, maxtries = 120):
"""This is the method that obtains the mutex. It will
keep trying for 'maxtries' seconds, at which point it
gives up and returns a failure condition.
Returns:
            False - The semaphore was acquired
            True  - Unable to acquire the semaphore
"""
attempt = 1
got_it = 0
# If the directory is invalid, then just give up now.
if not os.path.isdir(self.sem_path): return True
# Try until we get it or we exceed the max tries ...
while got_it == 0 and attempt < maxtries:
try:
# This will throw an OSError exception when it fails
self.fd = os.open(self.sem_name,os.O_CREAT|os.O_EXCL)
except OSError:
# We didn't get it. Increment the number of tries and sleep for 1 second
attempt = attempt + 1
time.sleep(1)
continue
# Woo Hoo! We acquired the semaphore, set our flag so we'll exit
got_it = 1
if not got_it: return True
# Record the object state to reflect we own the semaphore
self.sem_locked = True
return False
def signal(self):
"""This is the method that releases the mutex.
Returns:
            False - The semaphore was released
            True  - Unable to release the semaphore
"""
# if the object doesn't reflect that we have the semaphore, bail
if self.sem_locked == False: return True
# Close the open file handle, catch any exceptions, but basically
# just print an error and keep going.
try:
os.close(self.fd)
except OSError:
print('Internal failure during semaphore release - close')
# Remove the file. This will allow the next call to wait() to succeed
# since the file will no longer exist. Catch any exceptions, but
# basically ignore them. If we cannot delete it, then that means some
# manual clean up is going to be required.
try:
os.remove(self.sem_name)
except OSError:
print('Internal failure during semaphore release - remove')
# Reflect that we have released our semaphore. This is here so that if
# the atexit() routine runs and attempts to release the semaphore, we
# will ignore the request. It isn't useful for anything else.
self.sem_locked = False
return False
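# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative only, not part of the original module).
# It locks the current directory for a short critical section; note that
# wait() and signal() return False on success and True on failure.
if __name__ == '__main__':
    sem = C_semaphore('.', filename='example.lock')
    if not sem.wait(maxtries=5):
        try:
            print('holding the lock, doing protected work')
        finally:
            sem.signal()   # remove the lock file so other processes can proceed
    else:
        print('could not acquire the lock within 5 seconds')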
| 4,227 |
cast/call.py
|
JinShiyin/sast_backend
| 0 |
2024642
|
# Import standard libraries and define helper functions
from pprint import pformat
# import PythonSDK
from PythonSDK.facepp import API, File
# Import the image processing class
import PythonSDK.ImagePro
# The following four items are the image resources used in the demo; replace them as needed
detech_img_url = 'http://bj-mc-prod-asset.oss-cn-beijing.aliyuncs.com/mc-official/images/face/demo-pic11.jpg'
faceSet_img = './imgResource/demo.jpeg'  # used to create the faceSet
face_search_img = './imgResource/search.png'  # used for face search
segment_img = './imgResource/segment.jpg'  # used for body segmentation
merge_img = './imgResource/merge.jpg'  # used for face merging
# This helper is used to print the information returned by the API
def print_result(hit, result):
print(hit)
print('\n'.join(" " + i for i in pformat(result, width=75).split('\n')))
def printFuctionTitle(title):
return "\n" + "-" * 60 + title + "-" * 60;
# Initialize the object that performs the API calls
api = API()
# -----------------------------------------------------------Face recognition-------------------------------------------
# Face detection: https://console.faceplusplus.com.cn/documents/4888373
# res = api.detect(image_url=detech_img_url, return_attributes="gender,age,smiling,headpose,facequality,"
# "blur,eyestatus,emotion,ethnicity,beauty,"
# "mouthstatus,skinstatus")
# print_result(printFuctionTitle("人脸检测"), res)
# Face comparison: https://console.faceplusplus.com.cn/documents/4887586
# compare_res = api.compare(image_file1=File(face_search_img), image_file2=File(face_search_img))
# print_result("compare", compare_res)
# Face search: https://console.faceplusplus.com.cn/documents/4888381
# Face search steps:
# 1. create a faceSet, used to store face information (face_token)
# 2. add face information (face_token) to the faceSet
# 3. start searching
# Delete the unused face set; it is deleted here, but in a real project check whether it should be deleted
# api.faceset.delete(outer_id='faceplusplus', check_empty=0)
# # 1. create a faceSet
# ret = api.faceset.create(outer_id='faceplusplus')
#
# # 2. add face information (face_token) to the faceSet
# faceResStr=""
# res = api.detect(image_file=File(faceSet_img))
# faceList = res["faces"]
# for index in range(len(faceList)):
# if(index==0):
# faceResStr = faceResStr + faceList[index]["face_token"]
# else:
# faceResStr = faceResStr + ","+faceList[index]["face_token"]
#
# api.faceset.addface(outer_id='faceplusplus', face_tokens=faceResStr)
#
# # 3. search for faces similar to the query face
# search_result = api.search(image_file=File(face_search_img), outer_id='faceplusplus')
# print_result('search', search_result)
# -----------------------------------------------------------Body recognition-------------------------------------------
# Body segmentation: https://console.faceplusplus.com.cn/documents/10071567
# segment_res = api.segment(image_file=File(segment_img))
# f = open('./imgResource/demo-segment.b64', 'w')
# f.write(segment_res["result"])
# f.close()
# print_result("segment", segment_res)
# # start segmentation
# PythonSDK.ImagePro.ImageProCls.getSegmentImg("./imgResource/demo-segment.b64")
# -----------------------------------------------------------Document OCR-------------------------------------------
# ID card recognition: https://console.faceplusplus.com.cn/documents/5671702
# ocrIDCard_res = api.ocridcard(image_url="https://gss0.bdstatic.com/94o3dSag_xI4khGkpoWK1HF6hhy/baike/"
# "c0%3Dbaike80%2C5%2C5%2C80%2C26/sign=7a16a1be19178a82da3177f2976a18e8"
# "/902397dda144ad34a1b2dcf5d7a20cf431ad85b7.jpg")
# print_result('ocrIDCard', ocrIDCard_res)
# Bank card recognition: https://console.faceplusplus.com.cn/documents/10069553
# ocrBankCard_res = api.ocrbankcard(image_url="http://pic.5tu.cn/uploads/allimg/1107/191634534200.jpg")
# print_result('ocrBankCard', ocrBankCard_res)
# -----------------------------------------------------------Image processing-------------------------------------------
# Face merging: https://console.faceplusplus.com.cn/documents/20813963
# The data for the template_rectangle parameter has to be obtained via the face detection API
# mergeFace_res = api.mergeface(template_file=File(segment_img), merge_file=File(merge_img),
# template_rectangle="130,180,172,172")
# print_result("mergeFace", mergeFace_res)
#
# # start merging
# PythonSDK.ImagePro.ImageProCls.getMergeImg(mergeFace_res["result"])
# Facial landmark detection: https://api-cn.faceplusplus.com/facepp/v1/face/thousandlandmark
landmarks = api.thousandlandmark(image_file=File(face_search_img), return_landmark="all")
landmarks = api.thousandlandmark(image_file=File(face_search_img), return_landmark="all")
print_result(printFuctionTitle("Facial landmark detection"), landmarks)
| 4,249 |
loauth/cijfer.py
|
satanu01/loauth
| 2 |
2025185
|
import hashlib
import hmac as hm
class Cijfer:
    '''Base Class for Signature Algorithms
    Methods - prepare_key(key), sign(key, msg), verify(key, msg, sign)
    Usage-
    Override the methods in an inheriting class to implement a new signature algorithm.
    Register the new cipher with the cijferpack using the cijferpack.register('name', 'object') function.
    class newCipher(Cijfer):
        def prepare_key(self, key):
            pass
        def sign(self, key, msg):
            pass
        def verify(self, key, msg, sign):
            pass
    '''
def prepare_key(self, key):
raise NotImplementedError
def sign(self, key, msg):
raise NotImplementedError
def verify(self, key, msg, sign):
raise NotImplementedError
class HMAC(Cijfer):
__hash_list = {'sha256': hashlib.sha256, 'sha512':hashlib.sha512 }
def __init__(self, hash_alg):
self.__hash_alg = self.__hash_list[hash_alg.lower()]
@staticmethod
def hash_list():
return ['sha256', 'sha512']
def sign(self, key, msg):
if not isinstance(key, bytes) or not isinstance(msg, bytes):
raise TypeError
else:
return hm.new(key, msg, self.__hash_alg).digest()
def prepare_key(self, key):
if isinstance(key, bytes):
return key
        if isinstance(key, str):
return key.encode('utf-8')
else:
raise TypeError
def verify(self, key, msg, sign):
return hm.compare_digest(sign, hm.new(key, msg, self.__hash_alg).digest() )
class Cijferpack:
cijfer_list = {'HS256': HMAC('sha256'), 'HS512': HMAC('sha512') }
def register(self, tag, new_cijfer):
self.cijfer_list[tag] = new_cijfer
return self.cijfer_list
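# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative only, not part of the original module).
# It signs/verifies a message with the built-in HS256 cipher and registers a
# made-up cipher to show the extension hook described in the Cijfer docstring.
if __name__ == '__main__':
    pack = Cijferpack()
    hs256 = pack.cijfer_list['HS256']
    signature = hs256.sign(b'secret-key', b'hello world')
    assert hs256.verify(b'secret-key', b'hello world', signature)

    class NullCijfer(Cijfer):
        """A do-nothing cipher, registered only to illustrate register()."""
        def prepare_key(self, key):
            return key
        def sign(self, key, msg):
            return b''
        def verify(self, key, msg, sign):
            return sign == b''

    pack.register('NONE', NullCijfer())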
| 1,539 |
usp_1/semana7/remove_repetidos.py
|
dialup/py
| 0 |
2025551
|
def remove_repetidos(n,*args):
sem_rep = []
for i in n:
if i not in sem_rep:
sem_rep.append(i)
return sorted(sem_rep)
#def p(*l):
# new_list = []
# for i in [*l]:
# if i in [*l]:
# new_list.append(i)
# print(new_list)
#def fim(*num):
# sem_repetidos = [*num]
# sem = list(set(sem_repetidos))
# print(sem)
| 375 |
modelexp/experiments/reflectometry/__init__.py
|
DomiDre/modelexp
| 0 |
2022743
|
from ._refl import Reflectometry
from ._pnrefl import PolarizedReflectometry
from ._pnsfrefl import PolarizedReflectometrySF
from ._simultaneous_xrr_nr import SimultaneousXRNR
| 175 |
tests/ui/test_tkui.py
|
mrshu/stash
| 1,822 |
2025581
|
"""
tests for the tkui
"""
import logging
from unittest import skipIf
from stash.tests.stashtest import StashTestCase
try:
from stash.system.shui.tkui import ShTerminal
except ImportError:
ShTerminal = None
class NoInitTkTerminal(ShTerminal):
"""
Subclass of ShTerminal which does not initiate the superclass
"""
def __init__(self, text=u""):
self._text = ""
self.text = text
self.logger = logging.getLogger('StaSh.Terminal')
@property
def text(self):
return self._text
@text.setter
def text(self, value):
self._text = value
@skipIf(ShTerminal is None, "No Tk-GUI available")
class TkTerminalTests(StashTestCase):
"""
Tests for stash.system.shui.tkui.ShTerminal
"""
tc = NoInitTkTerminal
def test_tk_index_conversion(self):
"""
Test conversion to and from a tk index to a tuple
"""
values = { # tk index -> expected
"1.0": (0, 0),
"1.1": (0, 1),
"2.0": (1, 0),
"2.2": (1, 2),
"10.11": (9, 11),
"9.2": (8, 2),
}
terminal = self.tc()
for tki in values:
expected = values[tki]
converted = terminal._tk_index_to_tuple(tki)
self.assertEqual(converted, expected)
# convert back
back = terminal._tuple_to_tk_index(converted)
self.assertEqual(back, tki)
def test_abs_rel_conversion_1(self):
"""
First test for conversion of absolute and relative indexes
"""
s = """0123
567
9
"""
values = { # rel -> abs
0: (0, 0),
1: (0, 1),
2: (0, 2),
3: (0, 3),
4: (0, 4),
5: (1, 0),
6: (1, 1),
7: (1, 2),
8: (1, 3),
9: (2, 0),
10: (2, 1),
}
terminal = self.tc(s)
for rel in values:
expected = values[rel]
ab = terminal._rel_cursor_pos_to_abs_pos(rel)
self.assertEqual(ab, expected)
# convert back
back = terminal._abs_cursor_pos_to_rel_pos(ab)
self.assertEqual(back, rel)
| 2,253 |
EISeg/tool/update_md5.py
|
JamesLim-sy/PaddleSeg
| 4,708 |
2025341
|
import hashlib
from pathlib import Path
models_dir = Path()
ext = ".pdparams"
for model_path in models_dir.glob("*/*" + ext):
md5 = hashlib.md5(model_path.read_bytes()).hexdigest()
md5_path = str(model_path)[: -len(ext)] + ".md5"
Path(md5_path).write_text(md5)
| 274 |
voice_datasets/librispeech.py
|
flashlin/pycore
| 0 |
2024466
|
from glob import glob
import pandas as pd
from common.io import get_dir
def librispeech_metadata_iter(txt_filepath):
base_dir = get_dir(txt_filepath)
# print(f"librispeech_metadata_iter='{txt_filepath}'")
with open(txt_filepath, "r", encoding='utf-8') as f:
for line in iter(f):
            ss = line.strip().split(' ', 1)  # "<utterance-id> <transcript ...>"
            filename = ss[0]
            trans = ss[1]
wav_filepath = f"{base_dir}/{filename}.wav"
yield wav_filepath, trans
def all_librisppech_metadata_iter(base_dir):
for txt in glob(f"{base_dir}/**/*.txt", recursive=True):
for wav_filepath, trans in librispeech_metadata_iter(txt):
yield wav_filepath, trans
def get_all_librispeech_metadata_dataframe(base_dir):
df = pd.DataFrame(columns=['wav_filepath', 'trans'])
for wav_filepath, trans in all_librisppech_metadata_iter(base_dir):
df = df.append({
'wav_filepath': wav_filepath,
'trans': trans.lower()
}, ignore_index=True)
return df
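# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative only, not part of the original module).
# It assumes a LibriSpeech-style folder whose *.txt transcript files sit next
# to the converted wav files; the path below is made up.
if __name__ == '__main__':
    corpus_dir = './data/LibriSpeech/dev-clean'
    for wav_path, transcript in all_librisppech_metadata_iter(corpus_dir):
        print(wav_path, transcript)
        break  # only show the first entry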
| 1,062 |
newrelic_plugin_agent/apps.py
|
ambitioninc/django-newrelic-plugin-agent
| 0 |
2025935
|
from django.apps import AppConfig
class NewrelicPluginAgentConfig(AppConfig):
name = 'newrelic_plugin_agent'
verbose_name = 'Django Newrelic Plugin Agent'
| 165 |
pythonrpc-pyserver/pythonrpc_pyserver/server.py
|
repo-list-553108/pythonrpc
| 0 |
2024065
|
from flask import Flask, request
import random
import json
app = Flask(__name__)
lits = {}
mods = {}
def xxquote(x, lit, dups):
if id(x) in dups:
return None
if x == None:
return None
if type(x) in [int, float, bool, str]:
return x
if type(x) == complex:
return {
'real' : x.real,
'imag' : x.imag
}
if type(x) in [list, tuple, set]:
return [ xxquote(t, lit, dups) for t in x ]
if type(x) == dict:
rd = {}
for key in x:
if (type(key) not in [int, float, bool, str, type(None)]):
continue
            rd[key] = xxquote(x[key], lit, dups)  # quote the value first, then mark it as seen
            dups.add(id(x[key]))
return rd
lit[id(x)] = x
dups.add(id(x))
return {
'id': id(x)
}
@app.route('/new_session', methods=['POST'])
def new_session():
while True:
seid = random.randint(1, 99999999)
if seid not in lits:
break
print('-- New session %%%s%%' % seid)
return json.dumps({
'success': 1,
'hello': 'hello, pythonrpc',
'sessionid': seid
})
@app.route('/require_module', methods=['POST'])
def require_module():
sessionid = int(request.form['sessionid'])
name = request.form['name']
if sessionid not in mods:
mods[sessionid] = {}
if sessionid not in lits:
lits[sessionid] = {}
if name in mods[sessionid]:
return json.dumps({
'success': 1,
'internalid': id(mods[sessionid][name])
})
mod = __import__(name)
print('-- Session[%%%s%%] require module %s' % (sessionid, mod))
lits[sessionid][id(mod)] = mod
print(lits)
print(mods)
mods[sessionid][name] = mod
return json.dumps({
'success': 1,
'internalid': id(mod)
})
@app.route('/access_attr', methods=['POST'])
def access_attr():
sessionid = int(request.form['sessionid'])
if sessionid not in lits:
lits[sessionid] = {}
interid = int(request.form['internalid'])
attrname = request.form['attr']
obj = lits[sessionid].get(interid)
print(lits[sessionid])
print(obj)
if not hasattr(obj, attrname):
return json.dumps({
'undefined': 1
})
attr = getattr(obj, attrname)
attr_q = xxquote(attr, lits[sessionid], set())
return json.dumps({
'success': 1,
'attr': attr_q
})
@app.route('/apply_callable', methods=['POST'])
def apply_callable():
sessionid = int(request.form['sessionid'])
if sessionid not in lits:
lits[sessionid] = {}
interid = int(request.form['internalid'])
args = json.loads(request.form['args'])
kwargs = json.loads(request.form['kwargs'])
attr = lits[sessionid].get(interid, None)
if not callable(attr) :
return json.dumps({
'undefined': 1
})
r = attr(*args, **kwargs)
r_q = xxquote(r, lits[sessionid], set())
if (type(r_q) == dict) and ('id' in r_q) :
        lits[sessionid][id(r)] = r  # keep the live object in this session's table
return json.dumps({
'success': 1,
'result': r_q
})
@app.route('/del_lit_ref', methods=['POST'])
def del_lit_ref():
sessionid = int(request.form['sessionid'])
if sessionid not in lits:
return json.dumps({
'success': 1
})
interid = int(request.form['internalid'])
    if interid not in lits[sessionid]:
return json.dumps({
'success': 1
})
del lits[sessionid][interid]
return json.dumps({
'success': 1
})
@app.route('/del_session_ref', methods=['POST'])
def del_session_ref():
sessionid = int(request.form['sessionid'])
if sessionid in lits:
del lits[sessionid]
if sessionid in mods:
del mods[sessionid]
return json.dumps({
'success': 1
})
@app.route('/debuginfo', methods=['POST'])
def debuginfo():
debuginfo = {
'lits': lits,
'mods': mods
}
return json.dumps({
'success': 1,
'debuginfo': repr(debuginfo)
})
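# ---------------------------------------------------------------------------
# Hedged client-side sketch (illustrative only, not part of the original
# module). It assumes this Flask app is already serving on 127.0.0.1:5000 in
# another process and that the `requests` package is available; it walks the
# new_session -> require_module -> access_attr -> apply_callable protocol.
def _example_client(base='http://127.0.0.1:5000'):
    import requests  # only needed for this sketch
    session = requests.post(base + '/new_session').json()['sessionid']
    mod = requests.post(base + '/require_module',
                        data={'sessionid': session, 'name': 'math'}).json()
    attr = requests.post(base + '/access_attr',
                         data={'sessionid': session,
                               'internalid': mod['internalid'],
                               'attr': 'sqrt'}).json()
    result = requests.post(base + '/apply_callable',
                           data={'sessionid': session,
                                 'internalid': attr['attr']['id'],
                                 'args': json.dumps([2.0]),
                                 'kwargs': json.dumps({})}).json()
    return result  # e.g. {'success': 1, 'result': 1.4142135623730951}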
| 4,076 |