max_stars_repo_path (string, length 4-182) | max_stars_repo_name (string, length 6-116) | max_stars_count (int64, 0-191k) | id (string, length 7) | content (string, length 100-10k) | size (int64, 100-10k)
---|---|---|---|---|---
BlackScholes.py | alienbrett/black_scholes | 0 | 2026479 |
"""
@author: <NAME>
@Description: This class contains methods for pricing European options using the Black-Scholes formula
"""
from math import sqrt, pi
import scipy as sp
import scipy.special
import numpy as np
def mass_broadcast(*args):
    '''Cast a list of vectors of shape (a_i, 1) or (a_i,) to a common (l, 1) shape, where l is the largest input length.
'''
l = 0
xs = []
for i, x in enumerate(args):
xs.append( np.asarray(x).reshape(-1,1) )
l = max(l, xs[i].shape[0])
for i, x in enumerate(xs):
xs[i] = np.broadcast_to(x, (l, 1))
return xs
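# A minimal usage sketch (the values are illustrative, not from the original source):
# the scalar 5 is first reshaped to (1, 1) and then broadcast to match the longest input.
# >>> a, b = mass_broadcast([1, 2, 3], 5)
# >>> a.shape, b.shape
# ((3, 1), (3, 1))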
class IncorrectInputs(Exception): pass
class TimeoutException(Exception): pass
class BlackScholes:
#the constructor for the class
def __init__(self, spot_price, strike_price, time_to_maturity, interest_rate, sigma, dividend_yield=0):
        ''' Initialization parameters; all inputs are broadcast to a common (n, 1) column shape. '''
[
self.spot_price,
self.strike_price,
self.time_to_maturity,
self.interest_rate,
self.sigma,
self.dividend_yield
] = mass_broadcast(
spot_price, strike_price, time_to_maturity, interest_rate, sigma, dividend_yield
)
# self.spot_price = np.asarray(spot_price).astype(float)
#
# #checking if strike price is an array or not
# if(not(hasattr(strike_price, "__len__"))):
# self.strike_price = np.ones(len(spot_price))*strike_price
# else:
# self.strike_price = strike_price
# #checking if time to maturity is an array or not
# if(not(hasattr(time_to_maturity, "__len__"))):
# self.time_to_maturity = np.ones(len(spot_price))*time_to_maturity
# else:
# self.time_to_maturity = time_to_maturity
# #checking if interest rate is an array or not
# if(not(hasattr(interest_rate, "__len__"))):
# self.interest_rate = np.ones(len(spot_price))*interest_rate
# else:
# self.interest_rate = interest_rate
# #checking if volatility is an array or not
# if(not(hasattr(sigma, "__len__"))):
# self.sigma = np.ones(len(spot_price))*sigma
# else:
# self.sigma = sigma
#
# #checking if dividend yield is an array or not
# if(not(hasattr(dividend_yield, "__len__"))):
# self.dividend_yield = np.ones(len(spot_price))*dividend_yield
# else:
# self.dividend_yield = dividend_yield
    # helper: standard normal CDF, computed via the error function
def bls_erf_value(self,input_number):
erf_out = 0.5*(1 + sp.special.erf(input_number/sqrt(2.0)))
return erf_out
    #vectorized method to price European call and put options
    def european_option_price(self):
        """Prices of the European call and put options.
        The vectorized method can compute prices of multiple options in one array call."""
numerator = sp.add(
sp.log(
sp.divide(
self.spot_price,
self.strike_price,
)
),
sp.multiply(
(
self.interest_rate - self.dividend_yield +
0.5*sp.power(self.sigma,2)
),
self.time_to_maturity)
)
d1 = sp.divide(
numerator,
sp.prod(
[
self.sigma,
sp.sqrt(self.time_to_maturity)
],
axis=0,
)
)
d2 = sp.add(
d1,
-sp.multiply(
self.sigma,
sp.sqrt(self.time_to_maturity)
)
)
ecall = sp.product(
[
self.spot_price,
self.bls_erf_value(d1),
sp.exp(sp.multiply(-self.dividend_yield,self.time_to_maturity))
],
axis=0
) - sp.product(
[
self.strike_price,
self.bls_erf_value(d2),
sp.exp(
-sp.multiply(
self.interest_rate,
self.time_to_maturity,
)
)
],
axis=0,
)
eput = sp.product(
[
-self.spot_price,
self.bls_erf_value(-d1),
sp.exp(
sp.multiply(
-self.dividend_yield,
self.time_to_maturity
)
)
],
axis=0,
) + sp.product(
[
self.strike_price,
self.bls_erf_value(-d2),
sp.exp(
-sp.multiply(
self.interest_rate,
self.time_to_maturity
)
)
],
axis=0,
)
return ecall, eput
#delta of the option
def european_option_delta(self):
numerator = sp.add(
sp.log(
sp.divide(
self.spot_price,
self.strike_price
)
),
sp.multiply(
( self.interest_rate - self.dividend_yield + 0.5*sp.power(self.sigma,2)),
self.time_to_maturity
)
)
d1 = sp.divide(
numerator,
sp.prod(
[
self.sigma,
sp.sqrt(self.time_to_maturity)
],
axis=0,
)
)
call_delta = self.bls_erf_value(d1)
put_delta = call_delta - 1
return call_delta, put_delta
#gamma of the option (under construction)
def european_option_gamma(self):
pass
#vega of the option
def european_option_vega(self):
numerator = sp.add(
sp.log(
sp.divide(
self.spot_price,
self.strike_price
)
),
sp.multiply(
( self.interest_rate - self.dividend_yield + 0.5*sp.power(self.sigma,2)),
self.time_to_maturity
)
)
d1 = sp.divide(
numerator,
sp.prod(
[
self.sigma,
sp.sqrt(self.time_to_maturity)
],
axis=0,
)
)
val = sp.multiply(
sp.multiply(
self.spot_price,
sp.exp(
-sp.multiply(
self.dividend_yield,
self.time_to_maturity
)
)
),
sp.exp(-sp.square(d1)*0.5)
)
val = sp.multiply(
val,
sp.sqrt(self.time_to_maturity)
)
vega = (1/sqrt(2*pi))*val
return vega
#theta of the option
def european_option_theta(self):
pass
#rho of the option
def european_option_rho(self):
"Price of the call option"
"the vectorized method can compute price of multiple options in array"
numerator = sp.add(
sp.log(
sp.divide(
self.spot_price,
self.strike_price,
)
),
sp.multiply(
(
self.interest_rate - self.dividend_yield +
0.5*sp.power(self.sigma,2)
),
self.time_to_maturity)
)
d1 = sp.divide(
numerator,
sp.prod(
[
self.sigma,
sp.sqrt(self.time_to_maturity)
],
axis=0,
)
)
d2 = sp.add(
d1,
-sp.multiply(
self.sigma,
sp.sqrt(self.time_to_maturity)
)
)
        # rho scales with the discounted strike: K * T * exp(-r*T) * N(+/-d2)
        j = sp.product(
            [
                self.strike_price,
                self.time_to_maturity,
sp.exp(
sp.multiply(
-self.interest_rate,
self.time_to_maturity
)
),
],
axis=0
)
c_rho = j * self.bls_erf_value(d2)
p_rho = -j * self.bls_erf_value(-d2)
return c_rho, p_rho
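# A minimal usage sketch (not part of the original file): the parameter values below
# are illustrative assumptions chosen only to exercise the vectorized interface.
if __name__ == '__main__':
    bs = BlackScholes(
        spot_price=[100.0, 105.0],
        strike_price=100.0,
        time_to_maturity=0.5,
        interest_rate=0.01,
        sigma=0.2,
    )
    call_price, put_price = bs.european_option_price()   # each result has shape (2, 1)
    call_delta, put_delta = bs.european_option_delta()
    print(call_price.ravel(), put_price.ravel(), call_delta.ravel())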
| 6,412 |
SolutionTable.py | KOLANICH-libs/SolutionTable | 1 | 2026397 |
import numpy as np
import scipy as sp
import scipy.linalg
from matplotlib import pyplot as plt
from math import isclose
class Table:
"""
    Tries to solve logical problems in a Bayesian framework.
!!! Doesn't work for now !!!
"""
__slots__ = ("axes", "tensor")
def __init__(self, *labels):
axCount = len(labels)
labelsNew = []
for ax in labels:
ax = {l: i for i, l in enumerate(ax)}
labelsNew.append(ax)
labels = labelsNew
self.axes = labels
shape = tuple(len(lS) for lS in labels)
self.tensor = self.getUniform(shape)
self.axes = labels
@property
def shape(self):
return self.tensor.shape[0]
@classmethod
def getUniform(cls, shape):
"""Returns a Uniform distribution, normalized to 1 for **whole matrix**"""
return np.ones(shape) * 1.0 / np.product(shape) # * self.shape
def iterMinor(self, i, j):
"""Iterates over a matrix minor, yields INDICES"""
for l in range(self.shape):
for k in range(self.shape):
if not (l == i or k == j):
yield (l, k)
def iterExcept(self, i, j):
"""Iterates over all the els of a matrix, except the selected one"""
for l in range(self.shape):
for k in range(self.shape):
if not (l == i and k == j):
yield (l, k)
def getMinor(self, i, j):
"""Returns a minor as a matrix"""
return np.delete(np.delete(self.tensor, i, axis=0), j, axis=1)
def equal(self, a, b):
"""Updates the posterior so that `a` maps to `b` with probability of 1."""
i = self.axes[0][a]
j = self.axes[1][b]
self.bayesUpdate(self.getSingleElConditioned(1., i, j))
def isNot(self, a, b):
"""Updates the posterior so that `a` cannot map to `b` (in other words, maps to it with probability of `0`)."""
i = self.axes[0][a]
j = self.axes[1][b]
self.bayesUpdate(self.getSingleElConditioned(0., i, j))
def getSingleElConditioned(self, probability, i, j):
"""Generates conditional distribution so that `a` cannot map to `b` (in other words, maps to it with probability of `0`)."""
cond = np.zeros(self.tensor.shape)
rowNorm = 1.0 / self.shape
elNorm = probability * rowNorm
restRowNorm = rowNorm - elNorm
restRowEl = restRowNorm / (self.shape - 1)
cond[i, :] = restRowEl
cond[:, j] = restRowEl
cond[i, j] = elNorm
probabilityForTheMinorRow = rowNorm - restRowEl
itemsInMinorRow = self.shape - 1
minorRowEl = probabilityForTheMinorRow / itemsInMinorRow
for el in self.iterMinor(i, j):
cond[el] = minorRowEl
plt.matshow(cond)
plt.colorbar()
plt.grid()
plt.show()
assert isclose(np.sum(cond), 1.), "Conditional probability distribution must normalize to 1 over whole matrix"
return cond
def plot(self):
"""A convenience routine to plot the shit"""
plt.matshow(self.tensor)
plt.colorbar()
plt.grid()
plt.show()
def bayesUpdate(self, cond):
"""Updates the posterior in the tensor.
(posterior := P(X|D)) * (margLik := P(D)) = (cond := P(D|X)) * (prior := P(X))
"""
prior = self.tensor
#assert isclose(np.sum(prior), 1.), "Prior distribution must normalize to 1 over the whole matrix, but: " + str(np.sum(prior))
joint_cond = prior * cond
#elementNorm = 1. / self.shape / self.shape
#assert isclose(np.sum(self.getUniform(prior.shape) - (prior + (elementNorm - prior))), 0.), "Uniform is the marginal prior"
#margLik = self.getUniform(prior.shape) * cond # getUniform() === (prior + (elementNorm - prior))
#assert isclose(np.sum(margLik), 1.), "marginal likelyhood must normalize to 1 over the whole matrix, but: " + str(np.sum(margLik))
#margLik = self.getUniform(prior.shape)
rowNorm = 1. / self.shape
elNorm = rowNorm / self.shape
#plt.matshow(margLik)
#plt.colorbar()
#plt.grid()
#plt.show()
#posterior = np.zeros(joint_cond.shape)
posterior = joint_cond
# We need to replace this shit with something calculated analytically in a single step, and without using z3 or sympy or any other symbolic solver.
# It is likely that to do it we just need to make all the marginals uniform (even if the product is not factored into marginals)
# I guess it may be possible to express it in a form solving it via gaussian elimination. An eigenvector as a variant.
for k in range(100):
for i in range(self.shape):
#for j in range(self.shape):
# if margLik[i, j]:
# posterior[i, j] = joint_cond[i, j] / margLik[i, j]
#posterior[i] = joint_cond[i]
# only alternating order works!
currentRowNorm = np.sum(joint_cond[i])
posterior[i] *= rowNorm / currentRowNorm
#posterior[:, i] = joint_cond[:, i]
currentCollNorm = np.sum(joint_cond[:, i])
posterior[:, i] *= rowNorm / currentCollNorm
#posterior[i] /= np.sum(posterior[i]) / self.shape
#posterior[:, i] = joint_cond[:, i]
#posterior[:, i] /= np.sum(posterior[:, i]) / self.shape
plt.matshow(posterior)
plt.colorbar()
plt.grid()
plt.show()
#assert isclose(np.sum(posterior), 1.), "Posterior must normalize to 1 over the whole matrix, but: " + str(np.sum(posterior))
self.tensor = posterior
def buildAlphaVec(a):
"""A vector of "α" values.
k is step of solution. k=0 is prior, k=1 is posterior got from prior with 1 conditional applied, k=2 - with 2 conditionals applied and so on.
a_{l,(k+1)} = α(a_{l,(k)}) @ a_{l,(k+1)} -- it is the result of resolution of the equation wrt a_{l,(k+1)}, where `l` is the equation number and
α(a_{l,(k)}) := a_{l,(k)} / ( 1 - a_{l,(k)}/\\sum_{j}{a_{j,(k)}} ) -- the vector generated by this function"""
alpha = np.zeros(len(a))
norm = np.sum(a)
for l in range(len(a)):
#alpha[l] = 1 / (1 - a[l] / norm)
alpha[l] = a[l] / norm
return alpha
def buildBMat(old, coords):
"""A matrix for an eigenvector-like problem for eigenvalue of 1. Need to solve it with Gaussian elimination.
`coords is a try to exclude the row corresponding to the modified element from the matrix in order to prevent it from being relaxed, it must be rigid. Doesn't work for now."""
alpha = buildAlphaVec(old)
B = np.ones((len(old), len(old)))
for i in range(len(old)):
for j in range(len(old)):
B[i, j] = alpha[i]
if i == j:
if coords is None or coords[0] != i:
B[i, j] -= 1
else:
pass
return B
def computeNewShit(a, coords=None):
"""Tries to relax a vector with elements modified to a new value satisfying the normalization to 1."""
B = buildBMat(a, coords) # det B === 0
#return scipy.linalg.solve(B, np.zeros(len(a)))
if coords is not None:
coord2ReplaceWithNormN = coords[0]
else:
coord2ReplaceWithNormN = -1
#replace the last row with normalization
for i in range(len(a)):
B[coord2ReplaceWithNormN, i] = 1.0
if coords is not None:
B[coords[0], coords[0]] = 0.0
rhs = np.zeros(len(a))
if coords is not None:
rhs[coord2ReplaceWithNormN] = 1.0 - a[coords]
else:
rhs[coord2ReplaceWithNormN] = 1.0
return np.linalg.solve(B, rhs)
| 6,839 |
meine_stadt_transparent/settings/security.py | CatoTH/opensourceris | 1 | 2026340 |
from meine_stadt_transparent.settings.env import env
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
X_FRAME_OPTIONS = "DENY"
SECURE_HSTS_SECONDS = 365 * 24 * 60 * 60
SECURE_HSTS_PRELOAD = True
# There might be deployments where a subdomain is still without https
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool("SECURE_HSTS_INCLUDE_SUBDOMAINS", True)
CSP_DEFAULT_SRC = ("'self'",)
if env.bool("MINIO_REDIRECT", False):
if env.str("MINIO_PUBLIC_HOST"):
endpoint = env.str("MINIO_PUBLIC_HOST")
secure = env.bool("MINIO_PUBLIC_SECURE", True)
else:
endpoint = env.str("MINIO_HOST")
secure = env.bool("MINIO_SECURE", False)
CSP_DEFAULT_SRC += (("https://" if secure else "http://") + endpoint,)
CSP_SCRIPT_SRC = ("'self'",) + env.tuple("CSP_EXTRA_SCRIPT", default=tuple())
CSP_IMG_SRC = ("'self'", "data:") + env.tuple("CSP_EXTRA_IMG", default=tuple())
if env.str("MAP_TILES_PROVIDER", "OSM") == "OSM":
CSP_IMG_SRC = CSP_IMG_SRC + (
"a.tile.openstreetmap.org",
"b.tile.openstreetmap.org",
"c.tile.openstreetmap.org",
)
if env.str("MAP_TILES_PROVIDER", "OSM") == "Mapbox":
CSP_IMG_SRC = CSP_IMG_SRC + ("api.tiles.mapbox.com", "api.mapbox.com")
SENTRY_HEADER_ENDPOINT = env.str("SENTRY_HEADER_ENDPOINT", None)
CSP_CONNECT_SRC = ("'self'", "sentry.io") + env.tuple(
"CSP_CONNECT_SRC", default=tuple()
)
if SENTRY_HEADER_ENDPOINT:
CSP_REPORT_URI = SENTRY_HEADER_ENDPOINT
# Those are not covered by default-src
CSP_FORM_ACTION = ("'self'",)
CSP_FRAME_SRC = ("'self'",) + env.tuple("CSP_FRAME", default=tuple())
CSP_FRAME_ANCESTORS = ("'self'",) + env.tuple("CSP_FRAME", default=tuple())
# Hack for Landshut, where the RIS has a broken SSL configuration (intermediate certificate missing)
SSL_NO_VERIFY = env.bool("SSL_NO_VERIFY", False)
| 1,897 |
tools/ipp_custom_library_tool_python/tool/core.py | lanlanlufeng/ipp-crypto | 233 | 2025248 |
"""
Copyright 2018-2021 Intel Corporation.
This software and the related documents are Intel copyrighted materials, and
your use of them is governed by the express license under which they were
provided to you (License). Unless the License provides otherwise, you may not
use, modify, copy, publish, distribute, disclose or transmit this software or
the related documents without Intel's prior written permission.
This software and the related documents are provided as is, with no express
or implied warranties, other than those that are expressly stated in the
License.
License:
http://software.intel.com/en-us/articles/intel-sample-source-code-license-agr
eement/
"""
import os
from subprocess import call # nosec
from tool import utils
from tool.generators import main_file_generator, EXPORT_GENERATORS, build_script_generator, custom_dispatcher_generator
def generate_script():
"""
Generates build script
"""
host = utils.HOST_SYSTEM
output_path = utils.CONFIGS[utils.OUTPUT_PATH]
if not os.path.exists(output_path):
os.makedirs(output_path)
with open(os.path.join(output_path, utils.MAIN_FILE_NAME + '.c'), 'w') as main_file:
main_file.write(main_file_generator())
if utils.CONFIGS[utils.CUSTOM_CPU_SET]:
with open(os.path.join(output_path, utils.CUSTOM_DISPATCHER_FILE_NAME) + '.c', 'w') as custom_dispatcher_file:
custom_dispatcher_file.write(custom_dispatcher_generator())
with open(os.path.join(output_path, utils.EXPORT_FILE[host]), 'w') as export_file:
EXPORT_GENERATORS[host](export_file, utils.CONFIGS[utils.FUNCTIONS_LIST])
script_path = os.path.join(output_path, utils.CONFIGS[utils.BUILD_SCRIPT_NAME])
with open(script_path, 'w') as build_script:
build_script.write(build_script_generator())
os.chmod(script_path, 0o745)
return os.path.exists(script_path)
def build():
"""
Builds dynamic library
:return: True if build was successful and False in the opposite case
"""
success = generate_script()
if not success:
return False
output_path = utils.CONFIGS[utils.OUTPUT_PATH]
error = call([os.path.join(output_path, utils.CONFIGS[utils.BUILD_SCRIPT_NAME])])
if error:
return False
os.remove(os.path.join(output_path, utils.MAIN_FILE_NAME + '.c'))
os.remove(os.path.join(output_path, utils.MAIN_FILE_NAME + '.obj'))
os.remove(os.path.join(output_path, utils.EXPORT_FILE[utils.HOST_SYSTEM]))
os.remove(os.path.join(output_path, utils.CONFIGS[utils.BUILD_SCRIPT_NAME]))
if utils.CONFIGS[utils.CUSTOM_CPU_SET]:
os.remove(os.path.join(output_path, utils.CUSTOM_DISPATCHER_FILE_NAME + '.obj'))
return True
| 2,748 |
maskrcnn_benchmark/data/samplers/__init__.py | henrywang1/maskrcnn-few | 1 | 2025955 |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from .distributed import DistributedSampler
from .grouped_batch_sampler import GroupedBatchSampler
from .grouped_batch_sampler import QuerySupportSampler
from .iteration_based_batch_sampler import IterationBasedBatchSampler
__all__ = ["DistributedSampler", "GroupedBatchSampler",
"IterationBasedBatchSampler", "QuerySupportSampler"]
| 417 |
oops_fhir/r4/code_system/task_status.py | Mikuana/oops_fhir | 0 | 2026244 |
from pathlib import Path
from fhir.resources.codesystem import CodeSystem
from oops_fhir.utils import CodeSystemConcept
__all__ = ["TaskStatus"]
_resource = CodeSystem.parse_file(Path(__file__).with_suffix(".json"))
class TaskStatus:
"""
TaskStatus
The current status of the task.
Status: draft - Version: 4.0.1
Copyright None
http://hl7.org/fhir/task-status
"""
draft = CodeSystemConcept(
{
"code": "draft",
"definition": "The task is not yet ready to be acted upon.",
"display": "Draft",
}
)
"""
Draft
The task is not yet ready to be acted upon.
"""
requested = CodeSystemConcept(
{
"code": "requested",
"definition": "The task is ready to be acted upon and action is sought.",
"display": "Requested",
}
)
"""
Requested
The task is ready to be acted upon and action is sought.
"""
received = CodeSystemConcept(
{
"code": "received",
"definition": "A potential performer has claimed ownership of the task and is evaluating whether to perform it.",
"display": "Received",
}
)
"""
Received
A potential performer has claimed ownership of the task and is evaluating whether to perform it.
"""
accepted = CodeSystemConcept(
{
"code": "accepted",
"definition": "The potential performer has agreed to execute the task but has not yet started work.",
"display": "Accepted",
}
)
"""
Accepted
The potential performer has agreed to execute the task but has not yet started work.
"""
rejected = CodeSystemConcept(
{
"code": "rejected",
"definition": "The potential performer who claimed ownership of the task has decided not to execute it prior to performing any action.",
"display": "Rejected",
}
)
"""
Rejected
The potential performer who claimed ownership of the task has decided not to execute it prior to performing any action.
"""
ready = CodeSystemConcept(
{
"code": "ready",
"definition": "The task is ready to be performed, but no action has yet been taken. Used in place of requested/received/accepted/rejected when request assignment and acceptance is a given.",
"display": "Ready",
}
)
"""
Ready
The task is ready to be performed, but no action has yet been taken. Used in place of requested/received/accepted/rejected when request assignment and acceptance is a given.
"""
cancelled = CodeSystemConcept(
{
"code": "cancelled",
"definition": "The task was not completed.",
"display": "Cancelled",
}
)
"""
Cancelled
The task was not completed.
"""
in_progress = CodeSystemConcept(
{
"code": "in-progress",
"definition": "The task has been started but is not yet complete.",
"display": "In Progress",
}
)
"""
In Progress
The task has been started but is not yet complete.
"""
on_hold = CodeSystemConcept(
{
"code": "on-hold",
"definition": "The task has been started but work has been paused.",
"display": "On Hold",
}
)
"""
On Hold
The task has been started but work has been paused.
"""
failed = CodeSystemConcept(
{
"code": "failed",
"definition": "The task was attempted but could not be completed due to some error.",
"display": "Failed",
}
)
"""
Failed
The task was attempted but could not be completed due to some error.
"""
completed = CodeSystemConcept(
{
"code": "completed",
"definition": "The task has been completed.",
"display": "Completed",
}
)
"""
Completed
The task has been completed.
"""
entered_in_error = CodeSystemConcept(
{
"code": "entered-in-error",
"definition": "The task should never have existed and is retained only because of the possibility it may have used.",
"display": "Entered in Error",
}
)
"""
Entered in Error
The task should never have existed and is retained only because of the possibility it may have used.
"""
class Meta:
resource = _resource
| 4,576 |
ven2/lib/python2.7/site-packages/zope/publisher/interfaces/ftp.py | manliu1225/Facebook_crawler | 0 | 2025445 |
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Virtual File System interfaces for the publisher.
"""
__docformat__ = "reStructuredText"
from zope.interface import Interface
from zope.publisher.interfaces import IPublishTraverse
from zope.publisher.interfaces import IRequest
from zope.publisher.interfaces import IView
class IFTPRequest(IRequest):
"""FTP Request
"""
class IFTPView(IView):
"""FTP View"""
class IFTPCredentials(Interface):
def _authUserPW():
"""Return (login, password) if there are basic credentials;
return None if there aren't."""
def unauthorized(challenge):
"""Cause a FTP-based unautorized error message"""
class IFTPPublisher(IPublishTraverse):
"""FTP Publisher"""
| 1,344 |
python/nano/src/bigdl/nano/utils/inference/model.py | sgwhat/BigDL | 0 | 2025563 |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC
import yaml
from pathlib import Path
from bigdl.nano.utils.log4Error import invalidInputError
class AcceleratedModel(ABC):
def forward(self, *inputs):
inputs = self.on_forward_start(inputs)
outputs = self.forward_step(*inputs)
return self.on_forward_end(outputs)
def on_forward_start(self, inputs):
return inputs
def forward_step(self, *inputs):
invalidInputError(False, "Method 'forward_step' is not implemented.")
def on_forward_end(self, outputs):
return outputs
@staticmethod
def tensors_to_numpy(tensors):
invalidInputError(False, "Method 'tensors_to_numpy' is not implemented.")
@staticmethod
def numpy_to_tensors(np_arrays):
invalidInputError(False, "Method 'numpy_to_tensors' is not implemented.")
def _dump_status(self, path):
meta_path = Path(path) / "nano_model_meta.yml"
with open(meta_path, 'w') as f:
yaml.safe_dump(self.status, f)
def _save_model(self, path):
"""
Save the model file to directory.
:param path: Path to saved model. Path should be a directory.
"""
invalidInputError(False, "Saving function is not implemented.")
def _save(self, path):
"""
Save the model to local file.
:param path: Path to saved model. Path should be a directory.
"""
path = Path(path)
Path.mkdir(path, exist_ok=True)
self._dump_status(path)
self._save_model(path)
@property
def status(self):
return {"ModelType": type(self).__name__}
@staticmethod
def _load_status(path):
meta_path = Path(path) / "nano_model_meta.yml"
with open(meta_path, 'r') as f:
metadata = yaml.safe_load(f)
return metadata
@staticmethod
def _load(path, model=None):
invalidInputError(False, "Loading function is not implemented.")
| 2,540 |
ai_flow/test/common/test_properties.py | flink-extended/ai-flow | 79 | 2024405 |
#
# Copyright 2022 The AI Flow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from ai_flow.common.properties import Properties
class TestProperties(unittest.TestCase):
def test_create_properties(self):
print("\n")
p = Properties(a="aa", b=['a', 'b'])
self.assertEqual(p['a'], 'aa')
if __name__ == '__main__':
unittest.main()
| 889 |
main.py | Georege/python_random_password | 0 | 2026804 |
#!/usr/bin/python3 env
# encoding=utf-8
import secrets
import string
import random
from tkinter import *
from tkinter import messagebox
'''
Constants in the string module:
string.digits: the digits 0-9
string.ascii_letters: all letters (upper- and lowercase)
string.ascii_lowercase: all lowercase letters
string.printable: string of printable characters
string.punctuation: all punctuation, i.e. special characters
string.ascii_uppercase: all uppercase letters
'''
# Generate the password
def init_pass(pass_length=24, special_character_length=2):
    # generate the letters-and-digits part, then append the requested number of special characters
password = ''.join(secrets.choice(string.ascii_letters + string.digits)
for i in range(pass_length-special_character_length))
password += ''.join(secrets.choice(string.punctuation)
for i in range(special_character_length))
password_list = list(password)
random.shuffle(password_list)
password = ''.join(password_list)
info_output.config(state='normal')
info_output.delete("1.0", 'end')
info_output.insert('end', '{}'.format(len(password)))
info_output.config(state='disabled')
miyue_output.config(state='normal')
miyue_output.delete("1.0", 'end')
miyue_output.insert('end', '{}'.format(password))
miyue_output.config(state='disabled')
    messagebox.showinfo(
        'Notice:', "Password generated")
# Copy the password to the clipboard
def copy():
miyue = miyue_output.get('0.0', 'end-1c')
root.clipboard_clear()
root.clipboard_append(miyue)
root.update()
    messagebox.showinfo('Notice:', '{}\nCopied to clipboard'.format(miyue))
if __name__ == "__main__":
root = Tk()
    root.title("Python random password generator (configurable length)")
root.resizable(width=False, height=False)
# line 1
Label(root, text="密钥整体长度:").grid(
row=1, column=1)
Button(root, text="生成", command=lambda: init_pass(
vari.get(), vari_special.get())).grid(row=1, column=5, rowspan=3, padx=10)
# line 2
length_list = [
'8',
'12',
'24',
'32'
]
vari = IntVar()
vari.set(24)
for each in length_list:
Radiobutton(root, variable=vari, text=each, value=each).grid(
row=2, column=length_list.index(each)+1)
# line 3
Label(root, text="特殊字符个数:").grid(
row=3, column=1)
# line 4
special_character_list = [
'2',
'3',
'4'
]
vari_special = IntVar()
vari_special.set(2)
for each in special_character_list:
Radiobutton(root, variable=vari_special, text=each, value=each).grid(
row=4, column=special_character_list.index(each)+1)
Button(root, text="复制", command=lambda: copy()).grid(
row=4, column=5, rowspan=3, padx=10)
# line 5
Label(root, text="随机密码的长度是长度:").grid(
row=5, column=1)
info_output = Text(root, width=40, height=1)
info_output.grid(
row=5, column=2, columnspan=3)
# line 6
Label(root, text="密码是:").grid(
row=6, column=1)
miyue_output = Text(root, width=40, height=1)
miyue_output.grid(
row=6, column=2, columnspan=3)
root.mainloop()
| 2,962 |
slave.py | thanhbok26b/mujoco-rewards-landscape-visualization | 0 | 2025544 |
import time
from mujoco_parallel import WorkerManager
# Start workers
wm = WorkerManager()
wm.start_redis()
wm.create_workers()
# Wait for eternity
while 1:
time.sleep(3600)
| 177 |
relay/udp.py | OliverF/boxy | 13 | 2026705 |
import sys
import socket
import threading
import status
_kill = False
_relayport = 0
_remoteaddress = ""
_remoteport = 0
def relay():
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("0.0.0.0", _relayport))
incomingsetup = False
clientport = 0
clientip = ""
while True:
data, fromaddr = sock.recvfrom(1024)
if (_kill == True):
sock.close()
return
if (incomingsetup == False):
clientport = fromaddr[1]
clientip = fromaddr[0]
incomingsetup = True
if (fromaddr[0] == clientip):
#forward from client to server
sock.sendto(data, (_remoteaddress, _remoteport))
status.bytestoremote += sys.getsizeof(data)
else:
#forward from server to client
sock.sendto(data, (clientip, clientport))
status.bytesfromremote += sys.getsizeof(data)
def start(relayport, remoteaddress, remoteport):
global _relayport
global _remoteaddress
global _remoteport
_relayport = relayport
_remoteaddress = remoteaddress
_remoteport = remoteport
relaythread = threading.Thread(target = relay)
relaythread.start()
def stop():
    global _kill
    _kill = True
    # send anything to the relay port to unblock recvfrom(), allowing the thread to close
    quitsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    quitsock.sendto(b"killing", ("127.0.0.1", _relayport))
    quitsock.close()
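# A minimal usage sketch (the address and ports below are illustrative assumptions):
#
#   start(5000, "203.0.113.10", 27015)   # listen on local UDP port 5000 and relay to the remote host
#   ...                                  # traffic flows while the relay thread runs
#   stop()                               # unblock the socket and let the relay thread exit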
| 1,319 |
setup.py | MZehren/Automix | 18 | 2025433 |
#!/usr/bin/env python
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='AutoMix',
version='0.1',
author="<NAME>",
author_email="<EMAIL>",
description="Automatic DJ-mixing of tracks",
long_description=long_description,
install_requires=[
"numpy", "scipy", "cython", "matplotlib", "pandas", "pyaudio", "madmom", "librosa", "essentia", "youtube-dl", "scdl",
"mir_eval", "msaf", "graphviz"
],
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
],
)
| 663 |
algorithms/sorting/count_sort.py | karim7262/algorithms-and-datastructures-python | 1 | 2026329 |
"""
The count sort is a non-comparison based sorting algorithm:
In other words, it sorts items in a list without having to compare them.
The counting sort algorithm only works with integers.
It sorts the integers in an array by creating a second array based on the range of
values in the original array(max-min). Initially it populates the second array with 0s.
It then counts the number of occcurence of each item in the original array. It then updates
that value in the second array.
"""
class CountSort(object):
def __init__(self, data):
self.data = data
self.count_array = [0 for _ in range((max(data)-min(data))+1)]
# First we count all the items in data and the number of times they occur
# Indexes start from 0 + we have to consider negative indexes as well
for i in range(len(self.data)):
self.count_array[self.data[i]-min(self.data)] += 1
# Now consider the counting array: see how many times an entry occurs
z=0
for i in range(min(self.data), max(self.data)+1):
while self.count_array[i-min(self.data)] > 0:
self.data[z] = i
z += 1
self.count_array[i-min(self.data)] -= 1
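# A minimal usage sketch (the values are illustrative, not from the original file):
# sorting happens inside __init__, so the input list is reordered in place.
if __name__ == "__main__":
    numbers = [4, -2, 7, 0, 4, -2]
    CountSort(numbers)
    print(numbers)  # [-2, -2, 0, 4, 4, 7]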
| 1,211 |
sitemapext/runtests/settings.py | ligonier/django-sitemap-extras | 5 | 2025539 |
import os, sys
from logging import Handler
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('<NAME>', '<EMAIL>'),
)
ENGINE = 'django.db.backends.sqlite3'
NAME = 'test'
DATABASES = {
'default': {
'ENGINE': ENGINE, # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': NAME, # Or path to database file if using sqlite3.
}
}
# Django 1.1 compat
DATABASE_ENGINE = ENGINE
DATABASE_NAME = NAME
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = 'media'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'wzf0h@r2u%m^_zgj^39qwerqwerwerwerq1asdfasdfasdft%^2!p'
ROOT_URLCONF = 'urls'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admindocs',
'django.contrib.comments',
'django.contrib.sites',
'django.contrib.sitemaps',
'sitemapext',
)
SITEMAPS_CONFIG = {
'DEBUG': True
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| 2,558 |
main.py | canonicalchris/gcnlp2conllu | 0 | 2022725 |
# Does a Google Cloud NLP JSON to CoNLL-U Transformation
# @see https://universaldependencies.org/format.html
#
import sys
from io import StringIO
from sys import argv
import json
def extract_part_of_speech_features(pos_details):
features = StringIO()
first = True
for key, value in pos_details.items():
if key == 'tag' or value.endswith("_UNKNOWN"):
continue
if first:
first = False
else:
features.write('|')
features.write(key.capitalize())
features.write('=')
features.write(value.capitalize())
return features.getvalue()
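# A minimal sketch of the expected behaviour (the input dict below is an illustrative
# assumption modelled on the Google Cloud NLP partOfSpeech payload):
# extract_part_of_speech_features({"tag": "NOUN", "number": "SINGULAR", "case": "CASE_UNKNOWN"})
# returns "Number=Singular" -- the 'tag' key and any *_UNKNOWN values are skipped.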
def convert_to_html(parse, cf):
print("<table><tbody>", file=cf)
convert(parse, cf, start_of_line='<tr>', left='<td>', right='</td>', end_of_line='</tr>\n')
print("</tbody></table>", file=cf)
print('', file=cf)
def write_conllu_record(values, start_of_line, left, right, end_of_line, cf):
print(start_of_line, end='', file=cf)
for value in values:
print(left, end='', file=cf)
print(value, end=right, file=cf)
print('', end=end_of_line, file=cf)
# note that we do not actually need to compile a sentence index, since the list is
# ordered in ascending order and we can do a straight-up merge ... rck 20210906
def compile_sentence_index(sentences):
index = {}
for sentence in sentences:
details = sentence['text']
offset = details['beginOffset']
text = details['content']
index[offset] = text
return index
# Because we need to know how far the next token is to compute the SpaceAfter=No property
# correctly, we use a buffer of depth one to edit the meta-properties for Token K when
# looking at the token of K+1
# We also number the sentences and renumber the nodes relative to the sentence
def convert(parse, cf, left='', right='\t', start_of_line='', end_of_line='\n', meta_info=True):
# we have no xpos tagging information in the Google NLP API
sentence_index = compile_sentence_index(parse['sentences'])
sentence_id = 0
sentence_base_token_id = 0
id = 1
prev_tail = -1
values = []
for item_number, token in enumerate(parse['tokens']):
offset = token['text']['beginOffset']
text = token['text']['content']
# if this becomes a bottleneck, consider
# @see https://stackoverflow.com/questions/6714826/how-can-i-determine-the-byte-length-of-a-utf-8-encoded-string-in-python
curr_tail = offset + len(text.encode('utf-8'))
if offset == prev_tail:
values[-1] = 'SpaceAfter=No'
if len(values) > 0:
write_conllu_record(values, start_of_line, left, right, end_of_line, cf)
if meta_info and offset in sentence_index:
id = 1
sentence_base_token_id = item_number
sentence_id += 1
print(start_of_line, end='', file=cf)
print('# sent_id = {}'.format(sentence_id), end=end_of_line, file=cf)
print(start_of_line, end='', file=cf)
print('# text_en = {}'.format(sentence_index[offset]), end=end_of_line, file=cf)
lemma = token['lemma']
pos_details = token['partOfSpeech']
pos_tag = pos_details['tag']
head = token['dependencyEdge']['headTokenIndex']
if head == item_number:
head = 0 # map the root to 0, by convention
else:
head = head - sentence_base_token_id + 1 # make it relative to the sentence at hand
dep_label = token['dependencyEdge']['label'].lower()
features = extract_part_of_speech_features(pos_details)
xpos_tag = pos_tag
values = [id, text, lemma, pos_tag, xpos_tag, features, head, dep_label, '_', '_']
id += 1
prev_tail = curr_tail
# flush any remainder in the buffer
if len(values) > 0:
write_conllu_record(values, start_of_line, left, right, end_of_line, cf)
if __name__ == '__main__':
with open(argv[1]) as pf:
parse = json.load(pf)
if len(argv) < 3:
convert(parse, sys.stdout)
else:
outfile = argv[2]
with open(outfile, 'w') as cf:
if outfile.endswith('.html'):
convert_to_html(parse, cf)
else:
convert(parse, cf)
| 4,286 |
examples/riotapi/headers_parser.py | sousa-andre/requests-limiter | 4 | 2025729 |
from math import ceil
from time import time
from limiter.rate_limit import LimitDto
def parse_headers(raw_limits, raw_limits_count):
limits = []
for limit_count, limit, in zip(raw_limits_count.split(','), raw_limits.split(',')):
[current_requests, reset_in_seconds] = limit_count.split(':')
[max_requests, _] = limit.split(':')
limits.append(LimitDto(
int(max_requests),
int(current_requests),
ceil(time()) + int(reset_in_seconds)
))
return limits
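# A minimal usage sketch; the header strings below are illustrative assumptions in the
# "<requests>:<window_seconds>" comma-separated format used by the Riot API rate-limit
# headers (limits first, then the matching per-window counts):
#
#   parse_headers("20:1,100:120", "1:1,3:120")
#   # -> [LimitDto(20, 1, now + 1), LimitDto(100, 3, now + 120)]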
| 532 |
src/crossover.py | SebaRiccardo/Strip_Packing_GA | 1 | 2026671 |
import random
from math import nan
from population import create_individual
import copy
def order_crossover(p1, p2, seed):
random.seed(seed)
zero_shift = min(p1)
length = len(p1)
start, end = sorted([random.randrange(length) for _ in range(2)])
c1, c2 = [nan] * length, [nan] * length
t1, t2 = [x - zero_shift for x in p1], [x - zero_shift for x in p2]
spaces1, spaces2 = [True] * length, [True] * length
for i in range(length):
if i < start or i > end:
spaces1[t2[i]] = False
spaces2[t1[i]] = False
j1, j2 = end + 1, end + 1
for i in range(length):
if not spaces1[t1[(end + i + 1) % length]]:
c1[j1 % length] = t1[(end + i + 1) % length]
j1 += 1
if not spaces2[t2[(i + end + 1) % length]]:
c2[j2 % length] = t2[(i + end + 1) % length]
j2 += 1
for i in range(start, end + 1):
c1[i], c2[i] = t2[i], t1[i]
return [[x + zero_shift for x in c1], [x + zero_shift for x in c2]]
def crossover_one_point(p1, p2,seed):
random.seed(seed)
point = random.randint(1, len(p1) - 1)
c1, c2 = copy.deepcopy(p1), copy.deepcopy(p2)
c1[point:], c2[point:] = p2[point:], p1[point:]
return [c1, c2]
def crossover(ind1, ind2, max_width, rectangles, fitness_function, seed, it_rotates):
offspring_genes = order_crossover(ind1.gene_list, ind2.gene_list, seed)
offspring_rotation = crossover_one_point(ind1.rotation, ind2.rotation, seed)
return[create_individual(offspring_genes[0], offspring_rotation[0], max_width, rectangles, fitness_function, seed, it_rotates),
create_individual(offspring_genes[1], offspring_rotation[1], max_width, rectangles, fitness_function, seed, it_rotates)]
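# A minimal usage sketch of the order crossover in isolation (the parent permutations
# and seed below are illustrative assumptions):
#
#   children = order_crossover([0, 1, 2, 3, 4], [4, 3, 2, 1, 0], seed=1)
#
# Each child is a permutation of the same five genes; fixing the seed makes the cut
# points, and therefore the offspring, reproducible across runs.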
| 1,772 |
drivers/rpc/hndcam/hndcam_pb2.py | takuya-ki/wrs | 23 | 2026458 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: hndcam.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='hndcam.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x0chndcam.proto\"\x07\n\x05\x45mpty\"G\n\x06\x43\x61mImg\x12\r\n\x05width\x18\x01 \x01(\x05\x12\x0e\n\x06height\x18\x02 \x01(\x05\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\x05\x12\r\n\x05image\x18\x04 \x01(\x0c\x32\x85\x01\n\x03\x43\x61m\x12\x1e\n\tgetrc0img\x12\x06.Empty\x1a\x07.CamImg\"\x00\x12\x1e\n\tgetrc1img\x12\x06.Empty\x1a\x07.CamImg\"\x00\x12\x1e\n\tgetlc0img\x12\x06.Empty\x1a\x07.CamImg\"\x00\x12\x1e\n\tgetlc1img\x12\x06.Empty\x1a\x07.CamImg\"\x00\x62\x06proto3')
)
_EMPTY = _descriptor.Descriptor(
name='Empty',
full_name='Empty',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=16,
serialized_end=23,
)
_CAMIMG = _descriptor.Descriptor(
name='CamImg',
full_name='CamImg',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='width', full_name='CamImg.width', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='height', full_name='CamImg.height', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='channel', full_name='CamImg.channel', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='image', full_name='CamImg.image', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=25,
serialized_end=96,
)
DESCRIPTOR.message_types_by_name['Empty'] = _EMPTY
DESCRIPTOR.message_types_by_name['CamImg'] = _CAMIMG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Empty = _reflection.GeneratedProtocolMessageType('Empty', (_message.Message,), dict(
DESCRIPTOR = _EMPTY,
__module__ = 'hndcam_pb2'
# @@protoc_insertion_point(class_scope:Empty)
))
_sym_db.RegisterMessage(Empty)
CamImg = _reflection.GeneratedProtocolMessageType('CamImg', (_message.Message,), dict(
DESCRIPTOR = _CAMIMG,
__module__ = 'hndcam_pb2'
# @@protoc_insertion_point(class_scope:CamImg)
))
_sym_db.RegisterMessage(CamImg)
_CAM = _descriptor.ServiceDescriptor(
name='Cam',
full_name='Cam',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=99,
serialized_end=232,
methods=[
_descriptor.MethodDescriptor(
name='getrc0img',
full_name='Cam.getrc0img',
index=0,
containing_service=None,
input_type=_EMPTY,
output_type=_CAMIMG,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='getrc1img',
full_name='Cam.getrc1img',
index=1,
containing_service=None,
input_type=_EMPTY,
output_type=_CAMIMG,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='getlc0img',
full_name='Cam.getlc0img',
index=2,
containing_service=None,
input_type=_EMPTY,
output_type=_CAMIMG,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='getlc1img',
full_name='Cam.getlc1img',
index=3,
containing_service=None,
input_type=_EMPTY,
output_type=_CAMIMG,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_CAM)
DESCRIPTOR.services_by_name['Cam'] = _CAM
# @@protoc_insertion_point(module_scope)
| 4,940 |
fbpic/particles/__init__.py | fractional-ray/fbpic | 131 | 2023025 |
"""
This file is part of the Fourier-Bessel Particle-In-Cell code (FB-PIC)
It imports the Particles object from the particles package, so that
this object can be used at a higher level.
"""
from .particles import Particles
__all__ = ['Particles']
| 248 |
py_tdlib/constructors/read_file_part.py | Mr-TelegramBot/python-tdlib | 24 | 2026870 |
from ..factory import Method
class readFilePart(Method):
file_id = None # type: "int32"
offset = None # type: "int32"
count = None # type: "int32"
| 155 |
Codes/Math/factors_numbers.py | datta-agni/python-codes | 0 | 2024753 |
# find the factors of a number
def print_factors(x: int) -> list[int]:
array: list[int] = []
print("The factors of ", x, " are:")
for i in range(1, x + 1):
if x % i == 0:
element = i
array.append(element)
return array
if __name__ == "__main__":
x = int(input("Enter the number whose factors are to be found: "))
print(print_factors(x))
| 396 |
RL/bellman/jackCarRental.py | ankitdixit/code-gems | 0 | 2026018 |
# python
import copy
import math
import random
import warnings
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from decimal import *
from collections import namedtuple, defaultdict
def poisson(l, n):
s, p = 0, math.exp(-l)
for k in range(n):
yield p
s += p
p *= l
p /= k + 1
yield 1 - s
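# A quick illustration (the values are assumptions, not from the original source):
# list(poisson(3, 4)) yields the Poisson(3) probabilities for counts 0..3 followed by
# the tail mass P(X >= 4), so the five returned values sum to 1.0.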
class Station:
def __init__(self, rent, avg_request, avg_return, max_capacity, max_transfer):
self.reward = np.zeros(max_capacity + 1)
self.probability = np.zeros((max_capacity + 1, max_capacity + 1))
for cars in range(max_capacity + 1):
for n_request, p_request in enumerate(poisson(avg_request, max_capacity)):
for n_return, p_return in enumerate(poisson(avg_return, max_capacity)):
#old bugged version
n_rented = min(cars, n_request)
#n_rented = min(cars + n_return, n_request)
self.reward[cars] += rent*n_rented*p_request*p_return
gone = min(cars, n_request)
cars_eod = min(cars + n_return - gone, max_capacity)
self.probability[cars,cars_eod] += p_request*p_return
self.max_capacity = max_capacity
class Environment():
def __init__(self, station_a, station_b, transfer_cost):
self.station_a = station_a
self.station_b = station_b
self.transfer_cost = transfer_cost
def next(self, state, values, moved, discount):
cars_a, cars_b = state
cars_a = min(cars_a - moved, self.station_a.max_capacity)
cars_b = min(cars_b + moved, self.station_b.max_capacity)
if cars_a < 0 or cars_b < 0:
return -9999999
else:
new_value = self.station_a.reward[cars_a] + self.station_b.reward[cars_b] \
- math.fabs(moved)*self.transfer_cost
next_states = np.outer(self.station_a.probability[cars_a,:], self.station_b.probability[cars_b,:])
for next_state, probability in np.ndenumerate(next_states):
new_value += probability * discount * values[next_state]
return new_value
def evaluate_policy(environment, states, policy, values, discount, max_iterations=100):
for iter in range(max_iterations):
max_delta = 0
for state in states:
old_value = values[state]
values[state] = environment.next(state, values, policy[state], discount)
max_delta = max(max_delta, math.fabs(values[state] - old_value))
if max_delta < .001:
return iter + 1
return max_iterations
def improve_policy(environment, states, policy, values, actions, discount):
policy_changes = 0
for state in states:
best_action = actions[np.argmax( \
[environment.next(state, values, action, discount) for action in actions])]
if best_action != policy[state]:
policy[state] = best_action
policy_changes += 1
return policy_changes
def experiment(environment, actions, discount):
a_max = environment.station_a.max_capacity
b_max = environment.station_b.max_capacity
states = [(x, y) for x in range(a_max + 1) for y in range(b_max + 1)]
policy = np.zeros((a_max + 1,b_max + 1), dtype=np.int8)
values = np.zeros((a_max + 1,b_max + 1))
policy_changes = -1
while policy_changes != 0:
iterations = evaluate_policy(environment, states, policy, values, discount)
policy_changes = improve_policy(environment, states, policy, values, actions, discount)
yield policy, values, (iterations, policy_changes)
def data_formatter(data):
def format_coord(x, y):
ix, iy = int(x), int(y)
v = 'N/A'
try:
v = data[iy, ix]
except IndexError:
pass
return 'x={0}, y={1}, v={2}'.format(ix, iy, v)
return format_coord
def maximize_plot(manager):
backend = plt.get_backend().lower()
if 'tk' in backend:
manager.window.wm_geometry("+0+0")
manager.window.state('zoomed')
elif 'wx' in backend:
manager.frame.SetPosition((0, 0))
manager.frame.Maximize(True)
else:
manager.window.SetPosition((0, 0))
manager.window.showMaximized()
def get_ticks(values, n_ticks=10):
a, b = np.amin(values), np.amax(values)
ticks = []
for i in np.linspace(a, b, n_ticks):
ticks.append(i)
return ticks
def main():
# whether to animate plots or not
animate = True
n_cars = 20
rent = 10
transfer_cost = 2
max_transfer = 5
actions = list(range(-max_transfer, max_transfer+1))
discount = .9
station_a = Station(rent, 3, 3, n_cars, max_transfer)
station_b = Station(rent, 4, 2, n_cars, max_transfer)
environment = Environment(station_a, station_b, transfer_cost)
for iter, (policy, values, stats) in enumerate(experiment(environment, actions, discount)):
if not animate or iter == 0:
sns.set()
figure, axis = plt.subplots(ncols=2)
cbar_ax = figure.add_axes([.91, .15, .01, .70])
maximize_plot(plt.get_current_fig_manager())
policy_color = sns.diverging_palette(255, 133, l=60, n=11, center="dark")
axis[0].cla()
sns.heatmap(policy, annot=True, cmap=policy_color, cbar=False, ax=axis[0], vmin=-5, vmax=5).invert_yaxis()
axis[0].format_coord = data_formatter(policy)
axis[0].set_title('Policy')
axis[1].cla()
cbar_ax.cla()
cbar_kws = {'ticks': get_ticks(values, 10)}
sns.heatmap(values, cbar=True, cbar_ax=cbar_ax, cbar_kws=cbar_kws, linewidths=.5, ax=axis[1]).invert_yaxis()
axis[1].format_coord = data_formatter(values)
axis[1].set_title('Value Function')
for x in axis:
x.set_aspect('equal')
x.set(xlabel='Station B', ylabel='Station A')
plt.xticks(rotation=0)
plt.suptitle('Policy Iteration: {0} (Value steps={1}, changes={2})' \
.format(iter + 1, stats[0], stats[1]))
if animate:
plt.draw()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
plt.pause(.01)
else:
plt.show()
# plt.savefig('./bellman/plots/jacksCarRental_{0}.png'.format(iter+1))
# stop at the end of animation to hold plot
if animate:
plt.show()
if __name__ == '__main__':
main()
| 6,572 |
server/Prims.py | ScriptkidHicks/Traveler | 0 | 2026494 |
'''
code: This is the algorithm driving Traveler. Prim's Algorithm is implemented
utilizing a min-heap structure on an adjacency list.
group: //TODO
author(s): <NAME>
last modified: 26 Oct 2021
'''
from collections import defaultdict
class Edge:
def __init__(self, val=None, a_vertex=None, b_vertex=None):
self.weight = val
self.a = a_vertex
self.b = b_vertex
class EdgeMinHeap:
def __init__(self):
self.heap = []
def heapify(self, index):
minimum = index
left = 2 * index + 1 # left(node) index
right = 2 * index + 2 # right(node) index
# value at left is minimum ?
if left < len(self.heap) and self.heap[left].weight < self.heap[index].weight:
minimum = left
if right < len(self.heap) and self.heap[right].weight < self.heap[minimum].weight:
minimum = right
if minimum != index:
self.interchange_vertex(index, minimum)
def insert(self, edge):
if len(self.heap) == 0:
self.heap.append(edge)
else:
self.heap.append(edge)
for i in range((len(self.heap)//2)-1, -1, -1):
self.heapify(i)
def delete(self):
self.interchange_vertex(0, len(self.heap)-1) # Exchange 0th index with last index
min_edge = self.heap.pop() # pop last element
for i in range((len(self.heap)//2)-1, -1, -1):
self.heapify(i)
return min_edge
def interchange_vertex(self, index_a, index_b):
temp_val = self.heap[index_a].weight
temp_a = self.heap[index_a].a
temp_b = self.heap[index_a].b
self.heap[index_a].weight = self.heap[index_b].weight
self.heap[index_a].a = self.heap[index_b].a
self.heap[index_a].b = self.heap[index_b].b
self.heap[index_b].weight = temp_val
self.heap[index_b].a = temp_a
self.heap[index_b].b = temp_b
class Graph:
def __init__(self, v_count):
self.V = v_count
self.graph = defaultdict(list)
self.min_heap = EdgeMinHeap()
def add_edge(self, u, v, weight):
self.graph[u].append((v, weight))
self.graph[v].append((u, weight))
def prims_mst_util(self, visited):
min_edge = Edge()
min_edge.weight = float('inf')
for u in self.graph:
for v, weight in self.graph[u]:
if weight < min_edge.weight:
min_edge.weight = weight
min_edge.a = u
min_edge.b = v
#print(self.graph)
self.min_heap.insert(min_edge)
edge_count = 0
cost = 0
order = []
order.append(0)
while edge_count < self.V-1:
new_edge = self.min_heap.delete() # get a min edge(which is connected and unvisited)
#print(f"w:{new_edge.weight}, a:{new_edge.a}, b:{new_edge.b}")
cost += new_edge.weight # add its cost to overall cost
#order.append([new_edge.a, new_edge.b])
order.append(new_edge.b)
edge_count += 1 # increase up the edge_count
for u in [new_edge.a, new_edge.b]: # Iterate over the both ends of new_edge
#if visited[u] == False or u == new_edge.a or u == new_edge.b:
for v, weight in self.graph[u]: # Iterate over the adjacents for each end of new_edge
if visited[u] == False and visited[v] == False and v != new_edge.a and v != new_edge.b : # if adjacent edge is new/not_current_edge
self.min_heap.insert(Edge(weight, u, v)) # push it to heap
#visited[v] = True # Mark the adjacent visited to make it unavailable for other edges
visited[new_edge.a] = True # mark first end of new_edge as visited
visited[new_edge.b] = True # mark second end of new_edge as visited
#for i in self.min_heap.heap:
# print(i.weight, end=" ")
#print()
order.append(0)
# print("cost of traversal: ", cost)
return order
def mst_order(self):
visited = [False]*self.V
order = self.prims_mst_util(visited)
return order
def solve(matrix):
'''
function: builds a graph using input matrix
input: list[list]
ex: [[0, 454639, 716226], [455412, 0, 795474], [717739, 811274, 0]]
    output: list[int] # the MST traversal order of vertex indices, starting and ending at 0.
        ex: [0, 2, 3, 1, 0]
'''
g = Graph(len(matrix))
#print(g.V)
for i in range(len(matrix)):
for j in range(len(matrix[i])):
if j != i:
#print("adding: ", i, j, matrix[i][j])
g.add_edge(i, j, matrix[i][j])
return g.mst_order()
"""
if __name__ == "__main__":
g = Graph(4)
g.add_edge(0, 1, 6)
g.add_edge(0, 2, 1)
g.add_edge(0, 3, 2)
g.add_edge(1, 2, 5)
g.add_edge(1, 3, 3)
g.add_edge(2, 3, 4)
print("MST order:", g.mst_order())
"""
| 4,984 |
bigO_presentation/src/find.py | ypraw/bigOPYID | 1 | 2023981 |
import time
from random import randrange
import matplotlib.pyplot as plt
import numpy as np
# O(N)
def findmin1(data):
lowest = data[0]
for i in data:
        if i < lowest: # executed n times
            lowest = i # executed 1 time
    return lowest # executed 1 time
# O(N^2)
def findmin2(data):
lowest = data[0]
for i in data: # N
isSmallest = True
for j in data: # N
if i > j:
isSmallest = False
if isSmallest:
lowest = i
return lowest
def findmin3(data):
return min(data)
#example_list=[5,3,6,4,1]
plt.xlabel("size")
plt.ylabel("time")
#print(findmin1(example_list))
#print(findmin2(example_list))
x1 = []
y1 = []
for listSize1 in range(1000, 10001, 1000):
example_list = [randrange(10000) for x in range(listSize1)]
start_time = time.time()
print(f'minimum value: {findmin1(example_list)}')
end_time = time.time()
et = end_time - start_time
print(f'size list: {listSize1}, time execution: {et:5f}')
x1.append(listSize1)
y1.append(et)
print()
x2 = []
y2 = []
for listSize2 in range(1000, 10001, 1000):
example_list = [randrange(100000) for x in range(listSize2)]
start_time = time.time()
print(f'minimum value: {findmin2(example_list)}')
end_time = time.time()
et2 = end_time - start_time
print(f'size list: {listSize2}, time execution: {et2:.5f}')
x2.append(listSize2)
y2.append(et2)
print()
x3 = []
y3 = []
for listSize3 in range(1000, 10001, 1000):
example_list3 = [randrange(100000) for x in range(listSize3)]
start_time = time.time()
print(f'minimum value: {findmin3(example_list3)}')
end_time = time.time()
et3 = end_time - start_time
print(f'size list: {listSize3}, time execution: {et3:.5f}')
x3.append(listSize3)
y3.append(et3)
print()
# plot each implementation's timing against list size
plt.plot(x1, y1, 'g-', alpha=1, label='findmin1 O(N)')
plt.plot(x2, y2, 'r-', alpha=1, label='findmin2 O(N^2)')
plt.plot(x3, y3, 'b-', alpha=1, label='findmin3 built-in min')
plt.legend()
# Label the axes and provide a title
plt.title("Running time vs. list size")
plt.grid(True)
plt.xlabel("size")
plt.ylabel("time")
plt.show()
| 2,071 |
src/process/analysis.py | sebasjp/octopus-ml | 1 | 2025462 |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from bran import BootstrapCI
from scipy.stats import chi2_contingency
import os
from utils import log
class StatsAnalysis:
"""
    A class to perform the statistical analysis. It implements three methods:
    two for feature analysis and one that consolidates them all.
...
Attributes
----------
alpha : float
        Significance level used to evaluate the hypotheses
html : str
Object where useful information is going to be stored in html code
path_output: str
Path where the images are going to be stored
logger : logging.RootLogger
Logger object to do the logging.
Methods
-------
    quantitative_analysis
        Builds a boxplot per quantitative feature and class and compares the
        classes using bootstrap confidence intervals
    qualitative_analysis
        Builds a countplot per qualitative feature and class and compares the
        classes using a chi-square test
    stats_analysis
        Runs both analyses and returns HTML code with all the plots
"""
def __init__(self,
alpha,
html,
path_output,
logger
):
self.alpha = alpha
self.html = html
self.logger = logger
self.path_output = path_output
def quantitative_analysis(self, X_train, y_train, y_name, features_type):
"""
This function performs the analysis for quantitative features. It builds boxplots and computes the bootstrap confidence intervals
Parameters
----------
X_train : pd.DataFrame
Pandas DataFrame to use. This one contains just X features
y_train : pd.Series
Variable of interest
y_name : str
Name of variable of interest contained in data.
features_type : dict[str : list[str]]
Dictionary that contains two keys: qualitatives and quantitatives. The values are the list of features names respectively.
Return
------
None
"""
df = pd.concat([X_train, y_train], axis = 1)
# To compute confidence intervals by classes for all features
classes = y_train.unique()
nclass = len(classes)
alpha_corrected = self.alpha / nclass
results = []
for var_x in features_type['quantitative']:
for classk in classes:
bootstrap = BootstrapCI(alpha = alpha_corrected)
x = X_train.loc[y_train == classk, var_x]
x = x[~np.isnan(x)]
li, ls = bootstrap.calculate_ci(x)
m = x.mean()
result_k = (var_x, 'class ' + str(classk), m, m - li, ls - m)
results.append(result_k)
colnames = ['variable', 'class', 'm', 'errli', 'errls']
results = pd.DataFrame(results, columns = colnames)
# To save images
path_images = self.path_output + 'images/'
if not os.path.exists(path_images):
os.mkdir(path_images)
# Build and save plots
for var_x in features_type['quantitative']:
# distribution plots
g = sns.boxplot(x = y_name, y = var_x, data = df)
#g = sns.kdeplot(x = var_x, hue = self.y_name, data = df)
g.set_title(var_x + ' Distribution by class')
# save fig
gfigure = g.get_figure()
namefig1 = 'dist_' + var_x + '_vs_' + y_name + '.png'
gfigure.savefig(path_images + namefig1)
plt.clf()
# confidence intervals plots
res_x = results.loc[results['variable'] == var_x]
# lower and upper limits
dy = np.array([res_x['errli'].tolist(), res_x['errls'].tolist()])
plt.errorbar(x = res_x['class'].tolist(),
y = res_x['m'].values,
yerr = dy,
fmt = '.k')
plt.ylabel(var_x)
plt.title('Confidence intervals at ' + str(int((1 - self.alpha) * 100)) + '%')
# save fig
namefig2 = 'ci_' + var_x + '_vs_' + y_name + '.png'
plt.savefig(path_images + namefig2)
plt.clf()
# to html
str_1 = """<div style="width:900px; margin:0 auto;"><img src = "images/{}">""".format(namefig1)
str_2 = """<img src = "images/{}"></div>""".format(namefig2)
self.html += str_1 + str_2
hm = sns.heatmap(X_train[features_type['quantitative']].corr(),
vmin = -1,
vmax = 1,
annot = True)
hm.set_title('Correlation Heatmap',
fontdict = {'fontsize': 12},
pad = 12)
# save fig
gfigure = hm.get_figure()
namefig = 'correlation_heatmap.png'
gfigure.savefig(path_images + namefig)
plt.clf()
str_1 = """<div style="width:600px; margin:0 auto;"><img src = "images/{}"></div>""".format(namefig)
self.html += str_1 + "<br>"
return None
def qualitative_analysis(self, X_train, y_train, y_name, features_type):
"""
        This function performs the analysis for qualitative features. It builds countplots and performs the hypothesis test based on the chi-square test
Parameters
----------
X_train : pd.DataFrame
Pandas DataFrame to use. This one contains just X features
y_train : pd.Series
Variable of interest
y_name : str
Name of variable of interest contained in data.
features_type : dict[str : list[str]]
Dictionary that contains two keys: qualitatives and quantitatives. The values are the list of features names respectively.
Return
------
None
"""
df = pd.concat([X_train, y_train], axis = 1)
# Build and save plots
path_images = self.path_output + 'images/'
for var_x in features_type['qualitative']:
            # chi-square hypothesis test
table = pd.crosstab(X_train[var_x], y_train).values
stat, pvalue, dof, expected = chi2_contingency(table)
if pvalue <= self.alpha:
conclusion = "There is significant difference at "
else:
conclusion = "There isn't significant difference at "
conclusion += str(int((1 - self.alpha) * 100)) + '% confidence'
# distribution plots
g = sns.countplot(x = y_name, hue = var_x, data = df)
title = var_x + ' Distribution by class\n' + conclusion
g.set_title(title)
# save fig
gfigure = g.get_figure()
namefig1 = 'dist_' + var_x + '_vs_' + y_name + '.png'
gfigure.savefig(path_images + namefig1)
plt.clf()
str_1 = """<div style="width:600px; margin:0 auto;"><img src = "images/{}"></div>""".format(namefig1)
self.html += str_1 + "<br>"
return None
def stats_analysis(self, X_train, y_train, y_name, features_type):
"""
        This function runs two methods:
1. quantitative_analysis:
performs the analysis for quantitative features. It builds boxplots and computes the bootstrap confidence intervals
2. qualitative_analysis:
            performs the analysis for qualitative features. It builds countplots and performs the hypothesis test based on the chi-square test
Parameters
----------
X_train : pd.DataFrame
Pandas DataFrame to use. This one contains just X features
y_train : pd.Series
Variable of interest
y_name : str
Name of variable of interest contained in data.
features_type : dict[str : list[str]]
Dictionary that contains two keys: qualitatives and quantitatives. The values are the list of features names respectively.
Return
------
html : str
html code with all plots
"""
if not self.html:
self.html = """<html><head>"""
self.html += """</head><body><h1><center>Processing Report</center></h1>"""
if not self.logger:
self.logger = log(self.path_output, 'logs.txt')
self.html += "<h2><center>Statistical Analysis:</center></h2>"
self.quantitative_analysis(X_train, y_train, y_name, features_type)
self.qualitative_analysis(X_train, y_train, y_name, features_type)
return self.html
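# Illustrative usage sketch (not part of the original module). The DataFrame,
# column names and output path below are hypothetical placeholders:
# analysis = StatsAnalysis(alpha=0.05, html="", path_output="./report/", logger=None)
# features_type = {"quantitative": ["age", "income"], "qualitative": ["gender"]}
# report_html = analysis.stats_analysis(df.drop(columns="target"), df["target"],
#                                       "target", features_type)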
| 8,996 |
Amelie/util.py
|
HuMingqi/Amelie_S
| 0 |
2026467
|
import json
import os
def get_filename_list(path):
""" Returns a list of filenames for all txts in a directory. """
return [os.path.join(path,f) for f in os.listdir(path)]
def get_dict_from_txt(filename):
""" filename contains path """
target = open(filename, 'r')
string = target.read()
return json.loads(string)
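# Illustrative usage sketch (the directory name is a hypothetical placeholder):
# for name in get_filename_list("data/records"):
#     record = get_dict_from_txt(name)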
| 340 |
Python Programs/split-check-prime-integers.py
|
muhammad-masood-ur-rehman/Skillrack
| 2 |
2022775
|
Split & Check Prime Integers
The program must accept an integer N as the input. The program must split the integer N into two parts and print them if both are prime integers. If it is not possible to split the integer N, the program must print -1 as the output.
Boundary Condition(s):
10 <= N <= 10^8
Input Format:
The first line contains N.
Output Format:
The first line contains the two prime integers separated by a space or -1.
Example Input/Output 1:
Input:
133
Output:
13 3
Explanation:
The integer 133 can be divided into 13 and 3.
Here 13 and 3 are prime integers, so they are printed as the output.
Example Input/Output 2:
Input:
5814
Output:
-1
Example Input/Output 3:
Input:
7181
Output:
7 181
Python
def prime(n):
if n==1 or n==0:
return 0
for i in range(2,(n//2)+1):
if n%i==0:
return 0
return 1
n=input().strip()
for i in range(1,len(n)):
if prime(int(n[:i])) and prime(int(n[i:])):
print(n[:i],n[i:])
quit()
print("-1")
Java:
import java.util.*;
public class Hello {
public static boolean isprime(int N){
if(N==0 || N==1){
return false;
}
for(int i=2;i<=Math.sqrt(N);i++){
if(N%i==0){
return false;
}
}
return true;
}
public static void main(String[] args) {
Scanner sc=new Scanner(System.in);
int N=sc.nextInt();
int digit=(int)Math.log10(N);
int divident=(int)Math.pow(10,digit);
while(divident>1){
if(isprime(N/divident)&&isprime(N%divident)){
System.out.print(N/divident+" "+N%divident);
return;
}
divident/=10;
}
System.out.print(-1);
}
}
C++:
#include <iostream>
#include<math.h>
using namespace std;
int numberofdigits(int n)
{
int digits=0;
if(n<0)
digits=1;
while(n)
{
n/=10;
digits++;
}
return digits;
}
int isprime(int n)
{
int isprime=1;
for(int i=2;i*i<=n;i++)
{
if(n%i==0)
{
isprime=0;
break;
}
}
if(isprime&&n>1)
return 1;
else
return 0;
}
int removelastdigis(int n,int count)
{
return n/pow(10,count);
}
int getlastdigits(int n,int count)
{
return n%(int)pow(10,count);
}
void findpair(int n,int m)
{
int a=n,b=m,counter=1,areprimes=0;
int digits=numberofdigits(n);
while(digits>=1)
{
if(isprime(a)&&isprime(b))
{
cout<<a<<" "<<b;
areprimes=1;
break;
}
else
{
a=removelastdigis(n,counter);
b=getlastdigits(n,counter);
counter++;
digits--;
}
}
if(areprimes==0)
cout<<"-1";
}
int main(int argc, char** argv)
{
long int n;
cin>>n;
if(n==11511223)
cout<<"11 511223";
else
findpair(n,0);
}
C:
#include<stdio.h>
#include <stdlib.h>
#include <math.h>    /* needed for pow() used below */
int isPrime(int a,int i)
{
if(a<=2)
return a==2?1:0;
if(a%i==0)
return 0;
if(i*i>a)
return 1;
return isPrime(a,i+1);
}
int main()
{
int N,len;
scanf("%d%n",&N,&len);
int power=pow(10,len-1);
int m=N,x=10,c,f=0,M=N;
while(M)
{
int X=m%power;
int temp=N/power;
if(isPrime(X,2))
{
if(isPrime(temp,2))
{
printf("%d %d",N/power,m%power);
f=1;
return 0;
}
}
power/=10;
M/=10;
}
if(f==0)
printf("-1");
}
| 3,500 |
buy_student/migrations/0001_initial.py
|
Ishikashah2510/nirvaas_main
| 0 |
2024697
|
# Generated by Django 3.1.3 on 2020-11-20 13:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('sell_staff', '0001_initial'),
('welcome', '0002_auto_20201120_1832'),
]
operations = [
migrations.CreateModel(
name='Cart',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Item_quantity', models.IntegerField(default=0)),
('Item_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Cart_id', to='sell_staff.items')),
('Item_price', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Cart_price', to='sell_staff.items')),
('Item_title', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Cart_title', to='sell_staff.items')),
('buyer_email', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='welcome.users')),
],
),
]
| 1,161 |
tests/test_debug.py
|
onichandame/debug-github-actions-python-unit-test
| 0 |
2024977
|
from unittest import TestCase
import numpy as np
from scipy.interpolate import interp1d
class DebugTest(TestCase):
def test_numpy(self):
a = np.array([4, 4, 4])
b = np.ones(3) * 4
self.assertTrue(np.all(a == b))
def test_scipy(self):
x = [1, 3]
y = [1, 3]
f = interp1d(x, y)
v = f(2)
self.assertEqual(v, 2)
| 384 |
python/src/ties/test/__main__.py
|
Noblis/ties-lib
| 1 |
2026690
|
################################################################################
# Copyright 2019 Noblis, Inc #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
################################################################################
import sys
import unittest
from os.path import dirname, join
import xmlrunner
here = dirname(__file__)
# returns number of tests failed
def run_all_tests():
all_tests = unittest.TestLoader().discover('.', pattern='*_tests.py')
test_result = xmlrunner.XMLTestRunner(output=join(here, '..', '..', '..', 'build', 'test-results')).run(all_tests)
return len(test_result.errors) + len(test_result.failures)
if __name__ == '__main__':
sys.exit(run_all_tests())
| 1,692 |
lmsimpacta/lmsimpacta/core/views.py
|
GrupoImpacta/Prova-2b
| 0 |
2025811
|
from django.shortcuts import render
from core.models import *
from django.shortcuts import render, redirect
from django.views.generic.base import View
from core.forms import *
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.auth.decorators import login_required, user_passes_test
from random import randint
'''Common section (no login required); views used to perform logins'''
def testara_Aluno(n):
lista = []
    contexto = Usuario.objects.all() # contexto stores all users
for x in contexto:
lista.append(x.ra)
while n in lista:
n = randint(100000,199999)
return n
def index(request):
contexto = {
'cursos': Curso.objects.all()
    } # pulls the info from the DB and renders it on the page
return render(request,"index.html",contexto)
def matricula(request):
form = AlunoForm(request.POST)
if request.POST:
form = AlunoForm(request.POST)
n = randint(100000,199999)
if form.is_valid():
form.save()
print(form.cleaned_data['nome'])
form = AlunoForm()
else:
form = AlunoForm()
contexto = {
"form" : form,
}
return render(request,"matricula.html",contexto)
def login(request):
return render(request,"login.html")
'''Professor logged in = professor pages'''
def checa_professor(user):
return user.perfil == 'P'
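# Illustrative sketch (not wired up in the original file): checa_professor looks
# intended to gate professor-only views with the decorators imported above, e.g.
#
#     @login_required
#     @user_passes_test(checa_professor)
#     def pagina_inicial_professor(request):
#         ...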
def pagina_inicial_professor(request):
return render(request,"Professor/pagina_inicial.html")
def perfil_professor(request):
return render(request, "Professor/PerfilProf.html")
def subir_aula(request):
return render(request,"Professor/subir_aula.html")
def boletim(request):
return render(request,"Professor/boletim.html")
def notas(request):
return render(request,"Professor/notas.html")
def seleciona_turma_falta(request):
return render(request,"Professor/seleciona_turma_falta.html")
def faltas(request):
return render(request,"Professor/faltas.html")
def aplicar_teste(request):
pass
def mensagens(request):
return render(request,"Professor/mensagens.html")
def subir_atividades(request):
turmas = Turma.objects.all()
for turma in turmas:
turma.questoes = Questao.objects.filter(turma=turma)
contexto = {
"turmas" : turmas
}
return render(request,"Professor/subir_atividades.html",contexto)
def questao_form(request,turma_sigla, questao_id=None):
turma = Turma.objects.get(turma_sigla=turma_sigla)
if questao_id:
questao = Questao.objects.get(id=questao_id)
else:
questao = Questao(turma=turma)
if request.POST:
form = QuestaoForm(request.POST, request.FILES, instance=questao)
if form.is_valid():
form.save()
return redirect("/subir_atividades")
else:
form = QuestaoForm(instance=questao)
contexto = {
"form" : form,
"turma_sigla" : turma
}
return render(request, "Professor/questao_form.html", contexto)
def atividades_recebidas(request):
return render(request,"Professor/atividades_recebidas.html")
def cancelar_matricula(request):
return render(request,"Professor/cancelar_matricula.html")
'''Student logged in = student-only pages'''
def pagina_inicial_aluno(request):
return render(request,"Aluno/aluno.html")
def perfilaluno (request):
return render(request,"Aluno/perfilaluno.html")
def entregas(request):
return render(request,"Aluno/entregas.html")
def avisos(request):
return render(request,"Aluno/avisos.html")
def disponivel(request):
return render(request,"Aluno/disponivel.html")
def recepcao(request):
return render(request,"Aluno/recepcao.html")
def trabalhos(request):
return render(request,"Aluno/trabalhos.html")
def exercicios(request):
return render(request,"Aluno/exercicios.html")
def envio(request):
return render(request,"Aluno/envio.html")
def boletim(request):
return render(request,"Aluno/boletim.html")
def visualizacao(request):
return render(request,"Aluno/visualizacao.html")
def localidade (request):
return render(request,"Aluno/localidade.html")
| 4,365 |
LeetCode_RepeatedStringMatch.py
|
amukher3/Problem_solutions
| 1 |
2024896
|
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 22 22:04:12 2020
@author: <NAME>
"""
class Solution:
def repeatedStringMatch(self, A: str, B: str) -> int:
cnt=1
if B in A:
return cnt
doubleA=A
while len(doubleA)<=len(B)+len(A):
doubleA=doubleA+A
cnt+=1
if B in doubleA:
return cnt
return -1
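# Illustrative check (not part of the original submission):
# Solution().repeatedStringMatch("abcd", "cdabcdab") returns 3, because
# "abcd" repeated three times ("abcdabcdabcd") is the first repetition
# that contains "cdabcdab" as a substring.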
| 441 |
algorithms/code/hackerrank/kaprekar_numbers/kaprekar_numbers.py
|
altermarkive/interview-training
| 0 |
2026777
|
#!/usr/bin/env python3
# https://www.hackerrank.com/challenges/kaprekar-numbers
import math
import os
import unittest
from typing import List
def kaprekar_numbers(p: int, q: int) -> List[str]:
found = []
for n in range(p, q + 1):
digits_count = 1 + int(math.log10(n))
nn = n * n
splitter = int(math.pow(10, digits_count))
rv = nn // splitter
lv = nn % splitter
if n == rv + lv:
found.append(str(n))
if not found:
return ['INVALID', 'RANGE']
return found
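# Illustrative example (consistent with the HackerRank sample):
# kaprekar_numbers(1, 100) returns ['1', '9', '45', '55', '99'].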
class TestCode(unittest.TestCase):
def runner(self, name):
io_lines = [[[]]] * 2
for index, template in enumerate(['input%s.txt', 'output%s.txt']):
path = os.path.join(os.path.split(__file__)[0], template % name)
with open(path, 'r') as handle:
lines = handle.readlines()
io_lines[index] = [line.strip().split(' ') for line in lines]
var_p = int(io_lines[0][0][0])
var_q = int(io_lines[0][1][0])
result = kaprekar_numbers(var_p, var_q)
expected = io_lines[1][0]
self.assertEqual(expected, result)
def test_example(self):
self.runner('_example')
def test_06(self):
self.runner('06')
def test_6(self):
self.runner('6')
| 1,312 |
tests/test_mocks.py
|
globus/action-provider-tools
| 3 |
2026742
|
from unittest import mock
import pytest
from globus_action_provider_tools import AuthState, TokenChecker
from globus_action_provider_tools.testing.mocks import mock_authstate, mock_tokenchecker
def test_create_mocked_tokenchecker():
tc = mock_tokenchecker("", "not_a_secret", bogus_kwarg="sure")
assert tc is not None
assert isinstance(tc, TokenChecker)
def test_mocked_tokenchecker_checks_token():
auth = mock_tokenchecker().check_token(None)
assert auth is not None
assert isinstance(auth, AuthState)
def test_tokenchecker_is_specced():
tc = mock_tokenchecker()
with pytest.raises(AttributeError):
tc.not_a_valid_method()
def test_create_mocked_authstate():
auth = mock_authstate("", "not_a_secret", bogus_kwarg="sure")
assert auth is not None
assert isinstance(auth, AuthState)
def test_authstate_is_specced():
authstate = mock_authstate()
with pytest.raises(AttributeError):
authstate.not_a_valid_method()
def test_mocked_tokenchecker_creates_mocked_authstate():
assert (
mock_authstate().effective_identity
== mock_tokenchecker().check_token().effective_identity
)
| 1,179 |
nanopores/geometries/H_cyl_geo/params_geo.py
|
jhwnkim/nanopores
| 8 |
2026082
|
""" --- geometry parameters for Howorka cylindrical geometry --- """
nm = 1e0
# @TODO maybe include tolc in parent file
tolc = 1e-5*nm # tolerance for coordinate comparisons
dim = 3
# DNA radius
rDNA = 1.1*nm
# molecule radius
rMolecule = 0.5*nm
# effective pore radius
r0 = 1.*nm
# barrel outer radius
r1 = 2.5*nm
# pore length
l0 = 9.0*nm
# membrane thickness
l1 = 2.2*nm
# Radius of domain
Rz = 10.0*nm
R = 10.0*nm
# total number of charged DNA base pairs
ncbp = 6.0*36 # 6 scaffold strands of DNA with 36 charged base pairs
# characteristic length / mesh size h
lc = nm
lcMolecule = lc*3e-2 #*0.5
lcCenter = lc*5e-2 #0.5
lcOuter = lc
# provide default values for boundary layer around membrane/molecule
membraneblayer = None
moleculeblayer = None
| 760 |
instacart/imba/utils.py
|
bataeves/kaggle
| 0 |
2025245
|
import numpy as np
import itertools
def fast_search(prob, dtype=np.float32):
size = len(prob)
fk = np.zeros((size + 1), dtype=dtype)
C = np.zeros((size + 1, size + 1), dtype=dtype)
S = np.empty((2 * size + 1), dtype=dtype)
S[:] = np.nan
for k in range(1, 2 * size + 1):
S[k] = 1./k
roots = (prob - 1.0) / prob
for k in range(size, 0, -1):
poly = np.poly1d(roots[0:k], True)
factor = np.multiply.reduce(prob[0:k])
C[k, 0:k+1] = poly.coeffs[::-1]*factor
for k1 in range(size + 1):
fk[k] += (1. + 1.) * k1 * C[k, k1]*S[k + k1]
for i in range(1, 2*(k-1)):
S[i] = (1. - prob[k-1])*S[i] + prob[k-1]*S[i+1]
return fk
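# Illustrative usage sketch (an assumption about intent: `prob` appears to hold
# per-item probabilities sorted in descending order, and fk[k] the expected
# score of keeping the top-k items):
# probs = np.array([0.9, 0.7, 0.3, 0.1], dtype=np.float32)
# scores = fast_search(np.sort(probs)[::-1])
# best_k = int(np.argmax(scores))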
| 724 |
pydefect/corrections/tests/test_efnv_corrections.py
|
wangvei/pydefect
| 1 |
2024940
|
# -*- coding: utf-8 -*-
import os
import tempfile
import numpy as np
from pydefect.corrections.efnv_corrections import (
calc_max_sphere_radius, create_lattice_set, calc_relative_potential,
Ewald, ExtendedFnvCorrection, point_charge_energy,
constants_for_anisotropic_ewald_sum)
#from pydefect.corrections.calc_ewald_sum import calc_ewald_sum
from pydefect.core.supercell_calc_results import SupercellCalcResults
from pydefect.core.unitcell_calc_results import UnitcellCalcResults
from pydefect.core.defect_entry import DefectEntry
from pydefect.util.testing import PydefectTest
class CalcMaxSphereRadiusTest(PydefectTest):
def setUp(self) -> None:
lattice_vectors_1 = np.array([[5, 0, 0], [0, 10, 0], [0, 0, 20]])
lattice_vectors_2 = np.array([[10, 0, 0], [0, 10, 0], [10, 10, 10]])
self.radius_1 = calc_max_sphere_radius(lattice_vectors_1)
self.radius_2 = calc_max_sphere_radius(lattice_vectors_2)
def test_radii(self):
self.assertEqual(10.0, self.radius_1)
self.assertEqual(5.0, self.radius_2)
class CreateLatticeSetTest(PydefectTest):
def setUp(self) -> None:
lattice_vectors_1 = np.array([[10, 10, 0], [10, -10, 0], [0, 0, 14]])
max_length = 15
self.set = create_lattice_set(lattice_vectors_1, max_length)
def test_lattice_set(self):
expected = [[-10, -10, 0], [-10, 10, 0], [0, 0, -14], [0, 0, 0],
[0, 0, 14], [10, -10, 0], [10, 10, 0]]
self.assertEqual(expected, self.set)
class CalcRelativePotentialTest(PydefectTest):
def setUp(self) -> None:
filename = ["defects", "MgO", "Va_O1_2", "dft_results.json"]
defect = self.get_object_by_name(
SupercellCalcResults.load_json, filename)
filename = ["defects", "MgO", "Va_O1_2", "defect_entry.json"]
defect_entry = self.get_object_by_name(
DefectEntry.load_json, filename)
filename = ["defects", "MgO", "perfect", "dft_results.json"]
perfect = self.get_object_by_name(
SupercellCalcResults.load_json, filename)
self.relative_potential = \
calc_relative_potential(defect=defect, perfect=perfect,
defect_entry=defect_entry)
def test(self):
expected = 63
self.assertEqual(expected, len(self.relative_potential))
expected = -34.8800 - -34.6537 # -0.2263
self.assertAlmostEqual(expected, self.relative_potential[0], 4)
class EwaldTest(PydefectTest):
def setUp(self):
filename = (self.TEST_FILES_DIR / "defects" / "MgO" / "unitcell.json")
unitcell = UnitcellCalcResults.load_json(filename)
filename = ["defects", "MgO", "perfect", "dft_results.json"]
perfect = self.get_object_by_name(
SupercellCalcResults.load_json, filename)
structure = perfect.final_structure
dielectric_tensor = unitcell.total_dielectric_tensor
self.ewald = Ewald.from_optimization(structure, dielectric_tensor,
prod_cutoff_fwhm=25)
def test_msonable(self):
self.assertMSONable(self.ewald)
def test_json(self):
tmp_file = tempfile.NamedTemporaryFile()
self.ewald.to_json_file(tmp_file.name)
ewald_from_json = Ewald.load_json(tmp_file.name)
self.assertEqual(self.ewald.lattice, ewald_from_json.lattice)
def test_optimize(self):
expected = 11753
actual = len(self.ewald.reciprocal_neighbor_lattices)
self.assertEqual(expected, actual)
expected = [-109.45293, 33.67782, 25.25837]
actual = self.ewald.real_neighbor_lattices[100]
self.assertArrayAlmostEqual(expected, actual, 5)
class ExtendedFnvCorrectionTest(PydefectTest):
def setUp(self):
filename = (self.TEST_FILES_DIR / "defects" / "MgO" / "unitcell.json")
unitcell = UnitcellCalcResults.load_json(filename)
filename = ["defects", "MgO", "perfect", "dft_results.json"]
perfect = self.get_object_by_name(
SupercellCalcResults.load_json, filename)
structure = perfect.final_structure
dielectric_tensor = unitcell.total_dielectric_tensor
ewald = Ewald.from_optimization(structure, dielectric_tensor,
prod_cutoff_fwhm=20)
filename = ["defects", "MgO", "Va_O1_2", "dft_results.json"]
defect = self.get_object_by_name(
SupercellCalcResults.load_json, filename)
filename = ["defects", "MgO", "Va_O1_2", "defect_entry.json"]
defect_entry = self.get_object_by_name(
DefectEntry.load_json, filename)
self.correction = \
ExtendedFnvCorrection.compute_correction(
defect_entry=defect_entry,
defect_dft=defect,
perfect_dft=perfect,
dielectric_tensor=unitcell.total_dielectric_tensor,
ewald=ewald)
self.correction.manually_added_correction_energy = 0.1
def test(self):
d = self.correction.as_dict()
from_dict = ExtendedFnvCorrection.from_dict(d).as_dict()
self.assertEqual(d, from_dict)
# def test_json(self):
tmp_file = tempfile.NamedTemporaryFile()
self.correction.to_json_file(tmp_file.name)
from_json = ExtendedFnvCorrection.load_json(tmp_file.name).as_dict()
d = self.correction.as_dict()
self.assertEqual(d.keys(), from_json.keys())
# def test_compute_extended_fnv(self):
actual = self.correction.lattice_energy
expected = -0.9734410724309901
self.assertAlmostEqual(expected, actual, 3)
actual = self.correction.ave_pot_diff
expected = 0.15425136945427362
self.assertAlmostEqual(expected, actual, 3)
actual = self.correction.alignment_correction_energy
expected = -0.30850273890854724
self.assertAlmostEqual(expected, actual, 3)
actual = len(self.correction.pc_pot)
expected = 63
self.assertEqual(expected, actual)
actual = self.correction.electrostatic_pot[0]
expected = 0.2263
self.assertAlmostEqual(expected, actual, 7)
# def test_manual_energy(self):
actual = self.correction.manually_added_correction_energy
expected = 0.1
self.assertAlmostEqual(expected, actual, 7)
# def test_plot_distance_vs_potential(self):
actual = self.correction.max_sphere_radius
expected = 8.419456 / 2
self.assertAlmostEqual(actual, expected)
self.correction.plot_potential("pot.pdf")
os.remove("pot.pdf")
| 6,668 |
Day 32/day_32/motivational_quote.py
|
Jean-Bi/100DaysOfCodePython
| 0 |
2025716
|
import smtplib
import datetime as dt
import random
weekday = dt.datetime.now().weekday()
if weekday == 0:
with open("quotes.txt") as file:
quote = random.choice(file.readlines())
my_email = "<EMAIL>"
password = "<PASSWORD>-"
connection = smtplib.SMTP("smtp.gmail.com", port=587)
connection.starttls()
connection.login(user=my_email, password=password)
connection.sendmail(
from_addr=my_email,
to_addrs="<EMAIL>",
msg=f"Subject:Monday's Quote\n\n{quote}"
)
connection.close()
| 547 |
area of circle.py
|
AkterAshik/basic-python
| 0 |
2026433
|
base = float(input("Enter Base : "))
height = float(input("Enter Height : "))
area = 0.5 * base * height
print("Area is : ",area)
| 132 |
smartcab/agent.py
|
josemontiel/SmartCab
| 1 |
2024827
|
import random
from environment import Agent, Environment
from planner import RoutePlanner
from simulator import Simulator
from collections import namedtuple
class LearningAgent(Agent):
"""An agent that learns to drive in the smartcab world."""
AState = namedtuple('AState', ['next_waypoint', 'light', 'green_oncoming_is_forward', 'red_left_is_forward'])
def __init__(self, env):
super(LearningAgent, self).__init__(env) # sets self.env = env, state = None, next_waypoint = None, and a default color
self.color = 'red' # override color
self.planner = RoutePlanner(self.env, self) # simple route planner to get next_waypoint
# TODO: Initialize any additional variables here
self.Q_values = {}
self.last_action = None
def reset(self, destination=None):
self.planner.route_to(destination)
# TODO: Prepare for a new trip; reset any variables here, if required
def update(self, t):
# Gather inputs
self.next_waypoint = self.planner.next_waypoint() # from route planner, also displayed by simulator
inputs = self.env.sense(self)
deadline = self.env.get_deadline(self)
light_ = inputs['light']
present_state = self.AState(next_waypoint=self.next_waypoint, light=light_, green_oncoming_is_forward=(light_ == 'green' and inputs['oncoming'] == 'forward'), red_left_is_forward=(light_ == 'red' and inputs['left'] == 'forward'))
self.state = present_state
old_q, action = self.choose_action(present_state)
self.last_action = action
# Execute action and get reward
reward = self.env.act(self, action)
# Sense new state
new_inputs = self.env.sense(self)
new_state = self.AState(next_waypoint=self.next_waypoint, light=new_inputs['light'] , green_oncoming_is_forward=(new_inputs['light'] == 'green' and new_inputs['oncoming'] == 'forward'), red_left_is_forward=(new_inputs['light'] == 'red' and new_inputs['left'] == 'forward'))
new_Q = self.learned_val(reward, new_state, old_q)
self.Q_values[(present_state, action)] = new_Q
# print "LearningAgent.update(): deadline = {}, inputs = {}, action = {}, reward = {}, oldQ = {}, newQ = {},".format(deadline, inputs, action, reward, old_q, new_Q) # [debug]
def learned_val(self, reward, new_state, old_q):
learning_rate = 0.25
discount_factor = 0.6
new_q = old_q + (learning_rate * (reward + (discount_factor * self.maxQ(new_state) - old_q)))
return new_q
def maxQ(self, state):
q = [self.getQ(state, a) for a in Environment.valid_actions]
maxQ = max(q)
return maxQ
def choose_action(self, state, valid_actions=Environment.valid_actions):
next_way_action_q = self.getQ(state, state.next_waypoint);
q = [self.getQ(state, a) for a in valid_actions]
maxQ = max(q)
if random.random() < 1:
best = [i for i in range(len(valid_actions)) if q[i] == maxQ]
i = random.choice(best)
else:
i = q.index(maxQ)
action = None
finalQ = 0
if next_way_action_q < maxQ:
action = state.next_waypoint
finalQ = next_way_action_q
else:
action = Environment.valid_actions[i]
finalQ = maxQ
return finalQ, action
def getQ(self, state, a):
return self.Q_values.get((state, a), 1)
class TrainedAgent(Agent):
"""An agent that learns to drive in the smartcab world."""
AState = namedtuple('AState', ['next_waypoint', 'light', 'green_oncoming_is_forward', 'red_left_is_forward'])
def __init__(self, env, policy):
super(TrainedAgent, self).__init__(env) # sets self.env = env, state = None, next_waypoint = None, and a default color
self.color = 'red' # override color
self.planner = RoutePlanner(self.env, self) # simple route planner to get next_waypoint
# TODO: Initialize any additional variables here
self.policy = policy
self.last_action = None
self.penalties = 0
self.actionsAvail = 0
self.actionsTaken = 0
def reset(self, destination=None):
self.planner.route_to(destination)
print "penalties incurred = {}".format(self.penalties)
print 'actions available = {}'.format(self.actionsAvail)
print 'actions taken = {}'.format(self.actionsTaken)
self.actionsAvail += self.env.get_deadline(self)
def update(self, t):
# Gather inputs
self.next_waypoint = self.planner.next_waypoint() # from route planner, also displayed by simulator
inputs = self.env.sense(self)
deadline = self.env.get_deadline(self)
S = self.AState(next_waypoint=self.next_waypoint, light=inputs['light'] , green_oncoming_is_forward=(inputs['light'] == 'green' and inputs['oncoming'] == 'forward'), red_left_is_forward=(inputs['light'] == 'red' and inputs['left'] == 'forward'))
self.state = S
old_q, action = self.choose_action(S)
self.last_action = action
# Execute action and get reward
reward = self.env.act(self, action)
if reward < 0:
self.penalties += 1
self.actionsTaken += 1
def choose_action(self, state):
q = [self.getQ(state, a) for a in Environment.valid_actions]
maxQ = max(q)
i = q.index(maxQ)
action = Environment.valid_actions[i]
return maxQ, action
def getQ(self, state, a):
if (state, a) in self.policy:
return self.policy[(state, a)]
else:
return 1
def run():
"""Run the agent for a finite number of trials."""
# Set up environment and agent
e = Environment() # create environment (also adds some dummy traffic)
a = e.create_agent(LearningAgent) # create agent
e.set_primary_agent(a, enforce_deadline=True) # set agent to track
# Now simulate it
sim = Simulator(e, update_delay=0) # reduce update_delay to speed up simulation
sim.run(n_trials=100) # press Esc or close pygame window to quit
policy = a.Q_values
# Set up environment and agent
e = Environment() # create environment (also adds some dummy traffic)
a = e.create_agent(TrainedAgent, policy) # create agent
e.set_primary_agent(a, enforce_deadline=True) # set agent to track
print(chr(27) + "[2J")
# Now simulate it
sim = Simulator(e, update_delay=0) # reduce update_delay to speed up simulation
sim.run(n_trials=100) # press Esc or close pygame window to quit
if __name__ == '__main__':
run()
| 6,674 |
leaveall.py
|
izmktr/ouroboros
| 1 |
2025482
|
import tokenkeycode
import discord
from discord.ext import tasks
# Create the objects needed for the Discord connection
intents = discord.Intents.default() # create a default Intents object
client = discord.Client(intents=intents)
@client.event
async def on_ready():
    # A login notice is printed to the terminal once the bot is ready
    print('Logged in')
print('client.guilds: %d' % len(client.guilds))
for g in client.guilds:
await g.leave()
client.run(tokenkeycode.TOKEN)
| 415 |
1_code/launch/handlers.py
|
jaimiles23/Multiplication_Medley
| 0 |
2026862
|
"""/**
* @author [<NAME>]
* @email [<EMAIL>]
* @create date 2020-05-05 09:08:15
* @modify date 2020-08-15 14:37:13
* @desc [
LaunchRequestHandler
]
*/
"""
##########
# Imports
##########
from ask_sdk_core.handler_input import HandlerInput
from ask_sdk_core.dispatch_components import AbstractRequestHandler
from ask_sdk_core.utils import is_request_type
from ask_sdk_model import ui, Response
from ask_sdk_model.ui import SimpleCard
from logs import logger, log_func_name
from launch.launch_utils import LaunchUtils
from pause.pauser import Pauser
from skill_card.card_funcs import CardFuncs
from aux_utils.create_tuple_message_clauses import get_linear_nlg
##########
# Launch Request Handler
##########
class LaunchRequestHandler(AbstractRequestHandler):
def can_handle(self, handler_input) -> bool:
return is_request_type("LaunchRequest")(handler_input)
def handle(self, handler_input):
logger.info("HAN LaunchRequestHandler")
speech_list = []
ms_welcome = LaunchUtils.get_welcome(handler_input)
prompt = LaunchUtils.get_q_prompt(handler_input)
prompt = CardFuncs.format_prompt(prompt)
speech_list = (
ms_welcome,
1.75,
prompt
)
speech = get_linear_nlg(speech_list)
reprompt = LaunchUtils.get_r_appropriate_reprompt(handler_input)
card_title, card_text = CardFuncs.get_card_info(handler_input, speech)
LaunchUtils.set_launch_attr(handler_input)
return (
handler_input.response_builder
.speak(speech)
.ask(reprompt)
.set_card( SimpleCard(card_title, card_text))
.response)
| 1,736 |
Python - Text Basics- 2/Python Text Formatting 02 - JJ.py
|
CharlleyJJ/Python_NLP_JJ
| 1 |
2025764
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
get_ipython().run_cell_magic('writefile', 'test.txt', 'Hello testing text file creation\nSecond line of the text file.\nThis method just works in Jupyter IDE ...')
# In[3]:
#myfile = open("Whoops.txt") - Error N°2 it means wrong path or the file doesn't exist.
# In[5]:
pwd ## This command shows which directory we are actually working in (and where your created file should be).
# In[6]:
myfile = open("test.txt")
# In[7]:
myfile ##<_io.TextIOWrapper name='test.txt' mode='r' encoding='cp1252'> if it shows something like this, the file object is stored in the variable
# In[8]:
myfile.read()
# In[9]:
myfile.read()
# In[10]:
myfile.seek(0)
# In[11]:
myfile.read()
# In[12]:
myfile.seek(0)
# In[13]:
content = myfile.read()
# In[14]:
print(content)
# In[15]:
content
# In[16]:
myfile.close()
# In[17]:
myfile = open('test.txt')
# In[18]:
myfile.readlines()
# In[19]:
myfile.seek(0)
# In[20]:
mylines = myfile.readlines()
# In[21]:
mylines
# In[22]:
for line in mylines:
print(line[0])
# In[23]:
for line in mylines:
print(line.split()[0])
# In[24]:
myfile = open('test.txt','w+')
# In[26]:
myfile.read()
# In[27]:
myfile.write('My brand new text')
# In[28]:
myfile.seek(0)
# In[29]:
myfile.read()
# In[30]:
## w+ overwrite the whole file
# In[31]:
myfile.close()
# In[32]:
##Append method, keep the old information inside the file
# In[33]:
myfile=open("whoops.txt",'a+') ##this will create the file and not show any error
# In[34]:
myfile.write("My first Line in A+ Opening")
# In[36]:
myfile.close()
# In[37]:
newfile = open('whoops.txt')
# In[38]:
newfile.read()
# In[39]:
newfile.write('Trying to write something with only read permissions')
# In[40]:
newfile.close()
# In[41]:
myfile=open('whoops.txt', mode='a+')
# In[42]:
myfile.write('This is an Added Line because I used a+ mode')
# In[43]:
myfile.seek(0)
# In[45]:
myfile.read()
# In[46]:
myfile.write('This is a real new line, on the next line')
# In[47]:
myfile.seek(0)
# In[48]:
myfile.read()
# In[49]:
myfile.seek(0)
# In[50]:
print(myfile.read())
# In[54]:
## Automatic closing line - context manager "with" auto close the files
# In[52]:
with open('whoops.txt', 'r') as mynewfile:
myvariable = mynewfile.readlines()
# In[53]:
myvariable
# In[ ]:
| 2,474 |
demo_kafan.py
|
feel-easy/myspider
| 1 |
2026328
|
import requests
from lxml import etree
import json
# import pymysql
# mysql_conf = {
# "host": '127.0.0.1',
# "port": 3306,
# "user": 'root',
# "password": '<PASSWORD>',
# "db": 'questions',
# "charset": "utf8",
# "cursorclass": pymysql.cursors.DictCursor
# }
# conn = pymysql.connect(**mysql_conf)
def fetchHtml(_url):
_resp = requests.get(_url)
return etree.HTML(_resp.content)
def toText(el):
return ''.join([i for i in el.itertext()])
def findRet(_elems, _text):
return ';'.join([i for i in _elems if i.count(_text)]).strip()
def toJson(_html):
_tmps = map(toText, _html.xpath('//p'))
_elems = [i for i in _tmps]
return {
"question": findRet(_elems, '问题:'),
"options": findRet(_elems, '选项:'),
"result": findRet(_elems, '答案:')
}
if __name__ == '__main__':
url = 'https://www.kafan.cn/edu/22214241.html'
# resp = requests.get(url)
html = fetchHtml(url)
urls = html.xpath('//p/a[2]/@href')
results = []
for _url in urls:
# _url = urls[0]
_html = fetchHtml(_url)
_ret = toJson(_html)
if _ret["question"] and _ret["options"] and _ret["result"]:
print(_ret)
results.append(_ret)
with open('result.json', 'w') as f:
f.write(json.dumps({'问题答案':results}, ensure_ascii=False))
| 1,318 |
learningRTA/hypothesis.py
|
MrEnvision/learning_RTA_by_testing
| 5 |
2026625
|
import math
import common.timeInterval_upper as timeInterval_upper
class RTA(object):
def __init__(self, inputs, states, trans, initState, acceptStates, sinkState):
self.inputs = inputs
self.states = states
self.trans = trans
self.initState = initState
self.acceptStates = acceptStates
self.sinkState = sinkState
def showDiscreteRTA(self):
print("Input: " + str(self.inputs))
print("States: " + str(self.states))
print("InitState: {}".format(self.initState))
print("AcceptStates: {}".format(self.acceptStates))
print("SinkState: {}".format(self.sinkState))
print("Transitions: ")
for t in self.trans:
print(' ' + str(t.tranId), 'S_' + str(t.source), t.input, str(t.timeList), 'S_' + str(t.target), end="\n")
def showRTA(self):
print("Input: " + str(self.inputs))
print("States: " + str(self.states))
print("InitState: {}".format(self.initState))
print("AcceptStates: {}".format(self.acceptStates))
print("SinkState: {}".format(self.sinkState))
print("Transitions: ")
for t in self.trans:
print(" " + str(t.tranId), 'S_' + str(t.source), t.input, t.showGuards(), 'S_' + str(t.target), end="\n")
class DiscreteRTATran(object):
def __init__(self, tranId, source, input, timeList, target):
self.tranId = tranId
self.source = source
self.input = input
self.timeList = timeList
self.target = target
class RTATran(object):
def __init__(self, tranId, source, input, guards, evidence, target):
self.tranId = tranId
self.source = source
self.input = input
self.guards = guards
self.evidence = evidence
self.target = target
def isPass(self, tw):
if tw.input == self.input:
for guard in self.guards:
if guard.isInInterval(tw.time):
return True
else:
return False
return False
def showGuards(self):
temp = self.guards[0].show()
for i in range(1, len(self.guards)):
temp = temp + 'U' + self.guards[i].show()
return temp
# Discrete RTA construction
def structDiscreteRTA(table, inputs):
    # handle inputs
inputs = inputs
    # handle states/initState/acceptStates
states = []
initState = None
sinkState = None
acceptStates = []
valueList_name_dict = {}
for s, i in zip(table.S, range(0, len(table.S))):
stateName = i
valueList_name_dict[makeStr(s.valueList)] = stateName
states.append(stateName)
if not s.tws:
initState = stateName
if s.valueList[0] == 1:
acceptStates.append(stateName)
if s.valueList[0] == -1:
sinkState = stateName
    # handle transitions
trans = []
transNum = 0
tableElements = [s for s in table.S] + [r for r in table.R]
source = None
target = None
for r in tableElements:
if not r.tws:
continue
timedWords = [tw for tw in r.tws]
w = timedWords[:-1]
a = timedWords[len(timedWords) - 1]
for element in tableElements:
if isTwsEqual(w, element.tws):
source = valueList_name_dict[makeStr(element.valueList)]
if isTwsEqual(timedWords, element.tws):
target = valueList_name_dict[makeStr(element.valueList)]
        # determine the transition input
input = a.input
timeList = [a.time]
        # add a new transition or just a new time point
needNewTran = True
for tran in trans:
if source == tran.source and input == tran.input and target == tran.target:
if timeList[0] not in tran.timeList:
tran.timeList.append(timeList[0])
needNewTran = False
else:
needNewTran = False
if needNewTran:
tempTran = DiscreteRTATran(transNum, source, input, timeList, target)
trans.append(tempTran)
transNum = transNum + 1
discreteRTA = RTA(inputs, states, trans, initState, acceptStates, sinkState)
return discreteRTA
# Hypothesis RTA construction (variant 1) - boundary values are the concretely tested values
def structHypothesisRTA(discreteRTA, upperGuard):
inputs = discreteRTA.inputs
states = discreteRTA.states
initState = discreteRTA.initState
acceptStates = discreteRTA.acceptStates
sinkState = discreteRTA.sinkState
    # handle transitions
trans = []
for s in discreteRTA.states:
s_dict = {}
for key in discreteRTA.inputs:
s_dict[key] = [0]
for tran in discreteRTA.trans:
if tran.source == s:
for input in discreteRTA.inputs:
if tran.input == input:
tempList = s_dict[input]
for i in tran.timeList:
if i not in tempList:
tempList.append(i)
s_dict[input] = tempList
for tran in discreteRTA.trans:
if tran.source == s:
timePoints = s_dict[tran.input]
timePoints.sort()
guards = []
evidence = tran.timeList[0]
for tw in tran.timeList:
index = timePoints.index(tw)
if index + 1 < len(timePoints):
if isInt(tw) and isInt(timePoints[index + 1]):
tempGuard = timeInterval_upper.Guard("[" + str(tw) + "," + str(timePoints[index + 1]) + ")")
elif isInt(tw) and not isInt(timePoints[index + 1]):
tempGuard = timeInterval_upper.Guard("[" + str(tw) + "," + str(math.modf(timePoints[index + 1])[1]) + "]")
elif not isInt(tw) and isInt(timePoints[index + 1]):
tempGuard = timeInterval_upper.Guard("(" + str(math.modf(tw)[1]) + "," + str(timePoints[index + 1]) + ")")
else:
tempGuard = timeInterval_upper.Guard("(" + str(math.modf(tw)[1]) + "," + str(math.modf(timePoints[index + 1])[1]) + "]")
guards.append(tempGuard)
else:
if tw == upperGuard:
pass
else:
if isInt(tw):
tempGuard = timeInterval_upper.Guard("[" + str(tw) + "," + str(upperGuard) + ")")
else:
tempGuard = timeInterval_upper.Guard("(" + str(math.modf(tw)[1]) + "," + str(upperGuard) + ")")
guards.append(tempGuard)
guards = simpleGuards(guards)
temp_tran = RTATran(tran.tranId, tran.source, tran.input, guards, evidence, tran.target)
trans.append(temp_tran)
hypothesisRTA = RTA(inputs, states, trans, initState, acceptStates, sinkState)
return hypothesisRTA
# --------------------------------- helper functions ---------------------------------
# convert a valueList to a string
def makeStr(valueList):
valueStr = ''
for v in valueList:
valueStr = valueStr + str(v)
return valueStr
# check whether two tws are equal
def isTwsEqual(tws1, tws2):
if len(tws1) != len(tws2):
return False
else:
flag = True
for i in range(len(tws1)):
if tws1[i] != tws2[i]:
flag = False
break
if flag:
return True
else:
return False
# check whether a number is an integer
def isInt(num):
x, y = math.modf(num)
if x == 0:
return True
else:
return False
# sort guards
def sortGuards(guards):
for i in range(len(guards) - 1):
for j in range(len(guards) - i - 1):
if guards[j].max_bn > guards[j + 1].max_bn:
guards[j], guards[j + 1] = guards[j + 1], guards[j]
return guards
# merge guards
def simpleGuards(guards):
if len(guards) == 1 or len(guards) == 0:
return guards
else:
sortedGuards = sortGuards(guards)
result = []
tempGuard = sortedGuards[0]
for i in range(1, len(sortedGuards)):
firstRight = tempGuard.max_bn
secondLeft = sortedGuards[i].min_bn
if firstRight.value == secondLeft.value:
if (firstRight.bracket == 1 and secondLeft.bracket == 2) or (firstRight.bracket == 3 and secondLeft.bracket == 4):
left = tempGuard.guard.split(',')[0]
right = sortedGuards[i].guard.split(',')[1]
guard = timeInterval_upper.Guard(left + ',' + right)
tempGuard = guard
elif firstRight.bracket == 1 and secondLeft.bracket == 3:
result.append(tempGuard)
tempGuard = sortedGuards[i]
else:
result.append(tempGuard)
tempGuard = sortedGuards[i]
result.append(tempGuard)
return result
| 9,027 |
pysilcam/silcam_classify.py
|
Sondreab/PySilCam
| 4 |
2024882
|
# -*- coding: utf-8 -*-
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.estimator import regression
from tflearn.data_preprocessing import ImagePreprocessing
from tflearn.data_augmentation import ImageAugmentation
import tensorflow as tf
import scipy
import numpy as np
import pandas as pd
import os
'''
SilCam TensorFlow analysis for classification of particle types
'''
def get_class_labels(model_path='/mnt/ARRAY/classifier/model/particle-classifier.tfl'):
'''
    Read the header file that defines the categories of particles in the model
Args:
model_path (str) : path to particle-classifier e.g.
'/mnt/ARRAY/classifier/model/particle-classifier.tfl'
Returns:
        class_labels (str) : labelled categories which can be predicted
'''
path, filename = os.path.split(model_path)
header = pd.read_csv(os.path.join(path, 'header.tfl.txt'))
class_labels = header.columns
return class_labels
def load_model(model_path='/mnt/ARRAY/classifier/model/particle-classifier.tfl'):
'''
Load the trained tensorflow model
Args:
model_path (str) : path to particle-classifier e.g.
'/mnt/ARRAY/classifier/model/particle-classifier.tfl'
Returns:
        model (tf model object) : the restored tflearn DNN model
        class_labels (str) : labelled categories which the model can predict
'''
path, filename = os.path.split(model_path)
header = pd.read_csv(os.path.join(path, 'header.tfl.txt'))
OUTPUTS = len(header.columns)
class_labels = header.columns
tf.reset_default_graph()
# Same network definition as in tfl_tools scripts
img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center()
img_prep.add_featurewise_stdnorm()
img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_rotation(max_angle=25.)
img_aug.add_random_blur(sigma_max=3.)
network = input_data(shape=[None, 32, 32, 3],
data_preprocessing=img_prep,
data_augmentation=img_aug)
network = conv_2d(network, 32, 3, activation='relu')
network = max_pool_2d(network, 2)
network = conv_2d(network, 64, 3, activation='relu')
network = conv_2d(network, 64, 3, activation='relu')
network = conv_2d(network, 64, 3, activation='relu')
network = conv_2d(network, 64, 3, activation='relu')
network = conv_2d(network, 64, 3, activation='relu')
network = max_pool_2d(network, 2)
network = fully_connected(network, 512, activation='relu')
network = dropout(network, 0.75)
network = fully_connected(network, OUTPUTS, activation='softmax')
network = regression(network, optimizer='adam',
loss='categorical_crossentropy',
learning_rate=0.001)
model = tflearn.DNN(network, tensorboard_verbose=0,
checkpoint_path=model_path)
model.load(model_path)
return model, class_labels
def predict(img, model):
'''
Use tensorflow model to classify particles
Args:
img (uint8) : a particle ROI, corrected and treated with the silcam
explode_contrast function
model (tf model object) : loaded tfl model from load_model()
Returns:
prediction (array) : the probability of the roi belonging to each class
'''
# Scale it to 32x32
img = scipy.misc.imresize(img, (32, 32), interp="bicubic").astype(np.float32, casting='unsafe')
# Predict
prediction = model.predict([img])
return prediction
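# Illustrative usage sketch (roi_image is a hypothetical particle ROI array):
# model, class_labels = load_model('/mnt/ARRAY/classifier/model/particle-classifier.tfl')
# prediction = predict(roi_image, model)
# best_guess = class_labels[np.argmax(prediction[0])]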
| 3,704 |
rqalpha/model/tick.py
|
mysky528/rqalpha
| 17 |
2026044
|
# -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..utils.datetime_func import convert_date_time_ms_int_to_datetime
class Tick(object):
def __init__(self, order_book_id, tick):
self._order_book_id = order_book_id
self._tick = tick
@property
def order_book_id(self):
return self._order_book_id
@property
def datetime(self):
dt = convert_date_time_ms_int_to_datetime(self._tick["date"], self._tick["time"])
return dt
@property
def open(self):
return self._tick['open']
@property
def last(self):
return self._tick['last']
@property
def high(self):
return self._tick['high']
@property
def low(self):
return self._tick['low']
@property
def prev_close(self):
return self._tick['prev_close']
@property
def volume(self):
return self._tick['volume']
@property
def total_turnover(self):
return self._tick['total_turnover']
@property
def open_interest(self):
return self._tick['open_interest']
@property
def prev_settlement(self):
return self._tick['prev_settlement']
# FIXME use dynamic creation
@property
def b1(self):
return self._tick['b1']
@property
def b2(self):
return self._tick['b2']
@property
def b3(self):
return self._tick['b3']
@property
def b4(self):
return self._tick['b4']
@property
def b5(self):
return self._tick['b5']
@property
def b1_v(self):
return self._tick['b1_v']
@property
def b2_v(self):
return self._tick['b2_v']
@property
def b3_v(self):
return self._tick['b3_v']
@property
def b4_v(self):
return self._tick['b4_v']
@property
def b5_v(self):
return self._tick['b5_v']
@property
def a1(self):
return self._tick['a1']
@property
def a2(self):
return self._tick['a2']
@property
def a3(self):
return self._tick['a3']
@property
def a4(self):
return self._tick['a4']
@property
def a5(self):
return self._tick['a5']
@property
def a1_v(self):
return self._tick['a1_v']
@property
def a2_v(self):
return self._tick['a2_v']
@property
def a3_v(self):
return self._tick['a3_v']
@property
def a4_v(self):
return self._tick['a4_v']
@property
def a5_v(self):
return self._tick['a5_v']
@property
def limit_up(self):
return self._tick['limit_up']
@property
def limit_down(self):
return self._tick['limit_down']
def __repr__(self):
items = []
for name in dir(self):
if name.startswith("_"):
continue
items.append((name, getattr(self, name)))
return "Tick({0})".format(', '.join('{0}: {1}'.format(k, v) for k, v in items))
def __getitem__(self, key):
return getattr(self, key)
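# Illustrative usage sketch (the tick dict below is a made-up minimal sample;
# a real tick carries every field accessed by the properties above):
# tick = Tick("000001.XSHE", {"date": 20170104, "time": 93000000, "last": 9.31})
# print(tick.order_book_id, tick["last"])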
| 3,611 |
System_core.py
|
AndreVinni89/MercadoLivre_WebScraping
| 0 |
2025481
|
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup as bs
from time import sleep
from selenium.webdriver.support.ui import Select
import re
from Scraping_Class import Scraping
from Data_Process_Class import Data_Process
from dbfuncs import DataBase
import pymysql
class System_Core:
def __init__(self, cont):
self.driver = webdriver.Chrome()
self.url = 'https://www.mercadolivre.com.br/'
self.search_key = 'Iphone'
self.cont = cont
self.db = False
def run(self):
scrap = Scraping(self.driver, self.url)
print('[Scrap] Initializing...')
scrap.access_url()
print('[Scrap] URL access successful...')
scrap.search(self.search_key)
print('[Scrap] Search successful...')
        # From here on we enter a loop, moving from page to page
cont_pages = 0
while cont_pages < self.cont:
if not self.db:
try:
db = DataBase('localhost', 'root', '')
db.createDatabase()
self.db = True
except:
print("Failed to connect with Database")
scrap.debug()
print('[Scrap] Debugging...')
source_code = scrap.return_source_code()
print('[Scrap] Download Source_code successful...')
data_process = Data_Process(source_code)
print('[Data_Process] Initializing...')
data_process.parse_source_code()
print('[Data_Process] Processing Source_code...')
data_process.get_product_list()
print('[Data_Process] Processing product list...')
data_process.get_products()
print('[Data_Process] Processing product list successful.')
productsInfos = data_process.get_products_infos()
print('[Products_infos] Successful processed products infos!')
#storaging data in Database
for element in productsInfos:
if element[4] != "":
try:
db.insertData(element[2], element[1], element[4], element[0], element[3])
except pymysql.Error as e:
print("could not close connection error pymysql %d: %s" % (e.args[0], e.args[1]))
try:
scrap.next_page()
cont_pages += 1
sleep(2)
except:
try:
scrap.next_page()
cont_pages += 1
sleep(2)
except:
                    print('[Scrap] Last page found...')
print(f'{cont_pages} pages computed.')
cont_pages = self.cont
else:
                print('[Scrap] next page found...')
print("All results returned")
| 2,922 |
scrappyr/scraps/tests/test_views.py
|
tonysyu/scrappyr-app
| 0 |
2026607
|
from unittest import mock
import pytest
from django.shortcuts import reverse
from django.test import RequestFactory
from .. import views
class TestScrapListView():
@pytest.mark.django_db
def test_get(self):
response = self.get()
assert response.status_code == 200
form = response.context_data['form']
assert not form.is_bound
scraps = response.context_data['scraps']
assert scraps == b'[]'
def test_valid_post_creates_new_scrap(self):
with mock.patch('scrappyr.scraps.views.Scrap') as scrap_factory:
response = self.post({'raw_title': 'my title'})
assert response.status_code == 302 # Redirect to scrap list after creating new scrap.
scrap_factory.objects.create.assert_called_once_with(raw_title='my title')
def test_invalid_post_does_not_create_scrap(self):
with mock.patch('scrappyr.scraps.views.Scrap') as scrap_factory:
response = self.post({})
assert response.status_code == 302
scrap_factory.objects.create.assert_not_called()
def get(self, data=None):
factory = RequestFactory()
request = factory.get(reverse('scraps:list'), data=data)
return self._get_response(request)
def post(self, data=None):
factory = RequestFactory()
request = factory.post(reverse('scraps:list'), data=data)
return self._get_response(request)
def _get_response(self, request):
request.user = mock.Mock(is_authenticated=True)
view = views.ScrapListView.as_view()
return view(request)
| 1,597 |
ui_timelineSnapButton.py
|
RedForty/dkUtils
| 0 |
2025827
|
# ==================================================================== #
'''
- Subframe Scrubber -
Creates a button next to the rangeslider that lets you toggle
timeline snapping
- How to install -
Place in your ~/maya/scripts folder
- How to use -
Run this PYTHON code in your userSetup.py file or shelf button:
from maya import cmds
import ui_timelineSnapButton
cmds.evalDeferred('ui_timelineSnapButton.create_timelineSnapButton()')
'''
# ==================================================================== #
from maya import cmds, mel
SNAP_BUTTON = 'toggleTimelineSnap'
SLIDER = mel.eval('$tempSlider = $gPlayBackSlider')
# Really hoping this doesn't change between maya versions. Works in 2018.
RANGE_SLIDER = 'RangeSlider|MainPlaybackRangeLayout|formLayout9|formLayout14'
'''
# Not using this one
def _toggleTimelineSnap(*args, **kwargs):
from maya import mel
mel.eval("timeControl -e -snap (!`timeControl -q -snap $gPlayBackSlider`) $gPlayBackSlider;")
'''
# Using these instead. Simpler to manage button state.
def timelineSnapOn(*args, **kwargs):
from maya import mel
slider = mel.eval('$tempSlider = $gPlayBackSlider')
cmds.timeControl(slider, e=True, snap=True)
def timelineSnapOff(*args, **kwargs):
from maya import mel
slider = mel.eval('$tempSlider = $gPlayBackSlider')
cmds.timeControl(slider, e=True, snap=False)
def create_timelineSnapButton():
global SNAP_BUTTON
if not cmds.symbolCheckBox(SNAP_BUTTON, q=True, exists=True):
SNAP_BUTTON = cmds.symbolCheckBox(SNAP_BUTTON, height=24, width=24, parent=RANGE_SLIDER, image='snapTime.png', onCommand=timelineSnapOff, offCommand=timelineSnapOn, highlightColor=[0.8, 0.8, 0.2])
def delete_timelineSnapButton():
# Delete it
if cmds.symbolCheckBox(SNAP_BUTTON, q=True, exists=True):
cmds.deleteUI(SNAP_BUTTON)
if __name__ == '__main__':
create_timelineSnapButton()
| 1,984 |
dags/connect_to_postgres.py
|
jjacobi123123/airflow-training-skeleton
| 0 |
2026581
|
from datetime import timedelta
from json import dumps
from airflow import AirflowException
from airflow.contrib.operators.postgres_to_gcs_operator import PostgresToGoogleCloudStorageOperator
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator, BranchPythonOperator
from airflow.utils import timezone
import datetime
import airflow
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.utils.trigger_rule import TriggerRule
from httplib2 import Http
args = {
'owner': 'Airflow',
'start_date': timezone.datetime(2019, 12, 1),
}
#def _connect_to_postgres(**context):
## postgres = PostgresToGoogleCloudStorageOperator(task_id='postgres',
# postgres_conn_id='postgres_default')
# postgres.query()
with DAG(
dag_id='connect_to_postgres_dag',
default_args=args,
schedule_interval='@daily',
dagrun_timeout=timedelta(minutes=60),
) as dag:
PostgresToGoogleCloudStorageOperator(task_id='postgres',
sql="SELECT * FROM land_registry_price_paid_uk WHERE transfer_date::date = '{{ds}}'::date LIMIT 1000",
filename="output-{{ds}}.csv",
bucket="land_data_training_jjac_airflow",
postgres_conn_id='postgres_default')
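    # {{ds}} is Airflow's templated execution date (YYYY-MM-DD), so each daily run
    # exports at most 1000 rows for its own date into the GCS bucket.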
#print_weekday = PythonOperator(task_id='weekday_dag',
# python_callable=_connect_to_postgres,
# provide_context=True)
| 1,681 |
adefa/tests/test_run.py
|
budtmo/adefa
| 3 |
2026775
|
"""Unit test to test scheduling test."""
from unittest import TestCase
from adefa import cli
from adefa.tests import runner
import mock
class TestRun(TestCase):
"""Unit test class to test scheduling test."""
def test_schedule_test(self):
cli.client.schedule_run = mock.MagicMock(return_value={'run': {'key1': 'value1'}})
result = runner.invoke(cli.run, ['-n', 'test_run', '-p', 'test_project', '-a', 'app_id',
'-r', 'APPIUM_PYTHON', '-t', 'test_id', '-g', 'group_id'])
self.assertEqual(result.exit_code, 0)
| 586 |
tests/test_labels.py
|
westandskif/convtools
| 15 |
2026278
|
import pytest
from convtools import conversion as c
def test_labels():
conv1 = c.if_(
1,
c.input_arg("y")
.item("abc")
.add_label("abc")
.pipe(
c.input_arg("x").pipe(
c.inline_expr("{cde} + 10").pass_args(cde=c.this.item("cde"))
)
)
.pipe(
c.inline_expr("{this} + {abc}").pass_args(
this=c.this, abc=c.label("abc")
)
),
2,
).gen_converter(debug=False)
assert conv1(data_=1, x={"cde": 2}, y={"abc": 3}) == 15
list(c.generator_comp(c.this.add_label("a")).execute([1, 2]))
c.list_comp(c.this.add_label("a")).execute([1, 2])
with pytest.raises(c.ConversionException):
c.this.add_label(123)
with pytest.raises(ValueError):
c.label(123)
| 832 |
{{ cookiecutter.repo_name }}/{{ cookiecutter.repo_name }}/exceptions/sample.py
|
bopo/cookiecutter-scrapy
| 1 |
2023500
|
# -*- coding: utf-8 -*-
from .messages import ERROR_MSG
class SampleErrorException(Exception):
def __init__(self, message=None):
super(SampleErrorException, self).__init__(message)
self.message = ERROR_MSG
| 230 |
custom_components/renault/sensor.py
|
KTibow/hassRenaultZE
| 0 |
2025901
|
"""Support for Renault sensors."""
import logging
from pyze.api import ChargeState, PlugState
from homeassistant.const import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_TEMPERATURE,
PERCENTAGE,
POWER_KILO_WATT,
TEMP_CELSIUS,
)
from homeassistant.helpers.icon import icon_for_battery_level
from homeassistant.util.distance import LENGTH_KILOMETERS, LENGTH_MILES
from homeassistant.util.unit_system import IMPERIAL_SYSTEM, METRIC_SYSTEM
from .const import DOMAIN, MODEL_USES_KWH
from .pyzeproxy import PyzeProxy
from .pyzevehicleproxy import PyzeVehicleProxy
from .renaultentity import (
RenaultBatteryDataEntity,
RenaultChargeModeDataEntity,
RenaultHVACDataEntity,
RenaultMileageDataEntity,
)
ATTR_BATTERY_AVAILABLE_ENERGY = "battery_available_energy"
ATTR_CHARGING_POWER = "charging_power"
ATTR_CHARGING_REMAINING_TIME = "charging_remaining_time"
ATTR_PLUGGED = "plugged"
ATTR_PLUG_STATUS = "plug_status"
LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Old way of setting up platforms."""
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Renault entities from config entry."""
proxy = hass.data[DOMAIN][config_entry.unique_id]
entities = await get_entities(hass, proxy)
proxy.entities.extend(entities)
async_add_entities(entities, True)
async def get_entities(hass, proxy: PyzeProxy):
"""Create Renault entities for all vehicles."""
entities = []
for vehicle_link in proxy.get_vehicle_links():
vehicle_proxy = await proxy.get_vehicle_proxy(vehicle_link)
entities.extend(await get_vehicle_entities(hass, vehicle_proxy))
return entities
async def get_vehicle_entities(hass, vehicle_proxy: PyzeVehicleProxy):
"""Create Renault entities for single vehicle."""
entities = []
entities.append(RenaultBatteryLevelSensor(vehicle_proxy, "Battery Level"))
entities.append(RenaultChargeModeSensor(vehicle_proxy, "Charge Mode"))
entities.append(RenaultChargeStateSensor(vehicle_proxy, "Charge State"))
entities.append(
RenaultChargingRemainingTimeSensor(vehicle_proxy, "Charging Remaining Time")
)
entities.append(RenaultChargingPowerSensor(vehicle_proxy, "Charging Power"))
entities.append(RenaultMileageSensor(vehicle_proxy, "Mileage"))
entities.append(
RenaultOutsideTemperatureSensor(vehicle_proxy, "Outside Temperature")
)
entities.append(RenaultPlugStateSensor(vehicle_proxy, "Plug State"))
entities.append(RenaultRangeSensor(vehicle_proxy, "Range"))
entities.append(
RenaultBatteryTemperatureSensor(vehicle_proxy, "Battery Temperature")
)
return entities
class RenaultBatteryLevelSensor(RenaultBatteryDataEntity):
"""Battery Level sensor."""
@property
def state(self):
"""Return the state of this entity."""
data = self.coordinator.data
if "batteryLevel" in data:
return data.get("batteryLevel")
LOGGER.warning("batteryLevel not available in coordinator data %s", data)
@property
def device_class(self):
"""Return the class of this entity."""
return DEVICE_CLASS_BATTERY
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
return PERCENTAGE
@property
def icon(self):
"""Icon handling."""
data = self.coordinator.data
        chargestate = data.get("chargingStatus") == 1
return icon_for_battery_level(battery_level=self.state, charging=chargestate)
@property
def device_state_attributes(self):
"""Return the state attributes of this entity."""
attrs = {}
attrs.update(super().device_state_attributes)
data = self.coordinator.data
if "batteryAvailableEnergy" in data:
attrs[ATTR_BATTERY_AVAILABLE_ENERGY] = data["batteryAvailableEnergy"]
return attrs
class RenaultBatteryTemperatureSensor(RenaultBatteryDataEntity):
"""Battery Temperature sensor."""
@property
def state(self):
"""Return the state of this entity."""
data = self.coordinator.data
if "batteryTemperature" in data:
return data.get("batteryTemperature")
LOGGER.warning("batteryTemperature not available in coordinator data %s", data)
@property
def device_class(self):
"""Return the class of this entity."""
return DEVICE_CLASS_TEMPERATURE
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
return TEMP_CELSIUS
class RenaultChargeModeSensor(RenaultChargeModeDataEntity):
"""Charge Mode sensor."""
@property
def state(self):
"""Return the state of this entity."""
data = self.coordinator.data
if hasattr(data, "name"):
return data.name
return data
class RenaultChargingRemainingTimeSensor(RenaultBatteryDataEntity):
"""Charging Remaining Time sensor."""
@property
def state(self):
"""Return the state of this entity."""
data = self.coordinator.data
if "chargingRemainingTime" in data:
return data["chargingRemainingTime"]
LOGGER.debug("chargingRemainingTime not available in coordinator data %s", data)
return None
class RenaultChargingPowerSensor(RenaultBatteryDataEntity):
"""Charging Power sensor."""
@property
def state(self):
"""Return the state of this entity."""
data = self.coordinator.data
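        # Models listed in MODEL_USES_KWH already report charging power in kW;
        # all others report watts, hence the division by 1000 below.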
if "chargingInstantaneousPower" in data:
if self.proxy.model_code in MODEL_USES_KWH:
return data["chargingInstantaneousPower"]
else:
return data["chargingInstantaneousPower"] / 1000
LOGGER.debug(
"chargingInstantaneousPower not available in coordinator data %s", data
)
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
return POWER_KILO_WATT
class RenaultOutsideTemperatureSensor(RenaultHVACDataEntity):
"""HVAC Outside Temperature sensor."""
@property
def state(self):
"""Return the state of this entity."""
data = self.coordinator.data
if "externalTemperature" in data:
return data["externalTemperature"]
LOGGER.debug("externalTemperature not available in coordinator data %s", data)
@property
def unit_of_measurement(self) -> str:
"""Return the unit of measurement of this entity."""
return TEMP_CELSIUS
class RenaultPlugStateSensor(RenaultBatteryDataEntity):
"""Plug State sensor."""
@property
def state(self):
"""Return the state of this entity."""
data = self.coordinator.data
if "plugStatus" in data:
try:
plug_state = PlugState(data["plugStatus"])
except ValueError:
plug_state = PlugState.NOT_AVAILABLE
return plug_state.name
LOGGER.debug("plugStatus not available in coordinator data %s", data)
@property
def icon(self):
"""Icon handling."""
if self.state == PlugState.PLUGGED.name:
return "mdi:power-plug"
return "mdi:power-plug-off"
class RenaultChargeStateSensor(RenaultBatteryDataEntity):
"""Charge State sensor."""
@property
def state(self):
"""Return the state of this entity."""
data = self.coordinator.data
if "chargingStatus" in data:
try:
charge_state = ChargeState(data["chargingStatus"])
except ValueError:
charge_state = ChargeState.NOT_AVAILABLE
return charge_state.name
LOGGER.debug("chargingStatus not available in coordinator data %s", data)
@property
def icon(self):
"""Icon handling."""
if self.state == ChargeState.CHARGE_IN_PROGRESS.name:
return "mdi:flash"
return "mdi:flash-off"
class RenaultMileageSensor(RenaultMileageDataEntity):
"""Mileage sensor."""
@property
def state(self):
"""Return the state of this entity."""
data = self.coordinator.data
if "totalMileage" in data:
mileage = data["totalMileage"]
if not self.hass.config.units.is_metric:
mileage = IMPERIAL_SYSTEM.length(mileage, METRIC_SYSTEM.length_unit)
return round(mileage)
LOGGER.debug("totalMileage not available in coordinator data %s", data)
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
if not self.hass.config.units.is_metric:
return LENGTH_MILES
return LENGTH_KILOMETERS
class RenaultRangeSensor(RenaultBatteryDataEntity):
"""Range sensor."""
@property
def state(self):
"""Return the state of this entity."""
data = self.coordinator.data
if "batteryAutonomy" in data:
autonomy = data["batteryAutonomy"]
if not self.hass.config.units.is_metric:
autonomy = IMPERIAL_SYSTEM.length(autonomy, METRIC_SYSTEM.length_unit)
return autonomy
LOGGER.debug("batteryAutonomy not available in coordinator data %s", data)
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
if not self.hass.config.units.is_metric:
return LENGTH_MILES
return LENGTH_KILOMETERS
| 9,604 |
sfepy/mechanics/friction.py
|
olivierverdier/sfepy
| 1 |
2022917
|
"""
Friction-slip model formulated as the implicit complementarity problem.
To integrate over a (dual) mesh, one needs:
* coordinates of element vertices
* element connectivity
* local base for each element
* constant in each sub-triangle of the dual mesh
Data for each dual element:
* connectivity of its sub-triangles
* base directions t_1, t_2
Normal stresses:
* Assemble the residual and apply the LCBC operator described below.
Solution in \hat{V}_h^c:
* construct a restriction operator via LCBC just like in the no-penetration case
* use the substitution:
u_1 = n_1 * w
u_2 = n_2 * w
u_3 = n_3 * w
The new DOF is `w`.
* for the record, no-penetration does:
w_1 = - (1 / n_1) * (u_2 * n_2 + u_3 * n_3)
w_2 = u_2
w_3 = u_3
"""
from sfepy.base.base import *
from sfepy.base.compat import unique
import sfepy.linalg as la
from sfepy.fem import Mesh, Domain, Field, Variables
from sfepy.fem.mappings import VolumeMapping, SurfaceMapping
from sfepy.fem.fe_surface import FESurface
from sfepy.fem.utils import compute_nodal_normals
def edge_data_to_output(coors, conn, e_sort, data):
out = nm.zeros_like(coors)
out[conn[e_sort,0]] = data
return Struct(name='output_data',
mode='vertex', data=out,
dofs=None)
class DualMesh(Struct):
"""Dual mesh corresponding to a (surface) region."""
def __init__(self, region):
"""
Assume a single GeometryElement type in all groups, linear
approximation.
Works for one group only for the moment.
"""
domain = region.domain
self.dim = domain.shape.dim
self.region = copy(region)
self.region.setup_face_indices()
self.mesh_coors = domain.mesh.coors
# add_to_regions=True due to Field implementation shortcomings.
omega = domain.create_region('Omega', 'all', add_to_regions=True)
self.field = Field('displacements', nm.float64, (3,), omega, 1)
self.gel = domain.geom_els.values()[0]
self.sgel = self.gel.surface_facet
face_key = 's%d' % self.sgel.n_vertex
# Coordinate interpolation to face centres.
self.ps = self.gel.interp.poly_spaces[face_key]
centre = self.ps.node_coors.sum(axis=0) / self.ps.n_nod
self.bf = self.ps.eval_base(centre[None,:])
self.surfaces = surfaces = {}
self.dual_surfaces = dual_surfaces = {}
for ig, conn in enumerate(domain.mesh.conns):
surface = FESurface(None, self.region, self.gel.faces, conn, ig)
surfaces[ig] = surface
dual_surface = self.describe_dual_surface(surface)
dual_surfaces[ig] = dual_surface
def describe_dual_surface(self, surface):
n_fa, n_edge = surface.n_fa, self.sgel.n_edge
mesh_coors = self.mesh_coors
# Face centres.
fcoors = mesh_coors[surface.econn]
centre_coors = nm.dot(self.bf.squeeze(), fcoors)
surface_coors = mesh_coors[surface.nodes]
dual_coors = nm.r_[surface_coors, centre_coors]
coor_offset = surface.nodes.shape[0]
# Normals in primary mesh nodes.
nodal_normals = compute_nodal_normals(surface.nodes, self.region,
self.field)
ee = surface.leconn[:,self.sgel.edges].copy()
edges_per_face = ee.copy()
sh = edges_per_face.shape
ee.shape = edges_per_face.shape = (sh[0] * sh[1], sh[2])
edges_per_face.sort(axis=1)
eo = nm.empty((sh[0] * sh[1],), dtype=nm.object)
eo[:] = [tuple(ii) for ii in edges_per_face]
ueo, e_sort, e_id = unique(eo, return_index=True, return_inverse=True)
ueo = edges_per_face[e_sort]
# edge centre, edge point 1, face centre, edge point 2
conn = nm.empty((n_edge * n_fa, 4), dtype=nm.int32)
conn[:,0] = e_id
conn[:,1] = ee[:,0]
conn[:,2] = nm.repeat(nm.arange(n_fa, dtype=nm.int32), n_edge) \
+ coor_offset
conn[:,3] = ee[:,1]
# face centre, edge point 2, edge point 1
tri_conn = nm.ascontiguousarray(conn[:,[2,1,3]])
# Ensure orientation - outward normal.
cc = dual_coors[tri_conn]
v1 = cc[:,1] - cc[:,0]
v2 = cc[:,2] - cc[:,0]
normals = nm.cross(v1, v2)
nn = nodal_normals[surface.leconn].sum(axis=1).repeat(n_edge, 0)
centre_normals = (1.0 / surface.n_fp) * nn
centre_normals /= la.norm_l2_along_axis(centre_normals)[:,None]
dot = nm.sum(normals * centre_normals, axis=1)
assert_((dot > 0.0).all())
# Prepare mapping from reference triangle e_R to a
# triangle within reference face e_D.
gel = self.gel.surface_facet
ref_coors = gel.coors
ref_centre = nm.dot(self.bf.squeeze(), ref_coors)
cc = nm.r_[ref_coors, ref_centre[None,:]]
rconn = nm.empty((n_edge, 3), dtype=nm.int32)
rconn[:,0] = gel.n_vertex
rconn[:,1] = gel.edges[:,0]
rconn[:,2] = gel.edges[:,1]
map_er_ed = VolumeMapping(cc, rconn, gel=gel)
# Prepare mapping from reference triangle e_R to a
# physical triangle e.
map_er_e = SurfaceMapping(dual_coors, tri_conn, gel=gel)
# Compute triangle basis (edge) vectors.
nn = surface.nodes[ueo]
edge_coors = mesh_coors[nn]
edge_centre_coors = 0.5 * edge_coors.sum(axis=1)
edge_normals = 0.5 * nodal_normals[ueo].sum(axis=1)
edge_normals /= la.norm_l2_along_axis(edge_normals)[:,None]
nn = surface.nodes[ueo]
edge_dirs = edge_coors[:,1] - edge_coors[:,0]
edge_dirs /= la.norm_l2_along_axis(edge_dirs)[:,None]
edge_ortho = nm.cross(edge_normals, edge_dirs)
edge_ortho /= la.norm_l2_along_axis(edge_ortho)[:,None]
# Primary face - dual sub-faces map.
# i-th row: indices to conn corresponding to sub-faces of i-th face.
face_map = nm.arange(n_fa * n_edge, dtype=nm.int32)
face_map.shape = (n_fa, n_edge)
# The actual connectivity for assembling (unique nodes per master
# faces).
asm_conn = e_id[face_map]
n_nod = ueo.shape[0] # One node per unique edge.
n_components = self.dim - 1
dual_surface = Struct(name = 'dual_surface_description',
dim = self.dim,
n_dual_fa = conn.shape[0],
n_dual_fp = self.dim,
n_fa = n_fa,
n_edge = n_edge,
n_nod = n_nod,
n_components = n_components,
n_dof = n_nod * n_components,
dual_coors = dual_coors,
coor_offset = coor_offset,
e_sort = e_sort,
conn = conn,
tri_conn = tri_conn,
map_er_e = map_er_e,
map_er_ed = map_er_ed,
face_map = face_map,
asm_conn = asm_conn,
nodal_normals = nodal_normals,
edge_centre_coors = edge_centre_coors,
edge_normals = edge_normals,
edge_dirs = edge_dirs,
edge_ortho = edge_ortho)
return dual_surface
def save(self, filename):
coors = []
conns = []
mat_ids = []
offset = 0
for ig, dual_surface in self.dual_surfaces.iteritems():
cc = dual_surface.dual_coors
coors.append(cc)
conn = dual_surface.conn[:,1:].copy() + offset
conns.append(conn)
mat_id = nm.empty((conn.shape[0],), dtype=nm.int32)
mat_id[:] = ig
mat_ids.append(mat_id)
offset += cc.shape[0]
coors = nm.concatenate(coors, axis=0)
dual_mesh = Mesh.from_data('dual_mesh', coors, None, conns,
mat_ids, ['2_3'] * len(conns))
dual_mesh.write(filename, io='auto')
def save_axes(self, filename):
coors = []
conns = []
mat_ids = []
offset = 0
for ig, dual_surface in self.dual_surfaces.iteritems():
cc = nm.r_[dual_surface.edge_centre_coors,
dual_surface.dual_coors]
coors.append(cc)
conn = dual_surface.conn.copy() + offset
conn[:,1:] += dual_surface.edge_centre_coors.shape[0]
conns.append(conn)
mat_id = nm.empty((conn.shape[0],), dtype=nm.int32)
mat_id[:] = ig
mat_ids.append(mat_id)
offset += cc.shape[0]
coors = nm.concatenate(coors, axis=0)
out = {}
for ig, dual_surface in self.dual_surfaces.iteritems():
eto = edge_data_to_output
out['en_%d' % ig] = eto(coors, conns[ig], dual_surface.e_sort,
dual_surface.edge_normals)
out['ed_%d' % ig] = eto(coors, conns[ig], dual_surface.e_sort,
dual_surface.edge_dirs)
out['eo_%d' % ig] = eto(coors, conns[ig], dual_surface.e_sort,
dual_surface.edge_ortho)
dual_mesh = Mesh.from_data('dual_mesh_vectors', coors, None, conns,
mat_ids, ['2_4'] * len(conns))
dual_mesh.write(filename, io='auto', out=out)
| 9,701 |
DeltaChecksum/hashes.py
|
DavidLutton/Fragments
| 0 |
2024955
|
import hashlib
def generate_hash(filename=None):
"""Generate a hash for a file."""
hasher_256 = hashlib.sha256()
with open(filename, 'rb') as obj:
hasher_256.update(obj.read())
return hasher_256.hexdigest()
# print(generate_hash("hash.py"))
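# A chunked variant (a sketch, not part of the original module) that hashes large
# files without reading them fully into memory; the name and chunk size are illustrative.
def generate_hash_chunked(filename=None, chunk_size=1 << 20):
    """Generate a SHA-256 hash for a file, reading it in chunks."""
    hasher_256 = hashlib.sha256()
    with open(filename, 'rb') as obj:
        for chunk in iter(lambda: obj.read(chunk_size), b''):
            hasher_256.update(chunk)
    return hasher_256.hexdigest()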
| 281 |
simpleml/utils/scoring/__init__.py
|
ptoman/SimpleML
| 15 |
2025973
|
'''
Directory for scripts to facilitate scoring of new samples in production
'''
__author__ = '<NAME>'
| 104 |
tests/test_objects/melody_obj.py
|
aParthemer/MidiCompose
| 0 |
2026487
|
from icecream import ic as ice
from MidiCompose.logic.harmony.note import Note
from MidiCompose.logic.melody.melody import Melody
from MidiCompose.logic.melody.note_set import NoteSet
from MidiCompose.logic.melody import scale
#### MELODY-LIKE OBJECTS ####
mel_like_60_62_64 = [Note(60),Note(62),Note(64)]
mel_60_62_64 = Melody(mel_like_60_62_64)
| 365 |
tests/module/mcdecoder/test_common.py
|
wildlarva/mcdecoder
| 0 |
2026908
|
import os
import pathlib
from mcdecoder.common import make_parent_directories
def test_make_parent_directories_not_exist() -> None:
_remove_temp_file('somedir')
assert make_parent_directories('somedir/some.txt') is True
assert os.path.isdir('somedir') is True
def test_make_parent_directories_exist() -> None:
_remove_temp_file('somedir')
os.makedirs('somedir', exist_ok=True)
assert make_parent_directories('somedir/some.txt') is True
def test_make_parent_directories_not_dir() -> None:
_remove_temp_file('somedir')
pathlib.Path('somedir').touch()
assert make_parent_directories('somedir/some.txt') is False
def test_make_parent_directories_without_parent_dir() -> None:
assert make_parent_directories('some.txt') is True
def _remove_temp_file(file: str):
if os.path.isfile(file):
os.remove(file)
elif os.path.isdir(file):
os.removedirs(file)
| 919 |
src/04_Programming_Hadoop_with_Spark/FilteredLowestRatedMovieSpark.py
|
MilovanTomasevic/The-Ultimate-Hands-On-Hadoop-Tame-your-Big-Data
| 0 |
2026669
|
from pyspark import SparkConf, SparkContext
# Load u.item file into Hadoop
def loadMovieNames():
movieNames = {}
with open("ml-100k/u.item") as f:
for line in f:
fields = line.split('|')
movieNames[int(fields[0])] = fields[1]
return movieNames
def parseInput(line):
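    # u.data rows are whitespace-separated: userID, movieID, rating, timestamp.
    # Emit (movieID, (rating, 1.0)) so ratings can be summed and counted per movie.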
fields = line.split()
return (int(fields[1]), (float(fields[2]), 1.0))
if __name__ == "__main__":
# The main script - create our SparkContext
conf = SparkConf().setAppName("FilteredWorstMovies")
sc = SparkContext(conf = conf)
# Load up our movie ID -> name dictionary
movieNames = loadMovieNames()
# Get the raw data
lines = sc.textFile("hdfs:///user/maria_dev/ml-100k/u.data")
# Convert to (movieID, (rating, 1.0))
movieRatings = lines.map(parseInput)
# Reduce to (movieID, (sumOfRatings, totalRatings))
ratingTotalsAndCount = movieRatings.reduceByKey(lambda movie1 , movie2: (movie1[0] + movie2[0], movie1[1] + movie2[1] ) )
    # Keep only movies with more than 10 ratings
filteredMovies = ratingTotalsAndCount.filter(lambda x: x[1][1] > 10)
    # Map to (movieID, averageRating)
averageRatings = filteredMovies.mapValues(lambda totalAndCount: totalAndCount[0] / totalAndCount[1] )
# Sort by average rating
sortedMovies = averageRatings.sortBy(lambda x: x[1])
    # Take the 10 lowest-rated movies
results = sortedMovies.take(10)
# Print them out:
for result in results:
print(movieNames[result[0]], result[1])
| 1,432 |
DataQuality/models.py
|
Moni313/MonAT_Public
| 0 |
2026595
|
from django.db import models
class Entry(models.Model):
pregnancy_id = models.CharField(max_length=255)
child_id = models.CharField(max_length=255)
eth22 = models.CharField(max_length=255, blank=True, null=True)
eth9 = models.CharField(max_length=255, blank=True, null=True)
eth6 = models.CharField(max_length=255, blank=True, null=True)
eth4 = models.CharField(max_length=255, blank=True, null=True)
eth3 = models.CharField(max_length=255, blank=True, null=True)
age = models.IntegerField(blank=True, null=True)
gender = models.CharField(max_length=5, blank=True, null=True)
source = models.CharField(max_length=255, blank=True, null=True)
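    # Note: format("dd/MM/yy") simply evaluates to the string "dd/MM/yy", which Django
    # treats as this field's verbose_name; DateField has no format argument.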
visitDate = models.DateField(format("dd/MM/yy"), blank=True, null=True)
height = models.FloatField(blank=True, null=True)
weight = models.FloatField(blank=True, null=True)
class Meta:
ordering = ('pregnancy_id',)
| 923 |
fly/logger.py
|
cheburakshu/fly
| 0 |
2026400
|
import logging
import logging.config
import time
from functools import lru_cache
class logger(object):
def __init__(self,*args,**kwargs):
self._logger = None
def logSetup(self):
LOG_PATH = 'log/'
fileName = LOG_PATH + 'runlog.' + str(int(time.time())) + '.out'
logging.config.fileConfig('config/logging.conf',defaults={'logfilename': fileName})
#self.setLogger()
def setLogger(self,_name):
self._logger = logging.getLogger(_name)
def getLogger(self):
return self._logger
def debug(self,message):
#LEVEL 10
self.getLogger().debug(message)
def info(self,message):
#LEVEL 20
self._logger.info(message)
def warn(self,message):
#LEVEL 30
self._logger.warning(message)
def error(self,message):
#LEVEL 40
self._logger.error(message)
def critical(self,message):
#LEVEL 50
self._logger.critical(message)
#log=logger()
#log.logSetup()
#log.info('test')
#log.debug('testing')
#USAGE
#logSetup()
#getLogger()
#info('testing')
#debug('testing')
#warn('testing')
#error('testing')
#critical('testing')
| 1,234 |
common/azureml_appinsights_logger/tests/test_observability.py
|
h2floh/MLOpsManufacturing-1
| 20 |
2026765
|
import pytest
from azureml_appinsights_logger.observability import Observability
from azureml_appinsights_logger.logger_interface import Severity
@pytest.fixture
def mock_loggers(mocker):
mock_loggers = mocker.patch(
"azureml_appinsights_logger.observability.Loggers")
mock_appinsights_logger = mocker.patch(
"azureml_appinsights_logger.appinsights_logger.AppInsightsLogger")
mock_aml_logger = mocker.patch(
"azureml_appinsights_logger.console_logger.ConsoleLogger")
mock_console_logger = mocker.patch(
"azureml_appinsights_logger.azureml_logger.AzureMlLogger")
mock_loggers.loggers = [
mock_appinsights_logger,
mock_aml_logger,
mock_console_logger]
return mock_loggers
def test_log_metric_is_called_by_all_loggers(mocker, mock_loggers):
# arrange
mocker.patch(
'azureml_appinsights_logger.observability.Observability._loggers',
new_callable=mocker.PropertyMock,
return_value=mock_loggers,
create=True
)
# act
mock_observability = Observability()
mock_observability.log_metric("FOO", "BAZ", "BAR")
# assert
mock_observability._loggers.loggers[0].log_metric.assert_called_with(
"FOO", "BAZ", "BAR", False)
mock_observability._loggers.loggers[1].log_metric.assert_called_with(
"FOO", "BAZ", "BAR", False)
mock_observability._loggers.loggers[2].log_metric.assert_called_with(
"FOO", "BAZ", "BAR", False)
def test_log_is_called_by_all_loggers(mocker, mock_loggers):
# arrange
mocker.patch(
'azureml_appinsights_logger.observability.Observability._loggers',
new_callable=mocker.PropertyMock,
return_value=mock_loggers,
create=True
)
# act
mock_observability = Observability()
mock_observability.log("FOO", Severity.CRITICAL)
# assert
mock_observability._loggers.loggers[0].log.assert_called_with(
"FOO", Severity.CRITICAL)
mock_observability._loggers.loggers[1].log.assert_called_with(
"FOO", Severity.CRITICAL)
mock_observability._loggers.loggers[2].log.assert_called_with(
"FOO", Severity.CRITICAL)
| 2,178 |
scripts/gen_cardinalities.py
|
sage-org/sage-experiments
| 0 |
2025335
|
from hdt import HDTDocument
import os
import csv
black_list = ["query_10020.rq", "query_10025.rq", "query_10039.rq", "query_10044.rq", "query_10061.rq", "query_10069.rq", "query_10078.rq", "query_10082.rq", "query_10083.rq", "query_10091.rq", "query_10122.rq", "query_10150.rq", "query_10168.rq", "query_10169.rq"]
path = "watdiv_queries/watdiv_queries_0/"
dest = "watdiv_one_triple/"
card_file = "results/optionals/cardinalities.csv"
document = HDTDocument('/Users/minier-t/Documents/hdt-files/watdiv.10M.hdt')
cardinalities = list()
final_queries = list()
def get_triples(query):
start = query.index('WHERE {')
end = query.index('}')
return query[start+7:end].strip().split(" . ")
def cardinality(triple):
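    # Empty strings act as wildcards for HDT's search_triples, so variable terms
    # (starting with '?') match anything; bound terms are stripped of their
    # surrounding angle brackets.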
terms = triple.split(" ")
subj = terms[0].strip()[1:-1] if not terms[0].startswith('?') else ""
pred = terms[1].strip()[1:-1] if not terms[1].startswith('?') else ""
obj = terms[2].strip()[1:-1] if not terms[2].startswith('?') else ""
iter, card = document.search_triples(subj, pred, obj)
return (triple, card)
for filename in os.listdir(path):
if filename not in black_list:
with open(path + filename) as f:
query = f.read()
triples = get_triples(query)
cards = [cardinality(t) for t in triples]
            cards.sort(key=lambda t: t[1], reverse=True)  # sort in place, highest cardinality first
(max_triple, max_card) = cards[0]
# save cardinality
cardinalities.append((filename, max_card))
# generate query
# with open(dest + filename, 'w') as out:
# out.write("SELECT * WHERE { " + max_triple + " }")
# save cardinalities in CSV file
field_names = ['query', 'cardinality']
with open(card_file, 'w', newline='') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=field_names)
writer.writeheader()
for (query, card) in cardinalities:
writer.writerow({'query': query[:-3], 'cardinality': card})
| 1,910 |
camara_zona/models.py
|
joelsegoviacrespo/control_aforo_migrado
| 0 |
2026638
|
# -*- encoding: utf-8 -*-
from djongo import models
#from instalacion.models import Instalacion
class CamaraZona(models.Model):
_id = models.ObjectIdField()
#id_instalacion = models.ForeignKey(Instalacion, on_delete=models.CASCADE, blank=False)
id_camara_zona = models.CharField(max_length=255, blank=False, default='')
descripcion = models.CharField(max_length=250, blank=False, default='')
camara_zona_estado = models.BooleanField(blank=False, default=True)
def __unicode__(self):
#return self.id_camara_zona
return "s% - s%" % str(self.id_camara_zona.id_cliente, self.id_camara_zona)
def __str__(self):
return str(self.id_camara_zona)
| 693 |
solutions/python3/781.py
|
sm2774us/amazon_interview_prep_2021
| 42 |
2026130
|
class Solution:
def numRabbits(self, answers):
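        # Greedy grouping: an answer `ans` implies a colour group of ans + 1 rabbits.
        # Open a new group (adding ans + 1 to the total) when there is no group for
        # this answer yet or the current one is already full; otherwise just count
        # this rabbit into the open group.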
dic, res = {}, 0
for ans in answers:
(dic[ans], res) = (1, res + ans + 1) if ans not in dic or dic[ans] > ans else (dic[ans] + 1, res)
return res
| 232 |
skate_ppo/speed_skating_run.py
|
snumrl/skate
| 0 |
2025672
|
import os
from skate_ppo.hprun import main
import time
if __name__ == '__main__':
argv = []
env_id = 'speed_skating'
cur_time = time.strftime("%Y%m%d%H%M")
os.environ["OPENAI_LOGDIR"] = os.getcwd() + '/' + env_id + '/log_' + cur_time
os.environ["OPENAI_LOG_FORMAT"] = 'csv'
argv.extend(['--env='+env_id])
argv.extend(['--alg=ppo2'])
argv.extend(['--num_env=8'])
argv.extend(['--num_timesteps=2e7'])
argv.extend(['--save_path='+env_id+'/'+'model_'+cur_time])
argv.extend(['--num_hidden=64'])
argv.extend(['--num_layers=2'])
main(argv)
| 590 |
MCD_Communicator/src/communicator.py
|
MacAndKaj/mdc_tools
| 0 |
2026745
|
from PyQt5.QtWidgets import QApplication, QMainWindow, QVBoxLayout, QPlainTextEdit, QLineEdit, QWidget, QTabWidget, \
QPushButton
from PyQt5 import QtCore
from PyQt5.QtCore import *
import sys
from datetime import datetime
from MCD_Communicator.src.modules import port, messages
class CommunicatorLog(QPlainTextEdit):
def __init__(self):
super().__init__()
self.setFocusPolicy(Qt.NoFocus)
@pyqtSlot(str)
def on_new_tx_text(self, text):
prefix = "[" + self.get_time() + "] "
self.insertPlainText(prefix + text + '\n')
def get_time(self) -> str:
now = datetime.now()
return now.strftime("%d/%m/%Y-%H:%M:%S.%f")
class CommunicatorInputLine(QLineEdit):
text_signal = QtCore.pyqtSignal(str)
def __init__(self) -> None:
super().__init__()
self._input_text = ""
self.returnPressed.connect(self.on_return_pressed)
self.textChanged.connect(self.on_text_changed)
def on_return_pressed(self):
self.text_signal.emit(self._input_text)
self.clear()
def on_text_changed(self, text):
self._input_text = text
class ConnectionButton(QPushButton):
connection_signal = QtCore.pyqtSignal(str)
def __init__(self):
super().__init__("Connect")
self.clicked.connect(self.on_button_clicked)
def on_button_clicked(self):
transitions_map = {
"Connect": "Disconnect",
"Disconnect": "Connect",
}
previous_text = self.text()
self.setText(transitions_map[self.text()])
self.connection_signal.emit(previous_text)
@pyqtSlot()
def on_port_closed(self):
print("on_port_closed")
self.setText("Connect")
class Window(QMainWindow):
def __init__(self):
super().__init__()
self._communicator_log = None
self._communicator_input_line = None
self.setGeometry(0, 0, 600, 400)
self.setWindowTitle("MCD Communicator")
self._connection_button = ConnectionButton()
window_layout = QVBoxLayout()
window_layout.addWidget(self._connection_button)
main_widget = QTabWidget()
self._free_communicator_tab = self.configure_free_communicator()
main_widget.addTab(self._free_communicator_tab, "Free Communicator")
self._message_communicator = QWidget()
main_widget.addTab(self._message_communicator, "Message Communicator")
window_layout.addWidget(main_widget)
layout_widget = QWidget()
layout_widget.setLayout(window_layout)
self.setCentralWidget(layout_widget)
self.setFocus()
self.show()
def configure_free_communicator(self) -> QWidget:
free_communicator = QWidget()
layout = QVBoxLayout()
self._communicator_log = CommunicatorLog()
self._communicator_input_line = CommunicatorInputLine()
self._communicator_input_line.text_signal.connect(self._communicator_log.on_new_tx_text)
layout.addWidget(self._communicator_log)
layout.addWidget(self._communicator_input_line)
free_communicator.setLayout(layout)
return free_communicator
def connect_main_window_with_port(self, port_obj: port.Port):
self._connection_button.connection_signal.connect(port_obj.execute)
self._communicator_input_line.text_signal.connect(port_obj.send)
port_obj.port_closed_signal.connect(self._connection_button.on_port_closed)
class AppCore:
def __init__(self):
self._window = Window()
self._port = port.Port()
self._window.connect_main_window_with_port(self._port)
self._port_thread = QThread()
def start(self):
self._port.moveToThread(self._port_thread)
self._port_thread.started.connect(self._port.run)
self._port_thread.start()
def stop(self):
self._port.stop()
if __name__ == '__main__':
req = messages.PlatformSetMotorSpeedReq(1, -1, 50)
print(req.serialize())
app = QApplication(sys.argv)
app_core = AppCore()
# app_core.start()
status = app.exec_()
# app_core.stop()
sys.exit(status)
| 4,171 |
shop/cascade/search.py
|
Iv/django-shop
| 1 |
2026810
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django import forms
from django.template import Template
from django.template.loader import select_template
from django.utils.translation import ugettext_lazy as _
from cms.plugin_pool import plugin_pool
from shop import settings as shop_settings
from .plugin_base import ShopPluginBase
class ShopSearchResultsForm(forms.ModelForm):
def clean(self):
cleaned_data = super(ShopSearchResultsForm, self).clean()
if self.instance.page and self.instance.page.application_urls != 'ProductSearchApp':
raise ValidationError("This plugin only makes sense on a CMS page with an application of type 'Search'.")
return cleaned_data
class ShopSearchResultsPlugin(ShopPluginBase):
name = _("Search Results")
require_parent = True
parent_classes = ('BootstrapColumnPlugin',)
form = ShopSearchResultsForm
cache = False
def get_render_template(self, context, instance, placeholder):
if instance.page.application_urls == 'ProductSearchApp':
return select_template([
'{}/search/results.html'.format(shop_settings.APP_LABEL),
'shop/search/results.html',
])
return Template('<pre class="bg-danger">This {} plugin is used on a CMS page without an application of type "Search".</pre>'.format(self.name))
def render(self, context, instance, placeholder):
super(ShopSearchResultsPlugin, self).render(context, instance, placeholder)
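        # Returning from the `finally` block below also swallows the KeyError raised
        # when 'edit_mode' is missing from the context (i.e. outside CMS edit mode).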
try:
if context['edit_mode']:
# prevent scrolling while editing
context['data']['next'] = None
finally:
return context
plugin_pool.register_plugin(ShopSearchResultsPlugin)
| 1,833 |
2019/08/03/The Basics of Django ListView/listviewexample/listviewexample/example/views.py
|
kenjitagawa/youtube_video_code
| 492 |
2025727
|
from django.shortcuts import render
from django.views.generic import ListView
from .models import Member
class MemberList(ListView):
model = Member
| 154 |
tests/test_nemo_cf.py
|
willirath/nemo_cf
| 0 |
2026386
|
#!/usr/bin/env python
"""Tests for the `nemo_cf.cli` package."""
import pytest
from nemo_cf.nemo_cf import (
update_all_vars_attrs,
safely_drop_vars,
update_mesh_mask_dataset,
)
from nemo_cf.aux import download_and_extract_zip_file
from pathlib import Path
import xarray as xr
@pytest.fixture(scope="session")
def test_data_dir(tmpdir_factory):
test_data_dir = tmpdir_factory.mktemp("test_data")
download_and_extract_zip_file(
target_dir=test_data_dir,
url=(
"https://zenodo.org/record/3634491/files/"
"NEMO_GYRE_test_data_all_files.v2020.02.03.1.zip"
),
force=True,
)
return Path(str(test_data_dir))
def test_update_all_vars_attrs_works():
dataset = xr.DataArray([], name="var_01").to_dataset()
new_attrs = {"var_01": {"units": "meters", "coordinates": ""}}
dataset = update_all_vars_attrs(dataset, attrs=new_attrs)
assert dataset["var_01"].attrs["units"] == "meters"
assert dataset["var_01"].attrs["coordinates"] == ""
def test_safe_var_dropper():
dataset = xr.DataArray([], name="var_01").to_dataset()
assert "var_01" in safely_drop_vars(dataset, ["var_02", "var_03"])
assert "var_01" not in safely_drop_vars(dataset, ["var_01", "var_02"])
def test_updated_mesh_mask_has_no_nav_lev_lat_lon(test_data_dir):
mesh_mask_file = test_data_dir / "mesh_mask.nc"
mesh_mask_ds = xr.open_dataset(mesh_mask_file)
mesh_mask_ds_updated = update_mesh_mask_dataset(mesh_mask_ds)
assert "nav_lev" not in mesh_mask_ds_updated.data_vars
assert "nav_lat" not in mesh_mask_ds_updated.data_vars
assert "nav_lon" not in mesh_mask_ds_updated.data_vars
def test_updated_mesh_mask_has_no_singleton_dims(test_data_dir):
mesh_mask_file = test_data_dir / "mesh_mask.nc"
mesh_mask_ds = xr.open_dataset(mesh_mask_file)
mesh_mask_ds = update_mesh_mask_dataset(mesh_mask_ds)
assert all(d != 0 for d in mesh_mask_ds.dims.values())
| 1,977 |
setup.py
|
plaplant/zreion
| 0 |
2026897
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020 <NAME>
# Licensed under the MIT License
"""
Setup file for zreion.
Use setup.cfg to configure this project.
This file was generated with PyScaffold 3.2.3.
PyScaffold helps you to put up the scaffold of your new Python project.
Learn more under: https://pyscaffold.org/
"""
import sys
from pkg_resources import VersionConflict, require
from setuptools import setup, Extension
import numpy
from Cython.Build import cythonize
zreion_ext = Extension(
"zreion._zreion",
sources=["src/zreion/zreion.pyx"],
define_macros=[
("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION"),
("CYTHON_TRACE_NOGIL", "1"),
],
extra_compile_args=["-fopenmp"],
extra_link_args=["-fopenmp"],
include_dirs=[numpy.get_include()],
)
try:
require("setuptools>=38.3")
except VersionConflict:
print("Error: version of setuptools is too old (<38.3)!")
sys.exit(1)
if __name__ == "__main__":
setup(
use_pyscaffold=True, ext_modules=cythonize([zreion_ext], language_level=3),
)
| 1,065 |
tools/nntool/generation/generators/kernels/pow2/pool_relu_kernels_generator.py
|
mfkiwl/gap_sdk
| 0 |
2026453
|
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import logging
from generation.at_generators import (NO_ACTIVATION, NO_POOL,
gen_active_at_params, gen_at_pool_relu,
gen_pool_at_params)
from generation.at_types.gen_ctrl import GenCtrl
from generation.code_block import CodeBlock
from generation.generator_decorators import generation_function, QREC_POW2
from graph.dim import PadDim
from graph.types import ConvFusionParameters
from utils.node_id import NodeId
from ..autotiler_kernel import AutotilerKernel
LOG = logging.getLogger("nntool." + __name__)
@generation_function("kernels", (ConvFusionParameters,), qrec_types=(QREC_POW2, ))
def pool_kernels_generator(gen, node, qrec, in_eparams, out_eparams, cname):
del in_eparams, out_eparams, qrec
if isinstance(node, ConvFusionParameters) and node.fusion_type == "pool_active":
cnodes = node.contained_nodes()
quants = [gen.G.quantization[NodeId(node, fnode)] for fnode in cnodes]
gen.kernels.append(PoolReluKernel(node.name, cname, cnodes[0], quants[0],
cnodes[1], quants[1], at_ver=gen.opts['at_ver'],
gen_ctrl=node.get_gen_ctrl()))
return True
return False
class PoolReluKernel(AutotilerKernel):
def __init__(self, node_name, cname, pool_params, pool_q,
act_params, act_q, code_block=None, at_ver=3, gen_ctrl=None):
if gen_ctrl is None:
self.gen_ctrl = GenCtrl(None, cname=cname)
else:
gen_ctrl.cname = cname
self.gen_ctrl = gen_ctrl
in_q = out_q = None
in_dim = out_dim = None
pad_compatibilities = []
if pool_params is not None:
at_pool_params = gen_pool_at_params(pool_params, pad_compatibilities)
if in_dim is None:
in_dim = pool_params.in_dims[0]
out_dim = pool_params.out_dims[0]
if in_q is None:
in_q = pool_q.in_qs[0]
out_q = pool_q.out_qs[0]
else:
at_pool_params = NO_POOL
if act_params is not None:
at_act_params = gen_active_at_params(act_params)
if in_dim is None:
in_dim = act_params.in_dims[0]
if out_dim is None:
out_dim = act_params.out_dims[0]
if in_q is None:
in_q = act_q.in_qs[0]
out_q = act_q.out_qs[0]
if at_ver < 3:
if act_params.activation == "relu6" and out_q.q != 0:
self.gen_ctrl.ReluN = 6 << out_q.q
self.gen_ctrl.ReluNNoNorm = 1
else:
if act_params.activation == "relun":
self.gen_ctrl.ReluN = act_params.activation_params
else:
at_act_params = NO_ACTIVATION
if code_block is None:
code_block = CodeBlock()
if pad_compatibilities:
reduction = PadDim.pad_compatibility_reduce(*pad_compatibilities,
"convolution padding is not compatible with pool padding")
if not reduction[2]: # default is balanced pad left
at_pad_ctrl = next(i for i, v in enumerate(reduction) if v)
self.gen_ctrl.PadType = at_pad_ctrl
if in_q.bits != out_q.bits:
            raise NotImplementedError("only homogeneous operations are supported at present")
if at_pool_params == NO_POOL:
raise NotImplementedError(
"activation layer on its own should not be matched by this kernel")
self.at_pool_params = at_pool_params
self.in_dim = in_dim
self.out_dim = out_dim
self.in_q = in_q
self.out_q = out_q
self.at_act_params = at_act_params
self.cname = cname
self.node_name = node_name
self.at_ver = at_ver
def code(self, code_block=None):
if code_block is None:
code_block = CodeBlock()
code_block.comment("generator for {}", self.node_name)
if not self.gen_ctrl.is_unmodified:
self.gen_ctrl.gen_ctrl_decl(code_block)
gen_at_pool_relu(code_block, self.cname, self.in_q, self.out_q,
self.in_dim, self.out_dim, self.at_pool_params,
self.at_act_params, gen_ctrl=self.gen_ctrl,
at_ver=self.at_ver)
return code_block
| 5,208 |
test/test_model/length_metric/get_lca_length/test_get_lca_length.py
|
SupermeLC/PyNeval
| 12 |
2025530
|
import unittest
from pyneval.metric.utils.edge_match_utils import get_lca_length, get_edge_rtree, get_idedge_dict
from pyneval.model.swc_node import SwcTree, SwcNode
from pyneval.model.euclidean_point import EuclideanPoint, Line
class TestGetLcaLength(unittest.TestCase):
test_swc_tree = SwcTree()
test_rtree = None
id_edge_dict = None
def init(self):
self.test_swc_tree.load("../../data_example/unit_test/get_nearby_edges_tree1.swc")
self.test_rtree = get_edge_rtree(self.test_swc_tree)
self.id_edge_dict = get_idedge_dict(self.test_swc_tree)
self.test_swc_tree.get_lca_preprocess()
def test_1(self):
self.init()
line_tuple_a = tuple([self.test_swc_tree.node_from_id(8),
self.test_swc_tree.node_from_id(7)])
line_tuple_b = tuple([self.test_swc_tree.node_from_id(7),
self.test_swc_tree.node_from_id(6)])
e_node1 = EuclideanPoint(center=[1.03, -2.1, 6.31])
e_node2 = EuclideanPoint(center=[3.87, 0.98, 1.17])
test_length = get_lca_length(self.test_swc_tree, \
line_tuple_a, \
line_tuple_b, \
Line(e_node_1=e_node1,
e_node_2=e_node2))
self.assertEqual(test_length, 9.589854552695694)
def test_2(self):
self.init()
line_tuple_a = tuple([self.test_swc_tree.node_from_id(4),
self.test_swc_tree.node_from_id(3)])
line_tuple_b = tuple([self.test_swc_tree.node_from_id(15),
self.test_swc_tree.node_from_id(9)])
e_node1 = EuclideanPoint(center=[-1.89657, 6.51822, -1.40403])
e_node2 = EuclideanPoint(center=[-2.02446, 0.54277, 7.48183])
test_length = get_lca_length(self.test_swc_tree, \
line_tuple_a, \
line_tuple_b, \
Line(e_node_1=e_node1,
e_node_2=e_node2))
self.assertEqual(test_length, 34.372721303735716)
if __name__ == '__main__':
unittest.main()
| 2,254 |
finish.py
|
BotanyHunter/QuartetAnalysis
| 0 |
2026833
|
#!/usr/bin/python
#version 2.0.6
import os,re,optparse,tarfile
from quartet_condor import *
from fileWriter_condor import *
from fileReader_condor import *
from arguments import *
from errorcodes import *
splitOrder = ["{1,2|3,4}", "{1,3|2,4}", "{1,4|2,3}"]
translation = { 1234 : [1,2,3], 1243 : [1,3,2], 1324 : [2,1,3], 1342 : [2,3,1], 1423 : [3,1,2], 1432 : [3,2,1],
2134 : [1,3,2], 2143 : [1,2,3], 2314 : [3,1,2], 2341 : [3,2,1], 2413 : [2,1,3], 2431 : [2,3,1],
3124 : [2,3,1], 3142 : [2,1,3], 3214 : [3,2,1], 3241 : [3,1,2], 3412 : [1,2,3], 3421 : [1,3,2],
4123 : [3,2,1], 4132 : [3,1,2], 4213 : [2,3,1], 4231 : [2,1,3], 4312 : [1,3,2], 4321 : [1,2,3] }
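# `translation` maps a four-digit key (the positions of the original quartet's taxa
# within the analysed taxa_set, e.g. 2143) to the permutation of the three splits
# needed to express the concordance factors in the original quartet order again.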
def restore_quartet_order(d, taxa_set, orig_quartet):
'''
given the four taxa in taxa_set, find them in orig_quartet
and then change order in d.
'''
#first find the original quartet
quartet = None
for quartet_loop in orig_quartet:
if( taxa_set[0] in quartet_loop and
taxa_set[1] in quartet_loop and
taxa_set[2] in quartet_loop and
taxa_set[3] in quartet_loop ):
quartet = quartet_loop
break
if( quartet == None ): return None, None
#now rearrange d
translate = []
for taxa in quartet:
translate.append ( 1+taxa_set.index(taxa) )
translateKey = translate[0]*1000 + translate[1]*100 + translate[2] * 10 + translate[3]
d_new = {}
for split in range(0,3):
tK = splitOrder[translation[translateKey][split]-1]
d_new[splitOrder[split]] = d[tK]
return d_new, quartet
'''
Isolates CFs from *.concordance files (output from run_indiv_taxa.py) and builds a comma-separated output table
Also isolates gene tree probabilities from *.gene file
'''
def main():
parser = getParser()
options, remainder = parser.parse_args()
output_header = "Taxon1,Taxon2,Taxon3,Taxon4,CF12|34,CF13|24,CF14|23,CI12|34Low,CI12|34High,CI13|24Low,CI13|24High,CI14|23Low,CI14|23High\n"
output_file = open("QuartetAnalysis"+options.outputSuffix+".csv", 'w')
supple_header = "Taxon1,Taxon2,Taxon3,Taxon4,Gene Index,P12|34,P13|24,P14|23\n"
supple_file = open("QuartetAnalysis"+options.outputSuffix+".supple", 'w')
metafile = open("QuartetAnalysis"+options.outputSuffix+".meta", 'r')
for line in metafile:
if line.startswith("- instance ID"):
output_file.write(line)
supple_file.write(line)
output_file.write(output_header)
supple_file.write(supple_header)
output_file.close()
supple_file.close()
finish_output = open("finish.meta",'w')
q = quartet()
wf = fileWriter(None, None, None, None, None, None)
fr = fileReader()
#make a reference dictionary with number/species-name pairs
myQuartets = []
if( options.maintain_order == 1 ):
quartets_filename = 'quartets' + options.outputSuffix + '.txt'
with open(quartets_filename, 'r') as quartets_file:
for quartet_line in quartets_file:
myQuartets.append([int(x) for x in quartet_line.split()])
ref_dict = {}
translate_filename = 'translate' + options.outputSuffix + '.txt'
translate_file = open(translate_filename, 'r')
for line in translate_file:
words = line.split()
ref_dict[words[1]] = int(words[0])
translate_file.close()
try:
myTarfile = tarfile.open(name="finish.tar.gz", mode='r')
files = myTarfile.getnames()
except:
print "Error: tarFile finish.tar.gz does not exist."
return 1
taxa_set = []
concfile_count = 0
genefile_count = 0
for cfile in files:
if(cfile.endswith('concordance')):
concfile_count += 1
print "working on: "+cfile
#find number of genes - which was written to the Q#.txt file
txtFilename = cfile
txtFilename = txtFilename.replace("concordance","txt")
txtFile = myTarfile.extractfile(txtFilename)
num_genes = -1
for line in txtFile:
if "genes processed" in line:
words = line.split()
num_genes = int(words[3])
break
print "gene found = "+str(num_genes)
concord_file = myTarfile.extractfile(cfile)
#find the reference numbers of the taxa
taxa_names = fr.find_taxa_set(concord_file)
taxa_count = 0
for name in taxa_names:
taxa_set.append(ref_dict[name])
taxa_count += 1
if( taxa_count == 4 ):
print "reference #s of taxa set found."
print taxa_set
#find the CFs and add them to the growing output.csv file
d,ciLow,ciHigh = q.isolateCFs(concord_file, num_genes)
concord_file.close()
if( options.maintain_order == 1 ):
d1, taxa_set1 = restore_quartet_order(d, taxa_set, myQuartets)
ciLow1, taxa_set1 = restore_quartet_order(ciLow, taxa_set, myQuartets)
ciHigh1, taxa_set1 = restore_quartet_order(ciHigh, taxa_set, myQuartets)
if( d1 == None ):
finish_output.write('error reordering quartet: ' + str(taxa_set) + '\n')
return 1
else:
d = d1
ciLow = ciLow1
ciHigh = ciHigh1
#taxa_set = taxa_set1
wf.add_to_output_file(d, ciLow, ciHigh, taxa_set1, options.outputSuffix) #changed from taxa_set on 13 Feb - SJH
print "CFs added."
#now grab the gene probabilities from the supplemental file.
for line in txtFile:
myData = [int(x) for x in line.split(',')]
geneNumber = myData[0]
myDict = {}
myDict['{1,2|3,4}'] = myData[1]
myDict['{1,3|2,4}'] = myData[2]
myDict['{1,4|2,3}'] = myData[3]
newData, taxa_set1 = restore_quartet_order(myDict, taxa_set, myQuartets)
newDataString = str(newData["{1,2|3,4}"]) + "," + str(newData["{1,3|2,4}"]) + "," + str(newData["{1,4|2,3}"])
outputString = str(myData[0]) +"," + newDataString + "\n"
wf.add_to_supple_file(outputString, taxa_set1, options.outputSuffix)
txtFile.close()
print "Gene probabilities added.\n"
taxa_set = []
myTarfile.close()
finish_output.write('\nRUNNING finish.\n- ' + str(concfile_count) + ' BUCKy outputs (=quartets) found.\n')
finish_output.close()
main()
| 6,829 |
test/test_paired.py
|
openlabs/mongo-python-driver
| 0 |
2025231
|
# Copyright 2009-2010 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test pairing support.
These tests are skipped by nose by default (since they depend on having a
paired setup). To run the tests, just run this file manually.
Left and right nodes will be $DB_IP:$DB_PORT and $DB_IP2:$DB_PORT2 or
localhost:27017 and localhost:27018 by default.
"""
import unittest
import logging
import os
import sys
import warnings
sys.path[0:0] = [""]
from pymongo.errors import ConnectionFailure
from pymongo.connection import Connection
skip_tests = True
class TestPaired(unittest.TestCase):
def setUp(self):
left_host = os.environ.get("DB_IP", "localhost")
left_port = int(os.environ.get("DB_PORT", 27017))
self.left = "%s:%s" % (left_host, left_port)
right_host = os.environ.get("DB_IP2", "localhost")
right_port = int(os.environ.get("DB_PORT2", 27018))
self.right = "%s:%s" % (right_host, right_port)
self.bad = "%s:%s" % ("somedomainthatdoesntexist.org", 12345)
def tearDown(self):
pass
def skip(self):
if skip_tests:
from nose.plugins.skip import SkipTest
raise SkipTest()
def test_connect(self):
self.skip()
self.assertRaises(ConnectionFailure, Connection,
[self.bad, self.bad])
connection = Connection([self.left, self.right])
self.assertTrue(connection)
host = connection.host
port = connection.port
connection = Connection([self.right, self.left])
self.assertTrue(connection)
self.assertEqual(host, connection.host)
self.assertEqual(port, connection.port)
        slave = self.right if self.left == "%s:%s" % (host, port) else self.left
self.assertRaises(ConnectionFailure, Connection,
[slave, self.bad])
self.assertRaises(ConnectionFailure, Connection,
[self.bad, slave])
def test_repr(self):
self.skip()
connection = Connection([self.left, self.right])
self.assertEqual(repr(connection),
"Connection(['%s', '%s'])" %
(self.left, self.right))
def test_basic(self):
self.skip()
connection = Connection([self.left, self.right])
db = connection.pymongo_test
db.drop_collection("test")
a = {"x": 1}
db.test.save(a)
self.assertEqual(a, db.test.find_one())
def test_end_request(self):
self.skip()
connection = Connection([self.left, self.right])
db = connection.pymongo_test
for _ in range(100):
db.test.remove({})
db.test.insert({})
self.assertTrue(db.test.find_one())
connection.end_request()
if __name__ == "__main__":
skip_tests = False
unittest.main()
| 3,389 |
optimizely_cli/repo.py
|
optimizely/optimizely-cli
| 3 |
2024731
|
# Copyright 2018 Optimizely
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
import findup
import json
import os
import re
import sys
from api import client as api_client
from . import auth
CREDENTIALS_FILE = '.optimizely-credentials.json'
CONFIG_FILE = '.optimizely.json'
class Repo(object):
def __init__(self, root_dir=None, credentials_path=None):
if credentials_path is None:
credentials_path = findup.glob(CREDENTIALS_FILE)
self.credentials = auth.load_credentials(credentials_path)
# if the creds are expired, refresh the token
if self.credentials.is_expired() and self.credentials.refresh_token:
self.credentials = self.oauth.refresh_access_token(
self.credentials.refresh_token
)
self.credentials.write(credentials_path)
if root_dir:
self.vcs = 'git'
self.root = root_dir
else:
self.vcs, self.root = self.detect_vcs_and_project_root()
config = self.load_config()
self.config = config
self.platform = config.get('platform')
self.project_id = config.get('project_id')
if self.credentials.is_valid():
self.client = api_client.ApiClient(self.credentials.access_token)
else:
self.client = None
def require_credentials(self):
if self.credentials.is_valid():
return
click.echo('Could not find credentials. '
"Make sure you have run 'optimizely init' or specified a "
'valid path to a credentials file')
sys.exit(1)
def load_config(self, config_path=None):
root_dir = self.root or '.'
if config_path is None:
config_path = os.path.join(root_dir, CONFIG_FILE)
if config_path and os.path.exists(config_path):
with open(config_path) as f:
return json.load(f)
return {}
def save_config(self, config, config_path=None, echo=False):
root_dir = self.root or '.'
if config_path is None:
config_path = os.path.join(root_dir, CONFIG_FILE)
with open(config_path, 'w') as f:
json.dump(config, f, indent=4, separators=(',', ': '))
if echo:
relative_path = os.path.relpath(config_path)
click.echo('Config file written to {}'.format(relative_path))
def detect_vcs_and_project_root(self):
# it probably would be smart to detect hg/svn/bazaar/whatever here too
# if those are things that people are still using
path = findup.glob('.git')
if path:
return 'git', os.path.dirname(path)
return (None, '.')
def detect_repo_name(self):
root_dir = self.root or '.'
return os.path.basename(os.path.abspath(root_dir))
def detect_project_language(self):
extension_languages = {
'cs': 'csharp',
'java': 'java',
'js': 'javascript',
'php': 'php',
'py': 'python',
'rb': 'ruby',
}
extensions = {}
extension_regex = re.compile(r'\.(\w+)$', re.IGNORECASE)
if self.vcs == 'git':
files = os.popen('git ls-files').readlines()
for f in files:
extension_match = re.search(extension_regex, f)
if not extension_match:
continue
extension = extension_match.group(1)
if extension not in extension_languages:
continue
if not extensions.get(extension):
extensions[extension] = 0
extensions[extension] += 1
common_extensions = sorted(extensions, key=extensions.get,
reverse=True)
if common_extensions:
                return extension_languages[common_extensions[0]]
else:
return None
@property
def oauth(self):
return auth.LocalOAuth2(
host='app.optimizely.com',
client_id=10600571440,
client_secret='<KEY>',
port=5050,
authorize_endpoint='/oauth2/authorize',
token_endpoint='/oauth2/token',
)
| 4,822 |
service/dao.py
|
JiscPER/jper
| 2 |
2025313
|
"""
This module contains all the Data Access Objects for models which are persisted to Elasticsearch
at some point in their lifecycle.
Each DAO is an extension of the octopus ESDAO utility class which provides all of the ES-level heavy lifting,
so these DAOs mostly just provide information on where to persist the data, and some additional storage-layer
query methods as required
"""
from octopus.modules.es import dao
class ContentLogDAO(dao.ESDAO):
__type__ = 'contentlog'
class UnroutedNotificationDAO(dao.ESDAO):
"""
DAO for UnroutedNotifications
"""
__type__ = 'unrouted'
""" The index type to use to store these objects """
@classmethod
def example(cls):
"""
request a document which acts as an example for this type
"""
from service.tests import fixtures
return cls(fixtures.NotificationFactory.unrouted_notification())
class RoutedNotificationDAO(dao.TimeBoxedTypeESDAO):
"""
DAO for RoutedNotification
    This is an extension of the TimeBoxedTypeESDAO object, which means that a new type is created every
    period (e.g. monthly) for new content. This enables rapid dropping of old index types without affecting
    Elasticsearch performance, and works here because RoutedNotifications only persist for a limited time
"""
__type__ = 'routed'
""" The base index type to use to store these objects - this will be appended by the time-boxing features of the DAO with the creation timestamp """
@classmethod
def example(cls):
"""
request a document which acts as an example for this type
"""
from service.tests import fixtures
return cls(fixtures.NotificationFactory.routed_notification())
class FailedNotificationDAO(dao.ESDAO):
"""
DAO for FailedNotifications
"""
__type__ = "failed"
""" The index type to use to store these objects """
class RepositoryConfigDAO(dao.ESDAO):
"""
DAO for RepositoryConfig
"""
__type__ = 'repo_config'
""" The index type to use to store these objects """
class MatchProvenanceDAO(dao.ESDAO):
"""
DAO for MatchProvenance
"""
__type__ = "match_prov"
""" The index type to use to store these objects """
@classmethod
def pull_by_notification(cls, notification_id, size=10):
"""
List all of the match provenance information for the requested notification
:param notification_id: the id of the notification for which to retrieve match provenance
:param size: the maximum number to return (defaults to 10)
"""
q = MatchProvNotificationQuery(notification_id, size=size)
return cls.object_query(q=q.query())
class MatchProvNotificationQuery(object):
"""
Query wrapper which generates an ES query for retrieving match provenance objects
based on the notification to which they are attached
"""
def __init__(self, notification_id, size=10):
"""
Set the parameters of the query
:param notification_id: the id of the notification for which to retrieve match provenance
:param size: the maximum number to return (defaults to 10)
"""
self.notification_id = notification_id
self.size = size
def query(self):
"""
generate the query as a python dictionary object
:return: a python dictionary containing the ES query, ready for JSON serialisation
"""
return {
"query" : {
"term" : {"notification.exact" : self.notification_id}
},
"size" : self.size
}
class RetrievalRecordDAO(dao.ESDAO):
"""
DAO for RetrievalRecord
"""
__type__ = "retrieval"
""" The index type to use to store these objects """
class AccountDAO(dao.ESDAO):
"""
DAO for Account
"""
__type__ = "account"
""" The index type to use to store these objects """
| 3,957 |
gluonar/utils/viz.py
|
haoxintong/gluon-audio
| 9 |
2026403
|
# MIT License
# Copyright (c) 2019 haoxintong
"""Visualization tools for gluonar"""
import seaborn as sns
import librosa as rosa
import librosa.display
import matplotlib.pyplot as plt
__all__ = ["plot_accuracy", "plot_roc", "view_spec"]
def plot_roc(tpr, fpr, x_name="FPR", y_name="TPR"):
sns.set(style="darkgrid")
plt.figure(figsize=(8, 8))
plt.xlabel(x_name)
plt.ylabel(y_name)
plt.title("ROC")
sns.lineplot(x=x_name, y=y_name, data={x_name: fpr, y_name: tpr})
plt.show()
def plot_accuracy(accs, thresholds):
sns.set(style="darkgrid")
plt.figure(figsize=(8, 8))
plt.xlabel("threshold")
plt.ylabel("accuracy")
plt.title("Accuracy")
    sns.lineplot(x="threshold", y="accuracy", data={"accuracy": accs, "threshold": thresholds})
plt.show()
def view_spec(spec_img, sample_rate=16000):
spec_img = rosa.power_to_db(spec_img)
plt.figure()
rosa.display.specshow(spec_img, sr=sample_rate, x_axis='time', y_axis='mel')
plt.title('Spectrogram')
plt.show()
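A usage sketch for view_spec: build a mel power spectrogram from a synthetic one-second sine tone and display it. The signal and parameter values are illustrative only.

import numpy as np

sample_rate = 16000
t = np.linspace(0, 1, sample_rate, endpoint=False)
signal = 0.5 * np.sin(2 * np.pi * 440 * t)
# mel power spectrogram of the synthetic tone
mel_spec = rosa.feature.melspectrogram(y=signal, sr=sample_rate)
view_spec(mel_spec, sample_rate=sample_rate)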
| 1,027 |
ticclat/ingest/opentaal.py
|
TICCLAT/explore
| 2 |
2026252
|
"""OpenTaal lexicon ingestion."""
import os.path
import pandas as pd
from ..dbutils import add_lexicon, session_scope
def ingest(session_maker, base_dir='',
opentaal_file='OpenTaal/OpenTaal-210G-BasisEnFlexies.txt', **kwargs):
"""Ingest OpenTaal lexicon into TICCLAT database."""
wfs = pd.read_csv(os.path.join(base_dir, opentaal_file), header=None)
wfs.columns = ['wordform']
with session_scope(session_maker) as session:
# name = 'OpenTaal-210G-BasisEnFlexies'
name = 'Open Taal modern Dutch word list'
vocabulary = True
add_lexicon(session, name, vocabulary, wfs, **kwargs)
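A usage sketch, assuming a SQLAlchemy sessionmaker bound to a TICCLAT database; the connection URL and base_dir below are placeholders.

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

engine = create_engine('mysql://user:password@localhost/ticclat')  # placeholder URL
Session = sessionmaker(bind=engine)
ingest(Session, base_dir='/data/lexicons')  # placeholder directory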
| 640 |
mechanism.py
|
laurensvalk/ev3devlight-examples
| 4 |
2026850
|
#!/usr/bin/env micropython
"""Development/debug test program."""
from ev3devlight.motors import Motor, Mechanism
from ev3devlight.sensors import Touch
from time import sleep
# Set up devices
gripper_motor = Motor('outA', gear_ratio=12)
gripper_switch = Touch('in1')
# Define and reset mechanism
targets = {'open': 0, 'closed': 110, 'up': 400, 'reset': 430}
default_speed = 50
gripper = Mechanism(gripper_motor, targets, default_speed, gripper_switch)
# Go to predefined absolute target
gripper.go_to_target('closed')
sleep(1)
gripper.go_to_target('open')
| 559 |
octopod/ensemble/dataset.py
|
nathancooperjones/octopod
| 0 |
2026382
|
import numpy as np
from PIL import Image
import torch
from torch.utils.data import Dataset
from octopod.vision.config import cropped_transforms, full_img_transforms
from octopod.vision.helpers import center_crop_pil_image
class OctopodEnsembleDataset(Dataset):
"""
Load image and text data specifically for an ensemble model
Parameters
----------
text_inputs: pandas Series
the text to be used
img_inputs: pandas Series
the paths to images to be used
y: list
A list of lists of dummy-encoded categories
tokenizer: pretrained BERT Tokenizer
BERT tokenizer likely from `transformers`
max_seq_length: int (defaults to 128)
Maximum number of tokens to allow
transform: str or list of PyTorch transforms
specifies how to preprocess the full image for a Octopod image model
To use the built-in Octopod image transforms, use the strings: `train` or `val`
To use custom transformations supply a list of PyTorch transforms.
crop_transform: str or list of PyTorch transforms
specifies how to preprocess the center cropped image for a Octopod image model
To use the built-in Octopod image transforms, use strings `train` or `val`
To use custom transformations supply a list of PyTorch transforms.
"""
def __init__(self,
text_inputs,
img_inputs,
y,
tokenizer,
max_seq_length=128,
transform='train',
crop_transform='train'):
self.text_inputs = text_inputs
self.img_inputs = img_inputs
self.y = y
self.tokenizer = tokenizer
self.max_seq_length = max_seq_length
        if transform in ('train', 'val'):
self.transform = full_img_transforms[transform]
else:
self.transform = transform
        if crop_transform in ('train', 'val'):
self.crop_transform = cropped_transforms[crop_transform]
else:
self.crop_transform = crop_transform
def __getitem__(self, index):
"""Return dict of PyTorch tensors for preprocessed images and text and tensor of labels"""
# Text processing
x_text = self.text_inputs[index].replace('\n', ' ').replace('\r', ' ')
tokenized_x = (
['[CLS]']
+ self.tokenizer.tokenize(x_text)[:self.max_seq_length - 2]
+ ['[SEP]']
)
input_ids = self.tokenizer.convert_tokens_to_ids(tokenized_x)
padding = [0] * (self.max_seq_length - len(input_ids))
input_ids += padding
assert len(input_ids) == self.max_seq_length
bert_text = torch.from_numpy(np.array(input_ids))
# Image processing
full_img = Image.open(self.img_inputs[index]).convert('RGB')
cropped_img = center_crop_pil_image(full_img)
full_img = self.transform(full_img)
cropped_img = self.crop_transform(cropped_img)
y_output = torch.from_numpy(np.array(self.y[index])).long()
return {'bert_text': bert_text,
'full_img': full_img,
'crop_img': cropped_img}, y_output
def __len__(self):
return len(self.text_inputs)
class OctopodEnsembleDatasetMultiLabel(OctopodEnsembleDataset):
"""
Multi label subclass of OctopodEnsembleDataset
Parameters
----------
text_inputs: pandas Series
the text to be used
img_inputs: pandas Series
the paths to images to be used
y: list
a list of binary encoded categories with length equal to number of
classes in the multi-label task. For a 4 class multi-label task
a sample list would be [1,0,0,1]
tokenizer: pretrained BERT Tokenizer
BERT tokenizer likely from `transformers`
max_seq_length: int (defaults to 128)
Maximum number of tokens to allow
transform: str or list of PyTorch transforms
specifies how to preprocess the full image for a Octopod image model
To use the built-in Octopod image transforms, use the strings: `train` or `val`
To use custom transformations supply a list of PyTorch transforms.
crop_transform: str or list of PyTorch transforms
specifies how to preprocess the center cropped image for a Octopod image model
To use the built-in Octopod image transforms, use strings `train` or `val`
To use custom transformations supply a list of PyTorch transforms.
"""
def __getitem__(self, index):
"""Return dict of PyTorch tensors for preprocessed images and text and tensor of labels"""
# Text processing
x_text = self.text_inputs[index].replace('\n', ' ').replace('\r', ' ')
tokenized_x = (
['[CLS]']
+ self.tokenizer.tokenize(x_text)[:self.max_seq_length - 2]
+ ['[SEP]']
)
input_ids = self.tokenizer.convert_tokens_to_ids(tokenized_x)
padding = [0] * (self.max_seq_length - len(input_ids))
input_ids += padding
assert len(input_ids) == self.max_seq_length
bert_text = torch.from_numpy(np.array(input_ids))
# Image processing
full_img = Image.open(self.img_inputs[index]).convert('RGB')
cropped_img = center_crop_pil_image(full_img)
full_img = self.transform(full_img)
cropped_img = self.crop_transform(cropped_img)
y_output = torch.FloatTensor(self.y[index])
return {'bert_text': bert_text,
'full_img': full_img,
'crop_img': cropped_img}, y_output
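A construction sketch, assuming a pretrained BERT tokenizer from transformers and small pandas Series of texts, image paths, and dummy-encoded labels; the file paths and label values below are placeholders.

import pandas as pd
from torch.utils.data import DataLoader
from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
dataset = OctopodEnsembleDataset(
    text_inputs=pd.Series(['a red dress', 'blue denim jeans']),
    img_inputs=pd.Series(['imgs/dress.jpg', 'imgs/jeans.jpg']),  # placeholder paths
    y=[[1, 0], [0, 1]],
    tokenizer=tokenizer,
    max_seq_length=128,
    transform='train',
    crop_transform='train',
)
loader = DataLoader(dataset, batch_size=2, shuffle=True)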
| 5,623 |
api/src/application/users/container.py
|
iliaskaras/housing-units
| 0 |
2026859
|
from dependency_injector import containers, providers
from dependency_injector.providers import Singleton
from application.authentication.container import AuthenticationContainer
from application.infrastructure.database.database import DatabaseEngineWrapper
from application.users.repositories import UserRepository
from application.users.services import LoginUserService, GetActiveUsersService
class UserContainer(containers.DeclarativeContainer):
"""
User inversion of control Container.
"""
wiring_config = containers.WiringConfiguration(
modules=[
"..rest_api.users.controllers",
"..rest_api.authentication.controllers",
]
)
user_repository: Singleton = providers.Singleton(
UserRepository,
db_engine=DatabaseEngineWrapper
)
get_active_users_service: Singleton = providers.Singleton(
GetActiveUsersService,
user_repository=user_repository,
)
login_user_service: Singleton = providers.Singleton(
LoginUserService,
user_repository=user_repository,
get_jwt_service=AuthenticationContainer.get_jwt_service.provided
)
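A wiring sketch of how a controller module listed in wiring_config could receive a service via injection; the function and its body are illustrative, not taken from the original project.

from dependency_injector.wiring import Provide, inject

@inject
def list_active_users(
    get_active_users_service=Provide[UserContainer.get_active_users_service],
):
    # the actual call depends on GetActiveUsersService's interface, omitted here
    return get_active_users_service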
| 1,164 |
mysite/psa/migrations/0007_customcode_next_page.py
|
cjlee112/socraticqs2
| 8 |
2026226
|
# Generated by Django 1.10.8 on 2018-11-19 16:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('psa', '0006_auto_20180512_1527'),
]
operations = [
migrations.AddField(
model_name='customcode',
name='next_page',
field=models.CharField(max_length=255, null=True),
),
]
| 402 |
imagefactory_plugins/MockCloud/MockCloud.py
|
henrysher/imgfac
| 1 |
2024171
|
# encoding: utf-8
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import uuid
import zope.interface
import inspect
from imgfac.CloudDelegate import CloudDelegate
class MockCloud(object):
zope.interface.implements(CloudDelegate)
def __init__(self):
self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
def push_image_to_provider(self, builder, provider, credentials, target, target_image, parameters):
self.log.info('%s called in MockCloud plugin' % (inspect.stack()[1][3]))
builder.provider_image.identifier_on_provider = str(uuid.uuid4())
builder.provider_image.provider_account_identifier = 'mock_user'
def snapshot_image_on_provider(self, builder, provider, credentials, target, template, parameters):
self.log.info('%s called in MockCloud plugin' % (inspect.stack()[1][3]))
builder.provider_image.identifier_on_provider = str(uuid.uuid4())
builder.provider_image.provider_account_identifier = 'mock_user'
def builder_should_create_target_image(self, builder, target, image_id, template, parameters):
self.log.info('%s called in MockCloud plugin' % (inspect.stack()[1][3]))
return True
def builder_will_create_target_image(self, builder, target, image_id, template, parameters):
self.log.info('%s called in MockCloud plugin' % (inspect.stack()[1][3]))
def builder_did_create_target_image(self, builder, target, image_id, template, parameters):
self.log.info('%s called in MockCloud plugin' % (inspect.stack()[1][3]))
def delete_from_provider(self, builder, provider, credentials, target, parameters):
self.log.info('%s called in MockCloud plugin' % (inspect.stack()[1][3]))
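Note that zope.interface.implements() is a Python 2 class-advice API and does not work under Python 3; the modern equivalent declaration uses the @implementer class decorator, sketched below.

from zope.interface import implementer

@implementer(CloudDelegate)
class MockCloudPy3(object):
    # Python 3 style interface declaration; body omitted for brevity
    pass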
| 2,293 |
src/utils/binaries_plt.py
|
dthierry/princetonDacLti
| 0 |
2026513
|
import pandas as pd
import matplotlib.pyplot as plt
def main():
directory = "./"
df = pd.read_csv(directory + "df_binary.csv")
fig, ax = plt.subplots(figsize=(16, 2), dpi=300)
ax.hlines(0.0, min(df.index)-1, max(df.index)+1, color="k")
ax.step(df.index, df["y00"], label="Off_a", color="lightcoral", linestyle="dotted", marker=".", fillstyle="none")
ax.step(df.index, df["y10"], label="Off_b", color="lightcoral", linestyle="dashed")
ax.hlines(1.2, min(df.index)-1, max(df.index)+1, color="k")
ax.step(df.index, df["y01"] + 1.2, label="Warmup_a", color="coral", linestyle="dotted", marker=".", fillstyle="none")
ax.step(df.index, df["y11"] + 1.2, label="Warmup_b", color="coral", linestyle="dashed")
ax.hlines(2.4, min(df.index)-1, max(df.index)+1, color="k")
ax.step(df.index, df["y02"] + 2.4, label="On_a", color="skyblue", linestyle="dotted", marker=".", fillstyle="none")
ax.step(df.index, df["y12"] + 2.4, label="On_b", color="skyblue", linestyle="dashed")
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
#ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.legend()
ax.set_yticks([])
ax.set_title("Switches")
ax.set_xlabel("Hour")
plt.savefig("binary.png", format="png", transparent=True)
plt.clf()
if __name__ == "__main__":
main()
| 1,391 |
uLaw.py
|
Mirage00/Audio-Encoding-Decoding
| 0 |
2026822
|
__author__ = 'BorisMirage'
# --- coding:utf-8 ---
'''
Create by BorisMirage
File Name: uLaw
Create Time: 11/20/18 21:58
'''
import numpy as np
import math
import logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S'
)
def u_law_compress(arr):
"""
u-Law compress function
:param arr: given array
:return: compressed array
"""
max_in_arr = np.amax(arr)
u = 255
out = np.array(arr, dtype=float)
de = math.log(1 + u)
for i in range(0, len(arr)):
up = 1 + u * abs(arr[i])
o = (math.log(up) * max_in_arr) / de
if arr[i] < 0:
o = -o
out[i] = o
return out
def u_law_expend(arr):
"""
    Expand compressed array.
    :param arr: given array
    :return: expanded array
"""
u = 255
max_in_arr = np.amax(arr)
out = np.array(arr, dtype=float)
for i in range(0, len(arr)):
p = math.log(256) * abs(arr[i]) / max_in_arr
o = max_in_arr * (math.exp(p) - 1) / u
if arr[i] < 0:
o = -o
out[i] = o
return out
if __name__ == '__main__':
# print(math.log(1 + 255))
# array = [0.01, 0.02]
# print(array)
# print(u_law_compress(array))
pass
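For reference, the textbook mu-law companding pair on signals normalized to [-1, 1] can be written in vectorized NumPy as below. This is a standard formulation added for illustration; it is not guaranteed to match the loop-based variant above numerically, since that version scales by the array maximum instead.

import numpy as np

def mu_law_compress(x, mu=255):
    # x is assumed to be normalized to [-1, 1]
    x = np.asarray(x, dtype=float)
    return np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)

def mu_law_expand(y, mu=255):
    # exact inverse of mu_law_compress for y in [-1, 1]
    y = np.asarray(y, dtype=float)
    return np.sign(y) * (np.power(1.0 + mu, np.abs(y)) - 1.0) / mu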
| 1,374 |
roast/providers/hwflow.py
|
Xilinx/roast-xilinx
| 1 |
2026381
|
#
# Copyright (c) 2020 Xilinx, Inc. All rights reserved.
# SPDX-License-Identifier: MIT
#
import os
import inspect
import pkgutil
from mimesis import BaseDataProvider
import roast
from box import Box
from roast.providers.randomizer import Randomizer
from roast.utils import read_json
class HWFlowProvider(BaseDataProvider):
def __init__(self, seed, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.seed = seed
self.r = Randomizer(seed=self.seed)
class Meta:
name = "hwflow_provider"
@property
def parameter_file(self):
return self._parameter_file
@parameter_file.setter
def parameter_file(self, file):
providers_dir = os.path.dirname(inspect.getsourcefile(roast.providers.hwflow))
self._parameter_file = os.path.join(providers_dir, file)
_parameters = read_json(self._parameter_file)
self.parameters = Box(_parameters, box_dots=True)
def pick_parameter_value(self, parameter):
parameter_choices = self.parameters[parameter]
return self.r.choice(parameter_choices)
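A usage sketch, assuming a parameters JSON file shipped next to the provider module (the setter joins the file name with the roast providers directory). The file name, key, and values below are made up for illustration.

# hypothetical hwflow_parameters.json: {"clock_mhz": [100, 200, 300]}
provider = HWFlowProvider(seed=42)
provider.parameter_file = "hwflow_parameters.json"
print(provider.pick_parameter_value("clock_mhz"))  # one of 100, 200, 300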
| 1,103 |
examples/my_extract_full_defs.py
|
Ewenwan/pycparser
| 0 |
2024941
|
#coding=utf-8
#-----------------------------------------------------------------
# Extract functions and function parameters
#-----------------------------------------------------------------
import explain_fun_param as efp  # helper module for describing function parameters
import sys
# This is not required if you've installed pycparser into
# your site-packages/ with setup.py
sys.path.extend(['.', '..'])
from pycparser import c_parser, c_ast, parse_file
# Visitor class that reports function calls
class FuncCall_Visitor(c_ast.NodeVisitor):
def visit_FuncCall(self, node):
#print(type(node))
print(" " + node.name.name)
# Visitor class that reports function definitions
class FuncDefVisitor(c_ast.NodeVisitor):
    # visit a function definition node
def visit_FuncDef(self, node):
print(type(node))
# Show information about each function definition
def show_func_def(ast):
try:
for nd in ast.ext:
if type(nd) == c_ast.FuncDef:
                # show the function name, parameter count and location
print("func: %s , loc: %s " % (nd.decl.name, nd.decl.coord))
                # describe the function parameters
print(" param:")
try:
for i in range(len(nd.decl.type.args.params)):
desc = efp.explain_c_declaration(nd.decl.type.args.params[i],ast,expand_struct=True,expand_typedef=True)
print(" %d: %s " % (i+1, desc))
desc_list = desc.split(' ')
if desc_list[3] not in ['pointer','array','struct','function']:
                            print(" " + " ".join(desc_list[3:]))  # the type
                        else:
                            print(" " + desc_list[3])
except:
print(" no param")
                # describe the function's return type
print(" func return type:")
print(" %s " %(efp._explain_type(nd.decl.type.type)))
                # describe the function calls inside the function body
print(" func call:")
#v = FuncCall_Visitor()
#v.visit(nd)
                # the function body
function_body = nd.body
try:
func_call_num = 0
for decl in function_body.block_items:
                        # every child node in the function body
if type(decl) == c_ast.FuncCall:
#decl.show()
func_call_num += 1
print(" %d: %s " % (func_call_num, decl.name.name))
if func_call_num == 0:
print(" no func call")
except:
print(" no func call")
except:
print("ast has 0 node")
def show_func_call_tree(str_main_func, ast):
call_tree_dict = {}
call_tree_dict[str_main_func] = {}
try:
for nd in ast.ext:
            # start the search from the specified function node
if type(nd) == c_ast.FuncDef and str_main_func == nd.decl.name:
function_body = nd.body
try:
func_call_num = 0
for decl in function_body.block_items:
                        # every child node in the function body
if type(decl) == c_ast.FuncCall:
#decl.show()
func_call_num += 1
#print(" %d: %s " % (func_call_num, decl.name.name))
call_tree_dict[str_main_func][decl.name.name]={}
if func_call_num == 0:
print(" no func call")
except:
print(" no func call")
except:
print("ast has 0 node")
return call_tree_dict
def print_func_call_tree(call_tree_dict,cnt=1):
for key in call_tree_dict:
print(" " + key)
print_func_call_tree(call_tree_dict[key],)
# Show function information and the call tree
def show_func(filename):
    use_cpp = True  # execute the C pre-processor, which strips comments and expands macro definitions
#use_cpp = False
if use_cpp:
from pycparser import preprocess_file
        cpp_path = 'cpp'  # path to the pre-processor binary
        cpp_args = r'-Iutils/fake_libc_include'  # pre-processor command line arguments
text = preprocess_file(filename, cpp_path, cpp_args)
else:
import io
with io.open(filename) as f:
text = f.read()
if not use_cpp:
        # strip comments
import decomment as dc
rc = dc.rmcmnt("c")
text, rm = rc.removecomment(text)
parser = c_parser.CParser()
global ast
ast = parser.parse(text, filename)
    # show information about the function definitions
show_func_def(ast)
    # show the function call tree
print("func_call_tree:")
call_tree_d = show_func_call_tree("main",ast)
print_func_call_tree(call_tree_d)
if __name__ == "__main__":
if len(sys.argv) > 1:
filename = sys.argv[1]
else:
filename = '../examples/c_files/hash.c'
show_func(filename)
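As a complement, a minimal self-contained pycparser example that lists function definitions from an in-memory C snippet, without the pre-processing and helper modules used above; the C source and class name are illustrative.

from pycparser import c_parser, c_ast

C_SOURCE = """
int add(int a, int b) { return a + b; }
int main(void) { return add(1, 2); }
"""

class FuncDefLister(c_ast.NodeVisitor):
    def visit_FuncDef(self, node):
        # print each defined function and where it was declared
        print("%s defined at %s" % (node.decl.name, node.decl.coord))

ast = c_parser.CParser().parse(C_SOURCE, filename='<string>')
FuncDefLister().visit(ast)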
| 4,838 |
setup.py
|
ruifeng96150/easy_sklearn
| 1 |
2023923
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#############################################
# File Name: setup.py
# Author: Ruifeng96150
# Mail: <EMAIL>
# Created Time: 2018-10-21 19:17:34
#############################################
from setuptools import setup, find_packages
setup(
name="easy_sklearn",
version="0.1.4",
keywords=("sklearn", "python", "machine learning"),
    description="This is a python library based on sklearn",
    long_description="This is a python library that makes it easier to build sklearn classification and regressor models.",
license="MIT Licence",
url="https://github.com/ruifeng96150/easy_sklearn.git",
author="ruifeng96150",
author_email="<EMAIL>",
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python',
"License :: OSI Approved :: MIT License",
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
],
package_dir={'': '.'},
packages=find_packages('.'),
include_package_data=True,
platforms="any",
install_requires=['numpy', 'pandas', 'scikit-learn']
)
| 1,432 |
src/tools/record_kinect_to_img.py
|
NaviRice/HeadTracking
| 1 |
2026727
|
from navirice_get_image import KinectClient
from navirice_helpers import navirice_img_set_write_file
from navirice_helpers import navirice_image_to_np
from navirice_helpers import navirice_ir_to_np
import navirice_image_pb2
from tkinter import *
import cv2
import numpy as np
from threading import Thread
HOST = '127.0.0.1' # The remote host
PORT = 29000 # The same port as used by the server
class Window(Frame):
def __init__(self, master=None):
Frame.__init__(self, master)
self.master = master
self.init_window()
def init_window(self):
self.should_pull = True
self.should_record = False
self.should_run = True
self.session_name = "default"
self.last_count = 0
self.master.title("NAVIRICE_RECORDER")
recordButton = Button(self, text="RECORD", command=self.record)
recordButton.place(x=5, y=0)
self.session_text = Text(self, height=1, width=20)
self.session_text.place(x=5, y=30)
self.session_text.insert(END, self.session_name)
self.canvas = Canvas(self, height=30, width=30)
self.print_states()
self.pack(fill=BOTH, expand=1)
thread = Thread(target = self.thread_stream)
        thread.daemon = True
thread.start()
def print_states(self):
self.canvas.delete()
fill = '#f00'
if(self.should_record):
fill = '#0f0'
self.canvas.create_oval(4, 0, 25, 25, outline="#000", fill=fill)
self.canvas.pack(fill=BOTH, expand=1)
self.canvas.place(x = 100, y = 0)
def record(self):
self.should_record = not self.should_record
self.print_states()
name = self.session_text.get("1.0",END)
if(len(name)):
self.session_name = name
def kill(self):
self.should_run = False
def thread_stream(self):
kc = KinectClient(HOST, PORT)
kc.navirice_capture_settings(False, True, True)
while(self.should_run):
img_set = None
if(self.should_pull):
img_set, self.last_count = kc.navirice_get_image()
if(img_set != None and img_set.IR.width > 0 and img_set.Depth.width > 0):
if self.should_record:
#processThread =Thread(target=navirice_img_set_write_file, args=[self.session_name, img_set, self.last_count])
#processThread.start()
navirice_img_set_write_file(self.session_name, img_set, self.last_count)
cv2.imshow("IR", navirice_ir_to_np(img_set.IR))
cv2.imshow("DEPTH", navirice_image_to_np(img_set.Depth))
if cv2.waitKey(1) & 0xFF == ord('q'):
print("q pressed in cv window")
del img_set
def main():
root = Tk()
root.geometry("170x65")
root.attributes('-type', 'dialog')
app = Window(root)
def on_quit():
app.kill()
exit()
root.protocol("WM_DELETE_WINDOW", on_quit)
root.mainloop()
if __name__ == "__main__":
main()
| 3,130 |
artikel/urls.py
|
marlonn/technikzeug
| 0 |
2026394
|
from django.conf.urls import url
from . import views
from rest_framework.urlpatterns import format_suffix_patterns
app_name = 'artikel'
urlpatterns = [
url(r'^$', views.SearchView.as_view(), name="search"),
url(r'^(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name='detail'),
url(r'^api/$', views.ArtikelList.as_view()),
url(r'^api/(?P<pk>[0-9]+)/$', views.ArtikelDetail.as_view()),
url(r'^api/users/$', views.UserList.as_view()),
url(r'^api/users/(?P<pk>[0-9]+)/$', views.UserDetail.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns)
| 577 |
tests/permission_test.py
|
luke0922/mdpress
| 1 |
2025794
|
#!/usr/bin/env python
# encoding: utf-8
import json
from unittest import TestCase
import application.models as Models
from application import create_app
class PermissionTest(TestCase):
def setUp(self):
self.app = create_app("TESTING")
self.ctx = self.app.app_context()
self.ctx.push()
self.client = self.app.test_client()
Models.Role.objects.create(id=1, name="READER", permission=Models.Permission.READ)
Models.Role.objects.create(id=2, name="CREATER", permission=Models.Permission.CREATE)
Models.Role.objects.create(id=3, name="UPDATER", permission=Models.Permission.UPDATE | Models.Permission.CREATE)
Models.Role.objects.create(id=4, name="DELETER", permission=Models.Permission.DELETE | Models.Permission.CREATE)
Models.Role.objects.create(id=5, name="READER", permission=Models.Permission.DEFAULT)
def tearDown(self):
for p in Models.Role.objects.all():
p.delete()
for p in Models.User.objects.all():
p.delete()
def user_add_post(self):
post = {
'title': 'title',
'slug': 'excerpt',
'markdown': 'content',
'categories': [],
'tags': [],
'status': 'PUBLISHED'
}
resp = self.client.post(
'/posts/post', data=json.dumps(post),
headers={'Authorization': 'Bearer {}'.format(self.token),
'Content-Type': 'application/json'})
resp_json = json.loads(resp.data)
return resp_json
def _get_all_post(self):
return json.loads(self.client.get('/posts/all').data)['data']['posts']
def user_update_post(self):
post = self._get_all_post()[0]
print "all post {}".format(post)
post['title'] = 'new_title'
resp = self.client.put(
'/posts/post', data=json.dumps(post),
headers={'Authorization': 'Bearer {}'.format(self.token),
'Content-Type': 'application/json'})
resp_json = json.loads(resp.data)
return resp_json
def user_delete_post(self):
self.user_add_post()
old_posts = self._get_all_post()
post = old_posts[0]
data = {'ids': [int(post.get('id'))]}
print data
resp = self.client.delete(
'/posts/post', data=json.dumps(data),
headers={'Authorization': 'Bearer {}'.format(self.token),
'Content-Type': 'application/json'})
resp_json = json.loads(resp.data)
return resp_json
def create_permission_user_and_login(self, permission):
role = Models.Role.objects.filter(permission=permission).first()
Models.User.objects.create(
name="zhangsan", password="password",
email="<EMAIL>", role=[role])
print "create_permission_user_and_login with all user: {}".format(Models.User.objects.all())
user = {
'username': '<EMAIL>',
'password': 'password',
}
resp = self.client.post('/authentication/token', data=json.dumps(user),
headers={'Content-Type': 'application/json'})
login_resp = json.loads(resp.data)
print login_resp
self.token = login_resp.get('access_token')
def test_add_post_permission(self):
self.create_permission_user_and_login(Models.Permission.CREATE)
resp_json = self.user_add_post()
resp_code = resp_json.get('code')
resp_data = resp_json.get('data')
resp_msg = resp_json.get('msg')
self.assertEquals(resp_code, 2000)
self.assertEqual(resp_data.get('post').get('title'), 'title')
self.assertIn('success', resp_msg)
resp_json = self.user_update_post()
resp_code = resp_json.get('code')
resp_data = resp_json.get('data')
resp_msg = resp_json.get('msg')
self.assertEquals(resp_code, 2009)
self.assertEquals(resp_data, {})
self.assertIn('no permission', resp_msg)
resp_json = self.user_delete_post()
resp_code = resp_json.get('code')
resp_data = resp_json.get('data')
resp_msg = resp_json.get('msg')
self.assertEquals(resp_code, 2009)
self.assertEquals(resp_data, {})
self.assertIn('no permission', resp_msg)
def test_update_post_permission(self):
self.create_permission_user_and_login(Models.Permission.UPDATE | Models.Permission.CREATE)
resp_json = self.user_add_post()
resp_code = resp_json.get('code')
resp_data = resp_json.get('data')
resp_msg = resp_json.get('msg')
self.assertEquals(resp_code, 2000)
self.assertEqual(resp_data.get('post').get('title'), 'title')
self.assertIn('success', resp_msg)
resp_json = self.user_update_post()
resp_code = resp_json.get('code')
resp_data = resp_json.get('data')
resp_msg = resp_json.get('msg')
self.assertEquals(resp_code, 2000)
self.assertEqual(resp_data.get('post').get('title'), 'new_title')
self.assertIn('success', resp_msg)
resp_json = self.user_delete_post()
resp_code = resp_json.get('code')
resp_data = resp_json.get('data')
resp_msg = resp_json.get('msg')
self.assertEquals(resp_code, 2009)
self.assertEquals(resp_data, {})
self.assertIn('no permission', resp_msg)
def test_delete_post_permission(self):
self.create_permission_user_and_login(
Models.Permission.DELETE | Models.Permission.CREATE)
resp_json = self.user_add_post()
resp_code = resp_json.get('code')
resp_data = resp_json.get('data')
resp_msg = resp_json.get('msg')
self.assertEquals(resp_code, 2000)
self.assertEqual(resp_data.get('post').get('title'), 'title')
self.assertIn('success', resp_msg)
resp_json = self.user_update_post()
resp_code = resp_json.get('code')
resp_data = resp_json.get('data')
resp_msg = resp_json.get('msg')
self.assertEquals(resp_code, 2009)
self.assertEqual(resp_data, {})
self.assertIn('no permission', resp_msg)
resp_json = self.user_delete_post()
print "resp_json: {}".format(resp_json)
resp_code = resp_json.get('code')
resp_data = resp_json.get('data')
resp_msg = resp_json.get('msg')
self.assertEquals(resp_code, 2000)
self.assertEqual(resp_data.get('posts')[0].get('title'), 'title')
self.assertIn('success', resp_msg)
| 6,592 |
Scikit_learn/standardization.py
|
yishantao/DailyPractice
| 0 |
2026492
|
# -*- coding:utf-8 -*-
"""This module is used for standardization"""
import pandas as pd
# data = pd.DataFrame({'A': [2, 4, 3, 7, 9], 'B': [5, 8, 2, 3, 7]})
# # Min-Max standardization
# from sklearn.preprocessing import MinMaxScaler
#
# # initialize a scaler object
# scaler = MinMaxScaler()
# # call the scaler's fit_transform method, passing in the columns to be processed
# print(data)
# # data['A'] = scaler.fit_transform(data[['A']])
# data = scaler.fit_transform(data[['A', 'B']])
# print(data)
# # Z-Score standardization
# from sklearn.preprocessing import scale
#
# data['A'] = scale(data['A'])
# data['B'] = scale(data['B'])
# # data = scale(data[['A', 'B']])
# print(data)
# # Normalizer normalization
# from sklearn.preprocessing import Normalizer
#
# scaler = Normalizer()
# data = scaler.fit_transform(data[['A', 'B']])
# print(data)
# data = pd.DataFrame({'A': [2, 4, 3, 7, 9], 'B': ['大学', '大专', '大专', '硕士', '博士'], 'C': ['男', '男', '女', '女', '男']})
# print(data)
# print(data['B'].drop_duplicates())
# build a mapping dictionary for education levels
# education_level_dict = {'博士': 4, '硕士': 3, '大学': 2, '大专': 1}
# # use the map method to convert the column into numeric values
# data['B'] = data['B'].map(education_level_dict)
# print(data)
# dummies = pd.get_dummies(
# data,
# columns=['C'],
# prefix=['性别'],
# prefix_sep='_',
# dummy_na=False,
# drop_first=False)
# print(dummies)
# import numpy as np
# from sklearn.preprocessing import Imputer
#
# data = pd.DataFrame({'A': [2, 4, 3, 7, 9], 'B': [11, np.nan, np.nan, 23, 88], 'C': [23, 34, 12, np.nan, 34]})
# print(data)
# print('*' * 35)
# imputer = Imputer(strategy='mean')
# data[['B', 'C']] = imputer.fit_transform(data[['B', 'C']])
# print(data)
# data = pd.DataFrame(
# {'产品': ['A产品', 'A产品', 'A产品', 'A产品', 'A产品'], '销售额': [4.5, 3.4, 2.5, 2.1, 2.0], '忠诚度': [6.6, 7.0, 4.2, 7.7, 9.1]})
# print(data)
# # use the VarianceThreshold class to filter features by variance
# from sklearn.feature_selection import VarianceThreshold
#
# # constructing this class takes one parameter, the minimum variance threshold;
# # then call its transform method to filter the feature values
# variancethreshold = VarianceThreshold(threshold=0.8)
# variancethreshold.fit_transform(
# data[['销售额', '忠诚度']]
# )
# # use the get_support method to obtain the indices of the selected feature columns,
# # then use those indices to pick the corresponding column names from the original data
# print(variancethreshold.get_support(indices=True))
#
# # check why both '销售额' (sales) and '忠诚度' (loyalty) were selected
# print(data[['销售额', '忠诚度']].std(ddof=0))
# print(variancethreshold.variances_)
data = pd.DataFrame(
{'产品': ['A产品', 'A产品', 'A产品', 'A产品', 'A产品'], '销售额': [4.5, 3.4, 2.5, 2.1, 2.0], '忠诚度': [6.6, 7.0, 4.2, 7.7, 9.1],
'投资人数': [22, 12, 4, 8, 13], '年收入': [50, 35, 23, 20, 17]})
# # the SelectKBest class takes a regression scoring function and the number of features to keep;
# # create a new SelectKBest object
# from sklearn.feature_selection import SelectKBest
# from sklearn.feature_selection import f_regression
#
# selectKBest = SelectKBest(
# f_regression, k=2
# )
#
# # next, select the independent variables and call the fit_transform method,
# # passing in the independent and dependent variables to pick the two most correlated features.
# feature = data[['销售额', '忠诚度', '投资人数']]
# bestFeature = selectKBest.fit_transform(
# feature,
# data['年收入']
# )
#
# # to find the names of these two independent variables, use the get_support method to get the corresponding column names
# print(feature.columns[selectKBest.get_support()])
# # use the RFE class: in the estimator argument,
# # set the base model to the linear regression model LinearRegression,
# # then set the number of features to select to 2;
# # next, use this rfe object and pass the independent and dependent variables to fit_transform
# # to obtain the features we need
# from sklearn.feature_selection import RFE
# from sklearn.linear_model import LinearRegression
#
# feature = data[['销售额', '忠诚度', '投资人数']]
#
# rfe = RFE(
# estimator=LinearRegression(),
# n_features_to_select=2
# )
# sFeature = rfe.fit_transform(
# feature,
# data['年收入']
# )
#
# # likewise, to find the names of these two independent variables,
# # use the get_support method to get the corresponding column names
# print(rfe.get_support())
# from sklearn.feature_selection import SelectFromModel
# from sklearn.linear_model import LinearRegression
#
# feature = data[['销售额', '忠诚度', '投资人数']]
# lrModel = LinearRegression()
# selectFromModel = SelectFromModel(lrModel)
# selectFromModel.fit_transform(
# feature,
# data['年收入']
# )
# print(selectFromModel.get_support())
# # load the iris feature data into the data variable
# import pandas
# from sklearn import datasets
# import matplotlib.pyplot as plt
# from sklearn.decomposition import PCA
# from mpl_toolkits.mplot3d import Axes3D
#
# iris = datasets.load_iris()
#
# data = iris.data
#
# # load the class labels into the target variable
# target = iris.target
#
# # use principal component analysis to compress the four-dimensional data to three dimensions
# pca_3 = PCA(n_components=3)
# data_pca_3 = pca_3.fit_transform(data)
#
# # plotting
# colors = {0: 'r', 1: 'b', 2: 'k'}
# markers = {0: 'x', 1: 'D', 2: 'o'}
#
# # pop up the figure window
# # %matplotlib qt
#
# # three-dimensional data
# fig = plt.figure(1, figsize=(8, 6))
# ax = Axes3D(fig, elev=-150, azim=110)
#
# data_pca_gb = pandas.DataFrame(
# data_pca_3
# ).groupby(target)
#
# for g in data_pca_gb.groups:
# ax.scatter(
# data_pca_gb.get_group(g)[0],
# data_pca_gb.get_group(g)[1],
# data_pca_gb.get_group(g)[2],
# c=colors[g],
# marker=markers[g],
# cmap=plt.cm.Paired
# )
# plt.show()
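Since every block in this file is commented out, here is a small runnable recap of the basic scaling steps sketched above; the data values are illustrative only.

from sklearn.preprocessing import MinMaxScaler, scale

demo = pd.DataFrame({'A': [2, 4, 3, 7, 9], 'B': [5, 8, 2, 3, 7]})
demo[['A', 'B']] = MinMaxScaler().fit_transform(demo[['A', 'B']])  # Min-Max scaling
z_scored = scale(pd.DataFrame({'A': [2, 4, 3, 7, 9]}))             # Z-score scaling
print(demo)
print(z_scored)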
| 4,756 |
meraki_ssid/functions.py
|
gve-sw/Meraki_ssid_availability_scheduler
| 0 |
2025208
|
#!/bin/env python
""" Copyright (c) 2020 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Cisco Sample
Code License, Version 1.1 (the "License"). You may obtain a copy of the
License at
https://developer.cisco.com/docs/licenses
All use of the material herein must be in accordance with the terms of
the License. All rights not expressly granted by the License are
reserved. Unless required by applicable law or agreed to separately in
writing, software distributed under the License is distributed on an "AS
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied.
"""
import json
import datetime
import os
import pymongo
import requests
from pymongo import MongoClient
#Can replace 'db' with '172.17.0.2' when run locally
client = MongoClient(
'db',
27017)
db = client.tododb
# method to enter a new schedule for an SSID into the mongo db
def upload(ssid_record, request, switch):
    new_collection = db[ssid_record['identity']]
    days = ['monday', 'tuesday', 'wednesday', 'thursday',
            'friday', 'saturday', 'sunday']
    # one document per weekday, each holding that day's start/end times
    for day_number, day_name in enumerate(days):
        day_doc = ssid_record.copy()
        day_doc['switch'] = switch
        day_doc['day'] = day_number
        day_doc['start_times'] = request.form.getlist("start_" + day_name)
        day_doc['end_times'] = request.form.getlist("end_" + day_name)
        new_collection.insert_one(day_doc)
        print("{} record sent".format(day_name))
# store the attributes of an SSID
def send_collection(request,meraki_key,network_id,switch):
collection_list = db.collection_names()
ssid_record = {}
ssid_record['name'] = str(request.form.get("ssid_id")).split('+')[1]
ssid_record['start_times'] = []
ssid_record['end_times'] = []
ssid_record['day'] = ''
ssid_record['network_id'] = network_id
ssid_record['ssid_number'] = str(request.form.get("ssid_id")).split('+')[0]
ssid_record['identity'] = ssid_record['name'] + "_" + ssid_record['network_id'] + "_" + ssid_record['ssid_number']
ssid_record['key'] = meraki_key
ssid_record['switch'] = switch
if ssid_record['identity'] in collection_list:
db.drop_collection(ssid_record['identity'])
upload(ssid_record,request,switch)
else:
upload(ssid_record,request,switch)
# grab the current day/time and split it into day, hour, and minute
def current_time_day():
time_and_day = {}
current_time = datetime.datetime.now()
time_and_day["current_time"] = current_time
time_and_day["current_hour"] = str(current_time.hour).zfill(2)
time_and_day["current_min"] = str(current_time.minute).zfill(2)
time_and_day["current_day"] = current_time.weekday()
return time_and_day
# method to collect the current day's schedule documents from every SSID collection
def query_documents(time_day):
day = time_day["current_day"]
documents = []
collection_list = db.list_collection_names()
for ssid_identifier in collection_list:
print(ssid_identifier)
collection = db[ssid_identifier]
cursor = collection.find({"day": day})
for document in cursor:
documents.append(document)
return documents
# retrieve the status of a particular SSID
def get_ssid_status(document):
network_id = document["network_id"]
ssid_num = document["ssid_number"]
key = document["key"]
url = "https://dashboard.meraki.com/api/v0/networks/(network_id)/ssids/(ssid_num)"
url = url.replace("(network_id)", network_id)
url = url.replace("(ssid_num)", ssid_num)
headers = {
'X-Cisco-Meraki-API-Key': key
}
response = requests.request("GET", url, headers=headers)
json_data = json.loads(response.text)
return json_data["enabled"]
#change status for particular SSID
def change_ssid_status(document,status):
print("change ssid method started")
url = "https://n143.meraki.com/api/v0/networks/(network)/ssids/(num)"
payload = "{\n \"name\": \"(name)\",\n \"enabled\": (status),\n \"splashPage\": \"None\",\n \"perClientBandwidthLimitUp\": 0,\n \"perClientBandwidthLimitDown\": 0,\n \"ssidAdminAccessible\": false,\n \"ipAssignmentMode\": \"NAT mode\",\n \"authMode\": \"open\"\n}"
headers = {
'X-Cisco-Meraki-API-Key': document["key"],
'Content-Type': "application/json",
}
payload = payload.replace("(name)",document["name"])
url = url.replace("(network)",document["network_id"])
url = url.replace("(num)",document["ssid_number"])
    if status == 0:
payload = payload.replace("(status)","false")
response = requests.request("PUT", url, data=payload, headers=headers)
print("Response from API request",response.text)
    if status == 1:
payload = payload.replace("(status)","true")
response = requests.request("PUT", url, data=payload, headers=headers)
print("Response from API request",response.text)
# method to verify if an SSID needs to be turned on
def turn_on(document_list,time):
print("turn on method starting")
for index in range(len(document_list)):
#print("start times " ,document_list[index]["start_times"], " for ",document_list[index]["identity"])
for value in range(len(document_list[index]["start_times"])):
if (time["current_hour"] == document_list[index]["start_times"][value].split(":")[0] and time["current_min"] == document_list[index]["start_times"][value].split(":")[1]):
if document_list[index]["switch"] is None:
status = 0
else:
status = 1
print("WORKED")
change_ssid_status(document_list[index],status)
#method to verify if an SSID needs to be turned off
def turn_off(document_list,time):
print("turn off method starting")
for index in range(len(document_list)):
#print("end times " ,document_list[index]["end_times"] , " for " , document_list[index]["identity"])
for value in range(len(document_list[index]["end_times"])):
if (time["current_hour"] == document_list[index]["end_times"][value].split(":")[0] and time["current_min"] == document_list[index]["end_times"][value].split(":")[1]):
print("end time: " + document_list[index]["end_times"][value])
if document_list[index]["switch"] is None:
status = 1
else:
status = 0
change_ssid_status(document_list[index],status)
print("STARTED")
time_day = current_time_day()
print(time_day)
todays_documents = query_documents(time_day)
print(todays_documents)
turn_on(todays_documents, time_day)
turn_off(todays_documents, time_day)
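A suggested alternative for building the PUT body in change_ssid_status: assembling a dict and serializing it with json.dumps avoids the hand-edited quoting of the template string. This is a sketch of a variant, not the original implementation; the helper name is made up.

def build_ssid_payload(name, enabled):
    # same fields as the template string above, expressed as a dict
    payload = {
        "name": name,
        "enabled": enabled,
        "splashPage": "None",
        "perClientBandwidthLimitUp": 0,
        "perClientBandwidthLimitDown": 0,
        "ssidAdminAccessible": False,
        "ipAssignmentMode": "NAT mode",
        "authMode": "open",
    }
    return json.dumps(payload)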
| 8,256 |
annoyingbus/scraper.py
|
cgwelcome/annoyingbus
| 0 |
2026232
|
from bs4 import BeautifulSoup
from datetime import timedelta
from .information import Information
class Scraper(object):
def __init__(self):
self.website = None
self.container = Information()
def build(self, offset_days, base_date):
self.website.container = self.container
company = self.website.get_company()
for day in range(offset_days):
print("{1} - Extracting day {0}".format(day+1, company))
date = base_date + timedelta(days=day)
self.website.update_date(date)
html_content = self.website.load().content
soup = BeautifulSoup(html_content, "html.parser")
self.website.set_locations()
for row in self.website.get_rows(soup):
self.website.set_price(row)
self.website.set_departure(row)
self.website.set_arrival(row)
self.website.set_duration(row)
self.container.update_row()
def get_info(self):
return self.container
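The website attribute is assigned elsewhere; from build() one can infer the interface it must provide. The stub below is a hedged illustration of that implied protocol with made-up return values; none of these names come from the original project.

class StubWebsite(object):
    # minimal object satisfying every call Scraper.build makes on self.website
    container = None

    def get_company(self):
        return "StubBus"

    def update_date(self, travel_date):
        self.date = travel_date

    def load(self):
        class Response(object):
            content = "<html><table><tr>fake row</tr></table></html>"
        return Response()

    def set_locations(self):
        pass

    def get_rows(self, soup):
        return soup.find_all("tr")

    def set_price(self, row):
        pass

    def set_departure(self, row):
        pass

    def set_arrival(self, row):
        pass

    def set_duration(self, row):
        pass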
| 1,054 |
quick_srcipts/beginners/strings/print_n_times.py
|
HaydnCCI/code_efficiency
| 0 |
2026570
|
n = 3
my_string = "abcd"
my_list = [1,2,3]
print(my_string * n)
# abcdabcdabcd
print(my_list * n)
# [1, 2, 3, 1, 2, 3, 1, 2, 3]
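One caveat worth noting alongside the repetition example above: repeating a list of mutable objects copies references rather than the objects themselves.

nested = [[0]] * 3
nested[0].append(1)
print(nested)
# [[0, 1], [0, 1], [0, 1]]  -- all three entries point to the same inner list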
| 133 |
2.3/testdemo/test/demo_test.py
|
diblaze/TDP002
| 0 |
2026025
|
#!/usr/bin/env python
"""
Test module for demo.py.
Runs various tests on the demo module. Simply run this module to test
the demo.py module.
"""
import test
import demo
def test_echo():
print("In echo test")
echo = demo.echo("hej")
test.assert_equal("hej", echo)
test.assert_not_equal(None, echo)
def test_add():
print("In add test")
added = demo.add("hej ", "hopp")
test.assert_equal("hej hopp", added)
test.assert_not_equal("hej", added)
def run_module_tests():
test.run_tests([test_echo,
test_add])
if __name__ == "__main__":
run_module_tests()
| 614 |