max_stars_repo_path (stringlengths 4–182) | max_stars_repo_name (stringlengths 6–116) | max_stars_count (int64 0–191k) | id (stringlengths 7–7) | content (stringlengths 100–10k) | size (int64 100–10k)
---|---|---|---|---|---|
pyIPCMI/ToolChain/GNU.py
|
mithro/pyIPCMI
| 5 |
2171617
|
# EMACS settings: -*- tab-width: 2; indent-tabs-mode: t; python-indent-offset: 2 -*-
# vim: tabstop=2:shiftwidth=2:noexpandtab
# kate: tab-width 2; replace-tabs off; indent-width 2;
#
# ==============================================================================
# Authors: <NAME>
# <NAME>
#
# Python Class: GNU tools specific classes
#
# License:
# ==============================================================================
# Copyright 2017-2018 <NAME> - Bötzingen, Germany
# Copyright 2007-2016 Technische Universität Dresden - Germany
# Chair of VLSI-Design, Diagnostics and Architecture
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# load dependencies
from pathlib import Path
from re import compile as re_compile
from lib.Functions import Init
from lib.CallBy import CallByRefParam
from pyIPCMI.Base.Exceptions import PlatformNotSupportedException
from pyIPCMI.Base.Executable import Environment, DryRunException, Executable, CommandLineArgumentList
from pyIPCMI.Base.Executable import ExecutableArgument, ValuedFlagArgument, ShortTupleArgument, LongTupleArgument, LongFlagArgument, StringListArgument
from pyIPCMI.Base.Logging import LogEntry, Severity
from pyIPCMI.DataBase.Entity import SimulationResult
from pyIPCMI.ToolChain import ToolChainException, OutputFilteredExecutable
__api__ = [
'GNUException',
'Make',
'GNUMakeQuestaSimFilter',
'CocotbSimulationResultFilter'
]
__all__ = __api__
class GNUException(ToolChainException):
pass
class Make(OutputFilteredExecutable):
def __init__(self, platform, dryrun, logger=None):
if (platform == "Linux"): executablePath = Path("/usr/bin/make")
else: raise PlatformNotSupportedException(platform)
super().__init__(platform, dryrun, executablePath, logger=logger)
self.Parameters[self.Executable] = executablePath
class Executable(metaclass=ExecutableArgument):
pass
class SwitchGui(metaclass=ValuedFlagArgument):
_name = "GUI"
Parameters = CommandLineArgumentList(
Executable,
SwitchGui
)
def RunCocotb(self):
parameterList = self.Parameters.ToArgumentList()
self.LogVerbose("command: {0}".format(" ".join(parameterList)))
if (self._dryrun):
self.LogDryRun("Start process: {0}".format(" ".join(parameterList)))
return
try:
self.StartProcess(parameterList)
except Exception as ex:
raise GNUException("Failed to launch Make.") from ex
self._hasOutput = False
self._hasWarnings = False
self._hasErrors = False
simulationResult = CallByRefParam(SimulationResult.Error)
try:
iterator = iter(CocotbSimulationResultFilter(GNUMakeQuestaSimFilter(self.GetReader()), simulationResult))
line = next(iterator)
line.IndentBy(self.Logger.BaseIndent + 1)
self._hasOutput = True
self.LogNormal(" Make messages")
self.LogNormal(" " + ("-" * (78 - self.Logger.BaseIndent*2)))
self.Log(line)
while True:
self._hasWarnings |= (line.Severity is Severity.Warning)
self._hasErrors |= (line.Severity is Severity.Error)
line = next(iterator)
line.IndentBy(self.Logger.BaseIndent + 1)
self.Log(line)
except DryRunException:
simulationResult <<= SimulationResult.DryRun
except StopIteration:
pass
finally:
if self._hasOutput:
self.LogNormal(" " + ("-" * (78 - self.Logger.BaseIndent*2)))
return simulationResult.value
class Bash(Executable):
def __init__(self, platform, dryrun, logger=None):
if (platform == "Linux"): executablePath = Path("/bin/bash")
else: raise PlatformNotSupportedException(platform)
super().__init__(platform, dryrun, executablePath, logger=logger)
self.Parameters[self.Executable] = executablePath
class Executable(metaclass=ExecutableArgument):
pass
class SwitchCommand(metaclass=ShortTupleArgument):
_name = "c"
Parameters = CommandLineArgumentList(
Executable,
SwitchCommand
)
def GetEnvironment(self, settingsFile=None, variables=""):
if (settingsFile is None):
self.Parameters[self.SwitchCommand] = "env"
else:
self.Parameters[self.SwitchCommand] = "{variables}source {settingsFile!s} && env".format(settingsFile=settingsFile, variables=variables)
parameterList = self.Parameters.ToArgumentList()
self.LogVerbose("command: {0}".format(" ".join(parameterList)))
if (self._dryrun):
self.LogDryRun("Start process: {0}".format(" ".join(parameterList)))
return
try:
self.StartProcess(parameterList)
except Exception as ex:
raise GNUException("Failed to launch /bin/bash.") from ex
env = Environment()
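        # each line of the `env` output has the form NAME=VALUE; split on the first '='
        # only, so that values which themselves contain '=' are preserved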
iterator = iter(self.GetReader())
for line in iterator:
try:
var,value = line.split("=", 1)
env.Variables[var] = value
except Exception as ex:
raise GNUException("Error while reading output from /bin/bash.") from ex
return env
class LCov(Executable):
def __init__(self, platform, dryrun, logger=None):
if (platform == "Linux"): executablePath = Path("/usr/bin/lcov")
else: raise PlatformNotSupportedException(platform)
super().__init__(platform, dryrun, executablePath, logger=logger)
self.Parameters[self.Executable] = executablePath
class Executable(metaclass=ExecutableArgument):
pass
class FlagCapture(metaclass=LongFlagArgument):
_name = "capture"
class SwitchDirectory(metaclass=LongTupleArgument):
_name = "directory"
class SwitchOutputFile(metaclass=LongTupleArgument):
_name = "output-file"
Parameters = CommandLineArgumentList(
Executable,
FlagCapture,
SwitchDirectory,
SwitchOutputFile
)
def Execute(self):
parameterList = self.Parameters.ToArgumentList()
self.LogVerbose("command: {0}".format(" ".join(parameterList)))
if (self._dryrun):
self.LogDryRun("Start process: {0}".format(" ".join(parameterList)))
return
try:
self.StartProcess(parameterList)
except Exception as ex:
raise GNUException("Failed to launch /usr/bin/lcov.") from ex
iterator = iter(self.GetReader())
for line in iterator:
print(line)
class GenHtml(Executable):
def __init__(self, platform, dryrun, logger=None):
if (platform == "Linux"): executablePath = Path("/usr/bin/genhtml")
else: raise PlatformNotSupportedException(platform)
super().__init__(platform, dryrun, executablePath, logger=logger)
self.Parameters[self.Executable] = executablePath
class Executable(metaclass=ExecutableArgument):
pass
class SwitchOutputDirectory(metaclass=LongTupleArgument):
_name = "output-directory"
class SwitchInputFiles(metaclass=StringListArgument):
pass
Parameters = CommandLineArgumentList(
Executable,
SwitchOutputDirectory,
SwitchInputFiles
)
def Execute(self):
parameterList = self.Parameters.ToArgumentList()
self.LogVerbose("command: {0}".format(" ".join(parameterList)))
if (self._dryrun):
self.LogDryRun("Start process: {0}".format(" ".join(parameterList)))
return
try:
self.StartProcess(parameterList)
except Exception as ex:
raise GNUException("Failed to launch /usr/bin/genhtml.") from ex
iterator = iter(self.GetReader())
for line in iterator:
print(line)
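# Maps GNU Make / QuestaSim transcript lines to log severities based on their prefix;
# '# //' banner lines are dropped entirely.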
def GNUMakeQuestaSimFilter(gen):
for line in gen:
if line.startswith("# --"): yield LogEntry(line, Severity.Verbose)
elif line.startswith("# Loading"): yield LogEntry(line, Severity.Verbose)
elif line.startswith("# ** Note"): yield LogEntry(line, Severity.Info)
elif line.startswith("# ** Warn"): yield LogEntry(line, Severity.Warning)
elif line.startswith("# ** Erro"): yield LogEntry(line, Severity.Error)
elif line.startswith("# ** Fata"): yield LogEntry(line, Severity.Error)
elif line.startswith("# //"): continue
else: yield LogEntry(line, Severity.Normal)
# Could not be moved to CocotbSimulator. Function could not be imported. (Why?)
def CocotbSimulationResultFilter(gen, simulationResult):
passedRegExpStr = r".*?in tear_down\s+Passed \d+ tests" # Source filename
passedRegExp = re_compile(passedRegExpStr)
failedRegExpStr = r".*?in tear_down\s+Failed \d+ out of \d+ tests" # Source filename
failedRegExp = re_compile(failedRegExpStr)
for line in gen:
color = None
passedRegExpMatch = passedRegExp.match(str(line))
failedRegExpMatch = failedRegExp.match(str(line))
if passedRegExpMatch is not None:
color = Init.Foreground['GREEN']
simulationResult <<= SimulationResult.Passed
elif failedRegExpMatch is not None:
color = Init.Foreground['RED']
simulationResult <<= SimulationResult.Failed
# color is set when message should be printed
if color is not None:
yield LogEntry("{COLOR}{line}{NOCOLOR}".format(COLOR=color, line=line.Message, **Init.Foreground), line.Severity,
line.Indent)
continue
yield line
| 9,442 |
Machine_Learning_Old_Files/[28] Support Vector Machine.py
|
Ghasak/PracticalMachineLeanring
| 0 |
2171026
|
# Here we will use the support vector machine
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from matplotlib import style
style.use('ggplot')
# Lets try to write the basic of support vector machine
# we will use a class of several objects and methods
class Support_Vector_Machine:
    # when the class is instantiated, only __init__ runs; the other methods run when they are called explicitly
def __init__(self, visualization=True): # visualization is used with a boolean variable.
self.visualization = visualization
        self.colors = {1: 'r', -1: 'b'}  # class colours: red for +1 and blue for -1
if self.visualization:
self.fig = plt.figure()
            self.ax = self.fig.add_subplot(1, 1, 1)  # simply add a subplot as a grid for the plotting
    # now we will define the fit method (note that you can put `pass` at the end of a method
    # as a placeholder in case you don't know what to add yet)
    # this method (fit) is effectively the training on the data
def fit(self, data):
self.data = data
        # here we will create a dictionary mapping ||w|| to [w, b]; see the theory to understand more
        # first we create an empty dictionary and later we will populate it with this information
opt_dict = {}
        # as we learned before, these are the sign transforms to check (see the theory)
        # each time a candidate vector is created we check it against every transform here
transforms = [[1, 1],
[-1, 1],
[-1, -1],
[1, -1]]
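        # applying any of these sign transforms to a candidate w leaves its norm ||w|| unchanged,
        # so all four sign combinations are tried for every magnitude of w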
        # let's check the maximum and minimum values of the data
all_data = []
        # yi is the class label, which is the output
        # yi is either -1 or +1
for yi in self.data: # to iterate through classes
            for featureset in self.data[yi]:  # to iterate through feature sets, e.g. [1,7], then [2,8], ...
for feature in featureset: # to iterate through points 1,7
all_data.append(feature) # to append them to a list populated with the numbers
        # Now we can use the max function to find the largest value in our data
self.max_feature_value = max(all_data)
self.min_feature_value = min(all_data)
        # now that these values are stored we can get rid of the list
all_data = None
        # Now recall the picture of the big U shape: first we take large steps, then medium ones,
        # and finally small steps once we get close to the optimum value we want
        # we could also use threads or multiprocessing here
step_size = [self.max_feature_value * 0.1, # Big Step
self.max_feature_value * 0.01, # Medium Steps
self.max_feature_value * 0.001] # Small (expensive) steps
# Support vectors yi(xi.w+b) = 1
        # for example it may come out as 1.01 and so on; sometimes with a linear kernel it will not hit exactly 1
b_range_multiple = 5 # extremely expensive, we don't care about b that much
# we don't need to take as small steps with b as we do with w
b_multiple = 5
        latest_optimum = self.max_feature_value * 10  # the first (largest) candidate for w has both components equal to this value
# Now we will start the stepping
for step in step_size:
w = np.array([latest_optimum, latest_optimum])
# we can do this because convex
optimized = False # until we run out of our step_size
while not optimized:
                for b in np.arange(-1*(self.max_feature_value*b_range_multiple),  # range from -40 to 40 for this data (±max_feature_value*b_range_multiple)
self.max_feature_value*b_range_multiple,
step*b_multiple):
for transformation in transforms:
w_t = w*transformation
found_option = True
# Here is the weakest link in the SVM fundamentally
# SMO attempts to fix this a bit
# But not that much
# remember the constrain function is
# yi(xi.w+b) >= 1
# Try to add a break later
for i in self.data:
for xi in self.data[i]:
yi = i
                                if not yi*(np.dot(w_t,xi)+b) >= 1:  # (fixed a typo here: it was i, not 1)
found_option = False
#print(xi,yi*(np.dot(w_t,xi)+b))
# I have removed the break it was here
if found_option:
opt_dict[np.linalg.norm(w_t)]=[w_t,b]
if w[0] < 0:
optimized = True
print('Optimized a step.')
else:
                    # w = [5, 5]
                    # step = 1
                    # w - step subtracts the scalar from both components, i.e. it is equivalent to w - [step, step]
w = w - step
norms = sorted([n for n in opt_dict])
            # remember: opt_dict maps ||w|| to [w, b]
opt_choice = opt_dict[norms[0]]
self.w = opt_choice[0]
self.b = opt_choice[1]
latest_optimum = opt_choice[0][0]+step*2
for i in self.data:
for xi in self.data[i]:
yi=i
print(xi, ':',yi*(np.dot(self.w,xi)+self.b))
    # now we will define a method to make the prediction
def predict(self, features):
# should return the sign of the class, as sign(x.w+b)
        # you could write a lambda that thresholds values above or below zero,
        # or you can simply use the numpy sign function
classification = np.sign(np.dot(np.array(features), self.w) + self.b)
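        # np.sign returns -1, 0 or +1; a result of 0 means the point lies exactly on the
        # decision boundary, which is why it is excluded from the scatter plot below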
if classification != 0 and self.visualization:
self.ax.scatter(features[0],features[1], s=200, marker='*', c = self.colors[classification])
return classification
def visualize(self):
[[self.ax.scatter(x[0],x[1],s=100,color=self.colors[i]) for x in data_dict[i]] for i in data_dict]
# Hyperplane = x.w+b
# we want v = x.w+b
# Positive support vector PSV = 1
# Negative support vector NPS = -1
# decision hyperplane = 0
        # here we will draw the support vector lines and the decision hyperplane
def hyperplane(x,w,b,v):
return (-w[0]*x-b+v)/w[1]
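        # derivation: a point (x, y) on the hyperplane satisfies w[0]*x + w[1]*y + b = v,
        # so solving for y gives y = (v - w[0]*x - b) / w[1], which is what hyperplane() returns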
        datarange = (self.min_feature_value*0.9,self.max_feature_value*1.1)  # pad the range so the plotted lines are not cut off at the data limits
hyp_x_min = datarange[0]
hyp_x_max = datarange[1]
##################################################################################################
# (w.x+b) = 1
# Positive support vector hyperplane
psv1 = hyperplane(hyp_x_min,self.w,self.b,1) # it will be a y so we will use the x to draw the line
psv2 = hyperplane(hyp_x_max,self.w,self.b,1)
        self.ax.plot([hyp_x_min,hyp_x_max],[psv1,psv2], 'k')  # now we have the positive support vector line
##################################################################################################
# (w.x+b) = -1
# Negative support vector hyperplane
nsv1 = hyperplane(hyp_x_min, self.w, self.b, -1) # it will be a y so we will use the x to draw the line
nsv2 = hyperplane(hyp_x_max, self.w, self.b, -1)
        self.ax.plot([hyp_x_min, hyp_x_max], [nsv1, nsv2], 'k')  # now we have the negative support vector line
##################################################################################################
# (w.x+b) = 0
# Decision boundary vector hyperplane
db1 = hyperplane(hyp_x_min, self.w, self.b, 0) # it will be a y so we will use the x to draw the line
db2 = hyperplane(hyp_x_max, self.w, self.b, 0)
        self.ax.plot([hyp_x_min, hyp_x_max], [db1, db2], 'y--')  # now we have the decision boundary line
plt.show()
# now we will start with simple data and later we will extend
data_dict = {-1: np.array([[1, 7],
[2, 8],
[3, 8],]),
1: np.array([[5, 1],
[6, -1],
[7, 3],])}
svm = Support_Vector_Machine()
svm.fit(data=data_dict)
predict_us = [[0,10],
[1,3],
[3,4],
[3,5],
[5,5],
[5,6],
[6,-5],
[5,8],]
for p in predict_us:
svm.predict(p)
svm.visualize()
| 8,659 |
creds.py
|
fbennets/HU-grade-crawler
| 2 |
2171097
|
user = '' # Replace with your agnes.hu-berlin.de login name
password = '' # Replace with your agnes.hu-berlin.de password
account = '' # Replace with your Twilio Account SID
auth = '' # Twilio Authentication Token
number = '' # Replace with your own mobile number where you want to
# receive the SMS in international format e.g. '+49123456789'
| 350 |
modules/cnn/delete_temp_files.py
|
mevol/pediip
| 1 |
2170906
|
import argparse
import glob
import logging
import os
from pathlib import Path
def list_temp_files(directory):
"""List all temporary files within a directory which are marked with *temp* in their name"""
try:
dir_path = Path(directory)
assert dir_path.exists()
except Exception:
raise Exception(f"Expected absolute path to valid directory, got {directory}")
temps = glob.glob(str(dir_path / "*temp*"))
temp_files = [file for file in temps if Path(file).is_file()]
return temp_files
def delete_file(filename):
"""Delete the file and return True"""
try:
file_path = Path(filename)
assert file_path.exists(), f"Could not find file to delete at {file_path}"
os.remove(file_path)
except Exception:
logging.error(f"Could not delete file at {filename}")
raise
return True
def delete_temp_files(directory):
"""Delete all temporary files in the directory and return True when complete"""
logging.debug(f"Deleting all files in {directory}")
try:
temp_files = list_temp_files(directory)
except Exception:
raise
try:
for file in temp_files:
delete_file(file)
except Exception:
raise
return True
if __name__ == "__main__":
# As command line utility, check user wants to do this
parser = argparse.ArgumentParser()
parser.add_argument(
"-a",
help="directory provided is absolute path, otherwise assumed relative",
action="store_true",
)
parser.add_argument(
"directory",
help="the directory you wish to remove temporary files from",
type=str,
)
parser.add_argument(
"--force", help="delete all temp files without checking", action="store_true"
)
args = parser.parse_args()
    if args.a:
dir_name = args.directory
else:
dir_name = Path(os.getcwd()) / args.directory
    if args.force:
delete_temp_files(dir_name)
print("All temp files deleted")
else:
temp_files = list_temp_files(dir_name)
print("Found following temp files:")
for file in temp_files:
print(Path(file).name)
delete = input("Are you sure you want to delete all of these files? [y/N]")
if delete == "y":
delete_temp_files(dir_name)
print("All temp files deleted")
| 2,430 |
random_ctfs/tghack19/n00b/echo_chamber/script.py
|
bernardosequeir/CTFfiles
| 0 |
2169923
|
from pwn import *
r = remote("echo.tghack.no",5555)
for x in range(50):
line = r.recvline()
    print(line)
r.send(line)
print(r.recvline())
#flag TG19{behold_the_echo_chamber_of_secrets}
| 194 |
test cases/common/143 list of file sources/gen.py
|
kira78/meson
| 4,047 |
2170520
|
import shutil
import sys
if __name__ == '__main__':
if len(sys.argv) != 3:
raise Exception('Requires exactly 2 args')
shutil.copy2(sys.argv[1], sys.argv[2])
| 174 |
lawstructural/lawstructural/studentwelfare.py
|
PhilErickson/LawStructural
| 1 |
2171074
|
""" Module for computing student welfare changes based on information regime
change
"""
from __future__ import print_function, division
import numpy as np
from numpy.random import random_sample
import pandas as pd
from copy import deepcopy
from os.path import join, dirname
from sklearn import ensemble
from scipy.stats import norm #pylint: disable=no-name-in-module
import lawstructural.lawstructural.utils as lu
import lawstructural.lawstructural.firststage as fs
import lawstructural.lawstructural.constants as lc
def gen_n_apps(treat):
""" Generate average number of applications per year, conditional on
information
"""
if treat:
years = range(2010, 2015)
else:
years = range(2000, 2010)
n_apps = []
for year in years:
n_apps.append(fs.gen_n_apps(year).tolist()[0])
return np.mean(n_apps)
class StudentWelfare(object):
""" Primary class for the module
Parameters
----------
fs_params: dict
estimates from firststage.lsn_long_est, to be used for admission and
matriculation probabilities
sigmasq: dict
sigma-squared estimate for distribution of independent school-level
values with keys 'treat0' and 'treat1' for estimates before and after
information regime change
"""
def __init__(self, fs_tuition, sigmasq, opts):
self.fs_tuition = fs_tuition
self.sigmasq = sigmasq
self.opts = opts
self.fs_rhs = lu.student_problem_vars()
self.lsn_rhs = deepcopy(self.fs_rhs)
self.lsn_rhs.remove('OverallRank')
self.lsn_rhs.remove('Tuition')
self.lsn_models = self.gen_matric_ev()
self.data = None
def gen_data(self, treat):
""" Generate dataset of students for lc.N_PERIODS years. Variables
include MedianLSAT, UndergraduatemedianGPA, year, treat
"""
data_dir = join(dirname(dirname(__file__)), 'data')
source_data = pd.read_csv(join(data_dir, 'lawschoolnumbers.csv'))
source_data = source_data.loc[source_data.treat == treat]
n_apps = gen_n_apps(treat)
treat_years = {0: range(2003, 2010), 1: range(2010, 2014)}[treat]
data = []
        for _ in range(2013, 2013 + lc.N_PERIODS):
students = np.random.choice(
source_data.loc[source_data.year.isin(treat_years), 'user'],
size=n_apps
)
students = pd.DataFrame({
'user': students[np.where(pd.notnull(students))]
})
data_year = pd.merge(
students,
source_data.loc[source_data.year.isin(treat_years)]
)
data.append(data_year)
data = pd.concat(data)
self.data = data.reset_index()
def gen_matric_ev(self):
""" Estimate expected rank of school j attended by student i given
student i is in the final matriculant group
"""
data_dir = join(dirname(dirname(__file__)), 'data')
data = pd.read_csv(join(data_dir, 'lawschoolnumbers.csv'))
models = {}
models['admit_binary'] = ensemble.GradientBoostingClassifier(
n_estimators=100, learning_rate=1.0, max_depth=1, random_state=0
).fit(data[self.lsn_rhs], data['admit_binary'])
models['matric_binary'] = ensemble.GradientBoostingClassifier(
n_estimators=100, learning_rate=1.0, max_depth=1, random_state=0
).fit(data.loc[data.admit_binary == 1, self.lsn_rhs],
data.loc[data.admit_binary == 1, 'matric_binary'])
params = {'n_estimators': 500, 'max_depth': 4,
'min_samples_split': 1, 'learning_rate': 0.01,
'loss': 'ls'}
models['matric_ev'] = ensemble.GradientBoostingRegressor(**params)
models['matric_ev'].fit(
data.loc[data.matric_binary == 1, self.lsn_rhs],
data.loc[data.matric_binary == 1, 'matric'])
return models
def predict_admit(self):
""" Use first stage estimates to predict binary admission outcome """
threshold = random_sample(self.data.shape[0])
self.data['admit_hat'] = self.lsn_models['admit_binary'].predict_proba(
self.data[self.lsn_rhs]
)[:, 1]
self.data['admit_hat'] = 1 * (self.data['admit_hat'] > threshold)
def predict_matric(self):
""" Use first stage estimates to predict matriculation outcomes
"""
self.data.loc[self.data['admit_hat'] == 1, 'matric_hat_pr'] = \
self.lsn_models['matric_binary'].predict_proba(
self.data.loc[self.data['admit_hat'] == 1, self.lsn_rhs]
)[:, 1]
data_temp = deepcopy(self.data) # use treatment probabilities for utl
data_temp['treat'] = 1
self.data.loc[self.data['admit_hat'] == 1, 'matric_hat_pr_treat'] = \
self.lsn_models['matric_binary'].predict_proba(
data_temp.loc[data_temp['admit_hat'] == 1, self.lsn_rhs]
)[:, 1]
self.data.loc[self.data['admit_hat'] == 1, 'OverallRank'] = \
self.lsn_models['matric_ev'].predict(
self.data.loc[self.data['admit_hat'] == 1, self.lsn_rhs]
)
self.data.loc[self.data['admit_hat'] == 1, 'Tuition'] = \
self.fs_tuition.predict(
self.data.loc[self.data['admit_hat'] == 1,
['OverallRank', 'treat', 'year']]
)
def utility(self):
""" Function derived from inverting probability of student i
attending school j given school j is in admissions set
"""
utility = norm.ppf(self.data['matric_hat_pr_treat'],
scale=self.sigmasq)
utility = self.data['Tuition'] - utility
return np.nansum(utility)
def payoff_matric(self):
""" Predict payoff from any given matriculation """
pass
def gen_welfare(self, treat):
""" Generate welfare for the simulated population of students for
given information regime.
"""
self.gen_data(treat)
self.predict_admit()
self.predict_matric()
payoff = self.utility()
return payoff
def policy_comp(self):
""" Compare surplus with and without """
print("SIMULATING STUDENT SIDE")
print(" * Simulating without treatment")
payoff0 = self.gen_welfare(0)
print(" * Simulating with treatment")
payoff1 = self.gen_welfare(1)
diff = payoff1 - payoff0
pdiff = diff / payoff0
print(" - Change in Consumer Surplus: {0}".format(diff))
print(" - Percent change in Consumer Surplus: {0}".format(pdiff))
return diff
| 6,761 |
train.py
|
hummat/if-net
| 0 |
2169330
|
import models.local_model as model
import models.data.voxelized_data_shapenet as voxelized_data
from models import training
import argparse
import torch
from multiprocessing import cpu_count
# python train.py -posed -dist 0.5 0.5 -std_dev 0.15 0.05 -res 32 -batch_size 40 -m
parser = argparse.ArgumentParser(
description='Run Model'
)
parser.add_argument('-pointcloud', dest='pointcloud', action='store_true')
parser.add_argument('-voxels', dest='pointcloud', action='store_false')
parser.set_defaults(pointcloud=False)
parser.add_argument('-pc_samples', default=3000, type=int)
parser.add_argument('-dist', '--sample_distribution', default=[0.5, 0.5], nargs='+', type=float)
parser.add_argument('-std_dev', '--sample_sigmas', default=[0.15, 0.015], nargs='+', type=float)
parser.add_argument('-batch_size', default=32, type=int)
parser.add_argument('-res', default=128, type=int)
parser.add_argument('-m', '--model', default='LocNet', type=str)
parser.add_argument('-o', '--optimizer', default='Adam', type=str)
try:
args = parser.parse_args()
except:
args = parser.parse_known_args()[0]
if args.model == 'ShapeNet32Vox':
net = model.ShapeNet32Vox()
if args.model == 'ShapeNet128Vox':
net = model.ShapeNet128Vox()
if args.model == 'ShapeNetPoints':
net = model.ShapeNetPoints()
if args.model == 'SVR':
net = model.SVR()
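# note: these are independent `if` statements, so an unrecognised -m/--model value leaves
# `net` undefined and the Trainer construction below fails with a NameError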
train_dataset = voxelized_data.VoxelizedDataset('train',
voxelized_pointcloud=args.pointcloud,
pointcloud_samples=args.pc_samples,
# data_path="/home/matthias/Data2/datasets/shapenet/matthias/disn/core",
data_path="/home/matthias/Data2/datasets/shapenet/occupancy_networks/ShapeNet/core",
# data_path="/net/rmc-gpu03/home_local/humt_ma/occupancy_networks/core",
res=args.res,
sample_distribution=args.sample_distribution,
sample_sigmas=args.sample_sigmas,
num_sample_points=2048,
batch_size=args.batch_size,
num_workers=cpu_count())
val_dataset = voxelized_data.VoxelizedDataset('val',
voxelized_pointcloud=args.pointcloud,
pointcloud_samples=args.pc_samples,
# data_path="/home/matthias/Data2/datasets/shapenet/matthias/disn/core",
data_path="/home/matthias/Data2/datasets/shapenet/occupancy_networks/ShapeNet/core",
# data_path="/net/rmc-gpu03/home_local/humt_ma/occupancy_networks/core",
res=args.res,
sample_distribution=args.sample_distribution,
sample_sigmas=args.sample_sigmas,
num_sample_points=2048,
batch_size=args.batch_size,
num_workers=cpu_count())
exp_name = 'i{}_dist-{}sigmas-{}v{}_m{}'.format('PC' + str(args.pc_samples) if args.pointcloud else 'Voxels',
''.join(str(e) + '_' for e in args.sample_distribution),
''.join(str(e) + '_' for e in args.sample_sigmas),
args.res, args.model)
trainer = training.Trainer(net, torch.device("cuda"), train_dataset, val_dataset, exp_name, optimizer=args.optimizer)
trainer.train_model(1500)
| 3,984 |
misc/abstract_class.py
|
decaun/easy-python-study
| 1 |
2169759
|
from abc import ABCMeta, abstractmethod, ABC
class abstract(metaclass=ABCMeta):
@abstractmethod
def method_name(self):
pass
class abstract_2(ABC):
@abstractmethod
def method_name(self):
pass
class inherit_abstract(abstract):
def method_name(self):
pass
class inherit_abstract_fail(abstract):
def method_incorrect_name(self):
pass
if __name__ == "__main__":
print("main")
x = inherit_abstract()
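    # instantiating inherit_abstract_fail raises a TypeError: it inherits from abstract but
    # never overrides the abstract method_name, so ABCMeta refuses to create an instance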
y = inherit_abstract_fail()
print(x)
# y=Test2()
# print(y)
| 548 |
cacheman/cacher.py
|
Jenyay/py_cache_manager
| 0 |
2171344
|
import os
import tempfile
from collections import defaultdict
from .cachewrap import CacheWrap, NonPersistentCache, PersistentCache
from .autosync import AutoSyncCache
DEFAULT_CACHEMAN = 'general_cacher'
def get_cache_manager(manager_name=None, base_cache_directory=None):
if manager_name is None:
# Don't grab this from default args in case someone changes DEFAULT_CACHEMAN
manager_name = DEFAULT_CACHEMAN
if manager_name not in _managers:
# Need name argument, so can't use defaultdict easily
_managers[manager_name] = CacheManager(manager_name, base_cache_directory)
return _managers[manager_name]
_managers = {} # Labeled with leading underscore to trigger del before module cleanup
class CacheManager():
def __init__(self, manager_name, base_cache_directory=None):
self.name = manager_name
self.cache_directory = os.path.join(base_cache_directory or tempfile.gettempdir(), self.name)
self.cache_by_name = {}
self.async_pid_cache = defaultdict(set) # Used for async cache tracking
def __del__(self):
self.save_all_cache_contents()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.save_all_cache_contents()
def retrieve_cache(self, cache_name):
'''
Loads or builds a cache using any registered post_process, custom_builder, and validator hooks.
If a cache has already been generated it will return the pre-loaded cache content.
'''
cache = self.cache_by_name.get(cache_name)
if cache is None:
return self.register_cache(cache_name)
return cache
def retrieve_raise(self, cache_name):
cache = self.cache_by_name.get(cache_name)
if cache is None:
raise KeyError("No cache found with name {}".format(cache_name))
return cache
def register_cache(self, cache_name, contents=None):
return self.register_custom_cache(cache_name, contents, persistent=True, autosync=True, nowrapper=False)
def register_custom_cache(self, cache_name, contents=None, persistent=True, autosync=True, nowrapper=False, **kwargs):
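        # precedence: explicit CacheWrap contents (or nowrapper=True) are used as-is, then
        # non-persistent caches, then auto-syncing persistent caches, then plain persistent caches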
if nowrapper or isinstance(contents, CacheWrap):
cache = contents
elif not persistent:
# Replace default pickle loader/saver/deleter
cache = NonPersistentCache(cache_name, cache_manager=self, contents=contents, **kwargs)
elif autosync:
cache = AutoSyncCache(cache_name, cache_manager=self, contents=contents, **kwargs)
else:
cache = PersistentCache(cache_name, cache_manager=self, contents=contents, **kwargs)
self.cache_by_name[cache_name] = cache
return self.retrieve_cache(cache_name)
def cache_registered(self, cache_name):
return cache_name in self.cache_by_name
def register_loader(self, cache_name, loader):
cache = self.retrieve_cache(cache_name)
cache.loader = loader
return cache
def register_builder(self, cache_name, builder):
cache = self.retrieve_cache(cache_name)
cache.builder = builder
return cache
def register_saver(self, cache_name, saver):
cache = self.retrieve_cache(cache_name)
cache.saver = saver
return cache
def register_deleter(self, cache_name, deleter):
cache = self.retrieve_cache(cache_name)
cache.deleter = deleter
return cache
def register_post_processor(self, cache_name, post_processor):
cache = self.retrieve_cache(cache_name)
cache.post_processor = post_processor
return cache
def register_pre_processor(self, cache_name, pre_processor):
cache = self.retrieve_cache(cache_name)
cache.pre_processor = pre_processor
return cache
def register_validator(self, cache_name, validator):
cache = self.retrieve_cache(cache_name)
cache.validator = validator
return cache
def register_dependent_cache(self, cache_name, dependent_cache):
cache = self.retrieve_cache(cache_name)
cache.add_dependent(dependent_cache)
return cache
def deregister_cache(self, cache_name, apply_to_dependents=False):
if not self.cache_registered(cache_name):
return
cache = self.retrieve_cache(cache_name)
cache.save(False)
if apply_to_dependents:
for dependent in cache._retrieve_dependent_caches():
self.deregister_cache(dependent.name, apply_to_dependents)
del self.cache_by_name[cache_name]
def deregister_all_caches(self):
for cache_name in list(self.cache_by_name.keys()):
self.deregister_cache(cache_name, False)
def save_cache_contents(self, cache_name, apply_to_dependents=False):
cache = self.retrieve_cache(cache_name)
cache.save(apply_to_dependents)
return cache
def save_all_cache_contents(self):
for cache_name in self.cache_by_name:
self.save_cache_contents(cache_name, False)
def delete_saved_cache_content(self, cache_name, apply_to_dependents=True):
'''
Does NOT delete memory cache -- use invalidate_and_rebuild_cache to delete both
'''
cache = self.retrieve_cache(cache_name)
cache.delete_saved_content(apply_to_dependents)
return cache
def delete_all_saved_cache_contents(self):
for cache_name in self.cache_by_name:
self.delete_saved_cache_content(cache_name, False)
def invalidate_cache(self, cache_name, apply_to_dependents=True):
cache = self.retrieve_cache(cache_name)
cache.invalidate(apply_to_dependents)
return cache
def invalidate_and_rebuild_cache(self, cache_name, apply_to_dependents=True):
cache = self.retrieve_cache(cache_name)
cache.invalidate_and_rebuild(apply_to_dependents)
return cache
def invalidate_and_rebuild_all_caches(self):
for cache_name in self.cache_by_name:
self.invalidate_and_rebuild_cache(cache_name, False)
def reload_cache(self, cache_name, apply_to_dependents=False):
cache = self.retrieve_cache(cache_name)
cache.load(apply_to_dependents)
return cache
def reload_all_caches(self):
for cache_name in self.cache_by_name:
self.reload_cache(cache_name, False)
def reload_or_rebuild_cache(self, cache_name, apply_to_dependents=False):
cache = self.retrieve_cache(cache_name)
cache.load_or_build(apply_to_dependents)
return cache
def reload_or_rebuild_all_caches(self):
for cache_name in self.cache_by_name:
self.reload_or_rebuild_cache(cache_name, False)
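# A minimal usage sketch with hypothetical names, using only calls defined in this module
# (persistence behaviour depends on the CacheWrap/PersistentCache defaults):
#
#   manager = get_cache_manager('example_manager')
#   manager.register_cache('settings', contents={'theme': 'dark'})
#   manager.save_cache_contents('settings')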
| 6,790 |
py/variaveis.py
|
Viniciusalopes/BhaskaraPython3
| 0 |
2169466
|
# ---------------------------------------------------------------------------------------
# License   : MIT - Copyright 2019 Viniciusalopes (Vovolinux) <<EMAIL>>
# Created on: 03/10/2019
# Project   : Bhaskara in Python - Mathematics and Statistics
# Purpose   : Classroom presentation
# Topics    : - The importance of comments in code
#             - Variables
#             - The type() function
#             - The print() function
#             - The str.format() function
# ---------------------------------------------------------------------------------------
# Legend has it that...
# Well, here goes...
print('Opa, mundo!')
# print() with character multiplication
print('-' * 30)
# ---------------------------------------------------------------------------------------
# Assigning values
nome = 'Vinicius'
idade = '43'
peso = '57.4'
# print() of variables
print(nome)
print(idade)
print(peso)
# print() of values returned by functions
# the type() function returns the type of the value stored in the variable
print(type(nome))
print(type(idade))
print(type(peso))
print('-' * 30)
# ---------------------------------------------------------------------------------------
# Assigning typed values
nome = 'Vinicius' # str
idade = 43 # int
peso = 57.4 # float
print(nome)
print(idade)
print(peso)
print(type(nome))
print(type(idade))
print(type(peso))
print('-' * 30)
# ---------------------------------------------------------------------------------------
# Converting variable types
nome = 'Vinicius' # str
idade = int('43') # int
peso = float('57.4') # float
print(nome)
print(idade)
print(peso)
print(type(nome))
print(type(idade))
print(type(peso))
print('-' * 30)
# ---------------------------------------------------------------------------------------
# concatenated print()
# strings and variables separated by commas
# a space is automatically added between the string and the variable
print('Nome :', nome, '\nIdade:', idade, '\nPeso :', peso)
print()  # the same as print('\n')
# formatted print()
# Python VARIABLES are names bound to OBJECTS; a string such as nome is an object of the
# 'str' CLASS, which in turn has methods (functions), attributes, etc.
# Example:
# CLASS: Home appliances
# OBJECT OF THE CLASS Home appliances: MICROWAVE OVEN
# -> Attributes: small, large, quiet, economical.
# -> Functions : heat, defrost, brown food,
#                or simply beep incessantly while we
#                try to prepare a class, just because we left
#                it open...
#
# str.format(): replaces the braces {} with the contents of the variables
# format() is a function of OBJECTS of the STRING class
print('Nome : {}\nIdade: {}\nPeso : {}'.format(nome, idade, peso))
print()
print('Nome : {}\nIdade: {}\nPeso : {:.2f}'.format(nome, idade, peso))
# str.isnumeric(): tests whether the variable's content can be converted to a
#                  type that stores numbers (int, float, etc.)
#
# 'not' is equivalent to the logical negation operator '!' (exclamation mark) in other languages.
print('-' * 30)
if not nome.isnumeric():
print('{} não pode ser convertido em um número.'.format(nome))
print('Tipo do valor da variável nome: {}'.format(type(nome)))
nome = '1234' # str
print('{} pode ser convertido em número.'.format(nome))
nome = int(nome)
print(type(nome))
# Next file: operadores_aritmeticos.py
| 3,463 |
hackerrank/StrangeGridAgain.py
|
0x8b/HackerRank
| 3 |
2168089
|
r, c = map(int, input().split())
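# every pair of rows covers ten consecutive integers: for odd r the row holds
# (r - 1) // 2 * 10 plus the offsets 0, 2, 4, 6, 8, and for even r the same base
# plus 1, 3, 5, 7, 9; the closed form below computes exactly that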
a = ((r - 1) // 2 * 10) + (c * 2 - 1 if r % 2 == 0 else (c - 1) * 2)
print(a)
| 118 |
helios/plato/py/tests/nrucache_test.py
|
debjyoti0891/map
| 44 |
2171217
|
# Test for NRUCache
# WARNING: This test is time-sensitive so debugging may be complicated. The cache should really use some kind of
# "time provider" that allows simulated advancement of time rather than the wall-clock timer.
import sys
from os import path
import time
sys.path.append(path.split(path.dirname(__file__))[0])
from plato.backend.common import NRUCache
NRUCache.EXPIRATION_DEFAULT_MS = 450 # short expiration for this test
c = NRUCache(4) # Capacity 4
assert(len(c) == 0)
assert (1 not in c)
assert (2 not in c)
assert (3 not in c)
assert (4 not in c)
c[1] = 1
c[2] = 2
c[3] = 3
c[4] = 4
c[5] = 5 # Will not remove [1] because its not expired
assert (1 in c)
c.expire(1) # Allow 1 to be replaced by 5 without waiting for the appropriate amount of time
c[5] = 5 # Will finally remove [1] because last write expired it
# 1 should have been evicted because of capacity
assert (1 not in c)
assert (2 in c)
assert (3 in c)
assert (4 in c)
assert (5 in c)
# Allow anything to be replaced now
c.expire_all()
c[2] = 2
c[6] = 6
assert (1 not in c)
assert (3 not in c)
assert (2 in c)
assert (4 in c)
assert (5 in c)
assert (6 in c)
assert (not c.is_expiring(2))
assert (not c.is_expiring(6))
c[4] = 4
c[5] = 6
# Nothing should be expiring since it was all touched recently
assert (not c.is_expiring(2))
assert (not c.is_expiring(4))
assert (not c.is_expiring(5))
assert (not c.is_expiring(6))
print('\nsleep...\n')
time.sleep(1)
# Touching [2] will mark all others as expiring since 1s has passed.
# When each other is touched here, they will be marked as non-expiring once again.
_ = c[2]
_ = c[5]
_ = c[6]
# Since [4] was not touched, it is still in expiring state
print('check that 4 is expiring')
assert (c.is_expiring(4))
print('\nsleep...\n')
time.sleep(0.2)
assert (2 in c)
assert (4 in c)
assert (5 in c)
assert (6 in c)
# Touch everyone again. Note that 4 will not be removed because enough time has not elapsed after marking as expired
_ = c[2]
assert (4 in c)
_ = c[4]
_ = c[5]
_ = c[6]
print('\nsleep...\n')
time.sleep(1)
assert (4 in c)
# Let 5 be marked as expiring since we don't touch it
_ = c[2]
_ = c[4]
_ = c[6]
assert (2 in c)
assert (4 in c)
assert (5 in c)
assert (6 in c)
assert (c.is_expiring(5))
print('\nsleep...\n')
time.sleep(1)
# Since 5 is not touched first here, it will be removed because we slept for more than the expiration interval after 5
# was marked as expiring
_ = c[2]
_ = c[4]
_ = c[6]
assert (2 in c)
assert (4 in c)
assert (5 not in c)
assert (6 in c)
assert(len(c) == 3)
| 2,565 |
stock-filters/Buildings/fountain.py
|
Chris-Drury/caMelGDMC
| 0 |
2171392
|
from Buildings.material import DIRT, BRICKS, WATER, AIR, FENCE, TORCH
fountain = {
"height": -1,
"building": [
[
[DIRT, DIRT, DIRT, DIRT, DIRT, DIRT],
[DIRT, BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], DIRT],
[DIRT, BRICKS["STONE"], WATER, WATER, BRICKS["STONE"], DIRT],
[DIRT, BRICKS["STONE"], WATER, WATER, BRICKS["STONE"], DIRT],
[DIRT, BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], DIRT],
[DIRT, DIRT, DIRT, DIRT, DIRT, DIRT]
],
[
[BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"]],
[BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"]],
[BRICKS["STONE"], BRICKS["STONE"], WATER, WATER, BRICKS["STONE"], BRICKS["STONE"]],
[BRICKS["STONE"], BRICKS["STONE"], WATER, WATER, BRICKS["STONE"], BRICKS["STONE"]],
[BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"]],
[BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"]]
],
[
[AIR, AIR, AIR, AIR, AIR, AIR],
[AIR, BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], AIR],
[AIR, BRICKS["STONE"], AIR, AIR, BRICKS["STONE"], AIR],
[AIR, BRICKS["STONE"], AIR, AIR, BRICKS["STONE"], AIR],
[AIR, BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], AIR],
[AIR, AIR, AIR, AIR, AIR, AIR]
],
[
[AIR, AIR, AIR, AIR, AIR, AIR],
[AIR, FENCE, AIR, AIR, FENCE, AIR],
[AIR, AIR, AIR, AIR, AIR, AIR],
[AIR, AIR, AIR, AIR, AIR, AIR],
[AIR, FENCE, AIR, AIR, FENCE, AIR],
[AIR, AIR, AIR, AIR, AIR, AIR]
],
[
[AIR, AIR, AIR, AIR, AIR, AIR],
[AIR, FENCE, AIR, AIR, FENCE, AIR],
[AIR, AIR, AIR, AIR, AIR, AIR],
[AIR, AIR, AIR, AIR, AIR, AIR],
[AIR, FENCE, AIR, AIR, FENCE, AIR],
[AIR, AIR, AIR, AIR, AIR, AIR]
],
[
[AIR, AIR, AIR, AIR, AIR, AIR],
[AIR, BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], AIR],
[AIR, BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], AIR],
[AIR, BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], AIR],
[AIR, BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], BRICKS["STONE"], AIR],
[AIR, AIR, AIR, AIR, AIR, AIR]
],
[
[AIR, AIR, AIR, AIR, AIR, AIR],
[AIR, TORCH["UP"], AIR, AIR, TORCH["UP"], AIR],
[AIR, AIR, AIR, AIR, AIR, AIR],
[AIR, AIR, AIR, AIR, AIR, AIR],
[AIR, TORCH["UP"], AIR, AIR, TORCH["UP"], AIR],
[AIR, AIR, AIR, AIR, AIR, AIR]
]
]
}
def generate_fountains():
return [fountain]
| 3,113 |
filedupes.py
|
Emetophobe/filedupes
| 0 |
2170672
|
#!/usr/bin/env python
# Copyright (C) 2019-2020 Emetophobe (<EMAIL>)
# https://github.com/Emetophobe/filedupes/
import os
import time
import hashlib
import argparse
import textwrap
from collections import defaultdict
# Default hashlib algorithm
DEFAULT_ALGORITHM = 'sha256'
# Default directory excludes
DEFAULT_EXCLUDE = ['RCS', 'CVS', 'tags', '.git', '.venv', '.hg', '.bzr', '_darcs', '__pycache__']
def main():
# Parse arguments
parser = argparse.ArgumentParser(description='Find duplicate files by comparing checksums.')
parser.add_argument('directory', help='top level directory to search')
parser.add_argument('-a', '--algorithm', help='hashing algorithm (default: %(default)s)',
default=DEFAULT_ALGORITHM)
parser.add_argument('-e', '--exclude', nargs='*', help='directories to exclude',
default=DEFAULT_EXCLUDE)
args = parser.parse_args()
# Make sure the directory exists
if not os.path.isdir(args.directory):
parser.error('Invalid directory: {}'.format(args.directory))
    # Make sure the algorithm is supported
if args.algorithm.lower() not in hashlib.algorithms_available:
supported = textwrap.fill(', '.join(sorted(hashlib.algorithms_available)), 70)
parser.error('Invalid algorithm. List of supported algorithms:\n\n{}'.format(supported))
# Find duplicate files
print('Searching for duplicates. This may take a while...', flush=True)
start_time = time.perf_counter()
dupes = find_dupes(args.directory, args.algorithm, args.exclude)
duration = time.perf_counter() - start_time
# Print results
print()
for digest, files in dupes.items():
print(digest)
for filename in files:
print(' ', filename)
print()
print('Found {} duplicate hashes in {:.2f} seconds.'.format(len(dupes), duration))
def find_dupes(directory, algorithm, exclude):
"""Create a dictionary of duplicate hashes (keys) and filenames (values). """
hashes = defaultdict(list)
for root, dirs, files in os.walk(os.path.abspath(directory), topdown=True):
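        # mutating dirs in place (dirs[:]) prunes the excluded directories from the walk,
        # which only takes effect because topdown=True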
dirs[:] = [d for d in dirs if d not in exclude]
for filename in files:
filename = os.path.join(root, filename)
try:
digest = get_hash(filename, algorithm)
except OSError as e:
print('Error reading file: {} ({})'.format(filename, e.strerror))
else:
hashes[digest].append(filename)
# Only return hashes with multiple filenames (dupes)
return {k: v for k, v in hashes.items() if len(v) > 1}
def get_hash(filename, algorithm):
""" Generate a file hash using the specified algorithm. """
hasher = hashlib.new(algorithm)
with open(filename, 'rb') as f:
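        # iter() with a b'' sentinel reads the file in 64 KiB chunks until EOF,
        # so large files are hashed without being loaded fully into memory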
for chunk in iter(lambda: f.read(65536), b''):
hasher.update(chunk)
return hasher.hexdigest()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('Aborted.')
| 3,039 |
rover_motor_controller/rover_motor_controller/lx16a/lx16a_consts.py
|
mgonzs13/ros2_rover
| 11 |
2171290
|
SERVO_ID_ALL = 0xfe
SERVO_FRAME_HEADER = 0x55
SERVO_FRAME_HEADER_STRING = "0x55"
# CMDS
SERVO_MOVE_TIME_WRITE = 1
SERVO_MOVE_TIME_READ = 2
SERVO_MOVE_TIME_WAIT_WRITE = 7
SERVO_MOVE_TIME_WAIT_READ = 8
SERVO_MOVE_START = 11
SERVO_MOVE_STOP = 12
SERVO_ID_WRITE = 13
SERVO_ID_READ = 14
SERVO_ANGLE_OFFSET_ADJUST = 17
SERVO_ANGLE_OFFSET_WRITE = 18
SERVO_ANGLE_OFFSET_READ = 19
SERVO_ANGLE_LIMIT_WRITE = 20
SERVO_ANGLE_LIMIT_READ = 21
SERVO_VIN_LIMIT_WRITE = 22
SERVO_VIN_LIMIT_READ = 23
SERVO_TEMP_MAX_LIMIT_WRITE = 24
SERVO_TEMP_MAX_LIMIT_READ = 25
SERVO_TEMP_READ = 26
SERVO_VIN_READ = 27
SERVO_POS_READ = 28
SERVO_OR_MOTOR_MODE_WRITE = 29
SERVO_OR_MOTOR_MODE_READ = 30
SERVO_LOAD_OR_UNLOAD_WRITE = 31
SERVO_LOAD_OR_UNLOAD_READ = 32
SERVO_LED_CTRL_WRITE = 33
SERVO_LED_CTRL_READ = 34
SERVO_LED_ERROR_WRITE = 35
SERVO_LED_ERROR_READ = 36
# ERRORS
SERVO_ERROR_OVER_TEMPERATURE = 1
SERVO_ERROR_OVER_VOLTAGE = 2
SERVO_ERROR_LOCKED_ROTOR = 4
# SERVOS
MOTOR_LEFT_FRONT = 1
MOTOR_LEFT_MIDDLE = 2
MOTOR_LEFT_BACK = 3
MOTOR_RIGHT_FRONT = 4
MOTOR_RIGHT_MIDDLE = 5
MOTOR_RIGHT_BACK = 6
SERVO_LEFT_FRONT = 7
SERVO_RIGHT_FRONT = 8
SERVO_LEFT_BACK = 9
SERVO_RIGHT_BACK = 10
| 1,163 |
backend/colleges/graphql/schema.py
|
cesko-digital/zacni-uc
| 4 |
2171848
|
import graphene
from colleges.graphql.types import CollegeObjectType, FacultyObjectType, CourseObjectType
from colleges.models import College, Faculty, Course
class Query(graphene.ObjectType):
# Colleges queries
colleges = graphene.List(CollegeObjectType)
college = graphene.Field(CollegeObjectType, pk=graphene.Int(required=True))
# Faculties queries
faculties = graphene.List(FacultyObjectType)
faculty = graphene.Field(FacultyObjectType, pk=graphene.Int(required=True))
# Courses queries
courses = graphene.List(CourseObjectType)
course = graphene.Field(CourseObjectType, pk=graphene.Int(required=True))
@staticmethod
def resolve_colleges(root, info):
return College.objects.all()
@staticmethod
def resolve_college(root, info, pk: int):
return College.objects.get(pk=pk)
@staticmethod
def resolve_faculties(root, info):
return Faculty.objects.all()
@staticmethod
def resolve_faculty(root, info, pk: int):
        return Faculty.objects.get(pk=pk)
@staticmethod
def resolve_courses(root, info):
return Course.objects.all()
@staticmethod
def resolve_course(root, info, pk: int):
return Course.objects.get(pk=pk)
| 1,247 |
python/interpret_community/shap/kwargs_utils.py
|
Nanthini10/interpret-community
| 338 |
2170516
|
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""Defines utilities for handling kwargs on SHAP-based explainers."""
from interpret_community.common.constants import ExplainParams
def _get_explain_global_kwargs(sampling_policy, method, include_local, batch_size):
"""Get the kwargs for explain_global.
:param sampling_policy: Optional policy for sampling the evaluation examples. See documentation on
SamplingPolicy for more information.
:type sampling_policy: interpret_community.common.SamplingPolicy
:param method: The explanation method used, e.g., shap_kernel, mimic, etc.
:type method: str
:param include_local: Whether a local explanation should be generated or only global
:type include_local: bool
:param batch_size: If include_local is False, specifies the batch size for aggregating
local explanations to global.
:type batch_size: int
:return: Args for explain_global.
:rtype: dict
"""
kwargs = {ExplainParams.METHOD: method,
ExplainParams.SAMPLING_POLICY: sampling_policy,
ExplainParams.INCLUDE_LOCAL: include_local,
ExplainParams.BATCH_SIZE: batch_size}
return kwargs
| 1,333 |
test/test_vendor/test_device_serdes.py
|
meetps/rhea
| 1 |
2170993
|
from __future__ import print_function
from __future__ import division
import myhdl
from myhdl import (Signal, ResetSignal, intbv, instance,
always, always_seq)
from rhea.system import Clock
from rhea.system import ticks_per_ns
from rhea.vendor import ClockManagement
from rhea.vendor import device_clock_mgmt
from rhea.vendor import SERDESInterface
from rhea.vendor import device_output_serdes
from rhea.vendor import device_input_serdes
def top_serdes_wrap(clockext, resetext,
sero_p, sero_n, seri_p, seri_n,
args=None):
"""
"""
clkmgmt = ClockManagement(clockext, reset=resetext,
output_frequencies=(125e6, 1e9))
clkmgmt.vendor = args.vendor
# @todo: add external_reset_sync module
| 800 |
spyder/plugins/toolbar/__init__.py
|
Earthman100/spyder
| 7,956 |
2170080
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
spyder.plugins.toolbar
======================
Toolbar Plugin.
"""
| 225 |
tests/package/test_artifact_metadata.py
|
pomes/valiant
| 2 |
2171676
|
"""Tests for ArtifactMetadataImpl.
Copyright (c) 2020 The Valiant Authors
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
from datetime import datetime
import pytest
from valiant.package import ArtifactMetadata, ArtifactMetadataImpl
@pytest.fixture
def basic_artifact() -> ArtifactMetadata:
"""Based on Flask 1.1.1."""
return ArtifactMetadataImpl( # noqa:DAR201
comment_text="No comment",
digests={
"md5": "b5cc35905a936f5f64e51421d1ebe29c",
"sha256": "45eb5a6fd193d6cf7e0cf5d8a5b31f83d5faae0293695626f539a823e93b13f6",
},
sha256_digest="45eb5a6fd193d6cf7e0cf5d8a5b31f83d5faae0293695626f539a823e93b13f6",
signed=True,
signature_url="https://files.pythonhosted.org/packages/9b/93/628509b8d5dc749656a9641f4caf13540e2cdec85276964ff8f43bbb1d3b/Flask-1.1.1-py2.py3-none-any.whl.asc", # noqa:B950
package_type="bdist_wheel",
python_version="py2.py3",
requires_python=[
">=2.7",
"!=3.0.*",
"!=3.1.*",
"!=3.2.*",
"!=3.3.*",
"!=3.4.*",
],
size=94457,
upload_time_iso_8601=datetime(2019, 7, 8, 18, 0, 28, 597456),
url="https://files.pythonhosted.org/packages/9b/93/628509b8d5dc749656a9641f4caf13540e2cdec85276964ff8f43bbb1d3b/Flask-1.1.1-py2.py3-none-any.whl", # noqa:B950
)
def test_basic_artifact_dict(basic_artifact: ArtifactMetadata) -> None:
"""Validate to_dict."""
d = basic_artifact.to_dict()
assert d["comment_text"] == "No comment"
def test_basic_artifact_json(basic_artifact: ArtifactMetadata) -> None:
"""Validate to_json."""
d = json.loads(basic_artifact.to_json())
assert d["comment_text"] == "No comment"
| 2,750 |
azure-iot-device/azure/iot/device/common/pipeline/pipeline_ops_http.py
|
dt-boringtao/azure-iot-sdk-python
| 0 |
2170740
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from . import PipelineOperation
class HTTPRequestAndResponseOperation(PipelineOperation):
"""
A PipelineOperation object which contains arguments used to connect to a server using the HTTP protocol.
This operation is in the group of HTTP operations because its attributes are very specific to the HTTP protocol.
"""
def __init__(self, method, path, headers, body, query_params, callback):
"""
        Initializer for HTTPRequestAndResponseOperation objects.
:param str method: The HTTP method used in the request
:param str path: The path to be used in the request url
:param dict headers: The headers to be used in the HTTP request
:param str body: The body to be provided with the HTTP request
:param str query_params: The query parameters to be used in the request url
:param Function callback: The function that gets called when this operation is complete or has failed.
The callback function must accept A PipelineOperation object which indicates the specific operation which
has completed or failed.
"""
super().__init__(callback=callback)
self.method = method
self.path = path
self.headers = headers
self.body = body
self.query_params = query_params
self.status_code = None
self.response_body = None
self.reason = None
| 1,728 |
pdetools/example/cde2d.py
|
jinanloubani/aTEAM
| 23 |
2171607
|
import numpy as np
import torch
import torch.nn as nn
from ..stepper import LinearTimeStepper, LinearSpectStepper
from ..init import initgen
__all__ = ['CDE', 'SingleCell1', 'SingleCell2']
# Convection diffusion equation
class _CDE(nn.Module):
@property
def dim(self):
return 2
@property
def timescheme(self):
return self._timescheme
@property
def spatialscheme(self):
return self._spatialscheme
@property
def coe(self):
return self._coe
def setcoe(self):
raise NotImplementedError
def __init__(self, max_dt=0.2e-3, mesh_size=(256,256), mesh_bound=((0,0),(1,1))):
super(_CDE, self).__init__()
self.max_dt = max_dt
self.mesh_size = np.array(mesh_size).copy()
self.mesh_bound = np.array(mesh_bound).copy()
dx0,dx1 = (self.mesh_bound[1]-self.mesh_bound[0])/self.mesh_size
assert abs(dx0-dx1)<1e-10
self.dx = dx0
xy = np.mgrid[self.mesh_bound[0][0]:self.mesh_bound[1][0]:(self.mesh_size[0]+1)*1j,
self.mesh_bound[0][1]:self.mesh_bound[1][1]:(self.mesh_size[1]+1)*1j]
xy = xy[:,:-1,:-1]
xy = np.transpose(xy, axes=[1,2,0])
xy = torch.from_numpy(xy)
self.setcoe(xy)
def forward(self, inputs, T, **kw):
return self.predict(inputs, T, **kw)
class CDE(_CDE, LinearTimeStepper):
def setcoe(self, xy):
coe = np.ndarray((3,3), dtype=object)
self._coe = coe
coe[0,0] = coe[1,1] = 0
coe[0,2] = coe[2,0] = 1/8
coe[0,1] = 0
coe[1,0] = nn.Parameter(torch.sin(2*np.pi*xy[...,1]))
self.coe10 = coe[1,0]
def __init__(self, max_dt=4e-5, mesh_size=(128,128), mesh_bound=((0,0),(1,1)), timescheme='rk2', spatialscheme='uw2'):
super(CDE, self).__init__(max_dt=max_dt, mesh_size=mesh_size, mesh_bound=mesh_bound)
self._timescheme = timescheme
self._spatialscheme = spatialscheme
class Heat(_CDE, LinearTimeStepper):
def setcoe(self, xy):
coe = np.ndarray((3,3), dtype=object)
self._coe = coe
coe[0,0] = coe[1,1] = coe[0,1] = coe[1,0] = 0
coe[0,2] = coe[2,0] = 1/8
def __init__(self, max_dt=4e-5, mesh_size=(128,128), mesh_bound=((0,0),(1,1)), timescheme='rk2', spatialscheme='uw2'):
super(Heat, self).__init__(max_dt=max_dt, mesh_size=mesh_size, mesh_bound=mesh_bound)
self._timescheme = timescheme
self._spatialscheme = spatialscheme
class _SingleCell1(_CDE):
def setcoe(self, xy):
coe = np.ndarray((3,3), dtype=object)
self._coe = coe
coe[0,0] = coe[1,1] = 0
coe[0,2] = coe[2,0] = 1/8
xy = xy/self.epsilon
xy = xy%1.0
coe[0,1] = 0
coe[1,0] = nn.Parameter(torch.sin(2*np.pi*xy[...,1]))
coe[1,0].data /= self.epsilon
self.coe10 = coe[1,0]
def __init__(self, max_dt, epsilon, cell_num, mesh_size=(256,256)):
mesh_bound = ((0,0),(cell_num*epsilon,cell_num*epsilon))
self.epsilon = epsilon
self.cell_num = cell_num
super(_SingleCell1, self).__init__(max_dt=max_dt, mesh_size=mesh_size, mesh_bound=mesh_bound)
class _SingleCell2(_CDE):
def setcoe(self, xy):
coe = np.ndarray((3,3), dtype=object)
self._coe = coe
coe[0,0] = coe[1,1] = 0
coe[0,2] = coe[2,0] = 1/8
xy = xy/self.epsilon
xy = xy%1.0
coe[0,1] = nn.Parameter(-(8*xy[...,1]-4)*torch.sin(2*np.pi*xy[...,1])-8*np.pi*(xy[...,1]-1)*xy[...,1]*torch.cos(2*np.pi*xy[...,1]))
coe[0,1].data /= self.epsilon*2*np.pi
coe[1,0] = nn.Parameter(-2*np.pi*torch.cos(2*np.pi*xy[...,0]))
coe[1,0].data /= self.epsilon*2*np.pi
self.coe01 = coe[0,1]
self.coe10 = coe[1,0]
def __init__(self, max_dt, epsilon, cell_num, mesh_size=(256,256)):
mesh_bound = ((0,0),(cell_num*epsilon,cell_num*epsilon))
self.epsilon = epsilon
self.cell_num = cell_num
super(_SingleCell2, self).__init__(max_dt=max_dt, mesh_size=mesh_size, mesh_bound=mesh_bound)
class SingleCell1(_SingleCell1, LinearTimeStepper):
def __init__(self, max_dt, epsilon, cell_num, mesh_size, timescheme='rk2', spatialscheme='uw2'):
super(SingleCell1, self).__init__(max_dt=max_dt, epsilon=epsilon, cell_num=cell_num, mesh_size=mesh_size)
self._timescheme = timescheme
self._spatialscheme = spatialscheme
class SingleCell2(_SingleCell2, LinearTimeStepper):
def __init__(self, max_dt, epsilon, cell_num, mesh_size, timescheme='rk2', spatialscheme='uw2'):
super(SingleCell2, self).__init__(max_dt=max_dt, epsilon=epsilon, cell_num=cell_num, mesh_size=mesh_size)
self._timescheme = timescheme
self._spatialscheme = spatialscheme
class SingleCell1Spect(_SingleCell1, LinearSpectStepper):
def __init__(self, max_dt, epsilon, cell_num, mesh_size, timescheme='rk2'):
super(SingleCell1Spect, self).__init__(max_dt=max_dt, epsilon=epsilon, cell_num=cell_num, mesh_size=mesh_size)
self._timescheme = timescheme
class SingleCell2Spect(_SingleCell2, LinearSpectStepper):
def __init__(self, max_dt, epsilon, cell_num, mesh_size, timescheme='rk2'):
super(SingleCell2Spect, self).__init__(max_dt=max_dt, epsilon=epsilon, cell_num=cell_num, mesh_size=mesh_size)
self._timescheme = timescheme
def test_SingleCell(cell_num=4, example=1, max_dt=1e-9, T=2e-7, epsilon=1/512):
import aTEAM.pdetools as pdetools
import aTEAM.pdetools.example.cde2d as cde2d
import torch
import matplotlib.pyplot as plt
import time
if torch.cuda.is_available():
device = 'cuda:0'
else:
device = None
if example == 1:
SingleCell = cde2d.SingleCell1
SingleCellSpect = cde2d.SingleCell1Spect
else:
SingleCell = cde2d.SingleCell2
SingleCellSpect = cde2d.SingleCell2Spect
batch_size = 2
h = plt.figure()
a0 = h.add_subplot(121)
a1 = h.add_subplot(122)
linpde0 = SingleCell(max_dt=max_dt, epsilon=epsilon, cell_num=cell_num, mesh_size=(32*cell_num,32*cell_num))
linpde1 = SingleCellSpect(max_dt=max_dt, epsilon=epsilon, cell_num=cell_num, mesh_size=(32*cell_num,32*cell_num))
linpde0.to(device=device)
linpde1.to(device=device)
x0 = pdetools.init.initgen(mesh_size=linpde0.mesh_size, freq=1, device=device, batch_size=batch_size)
x1 = x0
for i in range(1,21):
startt = time.time()
with torch.no_grad():
x0 = linpde0.predict(x0, T=T)
x1 = linpde1.predict(x1, T=T)
stopt = time.time()
print('elapsed-time={:.1f}, sup(|x0-x1|)={:.2f}'.format(stopt-startt, (x0-x1).abs().max().item()))
a0.clear()
a1.clear()
        xplot0 = (x0 if batch_size == 1 else x0[0]).detach().cpu()  # move to CPU for matplotlib
        xplot1 = (x1 if batch_size == 1 else x1[0]).detach().cpu()
b0 = a0.imshow(xplot0, cmap='jet')
b1 = a1.imshow(xplot1, cmap='jet')
a0.set_title('t={:.1e},max={:.2f},min={:.2f}'.format(i*T,x0.max(),x0.min()))
a1.set_title('t={:.1e},max={:.2f},min={:.2f}'.format(i*T,x1.max(),x1.min()))
if i > 1:
c0.remove()
c1.remove()
c0 = h.colorbar(b0, ax=a0)
c1 = h.colorbar(b1, ax=a1)
plt.pause(1e-3)
def test_CDE():
import aTEAM.pdetools as pdetools
import aTEAM.pdetools.example.cde2d as cde2d
import torch
import matplotlib.pyplot as plt
import time
if torch.cuda.is_available():
device = 'cuda:0'
else:
device = None
T = 1e-2
batch_size = 1
h = plt.figure()
a0 = h.add_subplot(121)
a1 = h.add_subplot(122)
linpde0 = cde2d.CDE()
linpde1 = cde2d.Heat()
linpde0.to(device=device)
linpde1.to(device=device)
linpde0.coe[0,2] = 0
linpde0.coe[2,0] = 0
linpde1.coe[0,2] = 1/4
linpde1.coe[2,0] = 1/16
init = pdetools.init.initgen(mesh_size=linpde0.mesh_size, freq=1, device=device, batch_size=batch_size)
x0 = init
x1 = init
for i in range(1,21):
startt = time.time()
with torch.no_grad():
x0 = linpde0.predict(x0, T=T)
x1 = linpde1.predict(x1, T=T)
stopt = time.time()
        print('elapsed-time={:.1f}'.format(stopt-startt))
a0.clear()
a1.clear()
        xplot0 = (x0 if batch_size == 1 else x0[0]).detach().cpu()  # move to CPU for matplotlib
        xplot1 = (x1 if batch_size == 1 else x1[0]).detach().cpu()
b0 = a0.imshow(xplot0, cmap='jet')
b1 = a1.imshow(xplot1, cmap='jet')
a0.set_title('t={:.1e},max={:.2f},min={:.2f}'.format(i*T,x0.max(),x0.min()))
a1.set_title('t={:.1e},max={:.2f},min={:.2f}'.format(i*T,x1.max(),x1.min()))
if i > 1:
c0.remove()
c1.remove()
c0 = h.colorbar(b0, ax=a0)
c1 = h.colorbar(b1, ax=a1)
plt.pause(1e-3)
| 8,789 |
2018_10_p2.py
|
Dementophobia/-advent-of-code-2018
| 0 |
2171842
|
from aoc import get_ints, read_file, timer
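# Commentary (added): part 2 asks for the waiting time. Each pass of the loop
# below advances every point by its velocity (one second per step) and stops
# once all points fit inside a 200x200 window, which is presumably the moment
# the message becomes readable; `result` then holds the elapsed seconds.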
@timer
def solve():
points = [get_ints(line) for line in read_file("10")]
result, found, size = 0, 0, 200
while found < len(points):
found = 0
for point in points:
point[0] += point[2]
point[1] += point[3]
if point[0] >= 0 and \
point[0] < size and \
point[1] >= 0 and \
point[1] < size:
found += 1
result += 1
return result
result = solve()
print(f"Solution: {result}")
| 560 |
send-auction-transactions.py
|
adamfeuer/google-measurement-protocol-utils
| 0 |
2169505
|
#!/usr/bin/env python
import csv
import uuid
import time
import argparse
from google_measurement_protocol import enhanced_item, enhanced_purchase, report
from prices import Money
def main():
parser = argparse.ArgumentParser(description='Send ecommerce transactions to Google Analytics')
parser.add_argument('--google-analytics-id', '-g', dest='google_analytics_id', default=None, help='Google Analytics ID')
parser.add_argument('--input-file', '-i', type=argparse.FileType('r'))
args = parser.parse_args()
google_analytics_id = args.google_analytics_id
input_file = args.input_file
with input_file as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
print(row)
client_id = row['userid']
transaction_id = ''
product_name = 'Auction Win'
product_price = 0
product_quantity = row['Transactions']
raw_transaction_amount = row['Revenue']
transaction_amount = raw_transaction_amount.replace('$', '')
cd1 = 'purchase'
url = row['Landing Page']
print(f"client_id:{client_id} url: {url} transaction_id: {transaction_id} product: {product_name}: {product_price} quantity: {product_quantity} transaction amount: {transaction_amount} cd1: {cd1}")
items = [
enhanced_item(product_name, Money(product_price, 'USD'), quantity=product_quantity)]
data = enhanced_purchase(transaction_id, items, Money(transaction_amount, 'USD'), url, cd1=cd1)
report(google_analytics_id, client_id, data)
time.sleep(0.1)
if __name__ == "__main__":
main()
| 1,683 |
examples/sorting/insertion_sort.py
|
kumarreddyn/python-examples
| 0 |
2170497
|
def insertion_sort(numbers):
print(numbers)
if len(numbers) > 1:
for i in range(1, len(numbers)):
k = i
while k > 0 and numbers[k] < numbers[k-1]:
temp = numbers[k]
numbers[k] = numbers[k-1]
numbers[k-1] = temp
k -= 1
data_list = [12, 11, 13, 5, 6]
insertion_sort(data_list)
print(data_list)
| 398 |
app.py
|
pamhinostroza/Web_Scraping_Challenge
| 0 |
2171533
|
#Import Dependencies
from flask import Flask, render_template, redirect
from flask_pymongo import PyMongo
import scrape_mars
import os
# Create an instance of Flask
app = Flask(__name__)
# Use PyMongo to establish Mongo connection
mongo = PyMongo(app, uri="mongodb://localhost:27017/mars_app")
# Route to render index.html template using data from Mongo
@app.route("/")
def home():
# Find one record of data from the mongo database
mars_table = mongo.db.mars_information.find_one()
return render_template("index.html", mars_information=mars_table)
# Route that will trigger the scrape function
@app.route("/scrape")
def scrape():
# Run the scrape functions
mars_table = mongo.db.mars_information
news_title, news_paragraph = scrape_mars.scrape_mars_news()
featured_img_url = scrape_mars.scrape_mars_image()
mars_facts = scrape_mars.scrape_mars_facts()
mars_hemispheres = scrape_mars.mars_hemispheres()
mars_information={
"news_title":news_title,
"news_paragraph":news_paragraph,
"featured_image_url":featured_img_url,
"mars_table":mars_facts,
"hemisphere_image_urls":mars_hemispheres
}
print(mars_information)
    mars_table.update_one({}, {"$set": mars_information}, upsert=True)
# Redirect back to home page
return redirect("/", code=302)
if __name__ == "__main__":
app.run(debug=True)
| 1,383 |
Exercicios/Ex059.py
|
RenanRibeiroDaSilva/Meu-Aprendizado-Python
| 2 |
2171504
|
""" Ex - 059 - Crie um programa que leia dois valores e mostre um menu como mostra o exemplo abaixo:
Seu programa deverá realizar a operação solicitada em cada caso.
[1] Somar.
[2] Multipicar.
[3] Maior
[4] Novos números
[5] Sair do programa"""
# Como eu fiz
# Cabeçalho:
print(f'{"":=^50}')
print(f'{"> MENU <":=^50}')
print(f'{"":=^50}')
# Get input from the user:
num_1 = float(input('Digite um valor.....: '))
num_2 = float(input('Digite mais um valor: '))
menu = 0
# Build a menu for the user:
while menu != 5:
print(f'{"":-^50}')
print(f'{"| Escolha uma das opções abaixo! |":^50}')
print(f'{"| [ 1 ] SOMA |":^50}')
print(f'{"| [ 2 ] MULTIPICAÇÃO |":^50}')
print(f'{"| [ 3 ] MAIOR VALOR |":^50}')
print(f'{"| [ 4 ] NOVOS VALORES |":^50}')
print(f'{"| [ 5 ] SAIR DO PROGRAMA |":^50}')
menu = int(input(' Qual opção...: '))
if menu == 1:
print('A Soma entre {} e {} é igual há {}'.format(num_1, num_2, num_1 + num_2))
elif menu == 2:
print('{} multiplicado por {} é igual há {}'.format(num_1, num_2, num_1 * num_2))
elif menu == 3:
if num_1 > num_2:
print('{} é maior do que {} !'.format(num_1, num_2))
else:
print('{} é maior do que {} !'.format(num_2, num_1))
elif menu == 4:
print('Informe novos valores: ')
num_1 = float(input('Digite um valor.....: '))
num_2 = float(input('Digite mais um valor: '))
elif menu == 5:
print('Finalizando programa...')
else:
print('Opção Inválida tente novamente: ')
print(f'{"":-^50}')
print('Fim')
# The teacher <NAME>'s version
"""
from time import sleep
n1 = int(input('Primeiro valor: '))
n2 = int(input('Segundo valor: '))
opção = 0
while opção != 5:
print(''' [ 1 ] somar
[ 2 ] multiplicar
[ 3 ] maior
[ 4 ] novos números
[ 5 ] sair do programa ''')
opção = int(input('>>>>> Qual é a sua opção? '))
if opção == 1:
soma = n1 + n2
print('A soma entre {} + {} é {}'.format(n1, n2, soma))
elif opção == 2:
produto = n1 * n2
print('O resultado de {} x {} é {}'.format(n1, n2, produto))
elif opção == 3:
if n1 > n2:
maior = n1
else:
maior = n2
        print('Entre {} e {} o maior valor é {}'.format(n1, n2, maior))
elif opção == 4:
print('Informe os números novamente:')
n1 = int(input('Primeiro valor: '))
n2 = int(input('Segundo valor: '))
elif opção == 5:
print('Finalizando...')
else:
print('Opção invalida. Tente novamente')
print('=-=' * 10)
sleep(2)
print('Fim do programa! Volte sempre!')
"""
| 2,919 |
lintcode/1833.py
|
jianershi/algorithm
| 1 |
2171595
|
"""
1833. pen box
https://www.lintcode.com/problem/pen-box/description
"""
import sys


class Solution:
"""
@param boxes: number of pens for each box
@param target: the target number
@return: the minimum boxes
"""
def minimumBoxes(self, boxes, target):
# write your code here
n = len(boxes)
min_length = sys.maxsize
min_left = self.min_length_subarray(boxes, target)
min_right = self.min_length_subarray(boxes[::-1], target)[::-1]
for i in range(n - 1):
min_length = min(min_length, min_left[i] + min_right[i + 1])
return min_length if min_length != sys.maxsize else -1
"""
return -1 if not exist
"""
def min_length_subarray(self, boxes, target):
if not boxes:
return sys.maxsize
left = 0
right = 0
subarray_sum = 0
min_left = [sys.maxsize] * len(boxes)
while right < len(boxes):
subarray_sum += boxes[right]
while subarray_sum > target:
subarray_sum -= boxes[left]
left += 1
# if right == 0 and subarray_sum == target:
# min_left[right] = 1
if subarray_sum != target:
min_left[right] = min_left[right - 1]
if subarray_sum == target:
min_left[right] = min(min_left[right - 1], right - left + 1)
right += 1
return min_left
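

# Minimal usage sketch (the sample values below are assumptions for illustration,
# not taken from the LintCode statement):
if __name__ == '__main__':
    # the two disjoint runs [1, 2] and [2, 1] each sum to target 3, total length 4
    print(Solution().minimumBoxes([1, 2, 2, 1], 3))  # expected output: 4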
| 1,440 |
adversarial.py
|
Algue-Rythme/anomaly
| 0 |
2171811
|
import gin
import tensorflow as tf
from tensorflow.keras.optimizers import Adam, SGD, RMSprop
class PartialModel:
def __init__(self, model, layer_index):
self.model = model
self.layer_index = layer_index
def __call__(self, x):
for layer in self.model.layers[:self.layer_index]:
x = layer(x)
return x
@tf.function
def metric_entropy(x, fixed, scale, margin):
""" See https://arxiv.org/pdf/1908.11184.pdf for the construction.
"""
x = tf.reshape(x, shape=[x.shape[0], 1, -1])
fixed = tf.reshape(fixed, shape=[1, fixed.shape[0], -1])
distances = x - fixed
distances = tf.reduce_sum(distances ** 2., axis=2)
# distances = tf.minimum(distances, margin*margin) # ONLY TO CROP PENALTY
distances = distances / (scale**2.) # normalize to avoid errors
distances = tf.minimum(distances, 6.) # avoid saturation (security)
similarities = tf.math.exp(distances * (-1.))
typicality = tf.reduce_mean(similarities, axis=1)
entropy = -tf.reduce_mean(tf.math.log(typicality))
return entropy
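# Rough usage sketch (shapes and values are assumptions for illustration, not
# from the original experiments): for a batch of 2-D points the entropy is high
# when the points spread out and low when they collapse onto each other.
#   pts = tf.random.uniform([16, 2])
#   h = metric_entropy(pts, pts, scale=1.0, margin=1.0)  # scalar tf.Tensor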
@tf.function
def ball_repulsion(x, fixed, scale, margin):
"""Average distance from each 'x' to 'fixed'.
"""
x = tf.reshape(x, shape=[x.shape[0], 1, -1])
fixed = tf.reshape(fixed, shape=[1, fixed.shape[0], -1])
distances = x - fixed
distances = tf.reduce_sum(distances ** 2., axis=2)
distances = tf.minimum(distances, margin*margin) # avoid saturation
distances = distances / (scale**2.) # just to normalize gradient
sum_dists = tf.reduce_sum(distances, axis=1) # square distance instead of hinge
# because numerical instabilities otherwise :(
return tf.reduce_mean(sum_dists)
def renormalize_grads(grads):
return [tf.math.l2_normalize(grad) for grad in grads]
def uniform_noise(x_0, scale):
coef = 0.3 # a bit farther away
min_x0 = tf.reduce_min(x_0, axis=0, keepdims=True)
max_x0 = tf.reduce_max(x_0, axis=0, keepdims=True)
delta = max_x0 - min_x0
min_x0 = min_x0 - coef * delta
max_x0 = max_x0 + coef * delta
return tf.random.uniform(x_0.shape, min_x0, max_x0)
@tf.function
def frontiere_distance(y, margin):
return tf.math.abs(y + margin) # to be minimized
@gin.configurable
def generate_adversarial(model, x_0, scale, margin, true_negative,
max_iter=gin.REQUIRED,
w_weight=gin.REQUIRED,
border =gin.REQUIRED,
h_x_0 =gin.REQUIRED,
h_x =gin.REQUIRED,
mult =gin.REQUIRED,
logloss =gin.REQUIRED,
reversedlogloss=gin.REQUIRED):
learning_rate = (mult * margin) / max_iter
if true_negative:
learning_rate = learning_rate * model._get_coef()
optimizer = SGD(learning_rate=learning_rate) # no momentum required due to smooth optimization landscape
# x_init is perturbed x_0, with atmost 10% of a gradient step (which can be admittely quite high)
x_init = x_0 + 0.1*learning_rate*tf.math.l2_normalize(tf.random.uniform(x_0.shape, -1., 1.))
x = tf.Variable(initial_value=x_init, trainable=True)
for _ in range(max_iter):
with tf.GradientTape() as tape:
try:
y = model(x)
except tf.python.framework.errors_impl.InvalidArgumentError as e:
norm = tf.reduce_sum(x ** 2.)
print('adversarial', norm, x)
raise e
if logloss: # binary cross-entropy
zeros = tf.zeros([int(y.shape[0]),1]) # seek frontiere
ones = tf.ones([int(y.shape[0]),1])
if reversedlogloss:
zeros, ones = ones, zeros
if true_negative: # where f is already negative, add examples
ce = tf.nn.sigmoid_cross_entropy_with_logits(zeros+0.5, y)
else: # otherwise f is positive but really shouldn't (most of the time), we remove those parts
ce = tf.nn.sigmoid_cross_entropy_with_logits(ones, y)
adversarial_score = tf.reduce_mean(ce)
if reversedlogloss:
adversarial_score = -adversarial_score
else: # wasserstein bro: sign is flipped because minimization of loss instead maximization of cost
if true_negative:
adversarial_score = tf.reduce_mean(y) # seek x of f(x) negative
else:
adversarial_score = -tf.reduce_mean(y) # seek x of f(x) positive
loss = w_weight*adversarial_score
if (h_x_0 + h_x + border) > 0.:
fidelity = -h_x_0 * ball_repulsion(x, x_0, scale, margin) # avoid true positive, irrelevant
dispersion = -h_x * metric_entropy(x, x, scale, margin) # regularization to cover space
frontiere_score = border * frontiere_distance(y, margin)
loss = loss + dispersion + fidelity + frontiere_score # minimize loss
grad_f_x = tape.gradient(loss, [x])
grad_f_x = renormalize_grads(grad_f_x) # keep good learning rate
optimizer.apply_gradients(zip(grad_f_x,[x]))
return x.value()
"""
Two strategies with Binary Cross Entropy:
1) Maximize Error on Wrong Labels
Maximize Error on Label 1 (an attack, catch f negative)
Maximize Error on Label 0 (support, catch f positive)
2) Minimize Error on True Labels
Minimize Error on Label 0 (a "soft" attack, catch f negative)
Minimize Error on Label 1 (support, catch f positive)
For now we implemented Strategy 2) since it is more natural to minimize a loss
But strategy 1) may have some potential (warning NaN)
"""
@gin.configurable
def complement_distribution(model, x_0, scale, margin,
uniform =gin.REQUIRED,
symmetric=gin.REQUIRED):
if uniform:
return uniform_noise(x_0, scale)
x_false_positive = generate_adversarial(model, x_0, scale, margin, true_negative=False)
if not symmetric:
return x_false_positive
x_true_negative = generate_adversarial(model, x_0, scale, margin, true_negative=True)
return tf.concat(values=[x_false_positive, x_true_negative], axis=0)
| 6,366 |
tests/integration/test_code_deploy.py
|
mmcdermo/RedLeader
| 0 |
2171723
|
import unittest
import random
import json
import time
import pkg_resources
import botocore.exceptions
import redleader
from redleader.cluster import Cluster, OfflineContext, AWSContext
from redleader.resources import S3BucketResource, SQSQueueResource, CodeDeployEC2InstanceResource, CodeDeployDeploymentGroupResource, ReadWritePermission
from redleader.managers import CodeDeployManager
from util import cleanup_buckets
class TestCodeDeploy(unittest.TestCase):
def setUp(self):
self._context = AWSContext(aws_profile="testing")
cleanup_buckets(self._context.get_client('s3'), 'testbucketname')
def tearDown(self):
cleanup_buckets(self._context.get_client('s3'), 'testbucketname')
pass
def test_code_deploy_cluster(self):
context = self._context
# Configure our cluster
cluster = Cluster("testClusterClass", context)
bucket_name = "testbucketname%s" % 5
s3Bucket = S3BucketResource(context, bucket_name)
sqsQueue = SQSQueueResource(context, "redleaderTestQueue")
deploymentGroup = CodeDeployDeploymentGroupResource(context,
"my_application_name",
"my_deployment_group")
ec2Instance = CodeDeployEC2InstanceResource(
context,
deploymentGroup,
permissions=[ReadWritePermission(s3Bucket),
ReadWritePermission(sqsQueue)],
storage=10,
instance_type="t2.nano",
)
cluster.add_resource(sqsQueue)
cluster.add_resource(s3Bucket)
cluster.add_resource(ec2Instance)
cluster.add_resource(deploymentGroup)
template = cluster.cloud_formation_template()
for sub in template['Resources']:
print("ID: %s" % sub)
print(json.dumps(template['Resources'][sub], indent=4))
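        # NOTE (added): the early return below stops the test right after the
        # CloudFormation template is printed; the deploy/SQS assertions further
        # down are currently unreachable, presumably left disabled on purpose.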
return
# Delete as a preemptive cleanup step
cluster.blocking_delete(verbose=True)
# Deploy cluster
x = cluster.blocking_deploy(verbose=True)
print("Deployed. Sleeping for 15.")
time.sleep(15)
# Perform a code deploy deployment
client = context.get_client('codedeploy')
manager = CodeDeployManager(context)
resource_package = redleader
resource_path = '/'.join(('test_resources', 'code_deploy_app'))
code_deploy_path = pkg_resources.resource_filename("redleader", resource_path)
deployment_id = manager.create_deployment("my_application_name",
"my_deployment_group",
#deploymentGroup.get_id(),
path=code_deploy_path,
bucket_name=bucket_name)
print("Code deploy succeeded with deployment id %s. Sleeping for 30s" % \
deployment_id)
time.sleep(30)
sqsClient = context.get_client("sqs")
queues = sqsClient.list_queues(QueueNamePrefix="redleaderTestQueue")
self.assertEqual(len(queues['QueueUrls']), 1)
queue_url = queues['QueueUrls'][0]
messages = sqsClient.receive_message(QueueUrl=queue_url)
if 'Messages' not in messages:
print(messages)
self.assertEqual(0, "No messages key in response")
for message in messages['Messages']:
print(message)
print(sqsClient.delete_message(
QueueUrl=queue_url,
ReceiptHandle=message['ReceiptHandle']
))
self.assertEqual(len(messages['Messages']), 1)
self.assertEqual(messages['Messages'][0]['Body'], 'message_from_ec2_server')
# Delete cluster
cluster.blocking_delete(verbose=True)
| 3,904 |
{{cookiecutter.project_name_kebab}}/tests/test_{{cookiecutter.project_name_snake}}.py
|
gekorob/cookiecutter-basic-pylib
| 0 |
2169964
|
from {{ cookiecutter.project_name_snake }} import {{ cookiecutter.project_name_snake }}
def test_sample():
assert False
| 125 |
pgcli/packages/pgspecial/tests/test_specials.py
|
johshoff/pgcli
| 82 |
2171529
|
from dbutils import dbtest
@dbtest
def test_slash_d(executor):
results = executor('\d')
title = None
rows = [('public', 'tbl1', 'table', 'postgres'),
('public', 'tbl2', 'table', 'postgres'),
('public', 'vw1', 'view', 'postgres')]
headers = ['Schema', 'Name', 'Type', 'Owner']
status = 'SELECT 3'
expected = [title, rows, headers, status]
assert results == expected
@dbtest
def test_slash_dn(executor):
"""List all schemas."""
results = executor('\dn')
title = None
rows = [('public', 'postgres'),
('schema1', 'postgres'),
('schema2', 'postgres')]
headers = ['Name', 'Owner']
status = 'SELECT 3'
expected = [title, rows, headers, status]
assert results == expected
@dbtest
def test_slash_dt(executor):
"""List all tables in public schema."""
results = executor('\dt')
title = None
rows = [('public', 'tbl1', 'table', 'postgres'),
('public', 'tbl2', 'table', 'postgres')]
headers = ['Schema', 'Name', 'Type', 'Owner']
status = 'SELECT 2'
expected = [title, rows, headers, status]
assert results == expected
@dbtest
def test_slash_dT(executor):
"""List all datatypes."""
results = executor('\dT')
title = None
rows = [('public', 'foo', None)]
headers = ['Schema', 'Name', 'Description']
status = 'SELECT 1'
expected = [title, rows, headers, status]
assert results == expected
@dbtest
def test_slash_df(executor):
results = executor('\df')
title = None
rows = [('public', 'func1', 'integer', '', 'normal')]
headers = ['Schema', 'Name', 'Result data type', 'Argument data types',
'Type']
status = 'SELECT 1'
expected = [title, rows, headers, status]
assert results == expected
| 1,801 |
4_merge_road_usages_hours.py
|
IntroDS2017/SteamingPlayers
| 0 |
2171891
|
import pandas as pd
import numpy as np
def flatten_list(to_flat):
"""
Flattens 2D-list to 1d
:param to_flat: List to flatten
:return: 1D list
"""
return [row for rows in to_flat for row in rows]
def sum_over_car_counts_of_rows_sharing_aika(grouped_roads):
"""
Sums car-amounts of given rows.
:param grouped_roads: Roads that are supposed to share (piste/nimi, year, and suunta).
:return: First road of the roads, with summed 'autot'
"""
roads = grouped_roads[1]
result = roads.iloc[0].copy() # copies the first row as the result field
for col in ['ha', 'pa', 'ka', 'ra', 'la', 'mp', 'rv', 'autot']:
result[col] = roads[col].sum()
return result
def handle_roads_by_id_and_vuosi(grouped_roads):
roads = grouped_roads[1]
return map(sum_over_car_counts_of_rows_sharing_aika, roads.groupby('aika'))
def handle_roads_by_id(grouped_roads):
# grouped_rows[0] = list of "groupping value" (in this case 'piste' as previously grouped with that value)
# grouped_rows[1] = "grouped rows (= road_usages_rows)"
roads = grouped_roads[1]
result = map(handle_roads_by_id_and_vuosi, roads.groupby('vuosi'))
return flatten_list(result)
def main(road_usages_data_path):
"""
    Merges hours in the road usages CSV.
    :param road_usages_data_path: String, where the file is loaded from.
    :return: New dataframe. Rows whose time falls within the same hour are summed; both 'suunta's (directions) are summed together as well.
"""
roads = pd.read_csv(road_usages_data_path)
roads['aika'] = roads['aika'].apply(lambda x: int(x / 100) + 1) # set for example each 700, 715, 730, and 745 to 8, for summing over them
result = map(handle_roads_by_id, roads.groupby('piste')) # piste = unique ID that stands for given road. Could use also 'nimi' here.
flat_result = flatten_list(result)
return pd.DataFrame(flat_result)
if __name__ == '__main__':
pd.options.mode.chained_assignment = None # default='warn'
load_path = "data/3_road_usages.csv"
save_path = "data/4_road_usages.csv"
df = main(load_path)
df = df.drop('suunta', 1)
df.insert(loc = 0, column = 'ru-index', value = np.arange(len(df)))
df.to_csv(save_path, index = False) # do not write index column in the file
print(df)
| 2,298 |
msghandle/stk/main.py
|
RaenonX/Jelly-Bot-API
| 5 |
2170490
|
from typing import List
from msghandle.models import LineStickerMessageEventObject, HandledMessageEvent, HandledMessageEventText
from strres.msghandle import HandledResult
from .info import process_display_info
from .autoreply import process_auto_reply
def handle_line_sticker_event(e: LineStickerMessageEventObject) -> List[HandledMessageEvent]:
if e.is_test_event:
return [HandledMessageEventText(content=HandledResult.TestSuccessLineSticker)]
handle_fn = [process_display_info]
if e.channel_model.config.enable_auto_reply:
handle_fn.append(process_auto_reply)
for fn in handle_fn:
responses = fn(e)
if responses:
return responses
return []
| 714 |
Network/python/MisProject/socketTCP/client.py
|
omidrk/MultiSensInteractive
| 0 |
2170048
|
import socket, queue, select, sys
from time import time, sleep
from random import randint
import json
"""
999:MESSAGE
"""
to_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
to_server.connect(('192.168.1.81', 50001))
inputs = [to_server]
outputs = [to_server]
command_queue = queue.Queue()
HEADER_LEN = 5
SENSOR_LEN = 506
COMMAND_LEN = 250
i = 0
def random_message():
value = randint(2, 5)
state = randint(0,1)
return json.dumps({"fan" : [value, state]})
while True:
readable, writable, exceptional = select.select(inputs, outputs, inputs)
for s in readable:
if s == to_server:
            data = s.recv(SENSOR_LEN + HEADER_LEN + 1)
if not data: break
#print(data)
for s in writable:
if s == to_server:
i += 1
if i % 10 == 0:
MESSAGE = random_message()
msg = f"{len(MESSAGE):>{HEADER_LEN}}:" + f"{MESSAGE:<{COMMAND_LEN}}"
s.send(bytes(msg, 'utf8'))
for s in exceptional:
inputs.remove(s)
if s in outputs:
outputs.remove(s)
print(f"REMOVED: {s}")
s.close()
| 1,176 |
view_breadcrumbs/constants.py
|
codacy-badger/django-view-breadcrumbs
| 29 |
2171672
|
from django.utils.translation import gettext_lazy as _
LIST_VIEW_SUFFIX = _("list")
CREATE_VIEW_SUFFIX = _("create")
UPDATE_VIEW_SUFFIX = _("update")
DELETE_VIEW_SUFFIX = _("delete")
DETAIL_VIEW_SUFFIX = _("detail")
| 217 |
test/test_api_tracer.py
|
formazione/Cyberbrain
| 2 |
2171064
|
from cyberbrain import Binding, Symbol
def test_api_tracer(tracer, test_server):
tracer.start()
a = 1
tracer.stop()
assert tracer.events == [
Binding(lineno=6, target=Symbol("a"), value="1", sources=set())
]
test_server.assert_frame_sent("test_api_tracer")
| 293 |
wsi/WSISettings.py
|
mstekel/bertwsi
| 26 |
2171735
|
from collections import namedtuple
WSISettings = namedtuple('WSISettings', ['n_represents', 'n_samples_per_rep', 'cuda_device', 'debug_dir',
'disable_tfidf', 'disable_lemmatization', 'run_name', 'patterns',
'min_sense_instances', 'bert_model',
'max_batch_size', 'prediction_cutoff', 'max_number_senses',
])
DEFAULT_PARAMS = WSISettings(
n_represents=15,
n_samples_per_rep=20,
cuda_device=1,
debug_dir='debug',
disable_lemmatization=False,
disable_tfidf=False,
patterns=[('{pre} {target} (or even {mask_predict}) {post}', 0.4),
('{pre} {target_predict} {post}', 0.4)],
# (pattern,weight): each of these patterns will produce a prediction state.
# the weighted sum of them will be matmul'ed for a distribution over substitutes
# patterns=[('{pre} {target_predict} {post}', 0.5)], # - just predict on first token, no patterns
run_name='test-run',
max_number_senses=7,
min_sense_instances=2,
# sense clusters that dominate less than this number of samples
# would be remapped to their closest big sense
max_batch_size=10,
prediction_cutoff=200,
bert_model='bert-large-uncased'
)
| 1,331 |
cirtorch/geometry/depth.py
|
Tarekbouamer/Image-Retrieval-for-Image-Based-Localization
| 3 |
2170815
|
import torch
import torch.nn.functional as F
from .camera.perspective import project_points, unproject_points
from .conversions import normalize_pixel_coordinates
from .linalg import transform_points
from cirtorch.utils.grid import create_meshgrid
from cirtorch.filters.filter import spatial_gradient
def depth_to_3d(depth, camera_matrix, normalize_points=False):
"""
Compute a 3d point per pixel given its depth value and the camera intrinsics.
"""
    if not (len(depth.shape) == 4 and depth.shape[-3] == 1):
        raise ValueError(f"Input depth must have a shape (B, 1, H, W). Got: {depth.shape}")
    if not (len(camera_matrix.shape) == 3 and camera_matrix.shape[-2:] == (3, 3)):
        raise ValueError(f"Input camera_matrix must have a shape (B, 3, 3). "
                         f"Got: {camera_matrix.shape}.")
# create base coordinates grid
batch_size, _, height, width = depth.shape
points_2d = create_meshgrid(height, width, normalized_coordinates=False) # 1xHxWx2
points_2d = points_2d.to(depth.device).to(depth.dtype)
# depth should come in Bx1xHxW
points_depth = depth.permute(0, 2, 3, 1) # 1xHxWx1
# project pixels to camera frame
camera_matrix_tmp = camera_matrix[:, None, None] # Bx1x1x3x3
points_3d = unproject_points(points_2d, points_depth, camera_matrix_tmp, normalize=normalize_points) # BxHxWx3
return points_3d.permute(0, 3, 1, 2) # Bx3xHxW
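# Rough usage sketch (tensor shapes are assumptions for illustration):
#   depth = torch.rand(1, 1, 240, 320)      # Bx1xHxW depth map
#   K = torch.eye(3).unsqueeze(0)           # Bx3x3 camera intrinsics
#   xyz = depth_to_3d(depth, K)             # Bx3xHxW back-projected points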
def depth_to_normals(depth, camera_matrix, normalize_points=False):
"""
Compute the normal surface per pixel.
"""
    if not (len(depth.shape) == 4 and depth.shape[-3] == 1):
        raise ValueError(f"Input depth must have a shape (B, 1, H, W). Got: {depth.shape}")
    if not (len(camera_matrix.shape) == 3 and camera_matrix.shape[-2:] == (3, 3)):
        raise ValueError(f"Input camera_matrix must have a shape (B, 3, 3). "
                         f"Got: {camera_matrix.shape}.")
# compute the 3d points from depth
xyz = depth_to_3d(depth, camera_matrix, normalize_points) # Bx3xHxW
# compute the pointcloud spatial gradients
gradients = spatial_gradient(xyz) # Bx3x2xHxW
# compute normals
a, b = gradients[:, :, 0], gradients[:, :, 1] # Bx3xHxW
normals = torch.cross(a, b, dim=1) # Bx3xHxW
return F.normalize(normals, dim=1, p=2)
def warp_frame_depth(image_src, depth_dst, src_trans_dst, camera_matrix, normalize_points=False):
"""
Warp a tensor from a source to destination frame by the depth in the destination.
Compute 3d points from the depth, transform them using given transformation, then project the point cloud to an
image plane.
"""
    if not len(image_src.shape) == 4:
        raise ValueError(f"Input image_src must have a shape (B, D, H, W). Got: {image_src.shape}")
    if not (len(depth_dst.shape) == 4 and depth_dst.shape[-3] == 1):
        raise ValueError(f"Input depth_dst must have a shape (B, 1, H, W). Got: {depth_dst.shape}")
    if not (len(src_trans_dst.shape) == 3 and src_trans_dst.shape[-2:] == (3, 3)):
        raise ValueError(f"Input src_trans_dst must have a shape (B, 3, 3). "
                         f"Got: {src_trans_dst.shape}.")
    if not (len(camera_matrix.shape) == 3 and camera_matrix.shape[-2:] == (3, 3)):
        raise ValueError(f"Input camera_matrix must have a shape (B, 3, 3). "
                         f"Got: {camera_matrix.shape}.")
# unproject source points to camera frame
points_3d_dst = depth_to_3d(depth_dst, camera_matrix, normalize_points) # Bx3xHxW
# transform points from source to destination
points_3d_dst = points_3d_dst.permute(0, 2, 3, 1) # BxHxWx3
# apply transformation to the 3d points
points_3d_src = transform_points(src_trans_dst[:, None], points_3d_dst) # BxHxWx3
# project back to pixels
    camera_matrix_tmp = camera_matrix[:, None, None]  # Bx1x1x3x3
points_2d_src = project_points(points_3d_src, camera_matrix_tmp) # BxHxWx2
# normalize points between [-1 / 1]
height, width = depth_dst.shape[-2:]
points_2d_src_norm = normalize_pixel_coordinates(points_2d_src, height, width) # BxHxWx2
return F.grid_sample(image_src, points_2d_src_norm, align_corners=True)
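# Rough usage sketch (shapes are assumptions for illustration; the transform is
# taken to be a homogeneous matrix mapping destination coordinates to source):
#   image_src = torch.rand(1, 3, 240, 320)      # BxDxHxW source image
#   depth_dst = torch.rand(1, 1, 240, 320)      # Bx1xHxW destination depth
#   src_trans_dst = torch.eye(4).unsqueeze(0)   # Bx4x4 (assumed convention)
#   K = torch.eye(3).unsqueeze(0)               # Bx3x3 intrinsics
#   warped = warp_frame_depth(image_src, depth_dst, src_trans_dst, K)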
| 4,234 |
Locators/signinLocator.py
|
MikeSell/eyko_demo_tests
| 0 |
2171698
|
from selenium.webdriver.common.by import By
class SigninLocator:
# Signin page elements
signin_link = (By.LINK_TEXT, 'Sign in')
email_textbox = (By.XPATH, "//input[@type='email']")
password_textbox = (By.XPATH, "//input[@type='password']")
signin_button = (By.XPATH, "//button[@type='submit']")
| 316 |
codeforces.com/1529A/solution.py
|
zubtsov/competitive-programming
| 0 |
2171036
|
for t in range(int(input())):
array_len = int(input())
array = list(map(int, input().split()))
min_elem = array[0]
num_of_min_elems = 1
for i in range(1, array_len):
if array[i] < min_elem:
min_elem = array[i]
num_of_min_elems = 1
elif array[i] == min_elem:
num_of_min_elems += 1
print(array_len - num_of_min_elems)
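# Commentary (added): for each test case the loop prints the number of elements
# that are strictly greater than the minimum, i.e. array_len minus the
# multiplicity of the smallest value.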
| 395 |
myspiderdic.py
|
turodj/python3-src
| 0 |
2171635
|
# -*- coding: UTF-8 -*-
# Crawl the BT Ant (btmayi.me) site. Usage: mayispider.py <search keyword> <start page> <end page>
from urllib.request import urlopen
from urllib.request import urlretrieve
from bs4 import BeautifulSoup
from collections import OrderedDict
import socket,sys,os
import re
import urllib.request
import urllib
import sys,time
socket.setdefaulttimeout(120)  # set the access timeout to 120 seconds
# function that fetches the titles and magnet links
def get_magres(input_url):
    # disguise the request as a normal browser visit
req=urllib.request.Request(input_url,headers=
{'User-Agent':'Mozilla/5.0 (Windows NT 6.1;WOW64;rv:23.0) Gecko/20100101 Firefox/23.0'})
    # open the page
r1 = urllib.request.urlopen(req)
soup1= BeautifulSoup(r1.read(),"html.parser")
print (soup1.title)
    #print(soup1.prettify())  # dump the whole page
    [script.extract() for script in soup1.find_all('script')]  # remove script tags
    # write out a complete document
#fp=open("/Users/dongjian/spiderdoc/testhtml.html","w")
#fp.write(soup1.prettify())
#fp.close()
avnamelist=[]
avlinklist=[]
avinfoall=[]
    # find all result entries
avinfoall =soup1.find_all("div",{"class":"search-item"})
for avinfo in avinfoall:
avtitle=avinfo.find("div",class_="item-list")
        avtitlestr=avtitle.get_text(strip=True)  # get the text content with whitespace stripped
avnamelist.append(avtitlestr)
#print("name = %s"%avtitlestr)
        # get the magnet link
avmag=avinfo.find("a",href=re.compile(r'^(\s)*magnet'))
avlink=avmag.get("href")
avlinklist.append(avlink)
#print("link = %s"%avlink)
    return avnamelist,avlinklist  # end of function: return the title list and the magnet-link list
if __name__=='__main__':
    # main program starts here
    # encode the (possibly Chinese) query string
    snametemp=sys.argv[1]  # the query is passed as the first argument
    sname=urllib.parse.quote(snametemp)  # percent-encode the parameter
    beginpage=int(sys.argv[2])  # start page
    endpage=int(sys.argv[3])  # end page
    # prepare the output file
    basepath="/Users/dongjian/spiderdoc/"
    finename=basepath+sys.argv[1]+".txt"  # the first argument forms the file name
targetfile=open(finename,"a")
    page=beginpage  # start searching from the first requested page
getavlist=[]
getavmaglist=[]
avlist=[]
searchedpage=0
avdict=OrderedDict()
    # loop over every page
while page <= endpage :
url="http://www.btany.com/search/%s-size-desc-%d"%(sname,page) #搜索第page页
print("begin search %s\n"%url)
getavlist.clear()
getavmaglist.clear()
avlist.clear()
avdict.clear()
        getavlist,getavmaglist=get_magres(url)  # fetch resources: returns the title list and magnet-link list
print("avres=%d,avmag=%d"%(len(getavlist),len(getavmaglist)))
targetfile.write("\nsearch: url:%s\n\n"%url)
#print(getavlist)
#print(getavmaglist)
        # write the link that belongs to each title
        avdict=OrderedDict(zip(getavlist,getavmaglist))  # build an ordered dict, de-duplicating at the same time
        # write each title and its download link
namesum=0
for key in avdict.keys():
targetfile.write("%s\n%s\n\n"%(key,avdict[key]))
namesum+=1
if namesum%5==0:
targetfile.write("\n")
targetfile.write("--------maglink list-------\n")
        # write the links line by line, with a blank line every 5 entries
#print("write %d magnet"%len(getavmaglist))
avsum=0
for key in avdict.keys():
targetfile.write("%s\n"%avdict[key])
avsum += 1
if avsum%5 == 0 :
targetfile.write("\n")
        targetfile.flush()  # flush buffered content to the file
page += 1
searchedpage += 1
print(".......sleep 1 sec......")
        time.sleep(1)  # sleep 1 second to avoid an IP ban
else:
print("共查询了 %d 页"%searchedpage)
        targetfile.close()
print("get link over")
| 3,058 |
src/aiohttp_micro/web/middlewares/tracing.py
|
clayman083/aiohttp-micro
| 1 |
2170540
|
from typing import List, Optional
from aiohttp import web
from aiozipkin.helpers import make_context
from aiohttp_micro.web.middlewares import Handler
def tracing_middleware_factory(exclude_routes: Optional[List[str]] = None):
exclude: List[str] = []
if exclude_routes:
exclude = exclude_routes
@web.middleware
async def tracing_middleware(request: web.Request, handler: Handler) -> web.Response:
context = make_context(request.headers)
route_name = request.match_info.route.name
if context and route_name not in exclude:
tracer = request.app["tracer"]
span = tracer.join_span(context)
host = request.headers.get("Host", None)
with tracer.new_child(span.context) as child_span:
child_span.name(f"{request.method.upper()} {request.path}")
child_span.kind("SERVER")
child_span.tag("http.path", request.path)
child_span.tag("http.method", request.method.upper())
if host:
child_span.tag("http.host", host)
try:
response = await handler(request)
child_span.tag("http.status_code", str(response.status))
except web.HTTPException as e:
child_span.tag("http.status_code", str(e.status))
raise
child_span.tag("http.response.size", response.content_length)
else:
response = await handler(request)
return response
return tracing_middleware
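

# Rough wiring sketch (names are assumptions for illustration): register the
# factory like any other aiohttp middleware and store an aiozipkin tracer under
# the "tracer" key, since the middleware reads request.app["tracer"].
#   app = web.Application(
#       middlewares=[tracing_middleware_factory(exclude_routes=["health"])])
#   app["tracer"] = tracer  # aiozipkin tracer created during startup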
| 1,600 |
skyportal/tests/frontend/test_frontpage.py
|
jialin-wu-02/skyportal
| 0 |
2169904
|
import uuid
import pytest
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
import skyportal
from skyportal.tests import api
def test_source_list(driver, user, public_source, private_source):
driver.get(f"/become_user/{user.id}") # TODO decorator/context manager?
assert 'localhost' in driver.current_url
driver.get('/')
simbad_class = public_source.altdata['simbad']['class']
driver.wait_for_xpath("//div[contains(@title,'connected')]")
driver.wait_for_xpath('//h2[contains(text(), "Sources")]')
driver.wait_for_xpath(f'//a[text()="{public_source.id}"]')
driver.wait_for_xpath(f'//td[text()="{simbad_class}"]')
driver.wait_for_xpath_to_disappear(f'//a[text()="{private_source.id}"]')
el = driver.wait_for_xpath('//button[contains(.,"Next Page")]')
assert not el.is_enabled()
el = driver.wait_for_xpath('//button[contains(.,"Previous Page")]')
assert not el.is_enabled()
def test_source_filtering_and_pagination(driver, user, public_group, upload_data_token):
obj_id = str(uuid.uuid4())
for i in range(205):
status, data = api('POST', 'sources',
data={'id': f'{obj_id}_{i}',
'ra': 234.22,
'dec': -22.33,
'redshift': 3,
'altdata': {'simbad': {'class': 'RRLyr'}},
'transient': False,
'ra_dis': 2.3,
'group_ids': [public_group.id]},
token=upload_data_token)
assert status == 200
assert data['data']['id'] == f'{obj_id}_{i}'
driver.get(f"/become_user/{user.id}") # TODO decorator/context manager?
assert 'localhost' in driver.current_url
driver.get('/')
driver.wait_for_xpath("//div[contains(@title,'connected')]")
driver.wait_for_xpath('//h2[contains(text(), "Sources")]')
driver.wait_for_xpath('//td[text()="RRLyr"]')
# Pagination
next_button = driver.wait_for_xpath('//button[contains(.,"Next Page")]')
prev_button = driver.wait_for_xpath('//button[contains(.,"Previous Page")]')
assert next_button.is_enabled()
assert not prev_button.is_enabled()
driver.scroll_to_element_and_click(next_button)
time.sleep(0.5)
assert prev_button.is_enabled()
next_button.click()
time.sleep(0.5)
assert not next_button.is_enabled()
prev_button.click()
time.sleep(0.5)
assert next_button.is_enabled()
prev_button.click()
time.sleep(0.5)
assert not prev_button.is_enabled()
# Jump to page
jump_to_page_input = driver.wait_for_xpath("//input[@name='jumpToPageInputField']")
jump_to_page_input.clear()
jump_to_page_input.send_keys('3')
jump_to_page_button = driver.wait_for_xpath('//button[contains(.,"Jump to page:")]')
jump_to_page_button.click()
time.sleep(0.5)
#driver.wait_for_xpath('//div[contains(text(), "Displaying 1-100")]')
assert prev_button.is_enabled()
assert not next_button.is_enabled()
jump_to_page_input.clear()
jump_to_page_input.send_keys('1')
jump_to_page_button.click()
time.sleep(0.5)
assert next_button.is_enabled()
assert not prev_button.is_enabled()
# Source filtering
assert next_button.is_enabled()
obj_id = driver.wait_for_xpath("//input[@name='sourceID']")
obj_id.clear()
obj_id.send_keys('aaaa')
submit = driver.wait_for_xpath("//button[contains(.,'Submit')]")
driver.scroll_to_element_and_click(submit)
time.sleep(1)
assert not next_button.is_enabled()
def test_jump_to_page_invalid_values(driver):
driver.get('/')
jump_to_page_input = driver.wait_for_xpath("//input[@name='jumpToPageInputField']")
jump_to_page_input.clear()
jump_to_page_input.send_keys('abc')
jump_to_page_button = driver.wait_for_xpath('//button[contains(.,"Jump to page:")]')
driver.scroll_to_element_and_click(jump_to_page_button)
driver.wait_for_xpath('//div[contains(.,"Invalid page number value")]')
def test_skyportal_version_displayed(driver):
driver.get('/')
driver.wait_for_xpath(f"//div[contains(.,'SkyPortal v{skyportal.__version__}')]")
| 4,278 |
worf/settings.py
|
gundotio/worf
| 0 |
2171438
|
from django.conf import settings
WORF_API_NAME = getattr(settings, "WORF_API_NAME", "Worf API")
WORF_API_ROOT = getattr(settings, "WORF_API_ROOT", "/api/")
WORF_BROWSABLE_API = getattr(settings, "WORF_BROWSABLE_API", True)
WORF_DEBUG = getattr(settings, "WORF_DEBUG", settings.DEBUG)
| 288 |
github/migrations/0020_auto_20201004_1321.py
|
h3nnn4n/git-o-matic-9k
| 0 |
2171736
|
# Generated by Django 3.1.2 on 2020-10-04 13:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('github', '0019_auto_20201004_1254'),
]
operations = [
migrations.AddField(
model_name='developer',
name='following',
field=models.ManyToManyField(related_name='reverse_following', to='github.Developer'),
),
migrations.AlterField(
model_name='developer',
name='followers',
field=models.ManyToManyField(related_name='reverse_followers', to='github.Developer'),
),
]
| 644 |
src/confseq/predmix.py
|
WannabeSmith/confseq
| 1 |
2171612
|
import math
from typing import Callable, Sequence, Tuple, Union
import numpy as np
from confseq.betting_strategies import lambda_predmix_eb
from confseq.misc import get_running_intersection, get_ci_seq
def predmix_upper_cs(
x: Sequence[float],
v: Sequence[float],
lambdas_fn: Callable[[Sequence[float]], Sequence[float]],
psi_fn: Callable[[Sequence[float]], Sequence[float]],
alpha: float = 0.05,
running_intersection: bool = False,
N: Union[int, None] = None,
) -> Sequence[float]:
"""
Predictable mixture upper confidence sequence
Parameters
----------
x : Sequence[float]
Observations in [0, 1]
v : Sequence[float]
Variance increment (1 for Hoeffding, (x - muhat_{t-1})^2 for empbern)
lambdas_fn : Callable[[Sequence[float]], Sequence[float]]
Function to produce lambda values
psi_fn : Callable[[Sequence[float]], Sequence[float]]
psi function
alpha : float, optional
Significance level, by default 0.05
running_intersection : bool, optional
Should the running intersection be taken?, by default False
N : Union[int, None], optional
Population size if sampling WoR, by default None
Returns
-------
Sequence[float]
Upper confidence sequence
"""
x = np.array(x)
t = np.arange(1, len(x) + 1)
S_t = np.cumsum(x)
S_tminus1 = np.append(0, S_t[0 : (len(x) - 1)])
if N is not None:
Zstar = S_tminus1 / (N - t + 1)
Wstar = (t - 1) / (N - t + 1)
else:
Zstar = 0
Wstar = 0
lambdas = lambdas_fn(x)
psi = psi_fn(lambdas)
margin = (np.log(1 / alpha) + np.cumsum(v * psi)) / np.cumsum(lambdas * (1 + Wstar))
weighted_mu_hat_t = np.cumsum(lambdas * (x + Zstar)) / np.cumsum(
lambdas * (1 + Wstar)
)
u = weighted_mu_hat_t + margin
u = np.minimum(u, 1)
return np.minimum.accumulate(u) if running_intersection else u
def predmix_empbern_upper_cs(
x: Sequence[float],
alpha: float = 0.05,
truncation: float = 1 / 2,
running_intersection: bool = False,
N: Union[int, None] = None,
fixed_n: Union[int, None] = None,
) -> Sequence[float]:
"""
Predictable mixture empirical Bernstein upper confidence sequence
Parameters
----------
x : Sequence[float]
Observations in [0, 1]
alpha : float, optional
Significance level, by default 0.05
truncation : float, optional
Level at which to truncate lambda, by default 1/2
running_intersection : bool, optional
Should the running intersection be taken?, by default False
N : Union[int, None], optional
Population size if sampling WoR, by default None
fixed_n : Union[int, None], optional
Fixed time to optimize bound for (if CI desired), by default None
Returns
-------
Sequence[float]
Upper confidence sequence
"""
t = np.arange(1, len(x) + 1)
mu_hat_t = np.cumsum(x) / t
mu_hat_tminus1 = np.append(0, mu_hat_t[0 : (len(x) - 1)])
v = np.power(x - mu_hat_tminus1, 2)
return predmix_upper_cs(
x,
v=v,
lambdas_fn=lambda y: lambda_predmix_eb(
y, truncation=truncation, alpha=alpha, fixed_n=fixed_n
),
psi_fn=lambda lambdas: -np.log(1 - lambdas) - lambdas,
alpha=alpha,
running_intersection=running_intersection,
N=N,
)
def predmix_hoeffding_upper_cs(
x: Sequence[float],
alpha: float = 0.05,
truncation: float = 1,
running_intersection: bool = False,
N: Union[int, None] = None,
fixed_n: Union[int, None] = None,
) -> Sequence[float]:
"""
Predictable mixture Hoeffding upper confidence sequence
Parameters
----------
x : Sequence[float]
Observations in [0, 1]
alpha : float, optional
Significance level, by default 0.05
truncation : float, optional
Level at which to truncate lambda, by default 1
running_intersection : bool, optional
Should the running intersection be taken?, by default False
N : Union[int, None], optional
Population size if sampling WoR, by default None
fixed_n : Union[int, None], optional
Fixed time to optimize bound for (if CI desired), by default None
Returns
-------
Sequence[float]
Upper confidence sequence
"""
t = np.arange(1, len(x) + 1)
if fixed_n is not None:
lambdas_fn = lambda y: np.repeat(
np.sqrt(8 * np.log(1 / alpha) / fixed_n), len(x)
)
else:
lambdas_fn = lambda y: np.minimum(
np.sqrt(8 * np.log(1 / alpha) / (t * np.log(1 + t))), truncation
)
return predmix_upper_cs(
x,
v=1,
lambdas_fn=lambdas_fn,
psi_fn=lambda lambdas: np.power(lambdas, 2) / 8,
alpha=alpha,
running_intersection=running_intersection,
N=N,
)
def predmix_hoeffding_cs(
x: Sequence[float],
alpha: float = 0.05,
truncation: float = 1,
running_intersection: bool = False,
N: Union[int, None] = None,
fixed_n: Union[int, None] = None,
) -> Tuple[Sequence[float], Sequence[float]]:
"""
Predictable mixture Hoeffding confidence sequence
Parameters
----------
x : Sequence[float]
Observations in [0, 1]
alpha : float, optional
Significance level, by default 0.05
truncation : float, optional
Level at which to truncate lambda, by default 1
running_intersection : bool, optional
Should the running intersection be taken?, by default False
N : Union[int, None], optional
Population size if sampling WoR, by default None
fixed_n : Union[int, None], optional
Fixed time to optimize bound for (if CI desired), by default None
Returns
-------
Tuple[Sequence[float], Sequence[float]]
Confidence sequence
"""
upper_cs = predmix_hoeffding_upper_cs(
x,
alpha=alpha / 2,
truncation=truncation,
running_intersection=running_intersection,
N=N,
fixed_n=fixed_n,
)
lower_cs = 1 - predmix_hoeffding_upper_cs(
1 - x,
alpha=alpha / 2,
truncation=truncation,
running_intersection=running_intersection,
N=N,
fixed_n=fixed_n,
)
return lower_cs, upper_cs
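# Rough usage sketch (synthetic data; values are assumptions for illustration):
#   x = np.random.binomial(1, 0.3, size=1000)           # observations in [0, 1]
#   lower, upper = predmix_hoeffding_cs(x, alpha=0.05)
#   # (lower[t], upper[t]) covers the true mean uniformly over time with
#   # probability at least 1 - alpha, so the final pair brackets the mean 0.3.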
def predmix_empbern_cs(
x: Sequence[float],
alpha: float = 0.05,
truncation: float = 1 / 2,
running_intersection: bool = False,
N: Union[int, None] = None,
fixed_n: Union[int, None] = None,
) -> Tuple[Sequence[float], Sequence[float]]:
"""
Predictable mixture empirical Bernstein confidence sequence
Parameters
----------
x : Sequence[float]
Observations in [0, 1]
alpha : float, optional
Significance level, by default 0.05
truncation : float, optional
Level at which to truncate lambda, by default 1
running_intersection : bool, optional
Should the running intersection be taken?, by default False
N : Union[int, None], optional
Population size if sampling WoR, by default None
fixed_n : Union[int, None], optional
Fixed time to optimize bound for (if CI desired), by default None
Returns
-------
Tuple[Sequence[float], Sequence[float]]
Confidence sequence
"""
u = predmix_empbern_upper_cs(
x=x,
alpha=alpha / 2,
truncation=truncation,
running_intersection=running_intersection,
N=N,
fixed_n=fixed_n,
)
l = 1 - predmix_empbern_upper_cs(
x=1 - x,
alpha=alpha / 2,
truncation=truncation,
running_intersection=running_intersection,
N=N,
fixed_n=fixed_n,
)
return l, u
def predmix_hoeffding_ci(
x: Sequence[float],
alpha: float = 0.05,
N: Union[int, None] = None,
running_intersection: bool = True,
):
l_cs, u_cs = predmix_hoeffding_cs(
x,
alpha=alpha,
truncation=math.inf,
running_intersection=running_intersection,
N=N,
fixed_n=len(x),
)
return l_cs[-1], u_cs[-1]
def predmix_hoeffding_ci_seq(
x: Sequence[float],
times: Sequence[int],
alpha: float = 0.05,
N: Union[int, None] = None,
running_intersection: bool = True,
parallel=False,
):
def ci_fn(x):
return predmix_hoeffding_ci(
x,
alpha=alpha,
N=N,
running_intersection=running_intersection,
)
return get_ci_seq(x, ci_fn, times=times, parallel=parallel)
def predmix_empbern_ci(
x: Sequence[float],
alpha: float = 0.05,
truncation: float = 1 / 2,
N: Union[int, None] = None,
running_intersection: bool = True,
):
    l_cs, u_cs = predmix_empbern_cs(
        x,
        alpha=alpha,
        truncation=truncation,
        running_intersection=running_intersection,
        N=N,
        fixed_n=len(x),
    )
return l_cs[-1], u_cs[-1]
def predmix_empbern_ci_seq(
x: Sequence[float],
times: Sequence[int],
alpha: float = 0.05,
truncation: float = 1 / 2,
N: Union[int, None] = None,
running_intersection: bool = True,
parallel=False,
):
def ci_fn(x):
return predmix_empbern_ci(
x,
alpha=alpha,
N=N,
running_intersection=running_intersection,
truncation=truncation,
)
return get_ci_seq(x, ci_fn, times=times, parallel=parallel)
| 9,484 |
sisa/settings.py
|
ramosfm/sisa
| 0 |
2171318
|
import os
# credentials provided by SISA
# load them in local_settings
USER_SISA = os.environ.get('USER_SISA', '')
PASS_SISA = os.environ.get('PASS_SISA', '')
| 160 |
example/myapp/admin.py
|
Britefury/django-fine-uploader
| 36 |
2171748
|
import os
import shutil
from django import forms
from django.db import models
from django.db.models import FileField
from django.contrib import admin
from django.core.cache import cache
from django.core.files.uploadedfile import SimpleUploadedFile
# Class only used for denotation.
from django.http.request import HttpRequest
from .models import FineFile
from django_fine_uploader import widgets
@admin.register(FineFile)
class FineFileAdmin(admin.ModelAdmin):
formfield_overrides = {
models.FileField: {
'widget': widgets.FineUploaderWidget(attrs={'admin': True, 'itemLimit': 1})
},
}
class Media:
js = (
'//ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js',
)
def fineuploader_setting(self, request: HttpRequest):
post_info = request.POST.dict()
if type(self.formfield_overrides.get(FileField).get('widget')) is widgets.FineUploaderWidget:
model_fields = self.model._meta.fields
file_fields = {}
file_fields_name = []
for field in model_fields:
                if field.get_internal_type() == 'FileField':
file_fields_name.append(field.name)
if request.method == 'POST':
print(file_fields_name)
for name in file_fields_name:
file_uploader = cache.get(request.POST.get(name))
if file_uploader is None:
return
# raise forms.ValidationError("There is no file in the field.")
file_path = file_uploader.storage.path(file_uploader.real_path)
post_info[name] = SimpleUploadedFile(
file_uploader.filename,
open(file_path, 'rb').read()
)
request.POST = post_info
folder_path = file_uploader.storage.path(file_uploader.file_path)
try:
shutil.rmtree(folder_path)
except (OSError, PermissionError):
pass
# return post_info
def add_view(self, request, form_url='', extra_context=None):
self.fineuploader_setting(request)
return super(type(self), self).add_view(request, form_url, extra_context)
def change_view(self, request, object_id, form_url='', extra_context=None):
self.fineuploader_setting(request)
return super(type(self), self).change_view(request, object_id, form_url, extra_context)
| 2,574 |
inflateorg/inflateorg.py
|
xiki-tempula/inflateorg
| 0 |
2170095
|
"""
inflateorg.py
Inflate or shrink the membrane to resolve clash between membrane and protein.
Handles the primary functions
"""
import os
import shutil
from subprocess import call
from pkg_resources import resource_filename
import MDAnalysis as mda
import numpy as np
os.environ["GMX_MAXBACKUP"] = "-1"
gromacs = '/usr/local/gromacs/2018.8/bin/gmx'
mdp = resource_filename(__name__, 'data/minim.mdp')
grompp = '{gromacs} grompp -f minim.mdp -c {gro} -p {topol} -o em.tpr -maxwarn {maxwarn} -r {gro}'
mdrun = '{gromacs} mdrun -deffnm em'
trjconv_pbc = 'echo 0 | {gromacs} trjconv -f em.gro -s em.tpr -o em.gro -pbc mol'
trjconv_check = 'echo 0 | {gromacs} trjconv -f pre_inflation.gro -s em.tpr -o em.gro -pbc mol'
class InflateORG():
def __init__(self, start_file='start.gro', topol='topol.top', center='protein', mobile='not protein', sep=None,
scaling_factor = 0.95, dim = [1,1,0], cutoff=1, maxwarn=0):
'''
:param start_file: The coordinate file for the InflateAny program.
:param topol: The topology file compatible with the gromacs program
:param center: The center of the inflation which is not modified
        :param mobile: The peripheral selection which will be expanded and shrunk
        :param sep: Define how to separate the peripheral selection.
        :param scaling_factor: The factor of inflation applied at each iteration
        :param dim: The dimensions to scale along the x, y and z axes. Default is [1, 1, 0] (scale x and y only).
:param cutoff: Cutoff distance where two particles are considered as separate.
'''
self.start_file = start_file
self.topol = topol
self.center = center
self.mobile = mobile
self.sep = sep
self.scaling_factor = scaling_factor
self.dim = np.array(dim)
self.cutoff =cutoff
self.maxwarn = maxwarn
self.sanity_check()
self.inflate_system()
self.shrink_system()
def sanity_check(self):
'''
Check if the input is correct.
'''
u = mda.Universe(self.start_file)
u.select_atoms('({}) or ({})'.format(self.center, self.mobile)).write('pre_inflation.gro')
try:
call(grompp.format(gromacs=gromacs, gro='pre_inflation.gro', topol=self.topol, maxwarn=self.maxwarn), shell=True)
call(trjconv_check.format(gromacs=gromacs), shell=True)
shutil.move('em.gro', 'pre_inflation.gro')
except:
print('Make sure the mdp file (minim.mdp), the topology file ({}) and the input coordinate file ({}) is correct.'.format(
self.topol, self.start_file
))
os.mkdir('InflateAny')
os.remove('em.tpr')
shutil.move('pre_inflation.gro', 'InflateAny/pre_inflation.gro')
shutil.copy('minim.mdp', 'InflateAny/minim.mdp')
os.chdir('InflateAny')
self.topol = '../' + self.topol
    def separate_molecule(self, u):
        '''
        Separate the mobile selection into different entities.
        :param u: input universe
        :return: A list of the atom groups which will be scaled.
        '''
        if self.sep is None:
            # By default the separation is based on residue id
            mobile_parts = u.select_atoms(self.mobile).residues
            return [residue.atoms for residue in mobile_parts]
        else:
            return [u.select_atoms(part) for part in self.sep]
    def inflate(self, u, scaling_factor):
        '''
        Scale the mobile parts away from or towards the center.
        :param u: the input MDAnalysis Universe to which scaling will be applied
        '''
        dimensions = u.dimensions
        center_of_geometry = u.select_atoms(self.center).center_of_geometry()
        u.atoms.translate(-center_of_geometry)
        for part in self.separate_molecule(u):
            vector = part.center_of_geometry()
            part.translate((vector * (scaling_factor - 1)) * self.dim)
        u.atoms.translate(center_of_geometry * scaling_factor)
        dimensions[:3] = dimensions[:3] * scaling_factor
        u.dimensions = dimensions
        return u
def inflate_system(self):
'''
Inflate the system.
:return:
'''
u = mda.Universe('pre_inflation.gro')
repeat = True
count = 0
print('Start inflating the {}'.format(self.mobile))
while repeat:
count += 1
u = self.inflate(u, 1 / self.scaling_factor)
check = u.select_atoms('{} and around {} ({})'.format(self.mobile, self.cutoff, self.center))
            print('Iteration {}:'.format(count))
            print('Atoms within {}A of {}:'.format(self.cutoff, self.center))
            print(check)
            if len(check) == 0:
                print('No atom in contact with {}.'.format(self.center))
                repeat = False
print('Begin the shrinking process.')
u.atoms.write('inflated.gro')
call(grompp.format(gromacs=gromacs, gro='inflated.gro', topol=self.topol, maxwarn=self.maxwarn), shell=True)
self.mdrun(gromacs=gromacs)
call(trjconv_pbc.format(gromacs=gromacs), shell=True)
shutil.copy('em.gro', 'inflated_em.gro')
self.count = count
def shrink_system(self):
for i in range(self.count):
            print('Iteration {}:'.format(i))
u = mda.Universe('em.gro')
u = self.inflate(u, self.scaling_factor)
u.atoms.write('shrinked_{}.gro'.format(i))
call(grompp.format(gromacs=gromacs, gro='shrinked_{}.gro'.format(i), topol=self.topol, maxwarn=self.maxwarn), shell=True)
self.mdrun(gromacs=gromacs)
call(trjconv_pbc.format(gromacs=gromacs), shell=True)
shutil.copy2('em.gro', 'equilibrated.gro')
shutil.copy2('em.gro', '../equilibrated.gro')
os.chdir('../')
def mdrun(self, gromacs=gromacs, additional=''):
# Try to get around the situation where opencl won't start
repeat = True
while repeat:
returncode = call(mdrun.format(gromacs=gromacs) + ' ' + additional, shell=True)
if returncode == 0:
repeat = False
| 6,151 |
crear_producto.py
|
creditec/creditec.github.io
| 0 |
2170590
|
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
from tkinter import messagebox
import os
import shutil
def get_categories():
lista=[]
md=os.listdir('category')
for file in md:
temp=file.replace('.md','')
lista.append(temp)
return lista
def UploadImage(event=None):
global filename
filename = filedialog.askopenfilename()
global imagen
imagen.set(os.path.split(filename)[1])
def addProduct():
global imagen
global filename
if nombre.get() != "" and combo.get() != "" and imagen.get() != 'Ninguna imagen seleccionada' and descripcion.get('1.0', END).strip() != "" and precio.get() != "":
plantilla=("---\n"
"name: "+nombre.get()+"\n"
"categories: "+combo.get()+"\n"
"description_markdown: >-\n"
" "+descripcion.get('1.0', END)+"\n"
"price: \'"+precio.get()+"\'\n"
"styles:\n"
" - name: Color\n"
" color: \'#dfd3c2\'\n"
" image: /images/products/"+imagen.get()+"\n"
"---")
n_archivo=nombre.get()
n_archivo=n_archivo.replace(" ","_")
n_archivo=n_archivo.lower()+".md"
f_end= open("_products/"+n_archivo,"w+")
f_end.write(plantilla)
f_end.close()
shutil.copy2(filename,"images/products/"+imagen.get())
print(plantilla)
print(n_archivo)
messagebox.showinfo(message="Producto realizado con éxito", title="Producto")
# reset fields
nombre.set("")
combo.set("")
descripcion.delete('1.0', END)
precio.set("")
imagen.set("")
filename=''
imagen.set('Ninguna imagen seleccionada')
else:
messagebox.showinfo(message="ERROR: Debe llenar todos los campos", title="Producto")
filename=''
app=Tk()
app.title('Crear Producto')
app.rowconfigure(0,weight=5)
app.columnconfigure(0, weight=1)
# Equipment name
Label(app, text='Equipo: ').grid(row=0, column=0, sticky=E)
nombre=StringVar()
Entry(app, textvariable=nombre).grid(row=0, column=1, sticky=W,pady=5, padx=5)
# Categories
Label(app, text='Categoría: ').grid(row=1, column=0, sticky=E)
combo=ttk.Combobox(app, state="readonly")
combo.grid(row=1, column=1, sticky=W,pady=5, padx=5)
combo["values"] = get_categories()
imagen=StringVar()
imagen.set('Ninguna imagen seleccionada')
Button(app, text='Añadir imagen', command=UploadImage).grid(row=2, column=0, sticky=E)
Label(app, textvariable=imagen).grid(row=2, column=1, sticky=W,pady=5, padx=5)
Label(app, text='Descripción: ').grid(row=3, column=0, sticky=E)
descripcion=Text(app, height=10, width=40)
descripcion.grid(row=3,column=1, sticky=W, pady=5, padx=5)
Label(app, text='Precio: ').grid(row=4, column=0, sticky=E)
precio=StringVar()
Entry(app, textvariable=precio).grid(row=4, column=1, sticky=W,pady=5, padx=5)
Button(app, text='Añadir', command=addProduct).grid(row=5, column=0,pady=5)
Button(app, text='Salir', command=quit).grid(row=5, column=1,pady=5)
app.mainloop()
| 3,067 |
config/constants.py
|
YugantM/PowerGrid
| 0 |
2171870
|
col_len = 6
column_names = ['A'+str(each+1) for each in range(int(col_len/2))] + ['V'+str(each+1) for each in range(int(col_len/2))]
all_labels = ['NML','AB', 'AC', 'BC', 'ABC', 'AG', 'BG', 'ABG', 'CG', 'ACG', 'BCG', 'ABCG']
| 224 |
common/test_gce.py
|
sears-s/fuzzbench
| 800 |
2171766
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gce.py."""
from unittest import mock
from common import gce
PROJECT = 'my-cloud-project'
ZONE = 'my-compute-zone'
INSTANCE_GROUP = 'my-instance-group'
INSTANCE_TEMPLATE_URL = 'resource/my-instance-group'
EXPERIMENT = 'my-experiment'
@mock.patch('common.gce.get_instance_group_managers')
def test_delete_instance_group(mocked_get_instance_group_managers):
"""Tests that delete_instance_group uses the GCE API correctly."""
mock_managers = mock.Mock()
mocked_get_instance_group_managers.return_value = mock_managers
gce.delete_instance_group(INSTANCE_GROUP, PROJECT, ZONE)
assert mock_managers.delete.call_args_list == [
mock.call(instanceGroupManager=INSTANCE_GROUP,
project=PROJECT,
zone=ZONE)
]
@mock.patch('common.gce.get_instance_group_managers')
def test_resize_instance_group(mocked_get_instance_group_managers):
"""Tests that resize_instance_group uses the GCE API correctly."""
size = 10
mock_managers = mock.Mock()
mocked_get_instance_group_managers.return_value = mock_managers
gce.resize_instance_group(size, INSTANCE_GROUP, PROJECT, ZONE)
assert mock_managers.resize.call_args_list == [
mock.call(instanceGroupManager=INSTANCE_GROUP,
size=size,
project=PROJECT,
zone=ZONE)
]
@mock.patch('common.gce.get_instance_group_managers')
def test_create_instance_group(mocked_get_instance_group_managers):
"""Tests that create_instance_group uses the GCE API correctly."""
mock_managers = mock.Mock()
mocked_get_instance_group_managers.return_value = mock_managers
base_instance_name = 'm-' + EXPERIMENT
gce.create_instance_group(INSTANCE_GROUP, INSTANCE_TEMPLATE_URL,
base_instance_name, PROJECT, ZONE)
body = {
'baseInstanceName': 'm-' + EXPERIMENT,
'targetSize': 1,
'name': INSTANCE_GROUP,
'instanceTemplate': INSTANCE_TEMPLATE_URL,
}
assert mock_managers.insert.call_args_list == [
mock.call(body=body, project=PROJECT, zone=ZONE)
]
@mock.patch('common.gce.get_instance_group_managers')
def test_get_instance_group_size(mocked_get_instance_group_managers):
"""Tests that get_instance_group_size uses the GCE API correctly and returns
the right value."""
mock_managers = mock.Mock()
mocked_get_instance_group_managers.return_value = mock_managers
mock_req = mock.Mock()
mock_managers.get.return_value = mock_req
size = 1
mock_req.execute.return_value = {'targetSize': size}
result = gce.get_instance_group_size(INSTANCE_GROUP, PROJECT, ZONE)
assert mock_managers.get.call_args_list == [
mock.call(instanceGroupManager=INSTANCE_GROUP,
project=PROJECT,
zone=ZONE)
]
assert result == size
| 3,445 |
Example Code/plotGraph.py
|
KitronikLtd/micropython-microbit-kitronik-display128x64
| 0 |
2171838
|
from microbit import accelerometer, sleep
from display128x64Plot import *
plot = Kitronik128x64DisplayPlot()
graphYMax = 32
graphYMin = 12
graphYRange = graphYMax - graphYMin
variableMax = 2000
variableMin = -2000
variableRange = variableMax - variableMin
screenRatio = graphYRange/variableRange
x = 0
y = 0
length = 0
while True:
yPlot = accelerometer.get_y()
plot.display_as_text(" ", 0, 0)
plot.display_as_text(yPlot, 0, 0)
yPlotMapped = graphYMax - ((yPlot-variableMin) * screenRatio)
yPlotMapped = round(yPlotMapped)
if x == 0:
previousYPlot = yPlotMapped
if yPlotMapped < previousYPlot:
y = yPlotMapped
length = (previousYPlot-yPlotMapped)
elif yPlotMapped > previousYPlot:
y = previousYPlot
length = (yPlotMapped-previousYPlot)
else:
y = yPlotMapped
length = 1
if x == 63:
plot.clear_display()
x=0
else:
plot.draw_vert_line(x, y, length)
previousYPlot = yPlotMapped
x += 1
sleep(500)
| 1,116 |
tests/test_static/conftest.py
|
gitter-badger/pygsuite
| 0 |
2171017
|
from ast import literal_eval
import pytest
from pygsuite.docs.document import Document
import os
script_dir = os.path.dirname(__file__)
@pytest.fixture
def static_test_document():
with open(os.path.join(script_dir, "test_document.json_capture")) as file:
return Document(_document=literal_eval(file.read()), local=True)
| 337 |
lithic/types/api_status.py
|
lithic-com/lithic-python
| 0 |
2171490
|
# File generated from our OpenAPI spec by Stainless.
from typing import Optional, Union, List, Any
from typing_extensions import Literal
from .._models import StringModel, NoneModel, BaseModel
__all__ = ["APIStatus"]
class APIStatus(BaseModel):
message: Optional[str]
| 277 |
Python3.x/27-Remove Element.py
|
ranchlin/Leetcode
| 0 |
2171417
|
# Runtime: 16 ms, faster than 95.57% of Python online submissions for Remove Element.
# Memory Usage: 11.9 MB, less than 13.53% of Python online submissions for Remove Element.
class Solution(object):
def removeElement(self, nums, val):
"""
:type nums: List[int]
:type val: int
:rtype: int
"""
i = 0
for j, _ in enumerate(nums):
if nums[j] != val:
nums[i] = nums[j]
i += 1
return i
| 494 |
src/Printer.py
|
hridesh/pyarithlang
| 0 |
2169941
|
'''
This class implements a printer for AST nodes in
this programming language that makes use of the
formatter. The formatter extends the AST visitor
and provides implementation for each case of visit.
@author: <NAME>
Copyright (c) 2018. All rights reserved.
See LICENSE file in the root directory for licensing information.
'''
from AST import Visitor
from AST import AST
class Printer:
def prnt(self,obj):
if type(obj) is AST.Program:
print(Printer.Formatter().visit(obj))
else:
print(obj)
class Formatter(Visitor):
def visitProgram(self, program):
return program.e().accept(self)
def visitNumExp(self, numexp):
return str(numexp.val)
def visitAddExp(self, addexp):
result = "(+ "
for operand in addexp.all():
result += operand.accept(self) + " "
return result.rstrip() + ")"
def visitSubExp(self, subexp):
result = "(- "
for operand in subexp.all():
result += operand.accept(self) + " "
return result.rstrip() + ")"
def visitMultExp(self, multexp):
result = "(* "
for operand in multexp.all():
result += operand.accept(self) + " "
return result.rstrip() + ")"
def visitDivExp(self, divexp):
result = "(/ "
for operand in divexp.all():
result += operand.accept(self) + " "
return result.rstrip() + ")"
| 1,546 |
buildroot/support/testing/tests/core/test_root_password.py
|
bramkragten/operating-system
| 349 |
2170927
|
import os
import infra.basetest
from crypt import crypt
class TestRootPassword(infra.basetest.BRTest):
password = "<PASSWORD>"
config = infra.basetest.BASIC_TOOLCHAIN_CONFIG + \
"""
BR2_TARGET_ROOTFS_CPIO=y
BR2_TARGET_ENABLE_ROOT_LOGIN=y
BR2_TARGET_GENERIC_ROOT_PASSWD="{}"
""".format(password)
def test_run(self):
# 1. Test by looking up the hash in /etc/shadow
shadow = os.path.join(self.builddir, "target", "etc", "shadow")
with open(shadow, "r") as f:
users = f.readlines()
for user in users:
s = user.split(":")
n, h = s[0], s[1]
if n == "root":
# Fail if the account is disabled or no password is required
self.assertTrue(h not in ["", "*"])
# Fail if the hash isn't right
self.assertEqual(crypt(self.password, h), h)
# 2. Test by attempting to login
cpio_file = os.path.join(self.builddir, "images", "rootfs.cpio")
try:
self.emulator.boot(arch="armv7", kernel="builtin",
options=["-initrd", cpio_file])
self.emulator.login(self.password)
except SystemError:
self.fail("Unable to login with the password")
| 1,340 |
scripts/spw_network4a_1.py
|
andrisecker/KOKI_sharp_waves
| 0 |
2169600
|
#!/usr/bin/python
# -*- coding: utf8 -*-
"""
creates PC (adExp IF) and BC (IF) populations in Brian, loads in the recurrent connection matrix for the PC population
runs simulation and checks the dynamics
see more: https://drive.google.com/file/d/0B089tpx89mdXZk55dm0xZm5adUE/view
authors: <NAME>, <NAME>, <NAME> last update: 05.2017
"""
import os
from brian import *
import numpy as np
import matplotlib.pyplot as plt
from detect_oscillations import *
from plots import *
fIn = "wmxR_asym_old.txt"
SWBasePath = os.path.sep.join(os.path.abspath(__file__).split(os.path.sep)[:-2])
np.random.seed(12345)
NE = 4000
NI = 1000
# sparseness
eps_pyr = 0.16
eps_bas = 0.4
# parameters for pyr cells
z = 1*nS
gL_Pyr = 4.333e-3 * uS
tauMem_Pyr = 60.0 * ms
Cm_Pyr = tauMem_Pyr * gL_Pyr
Vrest_Pyr = -70.0 * mV
reset_Pyr = -53.0*mV
theta_Pyr = -50.0*mV
tref_Pyr = 5*ms
# Adaptation parameters for pyr cells
a_Pyr = -0.8*nS # nS Subthreshold adaptation conductance
b_Pyr = 0.04*nA # nA Spike-triggered adaptation
delta_T_Pyr = 2.0*mV # Slope factor
tau_w_Pyr = 300*ms # Adaptation time constant
v_spike_Pyr = theta_Pyr + 10 * delta_T_Pyr
# parameters for bas cells
gL_Bas = 5.0e-3*uS
tauMem_Bas = 14.0*ms
Cm_Bas = tauMem_Bas * gL_Bas
Vrest_Bas = -70.0*mV
reset_Bas = -64.0*mV
theta_Bas = -50.0*mV
tref_Bas = 0.1*ms
# synaptic weights
J_PyrInh = 0.15 # 0.125
J_BasExc = 4.5 # 5.2083
J_BasInh = 0.25 # 0.15
print 'J_PyrInh:', J_PyrInh
print 'J_BasExc:', J_BasExc
print 'J_BasInh:', J_BasInh
# Synaptic reversal potentials
E_Exc = 0.0*mV
E_Inh = -70.0*mV
# Synaptic time constants
tauSyn_PyrExc = 10.0*ms
tauSyn_PyrInh = 3.0*ms
tauSyn_BasExc = 3.0*ms
tauSyn_BasInh = 1.5*ms
# Synaptic delays
delay_PyrExc = 3.0*ms
delay_PyrInh = 1.5*ms
delay_BasExc = 3.0*ms
delay_BasInh = 1.5*ms
# input parameters
p_rate_mf = 5.0*Hz
J_PyrMF = 5.0
# Creating populations
eqs_adexp = '''
dvm/dt = (-gL_Pyr*(vm-Vrest_Pyr) + gL_Pyr*delta_T_Pyr*exp((vm- theta_Pyr)/delta_T_Pyr)-w - (g_ampa*z*(vm-E_Exc) + g_gaba*z*(vm-E_Inh)))/Cm_Pyr : volt
dw/dt = (a_Pyr*(vm - Vrest_Pyr )-w)/tau_w_Pyr : amp
dg_ampa/dt = -g_ampa/tauSyn_PyrExc : 1
dg_gaba/dt = -g_gaba/tauSyn_PyrInh : 1
'''
def myresetfunc(P, spikes):
P.vm[spikes] = reset_Pyr # reset voltage
P.w[spikes] += b_Pyr # low pass filter of spikes (adaptation mechanism)
SCR = SimpleCustomRefractoriness(myresetfunc, tref_Pyr, state='vm')
eqs_bas = '''
dvm/dt = (-gL_Bas*(vm-Vrest_Bas) - (g_ampa*z*(vm-E_Exc) + g_gaba*z*(vm-E_Inh)))/Cm_Bas : volt
dg_ampa/dt = -g_ampa/tauSyn_BasExc : 1
dg_gaba/dt = -g_gaba/tauSyn_BasInh : 1
'''
# ====================================== end of parameters ======================================
fName = os.path.join(SWBasePath, 'files', fIn)
Wee = load_Wee(fName)
PE = NeuronGroup(NE, model=eqs_adexp, threshold=v_spike_Pyr, reset=SCR)
PI = NeuronGroup(NI, model=eqs_bas, threshold=theta_Bas, reset=reset_Bas, refractory=tref_Bas)
PE.vm = Vrest_Pyr
PE.g_ampa = 0
PE.g_gaba = 0
PI.vm = Vrest_Bas
PI.g_ampa = 0
PI.g_gaba = 0
MF = PoissonGroup(NE, p_rate_mf)
print 'Connecting the network'
Cext = IdentityConnection(MF, PE, 'g_ampa', weight=J_PyrMF)
Cee = Connection(PE, PE, 'g_ampa', delay=delay_PyrExc)
Cee.connect(PE, PE, Wee)
Cei = Connection(PE, PI, 'g_ampa', weight=J_BasExc, sparseness=eps_pyr, delay=delay_BasExc)
Cie = Connection(PI, PE, 'g_gaba', weight=J_PyrInh, sparseness=eps_bas, delay=delay_PyrInh)
Cii = Connection(PI, PI, 'g_gaba', weight=J_BasInh, sparseness=eps_bas, delay=delay_BasInh)
print 'Connections done'
del Wee # clear memory
# Monitors
sme = SpikeMonitor(PE)
smi = SpikeMonitor(PI)
popre = PopulationRateMonitor(PE)
popri = PopulationRateMonitor(PI)
# other monitors factored out to speed up simulation and make the process compatible with Brian2
selection = np.arange(0, 4000, 100) # subset of neurons for recording variables
msMe = MultiStateMonitor(PE, vars=['vm', 'w', 'g_ampa', 'g_gaba'], record=selection.tolist()) # comment this out later (takes a lot of memory!)
run(10000*ms, report='text')
if sme.nspikes > 0 and smi.nspikes > 0: # check if there is any activity
# analyse spikes
spikeTimesE, spikingNeuronsE, poprE, ISIhist, bin_edges = preprocess_monitors(sme, popre)
spikeTimesI, spikingNeuronsI, poprI = preprocess_monitors(smi, popri, calc_ISI=False)
# detect replay
avgReplayInterval = replay(ISIhist[3:16]) # bins from 150 to 850 (range of interest)
# analyse rates
meanEr, rEAC, maxEAC, tMaxEAC, fE, PxxE = analyse_rate(poprE)
meanIr, rIAC, maxIAC, tMaxIAC, fI, PxxI = analyse_rate(poprI)
maxEACR, tMaxEACR, avgRippleFE, ripplePE = ripple(rEAC, fE, PxxE)
maxIACR, tMaxIACR, avgRippleFI, ripplePI = ripple(rIAC, fI, PxxI)
avgGammaFE, gammaPE = gamma(fE, PxxE)
avgGammaFI, gammaPI = gamma(fI, PxxI)
# Print out some info
print 'Mean excitatory rate: ', meanEr
print 'Maximum exc. autocorrelation:', maxEAC, 'at', tMaxEAC, '[ms]'
print 'Maximum exc. AC in ripple range:', maxEACR, 'at', tMaxEACR, '[ms]'
print 'Mean inhibitory rate: ', meanIr
print 'Maximum inh. autocorrelation:', maxIAC, 'at', tMaxIAC, '[ms]'
print 'Maximum inh. AC in ripple range:', maxIACR, 'at', tMaxIACR, '[ms]'
print ''
print 'Average exc. ripple freq:', avgRippleFE
print 'Exc. ripple power:', ripplePE
print 'Average exc. gamma freq:', avgGammaFE
print 'Exc. gamma power:', gammaPE
print 'Average inh. ripple freq:', avgRippleFI
print 'Inh. ripple power:', ripplePI
print 'Average inh. gamma freq:', avgGammaFI
print 'Inh. gamma power:', gammaPI
print "--------------------------------------------------"
# Plots
plot_raster_ISI(spikeTimesE, spikingNeuronsE, poprE, [ISIhist, bin_edges], "blue", multiplier_=1)
plot_PSD(poprE, rEAC, fE, PxxE, "Pyr_population", "blue", multiplier_=1)
plot_PSD(poprI, rIAC, fI, PxxI, "Bas_population", "green", multiplier_=1)
subset = plot_zoomed(spikeTimesE, spikingNeuronsE, poprE, "Pyr_population", "blue", multiplier_=1,
sm=msMe, selection=selection)
plot_zoomed(spikeTimesI, spikingNeuronsI, poprI, "Bas_population", "green", multiplier_=1, Pyr_pop=False)
plot_detailed(msMe, subset, multiplier_=1)
#plot_adaptation(msMe, selection, multiplier_=1)
else: # if there is no activity the auto-correlation function will throw an error!
print "No activity !"
print "--------------------------------------------------"
plt.show()
| 6,472 |
tests/test_nodeEndpoints/connect_users.py
|
alekLukanen/pyDist
| 5 |
2169188
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 23 19:30:06 2017
@author: lukanen
"""
import sys
sys.path.append('.')
from pyDist import Interfaces, intercom
import concurrent
import logging
import urllib.request
import json
import os
import time
import asyncio
from pyDist.TaskManager import TaskManager
import tests.testerHelpers as testHelpers
#logging utility
logging.getLogger("Nodes").setLevel(logging.WARNING)
logging.getLogger("endpoints").setLevel(logging.WARNING)
logging.basicConfig(format='%(name)-12s:%(lineno)-3s | %(levelname)-8s | %(message)s'
, stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger(__name__)
def connect_n_users(n):
cluster_exs = []
for i in range(0, n):
cluster_ex = Interfaces.ClusterExecutor('0.0.0.0', 9000)
cluster_ex.connect(f'connect_one_user({i})', group_id='connect_users')
cluster_exs.append(cluster_ex)
#cluster_ex.disconnect()
time.sleep(0.5)
interface_stats = json.loads(urllib.request.urlopen("http://0.0.0.0:9000/interfaceStats").read())
logger.debug(f'interface_stats: {interface_stats}')
assert interface_stats['data']['num_users'] == n
assert interface_stats['data']['num_nodes'] == 0
assert interface_stats['data']['num_clients'] == 0
for cluster_ex in cluster_exs:
cluster_ex.disconnect()
def start_one_node_and_connect_n_users(n):
task_manager = TaskManager()
task_manager.num_cores = 2
task_manager.executor = concurrent.futures.ProcessPoolExecutor(task_manager.num_cores)
task_manager.tasks.append(
task_manager.executor.submit(testHelpers.create_master_node, '0.0.0.0', 9000)
)
logger.debug('----- creating executor and connecting users -----')
connect_n_users(n)
io_loop = asyncio.get_event_loop()
counts = io_loop.run_until_complete(intercom.get_user_counts('0.0.0.0', 9000,
params={'user_id': 'connect_one_user(0)'}))
logger.debug(f'counts: {counts}')
# shutdown the executor then kill all child processes
logger.debug('Shutting down the test processes')
task_manager.executor.shutdown(wait=False)
testHelpers.kill_child_processes(os.getpid())
def test_start_one_node_and_connect_one_user():
start_one_node_and_connect_n_users(1)
def test_start_one_node_and_connect_two_users():
start_one_node_and_connect_n_users(2)
def test_start_one_node_and_connect_three_users():
start_one_node_and_connect_n_users(3)
def test_start_one_node_and_connect_a_bunch_of_users():
start_one_node_and_connect_n_users(64)
if __name__ == '__main__':
logger.debug('basic task sending test')
test_start_one_node_and_connect_one_user()
#test_start_one_node_and_connect_a_bunch_of_users()
| 2,839 |
algospot/lec8_DP/ASYMTILING.py
|
cutz-j/AlgorithmStudy
| 3 |
2171943
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 23 10:13:22 2019
@author: samle
"""
from functools import lru_cache
MOD = 1000000007
@lru_cache()
def tiling(width):
if width <= 1:
return 1
else:
return((tiling(width-2)+tiling(width-1))% MOD)
def asym(width):
if (width % 2 == 1):
# odd width: symmetric tilings have a vertical domino in the centre column, tiling(width // 2) of them
return ((tiling(width) - tiling(width // 2) + MOD) % MOD)
else:
ret = tiling(width)
ret = ((ret - tiling(width // 2) + MOD) % MOD)
ret = ((ret - tiling(width // 2 - 1) + MOD) % MOD)
return ret
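# Hand-worked check (added; not from the original source): a 2x4 board has tiling(4) = 5
# tilings in total, of which tiling(4 // 2) + tiling(4 // 2 - 1) = 2 + 1 = 3 are mirror
# symmetric, so asym(4) should be 5 - 3 = 2.
assert asym(4) == 2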
if __name__ =='__main__':
width = int(input())
print()
print(asym(width))
| 631 |
cyberspace/__init__.py
|
idin/cyberspace
| 0 |
2170900
|
from .find_common import find_common
from .MaskSet import MaskSet, Mask
from .wikipedia import Wikipedia
from .Scraper import Scraper
from .get_id_token import get_id_token
from .imdb import IMDB
| 196 |
stock_wrapper/shares.py
|
JadenSWang/StockTracker-Wrapper
| 4 |
2171498
|
import stock_wrapper
import robin_stocks
class Shares(stock_wrapper.Stock):
def __init__(self, ticker):
super().__init__(ticker)
def sell(self, quantity):
robin_stocks.orders.order_sell_market(self.ticker, quantity)
def limt_sell(self, quantity, price):
robin_stocks.orders.order_sell_limit(self.ticker, quantity, price)
def buy(self, quantity):
robin_stocks.orders.order_buy_market(self.ticker, quantity)
def limit_buy(self, quantity, price):
robin_stocks.orders.order_buy_limit(self.ticker, quantity, price)
@property
def equity(self):
stocks_data = robin_stocks.account.get_current_positions()
stock_data = self.__get_stock_from_positions_list(self.ticker, stocks_data)
return float(stock_data['quantity']) * self.price
@staticmethod
def __get_stock_from_positions_list(ticker, list):
for stock in list:
if robin_stocks.get_symbol_by_url(stock['instrument']) == ticker:
return stock
| 1,032 |
src/qt/src/3rdparty/webkit/Source/ThirdParty/glu/glu.gyp
|
ant0ine/phantomjs
| 46 |
2171763
|
{
'targets': [{
'target_name': 'libtess',
'type': '<(library)',
'include_dirs': [
'../..',
],
'sources': [
'gluos.h',
'internal_glu.h',
'libtess/dict-list.h',
'libtess/dict.c',
'libtess/dict.h',
'libtess/geom.c',
'libtess/geom.h',
'libtess/memalloc.c',
'libtess/memalloc.h',
'libtess/mesh.c',
'libtess/mesh.h',
'libtess/normal.c',
'libtess/normal.h',
'libtess/priorityq-heap.h',
'libtess/priorityq-sort.h',
'libtess/priorityq.c',
'libtess/priorityq.h',
'libtess/render.c',
'libtess/render.h',
'libtess/sweep.c',
'libtess/sweep.h',
'libtess/tess.c',
'libtess/tess.h',
'libtess/tessmono.c',
'libtess/tessmono.h',
],
}],
}
| 804 |
arguments.py
|
Jungyhuk/plotcoder
| 10 |
2171560
|
import argparse
import time
import os
import sys
def get_arg_parser(title):
parser = argparse.ArgumentParser(description=title)
parser.add_argument('--cpu', action='store_true', default=False)
parser.add_argument('--eval', action='store_true')
parser.add_argument('--model_dir', type=str, default='../checkpoints/model_0')
parser.add_argument('--load_model', type=str, default=None)
parser.add_argument('--num_LSTM_layers', type=int, default=2)
parser.add_argument('--num_MLP_layers', type=int, default=1)
parser.add_argument('--LSTM_hidden_size', type=int, default=512)
parser.add_argument('--MLP_hidden_size', type=int, default=512)
parser.add_argument('--embedding_size', type=int, default=512)
parser.add_argument('--keep_last_n', type=int, default=None)
parser.add_argument('--eval_every_n', type=int, default=1500)
parser.add_argument('--log_interval', type=int, default=1500)
parser.add_argument('--log_dir', type=str, default='../logs')
parser.add_argument('--log_name', type=str, default='model_0.csv')
parser.add_argument('--max_eval_size', type=int, default=1000)
data_group = parser.add_argument_group('data')
data_group.add_argument('--train_dataset', type=str, default='../data/train_plot.json')
data_group.add_argument('--dev_dataset', type=str, default='../data/dev_plot_hard.json')
data_group.add_argument('--test_dataset', type=str, default='../data/test_plot_hard.json')
data_group.add_argument('--code_vocab', type=str, default='../data/code_vocab.json')
data_group.add_argument('--word_vocab', type=str, default='../data/nl_vocab.json')
data_group.add_argument('--word_vocab_size', type=int, default=None)
data_group.add_argument('--code_vocab_size', type=int, default=None)
data_group.add_argument('--num_plot_types', type=int, default=6)
data_group.add_argument('--joint_plot_types', action='store_true', default=False)
data_group.add_argument('--data_order_invariant', action='store_true', default=False)
data_group.add_argument('--nl', action='store_true', default=False)
data_group.add_argument('--use_comments', action='store_true', default=False)
data_group.add_argument('--code_context', action='store_true', default=False)
data_group.add_argument('--local_df_only', action='store_true', default=False)
data_group.add_argument('--target_code_transform', action='store_true', default=False)
data_group.add_argument('--max_num_code_cells', type=int, default=2)
data_group.add_argument('--max_word_len', type=int, default=512)
data_group.add_argument('--max_code_context_len', type=int, default=512)
data_group.add_argument('--max_decode_len', type=int, default=200)
model_group = parser.add_argument_group('model')
model_group.add_argument('--hierarchy', action='store_true', default=False)
model_group.add_argument('--copy_mechanism', action='store_true', default=False)
model_group.add_argument('--nl_code_linking', action='store_true', default=False)
train_group = parser.add_argument_group('train')
train_group.add_argument('--optimizer', type=str, default='adam', choices=['adam', 'sgd', 'rmsprop'])
train_group.add_argument('--lr', type=float, default=1e-3)
train_group.add_argument('--lr_decay_steps', type=int, default=6000)
train_group.add_argument('--lr_decay_rate', type=float, default=0.9)
train_group.add_argument('--dropout_rate', type=float, default=0.2)
train_group.add_argument('--gradient_clip', type=float, default=5.0)
train_group.add_argument('--num_epochs', type=int, default=50)
train_group.add_argument('--batch_size', type=int, default=32)
train_group.add_argument('--param_init', type=float, default=0.1)
train_group.add_argument('--seed', type=int, default=None)
return parser
| 3,689 |
pyredditlive.py
|
nbr23/pyredditlive
| 0 |
2171917
|
#!/usr/bin/env python3
import asyncio
import websockets
import requests
import json
import sys
import os
import yaml
import urllib
import time
def load_config(config_path):
config = {}
if os.path.exists(config_path):
with open(config_path) as configfile:
config = yaml.load(configfile, Loader=yaml.SafeLoader)
if 'PYRL_TELEGRAM_BOT' in os.environ:
config['telegram_bot'] = os.environ['PYRL_TELEGRAM_BOT']
if 'PYRL_TELEGRAM_CHAT_ID' in os.environ:
config['telegram_chat_id'] = os.environ['PYRL_TELEGRAM_CHAT_ID']
if 'telegram_bot' not in config or 'telegram_chat_id' not in config:
raise Exception("No configuration file found or environment variable set")
return config
def get_ws_url(url):
res = (
requests.get(f"{url}about.json", headers={"User-agent": "Mozilla/5.0"})
.json()
.get("data", {})
)
if res.get("state") == "live":
return res.get("websocket_url")
raise Exception(f"Livethread state is {res.get('state')}")
def post_update(update, config):
if update.get("type") == "update":
body = update.get("payload", {}).get("data", {}).get("body")
if body is not None:
print(f"POSTING {body}")
requests.get(
f'https://api.telegram.org/{config["telegram_bot"]}/sendMessage?chat_id={config.get("telegram_chat_id")}&text={urllib.parse.quote_plus(body)}'
)
async def livethread(url, config):
while True:
ws_url = get_ws_url(url)
try:
async with websockets.connect(ws_url) as websocket:
requests.get(
f'https://api.telegram.org/{config["telegram_bot"]}/sendMessage?chat_id={config.get("telegram_chat_id")}&text={urllib.parse.quote_plus("Connected to "+ws_url)}'
)
print(f"Connected to {ws_url}")
while True:
update = await websocket.recv()
print(f"RAW JSON:\n{update}")
post_update(json.loads(update), config)
except websockets.ConnectionClosed:
continue
def main():
config = load_config("./config.yml")
while True:
try:
asyncio.run(livethread(sys.argv[1], config))
except asyncio.exceptions.TimeoutError:
time.sleep(5)
if __name__ == "__main__":
sys.exit(main())
| 2,412 |
supermarket_pricing/python/supermarket_abxsantos/src/product/product_inserter.py
|
TijsDeBelie/codekatas-TDD
| 0 |
2171954
|
from src.product.products import Product
from src.supermarket import Supermarket
class ProductInserter(object):
def __init__(self, supermarket: Supermarket, products: [Product]):
"""
Used to insert products into the supermarket_abxsantos.
Concerned with adding a new product to the supermarket_abxsantos.
>>> bean_product = Product(name='beans', cost=0.50, price=1.00, sku='001', unit='un', stock_quantity=100)
>>> myrket_supermarket = Supermarket('mYrket')
>>> ProductInserter(supermarket_abxsantos=myrket_supermarket, products=[bean_product]).add_products()
>>> myrket_supermarket.products[0].name = 'beans'
"""
self.products = products
self.supermarket = supermarket
def add_products(self):
for product in self.products:
self.supermarket.products.append(product)
| 880 |
unn/models/networks/network_factory.py
|
zongdaoming/TinyTransformer
| 2 |
2171119
|
from unn.models.networks.base_network import BaseNetwork
from unn.models.networks.darts.backbone_darts_network import BackboneDartsNetwork
class NetworkFactory:
@classmethod
def create(cls, name, cfg):
if name == 'base':
network = BaseNetwork(cfg)
elif name == 'backbone_darts':
network = BackboneDartsNetwork(cfg)
else:
raise ValueError('Unrecognized Network: ' + name)
return network
| 465 |
Poggle Application Project/Server Connections/PoggleClient.py
|
Chuffman-CSD/Poggle-Application
| 0 |
2171852
|
#!/usr/bin/python3
#<NAME>
#9/20/19
import socket
import sys
host = '127.0.0.1'
port = 5555
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((host,port))
except socket.error as e:
print (str(e))
def threaded_client(conn):
conn.send(str.encode('welcome, type a message'))
print("Successfully connected! Host:",host,"Port:",port)
print("\nWelcome to the chat!\n")
x = 0
while x == 0:
msg = input("Enter a message: ")
msgb = str.encode(msg)
#msg = b""
print(msg)
while True:
data = conn.recv(2048)
reply = 'Server Output: ' + data.decode('utf-8')
threaded_client(s)
| 675 |
beproud/django/ssl/tests/__init__.py
|
beproud/bpssl
| 0 |
2171059
|
# NOTE: Tests are imported in this file to support Django < 1.6.
# The test runner in Django 1.6 and above does not recognize
# tests in this file because it (__init__.py) does not match
# the pattern test*.py.
from beproud.django.ssl.tests.test_wsgi import * # NOQA
from beproud.django.ssl.tests.test_proxy import * # NOQA
from beproud.django.ssl.tests.test_context_processors import * # NOQA
| 416 |
webapp/receipt/models.py
|
sanchos2/nautilus
| 0 |
2170142
|
"""Receipt models."""
from sqlalchemy.orm import relationship
from webapp.db import db
class Purchase(db.Model):
"""Purchase model."""
# fn_number - fn, fd_number - i, fpd_number - fp
id = db.Column(db.Integer, primary_key=True) # noqa: WPS125
user_id = db.Column(
db.Integer,
db.ForeignKey('user.id', ondelete='CASCADE'),
index=True,
)
fn_number = db.Column(db.String)
fd_number = db.Column(db.String)
fpd_number = db.Column(db.String)
receipt_type = db.Column(db.String)
date = db.Column(db.DateTime)
sum = db.Column(db.Float) # noqa: WPS125
loaded = db.Column(db.String)
organization = db.Column(db.String)
user = relationship('User', backref='purchases')
category = relationship('Category', secondary='purchase_category', backref='purchases')
def __str__(self):
return self.organization
def __repr__(self):
return '<Purchase-{0}, on date-{1}, for amount-{2}, valid-{3}'.format(
self.id, self.date, self.sum, self.loaded,
)
class PurchaseCategory(db.Model):
"""Purchase - Category."""
purchase_id = db.Column(
db.Integer,
db.ForeignKey('purchase.id', ondelete='CASCADE'),
primary_key=True,
)
category_id = db.Column(
db.Integer,
db.ForeignKey('category.id', ondelete='CASCADE'),
primary_key=True,
)
class Category(db.Model):
"""Category model."""
id = db.Column(db.Integer, primary_key=True) # noqa: WPS125
category = db.Column(db.String)
def __str__(self):
return self.category
def __repr__(self):
return f'<Category({self.category})'
class Receipt(db.Model):
"""Receipt model."""
id = db.Column(db.Integer, primary_key=True) # noqa: WPS125
purchase_id = db.Column(
db.Integer,
db.ForeignKey('purchase.id', ondelete='CASCADE'),
index=True,
)
product = db.Column(db.String)
price = db.Column(db.Integer)
quantity = db.Column(db.Float)
sum = db.Column(db.Integer) # noqa: WPS125
purchase = relationship('Purchase', backref='receipts')
subcategory = relationship('Subcategory', secondary='receipt_subcategory', backref='receipts')
def __str__(self):
return self.product
def __repr__(self):
return f'<Receipt item - {self.product}, item amount - {self.sum}'
class ReceiptSubcategory(db.Model):
"""Purchase - Subcategory."""
receipt_id = db.Column(
db.Integer,
db.ForeignKey('receipt.id', ondelete='CASCADE'),
primary_key=True,
)
subcategory_id = db.Column(
db.Integer,
db.ForeignKey('subcategory.id', ondelete='CASCADE'),
primary_key=True,
)
class Subcategory(db.Model):
"""Subcategory model."""
id = db.Column(db.Integer, primary_key=True) # noqa: WPS125
subcategory = db.Column(db.String)
category = relationship('Category', secondary='category_subcategory', backref='subcategories')
def __str__(self):
return self.subcategory
def __repr__(self):
return f'<Category ({self.subcategory})'
class CategorySubcategory(db.Model):
"""Category - Subcategory."""
category_id = db.Column(
db.Integer,
db.ForeignKey('category.id', ondelete='CASCADE'),
primary_key=True,
)
subcategory_id = db.Column(
db.Integer,
db.ForeignKey('subcategory.id', ondelete='CASCADE'),
primary_key=True,
)
| 3,514 |
Scripts/update_missing_cwes_in_database.py
|
joaohenggeler/software-vulnerability-collection-scripts
| 0 |
2171874
|
#!/usr/bin/env python3
"""
This script updates any missing CWE value associated with a vulnerability from the five C/C++ projects.
This is done by using the CSV files generated after running "collect_missing_cwes.py".
"""
import numpy as np # type: ignore
import pandas as pd # type: ignore
from modules.common import log
from modules.database import Database
from modules.project import Project
####################################################################################################
with Database() as db:
project_list = Project.get_project_list_from_config()
for project in project_list:
for input_csv_path in project.find_output_csv_files('missing-cwe'):
log.info(f'Updating the missing CWEs for the project "{project}" using the information in "{input_csv_path}".')
missing_cwes = pd.read_csv(input_csv_path, dtype=str)
missing_cwes = missing_cwes.replace({np.nan: None})
for row in missing_cwes.itertuples():
success, error_code = db.execute_query( '''
UPDATE VULNERABILITIES SET V_CWE = %(V_CWE)s WHERE CVE = %(CVE)s;
''',
params={'V_CWE': row.CWE, 'CVE': row.CVE})
if not success:
log.error(f'Failed to update the CWE {row.CWE} for the vulnerability {row.CVE} with the error code {error_code}.')
##################################################
log.info(f'Updating any remaining CWEs to NULL.')
success, error_code = db.execute_query( '''
UPDATE VULNERABILITIES SET V_CWE = NULL WHERE V_CWE = 'TBD';
''')
if not success:
log.error(f'Failed to update any remaining CWEs to NULL with the error code {error_code}.')
##################################################
log.info('Committing changes.')
db.commit()
log.info('Finished running.')
print('Finished running.')
| 1,813 |
tests/config/test_get_base_directory.py
|
adamghill/coltrane
| 28 |
2171611
|
from pathlib import Path
from unittest.mock import patch
from coltrane.config.paths import get_base_directory
def test_get_base_directory_no_base_dir_setting(settings):
delattr(settings, "BASE_DIR")
with patch("coltrane.config.paths.getcwd", return_value="1234"):
expected = Path("1234")
actual = get_base_directory()
assert actual == expected
def test_get_base_directory_str_base_dir_setting(settings):
settings.BASE_DIR = "5678"
expected = Path("5678")
actual = get_base_directory()
assert actual == expected
| 568 |
GT3/RadialTransport/Functions/CorePatch.py
|
gt-frc/gt3
| 2 |
2170958
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from collections import namedtuple
from GT3 import Core
from GT3.Core.Functions.CalcFSA import calc_fsa
import numpy as np
def corePatch(core: Core, neutFlag=True):
"""
Updates deuterium ion density and zeff. D density is invalid if no D density file is given because it will be
set to 0. This screws up subsequent calculations in non-obvious ways. As an example, z_eff calculation will
include D density as 0s but still have a carbon density from the fracz input file, giving an invalid result.
This is addressed by recreating the n_fsa namedtuple and z_eff_fsa in full, as namedtuples cannot be changed piecemeal
:param neutFlag:
:param core:
:return:
"""
if neutFlag:
core.n = namedtuple('n', 'i e n C')(core.n.e/(1.+.025*6.0), core.n.e, core.n.n, 0.025 * core.n.e/(1.+.025*6.0)) # TODO: Update 0.025 and 6.0 to actual fracz and zbar2
else:
core.n.update_neutrals()
core.n = namedtuple('n', 'i e n C')(core.n.e / (1. + .025 * 6.0), core.n.e,
namedtuple('n', 's t tot')(
np.zeros(core.n.i.shape), # slow
np.zeros(core.n.i.shape), # thermal
np.zeros(core.n.i.shape)), # total
0.025 * core.n.e / (1. + .025 * 6.0))
core.z_eff_fsa = calc_fsa((core.n.i * (1.**2) + core.n.C * (6.0**2))/(core.n.i * 1.0 + core.n.C * 6.0), core.R, core.Z) #TODO Similar updates (1.0 = atnum, 6.0 = zbar2)
| 1,601 |
tests/garage/torch/policies/test_discrete_cnn_policy.py
|
Rnhondova/garage
| 0 |
2171416
|
"""Test categoricalCNNPolicy in PyTorch."""
import cloudpickle
import pytest
import torch.nn as nn
from garage.envs import GymEnv
from garage.torch import TransposeImage
from garage.torch.policies import DiscreteCNNPolicy
from tests.fixtures.envs.dummy import DummyDiscreteEnv
class TestCategoricalCNNPolicy:
def _initialize_obs_env(self, env):
"""Initialize observation env depends on observation space type.
If observation space (i.e. akro.Image, gym.spaces.Box) is an image,
wrap the input of shape (W, H, 3) for PyTorch (N, 3, W, H).
Return:
Transformed environment (garage.envs).
"""
obs_shape = env.observation_space.shape
if len(obs_shape) == 3 and obs_shape[2] in [1, 3]:
env = TransposeImage(env)
return env
@pytest.mark.parametrize(
'action_dim, kernel_sizes, hidden_channels, strides, paddings', [
(3, (1, ), (32, ), (1, ), (0, )),
(3, (3, ), (32, ), (1, ), (0, )),
(3, (3, ), (32, ), (2, ), (0, )),
(3, (5, ), (12, ), (1, ), (2, )),
(3, (1, 1), (32, 64), (1, 1), (0, 0)),
(3, (3, 3), (32, 64), (1, 1), (0, 0)),
(3, (3, 3), (32, 64), (2, 2), (0, 0)),
])
def test_get_action(self, action_dim, kernel_sizes, hidden_channels,
strides, paddings):
"""Test get_action function."""
batch_size = 64
input_width = 32
input_height = 32
in_channel = 3
input_shape = (batch_size, in_channel, input_height, input_width)
env = GymEnv(
DummyDiscreteEnv(obs_dim=input_shape, action_dim=action_dim))
env = self._initialize_obs_env(env)
policy = DiscreteCNNPolicy(env_spec=env.spec,
hidden_channels=hidden_channels,
hidden_sizes=hidden_channels,
kernel_sizes=kernel_sizes,
strides=strides,
paddings=paddings,
padding_mode='zeros',
hidden_w_init=nn.init.ones_,
output_w_init=nn.init.ones_)
env.reset()
obs = env.step(1).observation
action, _ = policy.get_action(obs.flatten())
assert env.action_space.contains(int(action[0]))
assert env.action_space.n == action_dim
@pytest.mark.parametrize(
'action_dim, kernel_sizes, hidden_channels, strides, paddings', [
(3, (1, ), (32, ), (1, ), (0, )),
(3, (3, ), (32, ), (1, ), (0, )),
(3, (3, ), (32, ), (2, ), (0, )),
(3, (5, ), (12, ), (1, ), (2, )),
(3, (1, 1), (32, 64), (1, 1), (0, 0)),
(3, (3, 3), (32, 64), (1, 1), (0, 0)),
(3, (3, 3), (32, 64), (2, 2), (0, 0)),
])
def test_get_actions(self, action_dim, kernel_sizes, hidden_channels,
strides, paddings):
"""Test get_actions function."""
batch_size = 64
input_width = 32
input_height = 32
in_channel = 3
input_shape = (batch_size, in_channel, input_height, input_width)
env = GymEnv(
DummyDiscreteEnv(obs_dim=input_shape, action_dim=action_dim))
env = self._initialize_obs_env(env)
policy = DiscreteCNNPolicy(env_spec=env.spec,
hidden_channels=hidden_channels,
hidden_sizes=hidden_channels,
kernel_sizes=kernel_sizes,
strides=strides,
paddings=paddings,
padding_mode='zeros',
hidden_w_init=nn.init.ones_,
output_w_init=nn.init.ones_)
env.reset()
obs = env.step(1).observation
actions, _ = policy.get_actions([obs, obs, obs])
for action in actions:
assert env.action_space.contains(int(action[0]))
assert env.action_space.n == action_dim
@pytest.mark.parametrize(
'action_dim, kernel_sizes, hidden_channels, strides, paddings', [
(3, (1, ), (32, ), (1, ), (0, )),
(3, (3, ), (32, ), (1, ), (0, )),
(3, (3, ), (32, ), (2, ), (0, )),
(3, (5, ), (12, ), (1, ), (2, )),
(3, (1, 1), (32, 64), (1, 1), (0, 0)),
(3, (3, 3), (32, 64), (1, 1), (0, 0)),
(3, (3, 3), (32, 64), (2, 2), (0, 0)),
])
def test_is_pickleable(self, action_dim, kernel_sizes, hidden_channels,
strides, paddings):
"""Test if policy is pickable."""
batch_size = 64
input_width = 32
input_height = 32
in_channel = 3
input_shape = (batch_size, in_channel, input_height, input_width)
env = GymEnv(
DummyDiscreteEnv(obs_dim=input_shape, action_dim=action_dim))
env = self._initialize_obs_env(env)
policy = DiscreteCNNPolicy(env_spec=env.spec,
hidden_channels=hidden_channels,
hidden_sizes=hidden_channels,
kernel_sizes=kernel_sizes,
strides=strides,
paddings=paddings,
padding_mode='zeros',
hidden_w_init=nn.init.ones_,
output_w_init=nn.init.ones_)
env.reset()
obs = env.step(1).observation
output_action_1, _ = policy.get_action(obs.flatten())
p = cloudpickle.dumps(policy)
policy_pickled = cloudpickle.loads(p)
output_action_2, _ = policy_pickled.get_action(obs)
assert env.action_space.contains(int(output_action_1[0]))
assert env.action_space.contains(int(output_action_2[0]))
assert output_action_1.shape == output_action_2.shape
@pytest.mark.parametrize(
'action_dim, kernel_sizes, hidden_channels, strides, paddings', [
(3, (1, ), (32, ), (1, ), (0, )),
(3, (3, ), (32, ), (1, ), (0, )),
(3, (3, ), (32, ), (2, ), (0, )),
(3, (5, ), (12, ), (1, ), (2, )),
(3, (1, 1), (32, 64), (1, 1), (0, 0)),
(3, (3, 3), (32, 64), (1, 1), (0, 0)),
(3, (3, 3), (32, 64), (2, 2), (0, 0)),
])
def test_obs_unflattened(self, action_dim, kernel_sizes, hidden_channels,
strides, paddings):
"""Test if a flattened image obs is passed to get_action
then it is unflattened.
"""
batch_size = 64
input_width = 32
input_height = 32
in_channel = 3
input_shape = (batch_size, in_channel, input_height, input_width)
env = GymEnv(
DummyDiscreteEnv(obs_dim=input_shape, action_dim=action_dim))
env = self._initialize_obs_env(env)
env.reset()
policy = DiscreteCNNPolicy(env_spec=env.spec,
hidden_channels=hidden_channels,
hidden_sizes=hidden_channels,
kernel_sizes=kernel_sizes,
strides=strides,
paddings=paddings,
padding_mode='zeros',
hidden_w_init=nn.init.ones_,
output_w_init=nn.init.ones_)
obs = env.observation_space.sample()
action, _ = policy.get_action(env.observation_space.flatten(obs))
env.step(action)
| 7,865 |
src/tensor/op/logical/comparison/_operator.py
|
jedhsu/tensor
| 0 |
2170533
|
"""
*Comparison*
"""
from abc import ABCMeta
from .._operator import ArrayOperator
__all__ = ["Comparison"]
class Comparison(
ArrayOperator,
metaclass=ABCMeta,
):
pass
| 187 |
picotext/scripts/split_data.py
|
phiweger/picotext
| 0 |
2171900
|
'''
TODO:
- add subsample arg to create reduced test set
Aim:
Read in 2 fasta files, one holding positive examples of some sequence, and another holding negative examples.
Create a single fasta w/ a sensical header, e.g.
>id::label
sequence
Then split them into train/ dev/ test sets.
Tokenize.
Reshape so it can enter the NN.
'''
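# Note (added): despite the fasta-style ">id::label" header sketched above, the loop below
# writes plain CSV rows of the form "name,label,sequence", e.g. a train.csv line might look
# like "sp|Q15WI5|QUEA_PSEA6,toxin,<dayhoff-encoded sequence>"; the identifier is the first
# whitespace-separated token of the original fasta header.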
from collections import Counter
from pathlib import Path
import numpy as np
import screed
import torch
from tqdm import tqdm
from picotext.utils import encode_dayhoff
'''
mkdir -p tmp/data
mkdir tmp/processed
cp ToxClassifier/datasets/trainingsets/* tmp/data
'''
# TODO: Allow standard (IUPAC) amino acid code as well as Dayhoff
translate = True
splits = [0.8, 0.1, 0.1]
reformat = lambda x: x.split()[0]
outdir = 'tmp/processed'
labels = {
'toxin': 'tmp/data/dataset_pos.fa',
'negative': 'tmp/data/dataset_easy.fa'}
with open(Path(outdir) / 'train.csv', 'w+') as train_out, \
open(Path(outdir) / 'dev.csv', 'w+') as dev_out, \
open(Path(outdir) / 'test.csv', 'w+') as test_out:
for label, path in labels.items():
cnt = []
with screed.open(path) as file:
for read in file:
if reformat:
name = reformat(read.name)
# sp|Q15WI5|QUEA_PSEA6 S-adeno ...
else:
name = read.name
throw = np.random.multinomial(1, splits)
# [0, 0, 1] -- now in which position is 1?
group = ['train', 'dev', 'test'][list(throw).index(1)]
# groups[name] = group
cnt.append(group)
if translate:
sequence = encode_dayhoff(read.sequence)
if not sequence:
continue
else:
sequence = read.sequence
str_ = f'{name},{label},{sequence}\n'
if group == 'train':
train_out.write(str_)
elif group == 'dev':
dev_out.write(str_)
elif group == 'test':
test_out.write(str_)
else:
raise ValueError('A very specific bad thing happened.')
print(label, Counter(cnt))
# from collections import Counter
# print(Counter(cnt))
| 2,352 |
python/fibonacci_sample.py
|
renegadevi/scripts-and-snippets
| 0 |
2170695
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
def Fibonacci(num):
""" Create a list from 0 to num """
fib, values = lambda x: 1 if x <= 1 else fib(x-1) + fib(x-2), []
for i in range(0, num):
values.append(fib(i))
return values
try:
if len(sys.argv) > 1:
print(Fibonacci(int(float(sys.argv[1]))))
except ValueError:
exit("Use a valid number")
| 397 |
apuntes-y-recursos-didacticos/a03_flexion/code/shear_moment_diagram.py
|
JorgeDeLosSantos/curso_mecanica_de_materiales
| 0 |
2171881
|
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
P = 50e3
L = 5
x1 = np.linspace(0,L/2)
x2 = np.linspace(L/2,L)
V1 = P/2*np.ones_like(x1)
V2 = -P/2*np.ones_like(x2)
M1 = P*x1/2
M2 = -P*x2/2 + P*L/2
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
#~ plt.fill_between(x,V)
ax1.fill_between(x1,V1, color="#1E90FF")
ax1.fill_between(x2,V2, color="#1E90FF")
ax2.fill_between(x1,M1, color="#1E90FF")
ax2.fill_between(x2,M2, color="#1E90FF")
#~ plt.fill_between(x,M)
for ax in (ax1,ax2):
ax.axhline(0, color="k")
ax.axvline(0, color="k")
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.xaxis.set_visible(False)
ax.grid(ls=":")
#~ ax.yaxis.set_visible(False)
plt.savefig("shear_moment_02.pdf", transparent=True)
plt.show()
| 946 |
TicTacToe minimax.py
|
S-t-a-l-k-e-r-py/TicTacToe-Game
| 0 |
2171890
|
board = [["1", "2", "3"], ["4", "5", "6"], ["7", "8", "9"]]
best_moves = [["1", "2", "3"], ["4", "5", "6"], ["7", "8", "9"], ["1", "4", "7"],
["2", "5", "8"], ["3", "6", "9"], ["1", "5", "9"], ["3", "5", "7"]]
scores = {"O": 1,
"X": -1,
"tie": 0
}
ai = "O"
human = "X"
def check_best_move():
best_score = -1
move = None
for i in range(3):
for j in range(3):
if board[i][j] != "X" and board[i][j] != "O":
temp = board[i][j]
board[i][j] = ai
score = minimax(board, 0, False)
board[i][j] = temp
if score > best_score:
best_score = score
move = (i, j)
add_to_bm(board[move[0]][move[1]], ai)
print("\nComputer's turn is:", board[move[0]][move[1]])
board[move[0]][move[1]] = ai
def equals(a, b, c):
return a == b and b == c and (a == "X" or a == "O")
def check_winner():
winner = None
# Horizontal
for i in range(3):
if equals(board[i][0], board[i][1], board[i][2]):
winner = board[i][0]
# Vertical
for i in range(3):
if equals(board[0][i], board[1][i], board[2][i]):
winner = board[0][i]
# Diagonal
if equals(board[0][0], board[1][1], board[2][2]):
winner = board[0][0]
if equals(board[2][0], board[1][1], board[0][2]):
winner = board[2][0]
open_spots = 0
for i in range(3):
for j in range(3):
if board[i][j] != "X" and board[i][j] != "O":
open_spots += 1
if winner is None and open_spots == 0:
return "tie"
else:
return winner
def minimax(board, depth, is_max):
result = check_winner()
if result is not None:
return scores[result]
if is_max:
best_score = -1
for i in range(3):
for j in range(3):
if board[i][j] != "X" and board[i][j] != "O":
temp = board[i][j]
board[i][j] = ai
score = minimax(board, depth + 1, False)
board[i][j] = temp
best_score = max(score, best_score)
return best_score
else:
best_score = 1
for i in range(3):
for j in range(3):
if board[i][j] != "X" and board[i][j] != "O":
temp = board[i][j]
board[i][j] = human
score = minimax(board, depth + 1, True)
board[i][j] = temp
best_score = min(score, best_score)
return best_score
def print_board():
print("|---|---|---|")
print("|", board[0][0], "|", board[0][1], "|", board[0][2], "|")
print("|-----------|")
print("|", board[1][0], "|", board[1][1], "|", board[1][2], "|")
print("|-----------|")
print("|", board[2][0], "|", board[2][1], "|", board[2][2], "|")
print("|---|---|---|")
def add_to_bm(pos, sym):
for i in range(8):
for j in range(3):
if best_moves[i][j] == str(pos):
best_moves[i][j] = str(sym)
def check_win():
arr1, arr2 = ["X", "X", "X"], ["O", "O", "O"]
for winner in best_moves:
if winner == arr1:
print("\nYou are win! Thanks for playing.")
print_board()
return False
if winner == arr2:
print("\nComputer win! Thanks for playing.")
print_board()
return False
c = 0
for move in board[0:3]:
for i in move:
if i == "X" or i == "O":
c += 1
if c == 9:
print("It's Tie")
print_board()
return False
return True
def main():
print("Welcome to Tic Tac Toe.\n--------------------------------")
print_board()
print("X's will play first")
while True:
try:
num_input = int(input("\nEnter a slot number to place X in: "))
if num_input < 1 or num_input > 9:
print("\nInvalid input; re-enter slot number: ")
continue
except:
print("\nInvalid input; re-enter slot number: ")
continue
i = (num_input - 1) // 3
j = num_input - i * 3 - 1
if board[i][j] != "X" and board[i][j] != "O":
board[i][j] = human
add_to_bm(num_input, human)
if check_win():
check_best_move()
if check_win():
print_board()
else:
break
else:
print("\nSlot already taken; re-enter slot number: ")
main()
| 4,671 |
import_users.py
|
getupcloud/referral
| 4 |
2171965
|
from referralapi import app
from database import configure_database
configure_database(app)
from models import User, ReferralProgram
file = open('../users.txt','r')
rp = ReferralProgram.select().first()
for line in file.readlines():
dados = [x.strip() for x in line.split("|") if x.strip()]
print("Saving {0} {1}".format(*dados))
u = User()
u.hash = dados[0]
u.referral_program = rp
u.user_indicator = None
u.save()
| 447 |
src/dp_translation/model_mixin.py
|
dataPuzzler/dp_translation
| 0 |
2171889
|
from .key_logic import TranslationKeyBuilder
from typing import List
from abc import abstractmethod
class TranslatableModelMixin:
def get_logical_key_for_attr_value(self, attr_name: str):
return TranslationKeyBuilder.construct_translation_key(
context=self.get_translation_context(),
concept=type(self).__name__,
instance_id=self.choose_instance_id(),
attr=attr_name)
def choose_instance_id(self) -> str:
if hasattr(self, "name"):
return self.name
else:
return str(self.id )
@classmethod
def get_logical_key_for_attr(cls, attr_name):
assert attr_name in cls.get_translateable_attrs()
return TranslationKeyBuilder.construct_translation_key(
context=cls.get_translation_context(),
concept=cls.__name__,
instance_id=TranslationKeyBuilder.NULL_VALUE,
attr=attr_name)
@classmethod
def get_logical_key_for_concept(cls):
return TranslationKeyBuilder.construct_translation_key(
context=cls.get_translation_context(),
concept=cls.__name__,
instance_id=TranslationKeyBuilder.NULL_VALUE,
attr=TranslationKeyBuilder.NULL_VALUE)
@classmethod
@abstractmethod
def get_translation_context(cls) -> str:
pass
@classmethod
@abstractmethod
def get_translateable_attrs(cls) -> List[str]:
pass
| 1,474 |
Data Science With Python/21-deep-learning-in-python/01-basics-of-deep-learning-and-neural-networks/01-coding-the-forward-propagation-algorithm.py
|
aimanahmedmoin1997/DataCamp
| 3 |
2171492
|
'''
Coding the forward propagation algorithm
In this exercise, you'll write code to do forward propagation (prediction) for your first neural network:
Ch1Ex4
Each data point is a customer. The first input is how many accounts they have, and the second input is how many children they have. The model will predict how many transactions the user makes in the next year. You will use this data throughout the first 2 chapters of this course.
The input data has been pre-loaded as input_data, and the weights are available in a dictionary called weights. The array of weights for the first node in the hidden layer are in weights['node_0'], and the array of weights for the second node in the hidden layer are in weights['node_1'].
The weights feeding into the output node are available in weights['output'].
NumPy will be pre-imported for you as np in all exercises.
INSTRUCTIONS
100XP
Calculate the value in node 0 by multiplying input_data by its weights weights['node_0'] and computing their sum. This is the 1st node in the hidden layer.
Calculate the value in node 1 using input_data and weights['node_1']. This is the 2nd node in the hidden layer.
Put the hidden layer values into an array. This has been done for you.
Generate the prediction by multiplying hidden_layer_outputs by weights['output'] and computing their sum.
Hit 'Submit Answer' to print the output!
'''
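# Added so this snippet runs on its own (not part of the original exercise, where np,
# input_data and weights are pre-loaded): the values below are arbitrary stand-ins.
import numpy as np
input_data = np.array([3, 5])  # e.g. 3 accounts, 5 children
weights = {'node_0': np.array([2, 4]), 'node_1': np.array([4, -5]), 'output': np.array([2, 7])}
# With these numbers: node 0 -> 3*2 + 5*4 = 26, node 1 -> 3*4 + 5*(-5) = -13,
# output -> 26*2 + (-13)*7 = -39.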
# Calculate node 0 value: node_0_value
node_0_value = (input_data * weights['node_0']).sum()
# Calculate node 1 value: node_1_value
node_1_value = (input_data * weights['node_1']).sum()
# Put node values into array: hidden_layer_outputs
hidden_layer_outputs = np.array([node_0_value, node_1_value])
# Calculate output: output
output = (hidden_layer_outputs * weights['output']).sum()
# Print output
print(output)
| 1,797 |
2021/15.py
|
bernikr/advent-of-code
| 1 |
2170729
|
import math
from collections import defaultdict
from itertools import product
from aoc_utils import Vec, dirs4, PriorityQueue
from aocd import get_data
def find_shortest_path(mapp):
start = Vec(0, 0)
goal = max(mapp, key=sum)
open_set = PriorityQueue()
open_set.put(start, 0)
g_score = defaultdict(lambda: math.inf, {start: 0})
while open_set:
current = open_set.get()
if current == goal:
return g_score[current]
for neighbor in (current + d for d in dirs4 if (current + d) in mapp):
tentative_g_score = g_score[current] + mapp[neighbor]
if tentative_g_score < g_score[neighbor]:
g_score[neighbor] = tentative_g_score
f_score = tentative_g_score + (neighbor - goal).manhatten()
open_set.put(neighbor, f_score)
def part1(inp):
return find_shortest_path(inp)
def part2(inp):
mapp = {}
lx, ly = max(x for x, _ in inp) + 1, max(y for _, y in inp) + 1
for (x, y), v in inp.items():
for a, b in product(range(5), repeat=2):
nv = (v + a + b) % 9
mapp[Vec(x + a * lx, y + b * ly)] = 9 if nv == 0 else nv
return find_shortest_path(mapp)
if __name__ == '__main__':
data = get_data(day=15, year=2021)
inp = {Vec(x, y): int(v) for y, l in enumerate(data.splitlines()) for x, v in enumerate(l)}
print(part1(inp))
print(part2(inp))
| 1,429 |
app.py
|
Auto-bot-Channel/text-to-video
| 6 |
2169375
|
import os
import time
os.system('python3 audio.py')
time.sleep(1)
print('audio file made')
os.system('python3 req.py')
time.sleep(1)
print('alligned files')
os.system('python3 make.py')
time.sleep(1)
os.system('rm -r downl outd')
time.sleep(1)
os.system('rm output.mp4 list.txt')
| 282 |
tests/utils/test_metrics.py
|
fariasfc/solo-learn
| 3 |
2170272
|
import torch
from solo.utils.metrics import accuracy_at_k
def test_accuracy_at_k():
b, c = 32, 100
output = torch.randn(b, c)
target = torch.randint(low=0, high=c, size=(b,))
acc1, acc5 = accuracy_at_k(output, target)
assert isinstance(acc1, torch.Tensor)
assert isinstance(acc5, torch.Tensor)
| 321 |
models/train_classifier.py
|
a1pat/Disaster-Pipeline
| 0 |
2170996
|
import sys
import numpy as np
import pandas as pd
import pickle
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import word_tokenize
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
from sqlalchemy import create_engine
from sklearn.pipeline import Pipeline
from sklearn.metrics import precision_recall_fscore_support
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.linear_model import RidgeClassifier
from sklearn.multioutput import MultiOutputClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
def load_data(database_filepath):
'''
Reads from a specified database and splits the data into messages and categories
Input:
database_filepath: (string) database file path
Returns:
        X: (pandas dataframe) messages (to be converted into features)
y: (pandas dataframe) categories (labels)
y.columns: (list of string) category (label) names
'''
# read in the file
table_name = 'messages'
engine = create_engine('sqlite:///' + database_filepath)
df = pd.read_sql_table(table_name, engine)
# define features and label arrays
X = df['message']
y = df.drop(['id','message','original','genre'], axis=1).copy()
return X, y, y.columns
def tokenize(text):
'''
Tokenizes text
Input:
text: (string) text to be tokenized
Return:
tokens: (list of strings) tokens
'''
stop_words = stopwords.words('english')
lemmatizer = WordNetLemmatizer()
# normalize case and remove punctuation
#print(text)
text = re.sub(r'[^a-zA-Z0-9]', ' ', text.lower())
#print(text)
# tokenize text
tokens = word_tokenize(text)
# lemmatize and remove stop words
tokens = [lemmatizer.lemmatize(word).strip() for word in tokens if word not in stop_words]
#print(tokens)
return tokens
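# Illustrative example of the tokenizer's behaviour (assumed, not in the original file):
#   tokenize("Water and food are needed in the flooded areas!")
#   -> ['water', 'food', 'needed', 'flooded', 'area']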
def build_model():
'''
Creates a text processing and model pipeline with grid search
Inputs: none
Return: (object) grid search
'''
# define parameters for grid search
parameters = {'clf__estimator__alpha': [0.5,1,1.5,2]}
# create a text processing and model pipeline
pipeline = Pipeline([
('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer()),
('clf', MultiOutputClassifier(RidgeClassifier()))
])
model = GridSearchCV(pipeline, param_grid=parameters)
return model
def make_classification_report(y_true, y_pred):
'''
Create a pandas dataframe containing the precision, recall, fscore and support metrics
for each category
Inputs:
y_true: label values in the data set (ground truth)
y_pred: label values predicted by the model
Return:
df: (pandas dataframe) metrics
'''
df = pd.DataFrame(np.array(precision_recall_fscore_support(y_true, y_pred)).T)
df.columns = ['precision','recall','fscore','support']
df['category'] = y_true.columns
df = df.reindex(columns=['category' , 'precision', 'recall', 'fscore', 'support'])
print(df)
return df
def evaluate_model(model, X_test, Y_test, category_names):
'''
Evaluates model predictions versus test data
Inputs:
model: (object) model
        X_test: (dataframe) test features
        Y_test: (dataframe) test labels (ground truth)
        category_names: (list of strings) category (label) names
Return: None
'''
Y_test_pred = model.predict(X_test)
make_classification_report(Y_test, Y_test_pred)
return
def save_df(df, database_filepath, table_name):
'''
Save pandas dataframe to the specified database and table name.
If the table exists, it is deleted.
Inputs:
df: (pandas dataframe) dataframe to be saved
database_filepath: (string) database location
table_name: (string) table name for saving the statistics
Returns: None
'''
engine = create_engine('sqlite:///' + database_filepath)
df.to_sql(table_name, engine, index=False, if_exists='replace')
return
def save_model(model, model_filepath):
'''
Export model to a pickle file
Inputs:
        model: (object) trained model
model_filepath: (string) pickle file path
Returns: None
'''
# export model as a pickle file
with open(model_filepath, 'wb') as f:
pickle.dump(model, f)
return
def main():
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print('Loading data...\n DATABASE: {}'.format(database_filepath))
X, Y, category_names = load_data(database_filepath)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
print('Building model...')
model = build_model()
print('Training model...')
model.fit(X_train, Y_train)
print('Evaluating model...')
evaluate_model(model, X_test, Y_test, category_names)
print('Saving model...\n MODEL: {}'.format(model_filepath))
save_model(model, model_filepath)
#####
# additionally, save three tables to the database for plotting on the web page:
# 1. training statistics
# 2. test statistics
# 3. the percentage of non-zero values for each category in the dataset
print('Saving training stats...\n DATABASE: {}'.format(database_filepath))
save_df(make_classification_report(Y_train, model.predict(X_train)), database_filepath, 'train_stats')
print('Saving test stats...\n DATABASE: {}'.format(database_filepath))
save_df(make_classification_report(Y_test, model.predict(X_test)), database_filepath, 'test_stats')
df_non_zero = pd.DataFrame(list(Y.mean()), columns=['pct_not_zero'])
df_non_zero['category'] = Y.columns
print('Saving non-zero stats...\n DATABASE: {}'.format(database_filepath))
save_df(df_non_zero, database_filepath, 'pct_non_zero')
#####
print('Trained model saved!')
else:
print('Please provide the filepath of the disaster messages database '\
'as the first argument and the filepath of the pickle file to '\
'save the model to as the second argument. \n\nExample: python '\
'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
main()
| 6,649 |
kbr/mq_utils.py
|
brugger/kbr-tools
| 1 |
2171760
|
#!/usr/bin/env python3
"""
<NAME> (03 Apr 2019), contact: <EMAIL>
"""
import sys
import pprint
pp = pprint.PrettyPrinter(indent=4)
import pika
import kbr.log_utils as logger
class Mq(object):
def __init__(self):
self.connection = None
self.channel = None
self.exchange = None
self.channels = []
self.uri = None
def connect(self, uri:str, exchange:str='default', exchange_type:str='direct', prefetch_count=0):
logger.debug(f'connecting to {uri} exchange: {exchange} with prefetch: {prefetch_count}')
self.connection = pika.BlockingConnection( pika.connection.URLParameters(uri) )
self.channel = self.connection.channel()
self.exchange = exchange
self.uri = uri
self.channel.exchange_declare(exchange=self.exchange, exchange_type=exchange_type, durable=True)
self.channel.basic_qos(prefetch_count=prefetch_count)
def disconnect(self):
self.channel.close()
def _check_channel(self, name:str):
if name not in self.channels:
result = self.channel.queue_declare(queue=name, durable=True)
self.channel.queue_bind(exchange=self.exchange,
queue=name,
routing_key=name)
self.channels.append( name )
def publish(self, body:str, route:str='default'):
try:
self._check_channel( route)
self.channel.basic_publish( exchange=self.exchange, routing_key=route, body=body, properties=pika.BasicProperties( delivery_mode=2))
except:
logger.info( '==========================')
logger.info( 'Reconnecting to RMQ ......')
logger.info( '==========================')
self.connect(self.uri, self.exchange)
self._check_channel( route)
self.channel.basic_publish( exchange=self.exchange, routing_key=route, body=body, properties=pika.BasicProperties( delivery_mode=2))
def consume(self, route:str, callback):
try:
self._check_channel( route)
self.channel.basic_consume(queue=route, on_message_callback=callback)
self.channel.start_consuming()
except Exception as e:
print( e )
def queue_length(self, queue:str=None):
result = self.channel.queue_declare(queue=queue, durable=True, passive=True)
return result.method.message_count
def flush(self, queue:str='default'):
self.channel.queue_purge(queue)
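# --- Hypothetical usage sketch (not part of the original module); the AMQP URI,
# exchange and queue names below are placeholders.
if __name__ == '__main__':
    mq = Mq()
    mq.connect('amqp://guest:guest@localhost:5672/', exchange='kbr')
    mq.publish('{"cmd": "ping"}', route='default')
    print('messages queued:', mq.queue_length('default'))
    mq.flush('default')
    mq.disconnect()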
| 2,626 |
trivio/http.py
|
CuzImSyntax/TrivIO
| 2 |
2170285
|
import aiohttp
from .enums import Type, Category, Difficulty
from .exceptions import NoResultFound, InvalidParameter
class Url:
"""Represents a Url needed for making a request to the api
Parameters:
is_command: :class:`bool`
Whether the Url should be for a command call.
_type: :class:`Type`
Determines whether returned questions should be multiple choice or yes/no questions.
        amount: :class:`int`
            The number of questions to return; must be between 1 and 50.
        category: Optional[:class:`Category`]
            The category of the returned questions. If left empty, questions come from random categories.
        difficulty: Optional[:class:`Difficulty`]
            The difficulty of the returned questions. If left empty, questions of all difficulties are returned.
"""
Base_url = "https://opentdb.com/api.php?encode=base64"
TOKEN_URL = "https://opentdb.com/api_token.php"
def __init__(self,
is_command: bool,
_type: Type,
amount: int = None,
category: Category = None,
difficulty: Difficulty = None):
self.is_command = is_command
self._type = _type
self.amount = amount
self.category = category
self.difficulty = difficulty
@property
def url(self):
""":class:`str`: Returns the url as an string."""
if self.is_command:
url = self.TOKEN_URL
url += f"?command={self._type.value}"
return url
url = self.Base_url
url += f"&type={self._type.value}&amount={self.amount}"
if self.category:
url += f"&category={self.category.value}"
if self.difficulty:
url += f"&difficulty={self.difficulty.value}"
return url
class HttpClient:
"""Represents an HTTP client to send requests to the Open Trivia DB
Parameters:
utils: :class:`.Utils`
        An instance of the Utils class
use_token: Optional[:class:`bool`]
Determines whether the client should use a session token or not.
"""
def __init__(self, utils, use_token: bool):
self.utils = utils
self.use_token = use_token
self.token = None
self._session = aiohttp.ClientSession()
async def get_token(self):
"""Gets a token from the opentdb api."""
#Create a url object
url = Url(True, Type.REQUEST)
#get the token
result = await self.request(url)
return result["token"]
async def request(self, url: Url):
"""|coro|
Makes a http request to the opentdb api.
Parameters:
url: :class:`Url`
The url object to make the request with.
Returns:
:class:`list`
The requested questions in a list.
"""
_url = url.url
#Check if we should use a token
if not url.is_command:
if self.use_token:
self.token = await self.get_token()
if self.token:
_url += f"&token={self.token}"
#make the request
async with self._session.request("get", _url) as r:
data = await r.json()
        # Checking the response codes from the api
        if data["response_code"] == 1:
            raise NoResultFound
        elif data["response_code"] == 2:
            raise InvalidParameter
        elif data["response_code"] in (3, 4):
            self.token = await self.get_token()
            return await self.request(url)
        # Decode the data if necessary
        if "response_message" in data:
            return data
        return self.utils.build_list(data)
async def close(self):
"""|coro|
Close the ClientSession if existing"""
if self._session:
await self._session.close()
| 4,007 |
TopicModeler.py
|
dkalamar/TopicModeler
| 0 |
2171967
|
import numpy as np
import pandas as pd
from nltk import sent_tokenize, word_tokenize
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, ENGLISH_STOP_WORDS
from sklearn.metrics.pairwise import pairwise_distances as pair_dist
from sklearn.cluster import DBSCAN,KMeans
from sklearn import metrics
from sklearn.decomposition import TruncatedSVD
from glob import glob
from bs4 import BeautifulSoup as bsoup
from time import time
import matplotlib.pyplot as plt
import re, itertools, pickle, scipy, json
class Clusterer:
def __init__(self,k=8,is_tfidf=True):
self.is_tfidf=is_tfidf
self.vect=CountVectorizer(stop_words=ENGLISH_STOP_WORDS.union(self._load_stops()))
self.tfidf=TfidfTransformer()
self.cluster=KMeans(n_clusters=k)
self.stats=dict()
def _load_stops(self):
with open('data/stops.json','rb') as f:
self.stops=json.load(f)
return self.stops
def fit(self):
start=time()
self.sparse = self.vect.fit_transform(self.corpus)
if self.is_tfidf:
self.sparse = self.tfidf.fit_transform(self.sparse)
self.cluster.fit(self.sparse)
self.stats['time']=time()-start
def transform_sentences(self):
sentences = list(itertools.chain.from_iterable([sent_tokenize(passage) for passage in self.corpus]))
self.sentences = np.array([s for s in sentences if len(s.split(' '))>5])
self.sent_sparse = self.vect.transform(self.sentences)
if self.is_tfidf:
self.sent_sparse = self.tfidf.transform(self.sent_sparse)
def calc_stats(self):
self.stats['n_labels']=len(set(self.cluster.labels_)) - (1 if -1 in self.cluster.labels_ else 0)
self.stats['silhouette'] = metrics.silhouette_score(self.sparse, self.cluster.labels_,
metric='euclidean',
sample_size=len(self.corpus))
self.stats['mean_centroid_dist'] = pd.DataFrame(pair_dist(self.cluster.cluster_centers_,self.cluster.cluster_centers_)).mean(0).to_dict()
return self.stats
def save_corpus(self,path):
with open(path,'wb') as fp:
pickle.dump((self.corpus, self.books),fp)
def load_corpus(self,path):
with open(path,'rb') as fp:
self.corpus, self.books=pickle.load(fp)
def _read_text(self,path):
try:
page = bsoup(open(path,'rb').read(),'html.parser')
text = [div.text for div in page.find('div',class_='main').find_all('div')[1:-2]]
return re.sub(' [0-9]+\\xa0','',''.join(text))
except:
pass
def write_text(self):
paths=np.array(glob('eng-web_html/*.htm'))
texts = np.array(list(map(self._read_text,paths)))
self.corpus = texts[texts != np.array(None)]
self.books = np.array([re.sub('(.*/|[0-9]+.htm)','',x) for x in paths[texts != np.array(None)]])
def analyze(self):
self.calc_stats()
self._group_passages()
self.key = self._key_sentences()
self.links = self._linking_sentences().to_dict()
self.topics = self.get_topics()
def write_results(self):
for i in range(len(self.sections)):
result={
'key_sentences':self.key[i].tolist(),
'linking_sentences': {k:v.tolist() for k,v in self.links.items() if str(i) in k},
'topics': self.topics[i]}
with open(f'results/cluster_{i}.json','w') as fp:
json.dump(result,fp ,sort_keys=True,indent=4)
with open(f'results/stats.json','w') as fp:
json.dump(self.stats,fp ,sort_keys=True,indent=4)
def _group_passages(self):
self.sections=list()
self.transform_sentences()
df = pd.DataFrame(self.cluster.transform(self.sent_sparse))
for i in set(self.cluster.labels_):
self.sections.append(self.sentences[df.idxmin(1)==i])
def _key_sentences(self,k=5):
key_phrases=list()
for sect in self.sections:
ratings = self.pair_sim(sect).sum(0)
indices = list(ratings.sort_values().index[:k])
key_phrases.append(sect[indices])
return key_phrases
def _linking_sentences(self,k=5):
df = pd.DataFrame(self.cluster.transform(self.sent_sparse))
df = df.apply(lambda x: x.sort_values().head(2),1).fillna(0)
results = pd.DataFrame()
results['sums'] = df.sum(1)
results['connections'] = df.apply(lambda x: ''.join(str(np.array(df.columns[x.values>0]))), 1)
results['sentences'] = self.sentences
return results.groupby('connections').apply(lambda y: y.sentences[:3].values)
def get_edges(self):
df = pd.DataFrame(self.cluster.transform(self.sent_sparse))
df = df.apply(lambda x: x.sort_values().head(2),1).fillna(0)
return df.apply(lambda x: tuple(df.columns[x.values>0]), 1)
def pair_sim(self,texts):
vectors = self.vect.transform(texts)
if self.is_tfidf:
vectors = self.tfidf.transform(vectors)
return pd.DataFrame(pair_dist(vectors,vectors,'euclidean'))
def get_topics(self,n=5):
feature_names = self.vect.get_feature_names()
topics=list()
for topic_idx, topic in enumerate(self.cluster.cluster_centers_):
topics.append([feature_names[i] for i in topic.argsort()[:-n - 1:-1]])
return topics
def plot(self):
data = pd.DataFrame(TruncatedSVD(5).fit_transform(self.sparse))
colors = ["red", "orange", "yellow", "green", "blue", "purple", "pink", "blue"]
a=0
b=1
df = pd.DataFrame(self.cluster.transform(self.sent_sparse))
for i in set(self.cluster.labels_):
x=data[df.idxmin(1)==i]
plt.scatter(x[a],x[b],c=colors[i])
plt.show()
if __name__ == "__main__":
    print('\rInstantiating...',end='')
c=Clusterer(6)
print('\rLoading Corpus...',end='')
c.load_corpus('data/bible')
print('\rFitting Models...',end='')
c.fit()
print('\rAnalyzing...',end='')
c.analyze()
print('\rPrinting Results...')
c.write_results()
| 6,226 |
mundo-2/ex-056.py
|
guilhermesm28/python-curso-em-video
| 0 |
2171706
|
# Develop a program that reads the name, age and sex of 4 people. At the end of the program, show: the group's average age, the name of the oldest man, and how many women are under 20 years old.
print('-' * 100)
print('{: ^100}'.format('EXERCISE 056 - COMPLETE ANALYZER'))
print('-' * 100)
soma = 0
idade_velho = 0
nome_velho = ''
qtd_f_menor = 0
feminino = 0
for i in range(1, 5):
    print(f'\nPerson {i}')
    nome = str(input('Name: ')).strip().title()
    idade = int(input('Age: '))
    sexo = str(input('Sex (M/F): ')).strip().upper()
    soma += idade
    if i == 1 and sexo[:1] == 'M':
        idade_velho = idade
        nome_velho = nome
    if idade > idade_velho and sexo[:1] == 'M':
        idade_velho = idade
        nome_velho = nome
    if sexo[:1] == 'F' and idade < 20:
        qtd_f_menor += 1
        feminino = 1
media = soma / 4
print(f'\nThe average age of the group is {media:.2f}!')
if nome_velho != '':
    print(f'The oldest man is {nome_velho}, at {idade_velho} years old!')
if feminino == 1:
    if qtd_f_menor == 1:
        print(f'{qtd_f_menor} woman is under 20 years old!')
    else:
        print(f'{qtd_f_menor} women are under 20 years old!')
print('-' * 100)
input('Press ENTER to exit...')
| 1,249 |
data/missing_features.py
|
labs14-investment-risk-analysis/Data-Science
| 0 |
2170854
|
from decouple import config
from fin_data_fundamentals import find_fundamentals
from fin_data import DailyTimeSeries
from alpha_vantage.foreignexchange import ForeignExchange
from alpha_vantage.techindicators import TechIndicators
from alpha_vantage.timeseries import TimeSeries
import pandas as pd
import numpy as np
import quandl
import datetime
import warnings
def financials_available(symbol):
    alpha_vantage_key = config('ALPHA_VANTAGE')
    ts = TimeSeries(key=alpha_vantage_key,
                    output_format='pandas')
    data, meta_data = ts.get_daily_adjusted(symbol=symbol, outputsize='compact')
    data = data.drop(columns='8. split coefficient')
    return(data.columns)
def technicals_available(symbol):
    alpha_vantage_key = config('ALPHA_VANTAGE')
    indicators = np.load('technicals_list.npy')
    have_techs = []
    ti = TechIndicators(key=alpha_vantage_key, output_format='pandas')
    for ind in indicators:
        try:
            got_techs = getattr(ti, ind)(symbol=symbol)
        except:
            continue
        if got_techs[0].isnull().sum().values[0] == 0:
            have_techs.append(got_techs[1]['2: Indicator'])
    return(have_techs)
def macros_available(symbol):
macros = ['housing_index', 'confidence_index', 'trade_index', 'longterm_rates', 'shortterm_rates']
dts = DailyTimeSeries(symbol)
df = dts.initiate()
df = dts.add_macro(df, macros)
macro_na = []
for ma in df.columns:
| 1,425 |
proxies/stubs/python/GeneratedTest.py
|
ChaosGroup/json-ws
| 29 |
2170750
|
#
# Test API 1.0
#
# Part of the JSON-WS library - Python Proxy
# Copyright (c) 2014 ChaosGroup. All rights reserved.
#
# This code uses the following libraries:
# - autobahn.asyncio.websocket (https://pypi.python.org/pypi/autobahn/0.9.3)
#
# For asyncio and enum support in Python <= 3.3 install trollius and enum34:
# - https://pypi.python.org/pypi/trollius/1.0.2
# - https://pypi.python.org/pypi/enum34/1.0.3
from datetime import datetime
from rpctunnel import RpcTunnel, Optional, Type, Enum
class GeneratedTest:
'''
Proxy for json-ws web services. Instances can be used as context managers.
'''
RenderMode = Enum('RenderMode', {
'Production' : -1,
'RtCpu' : 0,
'RtGpuCuda' : 5,
})
RenderOptions = Type('RenderOptions', {
'width' : int,
'height' : int,
'renderMode' : 'RenderMode',
}, lambda: GeneratedTest)
DefaultArray = Type('DefaultArray', {
'property' : [str],
}, lambda: GeneratedTest)
class ns1:
class sub1:
class sub2:
def __init__(self, root):
self.root = root
def method1(self):
return self.root._rpc('ns1.sub1.sub2.method1', [], return_type=None)
def __init__(self, root):
self.root = root
self.sub2 = self.sub2(root)
def __init__(self, root):
self.root = root
self.sub1 = self.sub1(root)
def method1(self):
return self.root._rpc('ns1.method1', [], return_type=str)
class ns2:
class sub1:
class sub2:
def __init__(self, root):
self.root = root
def method1(self):
return self.root._rpc('ns2.sub1.sub2.method1', [], return_type=None)
def __init__(self, root):
self.root = root
self.sub2 = self.sub2(root)
def __init__(self, root):
self.root = root
self.sub1 = self.sub1(root)
def __init__(self, url):
'''
Args:
url (string): The url of the web service
'''
# RpcTunnel
self._rpc = RpcTunnel(url)
# The default transport is HTTP
self.useHTTP()
self.ns1 = self.ns1(self)
self.ns2 = self.ns2(self)
def sum(self, a, b):
'''
Some test method example, does int sum
Args:
a (int)
b (int)
Returns:
int
'''
return self._rpc('sum', [a, b], return_type=int)
def sumReturn(self):
return self._rpc('sumReturn', [], return_type=None)
def echo(self, a):
'''
Args:
a (self.RenderOptions)
Returns:
self.RenderOptions
'''
return self._rpc('echo', [a], return_type=self.RenderOptions)
def echoObject(self, a):
'''
Args:
a (object)
Returns:
dict
'''
return self._rpc('echoObject', [a], return_type=dict)
def throwError(self):
return self._rpc('throwError', [], return_type=int)
def testMe(self):
return self._rpc('testMe', [], return_type=None)
def testMe1(self):
return self._rpc('testMe1', [], return_type=None)
def testMe2(self, a):
'''
A sample method.
Args:
a (str): A simple string parameter.
Returns:
str
'''
return self._rpc('testMe2', [a], return_type=str)
def testMe3(self):
return self._rpc('testMe3', [], return_type=None)
def testMe4(self):
return self._rpc('testMe4', [], return_type=None)
def TestDefaultArray(self, p):
'''
Args:
p (self.DefaultArray)
'''
return self._rpc('TestDefaultArray', [p], return_type=None)
def TestUrl(self, u):
'''
Args:
u (str)
Returns:
str
'''
return self._rpc('TestUrl', [u], return_type=str)
def getRenderOptions(self):
return self._rpc('getRenderOptions', [], return_type=[self.RenderOptions])
def echoStringAsBuffer(self, theString):
'''
Args:
theString (str)
Returns:
bytearray
'''
return self._rpc('echoStringAsBuffer', [theString], return_type=bytearray)
def getBufferSize(self, buffer):
'''
Args:
buffer (bytearray)
Returns:
int
'''
return self._rpc('getBufferSize', [buffer], return_type=int)
def returnFrom0ToN(self, n):
'''
Args:
n (int)
Returns:
[int]
'''
return self._rpc('returnFrom0ToN', [n], return_type=[int])
def optionalArgs(self, required, p1=Optional(int), p2=Optional(int)):
'''
Args:
required (bool)
p1 (Optional(int))
p2 (Optional(int))
'''
return self._rpc('optionalArgs', [required, p1, p2], return_type=None)
def sumArray(self, ints):
'''
Args:
ints ([int])
Returns:
int
'''
return self._rpc('sumArray', [ints], return_type=int)
def testAny(self, a):
'''
Args:
a (object)
Returns:
object
'''
return self._rpc('testAny', [a], return_type=object)
def getSeconds(self, timeParam):
'''
Args:
timeParam (datetime)
Returns:
int
'''
return self._rpc('getSeconds', [timeParam], return_type=int)
def getNow(self):
return self._rpc('getNow', [], return_type=datetime)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
def useHTTP(self):
self._rpc.useHTTP()
def useWS(self):
self._rpc.useWS()
def onTestEvent(self, callback=None):
self._rpc.event('testEvent', callback, return_type=int)
def onTestEvent2(self, callback=None):
self._rpc.event('testEvent2', callback, return_type=[GeneratedTest.RenderOptions])
def onTestEvent3(self, callback=None):
self._rpc.event('testEvent3', callback, return_type=dict)
def onTestEvent4(self, callback=None):
self._rpc.event('testEvent4', callback, return_type=bool)
def onTestBinaryEvent(self, callback=None):
self._rpc.event('testBinaryEvent', callback, return_type=bytearray)
def onNs1_testEvent1(self, callback=None):
self._rpc.event('ns1.testEvent1', callback, return_type=None)
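# --- Hypothetical usage sketch (not part of the generated proxy); the endpoint
# URL is a placeholder and assumes a running json-ws service exposing this API.
if __name__ == '__main__':
    with GeneratedTest('http://localhost:3000/endpoint/1.0') as api:
        print(api.sum(2, 3))  # plain RPC call over the default HTTP transport
        api.useWS()  # switch the tunnel to WebSocket
        api.onTestEvent(lambda n: print('testEvent fired with', n))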
| 6,731 |
Back-End/Python/Standard Libraries/SYS/sys.getsizeof.py
|
ASHISHKUMAR2411/Programming-CookBook
| 25 |
2169096
|
import sys
prev = 0
for i in range(11):
c = tuple(range(i+1))
size_c = sys.getsizeof(c)
delta, prev = size_c - prev, size_c
print(f'{i+1} TUPLE={c} items: {size_c}, delta={delta}')
print('==='*15)
prev = 0
for i in range(11):
c = list(range(i+1))
size_c = sys.getsizeof(c)
delta, prev = size_c - prev, size_c
print(f'{i+1} LIST={c} items:{size_c}, delta={delta}')
print('==='*15)
# Size of a list when items are appended
c = []
prev = sys.getsizeof(c)
print(f'0 items: {sys.getsizeof(c)}')
for i in range(255):
c.append(i)
size_c = sys.getsizeof(c)
delta, prev = size_c - prev, size_c
print(f'{i+1} items: {size_c}, delta={delta}')
| 672 |
lib/models/sync_bn/inplace_abn/__init__.py
|
Zealoe/HRNet-Semantic-Segmentation
| 0 |
2171000
|
from .bn import ABN, InPlaceABN, InPlaceABNSync
from .functions import ACT_RELU, ACT_LEAKY_RELU, ACT_ELU, ACT_NONE
| 117 |
todoism/models.py
|
zhaofangfang1991/airsupport-
| 0 |
2170511
|
# -*- coding: utf-8 -*-
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from todoism.extensions import db
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(20), unique=True, index=True)
password_hash = db.Column(db.String(128))
locale = db.Column(db.String(20))
# items = db.relationship('Item', back_populates='author', cascade='all')
def set_password(self, password):
self.password_hash = generate_password_hash(password)
def validate_password(self, password):
return check_password_hash(self.password_hash, password)
class Item(db.Model):
id = db.Column(db.Integer, primary_key=True)
body = db.Column(db.Text)
done = db.Column(db.Boolean, default=False)
# author_id = db.Column(db.Integer, db.ForeignKey('user.id'))
# author = db.relationship('User', back_populates='items')
# Monitoring point data
class Point(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    segment = db.Column(db.String(64), default='', nullable=False, comment='Segment name, e.g. segment X1')
    point = db.Column(db.String(64), default='', nullable=False, comment='Monitoring point name, e.g. monitoring point 5')
    temperature = db.Column(db.Integer, default=0, comment='Temperature')
    humidity = db.Column(db.Integer, default=0, comment='Humidity')
    windpressure = db.Column(db.Integer, default=0, comment='Wind pressure')
    status = db.Column(db.Integer, default=3, nullable=False, comment='Status: 1 = stopped, 2 = warning, 3 = normal; any other value is abnormal')
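# --- Hypothetical usage sketch (not part of the original module); assumes an
# application context created by todoism's app factory and an initialised db:
#
#   point = Point(segment='X1', point='monitoring point 5',
#                 temperature=21, humidity=45, windpressure=2, status=3)
#   db.session.add(point)
#   db.session.commit()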
| 1,519 |
django/contrib/gis/db/models/constants.py
|
fizista/django
| 2 |
2171941
|
from django.db.models.sql.constants import QUERY_TERMS
GIS_LOOKUPS = {
'bbcontains', 'bboverlaps', 'contained', 'contains',
'contains_properly', 'coveredby', 'covers', 'crosses', 'disjoint',
'distance_gt', 'distance_gte', 'distance_lt', 'distance_lte',
'dwithin', 'equals', 'exact',
'intersects', 'overlaps', 'relate', 'same_as', 'touches', 'within',
'left', 'right', 'overlaps_left', 'overlaps_right',
'overlaps_above', 'overlaps_below',
'strictly_above', 'strictly_below'
}
ALL_TERMS = GIS_LOOKUPS | QUERY_TERMS
__all__ = ['ALL_TERMS', 'GIS_LOOKUPS']
| 587 |
api/tests/conftest.py
|
galbwe/meep-backend
| 0 |
2170487
|
import pytest
from app import create_app
from models import User, Role, Project, Location, db, ProjectType
from db_operations import reset
@pytest.fixture(scope='function')
def new_user():
user = User('<EMAIL>', '1289rhth')
return user
@pytest.fixture(scope='function')
def new_role():
role = Role(id=8, role_name="admin")
return role
@pytest.fixture(scope='function')
def new_project():
project = Project(
id=1,
name="testName",
description="testDescription",
photo_url="www.google.com",
website_url="www.aol.com",
year=1999,
gge_reduced=1.234,
ghg_reduced=2.234
)
return project
@pytest.fixture(scope='function')
def new_projectType():
projectType = ProjectType(
id=9,
type_name="typeName"
)
return projectType
@pytest.fixture(scope='function')
def new_location():
location = Location(
id=5,
address="7510 Floyd St",
city="Overland Park",
state="KS",
zip_code=66204,
location='POINT(-94.668954 38.992762)',
project_id=1
)
return location
@pytest.fixture(scope='session')
def app():
app = create_app('test')
with app.app_context():
db.create_all()
yield app
db.session.remove()
db.drop_all()
| 1,323 |