Dataset Viewer
max_stars_repo_path (stringlengths 4-182) | max_stars_repo_name (stringlengths 6-116) | max_stars_count (int64 0-191k) | id (stringlengths 7) | content (stringlengths 100-10k) | size (int64 100-10k) |
---|---|---|---|---|---|
django_http_auth/__init__.py | nopped/django-http-auth | 1 | 2023251 |
import time
import base64
from datetime import datetime
from datetime import timedelta
from django.conf import settings
from django.shortcuts import HttpResponse
from django.contrib.auth import authenticate
from django.contrib.auth import login
class HTTPBasicAuthenticator(object):
S_FAILURE_COUNT = 'http_basic_auth_failure_count'
S_IS_BLOCKED = 'http_basic_auth_block'
S_LAST_CHECK_TIMESTAMP = 'http_basic_auth_last_check_timestamp'
REALM = getattr(settings, 'HTTP_BASIC_AUTH_REALM', '')
BF_ENABLED = getattr(settings, 'HTTP_BASIC_AUTH_BF_ENABLED', True) # Brute Force Protection
BF_ATTEMPTS = getattr(settings, 'HTTP_BASIC_AUTH_BF_ATTEMPTS', 10) # Requests
BF_MONITOR_WINDOW = getattr(settings, 'HTTP_BASIC_AUTH_BF_MONITOR_WINDOW', 30) # Seconds
BF_BLOCK_PERIOD = getattr(settings, 'HTTP_BASIC_AUTH_BF_BLOCK_PERIOD', 60 * 10) # Seconds
@staticmethod
def check(request):
# Check if user already authenticated, if so, skip HTTP Basic authentication
if request.user.is_authenticated():
return True
# Perform anti-brute force process
if HTTPBasicAuthenticator._anti_bruteforce(request):
return False
# Get authorization HTTP header
if request.META.get('HTTP_AUTHORIZATION', False):
# Extract authentication type and credentials from header
auth_type, credentials_base64 = request.META['HTTP_AUTHORIZATION'].split(' ')
# Decode credentials using Base64
credentials = base64.b64decode(credentials_base64)
# Get username and password from decoded credentials
username, password = credentials.split(':')
# Try to authenticate username and password to allow access
user_object = authenticate(username = username, password = password)
# If we found a match and user is active, allow access
if user_object is not None and user_object.is_active:
login(request, user_object)
return True
# Failed to get authorization HTTP header or to authenticate user
return False
@staticmethod
def challenge(request):
# If we detect a brute-force attempt, block the attacker for BF_BLOCK_PERIOD seconds
if HTTPBasicAuthenticator._is_blocked(request):
# Build HTTPResponse with Service Unavailable error
response = HttpResponse("Service Unavailable", status = 503)
return response
# Build HTTPResponse with authentication challenge
response = HttpResponse("Authentication Required", status = 401)
response['WWW-Authenticate'] = 'Basic realm="%s"' % HTTPBasicAuthenticator.REALM
return response
@staticmethod
def _anti_bruteforce(request):
# Update http basic authentication failure count
try:
# Increment by one every time we have a new request
request.session[HTTPBasicAuthenticator.S_FAILURE_COUNT] += 1
except KeyError:
# New session, create session variables to be used for our anti brute-force mechanism
request.session[HTTPBasicAuthenticator.S_IS_BLOCKED] = False
request.session[HTTPBasicAuthenticator.S_FAILURE_COUNT] = 0
request.session[HTTPBasicAuthenticator.S_LAST_CHECK_TIMESTAMP] = time.time()
# Get the last check timestamp of the current session
last_check_timestamp = datetime.fromtimestamp(request.session[HTTPBasicAuthenticator.S_LAST_CHECK_TIMESTAMP])
# If last check timestamp is older than BF_BLOCK_PERIOD, reset last check timestamp
if datetime.now() - last_check_timestamp > timedelta(seconds = HTTPBasicAuthenticator.BF_BLOCK_PERIOD):
request.session[HTTPBasicAuthenticator.S_IS_BLOCKED] = False
request.session[HTTPBasicAuthenticator.S_FAILURE_COUNT] = 0
request.session[HTTPBasicAuthenticator.S_LAST_CHECK_TIMESTAMP] = time.time()
else:
# If we have more than BF_ATTEMPTS attempts in the last BF_MONITOR_WINDOW seconds,
# block for BF_BLOCK_PERIOD seconds
if request.session[HTTPBasicAuthenticator.S_FAILURE_COUNT] > HTTPBasicAuthenticator.BF_ATTEMPTS and \
(datetime.now() - last_check_timestamp < timedelta(seconds = HTTPBasicAuthenticator.BF_MONITOR_WINDOW)):
# Too many requests, it's time to block for BF_BLOCK_PERIOD seconds
request.session[HTTPBasicAuthenticator.S_IS_BLOCKED] = True
request.session[HTTPBasicAuthenticator.S_LAST_CHECK_TIMESTAMP] = time.time()
# Return whether session is currently blocked or not
return request.session[HTTPBasicAuthenticator.S_IS_BLOCKED]
@staticmethod
def _is_blocked(request):
if request.session[HTTPBasicAuthenticator.S_IS_BLOCKED]:
# Get the last check timestamp of the current session
last_check_timestamp = datetime.fromtimestamp(request.session[HTTPBasicAuthenticator.S_LAST_CHECK_TIMESTAMP])
# If we have been blocking the session for more than BF_BLOCK_PERIOD seconds - remove block
if datetime.now() - last_check_timestamp >= timedelta(seconds = HTTPBasicAuthenticator.BF_BLOCK_PERIOD):
request.session[HTTPBasicAuthenticator.S_IS_BLOCKED] = False
request.session[HTTPBasicAuthenticator.S_LAST_CHECK_TIMESTAMP] = time.time()
return False
# Session is still blocked
return True
# Session is currently not blocked
return False
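# Usage sketch: how a Django view might call check()/challenge(); the view name
# and response body below are hypothetical examples, not part of this module.
def example_protected_view(request):
    if not HTTPBasicAuthenticator.check(request):
        # Either a 401 challenge or a 503 "blocked" response, depending on session state.
        return HTTPBasicAuthenticator.challenge(request)
    return HttpResponse("Hello, %s" % request.user.username)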
| 5,774 |
com/gionee/ota/intall/installMulti.py | qmxiu531/OtaNeedInstalledApps | 1 | 2023126 |
# -*- coding: utf-8 -*-
import queue,threading
from com.gionee.ota.util import Util
__author__ = 'suse'
rs = {}
logger = Util.logger
class MultiInstall():
def __init__(self,device,installAddrList):
self.device = device
self.installAddrList = installAddrList
self.threadNameList = ["Thread-1","Thread-2","Thread-3","Thread-4","Thread-5"]
self.workQueue = queue.Queue(len(installAddrList))
print(self.workQueue.maxsize)
self.threads=[]
self.threadID = 1
for installAddr in installAddrList:
self.workQueue.put(installAddr)
def startMultiThread(self):
for tName in self.threadNameList:
thread = InstallAppThread(self.threadID,self.device,self.workQueue)
thread.start()
self.threads.append(thread)
self.threadID +=1
for i in self.threads:
i.join()
return rs
class InstallAppThread(threading.Thread):
def __init__(self,threadID,device,q):
threading.Thread.__init__(self)
self.threadID = threadID
self.device = device
self.q = q
def run(self):
print("Staring"+self.name)
while True:
if self.q.qsize() > 0:
installAddr = self.q.get()
self.process_data(self.device,installAddr)
# rs[installAddr] = rc
else:
break
print("Exiting "+str(self.threadID))
def process_data(self,threadName,installAddr):
rc = []
cmd = 'adb -s %s install -r %s'% (self.device,installAddr)
logger.info("开始安装:"+installAddr)
Util.exccmd('adb -s %s wait-for-device '%self.device)
installResult = Util.exccmd(cmd)
logger.info("Install result: " + installResult)
| 2,029 |
cpyquickhelper/numbers/speed_measure.py | sdpython/cpyquickhelper | 2 | 2022884 |
"""
@file
@brief Measures speed.
"""
import sys
from timeit import Timer
def measure_time(stmt, context, repeat=10, number=50, div_by_number=False):
"""
Measures a statement and returns the results as a dictionary.
@param stmt string
@param context variable to know in a dictionary
@param repeat average over *repeat* experiment
@param number number of executions in one row
@param div_by_number divide by the number of executions
@return dictionary
.. runpython::
:showcode:
from cpyquickhelper.numbers import measure_time
from math import cos
res = measure_time("cos(x)", context=dict(cos=cos, x=5.))
print(res)
See `Timer.repeat <https://docs.python.org/3/library/timeit.html?timeit.Timer.repeat>`_
for a better understanding of parameter *repeat* and *number*.
The function returns a duration corresponding to
*number* times the execution of the main statement.
"""
import numpy # pylint: disable=C0415
tim = Timer(stmt, globals=context)
res = numpy.array(tim.repeat(repeat=repeat, number=number))
if div_by_number:
res /= number
mean = numpy.mean(res)
dev = numpy.mean(res ** 2)
dev = (dev - mean**2) ** 0.5
mes = dict(average=mean, deviation=dev, min_exec=numpy.min(res),
max_exec=numpy.max(res), repeat=repeat, number=number)
if 'values' in context:
if hasattr(context['values'], 'shape'):
mes['size'] = context['values'].shape[0]
else:
mes['size'] = len(context['values']) # pragma: no cover
else:
mes['context_size'] = sys.getsizeof(context)
return mes
def _fcts():
"""
Returns functions to measure.
"""
import numpy # pylint: disable=C0415
from .cbenchmark_dot import vector_dot_product # pylint: disable=E0611,C0415
from .cbenchmark_dot import vector_dot_product16 # pylint: disable=E0611,C0415
from .cbenchmark_dot import vector_dot_product16_nofcall # pylint: disable=E0611,C0415
from .cbenchmark_dot import vector_dot_product16_sse # pylint: disable=E0611,C0415
def simple_dot(values):
return numpy.dot(values, values)
def c11_dot(vect):
return vector_dot_product(vect, vect)
def c11_dot16(vect):
return vector_dot_product16(vect, vect)
def c11_dot16_nofcall(vect):
return vector_dot_product16_nofcall(vect, vect)
def c11_dot16_sse(vect):
return vector_dot_product16_sse(vect, vect)
return [simple_dot, c11_dot, c11_dot16, c11_dot16_nofcall, c11_dot16_sse]
def check_speed(dims=[100000], repeat=10, number=50, fLOG=print): # pylint: disable=W0102
"""
Prints out some information about speed computation
of this laptop. See :ref:`cbenchmarkbranchingrst` to compare.
@param dims sets of dimensions to try
@param repeat average over *repeat* experiment
@param number number of execution in one row
@param fLOG logging function
@return iterator on results
:epkg:`numpy` is multithreaded. For an accurate comparison,
this needs to be disabled. This can be done by setting environment variable
``MKL_NUM_THREADS=1`` or by running:
::
import mkl
mkl.set_num_threads(1)
.. index:: MKL_NUM_THREADS
One example of use:
.. runpython::
:showcode:
from cpyquickhelper.numbers import check_speed
res = list(check_speed(dims=[100, 1000]))
import pprint
pprint.pprint(res)
"""
import numpy # pylint: disable=C0415
fcts = _fcts()
mx = max(dims)
vect = numpy.ones((mx,))
for i in range(0, vect.shape[0]):
vect[i] = i
for i in dims:
values = vect[:i].copy()
for fct in fcts:
ct = {fct.__name__: fct}
ct['values'] = values
t = measure_time("{0}(values)".format(fct.__name__),
repeat=repeat, number=number, context=ct)
t['name'] = fct.__name__
if fLOG:
fLOG(t)
yield t
| 4,232 |
datasets/signals.py | pmwaniki/ppg-analysis | 2 | 2022922 |
from scipy.signal import butter,lfilter
from scipy import signal
import matplotlib.pyplot as plt
import numpy as np
from functools import reduce
def butter_bandpass(lowcut=0.1,highcut=5,fs=125,order=5):
nyq=0.5*fs
low=lowcut/nyq
high=highcut/nyq
return butter(order,[low,high],btype='band')
def butter_filter(data,lowcut=0.1,highcut=5,fs=128,order=5):
b,a=butter_bandpass(lowcut,highcut,fs,order=order)
y=lfilter(b,a,data)
return y
def stft(sig,fs,nperseg,noverlap,spec_only=False):
f, t, Zxx = signal.stft(sig, fs=fs, window='hann', nperseg=nperseg, noverlap=noverlap, boundary=None)
Zxx=2*np.abs(Zxx)/np.sum(np.hanning(nperseg))
# Zxx = np.log(Zxx+1e-8)
Zxx=Zxx[np.where(np.logical_and(f>=0.0 , f <=5))[0],:]
f=f[np.where(np.logical_and(f>=0.0 , f <=5))]
if spec_only:
return Zxx
return f,t,Zxx
def rand_sfft(sig,fs,output_shape=(30,15)):
slice_sec=np.random.uniform(2,3,1)[0]
slide_sec = np.random.uniform(0.1,0.3,1)[0]
nperseg = int(slice_sec * fs)
step = int(slide_sec * fs)
noverlap = nperseg - step
f, t, Zxx = signal.stft(sig, fs=fs, window='hann', nperseg=nperseg, noverlap=noverlap, boundary=None)
Zxx = 2 * np.abs(Zxx) / np.sum(np.hanning(nperseg))
h,w=Zxx.shape
if h < output_shape[0]:
Zxx=np.pad(Zxx,((output_shape[0]-h,0),(0,0)),mode='constant',constant_values=0)
else:
Zxx=Zxx[h-output_shape[0]:,:]
if w < output_shape[1]:
Zxx=np.pad(Zxx,((0,0),(0,output_shape[1]-w)),mode="constant",constant_values=0)
else:
Zxx=Zxx[:,0:output_shape[1]]
return Zxx
def resample(x,fs_in,fs_out):
n_out=int(len(x)*fs_out/fs_in)
sig_out=signal.resample(x,n_out)
return sig_out
def gaus_noise(x1,x2=None,min_sd=0.00001,max_sd=0.01,p=0.5):
if np.random.rand()<p:
if x2 is not None:
return x1,x2
return x1
sds=np.logspace(np.log10(min_sd),np.log10(max_sd),num=1000)
sd=np.random.choice(sds,size=1)
if x2 is None:
return x1+np.random.normal(0,sd,len(x1))
else:
return x1+np.random.normal(0,sd,len(x1)),x2+np.random.normal(0,sd,len(x2))
def permute(x1,x2=None,n_segments=5,p=0.5):
assert len(x1) % n_segments == 0
if np.random.rand()<p:
if x2 is not None:
return x1,x2
return x1
l=len(x1)
l_segment=l//n_segments
i_segments=[i*l_segment for i in range(n_segments)]
order_segments = np.random.permutation(range(n_segments))
x1_segments=[x1[i:i+l_segment] for i in i_segments]
x1_new = [x1_segments[i] for i in order_segments]
x1_new=np.concatenate(x1_new)
if x2 is not None:
x2_segments=[x2[i:i+l_segment] for i in i_segments]
x2_new = [x2_segments[i] for i in order_segments]
x2_new = np.concatenate(x2_new)
return x1_new,x2_new
return x1_new
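# Usage sketch: band-pass a synthetic signal with butter_filter.
# The sampling rate and test frequencies are arbitrary choices for illustration.
if __name__ == "__main__":
    fs = 125
    t = np.arange(0, 10, 1 / fs)
    # The 1 Hz component sits inside the 0.1-5 Hz pass band, the 20 Hz component outside it.
    raw = np.sin(2 * np.pi * 1.0 * t) + 0.5 * np.sin(2 * np.pi * 20.0 * t)
    filtered = butter_filter(raw, lowcut=0.1, highcut=5, fs=fs, order=5)
    plt.plot(t, raw, label="raw")
    plt.plot(t, filtered, label="band-passed")
    plt.legend()
    plt.show()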
| 2,881 |
deteksi_mandiri/views.py | agilghif/temenin-isoman | 0 | 2022655 |
from django.shortcuts import render
from django.views.generic import ListView
from django.http import JsonResponse
from django.contrib.auth.decorators import login_required
from .models import *
class QuizListView(ListView):
model = Quiz
template_name = 'quizes/main.html'
@login_required(login_url='/admin/login/')
def quiz_view(request, pk):
quiz = Quiz.objects.get(pk=pk)
return render(request, 'quizes/quiz.html', {'quiz': quiz})
@login_required(login_url='/admin/login/')
def quiz_data_view(request, pk):
quiz = Quiz.objects.get(pk=pk)
questions = []
for question in quiz.get_questions():
answers = []
for answer in question.get_answers():
answers.append(answer.text)
questions.append({str(question): answers})
return JsonResponse({
'data': questions,
'time': quiz.time,
})
@login_required(login_url='/admin/login/')
def save_quiz_view(request, pk):
if request.is_ajax():
questions = []
data = request.POST
data_ = dict(data.lists())
data_.pop('csrfmiddlewaretoken')
for key in data_.keys():
question = Question.objects.get(text=key)
questions.append(question)
user = request.user
quiz = Quiz.objects.get(pk=pk)
multiplier = 100 / quiz.number_of_questions
score = 0
results = []
correct_answer = None
full = True
for question in questions:
answer_selected = request.POST.get(question.text)
if answer_selected != "":
truth = False
question_answer = Answer.objects.filter(question=question)
for answer in question_answer:
if answer_selected == answer.text and (not truth):
if answer.correct:
score += 1
correct_answer = answer.text
results.append({
str(question): {
'correct_answer': correct_answer,
'answered': answer_selected
}})
truth = True
else:
if answer.correct:
correct_answer = answer.text
if not truth:
results.append({
str(question): {
'correct_answer': correct_answer,
'answered': answer_selected
}})
else:
results.append({str(question): 'not-answered'})
full = False
score_ = score * multiplier
if full:
Result.objects.create(quiz=quiz, user=user, skor=score_)
if score_ >= quiz.required_score_to_pass:
return JsonResponse({
'passed': "True",
'score': score_,
'results': results,
'full': "True"
})
else:
return JsonResponse({
'passed': "False",
'score': score_,
'results': results,
'full': "True"
})
else:
return JsonResponse({
'passed': "False",
'score': score_,
'results': results,
'full': "False"
})
| 3,553 |
api/namex/resources/auto_analyse/paths/bc_name_analysis/__init__.py | sumesh-aot/namex | 1 | 2022690 |
from .bc_name_analysis import BcNameAnalysis
from .bc_name_analysis import api as bc_name_analysis_api
| 103 |
RecunoastereFaciala/IdentificareFata.py | newparts/Python | 0 | 2022631 |
import face_recognition
imaginea_lui_obama = face_recognition.load_image_file('./img/cunoscuti/obama.jpg')
obama_face_encoding = face_recognition.face_encodings(imaginea_lui_obama)[0]
imagine_necunoscuta = face_recognition.load_image_file('./img/necunoscuti/sosie.jpg')
necunoscuta_face_encoding = face_recognition.face_encodings(imagine_necunoscuta)[0]
results = face_recognition.compare_faces([obama_face_encoding], necunoscuta_face_encoding)
if results[0]:
print('This is Obama')
else:
print('This is not Obama')
| 533 |
faiss_utils.py | woctezuma/match-steam-banners | 0 | 2022790 |
from time import time
import faiss
def get_faiss_search_structure(embeddings, use_cosine_similarity=True):
d = embeddings.shape[1]
xb = embeddings.astype('float32')
if use_cosine_similarity:
# Caveat: you need to normalize the embeddings,
# because faiss uses dot-product instead of cosine similarity!
# cf. https://github.com/facebookresearch/faiss/wiki/MetricType-and-distances#metric_inner_product
faiss.normalize_L2(xb)
index = faiss.IndexFlatIP(d)
index.add(xb)
else:
index = faiss.IndexFlatL2(d)
index.add(xb)
return index
def find_faiss_knn_for_all(
index, embeddings_for_query, num_neighbors, use_cosine_similarity=True
):
xq = embeddings_for_query.astype('float32')
if use_cosine_similarity:
faiss.normalize_L2(xq)
start = time()
D, I = index.search(xq, num_neighbors)
print("Elapsed time: {:.2f} s".format(time() - start))
return D, I
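# Usage sketch: index random embeddings and query a few of them back.
# The array sizes and neighbor count are arbitrary illustration values.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.default_rng(0)
    embeddings = rng.normal(size=(1000, 128))  # 1000 items with 128-d embeddings
    queries = embeddings[:5]                   # reuse a few database rows as queries

    index = get_faiss_search_structure(embeddings, use_cosine_similarity=True)
    D, I = find_faiss_knn_for_all(index, queries, num_neighbors=3)
    print(I[:, 0])  # with cosine similarity, each query should find itself first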
| 984 |
tests/unit/projects/test_projectconfig.py | cjolowicz/cutty | 1 | 2023428 |
"""Unit tests for cutty.projects.projectconfig."""
import dataclasses
import json
import pathlib
from typing import Any
import pytest
from cutty.filestorage.adapters.disk import DiskFileStorage
from cutty.filestorage.domain.files import RegularFile
from cutty.filesystems.domain.purepath import PurePath
from cutty.projects.projectconfig import COOKIECUTTER_JSON_FILE
from cutty.projects.projectconfig import createprojectconfigfile
from cutty.projects.projectconfig import ProjectConfig
from cutty.projects.projectconfig import readcookiecutterjson
from cutty.projects.projectconfig import readprojectconfigfile
from cutty.templates.domain.bindings import Binding
@pytest.fixture
def projectconfig() -> ProjectConfig:
"""Fixture for a project configuration."""
template = "https://example.com/repository.git"
revision = "cac8df79d0680240f6d7d11c027548d5582ea308"
bindings = [Binding("project", "example"), Binding("license", "MIT")]
directory = pathlib.Path("a")
return ProjectConfig(template, bindings, revision=revision, directory=directory)
@pytest.fixture
def storage(tmp_path: pathlib.Path) -> DiskFileStorage:
"""Fixture for disk file storage."""
return DiskFileStorage(tmp_path / "storage")
def test_roundtrip(storage: DiskFileStorage, projectconfig: ProjectConfig) -> None:
"""It returns the persisted project configuration."""
file = createprojectconfigfile(PurePath(), projectconfig)
with storage:
storage.add(file)
assert projectconfig == readprojectconfigfile(storage.root)
def test_readprojectconfigfile_typeerror(
storage: DiskFileStorage, projectconfig: ProjectConfig
) -> None:
"""It checks that the payload is a JSON object."""
file = createprojectconfigfile(PurePath(), projectconfig)
file = dataclasses.replace(file, blob=json.dumps("teapot").encode())
with storage:
storage.add(file)
with pytest.raises(TypeError):
readprojectconfigfile(storage.root)
@pytest.mark.parametrize(
("field", "value"),
[
("location", None),
("directory", 42),
("revision", 42),
],
)
def test_readprojectconfigfile_template_typeerror(
storage: DiskFileStorage, projectconfig: ProjectConfig, field: str, value: Any
) -> None:
"""It checks that the template location is a string."""
file = createprojectconfigfile(PurePath(), projectconfig)
# Replace the template location with `None` in the JSON record.
data = json.loads(file.blob.decode())
data["template"][field] = value
file = dataclasses.replace(file, blob=json.dumps(data).encode())
with storage:
storage.add(file)
with pytest.raises(TypeError):
readprojectconfigfile(storage.root)
def test_createprojectconfigfile_format(
storage: DiskFileStorage, projectconfig: ProjectConfig
) -> None:
"""It formats the JSON file in a standard way."""
file = createprojectconfigfile(PurePath(), projectconfig)
with storage:
storage.add(file)
path = storage.resolve(file.path)
lines = path.read_text().splitlines(keepends=True)
assert "{\n" == lines[0]
assert lines[1].startswith(' "')
assert lines[1].endswith('": {\n')
assert "}\n" == lines[-1]
def createlegacyprojectconfigfile(
project: PurePath, projectconfig: ProjectConfig
) -> RegularFile:
"""Create a .cookiecutter.json file."""
data = {"_template": projectconfig.template} | {
binding.name: binding.value for binding in projectconfig.bindings
}
path = project / COOKIECUTTER_JSON_FILE
text = json.dumps(data, indent=4)
return RegularFile(path, text.encode())
def test_readcookiecutterjson(
storage: DiskFileStorage, projectconfig: ProjectConfig
) -> None:
"""It loads a project configuration from a .cookiecutter.json file."""
# The .cookiecutter.json format does not include the template directory.
projectconfig = dataclasses.replace(projectconfig, revision=None, directory=None)
file = createlegacyprojectconfigfile(PurePath(), projectconfig)
with storage:
storage.add(file)
assert projectconfig == readcookiecutterjson(storage.root)
| 4,175 |
main_window.py | fochoao/CryptX-Chat | 0 | 2023264 |
import sys, os
from PySide2.QtWidgets import QApplication, QMainWindow, QLabel
from PySide2.QtCore import Qt
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
backend = default_backend()
pwd=b"<PASSWORD>"
msg=b"Hello World, My name is Fernando!!!! The password is 256 bit long."
aed=b"saltallovermypassword"
iv=os.urandom(27)
cipher=Cipher(algorithms.AES(pwd), modes.GCM(iv), backend=backend)
e=cipher.encryptor()
e.authenticate_additional_data(aed)
ct=e.update(msg) + e.finalize()
tag=e.tag
cipher=Cipher(algorithms.AES(pwd), modes.GCM(iv,tag), backend=backend)
d=cipher.decryptor()
d.authenticate_additional_data(aed)
clear=d.update(ct)+d.finalize()
assert clear == msg  # round-trip check: decrypted text must equal the original message
x = clear.decode()
class VentanaPrincipal(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
self.setWindowTitle("CryptX Chat")
label = QLabel(x)
label.setAlignment(Qt.AlignCenter)
self.setCentralWidget(label)
if __name__=="__main__":
app = QApplication(sys.argv)
win = VentanaPrincipal()
win.show()
sys.exit(app.exec_())
| 1,151 |
svm_net_xor.py | GRSEB9S/deepSVMnet | 1 | 2023527 |
# <NAME> (BCD)
# December 2017
from layers import Layer
from model import Model
import numpy as np
model = []
X = np.array([[0,0],
[1,0],
[0,1],
[1,1]])
Y = np.array([[1],
[0],
[0],
[1]])
np.random.seed(1) # seed random number generator for reproducible results
# Set up model layers
model.append(Layer._input_layer(input_shape = (2,1)))
model.append(Layer.dense(input_layer = model[0], num_nodes = 2, rectified = True))
model.append(Layer.dense(input_layer = model[1], num_nodes = 1, rectified = False)) # We need linear support vector machine output layer | Set rectified to false during training
model[1].learning_rate = 0.02
model[2].learning_rate = 0.02
# train model using the BSSP learning algorithm
Model.sgd_bssp_train(model, X, Y, 5000)
# rectify the output layer | This line can be commented out
model[len(model) - 1].rectified = True
# Test model
for x in X:
output = Model.predict(model, x)
print(output)
| 1,071 |
jpylib/jtrees/_rmq.py | JiniousChoi/encyclopedia-in-code | 2 | 2023118 |
#!/usr/bin/python3
## author: <EMAIL>
from math import inf
from jpylib.jmath import single_bit_ceil
class RMQ(object):
''' Range Minimum Query '''
def __init__(self, arr):
self.arr_sz = arr_sz = len(arr)
self.tree_sz = tree_sz = self.__calc_tree_sz(arr_sz)
self.range_min = range_min = [None] * tree_sz # complete binary tree
self.__initialize(arr, 0, 0, arr_sz-1)
def __initialize(self, arr, root, root_left, root_right):
''' @param arr
@param root: 0-based
@param root_left: 0-based
@param root_right: 0-based
@return range_min of root after setting in post-order '''
if root_left == root_right:
self.range_min[root] = arr[root_left]
return self.range_min[root]
mid = (root_left + root_right) >> 1
left_min = self.__initialize(arr, root*2+1, root_left, mid)
right_min = self.__initialize(arr, root*2+2, mid+1, root_right)
self.range_min[root] = min(left_min, right_min)
return self.range_min[root]
def __calc_tree_sz(self, arr_sz):
''' @return tree_sz::int '''
return single_bit_ceil(arr_sz) * 2
def query(self, left, right):
assert left <= right
return self.__query(left, right, 0, 0, self.arr_sz-1)
def __query(self, left, right, root, root_left, root_right):
range_min = self.range_min
if (right < root_left) or (root_right < left):
return inf
elif left <= root_left <= root_right <= right:
return range_min[root]
mid = (root_left + root_right) >> 1
return min(self.__query(left, right, root*2+1, root_left, mid),
self.__query(left, right, root*2+2, mid+1, root_right))
def update(self, arr_idx, new_val):
return self.__update(arr_idx, new_val, 0, 0, self.arr_sz-1)
def __update(self, arr_idx, new_val, root, root_left, root_right):
''' @return newly updated value for the `root` '''
range_min = self.range_min
#over the hedge
if (arr_idx < root_left) or (root_right < arr_idx):
# no need to update
return range_min[root]
#on the hedge
if root_left == root_right:
range_min[root] = new_val
return range_min[root]
mid = (root_left + root_right) >> 1
range_min[root] = min(self.__update(arr_idx, new_val, 2*root+1, root_left, mid),
self.__update(arr_idx, new_val, 2*root+2, mid+1, root_right))
return range_min[root]
import unittest
def naive_min(arr, l, r):
return min(arr[l:r+1])
class RMQTest(unittest.TestCase):
def test_query(self):
arr = [5,2,1,4,3]
rmq = RMQ(arr)
for l in range(len(arr)):
for r in range(l, len(arr)):
self.assertEqual(rmq.query(l,r), naive_min(arr,l,r))
def test_update(self):
arr = [5,2,1,4,3]
rmq = RMQ(arr)
for arr_idx, new_val in [(0,3),(2,-1),(4,-2)]:
arr[arr_idx] = new_val
rmq.update(arr_idx, new_val)
for l in range(len(arr)):
for r in range(l, len(arr)):
self.assertEqual(rmq.query(l,r), naive_min(arr,l,r))
if __name__ == "__main__":
unittest.main()
| 3,356 |
server/openapi_server/models/full_capacity_term.py | hubmapconsortium/ontology-api | 2 | 2023221 |
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server import util
class FullCapacityTerm(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, term=None, tty=None, code=None, concept=None, perfterm=None, semantic=None): # noqa: E501
"""FullCapacityTerm - a model defined in OpenAPI
:param term: The term of this FullCapacityTerm. # noqa: E501
:type term: str
:param tty: The tty of this FullCapacityTerm. # noqa: E501
:type tty: str
:param code: The code of this FullCapacityTerm. # noqa: E501
:type code: str
:param concept: The concept of this FullCapacityTerm. # noqa: E501
:type concept: str
:param perfterm: The perfterm of this FullCapacityTerm. # noqa: E501
:type perfterm: str
:param semantic: The semantic of this FullCapacityTerm. # noqa: E501
:type semantic: str
"""
self.openapi_types = {
'term': str,
'tty': str,
'code': str,
'concept': str,
'perfterm': str,
'semantic': str
}
self.attribute_map = {
'term': 'term',
'tty': 'tty',
'code': 'code',
'concept': 'concept',
'perfterm': 'perfterm',
'semantic': 'semantic'
}
self._term = term
self._tty = tty
self._code = code
self._concept = concept
self._perfterm = perfterm
self._semantic = semantic
@classmethod
def from_dict(cls, dikt) -> 'FullCapacityTerm':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The FullCapacityTerm of this FullCapacityTerm. # noqa: E501
:rtype: FullCapacityTerm
"""
return util.deserialize_model(dikt, cls)
@property
def term(self):
"""Gets the term of this FullCapacityTerm.
:return: The term of this FullCapacityTerm.
:rtype: str
"""
return self._term
@term.setter
def term(self, term):
"""Sets the term of this FullCapacityTerm.
:param term: The term of this FullCapacityTerm.
:type term: str
"""
self._term = term
@property
def tty(self):
"""Gets the tty of this FullCapacityTerm.
:return: The tty of this FullCapacityTerm.
:rtype: str
"""
return self._tty
@tty.setter
def tty(self, tty):
"""Sets the tty of this FullCapacityTerm.
:param tty: The tty of this FullCapacityTerm.
:type tty: str
"""
self._tty = tty
@property
def code(self):
"""Gets the code of this FullCapacityTerm.
:return: The code of this FullCapacityTerm.
:rtype: str
"""
return self._code
@code.setter
def code(self, code):
"""Sets the code of this FullCapacityTerm.
:param code: The code of this FullCapacityTerm.
:type code: str
"""
self._code = code
@property
def concept(self):
"""Gets the concept of this FullCapacityTerm.
:return: The concept of this FullCapacityTerm.
:rtype: str
"""
return self._concept
@concept.setter
def concept(self, concept):
"""Sets the concept of this FullCapacityTerm.
:param concept: The concept of this FullCapacityTerm.
:type concept: str
"""
self._concept = concept
@property
def perfterm(self):
"""Gets the perfterm of this FullCapacityTerm.
:return: The perfterm of this FullCapacityTerm.
:rtype: str
"""
return self._perfterm
@perfterm.setter
def perfterm(self, perfterm):
"""Sets the perfterm of this FullCapacityTerm.
:param perfterm: The perfterm of this FullCapacityTerm.
:type perfterm: str
"""
self._perfterm = perfterm
@property
def semantic(self):
"""Gets the semantic of this FullCapacityTerm.
:return: The semantic of this FullCapacityTerm.
:rtype: str
"""
return self._semantic
@semantic.setter
def semantic(self, semantic):
"""Sets the semantic of this FullCapacityTerm.
:param semantic: The semantic of this FullCapacityTerm.
:type semantic: str
"""
self._semantic = semantic
| 4,741 |
demo/hello_dicts.py | peitur/demo_python | 1 | 2023585 |
#!/usr/bin/env python3
from pprint import pprint
if __name__ == "__main__":
## empty dictionary, empty
dict0 = dict()
## creating a simple dict with numeric-string keys
dict1 = { "103":"aaaa", "101":"bbbb", "108":"cccc", "114":"dddd", "105":"eeee" }
## reference copy of dict1
dict2 = dict1
## copying the dict into a new dict (copy, not reference)
dict3 = dict1.copy()
dict4 = {"a":"1111","b":"2222","c":"3333","d":"4444","e":"5555","f":"6666","g":"7777","h":"8888","i":"9999","j":"9090","k":"1010","l":"2020"}
print("\n# start dict")
pprint( dict1 )
## dict comprehension: rebuild dict4 with the values converted to int
print("\n# dict comprehension")
pprint( { x: int( dict4[ x ] ) for x in dict4 } )
## dict2 was created by plain assignment (dict2 = dict1), so it is just a reference
## to the same dict object, while dict3 is an independent copy.
print( "\n# dict2, created as reference")
pprint( dict2 )
print("\n# dict3, copy of dict1")
pprint( dict3 )
print("\n# check for element in dict")
if "a" in dict4:
    print("a is in dict4")
print("\n# lets print a dict with indexes" )
for i, e in enumerate( dict1 ):
print("%d > %s => %s" % ( i, e, dict1[ e ] ) )
print("\n## Joining dicts")
print( ", ".join( dict4.keys() ) )
print( dict4["b"] )
print( dict4.get("x", "missing") )
dict4["4"] = "newval"
dict4["x"] = "exists"
print( dict4.get("x", "missing") )
| 1,445 |
utils/pillars.py | Jabb0/FastFlow3D | 6 | 2023344 |
import numpy as np
def remove_out_of_bounds_points(pc, y, x_min, x_max, y_min, y_max, z_min, z_max):
# Max needs to be exclusive because the last grid cell on each axis contains
# [((grid_size - 1) * cell_size) + *_min, *_max).
# E.g grid_size=512, cell_size = 170/512 with min=-85 and max=85
# For z-axis this is not necessary, but we do it for consistency
mask = (pc[:, 0] >= x_min) & (pc[:, 0] < x_max) \
& (pc[:, 1] >= y_min) & (pc[:, 1] < y_max) \
& (pc[:, 2] >= z_min) & (pc[:, 2] < z_max)
pc_valid = pc[mask]
y_valid = None
if y is not None:
y_valid = y[mask]
return pc_valid, y_valid
def create_pillars_matrix(pc_valid, grid_cell_size, x_min, y_min, z_min, z_max, n_pillars_x):
"""
Compute the pillars using matrix operations.
:param pc: point cloud data. (N_points, features) with the first 3 features being the x,y,z coordinates.
:return: augmented_pointcloud, grid_cell_indices, y_valid
"""
num_laser_features = pc_valid.shape[1] - 3 # Calculate the number of laser features that are not the coordinates.
# Calculate the cell id that this entry falls into
# Store the X, Y indices of the grid cells for each point cloud point
grid_cell_indices = np.zeros((pc_valid.shape[0], 2), dtype=int)
grid_cell_indices[:, 0] = ((pc_valid[:, 0] - x_min) / grid_cell_size).astype(int)
grid_cell_indices[:, 1] = ((pc_valid[:, 1] - y_min) / grid_cell_size).astype(int)
# Initialize the new pointcloud with 8 features for each point
augmented_pc = np.zeros((pc_valid.shape[0], 6 + num_laser_features))
# Set every cell z-center to the same z-center
augmented_pc[:, 2] = z_min + ((z_max - z_min) * 1 / 2)
# Set the x cell center depending on the x cell id of each point
augmented_pc[:, 0] = x_min + 1 / 2 * grid_cell_size + grid_cell_size * grid_cell_indices[:, 0]
# Set the y cell center depending on the y cell id of each point
augmented_pc[:, 1] = y_min + 1 / 2 * grid_cell_size + grid_cell_size * grid_cell_indices[:, 1]
# Calculate the distance of the point to the center.
# x
augmented_pc[:, 3] = pc_valid[:, 0] - augmented_pc[:, 0]
# y
augmented_pc[:, 4] = pc_valid[:, 1] - augmented_pc[:, 1]
# z
augmented_pc[:, 5] = pc_valid[:, 2] - augmented_pc[:, 2]
# Take the two laser features
augmented_pc[:, 6:] = pc_valid[:, 3:]
# augmented_pc = [cx, cy, cz, Δx, Δy, Δz, l0, l1]
# Convert the 2D grid indices into a 1D encoding
# This 1D encoding is used by the models instead of the more complex 2D x,y encoding
# To make things easier we transform the 2D indices into 1D indices
# The cells are encoded as j = x * grid_width + y and thus give an unique encoding for each cell
# E.g. if we have 512 cells in both directions and x=1, y=2 is encoded as 512 + 2 = 514.
# Each new row of the grid (x-axis) starts at j % 512 = 0.
grid_cell_indices = grid_cell_indices[:, 0] * n_pillars_x + grid_cell_indices[:, 1]
return augmented_pc, grid_cell_indices
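# Usage sketch on a random point cloud; the grid extents and cell size are
# illustrative values, not necessarily the ones used by the original project.
if __name__ == "__main__":
    x_min, x_max, y_min, y_max, z_min, z_max = -85.0, 85.0, -85.0, 85.0, -3.0, 3.0
    n_pillars_x = 512
    grid_cell_size = (x_max - x_min) / n_pillars_x

    rng = np.random.default_rng(0)
    # (N, 5) points: x, y, z plus two laser features.
    pc = rng.uniform(low=[-100, -100, -5, 0, 0], high=[100, 100, 5, 1, 1], size=(1000, 5))

    pc_valid, _ = remove_out_of_bounds_points(pc, None, x_min, x_max, y_min, y_max, z_min, z_max)
    augmented_pc, cell_ids = create_pillars_matrix(pc_valid, grid_cell_size,
                                                   x_min, y_min, z_min, z_max, n_pillars_x)
    print(augmented_pc.shape, cell_ids.min(), cell_ids.max())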
| 3,071 |
vocabularies/api_views.py | acdh-oeaw/apisbaseproject | 1 | 2023247 |
from django.contrib.auth.models import User
from rest_framework import viewsets
from .serializers import (
InstitutionInstitutionRelationSerializer, TextTypeSerializer,
CollectionTypeSerializer, VocabsBaseClassSerializer,
InstitutionTypeSerializer, ProfessionTypeSerializer, InstitutionPlaceRelationSerializer,
PlaceTypeSerializer, PersonInstitutionRelationSerializer,
PersonPlaceRelationSerializer, UserAccSerializer, VocabNamesSerializer,
PersonPersonRelationSerializer, PersonEventRelationSerializer, PersonWorkRelationSerializer,
InstitutionEventRelationSerializer, InstitutionWorkRelationSerializer, PlaceEventRelationSerializer,
PlaceWorkRelationSerializer, PlacePlaceRelationSerializer, EventWorkRelationSerializer,
EventEventRelationSerializer, WorkWorkRelationSerializer, EventTypeSerializer, WorkTypeSerializer)
from .models import (
InstitutionInstitutionRelation, TextType, CollectionType, VocabsBaseClass,
InstitutionType, ProfessionType, PlaceType, PersonInstitutionRelation, InstitutionPlaceRelation,
PersonPlaceRelation, PersonPersonRelation, VocabNames, InstitutionPlaceRelation, PersonEventRelation,
PersonWorkRelation, InstitutionEventRelation, InstitutionWorkRelation, PlaceWorkRelation, PlaceEventRelation,
PlacePlaceRelation, EventWorkRelation, EventEventRelation, WorkWorkRelation, EventType, WorkType)
###########################################################
#
# Meta - ViewSets
#
##########################################################
class UserViewSet(viewsets.ReadOnlyModelViewSet):
queryset = User.objects.all()
serializer_class = UserAccSerializer
class VocabNamesViewSet(viewsets.ModelViewSet):
queryset = VocabNames.objects.all()
serializer_class = VocabNamesSerializer
class CollectionTypeViewSet(viewsets.ModelViewSet):
queryset = CollectionType.objects.all()
serializer_class = CollectionTypeSerializer
class VocabsBaseClassViewSet(viewsets.ModelViewSet):
queryset = VocabsBaseClass.objects.all()
serializer_class = VocabsBaseClassSerializer
########################################################
#
# Entity Types
#
#######################################################
class TextTypeViewSet(viewsets.ModelViewSet):
queryset = TextType.objects.all()
serializer_class = TextTypeSerializer
class InstitutionTypeViewSet(viewsets.ModelViewSet):
queryset = InstitutionType.objects.all()
serializer_class = InstitutionTypeSerializer
class ProfessionTypeViewSet(viewsets.ModelViewSet):
queryset = ProfessionType.objects.all()
serializer_class = ProfessionTypeSerializer
class PlaceTypeViewSet(viewsets.ModelViewSet):
queryset = PlaceType.objects.all()
serializer_class = PlaceTypeSerializer
class EventTypeViewSet(viewsets.ModelViewSet):
queryset = EventType.objects.all()
serializer_class = EventTypeSerializer
class WorkTypeViewSet(viewsets.ModelViewSet):
queryset = WorkType.objects.all()
serializer_class = WorkTypeSerializer
######################################################
#
# Relation Types
#
#####################################################
class PersonInstitutionRelationViewSet(viewsets.ModelViewSet):
queryset = PersonInstitutionRelation.objects.all()
serializer_class = PersonInstitutionRelationSerializer
class PersonPlaceRelationViewSet(viewsets.ModelViewSet):
queryset = PersonPlaceRelation.objects.all()
serializer_class = PersonPlaceRelationSerializer
class PersonEventRelationViewSet(viewsets.ModelViewSet):
queryset = PersonEventRelation.objects.all()
serializer_class = PersonEventRelationSerializer
class PersonWorkRelationViewSet(viewsets.ModelViewSet):
queryset = PersonWorkRelation.objects.all()
serializer_class = PersonWorkRelationSerializer
class PersonPersonRelationViewSet(viewsets.ModelViewSet):
queryset = PersonPersonRelation.objects.all()
serializer_class = PersonPersonRelationSerializer
class InstitutionInstitutionRelationViewSet(viewsets.ModelViewSet):
queryset = InstitutionInstitutionRelation.objects.all()
serializer_class = InstitutionInstitutionRelationSerializer
class InstitutionPlaceRelationViewSet(viewsets.ModelViewSet):
queryset = InstitutionPlaceRelation.objects.all()
serializer_class = InstitutionPlaceRelationSerializer
class InstitutionEventRelationViewSet(viewsets.ModelViewSet):
queryset = InstitutionEventRelation.objects.all()
serializer_class = InstitutionEventRelationSerializer
class InstitutionWorkRelationViewSet(viewsets.ModelViewSet):
queryset = InstitutionWorkRelation.objects.all()
serializer_class = InstitutionWorkRelationSerializer
class PlaceEventRelationViewSet(viewsets.ModelViewSet):
queryset = PlaceEventRelation.objects.all()
serializer_class = PlaceEventRelationSerializer
class PlaceWorkRelationViewSet(viewsets.ModelViewSet):
queryset = PlaceWorkRelation.objects.all()
serializer_class = PlaceWorkRelationSerializer
class PlacePlaceRelationViewSet(viewsets.ModelViewSet):
queryset = PlacePlaceRelation.objects.all()
serializer_class = PlacePlaceRelationSerializer
class EventWorkRelationViewSet(viewsets.ModelViewSet):
queryset = EventWorkRelation.objects.all()
serializer_class = EventWorkRelationSerializer
class EventEventRelationViewSet(viewsets.ModelViewSet):
queryset = EventEventRelation.objects.all()
serializer_class = EventEventRelationSerializer
class WorkWorkRelationViewSet(viewsets.ModelViewSet):
queryset = WorkWorkRelation.objects.all()
serializer_class = WorkWorkRelationSerializer
| 5,642 |
images_to_pdf.py | spacelover92/images_to_pdf | 1 | 2023158 |
#!/usr/bin/env python3
from PIL import Image
import os
import sys
image_folder_path = sys.argv[1]
images_list = []
list_dir = os.listdir(image_folder_path)
for image in list_dir:
images_list.append(Image.open(image_folder_path+f"/{image}"))
images_list[0].save(image_folder_path+'/images.pdf',save_all=True, append_images=images_list[1:])
| 347 |
docker_tools/verify.py | danielpanteleit/docker-tools | 1 | 2023174 |
import json
from docker_tools.common import docker, docker_list
class Command:
HELP = None
def run(self, opts):
pass
@classmethod
def help(cls):
return cls.HELP
@classmethod
def addArguments(cls, parser):
pass
class Verify(Command):
HELP = "Verifies that containers use images as intended (e.g. using exposed ports and volumes)"
def run(self, opts):
for containerId in docker_list("ps", "-aq"):
info = json.loads(docker("inspect", containerId))
assert len(info) == 1
info = info[0]
name = info["Name"].lstrip("/")
verifyUnexposedPorts(name, info)
def verifyUnexposedPorts(name, info):
# verify used ports were all exposed
imageInfo = json.loads(docker("inspect", info["Image"]))[0]
exposed_ports = set(imageInfo["Config"].get("ExposedPorts", {}).keys())
for p in info["HostConfig"]["PortBindings"] or []:
if p not in exposed_ports:
print("%s: port %s mapped but not exposed" % (name, p))
| 1,061 |
escalera.py | mat0ta/trabajo-grupal | 0 | 2023550 |
import math
import os
import random
import re
import sys
def staircase(n):
for i in range(1,n+1):
fila=''
for j in range(i):
fila += '# '
print(fila)
if __name__ == '__main__':
n = int(input().strip())
staircase(n)
| 279 |
exhibition/settings.py | xdusongwei/exhibition | 0 | 2022892 |
import addict
class Settings:
def __init__(self, config: addict.Addict):
self.config = config
def update(self, config: dict) -> bool:
start, end = self.working_port_range
self.config = addict.Addict({
'settings': config,
})
return (start, end, ) != self.working_port_range
@property
def test_url(self) -> str:
return self.config.settings.testUrl or 'https://ssl.gstatic.com/gb/images/p2_edfc3681.png'
@property
def export_reboot_period(self) -> int:
value = self.config.settings.exportRebootPeriod
if not isinstance(value, int) or value < 0:
return 8 * 60 * 60
return value
@property
def working_port_range(self) -> tuple[int, int]:
default = (9000, 9999, )
start = self.config.settings.workingPortRangeStart
if not isinstance(start, int) or start < 1:
return default
end = self.config.settings.workingPortRangeEnd
if not isinstance(end, int) or end < 1:
return default
if end < start:
return default
return start, end,
def to_dict(self):
return {
'testUrl': self.test_url,
'exportRebootPeriod': self.export_reboot_period,
'workingPortRangeStart': self.working_port_range[0],
'workingPortRangeEnd': self.working_port_range[1],
}
__all__ = [
'Settings',
]
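# Usage sketch; the configuration values below are made up for illustration.
if __name__ == "__main__":
    settings = Settings(addict.Addict({'settings': {'workingPortRangeStart': 10000,
                                                    'workingPortRangeEnd': 10999}}))
    print(settings.working_port_range)  # (10000, 10999)
    print(settings.test_url)            # falls back to the default gstatic URL
    # update() reports whether the working port range actually changed.
    print(settings.update({'workingPortRangeStart': 20000,
                           'workingPortRangeEnd': 20999}))  # True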
| 1,455 |
RedditWordFrequency/main.py | SnailDragon/Python-Exploration | 0 | 2023430 |
from Scraper import Scraper
subreddit = input("Subreddit to analyze: ")
depth = int(input("Number of posts: "))
feed = input("top/hot/new: ")
scraper = Scraper(subreddit, depth, feed)
scraper.commentWordAnalysis()
| 220 |
app/web/auth.py | HuaiGuang10/mini-shop-server | 1 | 2023285 |
# _*_ coding: utf-8 _*_
"""
Created by Allen7D on 2018/4/19.
"""
from flask import render_template, redirect, url_for
from . import web
__author__ = 'Allen7D'
# @web.route('/', defaults={'path': ''})
# @web.route('/<path:path>')
@web.route('/')
def index():
'''Redirect to the API docs by default'''
return redirect('/apidocs/#/')
# return render_template("index.html")
@web.route('/doc')
def doc():
'''Redirect to the index page'''
return redirect(url_for('web.index'))
| 449 |
warrior/WarriorCore/custom_parallel_kw_driver.py | YutakaMizugaki/warriorframework | 24 | 2022617 |
'''
Copyright 2017, Fujitsu Network Communications, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
#!/usr/bin/python
"""This is custom parallel keyword driver which is used to execute
the keywords of a testcase in parallel where data_type = custom"""
import WarriorCore.step_driver as step_driver
import traceback
from collections import OrderedDict
import Framework.Utils as Utils
from Framework.Utils.print_Utils import print_debug, print_error
from WarriorCore.multiprocessing_utils import create_and_start_process_with_queue, \
get_results_from_queue, update_tc_junit_resultfile
from Framework.Utils import testcase_Utils
def execute_custom_parallel(step_list, data_repository, tc_status, system_name):
"""Takes a list of steps as input and executes them in parallel by
creating separate process of step_driver for each of these steps """
jobs_list = []
step_num = 0
output_q = None
for step in step_list:
step_num += 1
target_module = step_driver.main
#args_list = [step, step_num, data_repository, system_name, True]
args_dict = OrderedDict([("step", step),
("step_num", step_num),
("data_repository", data_repository),
("system_name", system_name),
("kw_parallel", True),
("output_q", output_q),
])
process, jobs_list, output_q = create_and_start_process_with_queue(target_module, args_dict,
jobs_list, output_q)
print_debug("process: {0}".format(process))
for job in jobs_list:
job.join()
result_list = get_results_from_queue(output_q)
step_status_list = []
kw_resultfile_list = []
step_impact_list = []
tc_junit_list = []
for result in result_list:
step_status_list.append(result[0])
kw_resultfile_list.append(result[1])
step_impact_list.append(result[2])
tc_junit_list.append(result[3])
tc_status = testcase_Utils.compute_status_using_impact(step_status_list, step_impact_list)
# parallel keywords generate multiple keyword junit result files
# each file logs the result for one keyword and is not integrated
# update testcase junit result file with individual keyword result files
data_repository['wt_junit_object'] = update_tc_junit_resultfile(data_repository['wt_junit_object'], tc_junit_list, data_repository['wt_tc_timestamp'])
print_debug("Updating Testcase result file...")
Utils.testcase_Utils.append_result_files(data_repository['wt_resultfile'], kw_resultfile_list)
return tc_status
def main(step_list, data_repository, tc_status, system_name=None):
"""Executes the list of steps in parallel
Computes and returns the testcase status"""
try:
testcase_status = execute_custom_parallel(step_list, data_repository,
tc_status, system_name)
except Exception:
testcase_status = False
print_error('unexpected error {0}'.format(traceback.format_exc()))
return testcase_status
| 3,743 |
fylesdk/apis/advance_requests.py | vishalsoni242/fyle-sdk-py | 0 | 2023009 |
from .api_base import ApiBase
class AdvanceRequests(ApiBase):
"""Class for Advance Requests APIs"""
GET_ADVANCE_REQUESTS = '/api/tpa/v1/advance_requests'
GET_ADVANCE_REQUESTS_COUNT = '/api/tpa/v1/advance_requests/count'
def get(self, offset=None, limit=None, updated_at=None, exported=None, state=None, approved_at=None):
"""Get a list of existing Advance requests .
Parameters:
updated_at (str): Date string in yyyy-MM-ddTHH:mm:ss.SSSZ format along with operator in RHS colon pattern.
(optional)
offset (int): A cursor for use in pagination, offset is an object ID that defines your place in the list.
(optional)
limit (int): A limit on the number of objects to be returned, between 1 and 1000. (optional)
exported (bool): If set to true, all Advance requests that are exported alone will be returned. (optional)
state(str) : A parameter to filter Advance requests by the state that they're in. (optional)
approved_at(str): Date string in yyyy-MM-ddTHH:mm:ss.SSSZ format along with operator in RHS colon pattern.
(optional)
Returns:
List with dicts in Advance requests schema.
"""
return self._get_request({
'updated_at': updated_at,
'offset': offset,
'limit': limit,
'exported': exported,
'state': state,
'approved_at': approved_at
}, AdvanceRequests.GET_ADVANCE_REQUESTS)
def count(self, offset=None, limit=None, updated_at=None, exported=None, state=None, approved_at=None):
"""Get a count of the existing Advance requests that match the parameters.
Parameters:
offset (int): A cursor for use in pagination, offset is an object ID that defines your place in the list.
(optional)
limit (int): A limit on the number of objects to be returned, between 1 and 1000. (optional)
updated_at (str): Date string in yyyy-MM-ddTHH:mm:ss.SSSZ format along with operator in RHS colon pattern.
(optional)
exported (bool): If set to true, all Advance requests that are exported alone will be returned. (optional)
approved_at(str): Date string in yyyy-MM-ddTHH:mm:ss.SSSZ format along with operator in RHS colon pattern.
(optional)
state(str) : A parameter to filter Advance requests by the state that they're in. (optional)
Returns:
Count of Advance requests.
"""
return self._get_request({
'offset': offset,
'limit': limit,
'approved_at': approved_at,
'updated_at': updated_at,
'exported': exported,
'state': state,
}, AdvanceRequests.GET_ADVANCE_REQUESTS_COUNT)
def get_all(self, offset=None, limit=None, updated_at=None, exported=None, state=None, approved_at=None):
"""
Get all the Advance requests based on paginated call
"""
count = self.count(offset=offset, limit=limit, updated_at=updated_at,
exported=exported, state=state, approved_at=approved_at)['count']
advance_requests = []
page_size = 300
for i in range(0, count, page_size):
segment = self.get(offset=i, limit=page_size, updated_at=updated_at, exported=exported,
                   state=state, approved_at=approved_at)
advance_requests = advance_requests + segment['data']
return advance_requests
| 3,583 |
data/external/repositories/243548/cs145-project1-master/Naive Bayes.py | Keesiu/meta-kaggle | 0 | 2023472 |
#!/cs145/
#Read trainning data#
import numpy as np
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.cross_validation import cross_val_score
import json
with open('train.json') as data_file:
data = json.load(data_file)
with open('test.json') as data_test_file:
data_test = json.load(data_test_file)
def getUniqueWords(allWords) :
uniqueWords = []
for i in allWords:
if not i in uniqueWords:
uniqueWords.append(i)
return uniqueWords
dictionary = [];
def featurize(data):
for d in data:
feat = []
ings = d["ingredients"]
for ing in ings:
for word in ing.strip().split(' '):
feat.append(word)
d["feat"] = feat
all_gredients = [];
featurize(data);
n = len(data);
for i in range(len(data)):
for j in range(len(data[i]["feat"])):
all_gredients.append(data[i]["feat"][j])
dictionary = getUniqueWords(all_gredients);
unique = len(dictionary);
train_feature = [[0 for x in range(unique)] for x in range(n)]
for i in range(len(data)):
for j in range(len(data[i]["feat"])):
for k in range(unique):
if(data[i]["feat"][j] == dictionary[k]):
train_feature[i][k] += 1
break
#read test data
n_test = len(data_test);
featurize(data_test)
test_feature = [[0 for x in range(unique)] for x in range(n_test)]
for i in range(n_test):
for j in range(len(data_test[i]["feat"])):
for k in range(unique):
if(data_test[i]["feat"][j] == dictionary[k]):
test_feature[i][k] +=1
break
#Transform matrix into tf-idf version
transformer = TfidfTransformer();
train = transformer.fit_transform(train_feature);
test=transformer.fit_transform(test_feature);
target= []
for i in range(n):
target.append(data[i]["cuisine"])
#Use crossvalidation to determine value of paramter
#10-fold cross-validation with alpha=1 for Multinomial Naive Bayes.
clf3=MultinomialNB(alpha=1, fit_prior=False)
scores=cross_val_score(clf3,train_feature, target,cv=10,scoring='accuracy')
print scores
#Use average accuracy as an estimate of out-of-sample accuracy
print scores.mean()
#search for an optimal value of alpha of Multinomial Naive Bayes
k_range=range(0,11)
k_scores=[]
for k in k_range:
a=0.1*k
clf3=MultinomialNB(alpha=a,fit_prior=False)
scores=cross_val_score(clf3,train_feature,target,cv=10,scoring='accuracy')
k_scores.append(scores.mean())
print k_scores
import matplotlib.pyplot as plt
# %matplotlib inline  (IPython magic; only valid inside a notebook)
#plot the value of alpha for Multinomial NB (x-axis) versus the cross-validated accuracy(y-axis)
plt.plot(k_range, k_scores)
plt.xlabel('Value of 100xalpha for Multinomial NB')
plt.ylabel('cross-validated Accuracy')
#predict test data
clfb = MultinomialNB(alpha=0.5, fit_prior=False)
clfb.fit(train, target)
pred=clfb.predict(test)
output = [['id','cuisine']]
for i in range(n_test):
output.append([data_test[i]["id"],pred[i]])
import csv
b = open('test5.csv', 'w')
a = csv.writer(b)
a.writerows(output)
b.close()
# ==================== Part 2: Bernoulli Naive Bayes on whole-ingredient features ====================
import json
with open('train.json') as data_file:
data1 = json.load(data_file)
with open('test.json') as data_test_file:
data_test1 = json.load(data_test_file)
#Instead fo spliting ingredients into words , take it as a whole item
def featurize1(data):
for d in data:
feat = []
ings = d["ingredients"]
for ing in ings:
feat.append(ing)
d["feat"] = feat
all_gredients1 = [];
featurize1(data1);
n1 = len(data1);
for i in range(len(data1)):
for j in range(len(data1[i]["feat"])):
all_gredients1.append(data1[i]["feat"][j])
dictionary1 = getUniqueWords(all_gredients1);
unique1 = len(dictionary1);
#Build binary entry matrix. "True" means occurence of the feature
train_feature1 = [[False for x in range(unique1)] for x in range(n1)]
for i in range(len(data1)):
for j in range(len(data1[i]["feat"])):
for k in range(unique1):
if(data1[i]["feat"][j] == dictionary1[k]):
train_feature1[i][k] =True
break
n_test1 = len(data_test1);
featurize1(data_test1)
test_feature1 = [[False for x in range(unique1)] for x in range(n_test1)]
for i in range(n_test1):
for j in range(len(data_test1[i]["feat"])):
for k in range(unique1):
if(data_test1[i]["feat"][j] == dictionary1[k]):
test_feature1[i][k] =True
break
#Use crossvalidation to determine value of paramter
#10-fold cross-validation with alpha=1 for Bernoulli Naive Bayes.
clf3=BernoulliNB(alpha=1, fit_prior=False)
scores=cross_val_score(clf3,train_feature1, target,cv=10,scoring='accuracy')
print scores
#Use average accuracy as an estimate of out-of-sample accuracy
print scores.mean()
#search for an optimal value of alpha of Bernoulli Naive Bayes
k_range=range(0,11)
k_scores=[]
for k in k_range:
a=0.1*k
clf3=BernoulliNB(alpha=a,fit_prior=False)
scores=cross_val_score(clf3,train_feature1,target,cv=10,scoring='accuracy')
k_scores.append(scores.mean())
print k_scores
import matplotlib.pyplot as plt
# %matplotlib inline  (IPython magic; only valid inside a notebook)
#plot the value of alpha for Bernoulli NB (x-axis) versus the cross-validated accuracy(y-axis)
plt.plot(k_range, k_scores)
plt.xlabel('Value of 100xalpha for Bernoulli NB')
plt.ylabel('cross-validated Accuracy')
clfb = BernoulliNB(alpha=0.15, fit_prior=False)
clfb.fit(train_feature1, target)
pred=clfb.predict(test_feature1)
output = [['id','cuisine']]
for i in range(n_test):
output.append([data_test[i]["id"],pred[i]])
import csv
b = open('test4.csv', 'w')
a = csv.writer(b)
a.writerows(output)
b.close()
| 5,690 |
smarty_left_pad/left_pad.py | nirmalchandra/smarty-left-pad | 6 | 2023169 |
def left_pad(input, length, fill_character=' '):
"""
Returns a string, which will be padded on the left with characters if necessary. If the input string is longer
than the specified length, it will be returned unchanged.
>>> left_pad('foo', 5)
' foo'
>>> left_pad('foobar', 6)
'foobar'
>>> left_pad('toolong', 2)
'toolong'
>>> left_pad(1, 2, '0')
'01'
>>> left_pad(17, 5, 0)
'00017'
:param input:
:param length: The return string's desired length.
:param fill_character:
:rtype str:
"""
return str(input).rjust(length, str(fill_character))
| 653 |
lessonsApp/forms.py | glen-s-abraham/Elearning-platform | 0 | 2022648 |
from django.forms import ModelForm
from .models import Lessons
class LessonsForm(ModelForm):
class Meta:
model = Lessons
fields = ['lessonname', 'lesson']
| 160 |
pytmpdir/__init__.py | brentonford/pytmpdir | 0 | 2023379 |
__project__ = 'pytmpdir'
__copyright__ = '2016, Synerty'
__author__ = 'Synerty'
__version__ = '0.1.2'
| 102 |
saladier/common/exception.py | chmouel/kitchen-saladier | 1 | 2023630 |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
import sqlalchemy.orm.exc
from saladier.openstack.common import log as logging
LOG = logging.getLogger(__name__)
OPTS = [
cfg.BoolOpt('fatal_exception_format_errors',
default=False,
help='Make exception message format errors fatal.'),
]
CONF = cfg.CONF
CONF.register_opts(OPTS)
class SaladierException(Exception):
"""Base Saladier Exception
To correctly use this class, inherit from it and define
a 'message' property. That message will get printf'd
with the keyword arguments provided to the constructor.
"""
message = "An unknown exception occurred."
code = 500
headers = {}
safe = False
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if 'code' not in self.kwargs:
try:
self.kwargs['code'] = self.code
except AttributeError:
pass
if not message:
try:
message = self.message % kwargs
except Exception as e:
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception('Exception in string format operation')
for name, value in kwargs.iteritems():
LOG.error("%s: %s" % (name, value))
if CONF.fatal_exception_format_errors:
raise e
else:
# at least get the core message out if something happened
message = self.message
super(SaladierException, self).__init__(message)
class Conflict(SaladierException):
message = 'Conflict'
code = 409
class NotFound(SaladierException):
message = 'Not found.'
code = 404
class ProductAlreadyExists(Conflict):
message = "Product %(name)s already exist."
class ProductNotFound(NotFound):
message = "Product %(name)s is not found"
class PlatformAlreadyExists(Conflict):
message = "Platform %(name)s already exist."
class PlatformNotFound(NotFound):
message = "Platform %(name)s is not found."
class ProductVersionAlreadyExists(Conflict):
# TODO(chmou): Get the actual version in there.
message = "Version %(name)s already exist."
class SubscriptionAlreadyExists(Conflict):
message = "Subscription %(name)s already exist."
class ProductVersionNotFound(Conflict):
message = "Version %(name)s is not found."
class ProductVersionStatusNotFound(Conflict):
message = "Product version status %(name)s is not found."
class ProductVersionStatusAlreadyExists(Conflict):
message = "Product version status %(name)s already exist."
class SaladierFlushError(sqlalchemy.orm.exc.FlushError):
"""Raised when an error occurs during a flush to the database."""
| 3,396 |
scripts/plot_velocity.py
|
LitterBot2017/Babysitter
| 0 |
2023623
|
#!/usr/bin/env python
import argparse
import matplotlib.pyplot as plt
import numpy
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--time-padding', type=float, default=0.1)
parser.add_argument('--time-steady', type=float, default=0.74)
parser.add_argument('input_file', type=str)
args = parser.parse_args()
data = numpy.loadtxt(args.input_file)
stamps, positions, velocities = data.T
stamps -= stamps[0]
velocity_limit = velocities[stamps > args.time_steady].mean()
linear_mask = numpy.logical_and(
stamps > args.time_padding,
stamps < args.time_steady - args.time_padding
)
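    # least-squares fit of velocity = a*t + b over the ramp-up window; the slope a is used as the acceleration limit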
b = velocities[linear_mask]
A = numpy.column_stack((
stamps[linear_mask],
numpy.ones(len(b))
))
x, _, _, _ = numpy.linalg.lstsq(A, b)
acceleration_limit = x[0]
print 'Velocity Limit: {:.6f}'.format(abs(velocity_limit))
print 'Acceleration Limit: {:.6f}'.format(abs(acceleration_limit))
fig = plt.figure()
axis_position = fig.add_subplot(2, 1, 1)
axis_position.plot(stamps, positions, '-k')
axis_position.set_ylabel('Position (rad)')
axis_velocity = fig.add_subplot(2, 1, 2)
axis_velocity.plot(stamps, velocities, '-b')
axis_velocity.plot(stamps, [velocity_limit] * len(stamps), '-r')
axis_velocity.plot(stamps, x[0] * stamps + x[1], '-g')
axis_velocity.set_xlabel('Time (s)')
axis_velocity.set_ylabel('Velocity (rad/s)')
plt.show()
| 1,396 |
colors.py
|
P-py/OpenCV-course
| 0 |
2022704
|
import cv2
import numpy as np
capture = cv2.VideoCapture(0)
while True:
ret, frame = capture.read()
width = int(capture.get(3))
height = int(capture.get(4))
## HSV Color
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# Takes BGR and converts into HSV
lower_blue = np.array([90, 50, 50])
upper_blue = np.array([130, 255, 255])
## Creating a mask
mask = cv2.inRange(hsv, lower_blue, upper_blue)
result = cv2.bitwise_and(frame, frame, mask=mask)
cv2.imshow('result', result)
cv2.imshow('mask', mask)
if cv2.waitKey(1) == ord('q'):
break
capture.release()
cv2.destroyAllWindows()
| 646 |
src/main.py
|
gmatuz/inthewilddb
| 36 |
2023478
|
import typer
import json
import collections
from texttable import Texttable
import sqlite3
con = sqlite3.connect('inthewild.db')
cur = con.cursor()
app = typer.Typer()
@app.command()
def exploitations(vulnerability_id: str, format_cli: bool = True):
exploitations = get_exploitations(vulnerability_id)
if(format_cli):
format_report_table(exploitations, vulnerability_id)
else:
print(json.dumps(exploitations))
@app.command()
def exploits(vulnerability_id: str, format_cli: bool = True):
exploits = get_exploits(vulnerability_id)
if(format_cli):
format_report_table(exploits, vulnerability_id)
else:
print(json.dumps(exploits))
def get_exploits(vulnerability_id: str):
return __get_exploit_reports(vulnerability_id, "exploit")
def get_exploitations(vulnerability_id: str):
return __get_exploit_reports(vulnerability_id, "exploitation")
def __get_exploit_reports(vulnerability_id: str, type: str):
exploits = []
for row in cur.execute('SELECT id, referenceURL, timeStamp FROM exploits WHERE type=? AND id=?', [type, vulnerability_id]):
exploit = collections.OrderedDict()
exploit['id'] = row[0]
exploit['referenceURL'] = row[1]
exploit['timeStamp'] = row[2]
exploits.append(exploit)
return exploits
@app.command()
def reports(vulnerability_id: str, format_cli: bool = True):
report = get_report(vulnerability_id)
if(format_cli):
format_full_report_table(report)
else:
print(json.dumps(report))
def get_report(vulnerability_id: str):
details = get_vulnerability_description(vulnerability_id)
return { "id": details["id"], "description": details["description"], "exploitations": get_exploitations(vulnerability_id), "exploits": get_exploits(vulnerability_id)}
def get_vulnerability_description(vulnerability_id: str):
cur.execute('SELECT id, description FROM vulns WHERE id=?',
[vulnerability_id])
vulnerability = cur.fetchone()
if(vulnerability == None):
return {"id": vulnerability_id, "description": "Vulnerability description missing"}
return {"id": vulnerability[0], "description": vulnerability[1]}
def format_report_table(reports, vulnerability_id):
formatted = []
formatted.append(["Vulnerability", "URL", "Report Time"])
for exploitation in reports:
formatted.append(["", exploitation["referenceURL"],
exploitation["timeStamp"]])
if(len(reports) == 0):
formatted.append(["", "No reports", ""])
formatted[1][0] = vulnerability_id
table = Texttable(max_width=100)
table.add_rows(formatted)
print(table.draw())
def format_full_report_table(output):
formatted = []
formatted.append(["Vulnerability", "report", "URL", "Report Time"])
for exploitation in output["exploitations"]:
formatted.append(
["", "", exploitation["referenceURL"], exploitation["timeStamp"]])
if(len(output["exploitations"]) == 0):
formatted.append(["", "", "No reports of exploitation inTheWild", ""])
for exploit in output["exploits"]:
formatted.append(
["", "", exploit["referenceURL"], exploit["timeStamp"]])
if(len(output["exploits"]) == 0):
formatted.append(["", "", "No reports of exploits available", ""])
formatted[1][0] = output["id"]
formatted[2][0] = output["description"]
formatted[1][1] = "exploitation"
if(len(output["exploitations"]) == 0):
formatted[len(output["exploitations"])+2][1] = "exploit"
else:
formatted[len(output["exploitations"])+1][1] = "exploit"
table = Texttable(max_width=150)
table.add_rows(formatted)
print(table.draw())
if __name__ == "__main__":
app()
| 3,777 |
pillow-learn/learn.py
|
Yeharold/PKG-Learn
| 0 |
2022637
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Date : 2018-01-01 15:39:00
# @Author : YeHarold (<EMAIL>)
# @Link : https://github.com/Yeharold
# from PIL import Image,ImageFilter
# img = Image.open('test.jpeg')
# gray = img.convert('L')
# px = gray.load()
# (w,h) = gray.size
# res = gray.filter(ImageFilter.GaussianBlur(radius=8))
# res.show()
| 358 |
random_number.py
|
xielidawan/560_hw2
| 0 |
2023156
|
import random
import json
def run():
num_list = []
for i in range(1000):
num_list.append(random.randint(0, 100))
with open('file1.json', 'w') as f:
f.write(json.dumps(num_list))
if __name__ == '__main__':
run()
| 259 |
guild_scrape_script.py
|
f-iber/rpguild_scraper
| 0 |
2022914
|
import pandas as pd
import requests
from bs4 import BeautifulSoup
import time
import csv
import os
import argparse
headers = {
'user-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'}
rawHTML = False
def get_posts(currentpageposts, thread_info):
# Gets all of the posts on a page
post_list = []
for item in currentpageposts:
user_item = item.find('div', class_='panel-body when-expanded-block')
postdiction = {}
postdiction.update(thread_info)
postdiction['id'] = item.attrs['id']
postdiction['user'] = user_item.find('div', class_='user-uname').text
try:
postdiction['date'] = item.select('abbr.ago')[0].attrs['title']
except (IndexError, AttributeError):
pass
if rawHTML:
postdiction['text'] = user_item.find(
'div', class_='post-body-html').contents[0]
else:
postdiction['text'] = user_item.find(
'div', class_='post-body-html').get_text("\n", strip=True)
post_list.append(postdiction)
return post_list
def get_thread_info(soup):
thread_info = {}
thread_info['title'] = soup.find('h2', class_='topic-heading').text
thread_info['forum'] = soup.find('ol', class_='breadcrumb').text
try:
thread_info['page_count'] = soup.find(
'ul', class_='pager').get_text("\n", strip=True)
thread_info['page_count'] = int(thread_info['page_count'].split('of')[
1].replace('\nNext →\nLast', '').strip())
except AttributeError:
thread_info['page_count'] = 1
if soup.select(".page-header > ul:nth-child(3)"):
thread_info['GM'] = soup.select(
".page-header > ul:nth-child(3)")[0].text
else:
thread_info['GM']='not an RP'
return thread_info
def page_loop(page_n, base_url, thread_info, post_df):
# Loops through all pages of a thread. First page was grabbed in outer loop
for i in range(2, int(page_n)+1):
resp = requests.get(base_url+'?page='+str(i))
soup = BeautifulSoup(resp.text, 'html.parser')
postdiction = {}
postdiction.update(thread_info)
time.sleep(.59)
post_list = get_posts(soup.find_all(
'div', class_='panel panel-default post visible-post expanded-post'), thread_info)
post_df = post_df.append(post_list, ignore_index=True)
return post_df
def get_all(url_list):
post_data = pd.DataFrame()
for url in url_list:
resp = requests.get(url, headers=headers)
url_base = resp.url
if url_base[-3:] == '/ic':
multi_tab = True
url_base = resp.url[:-3]
else:
multi_tab = False
url_base = resp.url[:-4]
postdiction = {}
soup = BeautifulSoup(resp.text, 'html.parser')
thread_info = get_thread_info(soup)
thread_info['page'] = 'IC/Base'
postdiction.update(thread_info)
post_list = get_posts(soup.find_all(
'div', class_='panel panel-default post visible-post expanded-post'), thread_info)
post_data = post_data.append(post_list)
post_data = page_loop(
thread_info['page_count'], url_base, thread_info, post_data)
if multi_tab:
thread_info['page'] = 'OOC'
resp = requests.get(url_base+r'/ooc')
soup = BeautifulSoup(resp.text, 'html.parser')
try:
thread_info['page_count'] = soup.find(
'ul', class_='pager').get_text("\n", strip=True)
thread_info['page_count'] = int(thread_info['page_count'].split('of')[
1].replace('\nNext →\nLast', '').strip())
except AttributeError:
thread_info['page_count'] = 1
post_list = get_posts(soup.find_all(
'div', class_='panel panel-default post visible-post expanded-post'), thread_info)
post_data = post_data.append(post_list)
post_data = page_loop(
thread_info['page_count'], url_base+r'/ooc', thread_info, post_data)
thread_info['page'] = 'Char'
resp = requests.get(url_base+r'/char')
soup = BeautifulSoup(resp.text, 'html.parser')
try:
thread_info['page_count'] = soup.find(
'ul', class_='pager').get_text("\n", strip=True)
thread_info['page_count'] = int(thread_info['page_count'].split('of')[
1].replace('\nNext →\nLast', '').strip())
except AttributeError:
thread_info['page_count'] = 1
post_list = get_posts(soup.find_all(
'div', class_='panel panel-default post visible-post expanded-post'), thread_info)
post_data = post_data.append(post_list)
post_data = page_loop(
thread_info['page_count'], url_base+r'/char', thread_info, post_data)
return post_data
def list_build(input_csv):
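    # each row of the csv is either a full thread URL, a "start,end" range of topic ids, or a single topic id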
url_list = []
with open(input_csv, newline='') as csvfile:
url_reader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in url_reader:
try:
if row[0][0] not in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:
url_list.append(row[0])
elif len(row)>1 and row[1] != '':
to_range = range(int(row[0]), int(row[1])+1)
for item in to_range:
url_list.append(
'https://www.roleplayerguild.com/topics/'+str(item))
else:
url_list.append(
'https://www.roleplayerguild.com/topics/'+row[0])
except IndexError:
print(row)
pass
return url_list
def begin(resume=False, save=False, limit=0):
if resume:
url_list = list_build('in_progress.csv')
else:
url_list = list_build('threads.csv')
if save:
full_url_list = url_list
if limit > 0:
url_list = url_list[:limit]
if save:
for num, url in enumerate(url_list, start=1):
if num == 1:
get_all([url]).to_csv('posts.csv', header='column_names')
else:
get_all([url]).to_csv('posts.csv', mode='a', header=False)
last_position = num
else:
if not os.path.isfile('posts.csv'):
get_all(url_list).to_csv('posts.csv', header='column_names')
else:
get_all(url_list).to_csv('posts.csv', mode='a', header=False)
if save:
save_position(full_url_list, last_position)
def save_position(url_list, last_position):
with open('in_progress.csv', 'w', newline='') as csvfile:
list_writer = csv.writer(csvfile, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
for item in url_list[last_position:]:
list_writer.writerow([item])
parser = argparse.ArgumentParser(description='scrape some threads.')
parser.add_argument('--resume', '--r', metavar='N', default=False, type=bool,
help='Whether to start from the in_progress CSV or start from scratch')
parser.add_argument('--save', '--s', metavar='N', default=False, type=bool,
help='Whether to save progress continually or only at the end')
parser.add_argument('--limit', '--l', metavar='N', default=0, type=int,
                    help='Whether to proceed through a set number of threads or not')
parser.add_argument('--HTML', '--h', metavar='N', default=False, type=bool,
help='Whether to retrieve text or Raw HTML tags')
args = parser.parse_args()
rawHTML = args.HTML
begin(args.resume, args.save, args.limit)
| 7,931 |
tensorlayerx/nn/layers/inputs.py
|
tensorlayer/TensorLayerX
| 34 |
2023096
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
import tensorlayerx as tlx
from tensorlayerx import logging
from tensorlayerx.nn.core import Module
from ..initializers import *
__all__ = ['Input', '_InputLayer']
class _InputLayer(Module):
"""
The :class:`Input` class is the starting layer of a neural network.
Parameters
----------
shape : tuple (int)
Including batch size.
dtype: dtype or None
The type of input values. By default, tf.float32.
name : None or str
A unique layer name.
"""
def __init__(self, shape, dtype=None, name=None, init_method=None):
super(_InputLayer, self).__init__(name)
logging.info("Input %s: %s" % (self.name, str(shape)))
self.shape = shape
self.dtype = dtype
self.shape_without_none = [_ if _ is not None else 1 for _ in shape]
if tlx.BACKEND == 'paddle':
self.outputs = tlx.ops.ones(self.shape)
else:
if init_method is None:
self.outputs = ones()(self.shape_without_none, dtype=self.dtype)
else:
self.outputs = init_method(self.shape_without_none, dtype=self.dtype)
self._built = True
self._add_node(self.outputs, self.outputs)
def __repr__(self):
s = 'Input(shape=%s' % str(self.shape)
if self.name is not None:
s += (', name=\'%s\'' % self.name)
s += ')'
return s
def __call__(self, *args, **kwargs):
return self.outputs
def build(self, inputs_shape):
pass
def forward(self):
return self.outputs
def Input(shape, init=None, dtype=tlx.float32, name=None):
"""
The :class:`Input` class is the starting layer of a neural network.
Parameters
----------
shape : tuple (int)
Including batch size.
init : initializer or str or None
The initializer for initializing the input matrix
dtype: dtype
The type of input values. By default, tf.float32.
name : None or str
A unique layer name.
Examples
---------
With TensorLayer
>>> ni = tlx.nn.Input([10, 50, 50, 32], name='input')
>>> output shape : [10, 50, 50, 32]
"""
input_layer = _InputLayer(shape, dtype=dtype, name=name, init_method=init)
outputs = input_layer()
return outputs
| 2,358 |
Xana/Xfit/DetectorCorrection.py
|
ClLov/Xana
| 0 |
2023037
|
#! /usr/bin/env python
import numpy as np
import lmfit
from Xfit.PoissonGammaDistribution import PoissonGamma as poisgam
def rescale(y, rng=(0,1)):
return ((rng[1]-rng[0])*(y - min(y)))/(max(y)-min(y)) + rng[0]
def correct_probability(prob, kappa, method='shift p2'):
"""Correct probability
"""
if method == 'shift p2':
p = prob[2:]
c = p[2] * kappa
p[2] -= c
p[1] += 2*c
p[0] -= c
    elif method == 'kb fraction':
        kb = prob[1] * kappa
        p = prob[2:]
        p[2] -= kb
        p[2][p[2]<=0] = 0
        p[1] += 2*kb
        p[0] -= kb
return prob
def detector_correction(prob, prob_ref, npix, err=None, kv=None, init={}, fix=None, method='Nelder-mead',
mspacing=200, correction_method='shift p2'):
""" Fit the Poisson-Gamma distribution using the likelihood ratio approach.
"""
if kv is None:
kv = np.arange(prob.shape[0]-2)
#make initial guess for parameters
for vn in ['kappa']:
if vn not in init.keys():
if vn == 'kappa':
init[vn] = (0, 0, 1)
# initialize fit parameters
pars = lmfit.Parameters()
pars.add('kappa', value=init['kappa'][0], min=init['kappa'][1], max=init['kappa'][2])
if fix is not None:
for vn in fix.keys():
pars[vn].set(value=fix[vn], vary=0)
if err is not None:
err = np.abs(err)
wgt = err.copy()
wgt[wgt>0] = 1./wgt[wgt>0]**2
else:
wgt = None
M = np.logspace( 0, 2, mspacing )
def chi2(prob):
"""Calculate likelihood ratio
"""
kb = prob[1]
prob = prob[kv+2]
chi2 = np.zeros(( mspacing ))
for j,m in enumerate(M):
for i in range(kv.size):
probi = prob[i]
ind = np.where(probi)
pg = poisgam(kb[ind], m, kv[i], ind_var='kb')
chi2[j] += np.sum(probi[ind] * np.log(pg/probi[ind]))
chi2[j] *= -2 * npix
return rescale(chi2, (0,1))
chi2_ref = chi2(prob_ref)
def residual(pars, prob, eps=None):
"""Residual function to minimize
"""
prob = prob.copy()
v = pars.valuesdict()
prob = correct_probability(prob, v['kappa'], correction_method)
return np.sum(np.abs(chi2_ref - chi2(prob)))
out = lmfit.minimize(residual, pars, args=(prob,), kws={'eps':wgt}, method=method,
nan_policy='omit')
pars_arr = np.zeros((1,2))
for i,vn in enumerate(['kappa']):
pars_arr[i,0] = out.params[vn].value
# pars_arr[i,1] = pars_arr[i,0]**2*out.params[vn].stderr
gof = np.array([out.chisqr, out.redchi, out.bic, out.aic])
return pars_arr, gof, out, lmfit.fit_report(out)
| 2,851 |
HandTrackTest.py
|
TheUnity42/Micro2Project
| 0 |
2023018
|
from ctypes.wintypes import RGB
import tensorflow as tf
import numpy as np
import cv2
import os
import sys
def main(model_path):
# handle relative paths
model_path = os.path.abspath(model_path)
print("Loading model from: {}".format(model_path))
# load the model
model = tf.keras.models.load_model(model_path)
print("Model loaded.")
# connect to webcam
cap = cv2.VideoCapture(1)
# run in a loop until user presses 'q' or 'esc'
while True:
# read the frame
ret, frame = cap.read()
if not ret:
print("Error reading frame.")
break
# mirror the frame
frame = cv2.flip(frame, 1)
# convert to RGB
rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# resize to fit the model
crop_frame = cv2.resize(rgb, (224, 224))
crop_frame = tf.convert_to_tensor(crop_frame, dtype=tf.float32)
# predict the class
prediction = model.predict(crop_frame[None, :, :, :])[0]
# print the prediction
print(prediction)
# calculate the x and y scale factors
x_scale = frame.shape[1] / 224
y_scale = frame.shape[0] / 224
# scale the prediction
prediction[0] = prediction[0] * y_scale
prediction[1] = prediction[1] * x_scale
prediction[2] = prediction[2] * y_scale
prediction[3] = prediction[3] * x_scale
# cast to int
prediction = prediction.astype(np.int32)
# draw the bounding box
cv2.rectangle(frame, (prediction[1], prediction[0]), (prediction[3], prediction[2]), (0, 255, 0), 2)
# show the frame
cv2.imshow("frame", frame)
# wait for a key press
key = cv2.waitKey(1)
        # if the user presses 'q' or 'esc', exit the loop
        if key == ord('q') or key == 27:
            break
    # release the webcam and close any OpenCV windows before exiting
    cap.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Usage: python HandTrackTest.py <model_path>")
sys.exit(1)
main(sys.argv[1])
sys.exit(0)
| 2,034 |
camps/summer_school_Kremenchuk_2016/kkzp/Dalter.py
|
mstrechen/cp
| 0 |
2022643
|
n = 1000000000000000
ans = 0
currAns = 0
j = 1
i = (n-1)**(1/3)
i = int(i)
print(i)
while(i>0):
sb = n-i*i*i
while(sb>=j*j):
currAns+=(j-1)*(2*j-1)
j+=1
ans+=currAns+(j-1)*(sb-(j-1)*(j-1))
if(i%100==0):
print(n-i*i*i)
i-=1
print(ans)
| 295 |
test/trait/test_localized.py
|
marrow/mongo
| 22 |
2022620
|
# encoding: utf-8
from __future__ import unicode_literals
import pytest
from marrow.mongo.field import String, Translated
from marrow.mongo.trait import Localized
class TestTranslated(object):
class Sample(Localized):
class Locale(Localized.Locale):
word = String()
word = Translated('word')
def test_construction(self):
inst = self.Sample.from_mongo({'locale': [
{'language': 'en', 'word': 'hello'},
{'language': 'fr', 'word': 'bonjour'}
]})
assert inst.word == {'en': 'hello', 'fr': 'bonjour'}
def test_assignment(self):
inst = self.Sample()
with pytest.raises(TypeError):
inst.word = None
def test_query_translated(self):
q = self.Sample.word == 'bonjour'
assert q == {'locale.word': 'bonjour'}
class TestLocalized(object):
class Sample(Localized):
class Locale(Localized.Locale):
word = String()
def test_repr(self):
inst = self.Sample.from_mongo({'locale': [
{'language': 'en', 'word': 'hello'},
{'language': 'fr', 'word': 'bonjour'}
]})
assert repr(inst) == "Sample({en, fr})"
def test_empty_repr(self):
inst = self.Sample()
assert repr(inst) == "Sample()"
def test_query(self):
q = self.Sample.locale.word == 'bonjour'
assert q == {'locale.word': 'bonjour'}
| 1,268 |
dnppy/raster/raster_fig.py
|
NASA-DEVELOP/dnppy
| 65 |
2022739
|
__author__ = "Jwely"
__all__ = ["raster_fig"]
# standard imports
import matplotlib.pyplot as plt
class raster_fig:
"""
raster_fig objects are used for heads up displays of raster
data to the user.
    :param numpy_rast: a numpy array representing a raster dataset
:param title: title to put on the raster figure plot
"""
def __init__(self, numpy_rast, title = False):
""" initializes the raster figure """
self.numpy_rast = numpy_rast
self.title = title
self.make_fig()
return
def make_fig(self):
""" function to set up an initial figure """
self.fig, ax = plt.subplots()
self.fig.show()
self.im = ax.imshow(self.numpy_rast)
if self.title:
self.fig.suptitle(self.title, fontsize = 20)
self.im.set_data(self.numpy_rast)
self.fig.canvas.draw()
return
def update_fig(self, numpy_rast, title = False):
"""
Function to update a figure that already exists.
:param numpy_rast: a numpy array representing a raster dataset
:param title: title to put on the raster figure object
"""
if title:
self.fig.suptitle(title, fontsize = 20)
self.im.set_data(numpy_rast)
self.fig.canvas.draw()
return
def close_fig(self):
""" closes an active figure """
plt.close(self.fig)
return
| 1,462 |
api/serializers.py
|
TjandraD/batamlawancorona_api
| 26 |
2022623
|
from django.db import models
from rest_framework import serializers
from .models import DataHarian, DataPerKecamatan
class DataHarianSerializer(serializers.ModelSerializer):
class Meta:
model = DataHarian
fields = [
'tanggal',
'kumulatif',
'sembuh',
'dalam_perawatan',
'meninggal',
]
class DataPerKecamatanSerializer(serializers.ModelSerializer):
class Meta:
model = DataPerKecamatan
fields = [
'tanggal',
'sagulung',
'bulang',
'batu_aji',
'belakang_padang',
'sekupang',
'lubuk_baja',
'batu_ampar',
'bengkong',
'nongsa',
'batam_kota',
'galang',
'sei_beduk',
]
| 844 |
behavioral/examples/interpreter/morse_code.py
|
rcavaz/Design-Patterns
| 3 |
2023637
|
import re
class Context(object):
def __init__(self, morse):
self._input = morse
self._output = ''
def __str__(self):
return self._output
def morse(self, txt=None):
if txt is None:
return self._input
else:
self._input = txt
def abc(self, txt=None):
if txt is None:
return self._output
else:
self._output = txt
class AbstractExpression(object):
def interpret(self, context):
# If end of message
if len(context.morse()) == 0:
context.abc(context.abc() + self.char())
# If current is a single blank
elif re.match(' ', context.morse()):
# If current is a double blank
            if re.match('  ', context.morse()):
context.morse(context.morse()[2:])
context.abc(context.abc() + self.char())
else:
context.morse(context.morse()[1:])
context.abc(context.abc() + self.char())
# If current is a dot
elif re.match('\.', context.morse()):
context.morse(context.morse()[1:])
self.left().interpret(context)
# If current is a dash
elif re.match('-', context.morse()):
context.morse(context.morse()[1:])
self.right().interpret(context)
else:
raise Exception('Syntax error')
def left(self):
raise NotImplementedError
def right(self):
raise NotImplementedError
def char(self):
raise NotImplementedError
class TerminalExpression(AbstractExpression):
def __init__(self, char='<?>'):
self._char = char
def char(self):
return self._char
def left(self):
return self
def right(self):
return self
class NonterminalExpression(AbstractExpression):
def __init__(self, **args):
self._left = args['dot'] if 'dot' in args else TerminalExpression()
self._right = args['dash'] if 'dash' in args else TerminalExpression()
self._char = args['char'] if 'char' in args else TerminalExpression()
def left(self, node=None):
if node is None:
return self._left
else:
self._left = node
def right(self, node=None):
if node is None:
return self._right
else:
self._right = node
def char(self):
return self._char.char()
class Client(object):
def main(self):
message = '... --- ...'
context = Context(message)
# Create chars
a = TerminalExpression('a')
b = TerminalExpression('b')
c = TerminalExpression('c')
d = TerminalExpression('d')
e = TerminalExpression('e')
f = TerminalExpression('f')
g = TerminalExpression('g')
h = TerminalExpression('h')
i = TerminalExpression('i')
j = TerminalExpression('j')
k = TerminalExpression('k')
l = TerminalExpression('l')
m = TerminalExpression('m')
n = TerminalExpression('n')
o = TerminalExpression('o')
p = TerminalExpression('p')
q = TerminalExpression('q')
r = TerminalExpression('r')
s = TerminalExpression('s')
t = TerminalExpression('t')
u = TerminalExpression('u')
v = TerminalExpression('v')
w = TerminalExpression('w')
x = TerminalExpression('x')
y = TerminalExpression('y')
z = TerminalExpression('z')
#null = TerminalExpression('<?>')
blank = TerminalExpression(' ')
# Build the abstract syntax tree
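        # dots descend to the left child and dashes to the right; e.g. '.' -> e, '-' -> t, '..' -> i, '.-' -> a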
#n30 = NonterminalExpression()
#n29 = NonterminalExpression()
n28 = NonterminalExpression(char=q)
n27 = NonterminalExpression(char=z)
n26 = NonterminalExpression(char=y)
n25 = NonterminalExpression(char=c)
n24 = NonterminalExpression(char=x)
n23 = NonterminalExpression(char=b)
n22 = NonterminalExpression(char=j)
n21 = NonterminalExpression(char=p)
#n20 = NonterminalExpression()
n19 = NonterminalExpression(char=l)
#n18 = NonterminalExpression()
n17 = NonterminalExpression(char=f)
n16 = NonterminalExpression(char=v)
n15 = NonterminalExpression(char=h)
n14 = NonterminalExpression(char=o)
n13 = NonterminalExpression(char=g, dot=n27, dash=n28)
n12 = NonterminalExpression(char=k, dot=n25, dash=n26)
n11 = NonterminalExpression(char=d, dot=n23, dash=n24)
n10 = NonterminalExpression(char=w, dot=n21, dash=n22)
n9 = NonterminalExpression(char=r, dot=n19)
n8 = NonterminalExpression(char=u, dot=n17)
n7 = NonterminalExpression(char=s, dot=n15, dash=n16)
n6 = NonterminalExpression(char=m, dot=n13, dash=n14)
n5 = NonterminalExpression(char=n, dot=n11, dash=n12)
n4 = NonterminalExpression(char=a, dot=n9, dash=n10)
n3 = NonterminalExpression(char=i, dot=n7, dash=n8)
n2 = NonterminalExpression(char=t, dot=n5, dash=n6)
n1 = NonterminalExpression(char=e, dot=n3, dash=n4)
root = NonterminalExpression(char=blank, dot=n1, dash=n2)
while len(context.morse()) > 0:
root.interpret(context)
print '%s = %s' % (message, context.abc())
if __name__ == '__main__':
c = Client()
c.main()
| 5,437 |
museum-mineralogie/groupe-5/src/python/ardui.py
|
stvinho/TerrA-LigeriA-1
| 1 |
2023140
|
# import serial
import time
import parse
import re
from pygame import mixer
import serial.tools.list_ports
from dotenv import load_dotenv
import os
load_dotenv()
PORT = os.getenv('PORT')
FORMAT_STRING = "a(?P<no>[0-9]+)bc(?P<val>[0-9]+)d"
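# each serial line looks like 'a<pin>bc<value>d', e.g. 'a6bc1d' reports pin 6 with value 1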
if PORT == "": # if port not defined in .env, we scan devices to try to find it. It is better to define it .env
for port in list(serial.tools.list_ports.comports()):
if port[2].startswith('USB VID:PID=2341:0043'):
PORT = port[0]
arduino = serial.Serial(port = int(PORT), baudrate = int(os.getenv('BAUDRATE')))
pins = dict([[pin, "0"] for pin in os.getenv("PINS").split(',')])
currentSong = None
mixer.init()
while True:
line = arduino.readline().decode('ascii')
    # line='a6bc1d' # test value when no Arduino is connected
d = re.match(FORMAT_STRING, line)
if(d is not None):
d=d.groupdict()
p, v = d["no"], d["val"]
if(v != pins[p]):
print(("Set changed for pin {}. New state : {}").format(p,v))
pins[p] = v
if v=="1" and p in pins.keys():
if currentSong is not None:
currentSong.stop()
print("sound playing")
currentSong = mixer.Sound(os.path.join(os.path.dirname(__file__), "../../records/DUBUISOUND{}.wav".format(p)))
currentSong.set_volume(.5)
currentSong.play()
| 1,439 |
assetman/tests/test_shunt.py
|
ASSETIO/Assetman
| 8 |
2023262
|
import os
import sys
import logging
# Hack: Do this before attempting to load any django-related stuff.
# FIXME boo do not modify environ at module scope
os.environ['DJANGO_SETTINGS_MODULE'] = 'assetman.tests.django_test_settings'
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG,
format='%(asctime)s %(process)d %(filename)s %(lineno)d %(levelname)s #| %(message)s',
datefmt='%H:%M:%S')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
| 466 |
config.py
|
wancy86/tornado-seed
| 0 |
2022872
|
DATABASES = {
'cs': [
{
'dialect': 'mysql',
'driver': 'pymysql',
'user': 'root',
'password': '<PASSWORD>',
'host': '127.0.0.1',
'database': 'cs'
},
{
'dialect': 'mysql',
'driver': 'pymysql',
'user': 'root',
'password': '<PASSWORD>',
'host': '127.0.0.1',
'database': 'cstest'
}
]
}
INSTALLED_APPS = ['session', 'secu']
# Session lifetime (minutes)
SESSION_TIMEOUT = 120
# Whether tornado runs in debug mode; if True, the service restarts as soon as a file changes
DEBUG = True
# Used by the automated tests; decides which database is selected
TEST = 0
# Used by the automated tests; decides which port the automated tests use
TEST_PORT = '8888'
# SQLAlchemy log
ECHO = False
# Whether errors raise exceptions; used in test mode for errors that need debugging
PROCEDURE_ECHO = False
# Mailboxes that receive system error reports
ERROR_REPORT_RECEIVERS = ['<EMAIL>', '<EMAIL>', '<EMAIL>', '<EMAIL>']
HTTPS_KEY_PATH = '/home/ubuntu/sites/cert/localhost.key'
HTTPS_CRT_PATH = '/home/ubuntu/sites/cert/localhost.crt'
# # NetEase SMS: Guoxue
# SMS = {
# 'URL': 'https://api.netease.im/sms/sendcode.action',
# 'AppKey': 'f643a4b27d11876c1e125d08ad9befe6',
# 'AppSecret':'8343544a1549',
# 'Type': {
# 'login': '4042090', # login
# 'get-money': '3982120', # withdrawal request
# 'change-phone': '4042091', # change phone number
# 'buy-sucess': '4102132', # purchase successful
# }
# }
# # WeChat basic configuration data
# WPC = {
# 'APPID': 'wx53c18f32ad626eb8',
# 'APPSECRET': 'ff45bca0fec005462e1d01a9e55182fd',
# 'MCHID': '1397809602',
# 'KEY': 'a7fab967242e4c438a2ba95b9d4db287',
# 'GOODDESC': '泽慧国学-国学课程',
# 'NOTIFY_URL': 'https://service.huizeguoxue.com/service/applesson/wechatordernotice',
# }
STATIC = {
    # Public access URL
    'URL': 'http://192.168.1.54:8080/',
    # Local storage path
    'LOCALPATH': '/home/max/sites/cs/services/medias',
    # Remote storage paths
'REMOTES': [
]
}
| 1,979 |
Platforms/Web/Processing/Api/Admin/avatar.py
|
The-CJ/Phaazebot
| 2 |
2023230
|
from typing import TYPE_CHECKING, Coroutine
if TYPE_CHECKING:
from Platforms.Web.main_web import PhaazebotWeb
from Platforms.Discord.main_discord import PhaazebotDiscord
import json
import asyncio
from aiohttp.web_request import FileField
from aiohttp.web import Response
from Utils.Classes.webrequestcontent import WebRequestContent
from Utils.Classes.authwebuser import AuthWebUser
from Utils.Classes.extendedrequest import ExtendedRequest
from Platforms.Web.index import PhaazeWebIndex
from Platforms.Web.utils import authWebUser
@PhaazeWebIndex.view("/api/admin/avatar")
async def apiAdminAvatar(cls:"PhaazebotWeb", WebRequest:ExtendedRequest) -> Response:
"""
Default url: /api/admin/avatar
"""
WebAuth:AuthWebUser = await authWebUser(cls, WebRequest)
if not WebAuth.found:
return await cls.Tree.Api.errors.apiMissingAuthorisation(WebRequest)
if not WebAuth.User.checkRoles(["admin", "superadmin"]):
return await cls.Tree.Api.errors.apiNotAllowed(WebRequest, msg="Admin rights required")
Data:WebRequestContent = WebRequestContent(WebRequest, force_method="unpackPost")
await Data.load()
platform:str = Data.getStr("platform", None)
if platform == "discord":
return await apiAdminAvatarDiscord(cls, WebRequest, Data)
else: return await cls.Tree.Api.errors.apiMissingValidMethod(cls, WebRequest, msg=f"'{platform}' is not a known platform")
async def apiAdminAvatarDiscord(cls:"PhaazebotWeb", WebRequest:ExtendedRequest, Data:WebRequestContent) -> Response:
"""
Default url: /api/admin/avatar?platform=discord
"""
DISCORD_TIMEOUT:int = 120
PhaazeDiscord:"PhaazebotDiscord" = cls.BASE.Discord
if not PhaazeDiscord: return await cls.Tree.Api.errors.apiNotAllowed(cls, WebRequest, msg="Discord module is not active")
AvatarData:FileField = Data.get("file", None)
if not AvatarData or type(AvatarData) is not FileField:
return await cls.Tree.Api.errors.apiMissingData(cls, WebRequest, msg="missing or invalid `file`")
avatar_as_bytes:bytes = AvatarData.file.read()
change_coro:Coroutine = PhaazeDiscord.user.edit(avatar=avatar_as_bytes)
WaitForDiscord:asyncio.Event = asyncio.Event()
AvatarChangeTask:asyncio.Future = asyncio.ensure_future(change_coro, loop=PhaazeDiscord.BASE.DiscordLoop)
AvatarChangeTask.add_done_callback(lambda x: WaitForDiscord.set())
    # NOTE (for my later self): calling `await asyncio.wait_for` here hands control back to aiohttp, which keeps serving new requests;
    # this coroutine resumes once the Discord task completes, since the done callback is scheduled with call_soon.
    # So even if the server is somewhat busy, everything should go smoothly.
try: await asyncio.wait_for(WaitForDiscord.wait(), DISCORD_TIMEOUT)
except: pass
if not AvatarChangeTask.done():
AvatarChangeTask.cancel()
return await cls.Tree.Api.errors.apiTimeout(cls, WebRequest, time=DISCORD_TIMEOUT)
try:
AvatarChangeTask.result() # if everything is ok, return value should be none
return cls.response(
text=json.dumps(dict(msg="Avatar change successfully", status=200)),
content_type="application/json",
status=200
)
except Exception as E:
return cls.response(
text=json.dumps(dict(msg="Avatar change failed", exception=str(E), status=400)),
content_type="application/json",
status=400
)
| 3,259 |
src/detectAndCompareFaces.py
|
ly1996/facenet
| 1 |
2023643
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from scipy import misc
import tensorflow as tf
import numpy as np
import sys
import os
import copy
import argparse
import facenet
import align.detect_face
import math
import cv2
def main():
    g1 = tf.Graph() # graph loaded into Session 1
    g2 = tf.Graph() # graph loaded into Session 2
    sess1 = tf.Session(graph=g1) # Session 1
    sess2 = tf.Session(graph=g2) # Session 2
global pnet, rnet, onet
with sess1.as_default():
with g1.as_default():
pnet, rnet, onet = align.detect_face.create_mtcnn(sess1, None)
images,image_names = load_and_align_data("data/images",160,32,0.9,sess1,pnet, rnet, onet)
nrof_images = len(image_names)
video_full_path = "rtsp://admin:[email protected]:554/h264/ch1/main"
cap = cv2.VideoCapture(video_full_path)
print(cap.isOpened())
frame_count = 1
success = True
with sess2.as_default():
with g2.as_default():
# Load the model
facenet.load_model("data/facenet/20180402-114759/20180402-114759.pb")
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
# Run forward pass to calculate embeddings
feed_dict = {images_placeholder: images, phase_train_placeholder: False}
emb = sess2.run(embeddings, feed_dict=feed_dict)
# mean = np.mean(emb, axis=0)
count = 0
while count < 50:
count += 1
success, frame = cap.read()
# print('Read a new frame: ', success)
if not success:
continue
with sess1.as_default():
with g1.as_default():
[canFind,imgs] = load_and_align_single_img(count,frame,160,32,0.9,sess1,pnet, rnet, onet)
if not canFind:
continue
# print("can find face")
print("the ",count ,"th detect")
with sess2.as_default():
with g2.as_default():
feed_dict = {images_placeholder: imgs, phase_train_placeholder: False}
embs = sess2.run(embeddings, feed_dict=feed_dict)
for emFind in embs:
minDist = 10000
minIndex = -1
for j in range(nrof_images):
# dist = np.sqrt(np.sum(np.square(np.subtract(emb[i,:], emb[j,:]))))
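                                # cosine similarity between the two embeddings, converted to an angular distance in [0, 1]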
dot = np.sum(np.multiply(emFind , emb[j, :]))
norm = np.linalg.norm(emFind) * np.linalg.norm(emb[j, :])
similarity = dot / norm
# print (similarity)
dist = np.arccos(similarity) / math.pi
if dist < minDist:
minDist = dist
minIndex = j
print ("find person: ",image_names[j])
# with tf.Graph().as_default():
# with tf.Session() as sess:
# # Load the model
# facenet.load_model("data/facenet/20180408-102900")
cap.release()
def load_and_align_single_img(count, img, image_size, margin, gpu_memory_fraction, sess, pnet, rnet, onet):
minsize = 20 # minimum size of face
threshold = [0.6, 0.7, 0.7] # three steps's threshold
factor = 0.709 # scale factor
img_size = np.asarray(img.shape)[0:2]
bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
# print(bounding_boxes)
# print("_ : ", _)
if len(bounding_boxes) < 1:
print("can't detect face, remove ")
return [False,False]
img_list = []
nrof_faces = bounding_boxes.shape[0]
det = bounding_boxes[:, 0:4]
det_arr = []
img_size = np.asarray(img.shape)[0:2]
for i in range(nrof_faces):
det_arr.append(np.squeeze(det[i]))
for i, det in enumerate(det_arr):
det = np.squeeze(det)
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(det[0] - margin / 2, 0)
bb[1] = np.maximum(det[1] - margin / 2, 0)
bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
# scaled = misc.imresize(cropped, (image_size, image_size), interp='bilinear')
output_filename_n = "{}{}_{}{}".format("data/faces/",count, i, ".jpg")
misc.imsave(output_filename_n, cropped)
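        # rotate the face crop so the eyes are horizontal, then resize it to the network input size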
eye_center = ((_[0][i] + _[1][i]) / 2, (_[5][i] + _[6][i]) / 2)
dy = _[6][i] - _[5][i]
dx = _[1][i] - _[0][i]
angle = cv2.fastAtan2(dy, dx)
rot = cv2.getRotationMatrix2D(eye_center, angle, scale=1.0)
cropped = cv2.warpAffine(cropped, rot, dsize=(cropped.shape[1], cropped.shape[0]))
aligned = misc.imresize(cropped, (image_size, image_size), interp='bilinear')
prewhitened = facenet.prewhiten(aligned)
img_list.append(prewhitened)
images = np.stack(img_list)
return [True,images]
def load_and_align_data(image_dir, image_size, margin, gpu_memory_fraction,sess,pnet, rnet, onet):
# print (image_paths)
minsize = 20 # minimum size of face
threshold = [0.6, 0.7, 0.7] # three steps's threshold
factor = 0.709 # scale factor
# print('Creating networks and loading parameters')
# with tf.Graph().as_default():
# gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
# sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
# with sess.as_default():
# pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)
image_paths = []
image_names = []
for file in os.listdir(os.path.expanduser(image_dir)):
image_paths.append(os.path.join(image_dir, file))
image_names.append(file)
tmp_image_paths = copy.copy(image_paths)
print (tmp_image_paths)
img_list = []
for image in tmp_image_paths:
img = misc.imread(os.path.expanduser(image), mode='RGB')
img_size = np.asarray(img.shape)[0:2]
bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
# print(bounding_boxes)
# print("_ : ", _)
if len(bounding_boxes) < 1:
image_paths.remove(image)
print("can't detect face, remove ", image)
continue
det = np.squeeze(bounding_boxes[0, 0:4])
bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(det[0] - margin / 2, 0)
bb[1] = np.maximum(det[1] - margin / 2, 0)
bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
eye_center = ((_[0] + _[1]) / 2, (_[5] + _[6]) / 2)
dy = _[6] - _[5]
dx = _[1] - _[0]
angle = cv2.fastAtan2(dy, dx)
rot = cv2.getRotationMatrix2D(eye_center, angle, scale=1.0)
cropped = cv2.warpAffine(cropped, rot, dsize=(cropped.shape[1], cropped.shape[0]))
aligned = misc.imresize(cropped, (image_size, image_size), interp='bilinear')
prewhitened = facenet.prewhiten(aligned)
img_list.append(prewhitened)
images = np.stack(img_list)
return images,image_names
if __name__ == '__main__':
main()
| 7,570 |
media_grab-container/src/dataTypes/TorrentRecord.py
|
tomconnolly94/media_grab
| 0 |
2022998
|
#!/venv/bin/python
# external dependencies
import enum
class TorrentCategory(enum.Enum):
TV_EPISODE = 1
TV_SEASON = 2
class TorrentRecord():
def __init__(self, name, torrentId, infoHash, seeders, leechers=None, category=None):
self.__name = name
self.__torrentId = torrentId
self.__infoHash = infoHash
self.__seeders = seeders
self.__leechers = leechers
self.__category = category
def getMagnet(self):
return f"magnet:?xt=urn:btih:{self.__infoHash}&dn={self.__name}"
def getName(self):
return self.__name
    def getId(self):
        return self.__torrentId
def getSeeders(self):
return self.__seeders
def getInfoHash(self):
return self.__infoHash
def getCategory(self):
return self.__category
def setCategory(self, category):
self.__category = category
| 892 |
app/L6SOsgE6HT.py
|
hazxone/docker-flask-nginx-uwsgi
| 0 |
2023075
|
import pickle
import os
users = {
"medkad" : "mk101",
"haibe" : "hb201",
"maybank" : "may404",
"alphaadmin" : "alpha303"
}
tokens = {
"<PASSWORD>" : "<PASSWORD>"
}
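# replace the hard-coded defaults above with pickled users/tokens when the pickle files are present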
cred_pickle = os.path.join('pickle','users.cred')
if os.path.isfile(cred_pickle):
with open(cred_pickle, "rb") as u:
users = pickle.load(u)
token_pickle = os.path.join('pickle','session.token')
if os.path.isfile(token_pickle):
with open(token_pickle, "rb") as t:
tokens = pickle.load(t)
| 542 |
website/jdpages/migrations/0010_auto_20171019_1357.py
|
jonge-democraten/website
| 5 |
2023612
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jdpages', '0009_organisation_page_and_members'),
]
operations = [
migrations.AlterModelOptions(
name='blogcategorypage',
options={'verbose_name': 'Blog categorie pagina', 'verbose_name_plural': 'Blog categorie paginas', 'ordering': ('_order',)},
),
migrations.AlterModelOptions(
name='homepage',
options={'verbose_name': 'Home pagina', 'verbose_name_plural': 'Home paginas', 'ordering': ('_order',)},
),
]
| 678 |
planex/cms_site/apps.py
|
octue/planex-cms
| 0 |
2023419
|
from django.apps import AppConfig
class CMSSiteAppConfig(AppConfig):
name = "cms_site"
label = "cms_site"
verbose_name = "Site"
def ready(self):
# import cms_site.signals
pass
| 211 |
tests/test_query_metadata.py
|
msonderegger/PolyglotDB
| 25 |
2023595
|
from polyglotdb import CorpusContext
import pytest
@pytest.mark.xfail
def test_query_metadata_words(acoustic_config):
with CorpusContext(acoustic_config) as g:
q = g.query_metadata(g.word)
assert q.factors() == ['label']
assert q.numerics() == ['begin', 'end']
assert (len(q.levels(g.word.label)) == 10)
assert q.range(g.word.begin) == [0, 10]
        assert len(q.grouping_factors()) == 0
@pytest.mark.xfail
def test_query_metadata_discourses(acoustic_config):
with CorpusContext(acoustic_config) as g:
q = g.query_metadata(g.discourse)
        assert len(q.levels()) == 5
        assert len(q.grouping_factors()) == 0
| 683 |
src/solved/problem149.py
|
bgwines/project-euler
| 0 |
2022994
|
import pdb
lfg_cache = dict()
def lfg(k):
if k in lfg_cache:
return lfg_cache[k]
if 1 <= k and k <= 55:
lfg_cache[k] = ((100003 - 200003*k + 300007*(k**3)) % 1000000) - 500000
else:
lfg_cache[k] = ((lfg(k-24) + lfg(k-55)) % 1000000) - 500000
return lfg_cache[k]
def in_bounds(matrix, i, j):
return (0 <= i and i < len(matrix)
and 0 <= j and j < len(matrix[i]))
def diag_from(matrix, i, j, i_increment, j_increment):
diag = []
while in_bounds(matrix, i, j):
diag.append(matrix[i][j])
i += i_increment
j += j_increment
return diag
def ldiag_from(matrix, i, j):
return diag_from(matrix, i, j, 1, -1)
def rdiag_from(matrix, i, j):
return diag_from(matrix, i, j, 1, 1)
def init_matrix():
matrix = []
k = 1
for i in xrange(0, 2000):
row = []
for j in xrange(0, 2000):
row.append(lfg(k))
k += 1
matrix.append(row)
return matrix
def max_contiguous_subsequence_sum(sequence):
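    # Kadane's algorithm: track the best sum of a contiguous run ending at each index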
max_sums_ending_at_indices = []
for (i, x) in enumerate(sequence):
max_sum_ending_at_index = x
if i > 0:
max_sum_ending_at_index = max(x, max_sums_ending_at_indices[i-1] + x)
max_sums_ending_at_indices.append(max_sum_ending_at_index)
return max(max_sums_ending_at_indices)
matrix = init_matrix()
horizontals = matrix
verticals = [[matrix[j][i] for j in xrange(0, len(matrix))] for i in xrange(0, len(matrix[0]))]
ldiags = [ldiag_from(matrix, 0, j) for j in xrange(len(matrix[0]))] + [ldiag_from(matrix, i, len(matrix[0])-1) for i in xrange(len(matrix))]
rdiags = [rdiag_from(matrix, 0, j) for j in xrange(len(matrix[0]))] + [rdiag_from(matrix, i, 0) for i in xrange(len(matrix))]
print max(
[ max(map(max_contiguous_subsequence_sum, horizontals))
, max(map(max_contiguous_subsequence_sum, verticals))
, max(map(max_contiguous_subsequence_sum, ldiags))
, max(map(max_contiguous_subsequence_sum, rdiags)) ])
| 1,840 |
tox_docker/tox3/config.py
|
tkdchen/tox-docker
| 0 |
2023300
|
from typing import Container, Dict, Mapping, Optional, Sequence
import re
from tox.config import Config, SectionReader
from tox.venv import VirtualEnv
import py
from tox_docker.config import (
ContainerConfig,
Image,
Link,
Port,
RunningContainers,
Volume,
)
# nanoseconds in a second; named "SECOND" so that "1.5 * SECOND" makes sense
SECOND = 1000000000
EnvRunningContainers = Dict[VirtualEnv, RunningContainers]
def getfloat(reader: SectionReader, key: str) -> Optional[float]:
val = reader.getstring(key)
if val is None:
return None
try:
return float(val)
except ValueError:
msg = f"{val!r} is not a number (for {key} in [{reader.section_name}])"
raise ValueError(msg)
def gettime(reader: SectionReader, key: str) -> Optional[int]:
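    # healthcheck times are written in seconds in tox.ini but stored as integer nanoseconds (see SECOND above)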
raw = getfloat(reader, key)
if raw is None:
return None
return int(raw * SECOND)
def getint(reader: SectionReader, key: str) -> Optional[int]:
raw = getfloat(reader, key)
if raw is None:
return None
val = int(raw)
if val != raw:
msg = f"{val!r} is not an int (for {key} in [{reader.section_name}])"
raise ValueError(msg)
return val
def getenvdict(reader: SectionReader, key: str) -> Mapping[str, str]:
environment = {}
for value in reader.getlist(key):
envvar, _, value = value.partition("=")
environment[envvar] = value
return environment
def discover_container_configs(config: Config) -> Sequence[str]:
"""
Read the tox.ini, and return a list of docker container configs.
"""
inipath = str(config.toxinipath)
iniparser = py.iniconfig.IniConfig(inipath)
container_names = set()
for section in iniparser.sections:
if not section.startswith("docker:"):
continue
_, _, container_name = section.partition(":")
if not re.match(r"^[a-zA-Z][-_.a-zA-Z0-9]+$", container_name):
raise ValueError(f"{container_name!r} is not a valid container name")
# populated in the next loop
container_names.add(container_name)
return list(container_names)
def parse_container_config(
config: Config, container_name: str, all_container_names: Container[str]
) -> ContainerConfig:
inipath = str(config.toxinipath)
iniparser = py.iniconfig.IniConfig(inipath)
reader = SectionReader(f"docker:{container_name}", iniparser)
reader.addsubstitutions(
distdir=config.distdir,
homedir=config.homedir,
toxinidir=config.toxinidir,
toxworkdir=config.toxworkdir,
)
kwargs = {
"name": container_name,
"image": Image(reader.getstring("image")),
"stop": container_name not in config.option.docker_dont_stop,
}
environment = None
if reader.getstring("environment"):
environment = getenvdict(reader, "environment")
hc_cmd = hc_interval = hc_timeout = hc_start_period = hc_retries = None
if reader.getstring("healthcheck_cmd"):
hc_cmd = reader.getstring("healthcheck_cmd")
if reader.getstring("healthcheck_interval"):
hc_interval = gettime(reader, "healthcheck_interval")
if reader.getstring("healthcheck_timeout"):
hc_timeout = gettime(reader, "healthcheck_timeout")
if reader.getstring("healthcheck_start_period"):
hc_start_period = gettime(reader, "healthcheck_start_period")
if reader.getstring("healthcheck_retries"):
hc_retries = getint(reader, "healthcheck_retries")
ports = None
if reader.getstring("ports"):
ports = [Port(line) for line in reader.getlist("ports")]
links = None
if reader.getstring("links"):
links = [Link(line) for line in reader.getlist("links")]
volumes = None
if reader.getstring("volumes"):
volumes = [Volume(line) for line in reader.getlist("volumes")]
return ContainerConfig(
name=container_name,
image=Image(reader.getstring("image")),
stop=container_name not in config.option.docker_dont_stop,
environment=environment,
healthcheck_cmd=hc_cmd,
healthcheck_interval=hc_interval,
healthcheck_timeout=hc_timeout,
healthcheck_start_period=hc_start_period,
healthcheck_retries=hc_retries,
ports=ports,
links=links,
volumes=volumes,
)
| 4,369 |
mainsite/management/commands/gauge.py
|
amokryshev/amokryshev-com
| 0 |
2023565
|
import os
import json
from django.core.management.base import BaseCommand
from amokryshev.utils import AnimatedPageGaugeGenerator, AnimatedPageScenarioImproperlyConfigured
class Command(BaseCommand):
help = '''
    The command creates a gauge file for the custom functionality that tests pages animated through CSS and JS.
    The code and docs for that functionality live in amokryshev.utils.animated_pages_test_tools.py
The Examples:
gauge -f "initial-data.json" -r "/ru" "/en" -p "mainsite/fixtures/index_page_snapshot.json" -v 2 -si 20 -sd 0.1 -sc 400
gauge -f "initial-data.json" -r "/ru/articles/lorem_ipsum_43/" "/en/articles/lorem_ipsum_46/" -p "mainsite/fixtures/article_page_snapshot.json" -v 2 -si 20 -sd 0.1 -sc 400
'''
GaugeGenerator = AnimatedPageGaugeGenerator()
parameters = [
'fixtures',
'references',
'page_parts',
'snapshots_iter',
'snapshots_density',
'snapshots_count',
'connection_alias',
'verbosity',
'host',
'port',
]
def handle(self, *args, **options):
for param in self.parameters:
if options[param]:
setattr(self.GaugeGenerator, param, options[param])
try:
if os.path.isfile(self.GaugeGenerator.page_parts):
gauge_file = open(self.GaugeGenerator.page_parts, "r+")
                print('The {} file has been opened successfully!'.format(self.GaugeGenerator.page_parts))
self.GaugeGenerator.page_parts = json.loads(gauge_file.read())
else:
raise AnimatedPageScenarioImproperlyConfigured("Incorrect value in the self.GaugeGenerator.page_parts!")
gauge_file.truncate(0)
gauge_file.seek(0)
gauge_file.write(json.dumps(self.GaugeGenerator.run()))
            print('The file {} has been saved successfully!'.format(gauge_file.name))
except (OSError, IOError) as e:
print('Something goes wrong when processing Page_parts file: {}'.format(e))
finally:
gauge_file.close()
print('The file has been closed, process completed!')
def add_arguments(self, parser):
parser.add_argument(
'-f',
'--fixtures',
nargs='+',
action='store',
default='initial-data.json',
help='Fixtures with data for test environment of the page',
required=True
)
parser.add_argument(
'-r',
'--references',
nargs='+',
action='store',
help='The list of URNs, that should be tested',
required=True
)
parser.add_argument(
'-p',
'--page_parts',
action='store',
help='The path to page_parts json',
required=True
)
parser.add_argument(
'-si',
'--snapshots_iter',
type=int,
action='store',
            help='The count of iterations for snapshots that should be taken from the page (one snapshot per N seconds)',
required=True
)
parser.add_argument(
'-sd',
'--snapshots_density',
type=float,
action='store',
help='The density of snapshots that should be taken from the page (one snapshot per N seconds)',
required=True
)
parser.add_argument(
'-sc',
'--snapshots_count',
type=int,
action='store',
help='The count of snapshots that should be taken from the page',
required=True
)
parser.add_argument(
'-ca',
'--connection_alias',
action='store',
help='The URL of the test server',
required=False
)
parser.add_argument(
'-ht',
'--host',
action='store',
help='The URL of the test server',
required=False
)
parser.add_argument(
'-pt',
'--port',
type=int,
action='store',
help='The port of the test server',
required=False
)
| 4,259 |
app/helpers.py
|
codelableidenvelux/agestudy
| 1 |
2023265
|
import csv
import os
import smtplib, ssl
import urllib.request
import pandas as pd
import numpy as np
from flask import redirect, render_template, request, session
from functools import wraps
from dateutil.parser import parse
import string
import random
from datetime import datetime, timedelta
from db.postgresql import Db
def min_max_norm(data):
""" min max normalization """
x = (data - np.nanmin(data)) / (np.nanmax(data) - np.nanmin(data))
return x
def calculate_ranking(sh):
# Determine how long the participant has been participating
today = datetime.now()
G = list(sh['G'])
days_since_sign_up = list(map(lambda a: (today - a.value).days ,G[1:]))
days_since_sign_up = np.array(days_since_sign_up)
# Determine how many tasks they have made
tasks_made = list(map(lambda a: a.value ,list(sh['H'])))
tasks_made = np.array(tasks_made[1:])
tasks_made[tasks_made == 'many'] = 100
tasks_made = tasks_made.astype('int')
# normalize values
days_since_sign_up_norm = min_max_norm(days_since_sign_up)
tasks_made_norm = min_max_norm(tasks_made)
# determine which participants have already been booked
booked = list(map(lambda a: a.value == 'yes' ,list(sh['L'])))
# weigh values and rank participants
weighted = tasks_made_norm * 0.5 + days_since_sign_up_norm * 0.5
r = np.argsort(-weighted)
    # reorder ranking ignoring already booked participants
large_ranking = r.max() * 10
r[booked[1:]] = large_ranking
rank = dict(zip(r,np.zeros(len(r))))
count = 0
for x in sorted(rank): #now we go from lowest to highest in the array, adding 1 to the dict-value
rank[x] = count
count += 1
# set booked participant rankings to nan
ranking = np.array([rank[x] for x in r], 'float')
ranking[booked[1:]] = np.nan
ranking_list = list(ranking)
#ranking_list.insert(0, G[0].value)
return ranking_list
def read_csv(filename):
df = pd.read_csv(filename, sep=",", index_col=0, encoding = "utf-8")
df = df.to_dict()
return df
def preprocess_birthdate(date):
"""
    Preprocesses the birthday input from the register page.
    It uses the dateutil library to parse the date and make sure it is in the
    right format. It also strips any surrounding whitespace.
    Return format: str 'yyyy-mm-dd'
"""
dt = parse(date)
return (dt.strftime('%Y-%m-%d'))
def preprocess_checkbox(input):
"""
Function preprocesses the input user type from the register page,
it return 1 if the box was checked meaning the user does want to participate
for monetary compensation.
It returns 0 if the user does not want to participate for monetary compensation
It also is used to check if the user lives in the Netherlands
returns 1 if they do and 0 if they dont
"""
if input == "on":
return 1
else:
return 0
def preprocess_gender(gender):
"""
Function preprocesses the input gender from the register page,
Returns 1 for male, 2 for female and 3 for other
"""
if gender == "male":
return 1
elif gender == "female":
return 2
elif gender == "other":
return 3
def total_money(df):
df["time_exec"] = pd.to_datetime(df["time_exec"])
total_survey_money = 0
total_rt_money = 0
total_tasks_money = 0
df = df[df["user_type"] == 1]
by_id = df.groupby(by="user_id")
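    # task ids: 4/5 are the surveys (2.00 each), 8 is the reaction-time task (0.25/month), 1/2/3 are the cognitive tasks (1.75/month)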
for (key, values) in by_id:
for value in values["task_id"]:
if value == 4:
total_survey_money = total_survey_money + 2
elif value == 5:
total_survey_money = total_survey_money + 2
per_month = values.groupby(by="month")
for (key, value) in per_month:
if 8 in value["task_id"].values:
total_rt_money = total_rt_money + 0.25
if 1 in value["task_id"].values or 2 in value["task_id"].values or 3 in value["task_id"].values:
total_tasks_money = total_tasks_money + 1.75
total = total_rt_money + total_tasks_money + total_survey_money
money = {"total": total, "tasks": total_tasks_money, "survey": total_survey_money, "rt": total_rt_money}
return money
def projected_money(num_p):
# total tasks
tasks = num_p * 1.75 * 12
# total rt
rt = num_p * 0.25 * 12
# surveys
sf_36 = num_p * 2.00 * 3
phone_survey = num_p * 2.00
# total survey
survey = sf_36 + phone_survey
total = survey + rt + tasks * 3
return {"total": total, "tasks": tasks, "survey": survey, "rt": rt}
def get_num_active_participants(groupby_object):
num_active_participants = 0
for (key, value) in groupby_object:
today = datetime.now()
months_participating = today.month - value["time_sign_up"].iloc[0].month
num_test_per_month = len(value["month"].unique())
if months_participating == num_test_per_month:
num_active_participants = num_active_participants + 1
return num_active_participants
def task_frequency(df):
sf_36_done = 0
sf_36= 0
sf_36_p = 0
phone_survey_done = 0
phone_survey = 0
phone_survey_p = 0
rt_done = 0
rt = 0
rt_p = 0
corsi_done = 0
corsi = 0
corsi_p = 0
n_back_done = 0
n_back= 0
n_back_p = 0
t_switch_done = 0
t_switch = 0
t_switch_p = 0
by_id = df.groupby(by=["user_id", "status"])
for (key, values) in by_id:
for value in values["task_id"]:
if value == 4 and key[1] == 1:
sf_36_done = sf_36_done + 1
elif value == 4 and key[1] == 0:
sf_36 = sf_36 + 1
elif value == 5 and key[1] == 1:
phone_survey_done = phone_survey_done + 1
elif value == 5 and key[1] == 0:
phone_survey = phone_survey + 1
elif value == 8 and key[1] == 1:
rt_done = rt_done + 1
elif value == 8 and key[1] == 0:
rt = rt + 1
elif value == 1 and key[1] == 1:
corsi_done = corsi_done + 1
elif value == 1 and key[1] == 0:
corsi = corsi + 1
elif value == 2 and key[1] == 1:
n_back_done = n_back_done + 1
elif value == 2 and key[1] == 0:
n_back = n_back + 1
elif value == 3 and key[1] == 1:
t_switch_done = t_switch_done + 1
else:
t_switch = t_switch + 1
by_id = df.groupby(by=["task_id", "user_id"])
for (key, values) in by_id:
if key[0] == 4:
sf_36_p = sf_36_p + 1
elif key[0] == 5:
phone_survey_p = phone_survey_p + 1
elif key[0] == 8:
rt_p = rt_p + 1
elif key[0] == 1:
corsi_p = corsi_p + 1
elif key[0] == 2:
n_back_p = n_back_p + 1
else:
t_switch_p = t_switch_p + 1
tasks = [{"task": "sf_36","complete": sf_36_done,"incomplete": sf_36},
{"task": "phone_survey","complete": phone_survey_done,"incomplete": phone_survey},
{"task": "rt","complete": rt_done,"incomplete": rt},
{"task": "corsi","complete": corsi_done,"incomplete": corsi},
{"task": "n_back","complete": n_back_done,"incomplete": n_back},
{"task": "t_switch","complete": t_switch_done,"incomplete": t_switch}]
tasks_p = [{"task": "sf_36","complete": sf_36_p, "incomplete": 0},
{"task": "phone_survey","complete": phone_survey_p, "incomplete": 0},
{"task": "rt","complete": rt_p, "incomplete": 0},
{"task": "corsi","complete": corsi_p, "incomplete": 0},
{"task": "n_back","complete": n_back_p, "incomplete": 0},
{"task": "t_switch","complete": t_switch_p, "incomplete": 0}]
return (tasks,tasks_p)
def remove_whitespace(input):
return input.replace(' ', '')
def apology(message, code=400):
"""Render message as an apology to user."""
def escape(s):
"""
Escape special characters.
https://github.com/jacebrowning/memegen#special-characters
"""
for old, new in [("-", "--"), (" ", "-"), ("_", "__"), ("?", "~q"),
("%", "~p"), ("#", "~h"), ("/", "~s"), ("\"", "''")]:
s = s.replace(old, new)
return s
return render_template("apology.html", top=code, bottom=escape(message)), code
def login_required(f):
"""
Decorate routes to require login.
http://flask.pocoo.org/docs/0.12/patterns/viewdecorators/
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if session.get('user_id') is None:
return redirect("/login")
return f(*args, **kwargs)
return decorated_function
def language_check(f):
"""
Decorator function to set the chosen language
http://flask.pocoo.org/docs/0.12/patterns/viewdecorators/
"""
@wraps(f)
def language_function(*args, **kwargs):
if request.method == "GET":
language = request.args.get('language')
if language:
if language.lower() == "english":
session['language'] = "english"
elif language.lower() == "dutch":
session['language'] = "dutch"
return f(*args, **kwargs)
return language_function
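# Hedged usage sketch (the route and template names are hypothetical, not from
# this app): both helpers decorate Flask view functions, e.g.
#
#   @app.route("/dashboard")
#   @login_required
#   @language_check
#   def dashboard():
#       return render_template("dashboard.html")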
| 9,692 |
cpyquickhelper/fastdata/pandas2numpy.py
|
sdpython/cpyquickhelper
| 2 |
2023455
|
"""
@file
@brief Fast data manipulations.
"""
import pandas
def df2array(df, check=True):
"""
Converts a dataframe into a :epkg:`numpy:array`
without copying. :epkg:`pandas` is merging
consecutive columns sharing the same type
into one memory block. As a consequence, the function
can be used only if the data is stored in a single block
with a single type.
@param df dataframe
@param check verifies the operation can be done (True)
or skip verification (False)
@return :epkg:`numpy:array`
See `data member <https://pandas.pydata.org/pandas-docs/stable/search.html?q=pointer&check_keywords=yes&area=default>`_,
`_data <https://github.com/pandas-dev/pandas/blob/master/pandas/core/frame.py#L322>`_.
.. seealso:: @see fn df2arrays
"""
if check:
if not isinstance(df, pandas.DataFrame):
raise TypeError("df is not a pandas.DataFrame") # pragma: no cover
if len(df._data.blocks) != 1:
raise ValueError(
"The dataframe has many block of data. There should be only one column type.")
return df._data.blocks[0].values
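# Hedged usage sketch (the frame below is made up): df2array only applies when
# every column shares one dtype, so pandas keeps the data in a single block.
def _df2array_example():
    df = pandas.DataFrame(dict(a=[1.0, 2.0], b=[3.0, 4.0]))  # one float64 block
    return df2array(df)  # numpy view on that block, no copy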
def df2arrays(df, sep=",", check=True):
"""
Converts a dataframe into a list of
tuples *(column name, :epkg:`numpy:array`)*
without copying. :epkg:`pandas` is merging
consecutive columns sharing the same type
into one memory block. That's what the function extracts.
@param df dataframe
@param check verifies the operation can be done (True)
or skip verification (False)
@param sep columns separator
@return a list of tuple ``(column, array)``
Example:
.. runpython::
:showcode:
from pandas import DataFrame
from cpyquickhelper.fastdata import df2arrays
df = DataFrame([dict(a=3.4, b=5.6, c="e"),
dict(a=3.5, b=5.7, c="r")])
arr = df2arrays(df)
print(arr)
.. seealso:: @see fn df2array
"""
if check:
if not isinstance(df, pandas.DataFrame):
raise TypeError("df is not a pandas.DataFrame") # pragma: no cover
cols = df.columns
res = []
pos = 0
for b in df._data.blocks:
name = sep.join(cols[pos:pos + b.shape[1]])
res.append((name, b.values))
pos += b.shape[1]
return res
| 2,425 |
Pi/Python_Code/9_pir.py
|
erickmusembi/Robot-Project
| 1 |
2023303
|
#!/usr/bin/python
import RPi.GPIO as GPIO
import time
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(27,GPIO.OUT)
GPIO_PIR = 7
print "PIR Module Test (CTRL-C to exit)"
# Set pin as input
GPIO.setup(GPIO_PIR,GPIO.IN) # Echo
Current_State = 0
Previous_State = 0
try:
print "Waiting for PIR to settle ..."
# Loop until PIR output is 0
while GPIO.input(GPIO_PIR)==1:
Current_State = 0
print " Ready"
# Loop until users quits with CTRL-C
while True :
# Read PIR state
Current_State = GPIO.input(GPIO_PIR)
if Current_State==1 and Previous_State==0:
# PIR is triggered
print " Motion detected!"
# Record previous state
GPIO.output(27,GPIO.HIGH)
time.sleep(1)
GPIO.output(27,GPIO.LOW)
Previous_State=1
elif Current_State==0 and Previous_State==1:
# PIR has returned to ready state
print " Ready"
Previous_State=0
# Wait for 10 milliseconds
time.sleep(0.01)
except KeyboardInterrupt:
print " Quit"
# Reset GPIO settings
GPIO.cleanup()
| 1,081 |
app/package/models/Calibrate_model.py
|
pablohawz/tfg-Scan-Paint-clone
| 0 |
2023003
|
from PySide2.QtCore import QObject
class CalibrateModel(QObject):
def __init__(self):
super().__init__()
self.clear_state()
def clear_state(self):
pass
| 187 |
accommodations/tools.py
|
resurtm/task-tracker-api
| 0 |
2023490
|
import datetime
import json
from os import path as p
import jsonschema
from bson import ObjectId
from accommodations.main import app
def validate_json(data, schema):
path = p.join(app.config['JSON_SCHEMA_PATH'], schema + '.json')
with open(path, 'r') as fp:
fd = json.loads(fp.read())
try:
jsonschema.validate(data, fd)
except jsonschema.ValidationError:
return False
return True
class JSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, ObjectId):
return str(o)
if isinstance(o, datetime.datetime):
return str(o)
return json.JSONEncoder.default(self, o)
def prepare_data(data):
return json.loads(JSONEncoder().encode(data))
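# Hedged usage sketch (the document below is made up):
def _prepare_data_example():
    doc = {'_id': ObjectId(), 'created': datetime.datetime(2021, 1, 1)}
    return prepare_data(doc)  # ObjectId and datetime are serialised as strings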
| 751 |
another_way/sim_pearson.py
|
eduardagoulart/my_anime_playlist
| 1 |
2022750
|
from math import sqrt
def sim(users, p1, p2):
# p1, p2 = int(p1), int(p2)
print(users[p1])
print(users[p2])
si = {}
for item in users[p1]:
if item in users[p2]:
si[item] = 1
qtd_sim = len(si)
if qtd_sim == 0:
return 0
print(f'Similarity dictionary: {si}')
try:
sum1 = sum([users[p1][item] for item in si])
sum2 = sum([users[p2][item] for item in si])
except:
sum1 = users[p1][1]
sum2 = users[p2][1]
squares_sum1 = sum([pow(users[p1][item], 2) for item in si])
squares_sum2 = sum([pow(users[p2][item], 2) for item in si])
product_sum = sum([users[p1][item] * users[p2][item] for item in si])
num = product_sum - (sum1 * sum2 / qtd_sim)
den = sqrt((squares_sum1 - pow(sum1, 2) / qtd_sim) * (squares_sum2 - pow(sum2, 2) / qtd_sim))
if den == 0:
return 0
return num / den
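# Hedged usage sketch (user names and ratings are made up):
def _sim_example():
    users = {
        'alice': {'naruto': 4.0, 'bleach': 3.5, 'one_piece': 5.0},
        'bob': {'naruto': 4.5, 'bleach': 3.0},
    }
    return sim(users, 'alice', 'bob')  # Pearson correlation over shared items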
| 921 |
pypika_gis/dialects/PostgreSQLSpatial.py
|
mccarthyryanc/pypika-gis
| 0 |
2023193
|
#
# PostGIS submodule
#
from pypika.terms import Function
class SpatialMethods(object):
"""
Defines a PostGIS Spatial types/functions.
"""
def Area(self, term, *args):
return Function("ST_Area", term, *args)
def AsBinary(self, term, *args):
return Function("ST_AsBinary", term, *args)
def AsGeoJSON(self, term, *args):
return Function("ST_AsGeoJSON", term, *args)
def AsMVT(self, term, *args):
return Function("ST_AsMVT", term, *args)
def AsText(self, term, *args):
return Function("ST_AsText", term, *args)
def Boundary(self, geom, *args):
return Function("ST_Boundary", geom, *args)
def Buffer(self, term, length, *args):
return Function("ST_Buffer", term, length, *args)
def Centroid(self, term, *args):
return Function("ST_Centroid", term, *args)
def Contains(self, geomA, geomB, *args):
return Function("ST_Contains", geomA, geomB, *args)
def ConvexHull(self, geomA, *args):
return Function("ST_ConvexHull", geomA, *args)
def ClosestPoint(self, geomA, geomB, *args):
return Function("ST_ClosestPoint", geomA, geomB, *args)
def CoveredBy(self, geomA, geomB, *args):
return Function("ST_CoveredBy", geomA, geomB, *args)
def Covers(self, geomA, geomB, *args):
return Function("ST_Covers", geomA, geomB, *args)
def Crosses(self, geomA, geomB, *args):
return Function("ST_Crosses", geomA, geomB, *args)
def CurveN(self, curve_index, *args):
raise NotImplementedError('PostGIS has no ST_CurveN method')
def CurveToLine(self, geom, tolerance, *args):
return Function("ST_CurveToLine", geom, tolerance, *args)
def Difference(self, geomA, geomB, *args):
return Function("ST_Difference", geomA, geomB, *args)
def Dimension(self, geom, *args):
return Function("ST_Dimension", geom, *args)
def Disjoint(self, geomA, geomB, *args):
return Function("ST_Disjoint", geomA, geomB, *args)
def Distance(self, geomA, geomB, *args):
return Function("ST_Distance", geomA, geomB, *args)
def DWithin(self, geomA, geomB, distance, use_spheroid=False):
return Function("ST_DWithin", geomA, geomB, distance, use_spheroid)
def EndPoint(self, geom, *args):
return Function("ST_EndPoint", geom, *args)
def Envelope(self, term, *args):
return Function("ST_Envelope", term, *args)
def Equals(self, geomA, geomB, *args):
return Function("ST_Equals", geomA, geomB, *args)
def Extent(self, term, *args):
return Function("ST_Extent", term, *args)
def ExteriorRing(self, geom, *args):
return Function("ST_ExteriorRing", geom, *args)
def GeoHash(self, term, *args):
return Function("ST_GeoHash", term, *args)
def GeometryN(self, geom, integer, *args):
return Function("ST_GeometryN", geom, integer, *args)
def GeometryType(self, geom, *args):
return Function("ST_GeometryType", geom, *args)
def GeogFromGeoJSON(self, term, *args):
return Function("ST_GeogFromGeoJSON", term, *args)
def GeogFromText(self, term, *args):
return Function("ST_GeogFromText", term, *args)
def GeogFromWKB(self, wkb, *args):
return Function("ST_GeogFromWKB", wkb, *args)
def GeogPoint(self, long, lat, *args):
return Function("ST_GeogPoint", long, lat, *args)
def GeogPointFromGeoHash(self, geohash, *args):
return Function("ST_GeogPointFromGeoHash", geohash, *args)
def GeomFromGeoJSON(self, term, *args):
return Function("ST_GeomFromGeoJSON", term, *args)
def Length(self, geom, *args):
return Function("ST_Length", geom, *args)
def InteriorRingN(self, geom, integer, *args):
return Function("ST_InteriorRingN", geom, integer, *args)
def Intersection(self, geomA, geomB, *args):
return Function("ST_Intersection", geomA, geomB, *args)
def Intersects(self, geomA, geomB, *args):
return Function("ST_Intersects", geomA, geomB, *args)
def IsClosed(self, geom, *args):
return Function("ST_IsClosed", geom, *args)
def IsCollection(self, term, *args):
return Function("ST_IsCollection", term, *args)
def IsEmpty(self, term, *args):
return Function("ST_IsEmpty", term, *args)
def IsRing(self, term, *args):
return Function("ST_IsRing", term, *args)
def IsSimple(self, term, *args):
return Function("ST_IsSimple", term, *args)
def IsValid(self, term, *args):
return Function("ST_IsValid", term, *args)
def MakeLine(self, *args):
return Function("ST_MakeLine", *args)
def MakePoint(self, long, lat, *args):
return Function("ST_MakePoint", long, lat, *args)
def MakePolygon(self, *args):
return Function("ST_MakePolygon", *args)
def NumPoints(self, geom, *args):
return Function("ST_NumPoints", geom, *args)
def Perimeter(self, geom, *args):
return Function("ST_Perimeter", geom, *args)
def Point(self, long, lat, *args):
return Function("ST_Point", long, lat, *args)
def PointN(self, geom, integer, *args):
return Function("ST_PointN", geom, integer, *args)
def PointOnSurface(self, geom, *args):
return Function("ST_PointOnSurface", geom, *args)
def Relate(self, geomA, geomB, *args):
return Function("ST_Relate", geomA, geomB, *args)
def SetSRID(self, geom, epsg, *args):
return Function("ST_SetSRID", geom, epsg, *args)
def StartPoint(self, geom, *args):
return Function("ST_StartPoint", geom, *args)
def Touches(self, geomA, geomB, *args):
return Function("ST_Touches", geomA, geomB, *args)
def Union(self, geomA, geomB, *args):
return Function("ST_Union", geomA, geomB, *args)
def Within(self, geomA, geomB, *args):
return Function("ST_Within", geomA, geomB, *args)
def X(self, term, *args):
return Function("ST_X", term, *args)
def Y(self, term, *args):
return Function("ST_Y", term, *args)
def Z(self, term, *args):
return Function("ST_Z", term, *args)
| 6,245 |
2 Half/lab2.5.2.py
|
stepangorabch/pstu
| 0 |
2023336
|
f1 = open('assets/text.txt')
f2 = open('assets/file.txt', 'w')
symbol = str(input("symbol: "))
string = f1.readlines()
for i in range(len(string)):
if string[i][0] == symbol:
print(string[i])
f2.write(string[i])
f1.close()
f2.close()
| 254 |
tests/validators.py
|
hronecviktor/django-GDPR
| 55 |
2023460
|
import re
from datetime import date
from django.core.exceptions import ValidationError
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
def get_day_from_personal_id(personal_id):
day = int(personal_id[4:6])
if day > 50:
day -= 50
return day
def get_month_from_personal_id(personal_id):
year = get_year_from_personal_id(personal_id)
month = int(personal_id[2:4])
if month > 70 and year > 2003:
month -= 70
elif month > 50:
month -= 50
elif month > 20 and year > 2003:
month -= 20
return month
def get_year_from_personal_id(personal_id):
year = int(personal_id[0:2])
value = personal_id.replace('/', '')
year += 2000 if year < 54 and len(value) == 10 else 1900
return year
def personal_id_date(personal_id):
try:
return date(get_year_from_personal_id(personal_id), get_month_from_personal_id(personal_id),
get_day_from_personal_id(personal_id))
except ValueError:
raise ValueError('Invalid personal id')
class CZBirthNumberValidator:
"""
Czech birth number field validator.
"""
BIRTH_NUMBER = re.compile(r'^(?P<birth>\d{6})/?(?P<id>\d{3,4})$')
def __call__(self, value):
value = force_text(value)
match = re.match(self.BIRTH_NUMBER, value)
if not match:
raise ValidationError(_('Enter a birth number in the format XXXXXX/XXXX.'))
birth, id = match.groupdict()['birth'], match.groupdict()['id']
# Three digits for the verification number were used until 1 January 1954
if len(id) != 3:
# The fourth digit has been added since 1 January 1954.
# It is the modulo of dividing the birth number and verification number by 11.
# If the modulo were 10, the last digit was 0 (and therefore the whole
# birth number wasn't divisible by 11). These numbers are no longer used (since 1985)
# and the condition 'modulo == 10' can be removed in 2085.
modulo = int(birth + id[:3]) % 11
if (modulo != int(id[-1])) and (modulo != 10 or id[-1] != '0'):
raise ValidationError(_('Enter a valid birth number.'))
try:
personal_id_date(value)
except ValueError:
raise ValidationError(_('Enter a valid birth number.'))
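# Hedged worked example (a made-up value, not a real person's number): for
# '780123/0008' the check computes int('780123' + '000') % 11 == 8, which matches
# the last digit, and 1978-01-23 is a valid date, so the validator passes.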
class IDCardNoValidator:
"""
Czech id card number field validator.
"""
ID_CARD_NUMBER = re.compile(r'^\d{9}$')
def __call__(self, value):
value = force_text(value)
match = re.match(self.ID_CARD_NUMBER, value)
if not match:
raise ValidationError(_('Enter an ID card in the format XXXXXXXXX.'))
elif value[0] == '0':
raise ValidationError(_('Enter a valid ID card number.'))
else:
return value
class BankAccountValidator:
BANK_ACCOUNT_NUMBER_REVERSE_PATTERN = re.compile(
r'^(?P<bank>\d{1,6})/(?P<number>\d{1,10})(-?(?P<prefix>\d{1,6}))?$')
def __call__(self, value):
match = re.match(self.BANK_ACCOUNT_NUMBER_REVERSE_PATTERN, force_text(value)[::-1])
if match:
return construct_bank_account_number((match.groupdict()['prefix'] or '')[::-1],
match.groupdict()['number'][::-1],
match.groupdict()['bank'][::-1])
else:
raise ValidationError(_('Enter a valid bank account number.'))
def construct_bank_account_number(prefix, number, bank_code):
return '{:0>6}-{:0>10}/{}'.format(prefix, number, bank_code)
def split_bank_account_to_prefix_postfix(bank_account_number):
return bank_account_number.split('-') if '-' in bank_account_number else ('', bank_account_number)
def clean_bank_account_number_or_none(bank_account_number):
try:
return BankAccountValidator()(bank_account_number)
except ValidationError:
return None
| 4,009 |
hackerrank/Greedy/04-LuckBalance.py
|
MrSquanchee/ProblemSolving
| 0 |
2022990
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the luckBalance function below.
def luckBalance(k, contests):
luckBal = 0
impo = [x[0] for x in contests if x[1]==1]
unimpo = [x[0] for x in contests if x[1]==0]
luckBal += sum(unimpo)
impo.sort(reverse=True)
luckBal += sum(impo[:k])
luckBal -= sum(impo[k:])
return luckBal
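# Hedged worked example (numbers are made up): with k=2, important-contest luck
# [5, 4, 1] and unimportant luck [2], the greedy choice loses the two largest
# important contests and wins the rest: 2 + (5 + 4) - 1 = 10.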
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
nk = input().split()
n = int(nk[0])
k = int(nk[1])
contests = []
for _ in range(n):
contests.append(list(map(int, input().rstrip().split())))
result = luckBalance(k, contests)
fptr.write(str(result) + '\n')
fptr.close()
| 735 |
data_loader/src/main/python/model/logistic_regression.py
|
uiyunkim-private/recommendation-model
| 0 |
2022925
|
import tempfile
from joblib import dump, load
from sklearn.linear_model import LogisticRegression,Ridge,Lasso
from sklearn.multioutput import MultiOutputRegressor,MultiOutputClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC,SVC
from utils.aws import build_s3
from sklearn.decomposition import PCA as RandomizedPCA
class LogisticRegressionModel:
def __init__(self, x_train, y_train):
self.s3 = build_s3()
#self.model = LogisticRegression(solver='lbfgs')
self.model = MultiOutputRegressor(Lasso(normalize=True,tol=0.000001))
#self.model = MultiOutputRegressor(Ridge())
self.x_train = x_train
self.y_train = y_train
def fit(self, sample_weight=None):
self.model.fit(self.x_train, self.y_train, sample_weight)
def predict(self, test_x):
return self.model.predict(test_x)
#return self.model.predict_proba(test_x)
def score(self):
return self.model.score(self.x_train, self.y_train)
def save_to_s3(self, bucket_name, key):
with tempfile.TemporaryFile() as fp:
dump(self.model, fp)
fp.seek(0)
self.s3.Bucket(bucket_name).put_object(Body=fp.read(), Bucket=bucket_name, Key=key)
fp.close()
@staticmethod
def load_from_s3(s3, bucket_name, key):
with tempfile.TemporaryFile() as fp:
s3.Bucket(bucket_name).download_fileobj(Fileobj=fp, Key=key)
fp.seek(0)
model = load(fp)
fp.close()
return model
@staticmethod
def save_predictions_to_s3(df, bucket_name, key):
df.to_csv(f's3://{bucket_name}/{key}', index=False)
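# Hedged usage sketch (shapes are made up; assumes build_s3() can construct a
# client without live AWS credentials, and skips the S3 helpers):
#
#   import numpy as np
#   x, y = np.random.rand(20, 3), np.random.rand(20, 2)
#   model = LogisticRegressionModel(x, y)   # MultiOutputRegressor(Lasso) inside
#   model.fit()
#   preds = model.predict(x[:5])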
| 1,700 |
booster/config.py
|
mrabins/booster
| 33 |
2023654
|
#!/usr/bin/env python
# Booster Twitter Bot - Developed by acidvegas in Python (https://acid.vegas/booster)
# config.py
class api:
consumer_key = 'CHANGEME'
consumer_secret = 'CHANGEME'
access_token = 'CHANGEME'
access_token_secret = 'CHANGEME'
class throttle:
favorite = 75
follow = 75
message = 750
tweet = 750
unfollow = 75
class settings:
keywords = ['500aday','autofollow','autofollowback','f4f','follow','follow4follow','followback','followtrain','instantfollow','instantfollowback','teamfollowback','wefollowback']
message = 'Thank you for following our Twitter account!' # Set to None to disable sending messages to new followers
woeid = 23424975 # Where On Earth ID (http://www.woeidlookup.com/)
| 748 |
29/29.py
|
cjm00/project-euler
| 1 |
2023100
|
import math
UPPER_BOUND = 101
def RemoveDuplicates(input_list):
return list(set(input_list))
distinct_powers = []
for a in range(2,UPPER_BOUND):
for b in range(2, UPPER_BOUND):
distinct_powers.append(math.pow(a, b))
distinct_powers = RemoveDuplicates(distinct_powers)
print len(distinct_powers)
| 308 |
multivis/utils/mergeBlocks.py
|
brettChapman/cimcb_vis
| 1 |
2023333
|
import sys
import copy
import numpy as np
import pandas as pd
def mergeBlocks(peak_blocks, data_blocks, mergeType):
"""Merge multiple Peak and Data Tables from different datasets, and consolidates any statistical results
generated from the multivis.utils.statistics package in relation to each block.
Parameters
----------
peak_blocks : A dictionary of Pandas Peak Table dataframes from different datasets indexed by dataset type.
data_blocks : A dictionary of Pandas Data Table dataframes from different datasets indexed by dataset type.
mergeType : The type of merging to perform. Either by 'SampleID' or 'Index'.
Returns
-------
DataTable: Merged Pandas dataFrame
PeakTable: Merged Pandas dataFrame (with any statistical results generated by multivis.utils.statistics consolidated into each block)
"""
peak_blocks = __checkData(peak_blocks)
data_blocks = __checkData(data_blocks)
blocks = list(data_blocks.keys())
df_peaks = pd.DataFrame()
df_data = pd.DataFrame()
for idx, block in enumerate(blocks):
peak = peak_blocks[block]
data = data_blocks[block]
peak_columns = peak.columns
if 'Name' not in peak_columns:
print("Error: No \"Name\" column in {} peak block".format(block))
sys.exit()
if 'Label' not in peak_columns:
print("Error: No \"Label\" column in {} peak block".format(block))
sys.exit()
if df_peaks.empty:
df_peaks = peak.copy(deep=True)
df_peaks.insert(len(df_peaks.columns), "Block", block)
else:
peak_dat = peak.copy(deep=True)
peak_dat.insert(len(peak_dat.columns), "Block", block)
df_peaks = pd.concat([df_peaks, peak_dat], ignore_index=True).reset_index(drop=True)
if df_data.empty:
df_data = data.copy(deep=True)
if mergeType.lower() == 'sampleid':
if 'SampleID' not in df_data.columns:
print("Error: No \"SampleID\" column in {} data block".format(block))
sys.exit()
elif mergeType.lower() == 'index':
if 'SampleID' in df_data.columns:
df_data = df_data.drop(['SampleID'], axis=1).reset_index(drop=True)
else:
if mergeType.lower() == 'sampleid':
if 'SampleID' in data.columns:
x = data[["SampleID"] + list(peak['Name'].values)].reset_index(drop=True)
df_data = pd.merge(df_data, x, left_on="SampleID", right_on="SampleID").reset_index(drop=True)
elif mergeType.lower() == 'index':
x = data[list(peak['Name'].values)].reset_index(drop=True)
df_data = pd.merge(df_data, x, left_index=True, right_index=True).reset_index(drop=True)
df_peaks['Idx'] = df_peaks.index
if 'Idx' in peak_columns:
df_peaks = df_peaks[list(peak_columns)+list(['Block'])]
else:
df_peaks = df_peaks[list(['Idx'])+list(peak_columns) + list(['Block'])]
# Merges any statistical results for each peak into each block
df_peaks = __merge_multiple_statistics(df_peaks)
return df_peaks, df_data
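# Hedged usage sketch (block names, peaks and samples below are all made up):
def _merge_blocks_example():
    peak_blocks = {'pos': pd.DataFrame({'Name': ['M1'], 'Label': ['met1']}),
                   'neg': pd.DataFrame({'Name': ['M2'], 'Label': ['met2']})}
    data_blocks = {'pos': pd.DataFrame({'SampleID': ['S1'], 'M1': [1.0]}),
                   'neg': pd.DataFrame({'SampleID': ['S1'], 'M2': [2.0]})}
    return mergeBlocks(peak_blocks, data_blocks, mergeType='SampleID')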
def __checkData(data):
if not isinstance(data, dict):
print("Error: A dictionary was not entered. Please check your data.")
sys.exit()
else:
df = data[list(data.keys())[0]]
if not isinstance(df, pd.DataFrame):
print("Error: A dataframe was not entered into the dictionary. Please check your data.")
sys.exit()
return data
def __merge_statistic(MergedPeakTable, stat_name):
merged_peak_stats = copy.deepcopy(MergedPeakTable)
column_name_list = list(filter(lambda x: x.startswith(stat_name), merged_peak_stats.columns))
if column_name_list:
blocks = list(set(merged_peak_stats['Block']))
merged_column_list = set([])
for column_name in column_name_list:
column_name_array = column_name.split('_')
merged_column_list.add('_'.join(column_name_array[:-1]))
merged_column_list = list(merged_column_list)
merged_column_list.sort()
for column in merged_column_list:
stats = []
for block_idx, block in enumerate(blocks):
column_name = column + '_' + block
# if true then these are case columns
if column_name in column_name_list:
stats.extend(list(merged_peak_stats[merged_peak_stats['Block'] == block][column_name]))
else:
# else the column doesn't exist and is a control group, therefore check the data types
# in the other columns to infer the values to use for the control block
if block_idx - 1 >= 0:
other_column_name = column + '_' + blocks[block_idx - 1]
else:
other_column_name = column + '_' + blocks[block_idx + 1]
bool_type = False
for x in list(merged_peak_stats[other_column_name]):
if type(x) == bool:
bool_type = True
break
if bool_type:
tmp_stats = [False] * merged_peak_stats[merged_peak_stats['Block'] == block].shape[0]
else:
tmp_stats = [np.nan] * merged_peak_stats[merged_peak_stats['Block'] == block].shape[0]
stats.extend(list(tmp_stats))
merged_peak_stats[column] = stats
merged_peak_stats = merged_peak_stats.drop(columns=column_name_list, axis=1)
return merged_peak_stats
def __merge_multiple_statistics(MergedPeakTable):
stat_types = ['MeanFoldChange', 'MedianFoldChange', 'Group_mean', 'Group_median', 'TTEST-twoGroup', 'MannWhitneyU', 'LEVENE-twoGroup']
multi_block_stats = copy.deepcopy(MergedPeakTable)
for stat in stat_types:
multi_block_stats = __merge_statistic(multi_block_stats, stat)
return multi_block_stats
| 6,218 |
app/core/connect/content_service.py
|
VadymHutei/ukubuka-front
| 0 |
2023504
|
import requests
import config
from errors import ContentServiceError, NotFoundError
class ContentService():
def __init__(self):
self._url = config.CONTENT_SERVICE['url']
self._timeout = (
config.CONNECT_TIMEOUT,
config.READ_TIMEOUT
)
self._headers = {
'Accept': 'application/json',
'language': config.CURRENT_LANGUAGE
}
def _setEntity(self, entity):
self._entity = entity
def _setParams(self, params):
self._params = params
def _get(self):
return requests.get(
self._url + self._entity,
timeout = self._timeout,
headers = self._headers,
params = self._params
)
def _makeRequest(self, method):
method = '_' + method
if not hasattr(self, method):
raise ContentServiceError(f'Method {method} is not implemented')
reqMeth = getattr(self, method)
try:
response = reqMeth()
except requests.exceptions.ReadTimeout:
raise ContentServiceError('Read timeout occurred')
except requests.exceptions.ConnectTimeout:
raise ContentServiceError('Connection timeout occurred')
except requests.exceptions.RequestException:
raise ContentServiceError('RequestException')
except Exception:
raise ContentServiceError()
if response.status_code == 200:
return response.json()
if response.status_code == 404:
raise NotFoundError
else:
raise ContentServiceError
def get(self, entity, **params):
self._setEntity(entity)
self._setParams(params)
return self._makeRequest('get')
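# Hedged usage sketch (the entity name and parameter are hypothetical):
#
#   service = ContentService()
#   page = service.get('pages', slug='about')   # roughly GET <content-url>/pages?slug=about
#
# A 404 response surfaces as NotFoundError, other failures as ContentServiceError.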
| 1,767 |
databases/migrations/2021_01_24_175410_M3u8Hls.py
|
puzzle9/m3u8-copy
| 0 |
2022687
|
"""M3u8Hls Migration."""
from masoniteorm.migrations import Migration
from app.Models.M3u8Hls import M3u8Hls as M3u8HlsModel
class M3u8Hls(Migration):
def up(self):
"""
Run the migrations.
"""
with self.schema.create("m3u8_hls") as table:
table.increments("id")
table.integer("m3u8_list_id").unsigned().index("m3u8_list_id")
table.integer("duration").unsigned()
table.string("url")
table.string("path").index("path")
table.string("status").index("status")
table.string("key").nullable()
table.timestamps()
def down(self):
"""
Revert the migrations.
"""
self.schema.drop("m3u8_hls")
| 756 |
skhep/utils/decorators.py
|
AdvaitDhingra/scikit-hep
| 0 |
2023549
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license, see LICENSE.
"""
Submodule for decorators
========================
.. note:: not meant for user code in general, though possible.
"""
def inheritdoc(cls, gap="\n"):
"""
Decorator to automate the inheritance of documentation from a class method.
Example
-------
>>> from skhep.utils.decorators import inheritdoc
>>> class ADerivedClass(ABaseClass): # doctest: +SKIP
... @inheritdoc(ABaseClass) # doctest: +SKIP
... def amethod(self): pass # doctest: +SKIP
"""
def _fn(fn):
if fn.__name__ in cls.__dict__:
if fn.__doc__ is None:
fn.__doc__ = cls.__dict__[fn.__name__].__doc__
else:
fn.__doc__ = (
cls.__dict__[fn.__name__].__doc__.strip() + gap + fn.__doc__.strip()
)
return fn
return _fn
| 985 |
behavioral/iterator/iterator_main.py
|
Kozak24/Patterns
| 0 |
2023146
|
from behavioral.iterator.data import Node
from behavioral.iterator.logic import DfsIterator, BfsIterator
def main() -> None:
"""
    1
   / \
  2   3
 / \   \
4   5   6
     \ /
      7
"""
seventh_node = Node(7)
sixth_node = Node(6, left=seventh_node)
fifth_node = Node(5, right=seventh_node)
fourth_node = Node(4)
third_node = Node(3, right=sixth_node)
second_node = Node(2, left=fourth_node, right=fifth_node)
root = Node(1, left=second_node, right=third_node, iterator=DfsIterator)
print("DFS Iterator")
for node in root:
print(node)
root = Node(1, left=second_node, right=third_node, iterator=BfsIterator)
print("\nBFS Iterator")
for node in root:
print(node)
if __name__ == "__main__":
main()
| 816 |
python/Gaffer/_BlockedConnection.py
|
pier-robot/gaffer
| 0 |
2022972
|
##########################################################################
#
# Copyright (c) 2011, <NAME>. All rights reserved.
# Copyright (c) 2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
# This isn't a direct binding of the C++ class because
# binding C++ scopes as Python context managers isn't
# completely straightforward, and in Python it is useful
# to provide the additional functionality of passing multiple
# connections.
class BlockedConnection( object ) :
def __init__( self, connectionOrConnections ) :
if isinstance( connectionOrConnections, Gaffer.Signals.Connection ) :
self.__connections = [ connectionOrConnections ]
else :
self.__connections = connectionOrConnections
self.__previouslyBlocked = None
def __enter__( self ) :
assert( self.__previouslyBlocked is None )
self.__previouslyBlocked = [ c.getBlocked() for c in self.__connections ]
for c in self.__connections :
c.setBlocked( True )
def __exit__( self, type, value, traceBack ) :
for c, b in zip( self.__connections, self.__previouslyBlocked ) :
c.setBlocked( b )
self.__previouslyBlocked = None
Gaffer.Signals.BlockedConnection = BlockedConnection
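# Hedged usage sketch (the connection object is hypothetical): any slot attached
# via the given connection(s) is silenced for the duration of the with-block.
#
#   with Gaffer.Signals.BlockedConnection( connection ) :
#       pass  # signal emissions here do not invoke the blocked slots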
| 2,844 |
src/sensu_go/resources/namespace.py
|
sensu/sensu-go-python
| 3 |
2023287
|
# Copyright (c) 2020 <NAME>
from typing import cast, List
from sensu_go.resources.cluster import ClusterResource
from sensu_go.typing import JSONItem
class Namespace(ClusterResource):
PATH_TEMPLATE = "/api/core/v2/namespaces"
TYPE = "Namespace"
API_VERSION = "core/v2"
FIELD_PREFIX = "namespace"
@staticmethod
def api_to_native(data: JSONItem, type: str) -> JSONItem:
return dict(metadata={}, spec=data, type=type)
@staticmethod
def native_to_api(
spec: JSONItem, metadata: JSONItem, type: str, api_version: str
) -> JSONItem:
return spec
def validate(self) -> List[str]:
result = []
if not self.spec.get("name"):
result.append("Namespace needs to have a 'name'.")
return result
@property
def name(self) -> str:
return cast(str, self.spec["name"])
| 872 |
src/data/make_dataset.py
|
Zerthick/ml-demo
| 0 |
2023186
|
# -*- coding: utf-8 -*-
import logging
import os
import pathlib
from pathlib import Path
import click
import pandas as pd
from pandas.api import types
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
def main(input_filepath: str) -> None:
"""Runs data processing scripts.
Runs scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
Args:
input_filepath: The filepath containing the raw data
"""
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data...')
df = pd.read_csv(input_filepath)
df = process_data(df)
logger.info('writing final data set to ./data/processed/out.csv')
write_data(df)
logger.info('done')
def _encode_ordinals(df: pd.DataFrame) -> pd.DataFrame:
"""Encodes ordinal features.
Encodes categorical ordinal features within the dataset as numeric features.
Args:
df: The dataframe to be encoded
Returns:
The encoded dataframe
"""
subway_mapping = {
'0-5min': 4,
'5min~10min': 3,
'10min~15min': 2,
'15min~20min': 1,
'no_bus_stop_nearby': 0
}
bus_mapping = {'0~5min': 2, '5min~10min': 1, '10min~15min': 0}
df['TimeToSubway'] = df['TimeToSubway'].map(subway_mapping)
df['TimeToBusStop'] = df['TimeToBusStop'].map(bus_mapping)
return df
def process_data(df: pd.DataFrame) -> pd.DataFrame:
"""Processeses the data.
Converts the catagorical features present in the dataset into numerical
features for future analysis.
Args:
df: The dataframe to be converted
Returns:
The processed dataframe with categorical features removed.
"""
# Mark categorical features that must be one-hot encoded as categorical.
# Ensures get_dummies will work properly on test data
df['HallwayType'] = df['HallwayType'].astype(
types.CategoricalDtype(categories=['terraced', 'corridor', 'mixed']))
df['HeatingType'] = df['HeatingType'].astype(
types.CategoricalDtype(
categories=['individual_heating', 'central_heating']))
df['AptManageType'] = df['AptManageType'].astype(
types.CategoricalDtype(
categories=['management_in_trust', 'self_management']))
df['SubwayStation'] = df['SubwayStation'].astype(
types.CategoricalDtype(categories=[
'Kyungbuk_uni_hospital', 'Daegu', 'Sin-nam', 'Myung-duk',
'Chil-sung-market', 'Bangoge', 'Banwoldang', 'no_subway_nearby'
]))
df = _encode_ordinals(df)
df = pd.get_dummies(
df,
columns=[
'HallwayType', 'HeatingType', 'AptManageType', 'SubwayStation'
])
return df
def write_data(df: pd.DataFrame) -> None:
"""Writes dataframe to (../processed).
Writes dataframe to a csv file named out.csv in the processed data
directory.
Args:
df: The dataframe to be written
"""
project_dir = pathlib.Path(__file__).resolve().parents[2]
processed_data_path = os.path.join(project_dir, 'data', 'processed')
df.to_csv(os.path.join(processed_data_path, 'out.csv'))
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
main() # pylint: disable=no-value-for-parameter
| 3,452 |
test_chars.py
|
sosi-deadeye/randomchar
| 0 |
2022869
|
import string
import randomChar
def test_Letter_char():
test_result = string.ascii_letters
result = randomChar.Letter()
assert result in test_result
def test_LoCase_char():
test_result = string.ascii_lowercase
result = randomChar.LoCase()
assert result in test_result
def test_UpCase_char():
test_result = string.ascii_uppercase
result = randomChar.UpCase()
assert result in test_result
def test_Digit():
test_result = string.digits
result = randomChar.Digit()
assert result in test_result
def test_Symbol():
test_result = string.punctuation
result = randomChar.Symbol()
assert result in test_result
if __name__ == '__main__':
print("randomChar v0.1.0-beta")
| 748 |
chap7/practice.py
|
theChad/ThinkPython
| 0 |
2022936
|
# Section 7.3 exercise
def print_n(s, n):
"""Print a string (s) n times.
s: string
n: number of times to print (will be rounded down to nearest integer)
negative n will not print anything
"""
while n>=1:
print(s)
n-=1 # decrement n; same as n = n-1
print_n('test', 5.5)
print_n('not a test', -34)
| 342 |