max_stars_repo_path (stringlengths 4-182) | max_stars_repo_name (stringlengths 6-116) | max_stars_count (int64, 0-191k) | id (stringlengths 7-7) | content (stringlengths 100-10k) | size (int64, 100-10k) |
---|---|---|---|---|---|
declaraciones/sitio/urls.py
|
rafaelhn2021/proyecto
| 0 |
2025371
|
from django.conf.urls import url
from django.conf.urls.static import static
from django.conf import settings
from django.urls import path, re_path, reverse_lazy
from django.contrib.auth import views as auth_views
from declaracion.views import PerfilView
from .views import (IndexView, LoginView, LogoutView, FAQView, DeclaracionesPreviasView, PasswordResetRFCView,
DeclaracionesPreviasDescargarView, activar, CambioPasswordView,
PersonalizacionCatalogoPuestosView,
PersonalizacionDatosEntidadView,
PersonalizacionCatalogoAreasView,
DeclaracionesPreviasVerView)
urlpatterns = [
path('', IndexView.as_view(), name="index"),
path('inicio', PerfilView.as_view(), name="inicio"),
path('preguntas-frecuentes', FAQView.as_view(), name="preguntas-frecuentes"),
path('login', LoginView.as_view(), name="login"),
path('logout', LogoutView.as_view(), name="logout"),
path('declaraciones-previas', DeclaracionesPreviasView.as_view(), name="declaraciones-previas"),
re_path(r'^declaraciones-previas/descargar/(?P<folio>[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12})/$',
DeclaracionesPreviasDescargarView.as_view(),
name="declaraciones-previas-descargar"),
re_path(r'^declaraciones-previas/ver/(?P<folio>[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12})/$',
DeclaracionesPreviasVerView.as_view(),
name="declaraciones-previas-ver"),
url(r'^activar/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', activar, name='activar'),
path('cambiar', CambioPasswordView.as_view(), name="cambiar"),
path('recuperar', LogoutView.as_view(), name="recuperar"),
path('password_reset/', PasswordResetRFCView.as_view(success_url=reverse_lazy('password_reset_done')),
name='password_reset'),
path('password_reset/done/', auth_views.PasswordResetDoneView.as_view(), name='password_reset_done'),
path('password_reset/confirm/<uidb64>/<token>/', auth_views.PasswordResetConfirmView.as_view(),
name='password_reset_confirm'),
path('password_reset/complete/', auth_views.PasswordResetCompleteView.as_view(), name='password_reset_complete'),
path('personalizar/catalogos/puestos', PersonalizacionCatalogoPuestosView.as_view(), name='personalizar_catpuestos'),
path('personalizar/catalogos/puestos/agregar', PersonalizacionCatalogoPuestosView.as_view(), name='personalizar_catpuestos_agregar'),
path('personalizar/catalogos/puestos/editar-<slug:pkid>', PersonalizacionCatalogoPuestosView.as_view(), name='personalizar_catpuestos_editar'),
path('personalizar/catalogos/puestos/eliminar-<int:pkid>', PersonalizacionCatalogoPuestosView.as_view(), name='personalizar_catpuestos_eliminar'),
path('personalizar/catalogos/areas', PersonalizacionCatalogoAreasView.as_view(), name='personalizar_catareas'),
path('personalizar/catalogos/areas/agregar', PersonalizacionCatalogoAreasView.as_view(), name='personalizar_catareas_agregar'),
path('personalizar/catalogos/areas/editar-<slug:pkid>', PersonalizacionCatalogoAreasView.as_view(), name='personalizar_catareas_editar'),
path('personalizar/catalogos/areas/eliminar-<int:pkid>', PersonalizacionCatalogoAreasView.as_view(), name='personalizar_catareas_eliminar'),
path('personalizar/datos_entidad', PersonalizacionDatosEntidadView.as_view(), name='personalizar_entidad'),
path('personalizar/datos_entidad/editar', PersonalizacionDatosEntidadView.as_view(), name='personalizar_entidad_editar')
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 3,725 |
py/py_0325_stone_game_ii.py
|
lcsm29/project-euler
| 0 |
2025609
|
# Solution of;
# Project Euler Problem 325: Stone Game II
# https://projecteuler.net/problem=325
#
# A game is played with two piles of stones and two players. On each player's
# turn, the player may remove a number of stones from the larger pile. The
# number of stones removed must be a positive multiple of the number of stones
# in the smaller pile. E.g., let the ordered pair $(6,14)$ describe a
# configuration with 6 stones in the smaller pile and 14 stones in the larger
# pile, then the first player can remove 6 or 12 stones from the larger pile.
# The player taking all the stones from a pile wins the game. A winning
# configuration is one where the first player can force a win. For example,
# $(1,5)$, $(2,6)$, and $(3,12)$ are winning configurations because the first
# player can immediately remove all stones in the second pile. A losing
# configuration is one where the second player can force a win, no matter what
# the first player does. For example, $(2,3)$ and $(3,4)$ are losing
# configurations: any legal move leaves a winning configuration for the second
# player. Define $S(N)$ as the sum of $(x_i + y_i)$ for all losing
# configurations $(x_i, y_i), 0 \lt x_i \lt y_i \le N$. We can verify that
# $S(10) = 211$ and $S(10^4) = 230312207313$. Find $S(10^{16}) \mod 7^{10}$.
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
pass
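# A minimal brute-force sketch (an addition; the original solution above is
# left as a dummy): classify configurations by game-tree search. This is
# feasible only for tiny N; S(10^16) requires exploiting the structure of
# losing positions rather than search.
from functools import lru_cache

@lru_cache(maxsize=None)
def is_winning(x, y):
    # x <= y; a move removes a positive multiple of x stones from y.
    for k in range(1, y // x + 1):
        r = y - k * x
        if r == 0:
            return True  # emptied a pile: immediate win
        if not is_winning(min(x, r), max(x, r)):
            return True  # leave the opponent a losing configuration
    return False

def S(N):
    return sum(x + y for x in range(1, N + 1) for y in range(x + 1, N + 1)
               if not is_winning(x, y))
# S(10) should evaluate to 211, matching the problem statement.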
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 325
timed.caller(dummy, n, i, prob_id)
| 1,522 |
python/amici/testing.py
|
kristianmeyerr/AMICI
| 0 |
2025805
|
"""Test support functions"""
import sys
from tempfile import TemporaryDirectory
class TemporaryDirectoryWinSafe(TemporaryDirectory):
"""TemporaryDirectory that will not raise if cleanup fails.
If any extension was loaded from the temporary directory, cleanup would
otherwise fail on Windows with a ``PermissionError``. This class ignores
such failures.
"""
def cleanup(self):
try:
super().cleanup()
        except PermissionError:
            if sys.platform not in {'win32', 'cygwin'}:
                raise
except NotADirectoryError:
# Ignore exception on Windows for pyd files:
# NotADirectoryError: [WinError 267] The directory name is
# invalid: '....pyd'
pass
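# Usage sketch (an illustrative addition): a drop-in replacement for
# ``TemporaryDirectory`` in tests that load extension modules on Windows::
#
#     with TemporaryDirectoryWinSafe() as tmpdir:
#         ...  # build and import an extension module from tmpdir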
| 779 |
json_to_csv.py
|
liorazi/liorazi.github.io
| 0 |
2025598
|
import json
import pandas as pd
import requests
import subprocess
print("Downloading Tag JSON...")
items = requests.get('https://data.gov.il/api/action/datastore_search?resource_id=c8b9f9c8-4612-4068-934f-d4acd2e3c06e&limit=1000000')
data = items.json()
records = data["result"]["records"]
print("Loading Tag JSON...")
tag_flattened = []
for line in records:
tag_flattened.append({
"MISPAR RECHEV" : line['MISPAR RECHEV'],
"TAARICH HAFAKAT TAG" : line['TAARICH HAFAKAT TAG'],
"SUG TAV" : line['SUG TAV']
})
df = pd.DataFrame(tag_flattened)
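# Zero-pad to fixed widths below: '{0:0>8}' pads the vehicle number to 8
# digits and '{0:0>2}' pads the tag-type code to 2 digits (field meanings
# inferred from the Hebrew column names).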
df['MISPAR RECHEV'] = df['MISPAR RECHEV'].apply('{0:0>8}'.format)
df['SUG TAV'] = df['SUG TAV'].apply('{0:0>2}'.format)
print("Saving JSON to CSV File")
df.to_csv('tag.csv', sep='|', index=False)
rc = subprocess.call("./release.sh", shell=True)
| 798 |
data/mnli/hans/word_overlap_baseline.py
|
technion-cs-nlp/Generative-NLI
| 1 |
2025748
|
import numpy as np
import json
import os
from sklearn.linear_model import LogisticRegression
idx2label = ["contradiction", "entailment", "neutral"]
label2idx = {"contradiction": 0, "entailment": 1, "neutral": 2}
def frac_overlap(p, h):
p = p.split(' ')
h = h.split(' ')
total_overlap = 0
for h_i in h:
if h_i in p:
total_overlap += 1
return total_overlap/len(h)
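# For example, frac_overlap('the doctor visited the lawyer', 'the doctor visited')
# returns 1.0, since every hypothesis token also appears in the premise.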
def load_data(prefix):
source_file = prefix+'_source_file'
label_file = prefix+'_lbl_file'
id_file = prefix+'_id_file'
premises = []
hypotheses = []
labels = []
ids = []
fraction_overlap = []
with open(source_file, 'r') as f:
for line in f:
p, h = line.strip().split('|||')
premises.append(p)
hypotheses.append(h)
fraction_overlap.append(frac_overlap(p, h))
with open(label_file, 'r') as f:
for line in f:
label = line.strip()
labels.append(label2idx[label])
if os.path.exists(id_file):
with open(id_file, 'r') as f:
for line in f:
id_i = line.strip()
ids.append(id_i)
else:
ids = None
X = np.array(fraction_overlap)[:, np.newaxis]
y = np.array(labels)
return X, y, ids
def train():
X, y, _ = load_data('../cl_multinli_train')
clf = LogisticRegression(random_state=0).fit(X, y)
return clf, clf.score(X, y)
def dev_acc(clf):
X, y, _ = load_data('../cl_multinli_dev_matched')
return clf.score(X, y)
def hans_acc(clf):
X, y, _ = load_data('hans_evalset_lexical_overlap')
lexical_overlap_acc = clf.score(X, y)
X, y, _ = load_data('hans_evalset_subsequence')
subsequence_acc = clf.score(X, y)
X, y, _ = load_data('hans_evalset_constituent')
constituent_acc = clf.score(X, y)
return lexical_overlap_acc, subsequence_acc, constituent_acc
def hans_predictions(clf):
X1, _, ids1 = load_data('hans_evalset_lexical_overlap')
X2, _, ids2 = load_data('hans_evalset_subsequence')
X3, _, ids3 = load_data('hans_evalset_constituent')
X = np.concatenate([X1, X2, X3], 0)
ids = np.concatenate([ids1, ids2, ids3], 0)
logprobs = clf.predict_log_proba(X)
logprobs_dict = {id_i: logprob_i.tolist() for id_i, logprob_i in zip(ids, logprobs)}
return logprobs_dict
if __name__ == '__main__':
clf, train_acc = train()
logprobs_dict = hans_predictions(clf)
print(len(logprobs_dict))
with open(f"output/wordoverlapmodel_logprobs_allsubsets.json", 'w') as f:
json.dump(logprobs_dict, f)
#print(train_acc)
#print(dev_acc(clf))
#print(hans_acc(clf))
| 2,670 |
Neural Network/imagePreprocessing.py
|
khaftool/Hand-Digit-Reco
| 0 |
2025443
|
# Import Modules
import numpy as np
import matplotlib.pyplot as plt
import cv2
from scipy.ndimage import center_of_mass
def load(filename):
image = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
return image
def resize(image):
image = cv2.resize(image, (28, 28))
return image
def normalize(image):
_, image = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY_INV)
image = image / 255.0
return image
def center(image):
cy, cx = center_of_mass(image)
rows, cols = image.shape
shiftx = np.round(cols/2.0-cx).astype(int)
shifty = np.round(rows/2.0-cy).astype(int)
M = np.float32([[1, 0, shiftx], [0, 1, shifty]])
image = cv2.warpAffine(image, M, (cols, rows))
return image
def correct(image):
image[:,0] = 0.0
image[:,-1] = 0.0
image[0,:] = 0.0
image[-1,:] = 0.0
return image
def get_image(DrawingFrame):
pixmap = DrawingFrame.grab()
pixmap.save("image", "jpg")
image = load("image").astype(np.float32)
image = normalize(image)
image = correct(image)
image = center(image)
image = resize(image)
return image
| 1,115 |
openwater/urls.py
|
openwater/h2o-really
| 3 |
2025720
|
from django.conf.urls import patterns, include, url
from django.views.generic.base import TemplateView
from django.contrib import admin
admin.autodiscover()
from .views import HomePageView
urlpatterns = patterns(
'',
# Examples:
url(r'^$', HomePageView.as_view(), name='home'),
url(r'^contributing/technical/$',
TemplateView.as_view(template_name="contributing.html"),
name='contribute-technical'),
url(r'^sample-kits/$',
TemplateView.as_view(template_name="sample-kits.html"),
name="sample-kits"),
url(r'^supporting-data/', include('supplements.urls')),
url(r'^observations/', include('observations.urls')),
url(r'^api/v1/auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^api/v1/observations/', include('observations.api.urls')),
url(r'^api/v1/geocode_postcode$', 'observations.api.views.geocode_postcode', name="geocode_postcode"),
url(r'^api/v1/geocode_postcode/(?P<postcode>[a-zA-Z0-9 +]+)$', 'observations.api.views.geocode_postcode', name="geocode_postcode"),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
url(r'^blog/', include('diario.urls', namespace='blog', app_name='diario')),
)
| 1,397 |
command_handlers/flipcoin.py
|
NeonChicken/thiliumbot
| 0 |
2024216
|
import random
from .insultme import generate_insult
# Flip a coin and send the result.
async def run(client, message):
if len(message.content.split()) < 2:
response = "It's {}!".format(random.choice(['heads','tails']))
await message.channel.send(response)
return
else:
response = "Don't put anything after the command, you {}!".format(generate_insult())
await message.channel.send(response)
return
| 455 |
Leetcode/Intermediate/Array_and_string/73_Set_Matrix_Zeroes.py
|
ZR-Huang/AlgorithmPractices
| 1 |
2025757
|
'''
Given a m x n matrix, if an element is 0, set its entire row and column to 0. Do it in-place.
Example 1:
Input:
[
[1,1,1],
[1,0,1],
[1,1,1]
]
Output:
[
[1,0,1],
[0,0,0],
[1,0,1]
]
Example 2:
Input:
[
[0,1,2,0],
[3,4,5,2],
[1,3,1,5]
]
Output:
[
[0,0,0,0],
[0,4,5,0],
[0,3,1,0]
]
Follow up:
- A straight forward solution using O(mn) space is probably a bad idea.
- A simple improvement uses O(m + n) space, but still not the best solution.
- Could you devise a constant space solution?
'''
class Solution:
def setZeroes(self, matrix) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
"""
        Idea (directly, with extra space):
1. scan the matrix and store indexes of zeroes.
2. set rows and columns zeroes.
"""
        m = len(matrix)
        if m == 0:
            return
        n = len(matrix[0])
        if n == 0:
            return
indexes = []
for i, row in enumerate(matrix):
for j, element in enumerate(row):
if element == 0:
indexes.append((i, j))
# print(indexes)
for i, j in indexes:
matrix[i] = [0] * n
for _i in range(m):
matrix[_i][j] = 0
def setZeroes_v2(self, matrix) -> None:
"""
Idea (a constant space solution):
1. Scan the matrix.
2. If meet a zero, label the head elements of this row and this column.
For example, if matrix[i][j] = 0, set matrix[i][0] and matrix[0][j] INF.
3. Scan the matrix again. Set rows and columns which have the label to zeroes.
Bugs:
The problem does not give the range of elements of the matrix,
so the solution easily gets the Wrong Answer if the label isn't set appropriately.
"""
        m = len(matrix)
        if m == 0:
            return
        n = len(matrix[0])
        if n == 0:
            return
col_0 = False
for i in range(m):
if matrix[i][0] == 0:
col_0 = True
for i in range(m):
for j in range(1, n):
if matrix[i][j] == 0:
matrix[i][0] = 0
matrix[0][j] = 0
# scan the matrix to find out LABEL,
# and set rows and columns to zeroes
for i in range(1, m):
for j in range(1, n):
if matrix[i][0] == 0 or matrix[0][j] == 0:
matrix[i][j] = 0
# See if the first row needs to be set to zero
if matrix[0][0] == 0:
matrix[0] = [0] * n
# See if the first column needs to be set to zero
if col_0:
for i in range(m):
matrix[i][0] = 0
matrix = [[8,3,6,9,7,8,0,6],[0,3,7,0,0,4,3,8],[5,3,6,7,1,6,2,6],[8,7,2,5,0,6,4,0],[0,2,9,9,3,9,7,3]]
Solution().setZeroes_v2(matrix)
print(matrix)
| 2,945 |
apps/accounts/views/oauth2_profile.py
|
dtisza1/bluebutton-web-server
| 0 |
2024013
|
import waffle
from django.http import JsonResponse
from rest_framework import exceptions
from rest_framework.decorators import api_view, permission_classes, authentication_classes
from apps.fhir.bluebutton.models import Crosswalk
from apps.capabilities.permissions import TokenHasProtectedCapability
from oauth2_provider.decorators import protected_resource
from oauth2_provider.contrib.rest_framework import OAuth2Authentication
from collections import OrderedDict
from apps.fhir.bluebutton.permissions import ApplicationActivePermission
def get_userinfo(user):
"""
OIDC-style userinfo
"""
data = OrderedDict()
data['sub'] = user.username
data['name'] = "%s %s" % (user.first_name, user.last_name)
data['given_name'] = user.first_name
data['family_name'] = user.last_name
data['email'] = user.email
data['iat'] = user.date_joined
    # Get the FHIR ID if it's there
fhir_id = get_fhir_id(user)
if fhir_id:
data['patient'] = fhir_id
data['sub'] = fhir_id
return data
@api_view(["GET"])
@authentication_classes([OAuth2Authentication])
@permission_classes([ApplicationActivePermission, TokenHasProtectedCapability])
@protected_resource()
def openidconnect_userinfo(request, **kwargs):
if request.path.startswith('/v2') and (not waffle.flag_is_active(request, 'bfd_v2_flag')):
raise exceptions.NotFound("bfd_v2_flag not active.")
user = request.resource_owner
data = get_userinfo(user)
return JsonResponse(data)
def get_fhir_id(user):
    crosswalk = Crosswalk.objects.filter(user=user).first()
    return crosswalk.fhir_id if crosswalk else None
| 1,678 |
code/exercises/exercise_01.py
|
DahlitzFlorian/python-basic-training
| 3 |
2025624
|
# Create a program that asks the user to enter their name and their age.
# Print out a message addressed to them that tells them the year that they will turn 100 years old.
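# One possible solution sketch (the exercise leaves the implementation open;
# using the current year is an assumption about the intended approach):
from datetime import date

name = input("What is your name? ")
age = int(input("How old are you? "))
print(f"{name}, you will turn 100 in the year {date.today().year + (100 - age)}.")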
| 173 |
package.py
|
dhicks6345789/govuk-frontend-jekyll-theme
| 0 |
2024741
|
#!/usr/bin/python3
import os
import sys
import shutil
import zipfile
import urllib.request
# Recursively copy all files of a given extension from src to dest folders.
# If a dict named "replace" is passed in, key:value pairs will be replaced on both the target filenames and file contents.
def copyFiles(src, dest, filetypes, replace={}):
for item in os.listdir(src):
if os.path.isdir(src + os.sep + item):
copyFiles(src + os.sep + item, dest + os.sep + item, filetypes, replace=replace)
else:
if item.split(".")[-1].lower() in filetypes:
os.makedirs(dest, exist_ok=True)
targetFile = dest + os.sep + item
shutil.copy(src + os.sep + item, targetFile)
                with open(targetFile, encoding="latin-1") as targetFileHandle:
                    targetFileContents = targetFileHandle.read()
                for findValue in replace.keys():
                    targetFileContents = targetFileContents.replace(findValue, replace[findValue])
                with open(targetFile, "w", encoding="latin-1") as targetFileHandle:
                    targetFileHandle.write(targetFileContents)
for findValue in replace.keys():
if findValue in targetFile:
os.rename(targetFile, targetFile.replace(findValue, replace[findValue]))
# Print a message for the user if they haven't specified any parameters.
outputFolder = ""
if len(sys.argv) < 2:
print("Generates a Jekyll-compatible template from the GOV.UK Design System frontend.")
print("Usage: python3 package.py outputFolder")
sys.exit(0)
outputFolder = sys.argv[1]
print("Downloading govuk-frontend archive...")
zipArchive = open("master.zip", "wb")
zipArchive.write(urllib.request.urlopen("https://github.com/alphagov/govuk-frontend/archive/master.zip").read())
zipArchive.close()
zipfile.ZipFile("master.zip", "r").extractall("master")
os.remove("master.zip")
versionHandle = open("master/govuk-frontend-master/dist/VERSION.txt")
govukFrontendVersion = versionHandle.read().strip()
versionHandle.close()
print("Version obtained: " + govukFrontendVersion)
os.makedirs(outputFolder, exist_ok=True)
os.makedirs(outputFolder + os.sep + "_sass", exist_ok=True)
os.makedirs(outputFolder + os.sep + "_includes", exist_ok=True)
os.makedirs(outputFolder + os.sep + "_layouts", exist_ok=True)
os.makedirs(outputFolder + os.sep + "_plugins", exist_ok=True)
os.makedirs(outputFolder + os.sep + "assets", exist_ok=True)
os.makedirs(outputFolder + os.sep + "javascript", exist_ok=True)
os.makedirs(outputFolder + os.sep + "stylesheets", exist_ok=True)
print("Copying files...")
# Copy over the SCSS files from govuk-frontend
copyFiles("master" + os.sep + "govuk-frontend-master" + os.sep + "package" + os.sep + "govuk", outputFolder + os.sep + "_sass", ["scss"])
# Copy over compiled / minified Javascript files from govuk-frontend.
copyFiles("master" + os.sep + "govuk-frontend-master" + os.sep + "dist", outputFolder + os.sep + "javascript", ["js"])
# Copy over static assets (fonts, icons, images) from govuk-frontend.
copyFiles("master" + os.sep + "govuk-frontend-master" + os.sep + "dist" + os.sep + "assets", outputFolder + os.sep + "assets", ["woff","woff2","eot","ico","png","svg"])
# Remove the govuk-frontend folder.
shutil.rmtree("master")
# Copy over our stylesheets folder, replacing version numbers along the way.
copyFiles("stylesheets", outputFolder + os.sep + "stylesheets", ["scss"], replace={"versionGoesHere":govukFrontendVersion})
# Copy over our includes folder.
copyFiles("_includes", outputFolder + os.sep + "_includes", ["html"])
# Copy over our layouts folder.
copyFiles("_layouts", outputFolder + os.sep + "_layouts", ["html"])
# Copy over our plugins folder.
copyFiles("_plugins", outputFolder + os.sep + "_plugins", ["rb"])
print("Done.")
| 3,753 |
astigmia/lobby/views.py
|
marcus-crane/astigmia
| 0 |
2024723
|
from django.http import HttpResponse
def index(request):
# TODO: Stick a proper homepage here
return HttpResponse("Please take a seat.")
| 147 |
tests/handlers/test_federation.py
|
arekinath/synapse
| 0 |
2025174
|
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.api.constants import EventTypes
from synapse.api.errors import AuthError, Codes
from synapse.rest import admin
from synapse.rest.client.v1 import login, room
from tests import unittest
class FederationTestCase(unittest.HomeserverTestCase):
servlets = [
admin.register_servlets,
login.register_servlets,
room.register_servlets,
]
def make_homeserver(self, reactor, clock):
hs = self.setup_test_homeserver(http_client=None)
self.handler = hs.get_handlers().federation_handler
self.store = hs.get_datastore()
return hs
def test_exchange_revoked_invite(self):
user_id = self.register_user("kermit", "test")
tok = self.login("kermit", "test")
room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
# Send a 3PID invite event with an empty body so it's considered as a revoked one.
invite_token = "sometoken"
self.helper.send_state(
room_id=room_id,
event_type=EventTypes.ThirdPartyInvite,
state_key=invite_token,
body={},
tok=tok,
)
d = self.handler.on_exchange_third_party_invite_request(
room_id=room_id,
event_dict={
"type": EventTypes.Member,
"room_id": room_id,
"sender": user_id,
"state_key": "@someone:example.org",
"content": {
"membership": "invite",
"third_party_invite": {
"display_name": "alice",
"signed": {
"mxid": "@alice:localhost",
"token": invite_token,
"signatures": {
"magic.forest": {
"ed25519:3": "fQpGIW1Snz+pwLZu6sTy2aHy/DYWWTspTJRPyNp0PKkymfIsNffysMl6ObMMFdIJhk6g6pwlIqZ54rxo8SLmAg"
}
},
},
},
},
},
)
failure = self.get_failure(d, AuthError).value
self.assertEqual(failure.code, 403, failure)
self.assertEqual(failure.errcode, Codes.FORBIDDEN, failure)
self.assertEqual(failure.msg, "You are not invited to this room.")
| 3,009 |
mistral/mistral/engine/utils.py
|
Toure/openstack_mistral_wip
| 0 |
2024561
|
# Copyright 2014 - Mirantis, Inc.
# Copyright 2015 - Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from mistral.db.v2 import api as db_api
from mistral import exceptions as exc
from mistral import utils
# TODO(rakhmerov): This method is too abstract, validation rules may vary
# depending on object type (action, wf), it's not clear what it can be
# applied to.
# TODO(rakhmerov): It must not do any manipulations with parameters
# (input_dict)!
def validate_input(definition, input_dict, spec=None):
input_param_names = copy.deepcopy(list((input_dict or {}).keys()))
missing_param_names = []
spec_input = (spec.get_input() if spec else
utils.get_dict_from_string(definition.input))
for p_name, p_value in spec_input.items():
if p_value is utils.NotDefined and p_name not in input_param_names:
missing_param_names.append(str(p_name))
if p_name in input_param_names:
input_param_names.remove(p_name)
if missing_param_names or input_param_names:
msg = 'Invalid input [name=%s, class=%s'
msg_props = [definition.name, spec.__class__.__name__]
if missing_param_names:
msg += ', missing=%s'
msg_props.append(missing_param_names)
if input_param_names:
msg += ', unexpected=%s'
msg_props.append(input_param_names)
msg += ']'
raise exc.InputException(
msg % tuple(msg_props)
)
else:
utils.merge_dicts(input_dict, spec_input, overwrite=False)
def resolve_workflow_definition(parent_wf_name, parent_wf_spec_name,
wf_spec_name):
wf_def = None
if parent_wf_name != parent_wf_spec_name:
# If parent workflow belongs to a workbook then
# check child workflow within the same workbook
# (to be able to use short names within workbooks).
# If it doesn't exist then use a name from spec
# to find a workflow in DB.
        wb_name = parent_wf_name[:-(len(parent_wf_spec_name) + 1)]
wf_full_name = "%s.%s" % (wb_name, wf_spec_name)
wf_def = db_api.load_workflow_definition(wf_full_name)
if not wf_def:
wf_def = db_api.load_workflow_definition(wf_spec_name)
if not wf_def:
raise exc.WorkflowException(
"Failed to find workflow [name=%s]" % wf_spec_name
)
return wf_def
| 2,988 |
HLTrigger/Configuration/python/HLT_75e33/modules/hltCaloMET_cfi.py
|
PKUfudawei/cmssw
| 1 |
2022723
|
import FWCore.ParameterSet.Config as cms
hltCaloMET = cms.EDProducer("CaloMETProducer",
alias = cms.string('RawCaloMET'),
calculateSignificance = cms.bool(False),
globalThreshold = cms.double(0.3),
noHF = cms.bool(False),
src = cms.InputTag("towerMaker")
)
| 278 |
brl_ipcam/scripts/ipcam_hs.py
|
HeartsBRL/hearts_vision
| 1 |
2025538
|
#!/usr/bin/env python
import base64
import time
import urllib2
import cv2
import numpy as np
import rospy
import rospkg
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import sys
import json
"""
Examples of objects for image frame aquisition from both IP and
physically connected cameras
Requires:
- opencv (cv2 bindings)
- numpy
"""
class ipCamera(object):
def __init__(self, url, user=None, password=None):
self.url = url
auth_encoded = base64.encodestring('%s:%s' % (user, password))[:-1]
self.req = urllib2.Request(self.url)
self.req.add_header('Authorization', 'Basic %s' % auth_encoded)
def get_frame(self):
response = urllib2.urlopen(self.req)
img_array = np.asarray(bytearray(response.read()), dtype=np.uint8)
frame = cv2.imdecode(img_array, 1)
return frame
def main(camNum, videoMode):
# living room - 192.168.1.72
# bedroom - 192.168.1.74
# kitchen - 192.168.1.75
# hall - 192.168.1.78
# load password and username from file:
rospack = rospkg.RosPack()
basepath = rospack.get_path('brl_ipcam')
with open(basepath + '/conf/passwords.json') as json_file:
data = json.load(json_file)
#print data
usr = data["user"]
pwd = data["pass"]
rospy.init_node('ipcam_' + videoMode + "_" + camNum, anonymous=True)
image_pub = rospy.Publisher('ipcam/' + videoMode + "/" + camNum ,Image)
bridge = CvBridge()
##### SNAPSHOT Mode:
if videoMode == "SS":
# snapshot url:
camURL = { "1" : "http://192.168.1.78/snapshot.cgi?user=" + usr + "&pwd=" + pwd,
"2" : "http://192.168.1.72:88/cgi-bin/CGIProxy.fcgi?cmd=snapPicture2&usr=" + usr + "&pwd=" + pwd +"&.mjpg",
"3" : "http://192.168.1.75/snapshot.cgi?user=" + usr + "&pwd=" + pwd,
"4" : "http://192.168.1.74:88/cgi-bin/CGIProxy.fcgi?cmd=snapPicture2&usr=" + usr + "&pwd=" + pwd +"&.mjpg"
}
cam = ipCamera(camURL[camNum])
while not rospy.is_shutdown():
#print time.strftime("[%H:%M:%S] New Frame", time.gmtime())
img = cam.get_frame()
#cv2.imshow("hello", img)
#cv2.waitKey(1)
try:
image_pub.publish(bridge.cv2_to_imgmsg(img, "bgr8"))
except CvBridgeError as e:
error = e
#print e
##### VIDEO STREAM Mode
# @TODO: Make this work.
elif videoMode == "VS":
camURL = { "1" : "http://192.168.1.78/videostream.cgi?user" + usr + "&pwd=" + pwd,
"2" : "http://192.168.1.72:88/cgi-bin/CGIStream.cgi?cmd=GetMJStream&usr=" + usr + "&pwd=" + pwd +"&.mjpg",
"3" : "http://192.168.1.75/videostream.cgi?user" + usr + "&pwd=" + pwd,
"4" : "http://192.168.1.74:88/cgi-bin/CGIStream.cgi?cmd=GetMJStream&usr=" + usr + "&pwd=" + pwd +"<PASSWORD>"
}
cap = cv2.VideoCapture(camURL[camNum])
while not rospy.is_shutdown():
_, img = cap.read()
if img is not None:
#print time.strftime("[%H:%M:%S] New Frame", time.gmtime())
cv2.imshow("hello", img)
#cv2.waitKey(1)
try:
image_pub.publish(bridge.cv2_to_imgmsg(img, "bgr8"))
except CvBridgeError as e:
error = e
#print(e)
cv2.destroyAllWindows()
# run code if specifically executed as a program, not an import
if __name__ == "__main__":
if len(sys.argv) < 2:
print("usage: ipcam.py camera_number[1-4] video_mode[SS or VS]")
else:
main(sys.argv[1], sys.argv[2])
| 3,750 |
docs/examples/solving_meddocan_2019.py
|
yacth/autogoal
| 0 |
2023012
|
# # Solving the MEDDOCAN challenge
# This script runs an instance of [`AutoML`](/api/autogoal.ml#automl)
# in the [MEDDOCAN 2019 challenge](https://github.com/PlanTL-SANIDAD/SPACCC_MEDDOCAN).
# | Dataset | URL |
# |--|--|
# | MEDDOCAN 2019 | <https://github.com/PlanTL-SANIDAD/SPACCC_MEDDOCAN> |
# ## Experimentation parameters
#
# This experiment was run with the following parameters:
#
# | Parameter | Value |
# |--|--|
# | Total epochs | 1 |
# | Maximum iterations | 10000 |
# | Timeout per pipeline | 30 min |
# | Global timeout | - |
# | Max RAM per pipeline | 20 GB |
# | Population size | 50 |
# | Selection (k-best) | 10 |
# | Early stop |- |
# The experiments were run in the following hardware configurations
# (allocated indistinctively according to available resources):
# | Config | CPU | Cache | Memory | HDD |
# |--|--|--|--|--|
# | **A** | 12 core Intel Xeon Gold 6126 | 19712 KB | 191927.2MB | 999.7GB |
# | **B** | 6 core Intel Xeon E5-1650 v3 | 15360 KB | 32045.5MB | 2500.5GB |
# | **C** | Quad core Intel Core i7-2600 | 8192 KB | 15917.1MB | 1480.3GB |
# !!! note
# The hardware configuration details were extracted with `inxi -CmD` and summarized.
# ## Relevant imports
# Most of this example follows the same logic as the [UCI example](/examples/solving_uci_datasets).
# First the necessary imports
from autogoal.ml import AutoML
from autogoal.datasets import meddocan
from autogoal.search import (
Logger,
PESearch,
ConsoleLogger,
ProgressLogger,
MemoryLogger,
)
from autogoal.kb import List, Sentence, Word, Postag
# ## Parsing arguments
# Next, we parse the command line arguments to configure the experiment.
# The default values are the ones used for the experimentation reported in the paper.
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--iterations", type=int, default=10000)
parser.add_argument("--timeout", type=int, default=1800)
parser.add_argument("--memory", type=int, default=20)
parser.add_argument("--popsize", type=int, default=50)
parser.add_argument("--selection", type=int, default=10)
parser.add_argument("--global-timeout", type=int, default=None)
parser.add_argument("--examples", type=int, default=None)
parser.add_argument("--token", default=None)
parser.add_argument("--channel", default=None)
args = parser.parse_args()
print(args)
# ## Experimentation
# Instantiate the classifier.
# Note that the input and output types here are defined to match the problem statement,
# i.e., entity recognition.
classifier = AutoML(
search_algorithm=PESearch,
input=List(List(Word())),
output=List(List(Postag())),
search_iterations=args.iterations,
score_metric=meddocan.F1_beta,
cross_validation_steps=1,
search_kwargs=dict(
pop_size=args.popsize,
search_timeout=args.global_timeout,
evaluation_timeout=args.timeout,
memory_limit=args.memory * 1024 ** 3,
),
)
# This custom logger is used for debugging purposes, to be able later to recover
# the best pipelines and all the errors encountered in the experimentation process.
class CustomLogger(Logger):
def error(self, e: Exception, solution):
if e and solution:
with open("meddocan_errors.log", "a") as fp:
fp.write(f"solution={repr(solution)}\nerror={e}\n\n")
def update_best(self, new_best, new_fn, *args):
with open("meddocan.log", "a") as fp:
fp.write(f"solution={repr(new_best)}\nfitness={new_fn}\n\n")
# Basic logging configuration.
logger = MemoryLogger()
loggers = [ProgressLogger(), ConsoleLogger(), logger]
if args.token:
from autogoal.contrib.telegram import TelegramLogger
telegram = TelegramLogger(
token=args.token,
name=f"MEDDOCAN",
channel=args.channel,
)
loggers.append(telegram)
# Finally, loading the MEDDOCAN dataset, running the `AutoML` instance,
# and printing the results.
X_train, y_train, X_test, y_test = meddocan.load(max_examples=args.examples)
classifier.fit(X_train, y_train, logger=loggers)
score = classifier.score(X_test, y_test)
print(score)
print(logger.generation_best_fn)
print(logger.generation_mean_fn)
| 4,250 |
examples/R/config-iris-dataset-from-python.py
|
vishalbelsare/palladium
| 528 |
2025687
|
# Use this file in conjunction with config-iris.py by setting:
#
# export PALLADIUM_CONFIG=config-iris.py,config-iris-dataset-from-python.py
{
'dataset_loader_train': {
'!': 'palladium.dataset.CSV',
'path': 'iris.data',
'names': [
'sepal length',
'sepal width',
'petal length',
'petal width',
'species',
],
'target_column': 'species',
'nrows': 100,
},
'dataset_loader_test': {
'__copy__': 'dataset_loader_train',
'skiprows': 100,
'nrows': None,
},
}
| 602 |
reststore/config.py
|
pombredanne/reststore
| 0 |
2024585
|
import os
import yaml
import tempfile
import hashlib
# Define our defaults.
project = 'reststore'
values = dict(
files=dict(
name='files',
hash_function='md5',
tune_size=100000000,
root=tempfile.gettempdir(),
assert_data_ok=False),
webapp=dict(
debug=False,
quiet=False,
host='127.0.0.1',
port=8586,
server='wsgiref',
proxy_requests=False,
),
client=dict(
uri='http://127.0.0.1:8586/',
cache_max_entries=-1,
cache_batch_delete=10,
),
)
def _update_values(new):
for interface, kwargs in new.items():
values[interface].update(kwargs)
# Load the system configuration file
if os.path.exists('/etc/reststore.yaml'):
with open('/etc/reststore.yaml', 'r') as f:
        _update_values(yaml.safe_load(f))
# Load the user configuration file, update config with its values or initialise
# a new configuration file if it didn't exist.
_config_file_path = os.path.join(os.path.expanduser('~'), '.%s.yaml' % project)
if os.path.exists(_config_file_path):
with open(_config_file_path, 'r') as f:
        _update_values(yaml.safe_load(f))
else:
with open(_config_file_path, 'w') as f:
yaml.dump(values, f, default_flow_style=False)
# Update config with the values found in our current env
for interface, kwargs in values.items():
for key, value in kwargs.items():
environ_key = ('%s_%s_%s' % (project, interface, key)).upper()
value_type = type(value)
kwargs[key] = value_type(os.environ.get(environ_key, value))
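# For example (hypothetical), exporting RESTSTORE_WEBAPP_PORT=9000 before this
# module is imported overrides values['webapp']['port'] with the int 9000.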
| 1,765 |
User/migrations/0008_biodata_email.py
|
judeakinwale/SMS-backup
| 0 |
2025460
|
# Generated by Django 3.2 on 2021-07-05 10:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('User', '0007_auto_20210705_1045'),
]
operations = [
migrations.AddField(
model_name='biodata',
name='email',
field=models.EmailField(default='', max_length=25, unique=True),
preserve_default=False,
),
]
| 441 |
policy_sentry/writing/template.py
|
patricksanders/policy_sentry
| 0 |
2024156
|
"""Templates for the policy_sentry YML files.
These can be used for generating policies
"""
from jinja2 import Template
ACTIONS_TEMPLATE = """# Generate my policy when I know the Actions
mode: actions
name: {{ name }}
description: '' # For human auditability
role_arn: '' # For human auditability
actions:
- ''
"""
CRUD_TEMPLATE = """# Generate my policy when I know the access levels and ARNs
mode: crud
name: {{ name }}
description: '' # For human auditability
role_arn: '' # For human auditability
# Insert ARNs under each access level below
# If you do not need to use certain access levels, delete them.
read:
- ''
write:
- ''
list:
- ''
tagging:
- ''
permissions-management:
- ''
# If the policy needs to use IAM actions that cannot be restricted to ARNs,
# like ssm:DescribeParameters, specify those actions here.
wildcard:
- ''
"""
CRUD_TEMPLATE_DICT = {
"mode": "crud",
"name": "",
"description": "",
"role_arn": "",
"read": [],
"write": [],
"list": [],
"tagging": [],
"permissions-management": [],
"wildcard": [],
}
ACTIONS_TEMPLATE_DICT = {
"mode": "actions",
"name": "",
"description": "",
"role_arn": "",
"actions": [],
}
def create_crud_template(name):
"""Generate the CRUD YML Template with Jinja2"""
template = Template(CRUD_TEMPLATE)
msg = template.render(name=name)
return msg
def create_actions_template(name):
"""Generate the Actions YML template with Jinja2"""
template = Template(ACTIONS_TEMPLATE)
msg = template.render(name=name)
return msg
def get_crud_template_dict():
"""Generate the CRUD template in dict format"""
return CRUD_TEMPLATE_DICT
def get_actions_template_dict():
"""Get the Actions template in dict format."""
return ACTIONS_TEMPLATE_DICT
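# Usage sketch (an illustrative addition): rendering the CRUD template for a
# hypothetical policy name fills in the `name:` field of the YAML.
if __name__ == '__main__':
    print(create_crud_template('example-policy'))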
| 1,797 |
RecSearch/Tests/DataInterfaces/Splitters/Test_percent.py
|
matthew-kimm/RecSearch
| 0 |
2025296
|
from RecSearch.DataInterfaces.Splitters.percent import IXPercentSplit
from RecSearch.Tests.DataInterfaces.Splitters.SharedTestSplitter import SharedTestSplitter
import unittest
import pandas as pd
class TestQuerySplitter(unittest.TestCase):
def setUp(self):
self.percents = {'train': 0.8, 'test': 0.2}
self.Interface = IXPercentSplit()
self.data = pd.DataFrame(data=[['Math', 2017, 3.6],
['Computer Science', 2017, 3.8],
['English', 2019, 2.7],
['Math', 2018, 2.0],
['Art', 2018, 3.1],
['Engineering', 2017, 3.4],
['Art', 2017, 3.5],
['Math', 2017, 2.8],
['English', 2018, 3.6],
['Art', 2018, 3.0]],
columns=['Department', 'Year', 'GPA'],
index=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
self.split = self.Interface.iget_splitter(self.data, **self.percents)
def checkPercent(self, datasets: dict):
if set(self.percents.keys()) == set(datasets.keys()):
compare = {key: (datasets[key].shape[0] / self.data.shape[0]) == percent
for key, percent in self.percents.items()}
return all(compare.values())
else:
raise KeyError('Keys in dictionaries do not match')
def checkPartition(self, datasets: dict):
return pd.concat(datasets.values()).sort_index().equals(self.data)
def test_interface_percent(self):
self.assertTrue(self.checkPercent(self.split))
def test_interface_partition(self):
self.assertTrue(self.checkPartition(self.split))
| 1,894 |
CS3/0700_car_driving_neural_net_evolutionary/for_students/car_ballistic.py
|
nealholt/python_programming_curricula
| 7 |
2022815
|
import car, math
class CarBallistic(car.Car):
def __init__(self, screen, x, y, friction, acceleration):
super().__init__(screen, x, y)
self.dx = 0
self.dy = 0
self.friction = friction
self.acceleration = acceleration
def accelerate(self):
self.dx += math.cos(self.angle)*self.acceleration
self.dy += math.sin(self.angle)*self.acceleration
def brake(self):
self.dx = max(0,self.dx-math.cos(self.angle)*self.acceleration)
self.dy = max(0,self.dy-math.sin(self.angle)*self.acceleration)
def moveForward(self):
self.x += self.dx
self.y += self.dy
self.dx = (1-self.friction)*self.dx
self.dy = (1-self.friction)*self.dy
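# Note on the friction model (an added derivation, not in the original file):
# if accelerate() is called every frame, the speed settles where the friction
# decay cancels the added thrust: v = (1 - friction) * (v + acceleration),
# i.e. v_max = acceleration * (1 - friction) / friction along each axis.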
| 739 |
WOW/urls.py
|
ricksaha2000/StayConnected
| 0 |
2025833
|
from django.contrib import admin
from django.urls import path, include
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib.auth.views import LogoutView
from django.contrib.auth import views as auth_views
from django.conf.urls import url
from home.views import home_view, about_view, contact_view
from accounts.views import login_page, register_page
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', home_view, name="home"),
url(r'^about/', about_view, name="about"),
url(r'^contact/', contact_view, name="contact"),
url(r'^login/', login_page, name="login"),
url(r'^logout', LogoutView.as_view(), name="logout"),
url(r'^registration/', register_page, name="register"),
url(r'^events/', include(('events.urls', 'events'), namespace='events')),
url(r'^meetups/', include(('meetups.urls', 'meetups'), namespace='meetups')),
url(r'^jobs/', include(('jobs.urls', 'jobs'), namespace='jobs')),
url(r'^communities/', include(('communities.urls', 'communities'), namespace='communities')),
url(r'^search/', include(('search.urls', 'search'), namespace='search')),
url(r'^password_reset/$', auth_views.PasswordResetView.as_view(template_name='accounts/password_reset.html'), name='password_reset'),
url(r'^password_reset/done/$', auth_views.PasswordResetDoneView.as_view(template_name='accounts/password_reset_done.html'), name='password_reset_done'),
url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[<KEY>/$',auth_views.PasswordResetConfirmView.as_view(template_name='accounts/password_reset_confirm.html'), name='password_reset_confirm'),
url(r'^reset/done/$', auth_views.PasswordResetCompleteView.as_view(template_name='accounts/password_reset_complete.html'), name='password_reset_complete'),
path('admindash/',include('admindash.urls')),
]
urlpatterns += staticfiles_urlpatterns()
#
| 1,902 |
data/dataset_EMOTIC_maskimgs_size.py
|
Sampson-Lee/SIB-Net
| 0 |
2025571
|
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 2019
@author: lixinpeng
"""
import torch
import torch.utils.data as data
import numpy as np
from PIL import Image
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import cv2
from IPython import embed
import scipy.io as sio
import math
import os, collections
import torchvision.utils as vutils
# from .custom_sampler import BalancedBatchSampler
from tqdm import tqdm
import torchfile
import copy
import torchvision.transforms as transforms
from torchvision.transforms import Resize, RandomCrop, CenterCrop, ToTensor, Normalize
disc_class = ['Affection','Anger','Annoyance','Anticipation','Aversion','Confidence','Disapproval','Disconnection','Disquietment','Doubt/Confusion','Embarrassment','Engagement','Esteem','Excitement','Fatigue','Fear','Happiness','Pain','Peace','Pleasure','Sadness','Sensitivity','Suffering','Surprise','Sympathy','Yearning']
cont_class = ['Valence','Arousal','Dominance']
"""
dataset
"""
class EMOTICDataset(data.Dataset):
def __init__(self, config, mode='train', batch_size=64):
self.meta_dir = config['meta_dir']
self.img_dir = config['img_dir']
self.mode = mode
self.transform = {
'train_head': transforms.Compose([
Resize((230,230)),
RandomCrop((224,224)),
ToTensor(),
Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
'train_person': transforms.Compose([
Resize((230,230)),
RandomCrop((224,224)),
ToTensor(),
Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
'train_scene': transforms.Compose([
Resize((230,230)),
RandomCrop((224,224)),
ToTensor(),
Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
]),
'val_head': transforms.Compose([
Resize((230,230)),
RandomCrop((224,224)),
ToTensor(),
Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
'val_person': transforms.Compose([
Resize((230,230)),
RandomCrop((224,224)),
ToTensor(),
Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
'val_scene': transforms.Compose([
Resize((230,230)),
RandomCrop((224,224)),
ToTensor(),
Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
}
print ('Start preprocessing dataset..!')
self.preprocess()
print ('Finished preprocessing dataset..!')
# embed()
print('Analysing data distribution..!')
self.statistics()
print('Finished data distribution..!')
# embed()
if mode=='train':
self.num_data = len(self.image_list)
# self.num_data = batch_size*5e4/20
# self.num_data = max(self.numPclass)*len(disc_class)
            print('EMOTICDataset has {} {} images, augmented to {} images'.format(len(self.image_list), self.mode, self.num_data))
else:
self.num_data = len(self.image_list)
            print('EMOTICDataset has {} {} images'.format(self.num_data, self.mode))
def preprocess(self):
self.image_list = []
self.disc_label_list = []
self.cont_label_list = []
self.personbbox_list = []
self.headbbox_list = []
annotations = torchfile.load(self.meta_dir+'DiscreteContinuousAnnotations26_'+self.mode+'.t7')
for item in tqdm(annotations):
# embed()
filename = str(item[b'filename'], encoding='utf-8')
folder = str(item[b'folder'], encoding='utf-8')
# filename = str(item[b'filename']).encode('utf-8')
# folder = str(item[b'folder']).encode('utf-8')
head_bbox = item[b'head_bbox']
body_bbox = item[b'body_bbox']
workers = item[b'workers']
disc_labels = np.zeros(len(disc_class))
cont_labels = np.zeros(len(cont_class))
for worker in workers:
for cate in worker[b'labels']:
disc_labels[cate-1] = disc_labels[cate-1] + 1
cont_labels = cont_labels + np.array(list(worker[b'continuous'].values()))/10
disc_labels = np.clip(disc_labels/disc_labels.max(), 0, 1)
cont_labels = cont_labels/len(workers)
# print(self.img_dir, folder, filename)
self.image_list.append(self.img_dir+folder+'/'+filename)
self.disc_label_list.append(disc_labels)
self.cont_label_list.append(cont_labels)
self.personbbox_list.append(body_bbox)
self.headbbox_list.append(head_bbox)
def statistics(self):
fig = plt.figure(figsize=(20,20))
labelMatrix = np.array(self.disc_label_list)
numPclass = labelMatrix.sum(axis=0).astype(int)
self.numPclass = numPclass
# plot distribution
sorted_numPclass = np.sort(numPclass)[::-1]
ind = np.arange(len(disc_class)) # the x locations for the groups
plt.bar(ind, sorted_numPclass)
plt.xticks(ind, disc_class, fontsize=5)
plt.xlabel('class');plt.ylabel('number')
sorted_indices = np.argsort(-numPclass)
for ind_ind, ind_ in enumerate(sorted_indices):
print(disc_class[ind_], numPclass[ind_])
plt.text(ind_ind, numPclass[ind_]+0.05, '{}'.format(numPclass[ind_]), ha='center', va='bottom', fontsize=7)
fig.canvas.draw()
fig_arr = np.array(fig.canvas.renderer._renderer)
plt.close()
cv2.imwrite(os.path.dirname(self.meta_dir)+'/EMOTIC_datavisaul_'+self.mode+'.jpg', \
cv2.cvtColor(fig_arr, cv2.COLOR_BGRA2RGB))
# calculate weights
# self.weights_list = list(map(lambda x: 1/math.log(x+1.2), self.numPclass/labelMatrix.shape[0]))
# self.weights_list = list(map(lambda x: log(x), numPclass))
# self.weights_list = list(map(lambda x: x/total, numPclass))
self.weights_list = list(map(lambda x: math.log(x)/10, numPclass))
def __getitem__(self, index):
image = Image.open(self.image_list[index])
if image.mode=='L': image = image.convert('RGB')
if image.mode=='RGBA': image = image.convert('RGB')
        # Build three views of the sample: the cropped head, the cropped body
        # with the head masked, and the scene with the whole person masked out
personbbox = self.personbbox_list[index]
image_body = image.crop(personbbox)
headbbox = self.headbbox_list[index]
image_head = image.crop(headbbox)
img_arr = np.array(image)
img_arr[headbbox[1]:headbbox[3],headbbox[0]:headbbox[2],:] = 0
image_mask_head = Image.fromarray(img_arr)
image_body_mask_head = image_mask_head.crop(personbbox)
img_arr[personbbox[1]:personbbox[3],personbbox[0]:personbbox[2],:] = 0
image_mask_body = Image.fromarray(img_arr)
if self.mode=='train':
data = {'image_scene': self.transform['train_scene'](image_mask_body),
'image_body': self.transform['train_person'](image_body_mask_head),
'image_head': self.transform['train_head'](image_head)}
else:
data = {'image_scene': self.transform['val_scene'](image_mask_body),
'image_body': self.transform['val_person'](image_body_mask_head),
'image_head': self.transform['val_head'](image_head)}
label = {'disc':self.disc_label_list[index], 'cont':self.cont_label_list[index]}
return data, label
def __len__(self):
return self.num_data
#########
# data loader
#########
def denorm(img):
img = img.transpose((1,2,0))*0.5 + 0.5
img = np.uint8(255*img)
return img
def cv2_landmarks(image, landmarks):
for idx, point in enumerate(landmarks):
cv2.circle(image, center=(point[0], point[1]), radius=2, color=(255, 0, 0), thickness=-1)
# cv2.putText(image, str(idx+1), (point[0], point[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.2, (0, 0, 255), 1, cv2.LINE_AA)
return image
def cv2_bboxs(image, bbox):
cv2.rectangle(image, tuple(bbox.astype(int)[0:2]), tuple(bbox.astype(int)[2:4]), (255,0,0), 2)
return image
def load_data(config):
Dataset = {
'train': EMOTICDataset(config.EMOTIC, mode='train', batch_size=config.batch_size),
'val': EMOTICDataset(config.EMOTIC, mode='val', batch_size=32),
'test': EMOTICDataset(config.EMOTIC, mode='test', batch_size=32),
}
data_loader = {
        'train': data.DataLoader(Dataset['train'], batch_size=config.batch_size,
                                 shuffle=True, num_workers=3,
                                 worker_init_fn=lambda worker_id: np.random.seed(0)),
        'val': data.DataLoader(Dataset['val'], batch_size=config.batch_size, shuffle=False,
                               num_workers=3, worker_init_fn=lambda worker_id: np.random.seed(0)),
        'test': data.DataLoader(Dataset['test'], batch_size=config.batch_size, shuffle=False,
                                num_workers=3, worker_init_fn=lambda worker_id: np.random.seed(0)),
}
input_data, _ = next(iter(data_loader['train']))
image_scene, image_body, image_head = input_data['image_scene'], input_data['image_body'], input_data['image_head']
vutils.save_image(image_scene, config.save_dir+'/samples_train_image_scene.png', nrow=8, padding=2, normalize=True)
vutils.save_image(image_body, config.save_dir+'/samples_train_image_body.png', nrow=8, padding=2, normalize=True)
vutils.save_image(image_head, config.save_dir+'/samples_train_image_head.png', nrow=8, padding=2, normalize=True)
# from IPython import embed; embed(); exit()
return data_loader, Dataset
| 9,811 |
analysis/logutils.py
|
RePierre/metis
| 0 |
2025736
|
import logging
import sys
def create_logger(logger_name, log_file, log_to_stdout=True, log_level='DEBUG'):
    logger = logging.getLogger(logger_name)
    # nice output format
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_log_handler = logging.FileHandler(log_file)
    file_log_handler.setFormatter(formatter)
    logger.addHandler(file_log_handler)
    if log_to_stdout:
        stdout_log_handler = logging.StreamHandler(sys.stdout)
        stdout_log_handler.setFormatter(formatter)
        logger.addHandler(stdout_log_handler)
    logger.setLevel(log_level)
    return logger
| 634 |
setup.py
|
Pandaaaa906/ketcher_server
| 0 |
2025025
|
from distutils.core import setup
setup(name='Ketcher',
version='1.0',
description='Ketcher toolkit and service',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/Pandaaaa906/ketcher_server',
packages=['flask_ketcher', ],
package_dir={'flask_ketcher': 'flask_ketcher'},
package_data={'flask_ketcher': ['static/*', 'static/*/*']},
requires=['flask', ]
)
| 429 |
509.fibonacci-number.py
|
windard/leeeeee
| 0 |
2024542
|
#
# @lc app=leetcode id=509 lang=python
#
# [509] Fibonacci Number
#
# https://leetcode.com/problems/fibonacci-number/description/
#
# algorithms
# Easy (66.37%)
# Total Accepted: 20.5K
# Total Submissions: 30.8K
# Testcase Example: '2'
#
# The Fibonacci numbers, commonly denoted F(n) form a sequence, called the
# Fibonacci sequence, such that each number is the sum of the two preceding
# ones, starting from 0 and 1. That is,
#
#
# F(0) = 0, F(1) = 1
# F(N) = F(N - 1) + F(N - 2), for N > 1.
#
#
# Given N, calculate F(N).
#
#
#
# Example 1:
#
#
# Input: 2
# Output: 1
# Explanation: F(2) = F(1) + F(0) = 1 + 0 = 1.
#
#
# Example 2:
#
#
# Input: 3
# Output: 2
# Explanation: F(3) = F(2) + F(1) = 1 + 1 = 2.
#
#
# Example 3:
#
#
# Input: 4
# Output: 3
# Explanation: F(4) = F(3) + F(2) = 2 + 1 = 3.
#
#
#
#
# Note:
#
# 0 ≤ N ≤ 30.
#
#
class Solution(object):
def fib(self, N):
"""
:type N: int
:rtype: int
"""
result = 0
last_one = 0
last_two = 0
for i in range(N+1):
if i == 0:
result = 0
elif i == 1:
result = 1
else:
result = last_one + last_two
if last_one:
last_two = last_one
last_one = result
return result
def _fib(self, N):
"""
:type N: int
:rtype: int
"""
if N == 0:
return 0
elif N == 1:
return 1
else:
return self._fib(N - 1) + self._fib(N - 2)
| 1,564 |
Source/JackFramework/SysBasic/switch.py
|
Archaic-Atom/JackFramework
| 13 |
2024830
|
# -*- coding: utf-8 -*-
class Switch(object):
def __init__(self, value: str) -> object:
self.__value = value
self.__fall = False
    def __iter__(self):
        """Return the match method once, then stop"""
        yield self.match
def match(self, *args: tuple) -> bool:
"""Indicate whether or not to enter a case suite"""
if self.__fall or not args:
return True
elif self.__value in args: # changed for v1.5, see below
self.__fall = True
return True
else:
return False
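# Usage sketch of the intended idiom (an illustrative addition): iterating a
# Switch yields its ``match`` method exactly once; the first matching case
# enables fall-through for subsequent cases.
if __name__ == '__main__':
    for case in Switch('b'):
        if case('a'):
            print('got a')
            break
        if case('b'):
            print('got b')  # this branch runs
            break
        if case():  # a no-argument call acts as the default case
            print('no match')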
| 615 |
OpenCV/q2.py
|
ronnie7z7z/Autumn-of-Automation-Ronit-Shukla
| 0 |
2024073
|
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.cvtColor(cv2.imread("as2.png"), cv2.COLOR_RGB2BGR)
rows,cols,z = img.shape
pts1 = np.float32([[130,195],[215,200],[50,350],[300,350]])
pts2 = np.float32([[0,0],[350,0],[0,350],[350,350]])
M = cv2.getPerspectiveTransform(pts1,pts2)
timg1 = cv2.warpPerspective(img,M,(300,300))
pts1 = np.float32([[130,195],[215,200],[130,270],[215,280]])
pts2 = np.float32([[0,0],[350,0],[0,350],[350,350]])
M = cv2.getPerspectiveTransform(pts1,pts2)
timg2 = cv2.warpPerspective(img,M,(300,300))
pts1 = np.float32([[0,0],[300,0],[0,350],[220,290]])
pts2 = np.float32([[0,0],[350,0],[0,350],[350,350]])
M = cv2.getPerspectiveTransform(pts1,pts2)
timg3 = cv2.warpPerspective(img,M,(300,300))
pts1 = np.float32([[150,215],[190,215],[150,240],[190,240]])
pts2 = np.float32([[0,0],[350,0],[0,350],[350,350]])
M = cv2.getPerspectiveTransform(pts1,pts2)
timg4 = cv2.warpPerspective(img,M,(300,300))
M = cv2.getRotationMatrix2D((cols/2,rows/2),70,1)
timg5 = cv2.warpAffine(img,M,(cols,rows))
pts1 = np.float32([[150,125],[300,125],[150,250],[300,250]])
pts2 = np.float32([[0,0],[350,0],[0,350],[350,350]])
M = cv2.getPerspectiveTransform(pts1,pts2)
timg5 = cv2.warpPerspective(timg5,M,(300,300))
M = cv2.getRotationMatrix2D((cols/2,rows/2),180,1)
timg6 = cv2.warpAffine(img,M,(cols,rows))
pts1 = np.float32([[100,75],[250,75],[100,180],[250,180]])
pts2 = np.float32([[0,0],[350,0],[0,350],[350,350]])
M = cv2.getPerspectiveTransform(pts1,pts2)
timg6 = cv2.warpPerspective(timg6,M,(300,300))
M = cv2.getRotationMatrix2D((cols/2,rows/2),25,1)
timg7 = cv2.warpAffine(img,M,(cols,rows))
pts1 = np.float32([[125,150],[250,150],[125,300],[250,300]])
pts2 = np.float32([[0,0],[350,0],[0,350],[350,350]])
M = cv2.getPerspectiveTransform(pts1,pts2)
timg7 = cv2.warpPerspective(timg7,M,(300,300))
M = cv2.getRotationMatrix2D((cols/2,rows/2),135,1)
timg8 = cv2.warpAffine(img,M,(cols,rows))
pts1 = np.float32([[150,75],[250,75],[150,200],[250,200]])
pts2 = np.float32([[0,0],[350,0],[0,350],[350,350]])
M = cv2.getPerspectiveTransform(pts1,pts2)
timg8 = cv2.warpPerspective(timg8,M,(300,300))
pts1 = np.float32([[130,195],[215,200],[130,270],[215,280]])
pts2 = np.float32([[0,0],[350,0],[0,350],[350,350]])
M = cv2.getPerspectiveTransform(pts1,pts2)
timg9 = cv2.warpPerspective(img,M,(300,300))
timg9 = cv2.bilateralFilter(timg9,20,100,75)
timg10 = cv2.bilateralFilter(img,20,100,75)
timg = [timg1, timg2, timg3, timg4, timg5, timg6, timg7, timg8, timg9, timg10]
for i in range(10):
plt.subplot(2,5,i+1)
plt.imshow(timg[i])
plt.xticks([])
plt.yticks([])
plt.show()
| 2,635 |
testtrail/examples/allroadstime.py
|
hongzhangMu/testrun
| 0 |
2025184
|
#coding:utf-8
import os
import time
# Compute the remaining time of each traffic-light phase at an intersection
def lefttime_per_reads(miletime_now,start_time,greentime,yellowtime,redtime):
"""
miletime_now :当前时间乘以1000,为毫秒时间戳
start_time:当前路口绿灯开始时间,如9点半直行为绿灯,毫秒时间戳
greentime 直行路口绿灯秒数
redtime:直行红灯秒数
yellowtime:黄灯秒数时间
"""
# miletime_now = 1587609000000
alltime = int(greentime + redtime+ yellowtime)
green_yellow = int(greentime+yellowtime)
delta_time = (miletime_now - start_time)/1000
yushu = int(delta_time)%alltime
print(int(delta_time)%alltime)
green,yellow,red =0,0,0
if yushu<=greentime:
green = greentime - yushu
elif yushu >greentime and yushu<=green_yellow:
green = 0
yellow = yellowtime-(yushu -greentime)
elif yushu>green_yellow:
red = redtime-( yushu -green_yellow)
print('================================',green,yellow,red)
return green,yellow,red
# Traffic-light state at the current moment
# Accumulate forward over the next ten minutes
def getafter10min(inttime,greentime,yellowtime,redtime):
"""
inttime:当前时间加上剩余时间
greentime 直行路口绿灯秒数
redtime:直行红灯秒数
yellowtime:黄灯秒数时间
"""
alltime = int(greentime + redtime+ yellowtime)
green_yellow = int(greentime+yellowtime)
print(inttime)
timeArray = time.localtime(inttime)
otherStyleTime = time.strftime("%H:%M:%S", timeArray)
print(otherStyleTime) # 2013--10--10 15:40:00
print(inttime)
contrcut = []
timeArray = time.localtime(inttime)
otherStyleTime = time.strftime("%H:%M:%S", timeArray)
contrcut.append(otherStyleTime)
    try:
        counttime = 400 % (greentime + yellowtime + redtime) + 1
    except ZeroDivisionError:
        counttime = 6
for i in range(0,counttime):
nexttime = (inttime+greentime+alltime*i)
nnexttime = (inttime+green_yellow+alltime*i)
nnnextime = (inttime+alltime+alltime*i)
timeArray1 = time.localtime(nexttime)
otherStyleTime1 = time.strftime("%H:%M:%S", timeArray1)
timeArray2 = time.localtime(nnexttime)
otherStyleTime2 = time.strftime("%H:%M:%S", timeArray2)
timeArray3 = time.localtime(nnnextime)
otherStyleTime3 = time.strftime("%H:%M:%S", timeArray3)
contrcut.append(otherStyleTime1)
contrcut.append(otherStyleTime2)
contrcut.append(otherStyleTime3)
# print('=============710---------===',contrcut)
return contrcut
# Build the full timetable for one road
def concattimes(miletime_now,start_time,greentime,yellowtime,redtime):
final_lists =[]
print(greentime,yellowtime,redtime,'===================')
leftgreen,leftyellow,leftred = lefttime_per_reads(miletime_now,start_time,greentime,yellowtime,redtime)
inttime = miletime_now/1000
flagleftcolor = 'green'
leftinttime = 0
fristtime_lists = []
if leftgreen!=0:
leftinttime =leftgreen+inttime
timeArray = time.localtime(leftinttime)
otherStyleTime = time.strftime("%H:%M:%S", timeArray)
leftinttime1 =leftgreen+inttime+yellowtime
timeArray1 = time.localtime(leftinttime1)
otherStyleTime1 = time.strftime("%H:%M:%S", timeArray1)
leftinttime2 =leftgreen+inttime+yellowtime+redtime
timeArray2 = time.localtime(leftinttime2)
otherStyleTime2 = time.strftime("%H:%M:%S", timeArray2)
print('444444444--------/',otherStyleTime,otherStyleTime1,otherStyleTime2) # e.g. 15:40:00
fristtime_lists.append(otherStyleTime)
fristtime_lists.append(otherStyleTime1)
fristtime_lists.append(otherStyleTime2)
leftnowtime = (inttime +leftgreen+yellowtime+redtime)
loadtime_lists = getafter10min(leftnowtime,greentime,yellowtime,redtime)
final_lists = fristtime_lists+(loadtime_lists)
flagleftcolor = 'green'
elif leftyellow!=0:
leftinttime =leftyellow+inttime
timeArray = time.localtime(leftinttime)
otherStyleTime = time.strftime("%H:%M:%S", timeArray)
leftinttime1 =leftyellow+inttime+redtime
timeArray1 = time.localtime(leftinttime1)
otherStyleTime1 = time.strftime("%H:%M:%S", timeArray1)
fristtime_lists.append(otherStyleTime)
fristtime_lists.append(otherStyleTime1)
leftnowtime = inttime+leftyellow+redtime
loadtime_lists = getafter10min(leftnowtime,greentime,yellowtime,redtime)
final_lists = fristtime_lists +(loadtime_lists)
print(otherStyleTime,otherStyleTime1)
print('yellow')
flagleftcolor = 'yellow'
elif leftred !=0:
print('red')
leftinttime = leftred+inttime
timeArray = time.localtime(leftinttime)
otherStyleTime = time.strftime("%H:%M:%S", timeArray)
fristtime_lists.append(otherStyleTime)
leftnowtime = leftred + inttime
loadtime_lists = getafter10min(leftnowtime,greentime,yellowtime,redtime)
final_lists = fristtime_lists +(loadtime_lists)
flagleftcolor = 'red'
return final_lists,flagleftcolor
# Build one point per second over a window (e.g. 400 seconds).
def ten_minsdata(inttime,miaoshu):
nowtime = []
for i in range(1,miaoshu):
nexttime = (inttime+i)
timeArray1 = time.localtime(nexttime)
otherStyleTime1 = time.strftime("%H:%M:%S", timeArray1)
nowtime.append(otherStyleTime1)
# print(nowtime)
return nowtime
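# Minimal demo (assumed example values; not part of the original module):
if __name__ == '__main__':
    now_ms = int(time.time() * 1000)
    start_ms = now_ms - 40 * 1000  # pretend the green phase started 40s ago
    times, color = concattimes(now_ms, start_ms, 30, 3, 27)
    print(color, times[:5])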
| 4,863 |
simsiam/api/__init__.py
|
tillaczel/simsiam
| 0 |
2025071
|
from simsiam.api.train import train_unsupervised, train_supervised
from simsiam.api.evaluate import evaluate
| 112 |
grmodel/pymcDoseResponse.py
|
meyer-lab/ps-growth-model
| 2 |
2024728
|
"""
Dose response analysis to assess the uncertainty that exists when one only uses the live cell number.
"""
from os.path import join, dirname, abspath
import numpy as np
import pymc3 as pm
import theano.tensor as T
import pandas as pd
from .pymcGrowth import fitKwargs
class doseResponseModel:
""" pymc3 model of just using the live cell number. """
def build_model(self):
""" Builds then returns the pyMC model. """
M = pm.Model()
with M:
# The three sampled parameters are the IC50, the growth (division) rate and the death rate
# Assume just one IC50 for simplicity
lIC50 = pm.Normal("IC50s", 2.0)
Emin_growth = pm.Uniform("Emin_growth", lower=0.0, upper=self.Emax_growth)
Emax_death = pm.Lognormal("Emax_death", -2.0, 2.0)
# Import drug concentrations into theano vector
drugCs = T._shared(self.drugCs)
# Drug term since we're using constant IC50 and hill slope
drugTerm = 1.0 / (1.0 + T.pow(10.0, (lIC50 - drugCs) * pm.Lognormal("hill")))
# Do actual conversion to parameters for each drug condition
growthV = self.Emax_growth + (Emin_growth - self.Emax_growth) * drugTerm
# Calculate the growth rate
# Assuming the death rate in the absence of drug is zero
GR = growthV - Emax_death * drugTerm
# Calculate the number of live cells
lnum = T.exp(GR * self.time)
# Normalize live cell data to control, mirroring how the measurements are taken
# Residual between model prediction and measurement
residual = self.lObs - (lnum / lnum[0])
pm.Normal("dataFitlnum", sd=T.std(residual), observed=residual)
return M
def __init__(self, Drug):
""" Load data and setup. """
filename = join(dirname(abspath(__file__)), "data/initial-data/2017.07.10-H1299-celltiter.csv")
data = pd.read_csv(filename)
# Response should be normalized to the control
data["response"] = data["CellTiter"] / np.mean(data.loc[data["Conc (nM)"] == 0.0, "CellTiter"])
# Put the dose on a log scale as well
data["logDose"] = np.log10(data["Conc (nM)"] + 0.1)
dataLoad = data[data["Drug"] == Drug]
# Handle data import here
self.drugCs = dataLoad["logDose"].values
self.time = 72.0
# Based on control kinetic data
self.Emax_growth = 0.0315
self.lObs = dataLoad["response"].values
# Build the model
self.model = self.build_model()
self.trace = pm.sample(model=self.model, **fitKwargs)
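# Typical use (illustrative; the drug name is hypothetical):
# M = doseResponseModel("DOX")
# pm.summary(M.trace)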
| 2,631 |
docsim/methods/common/loader.py
|
tarohi24/docsim
| 1 |
2025653
|
"""
Query loader
"""
from pathlib import Path
from typing import Generator
from tqdm import tqdm
from typedflow.nodes import LoaderNode
from docsim.methods.common.types import Context
from docsim.methods.common.pre_filtering import load_cols
from docsim.models import ColDocument
from docsim.settings import data_dir
__all__ = ['load_query_files', ]
def load_query_files(dataset: str) -> Generator[ColDocument, None, None]:
qpath: Path = data_dir.joinpath(f'{dataset}/query/dump.bulk')
pbar = tqdm()
with open(qpath) as fin:
while (line := fin.readline()):
doc: ColDocument = ColDocument.from_json(line) # type: ignore
try:
# filter documents
load_cols(docid=doc.docid, dataset=dataset)
except FileNotFoundError:
continue
yield doc
pbar.update(1)
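# Usage sketch ('clef' is a hypothetical dataset name):
# for doc in load_query_files(dataset='clef'):
#     print(doc.docid)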
| 882 |
transferapp/tests/test_transfer_page.py
|
AlpsRunner/money_tranfer_page
| 0 |
2024395
|
import random
import pytest
from django.core.validators import RegexValidator
from django.forms import Field, ModelChoiceField
from django.test import TestCase, Client
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from transferapp.forms import TranferForm
from usersapp import factories
@pytest.mark.django_db
class TransferPageTest(TestCase):
""" Test module for transfer page """
users_qty = 20
client = Client()
@classmethod
def setUpClass(cls):
super().setUpClass()
inn_src = [f'{random.randint(1, 999999999999):012d}' for _ in range(cls.users_qty // 2)]
cls.users = [
factories.CustomUserFactory.create(
inn=random.choice(inn_src),
) for _ in range(cls.users_qty)
]
cls.inn_src = list(set([el.inn for el in cls.users]))
def setUp(self):
self.valid_src_user = random.choice(self.users)
_inn_src = self.inn_src.copy()
_inn_src.remove(self.valid_src_user.inn)
self.valid_inn_src = _inn_src
def test_transfer_form_render(self):
response = self.client.get(reverse('transfer_page'))
assert response.status_code == 200, 'wrong status_code'
form = response.context.get('form')
assert 'src_user' in form.fields, 'no "src_user" field in form'
assert 'dst_inn' in form.fields, 'no "dst_inn" field in form'
assert 'amount' in form.fields, 'no "amount" field in form'
assert len(form.fields.get('src_user').queryset) == \
len(list(filter(lambda x: x.balance > 0, self.users))), \
'wrong users count in "src_user" field'
def test_transfer_form_valid_data(self):
form = TranferForm({
'src_user': self.valid_src_user.pk,
'dst_inn': random.choice(self.valid_inn_src),
'amount': random.randint(1, self.valid_src_user.balance),
})
assert form.is_valid(), 'form is not valid'
def test_transfer_form_empty_src_user(self):
form = TranferForm({
'dst_inn': random.choice(self.valid_inn_src),
'amount': random.randint(1, self.valid_src_user.balance),
})
assert not form.is_valid(), 'form is valid'
assert 'src_user' in form.errors.keys(), 'no key in form errors'
assert form.errors.get('src_user') == [Field.default_error_messages['required']], \
'wrong error message'
def test_transfer_form_wrong_src_user(self):
form = TranferForm({
'src_user': 0,
'dst_inn': random.choice(self.valid_inn_src),
'amount': random.randint(1, self.valid_src_user.balance),
})
assert not form.is_valid(), 'form is valid'
assert 'src_user' in form.errors.keys(), 'no key in form errors'
assert form.errors.get('src_user') == \
[ModelChoiceField.default_error_messages['invalid_choice']], \
'wrong error message'
def test_transfer_form_empty_dst_inn(self):
form = TranferForm({
'src_user': self.valid_src_user.pk,
'amount': random.randint(1, self.valid_src_user.balance),
})
assert not form.is_valid(), 'form is valid'
assert 'dst_inn' in form.errors.keys(), 'no key in form errors'
assert form.errors.get('dst_inn') == [Field.default_error_messages['required']], \
'wrong error message'
def test_transfer_form_wrong_dst_inn(self):
# short inn case
form = TranferForm({
'src_user': self.valid_src_user.pk,
'dst_inn': '1111',
'amount': random.randint(1, self.valid_src_user.balance),
})
assert not form.is_valid(), 'form is valid'
assert 'dst_inn' in form.errors.keys(), 'no key in form errors'
assert RegexValidator.message in form.errors.get('dst_inn'), \
'wrong error message for short case'
# inn with letters case
form = TranferForm({
'src_user': self.valid_src_user.pk,
'dst_inn': '1111hello999',
'amount': random.randint(1, self.valid_src_user.balance),
})
assert not form.is_valid(), 'form is valid'
assert 'dst_inn' in form.errors.keys(), 'no key in form errors'
assert RegexValidator.message in form.errors.get('dst_inn'), \
'wrong error message for letter case'
def test_transfer_form_not_exists_dst_inn(self):
dst_inn = '000000000000'
form = TranferForm({
'src_user': self.valid_src_user.pk,
'dst_inn': dst_inn,
'amount': random.randint(1, self.valid_src_user.balance),
})
assert not form.is_valid(), 'form is valid'
assert 'dst_inn' in form.errors.keys(), 'no key in form errors'
assert form.errors.get('dst_inn') == [_('no users with this inn found')], \
'wrong error message'
def test_transfer_form_empty_amount(self):
form = TranferForm({
'src_user': self.valid_src_user.pk,
'dst_inn': random.choice(self.valid_inn_src),
})
assert not form.is_valid(), 'form is valid'
assert 'amount' in form.errors.keys(), 'no key in form errors'
assert form.errors.get('amount') == [Field.default_error_messages['required']], \
'wrong error message'
def test_transfer_form_wrong_amount(self):
# zero amount case
form = TranferForm({
'src_user': self.valid_src_user.pk,
'dst_inn': random.choice(self.valid_inn_src),
'amount': 0,
})
assert not form.is_valid(), 'form is valid'
assert 'amount' in form.errors.keys(), 'no key in form errors'
assert form.errors.get('amount') == [_('not set amount of money to transfer')], \
'wrong error message for zero amount case'
# not enough balance case
form = TranferForm({
'src_user': self.valid_src_user.pk,
'dst_inn': random.choice(self.valid_inn_src),
'amount': self.valid_src_user.balance + 100,
})
assert not form.is_valid(), 'form is valid'
assert 'amount' in form.errors.keys(), 'no key in form errors'
assert form.errors.get('amount') == [_('src_user have not enough money')], \
'wrong error message for not enough balance case'
| 6,429 |
analysis/models/nodes/filters/built_in_filter_node.py
|
SACGF/variantgrid
| 5 |
2025494
|
from typing import Optional
from django.db import models
from django.db.models import Q
from analysis.models.nodes.analysis_node import AnalysisNode, NodeCount
from analysis.models.nodes.node_counts import get_extra_filters_q
from annotation.models import ClinVarReviewStatus
from snpdb.models.models_enums import BuiltInFilters
class BuiltInFilterNode(AnalysisNode):
built_in_filter = models.CharField(max_length=1, choices=BuiltInFilters.FILTER_CHOICES, null=True)
clinvar_stars_min = models.IntegerField(default=0)
cosmic_count_min = models.IntegerField(default=0)
def modifies_parents(self):
return self.built_in_filter is not None
def get_extra_filters(self):
return dict(BuiltInFilters.FILTER_CHOICES)[self.built_in_filter]
def get_clinvar_stars_q(self):
review_statuses = ClinVarReviewStatus.statuses_gte_stars(self.clinvar_stars_min)
return Q(clinvar__clinvar_review_status__in=review_statuses)
def _get_node_q(self) -> Optional[Q]:
q = get_extra_filters_q(self.analysis.user, self.analysis.genome_build, self.built_in_filter)
if self.built_in_filter == BuiltInFilters.CLINVAR and self.clinvar_stars_min:
q &= self.get_clinvar_stars_q()
elif self.built_in_filter == BuiltInFilters.COSMIC and self.cosmic_count_min:
q &= Q(variantannotation__cosmic_count__gte=self.cosmic_count_min)
return q
def _get_method_summary(self):
if self.modifies_parents():
extra_filters = self.get_extra_filters()
method_summary = f"Filtering to '{extra_filters}'"
else:
method_summary = 'No filters applied as no built in filter selected.'
return method_summary
def get_node_name(self):
name = ''
if self.modifies_parents():
extra_filters = self.get_extra_filters()
name = extra_filters.replace("_", " ")
if self.built_in_filter == BuiltInFilters.CLINVAR and self.clinvar_stars_min:
name += f"\n★ >= {self.clinvar_stars_min}"
elif self.built_in_filter == BuiltInFilters.COSMIC and self.cosmic_count_min:
name += f"\n>= {self.cosmic_count_min}"
return name
@staticmethod
def get_help_text() -> str:
return "Built in filters used in node counts eg Impact / OMIM / ClinVar / COSMIC"
def get_css_classes(self):
css_classes = super().get_css_classes()
if self.modifies_parents():
css_classes.append(f"node-count-{self.built_in_filter}")
return css_classes
def _get_cached_label_count(self, label):
""" Use parent ClinVar count """
count = super()._get_cached_label_count(label)
if count is None:
if label in [BuiltInFilters.TOTAL, self.built_in_filter]:
# Can't use if any extra filters applied
if label == BuiltInFilters.CLINVAR:
if self.clinvar_stars_min:
return None
elif label == BuiltInFilters.COSMIC:
if self.cosmic_count_min:
return None
try:
parent = self.get_single_parent()
parent_node_count = NodeCount.load_for_node(parent, self.built_in_filter)
count = parent_node_count.count
except Exception:
pass
return count
@staticmethod
def get_node_class_label():
return "Built In Filter"
| 3,536 |
tests/commands/test_utils.py
|
fakela/charmcraft
| 0 |
2025472
|
# Copyright 2020 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For further info, check https://github.com/canonical/charmcraft
from charmcraft.commands.utils import make_executable
def test_make_executable_read_bits(tmp_path):
pth = tmp_path / "test"
pth.touch(mode=0o640)
# sanity check
assert pth.stat().st_mode & 0o777 == 0o640
with pth.open() as fd:
make_executable(fd)
# only read bits got made executable
assert pth.stat().st_mode & 0o777 == 0o750
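# For reference, a make_executable satisfying this test could copy each read
# bit onto the matching execute bit (a sketch under assumptions, not
# charmcraft's actual implementation):
#
#     import os
#     def make_executable(fd):
#         mode = os.fstat(fd.fileno()).st_mode
#         os.fchmod(fd.fileno(), mode | ((mode & 0o444) >> 2))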
| 1,016 |
clamav_to_yara.py
|
kojibhy/cuckoo-yara-auto
| 12 |
2025193
|
#!/usr/bin/env python
# encoding: utf-8
#
# Tested on Linux (Ubuntu), Windows XP/7, and Mac OS X
#
"""
clamav_to_yara.py
Created by <NAME> on 2010-03-12.
Copyright (c) 2010 __MyCompanyName__. All rights reserved.
"""
import sys
import os
import re
from optparse import OptionParser
def main():
parser = OptionParser()
parser.add_option("-f", "--file", action="store", dest="filename",
type="string", help="scanned FILENAME")
parser.add_option("-o", "--output-file", action="store", dest="outfile",
type="string", help="output filename")
parser.add_option("-v", "--verbose", action="store_true", default=False,
dest="verbose", help="verbose")
parser.add_option("-s", "--search", action="store", dest="search",
type="string", help="search filter", default="")
(opts, args) = parser.parse_args()
if opts.filename == None:
parser.print_help()
parser.error("You must supply a filename!")
if not os.path.isfile(opts.filename):
parser.error("%s does not exist" % opts.filename)
if opts.outfile == None:
parser.print_help()
parser.error("You must specify an output filename!")
yara_rule = """
rule %s
{
strings:
%s
condition:
%s
}
"""
rules = {}
output = ""
data = open(opts.filename, 'rb').readlines()
if (opts.filename.endswith(".cvd") or opts.filename.endswith(".cld")) and data[0].find("ClamAV") == 0:
print "It seems you're passing a compressed database."
print "Try using sigtool -u to decompress first."
return
print "[+] Read %d lines from %s" % (len(data), opts.filename)
# ClamAV signatures are one per line
for line in data:
# signature format is
# name:sigtype:offset:signature
try:
vals = line.split(':')
if len(vals) < 4 or len(vals) > 6:
print "**ERROR reading ClamAV signature file**"
continue
name = vals[0]
sigtype = vals[1]
offset = vals[2]
signature = vals[3]
except:
print "**ERROR reading ClamAV signature file**"
continue
# if specified, only parse rules that match a search criteria
if opts.search in name:
# sanitize rule name for YARA compatibility
# YARA does not allow non-alphanumeric chars besides _
rulename_regex = re.compile('(\W)')
rulename = rulename_regex.sub('_', name)
# and cannot start with a number
rulename_regex = re.compile('(^[0-9]{1,})')
rulename = rulename_regex.sub('', rulename)
# if the rule doesn't exist, create a dict entry
if rulename not in rules:
rules[rulename] = []
# handle the ClamAV style jumps
# {-n} is n or less bytes
jump_regex = re.compile('(\{-(\d+)\})')
signature = jump_regex.sub('{0-\g<2>}', signature)
# {n-} is n or more bytes
jump_regex = re.compile('(\{(\d+)-\})')
matches = jump_regex.findall(signature)
if matches:
for match in matches:
#print "\t\tfound %s" % (match[1])
start = int(match[1])
jump_regex = re.compile('(\{(%d)-\})' % (start))
if (start < 256):
#print "\t\t\tfound short jump of len %d" % (start)
signature = jump_regex.sub('[0-1]', signature)
else:
#print "\t\t\tfound long jump, replacing with '*'"
signature = jump_regex.sub('*', signature)
# {n-m} is n to m bytes
# need to make sure it's not bigger than 255,
# and the high bound cannot exceed 255
# if it is we'll treat it like a '*'
jump_regex = re.compile('(\{(\d+)-(\d+)\})')
matches = jump_regex.findall(signature)
if matches:
for match in matches:
print "\t\tfound %s - %s" % (match[1], match[2])
start = int(match[1])
end = int(match[2])
jump_regex = re.compile('(\{(%d)-(%d)\})' % (start, end))
if (end - start == 0):
if opts.verbose:
print "\t**Skip nothing, impossible!**"
signature = jump_regex.sub('', signature)
elif (end - start < 256) and (end < 256):
#print "\t\t\tfound short jump of len %d" % (end - start)
signature = jump_regex.sub('[\g<2>-\g<3>]', signature)
else:
#print "\t\t\tfound long jump, replacing with '*'"
signature = jump_regex.sub('*', signature)
# {n} bytes
# here we must also enforce the 255 byte maximum jump
# that YARA can handle
jump_regex = re.compile('(\{(\d+)\})')
matches = jump_regex.findall(signature)
if matches:
for match in matches:
#print "\t\tfound %s" % (match[1])
start = int(match[1])
jump_regex = re.compile('(\{(%d)\})' % (start))
if start < 256:
#print "\t\t\tfound short jump of len %d" % (start)
signature = jump_regex.sub('[\g<2>]', signature)
else:
#print "\t\t\tfound long jump, replacing with '*'"
signature = jump_regex.sub('*', signature)
# translate the '*' operator into a pair of signatures
# with an 'and'
if '*' in signature:
for part in signature.split('*'):
if part[0] != '(':
rules[rulename].append(part.strip())
else:
if signature[0] != '(':
rules[rulename].append(signature.strip())
for rule in rules.keys():
detects = ''
conds = "\t"
x = 0
for detect in rules[rule]:
detects += "\t$a%d = { %s }\r\n" % (x, detect)
if x > 0:
conds += " and "
conds += "$a%d" % (x)
x += 1
if detects == '':
if opts.verbose:
print "\t**Found empty rule %s, skipping**" % rule
continue
else:
output += yara_rule % (rule, detects, conds)
if len(output) > 0:
print "\r\n[+] Wrote %d rules to %s\r\n" % (len(rules), opts.outfile)
fout = open(opts.outfile, 'wb')
fout.write(output)
fout.close()
else:
print "\r\n**Could not find any signatures to convert!!!**\r\n"
if __name__ == '__main__':
print "\n" + '#' * 75
print "\t" + "Malware Analyst's Cookbook - ClamAV to YARA Converter 0.0.1"
print "\n" + '#' * 75, "\n"
main()
| 7,276 |
tests/test_mr_market.py
|
ipl31/mr_market
| 0 |
2023369
|
import pytest
from mister_market.MisterMarketBot import MisterMarketBot
@pytest.fixture
def bot():
return MisterMarketBot()
def test_parse_command(bot):
command, args = \
bot._parse_command("test_command param1 param2")
assert command == "test_command"
assert args == ["param1", "param2"]
@pytest.mark.skip(reason="Need to figure out how to test this with new resolver.")
def test_handle_slack_message_bad_command(bot):
result = bot.handle_slack_message("not a command")
assert "I don't understand your command" in result[0].text.text
def test_handle_slack_message_btc(bot):
result = bot.handle_slack_message("@1234 price btc")
symbol, price = result[0].text.text.split()
assert "BTCUSD" in symbol[1:-1]
assert isinstance(float(price[1:-1]), float)
result = bot.handle_slack_message("@1234 price BTC")
symbol, price = result[0].text.text.split()
assert "BTCUSD" in symbol[1:-1]
assert isinstance(float(price[1:-1]), float)
result = bot.handle_slack_message("@1234 price ETHUSD")
symbol, price = result[0].text.text.split()
assert symbol[1:-1] == "ETHUSD"
assert isinstance(float(price[1:-1]), float)
def test_handle_slack_message_aapl(bot):
result = bot.handle_slack_message("@1234 price AAPL")
symbol, price = result[0].text.text.split()
assert symbol[1:-1] == "AAPL"
assert isinstance(float(price[1:-1]), float)
result = bot.handle_slack_message("@1234 price aapl")
symbol, price = result[0].text.text.split()
assert symbol[1:-1] == "AAPL"
assert isinstance(float(price[1:-1]), float)
| 1,605 |
nrrd/formatters.py
|
mscheifer/pynrrd
| 92 |
2023102
|
import numpy as np
def format_number(x):
"""Format number to string
Function converts a number to string. For numbers of class :class:`float`, up to 17 digits will be used to print
the entire floating point number. Any trailing zeros are removed from the end of the number.
See :ref:`user-guide:int` and :ref:`user-guide:double` for more information on the format.
.. note::
IEEE754-1985 standard says that 17 significant decimal digits are required to adequately represent a
64-bit floating point number. Not all fractional numbers can be exactly represented in floating point. An
example is 0.1 which will be approximated as 0.10000000000000001.
Parameters
----------
x : :class:`int` or :class:`float`
Number to convert to string
Returns
-------
vector : :class:`str`
String of number :obj:`x`
"""
if isinstance(x, float):
# Helps prevent loss of precision as using str() in Python 2 only prints 12 digits of precision.
# However, IEEE754-1985 standard says that 17 significant decimal digits is required to adequately represent a
# floating point number.
# The g option is used rather than f because g precision uses significant digits while f is just the number of
# digits after the decimal. (NRRD C implementation uses g).
value = '{:.17g}'.format(x)
else:
value = str(x)
return value
def format_vector(x):
"""Format a (N,) :class:`numpy.ndarray` into a NRRD vector string
See :ref:`user-guide:int vector` and :ref:`user-guide:double vector` for more information on the format.
Parameters
----------
x : (N,) :class:`numpy.ndarray`
Vector to convert to NRRD vector string
Returns
-------
vector : :class:`str`
String containing NRRD vector
"""
return '(' + ','.join([format_number(y) for y in x]) + ')'
def format_optional_vector(x):
"""Format a (N,) :class:`numpy.ndarray` into a NRRD optional vector string
Function converts a (N,) :class:`numpy.ndarray` or :obj:`None` into a string using NRRD vector format. If the input
:obj:`x` is :obj:`None`, then :obj:`vector` will be 'none'
See :ref:`user-guide:int vector` and :ref:`user-guide:double vector` for more information on the format.
Parameters
----------
x : (N,) :class:`numpy.ndarray` or :obj:`None`
Vector to convert to NRRD vector string
Returns
-------
vector : :class:`str`
String containing NRRD vector
"""
# If vector is None or all elements are NaN, then return none
# Otherwise format the vector as normal
if x is None or np.all(np.isnan(x)):
return 'none'
else:
return format_vector(x)
def format_matrix(x):
"""Format a (M,N) :class:`numpy.ndarray` into a NRRD matrix string
See :ref:`user-guide:int matrix` and :ref:`user-guide:double matrix` for more information on the format.
Parameters
----------
x : (M,N) :class:`numpy.ndarray`
Matrix to convert to NRRD vector string
Returns
-------
matrix : :class:`str`
String containing NRRD matrix
"""
return ' '.join([format_vector(y) for y in x])
def format_optional_matrix(x):
"""Format a (M,N) :class:`numpy.ndarray` of :class:`float` into a NRRD optional matrix string
Function converts a (M,N) :class:`numpy.ndarray` of :class:`float` into a string using the NRRD matrix format. For
any rows of the matrix that contain all NaNs for each element, the row will be replaced with a 'none' indicating
the row has no vector.
See :ref:`user-guide:double matrix` for more information on the format.
.. note::
:obj:`x` must have a datatype of float because NaN's are only defined for floating point numbers.
Parameters
----------
x : (M,N) :class:`numpy.ndarray` of :class:`float`
Matrix to convert to NRRD vector string
Returns
-------
matrix : :class:`str`
String containing NRRD matrix
"""
return ' '.join([format_optional_vector(y) for y in x])
def format_number_list(x):
"""Format a (N,) :class:`numpy.ndarray` into a NRRD number list.
See :ref:`user-guide:int list` and :ref:`user-guide:double list` for more information on the format.
Parameters
----------
x : (N,) :class:`numpy.ndarray`
Vector to convert to NRRD number list string
Returns
-------
list : :class:`str`
String containing NRRD list
"""
return ' '.join([format_number(y) for y in x])
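# Quick usage sketch (illustrative):
# >>> format_number(0.1)
# '0.10000000000000001'
# >>> format_vector(np.array([1, 2, 3]))
# '(1,2,3)'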
| 4,627 |
pythonlibs/mantis/BlueEarth/handler.py
|
adoggie/Tibet.6
| 22 |
2023676
|
# coding:utf-8
import json
import datetime
from mantis.fundamental.application.app import instance
from vnpy.trader.vtObject import VtTickData
def get_device_message(message,ctx):
"""订阅的所有合约行情数据"""
topic = ctx.get('name') # 通道名称
data = message
device_id = topic.split('.')[-1] # 最后一项为设备编号 DeviceChannelPub = 'blue_earth.device.channel.pub.{device_id}'
message = json.loads(data)
table = instance.getProp('SubscribeTable')
if table:
ns_name = ctx['channel'].cfgs.get('data',{}).get('ns_name')
table.emit_message(ns_name,device_id,message)
| 593 |
API/serializers.py
|
lyonkvalid/Audifye
| 1 |
2025448
|
#from django.contrib.auth.models import User
from rest_framework import pagination, serializers, viewsets
from models.models import *
from authentication.models import *
from django.core.paginator import Paginator
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ["username", "email", "first_name", "last_name", "id", "is_active", "kyc", "date_joined"]
class KYCSerializer(serializers.ModelSerializer):
class Meta:
model = KYC
fields = "__all__"
class SettingsSerializer(serializers.ModelSerializer):
class Meta:
model = Settings
fields = "__all__"
class AudioPreferenceSerializer(serializers.ModelSerializer):
class Meta:
model = AudioPreference
fields = "__all__"
class DisplaySerializer(serializers.ModelSerializer):
class Meta:
model = Display
fields = "__all__"
class ArtistDataSerializer(serializers.ModelSerializer):
class Meta:
model = ArtistData
fields = "__all__"
class ProfileSerializer(serializers.ModelSerializer):
user = UserSerializer()
kyc = KYCSerializer()
display = DisplaySerializer()
artist = ArtistDataSerializer()
audio_preference = AudioPreferenceSerializer()
settings = SettingsSerializer()
class Meta:
model = Profile
fields = "__all__"
class MetaDataSerializer(serializers.ModelSerializer):
class Meta:
model = MetaData
fields = "__all__"
class SongSerializer(serializers.ModelSerializer):
user = UserSerializer()
meta_data = MetaDataSerializer()
class Meta:
model = Song
fields = "__all__"
class CollectionSerializer(serializers.ModelSerializer):
user = UserSerializer()
songs = serializers.SerializerMethodField('song_paginator')
meta_data = MetaDataSerializer()
class Meta:
model = Collection
fields = "__all__"
def song_paginator(self, object):
paginate = Paginator(object.songs.all(), 4)
songs = paginate.page(1)
return SongSerializer(songs, many=True).data
class CollectionHyperLinkSerializer(serializers.HyperlinkedModelSerializer):
user = UserSerializer()
songs = serializers.SerializerMethodField("song_paginator")
meta_data = MetaDataSerializer()
class Meta:
model = Collection
fields = ["url", "id", "user", "songs", "type", "date_add", "meta_data"]
def song_paginator(self, object):
paginate = object.songs.all()
paginator = pagination.PageNumberPagination()
songs = paginator.paginate_queryset(paginate, self.context["request"])
serializer = SongSerializer(songs, many=True, context={"request":self.context["request"]})
return paginator.get_paginated_response(serializer.data).data
class LibrarySerializer(serializers.ModelSerializer):
user = UserSerializer()
collections = CollectionSerializer(many=True)
class Meta:
model = Library
fields = "__all__"
class LibraryHyperLinkSerializer(serializers.HyperlinkedModelSerializer):
user = UserSerializer()
collections = serializers.SerializerMethodField("collection_paginator")
class Meta:
model = Library
fields = "__all__"
def collection_paginator(self, object):
paginate = object.collections.all()
paginator = pagination.PageNumberPagination()
collections = paginator.paginate_queryset(paginate, self.context["request"])
serializer = CollectionSerializer(collections, many=True, context={"request":self.context["request"]})
return paginator.get_paginated_response(serializer.data).data
class ArtistSerializer(serializers.ModelSerializer):
user = UserSerializer()
artist = ProfileSerializer()
class Meta:
model = Artist
fields = "__all__"
class PlaylistSerializer(serializers.ModelSerializer):
user = UserSerializer()
collection = CollectionSerializer()
class Meta:
model = Playlist
fields = "__all__"
class PlaylistHyperLinkSerializer(serializers.HyperlinkedModelSerializer):
collection = CollectionSerializer()
class Meta:
model = Playlist
fields = "__all__"
class MyLibraryHyperLinkSerializer(serializers.HyperlinkedModelSerializer):
user = UserSerializer()
playlists = PlaylistSerializer(many=True)
artists = ArtistSerializer(many=True)
class Meta:
model = MyLibrary
fields = "__all__"
| 4,269 |
Working/DownloadProjects.py
|
hehao98/CommentAnalysis
| 4 |
2025153
|
"""
Download projects from a project csv file and put them in [output_path]/projects
"""
import pandas as pd
import subprocess
import os
import argparse
from datetime import datetime
from multiprocessing import Pool
from termcolor import colored
def remove_non_code_files(path):
"""
Remove non-code files in the project dataset larger than 64KB,
to save storage space
"""
for root, dirs, files in os.walk(path):
files = [f for f in files if not f[0] == '.']
dirs[:] = [d for d in dirs if not d[0] == '.']
for file in files:
try:
full_path = os.path.join(root, file)
if file.endswith(".java") or file.endswith(".py") or file.endswith(".txt") or file.endswith(".md"):
continue
if os.path.getsize(full_path) <= 64*1024:
continue
os.remove(full_path)
print("Removed", full_path)
except FileNotFoundError:
print(colored("FileNotFoundError: {}".format(full_path), "red"))
except OSError as e:
print(colored(e, "red"))
def run_proc(index, url, name):
print(colored("Downloading Project {}...".format(index), "green"))
subprocess.call(
"git clone {}.git {}".format(url, name), shell=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"csv_path", help="Path to the CSV file storing project information")
parser.add_argument(
"output_path", help="Path where the downloaded projects will be stored")
parser.add_argument("-j", type=int, default=4,
help="Number of Jobs (Default 4)")
parser.add_argument("-d", action="store_true", help="Whether to delete non-code files after the download has finished")
args = vars(parser.parse_args())
csv_path = args["csv_path"]
output_path = args["output_path"]
num_job = args["j"]
clean_non_code_file = args["d"]
begin_time = datetime.now()
projects = pd.read_csv(csv_path)
os.chdir(output_path)
if not os.path.exists("projects/"):
os.mkdir("projects")
os.chdir("projects")
pool = Pool(num_job)
for index, row in projects.iterrows():
if os.path.exists(os.path.join(output_path, "projects", row["name"])):
print(colored("Skipping {} because the folder already exists...".format(row["name"]), "yellow"))
continue
pool.apply_async(run_proc, args=(index, row["url"], row["name"]))
pool.close()
pool.join()
if clean_non_code_file:
print(colored("Start Cleanning Files...", "yellow"))
remove_non_code_files(".")
print("Total running time: {}".format(datetime.now() - begin_time))
| 2,780 |
python-code/opencv-learning/tiny-apps/Face-Align/facemesh.py
|
juxiangwu/image-processing
| 13 |
2024015
|
# Face Mesh
# Written by <NAME> for Data Art class taught in ITP, NYU during fall 2017 by <NAME>.
# Based on Leon Eckerts code from the facemesh workshop - https://github.com/leoneckert/facemash-workshop
import cv2
import dlib
import sys, os, time, random
import numpy as np
class FaceMesh:
def __init__(self, pred):
self.detector = dlib.get_frontal_face_detector()
self.predictor = dlib.shape_predictor(pred)
def get_img(self, path):
print("[+] Opened image from:", path)
return cv2.imread(path)
def get_rects(self, img):
rects = self.detector(img)
print("[+] Number of faces found:", len(rects))
return rects
def get_landmarks(self, img, rect):
return np.matrix([[p.x, p.y] for p in self.predictor(img, rect).parts()])
# https://matthewearl.github.io/2015/07/28/switching-eds-with-python/
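# Procrustes alignment, in short: center both point sets on their centroids,
# normalize scale by the standard deviation, recover the rotation with an SVD,
# then pack translation, scale and rotation into one 3x3 affine matrix.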
def transformation_from_points(self, points1, points2):
points1 = points1.astype(np.float64)
points2 = points2.astype(np.float64)
c1 = np.mean(points1, axis=0)
c2 = np.mean(points2, axis=0)
points1 -= c1
points2 -= c2
s1 = np.std(points1)
s2 = np.std(points2)
points1 /= s1
points2 /= s2
U, S, Vt = np.linalg.svd(points1.T * points2)
R = (U * Vt).T
return np.vstack([np.hstack(((s2 / s1) * R,
c2.T - (s2 / s1) * R * c1.T)),
np.matrix([0., 0., 1.])])
def warp_im(self, im, M, dshape):
output_im = np.ones(dshape, dtype=im.dtype)*255
cv2.warpAffine(im,
M[:2],
(dshape[1], dshape[0]),
dst=output_im,
borderMode=cv2.BORDER_TRANSPARENT,
flags=cv2.WARP_INVERSE_MAP)
return output_im
def align(self, folderPath):
imgs = []
count = 0
for img_file in os.listdir(folderPath):
path = folderPath + "/" + img_file
print(count, ":", path)
if count == 0:
# get our reference image
ref_img = self.get_img(path)
rects = self.get_rects(ref_img)
if len(rects) > 0:
ref_rect = rects[0]
else:
continue
ref_landmarks = self.get_landmarks(ref_img, ref_rect)
average = ref_img.copy()
# cv2.namedWindow("average", cv2.WINDOW_NORMAL)
# cv2.imshow('average', average)
# cv2.waitKey(0)
else:
# do the thing
img = self.get_img(path)
rects = self.get_rects(img)
if len(rects) > 0:
rect = rects[0]
else:
continue
landmarks = self.get_landmarks(img, rect)
transformation_matrix = self.transformation_from_points(ref_landmarks, landmarks)
warped_img = self.warp_im(img, transformation_matrix, ref_img.shape)
# cv2.imshow('average', np.mean(alignedImgs, axis=0))
imgs.append(warped_img)
data = np.array(imgs)
self.average = np.mean(data, axis = 0)
cv2.imshow("average", self.average.astype('uint8'))
cv2.waitKey(1)
count += 1
return imgs
def getAverage(self):
return self.average
| 3,665 |
so-tetris-python/board/so_mino_helper.py
|
soaprasri/so-tetris-python
| 0 |
2025562
|
import random
from config.board_config import BoardConfig
import logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s | %(name)s | %(levelname)s | %(message)s')
logger = logging.getLogger(__name__)
def generate_new_shape() -> tuple[int, list[int], list[int]]:
"""Generate new shape
#1 (default):
hot_cell_y = [0,1,2,3]
hot_cell_x = [5,5,5,5]
X
X
X
X
#2:
hot_cell_y = [0,0,0,0]
hot_cell_x = [3,4,5,6]
XXXX
#3:
hot_cell_y = [0,1,0,1]
hot_cell_x = [4,4,5,5]
XX
XX
#4:
hot_cell_y = [0,0,1,1]
hot_cell_x = [4,5,5,6]
XX
XX
#5:
hot_cell_y = [0,1,1,2]
hot_cell_x = [4,4,5,5]
X
XX
X
#6:
hot_cell_y = [0,1,2,2]
hot_cell_x = [4,4,4,5]
X
X
XX
#7:
hot_cell_y = [1,0,1,1]
hot_cell_x = [4,4,5,6]
X
XXX
"""
shape_id = random.randint(1, 7)
logger.info("generating shape id => " + str(shape_id))
shape_color = shape_id
if(shape_id == 2):
shape_y_pos_list = [0, 0, 0, 0]
shape_x_pos_list = [3, 4, 5, 6]
elif(shape_id == 3):
shape_y_pos_list = [0, 1, 0, 1]
shape_x_pos_list = [4, 4, 5, 5]
elif(shape_id == 4):
shape_y_pos_list = [0, 0, 1, 1]
shape_x_pos_list = [4, 5, 5, 6]
elif(shape_id == 5):
shape_y_pos_list = [0, 1, 1, 2]
shape_x_pos_list = [4, 4, 5, 5]
elif(shape_id == 6):
shape_y_pos_list = [0, 1, 2, 2]
shape_x_pos_list = [4, 4, 4, 5]
elif(shape_id == 7):
shape_y_pos_list = [0, 1, 1, 1]
shape_x_pos_list = [4, 4, 5, 6]
else:
shape_y_pos_list = [0, 1, 2, 3]
shape_x_pos_list = [5, 5, 5, 5]
return (shape_color, shape_x_pos_list, shape_y_pos_list)
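# Illustrative call: color, xs, ys = generate_new_shape(); a roll of
# shape_id == 3 yields (3, [4,4,5,5], [0,1,0,1]), the 2x2 O-piece.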
def rotate_shape(shape_x_pos_list: list[int],
shape_y_pos_list: list[int]) -> tuple[list[int], list[int]]:
"""
hot_cell_y = [0,1,1,2]
hot_cell_x = [4,4,5,5]
X 0,0
XX 0,1 1,1
X 1,2
0
find mid of x series
find mid of y series
for all x values -> find diff of value from mid value
from mid fo y series
use the diff of x values to populate the y values
^^ repeat for all y values.
"""
sum_x_pos = 0
sum_y_pos = 0
for i in range(4):
sum_x_pos = sum_x_pos + shape_x_pos_list[i]
sum_y_pos = sum_y_pos + shape_y_pos_list[i]
# avg_x_pos = round(sum_x_pos/4)
# avg_y_pos = round(sum_y_pos/4)
avg_x_pos = int(sum_x_pos/4)
avg_y_pos = int(sum_y_pos/4)
next_mino_x_pos_list = [0] * 4
next_mino_y_pos_list = [0] * 4
for i in range(4):
x_shift_from_mid = shape_x_pos_list[i] - avg_x_pos
next_mino_y_pos_list[i] = avg_y_pos + x_shift_from_mid
y_shift_from_mid = shape_y_pos_list[i] - avg_y_pos
next_mino_x_pos_list[i] = avg_x_pos + y_shift_from_mid
return (next_mino_x_pos_list, next_mino_y_pos_list)
def move_shape(shape_x_pos_list: list[int],
shape_y_pos_list: list[int],
direction: int) -> tuple[list[int], list[int]]:
"""Move shape to next steps
Args:
shape_x_pos_list ([type]): [description]
shape_y_pos_list ([type]): [description]
direction ([type]): [description]
Returns:
[type]: [description]
"""
move_x = 0
move_y = 0
if(direction == BoardConfig.DIRECTION_UP):
next_shape = rotate_shape(shape_x_pos_list, shape_y_pos_list)
else:
if(direction == BoardConfig.DIRECTION_DOWN):
move_x = 0
move_y = 1
elif(direction == BoardConfig.DIRECTION_LEFT):
move_x = -1
move_y = 0
elif(direction == BoardConfig.DIRECTION_RIGHT):
move_x = 1
move_y = 0
next_shape_x = [0] * 4
next_shape_y = [0] * 4
for i in range(4):
logger.debug(f"is_move_allowed => {i}")
next_shape_x[i] = shape_x_pos_list[i] + move_x
next_shape_y[i] = shape_y_pos_list[i] + move_y
next_shape = (next_shape_x, next_shape_y)
return next_shape
| 4,383 |
google/cloud/pubsublite/cloudpubsub/message_transforms.py
|
LaudateCorpus1/python-pubsublite
| 15 |
2025722
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from google.api_core.exceptions import InvalidArgument
from google.protobuf.timestamp_pb2 import Timestamp # pytype: disable=pyi-error
from google.pubsub_v1 import PubsubMessage
from google.cloud.pubsublite.cloudpubsub import MessageTransformer
from google.cloud.pubsublite.internal import fast_serialize
from google.cloud.pubsublite.types import Partition, MessageMetadata
from google.cloud.pubsublite_v1 import AttributeValues, SequencedMessage, PubSubMessage
PUBSUB_LITE_EVENT_TIME = "x-goog-pubsublite-event-time"
def _encode_attribute_event_time_proto(ts: Timestamp) -> str:
return fast_serialize.dump([ts.seconds, ts.nanos])
def _decode_attribute_event_time_proto(attr: str) -> Timestamp:
try:
ts = Timestamp()
loaded = fast_serialize.load(attr)
ts.seconds = loaded[0]
ts.nanos = loaded[1]
return ts
except Exception: # noqa: E722
raise InvalidArgument("Invalid value for event time attribute.")
def encode_attribute_event_time(dt: datetime.datetime) -> str:
ts = Timestamp()
ts.FromDatetime(dt.astimezone(datetime.timezone.utc))
return _encode_attribute_event_time_proto(ts)
def decode_attribute_event_time(attr: str) -> datetime.datetime:
return (
_decode_attribute_event_time_proto(attr)
.ToDatetime()
.replace(tzinfo=datetime.timezone.utc)
)
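# Round-trip sketch (illustrative):
# attr = encode_attribute_event_time(
#     datetime.datetime(2020, 1, 1, tzinfo=datetime.timezone.utc))
# decode_attribute_event_time(attr)  # -> 2020-01-01 00:00:00+00:00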
def _parse_attributes(values: AttributeValues) -> str:
if not len(values.values) == 1:
raise InvalidArgument(
"Received an unparseable message with multiple values for an attribute."
)
value: bytes = values.values[0]
try:
return value.decode("utf-8")
except UnicodeError:
raise InvalidArgument(
"Received an unparseable message with a non-utf8 attribute."
)
def add_id_to_cps_subscribe_transformer(
partition: Partition, transformer: MessageTransformer
) -> MessageTransformer:
def add_id_to_message(source: SequencedMessage):
source_pb = source._pb
message: PubsubMessage = transformer.transform(source)
message_pb = message._pb
if message_pb.message_id:
raise InvalidArgument(
"Message after transforming has the message_id field set."
)
message_pb.message_id = MessageMetadata._encode_parts(
partition.value, source_pb.cursor.offset
)
return message
return MessageTransformer.of_callable(add_id_to_message)
def to_cps_subscribe_message(source: SequencedMessage) -> PubsubMessage:
source_pb = source._pb
out_pb = _to_cps_publish_message_proto(source_pb.message)
out_pb.publish_time.CopyFrom(source_pb.publish_time)
out = PubsubMessage()
out._pb = out_pb
return out
def _to_cps_publish_message_proto(
source: PubSubMessage.meta.pb,
) -> PubsubMessage.meta.pb:
out = PubsubMessage.meta.pb()
try:
out.ordering_key = source.key.decode("utf-8")
except UnicodeError:
raise InvalidArgument("Received an unparseable message with a non-utf8 key.")
if PUBSUB_LITE_EVENT_TIME in source.attributes:
raise InvalidArgument(
"Special timestamp attribute exists in wire message. Unable to parse message."
)
out.data = source.data
for key, values in source.attributes.items():
out.attributes[key] = _parse_attributes(values)
if source.HasField("event_time"):
out.attributes[PUBSUB_LITE_EVENT_TIME] = _encode_attribute_event_time_proto(
source.event_time
)
return out
def to_cps_publish_message(source: PubSubMessage) -> PubsubMessage:
out = PubsubMessage()
out._pb = _to_cps_publish_message_proto(source._pb)
return out
def from_cps_publish_message(source: PubsubMessage) -> PubSubMessage:
source_pb = source._pb
out = PubSubMessage()
out_pb = out._pb
if PUBSUB_LITE_EVENT_TIME in source_pb.attributes:
out_pb.event_time.CopyFrom(
_decode_attribute_event_time_proto(
source_pb.attributes[PUBSUB_LITE_EVENT_TIME]
)
)
out_pb.data = source_pb.data
out_pb.key = source_pb.ordering_key.encode("utf-8")
for key, value in source_pb.attributes.items():
if key != PUBSUB_LITE_EVENT_TIME:
out_pb.attributes[key].values.append(value.encode("utf-8"))
return out
| 4,968 |
API_demo/get_scalar-demo.py
|
GT-AcerZhang/tb-paddle
| 0 |
2025022
|
# coding=utf-8
import os
import numpy
def log_scalar(dir_path, tag):
"""log scalar in directory os.path.join(dir_path, tag)
:param tag: tag of scalar
:type tag: str
:return: None
"""
from tb_paddle import SummaryWriter
writer = SummaryWriter(logdir=os.path.join(dir_path, tag))
upper_bound = numpy.random.randint(low=5, high=20)
for i in range(1, upper_bound):
writer.add_scalar(tag, numpy.random.randint(100), i)
writer.close()
if __name__ == '__main__':
from tb_paddle import SummaryReader
parse_dir = "log"
reader = SummaryReader(parse_dir=parse_dir)
tag1 = "first"
log_scalar(parse_dir, tag1)
print("scalar: {}".format(tag1))
res1 = reader.get_scalar(tag1)
print(res1)
tag2 = "second"
print("scalar: {}".format(tag2))
log_scalar(parse_dir, tag2)
res2 = reader.get_scalar(tag2)
print(res2)
| 911 |
Chat/serveur.py
|
Yaya-Cout/Python
| 5 |
2025497
|
# import time
import getpass
import os
import subprocess
import sys
localhostname = os.uname()[1]
localuser = getpass.getuser()
distantuser = sys.argv[1]
distanthost = sys.argv[2]
message = sys.argv[3]
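# Usage (illustrative): python serveur.py <distant_user> <distant_host> "<message>"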
args = [
"ssh",
"-l",
distantuser,
distanthost,
"notify-send",
localuser + "@" + localhostname,
message,
]
subprocess.Popen(args=args)
# time.sleep(5)
| 386 |
Chapter07/distributed/worker1.py
|
tongni1975/Deep-Learning-with-TensorFlow-Second-Edition
| 54 |
2025503
|
import tensorflow as tf
from main import *
with tf.Session(worker1.target) as sess:
init = tf.global_variables_initializer()
a = tf.constant(10.0, dtype=tf.float32)
add_node = tf.multiply(a,b)
sess.run(init)
print(sess.run(add_node))
| 284 |
Fenier/apps/reviews/migrations/0003_auto_20210410_1248.py
|
hubertzk/Fenier
| 2 |
2025108
|
# Generated by Django 3.1.7 on 2021-04-10 12:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reviews', '0002_auto_20210410_0412'),
]
operations = [
migrations.AlterField(
model_name='reviews',
name='comments_img',
field=models.CharField(blank=True, max_length=80, null=True, verbose_name='appendix image'),
),
]
| 447 |
timeflux/helpers/port.py
|
HerySon/timeflux
| 123 |
2025207
|
"""A set of Port helpers."""
import json
import pandas as pd
from timeflux.helpers.clock import now
def make_event(label, data={}, serialize=True):
"""Create an event DataFrame
Args:
label (str): The event label.
data (dict): The optional data dictionary.
serialize (bool): Whether to JSON serialize the data or not.
Returns:
DataFrame
"""
if serialize:
data = json.dumps(data)
return pd.DataFrame([[label, data]], index=[now()], columns=["label", "data"])
def match_events(port, label):
"""Find the given label in an event DataFrame
Args:
port (Port): The event port.
label (str): The string to look for in the label column.
Returns:
DataFrame: The list of matched events, or `None` if there is no match.
"""
matches = None
if port.ready():
matches = port.data[port.data["label"] == label]
if matches.empty:
matches = None
return matches
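# Usage sketch ('events_in' is a hypothetical port):
# starts = match_events(events_in, "session_starts")  # DataFrame or None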
def get_meta(port, keys, default=None):
"""Find a deep value in a port's meta
Args:
port (Port): The event port.
keys (tuple|str): The hierarchical list of keys.
default: The default value if not found.
Returns:
The value, or `default` if not found.
"""
return traverse(port.meta, keys, default)
def traverse(dictionary, keys, default=None):
"""Find a deep value in a dictionary
Args:
dictionary (dict): The dictionary to search.
keys (tuple|str): The hierarchical list of keys.
default: The default value if not found.
Returns:
The value, or `default` if not found.
"""
if keys is None:
return default
if type(keys) == str:
keys = (keys,)
for key in keys:
try:
dictionary = dictionary[key]
except KeyError:
return default
return dictionary
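# Usage sketch (illustrative):
# traverse({"a": {"b": 1}}, ("a", "b"))     # -> 1
# traverse({"a": {"b": 1}}, ("a", "c"), 0)  # -> 0 (the default)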
| 1,905 |
test_scripts/test_imgAug.py
|
wwdok/mask2json
| 27 |
2025755
|
'''
@language: python
@Description: image augmentation with labels.
@version: beta
@Author: xiaoshuyui
@Date: 2020-07-17 15:49:30
LastEditors: xiaoshuyui
LastEditTime: 2020-10-23 09:40:03
'''
import sys
sys.path.append("..")
import os
from convertmask.utils.methods.getMultiShapes import getMultiShapes
from convertmask.utils.auglib.imgAug import (aug_labelimg, aug_labelme, imgFlip,
imgNoise, imgRotation, imgTranslation,
imgZoom)
from skimage import io
BASE_DIR = os.path.abspath(os.path.dirname(os.getcwd())) + os.sep + 'static'
# print(BASE_DIR)
imgPath = BASE_DIR + os.sep + 'multi_objs.jpg'
labelPath = BASE_DIR + os.sep + 'multi_objs.json'
imgPath2 = BASE_DIR + os.sep + 'label_255.png'
labelPath2 = BASE_DIR + os.sep + 'label_255.xml'
if __name__ == "__main__":
#### test1
imgFlip(imgPath, labelPath)
imgNoise(imgPath, labelPath)
imgRotation(imgPath, labelPath)
imgTranslation(imgPath, labelPath)
imgZoom(imgPath, labelPath, 1.2)
#### test2
n = imgNoise(imgPath, labelPath, flag=False)
tmp = n['noise']
img, processedImg = tmp.oriImg, tmp.processedImg
r = imgRotation(img, processedImg, flag=False, angle=15)
tmp = r['rotation']
img, processedImg = tmp.oriImg, tmp.processedImg
t = imgTranslation(img, processedImg, flag=False)
tmp = t['trans']
img, processedImg = tmp.oriImg, tmp.processedImg
f = imgFlip(img, processedImg, flag=False)
tmp = f['h_v']
img, processedImg = tmp.oriImg, tmp.processedImg
parent_path = os.path.dirname(imgPath)
if os.path.exists(parent_path + os.sep + 'jsons_'):
pass
else:
os.makedirs(parent_path + os.sep + 'jsons_')
fileName = 'test'
io.imsave(
parent_path + os.sep + 'jsons_' + os.sep + fileName + '_assumble.jpg',
img)
assumbleJson = getMultiShapes(parent_path + os.sep + 'jsons_' + os.sep +
fileName + '_assumble.jpg',
processedImg,
flag=True,
labelYamlPath='')
saveJsonPath = parent_path + os.sep + 'jsons_' + os.sep + fileName + '_assumble.json'
with open(saveJsonPath, 'w') as f:
f.write(assumbleJson)
#### test3
aug_labelme(imgPath, labelPath)
#### test4
# aug_labelimg(imgPath2, labelPath2)
| 2,449 |
TrieOOP.py
|
arthurharrison/SongScrapper
| 6 |
2024400
|
import re
class TreeTrie:
"""This Class is a TreeTrie (as the name says)
You can Enter with as many words as you want or even with none, and it will create a Tree on your given object
Functions in this class:
addTrie: Adds Words in the Tree
getDataVal: Gets a value of the given word
getData: Runs Percorra and PercorraTor, have the Real Data usable for what we need
getAll: Get all values of all words and returns a list with all the values
inTrie: Check if the word given is in the Tree
sumAll: Get the words and returns the total number of words in the Tree
percorra: Walks trough the Tree Trie and make a list with all the words saved in it
percorraTor: Creates a list with all the items found in the tree with value
Important Variables used in this class:
CurrentDict: It's the variable that is going to get in the Dictonary and explore every branch of it
progLetter: Is the Progression of the word, it starts with the first letter and in the end will be the whole word
"""
def __init__(self, *words):
self.tree = dict()
self.list = []
query = []
for word in words:
currentDict = self.tree
if(' ' in word):
query = word.split(' ')
continue
progLetter = ''
for letter in word:
progLetter = progLetter + letter
currentDict = currentDict.setdefault(progLetter,{})
if('value' in currentDict):
currentDict['value'] = currentDict['value'] + 1
else:
currentDict['value'] = 1
if(len(query)>0):
for word in query:
currentDict = self.tree
progLetter = ''
for letter in word:
progLetter = progLetter + letter
currentDict = currentDict.setdefault(progLetter,{})
if('value' in currentDict):
currentDict['value'] = currentDict['value'] + 1
else:
currentDict['value'] = 1
# no explicit return is needed: __init__ has already populated self.tree
def addTrie(self,*words):
"""Adds Words in the Tree
Args:
words: N given words to add in the Tree
Returns:
The Tree itself
"""
for word in words: #looping all words given
currentDict = self.tree
progLetter = ''
for letter in word: #creating every letter of the given word
progLetter = progLetter + letter
if(progLetter in currentDict): #This will enable that if the letter already exists it will just get in it, NOT CREATE A NEW
currentDict = currentDict[progLetter]
else:
currentDict = currentDict.setdefault(progLetter,{}) #And if not, it will Create one
if('value' in currentDict): #it will enter in the final letter of the word
currentDict['value'] = currentDict['value'] + 1 #If for some reason the word already exists, it will only Add +1 to the Counter
else:
currentDict['value'] = 1 #If not, it will create the Counter
return self.tree
def percorra(self, trie):
"""Walks trough the Tree Trie and make a list with all the words saved in it
Args: Tree Trie
Return: A List
"""
aux = list(trie)
tete = ''
for i in aux:
if(len(aux) == 1 and 'value' in aux):
continue
if('value' in aux):#if it encounters a 'word' it resets the counter
tete = ''
tete = i
self.list.append(tete)
if(type(trie[i]) == int):
continue
self.percorra(trie[i])
return self.list
def percorraTor(self, lista):
"""Creates a list with all the items found in the tree with value
Args: A List
Return: a List with only the items that are valuable
"""
wordList = []
while 'value' in lista:
lista.remove('value')
for i in lista:
if(self.inTrie(i)):
if(i in wordList):
continue
wordList.append(i)
return wordList
def getData(self, arg):
""" Runs both percorra and percorraTor, making more easy to use the script
Args:
arg: The Tree
Return:
A list containing only the items that have a value
"""
x = self.percorra(arg.tree)
data = self.percorraTor(x)
return data
def getDataVal(self, word):
""" Gets a value of a word
Args:
word: Given word to get its value
Return:
Word Value
"""
if(not(self.inTrie(word))):
return 0
currentDict = self.tree
progLetter = ''
for letter in word:
progLetter = progLetter + letter
if(progLetter in currentDict):
currentDict = currentDict[progLetter]
else:
return 0
if('value' in currentDict):
return currentDict['value']
else:
return 0
def getAll(self, lista):
""" Get all values of all words and returns a list with all the values
Args:
lista: A List of the Words
Return:
A List of the Value of the words
Observation: Remember to compare with the input list, because it saves only the values
"""
listona = []
for i in lista:
if(self.inTrie(i)):
listona.append(self.getDataVal(i))
return listona
def sumAll(self, lista):
""" Get the words and returns the total number of words in the Tree
Args:
lista: A List of Words
Return:
An integer: the total number of words in the Tree
"""
total = 0
dumpList = []
for i in lista:
if(self.inTrie(i) and i not in dumpList):
total += self.getDataVal(i)
dumpList.append(i)
return total
def inTrie(self, word):
""" Check if the word is in the Tree
Args:
word: The Word you want to check
Return:
A Boolean Value (True or False)
"""
currentDict = self.tree
progLetter = ''
for letter in word:
progLetter = progLetter + letter
#it runs every letter
if(progLetter in currentDict):
#it will run until the last letter found
currentDict = currentDict[progLetter]
else:
return False
else:
# if the loop completed, every letter of the word was found;
# check whether the final node carries a value
return 'value' in currentDict
if (__name__ == "__main__"):
#Debugger
y = "Oh it's too late"
x = TreeTrie('salada e bahia', y)
print(x.tree)
print(x.addTrie('sal','salvador','salada','salada'))
print(x.percorra(x.tree))
tt = x.percorraTor(x.percorra(x.tree))
print(tt)
print(x.getAll(tt))
print(x.getDataVal('salada'))
print(x.inTrie('sergipe'))
print(x.inTrie('sal'))
print(x.tree)
print(x.sumAll(x.percorra(x.tree)))
| 7,563 |
blockchain_connector/common/connector_common/work_order_delegate.py
|
ikegawa-koshi/avalon
| 127 |
2024601
|
# Copyright 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import random
import asyncio
import logging
from error_code.error_status import WorkOrderStatus
from avalon_sdk.connector.blockchains.common.contract_response \
import ContractResponse
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(message)s", level=logging.INFO)
class WorkOrderDelegate():
"""
Helper class to sync work orders between avalon and blockchain
"""
def __init__(self, jrpc_work_order, work_order_instance):
"""
Initialize the connector with instances of jrpc worker
implementation and blockchain worker implementation objects.
@param jrpc_work_order - JRPC implementation class object of work order
@param work_order_instance - work order blockchain implementation
class object
"""
self._work_order_proxy = work_order_instance
self._jrpc_work_order_instance = jrpc_work_order
def submit_work_order_and_get_result(self, work_order_id, worker_id,
requester_id, work_order_params):
"""
This function submits work order using work_order_submit direct API
"""
logging.info("About to submit work order to listener")
response = self._jrpc_work_order_instance\
.work_order_submit(work_order_id, worker_id, requester_id,
work_order_params, id=random.randint(0, 100000))
logging.info("Work order submit response : {}".format(
json.dumps(response, indent=4)))
if response and 'error' in response and \
response['error']['code'] == \
WorkOrderStatus.PENDING.value:
# get the work order result
work_order_result = self._jrpc_work_order_instance\
.work_order_get_result(work_order_id,
id=random.randint(0, 100000))
logging.info("Work order get result : {} "
.format(json.dumps(work_order_result, indent=4)))
return work_order_result
# In Synchronous work order processing response would
# contain result
elif response and ('result' in response or
'error' in response):
return response
else:
return None
def add_work_order_result_to_chain(self, work_order_id, response):
"""
This function adds a work order result to the blockchain
"""
result = self._work_order_proxy.work_order_complete(
work_order_id, json.dumps(response))
if result == ContractResponse.SUCCESS:
logging.info("Successfully added work order result to blockchain")
else:
logging.error("Error adding work order result to blockchain")
return result
| 3,418 |
opensfm/commands/match_features.py
|
RashidLadj/OpenSfM
| 1 |
2024016
|
from . import command
from opensfm.actions import match_features
class Command(command.CommandBase):
name = 'match_features'
help = 'Match features between image pairs'
def run_impl(self, dataset, args):
match_features.run_dataset(dataset)
def add_arguments_impl(self, parser):
pass
| 319 |
openhab_creator/models/configuration/location/indoor/buildings.py
|
DerOetzi/openhab_creator
| 1 |
2024391
|
from __future__ import annotations
from openhab_creator.models.configuration.location.indoor import (Indoor,
IndoorType)
@IndoorType()
class Building(Indoor):
@property
def area(self) -> str:
return 'Building'
@property
def typed(self) -> str:
return 'Building'
class BuildingType(IndoorType):
pass
@BuildingType()
class Garage(Building):
pass
@BuildingType()
class House(Building):
pass
@BuildingType()
class Shed(Building):
pass
@BuildingType()
class SummerHouse(Building):
pass
| 613 |
python/main.py
|
Jdubedition/docker-polyglot-world
| 0 |
2025519
|
from socket import gethostname
from fastapi import FastAPI
app = FastAPI()
@app.get("/")
def read_root():
return {"hello": "World", "from": gethostname()}
| 162 |
db_query/tests.py
|
hbolzan/django-sql-to-rest
| 1 |
2024474
|
from django.test import TestCase
import db_query.views as views
class BuildCustomSQLTestCase(TestCase):
"""
SQL builder is horrendously wrong and must be urgently refactored
"""
def test_get_format_keys(self):
self.assertEqual(views.get_format_keys("{a}, {b}, {c}"), ["a", "b", "c"])
def test_build_replace_dict(self):
result = views.build_replace_dict(["a", "b", "c"], {"a": 1, "b": 2, "c": 3, "d": 4}, None)
self.assertEqual(result, {"a": "1", "b": "2", "c": "3"})
def test_replace_query_params(self):
"""
String params must be replaced with corresponding arguments from params dictionary
"""
self.assertEqual(
views.replace_query_params("{a}, {b}, {c}", {"a": 1, "b": "BE", "c": 3}, None),
"1, 'BE', 3"
)
self.assertEqual(
views.replace_query_params("{a}, {b}, {c}", {"a": 1, "b": "BE"}, views.REPLACE_WITH_KEY),
"1, 'BE', c"
)
self.assertEqual(
views.replace_query_params("{a}, {b}, {c}", {"a": 1, "b": "BE"}, views.REPLACE_WITH_NULL),
"1, 'BE', null"
)
def test_build_where(self):
self.assertEqual(views.build_where("a", 1), "a = 1")
self.assertEqual(views.build_where(["a", "b"], [1, 2]), "a = 1 and b = 2")
def test_get_update_sql(self):
request_data = {
"data": {"a": 1, "b": "BE", "c": "X"}
}
self.assertEqual(
views.get_update_sql(None, "public.teste", "id", request_data, 5),
"update public.teste set a = 1, b = 'BE', c = 'X' where id = 5"
)
self.assertEqual(
views.get_update_sql(None, "public.teste", ["parent_id", "order"], request_data, [5, 1]),
"update public.teste set a = 1, b = 'BE', c = 'X' where parent_id = 5 and order = 1"
)
custom_sql = "update my.table set x = {x}, a = {a}, b = {b} where my_pk = {pk}"
self.assertEqual(
views.get_update_sql(custom_sql, "public.teste", "id", request_data, 5),
"update my.table set x = x, a = 1, b = 'BE' where my_pk = 5"
)
def test_get_insert_sql(self):
request_data = {
"data": {"a": 1, "b": "BE"}
}
self.assertEqual(
views.get_insert_sql(None, "public.teste", "id", request_data, None),
"insert into public.teste (a, b) values (1, 'BE')"
)
custom_sql = "insert into my.table (a, b, c) values ({a}, {b}, {c})"
self.assertEqual(
views.get_insert_sql(custom_sql, "public.teste", "id", request_data, None),
"insert into my.table (a, b, c) values (1, 'BE', null)"
)
def test_get_delete_sql(self):
self.assertEqual(
views.get_delete_sql("delete from my.table where id = {pk}", "my.table", "id", 3),
"delete from my.table where id = 3"
)
self.assertEqual(
views.get_delete_sql("", "my.table", "id", 3),
"delete from my.table where id = 3"
)
self.assertEqual(
views.get_delete_sql("", "my.table", ["parent_id", "order"], [1, 3]),
"delete from my.table where parent_id = 1 and order = 3"
)
self.assertEqual(
views.get_delete_sql("delete from teste.people where name = {pk}", "my.table", "name", "Fulano"),
"delete from teste.people where name = 'Fulano'"
)
self.assertEqual(
views.get_delete_sql(None, "my.table", "id", 3),
"delete from my.table where id = 3"
)
class BuildUpdateSQLTestCase(TestCase):
def test_build_update_sql(self):
request_data = {
"data": {"a": 1, "b": "BE"}
}
self.assertEqual(
views.build_update_sql("teste", request_data, "id", 5),
"update teste set a = 1, b = 'BE' where id = 5"
)
request_data_b = {
"data": {"a": 1, "b": "BE"}
}
self.assertEqual(
views.build_update_sql("teste", request_data_b, "id", "char-id"),
"update teste set a = 1, b = 'BE' where id = 'char-id'"
)
| 4,209 |
wafw00f/plugins/ptaf.py
|
biscuitehh/wafw00f
| 1 |
2025616
|
#!/usr/bin/env python
'''
Copyright (C) 2019, WAFW00F Developers.
See the LICENSE file for copying permission.
'''
NAME = 'PT Application Firewall (Positive Technologies)'
def is_waf(self):
schemes = [
self.matchContent(r'<h1.{0,10}?Forbidden'),
self.matchContent(r'<pre>Request.ID:.{0,10}?\d{4}\-(\d{2})+.{0,15}?pre>')
]
    return all(schemes)
| 417 |
extractfail.py
|
ihaveamac/FAIL
| 3 |
2022659
|
#!/usr/bin/env python2
import argparse, sys, os, binascii
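# FAIL archive layout, as parsed below (derived from this script's reads):
#   0x0: "FAIL" magic, 0x4: big-endian u16 version, 0x6: big-endian u16 file count,
#   then one 0x108-byte header per file ("FILE" magic + 0x100-byte name + u32 size),
#   followed by each file's raw data in header order.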
if len(sys.argv) != 3:
print 'pick a file to extract and a directory to put all the files in'
print ' extractfail.py myarchive.fail directory'
sys.exit()
if not os.path.isfile(sys.argv[1]):
print sys.argv[1] + ' doesn\'t exist'
sys.exit()
try:
os.makedirs(sys.argv[2])
except OSError:
if not os.path.isdir(sys.argv[2]):
raise
archive = open(sys.argv[1], "rb")
archive.seek(0x0)
if archive.read(0x4) != "FAIL":
print 'this isn\'t a "FAIL" archive'
sys.exit()
archive.seek(0x4)
version = int(binascii.hexlify(archive.read(2)), 16)
print 'archive version: ' + str(version)
if version != 1:
print 'archive version too new'
print 'this script can handle up to version 1'
sys.exit()
archive.seek(0x6)
numberOfFiles = int(binascii.hexlify(archive.read(2)), 16)
print 'number of files: ' + str(numberOfFiles)
toExtract = []
# filename, offset, size
currentOffset = 0x8 + (numberOfFiles * 0x108)
print currentOffset
for filenumber in range(0, numberOfFiles):
archive.seek(0x8 + (filenumber * 0x108))
fileheader_magic = archive.read(0x4)
if fileheader_magic != "FILE":
print 'incorrect magic found (should be "FILE")'
archive.close()
sys.exit()
fileheader_name = archive.read(0x100).rstrip('\0')
fileheader_size = int(binascii.hexlify(archive.read(0x4)), 16)
toExtract.append([fileheader_name, currentOffset, fileheader_size])
currentOffset += fileheader_size
# TODO: make this more memory efficient
for fileinfo in toExtract:
print 'writing: ' + fileinfo[0]
filehandle = open(sys.argv[2] + '/' + fileinfo[0], "wb")
archive.seek(fileinfo[1])
filedata = archive.read(fileinfo[2])
filehandle.write(filedata)
filehandle.close()
archive.close()
print 'looks like it worked'
print 'extracted '+str(len(toExtract))+' files'
sys.exit()
| 1,920 |
2019/Python/utils/grid_tools_2d.py
|
airstandley/AdventofCode
| 0 |
2025582
|
import math
class Point:
def __init__(self, x=0, y=0):
self.x = x
self.y = y
def __repr__(self):
return "Point({},{})".format(self.x, self.y)
def __hash__(self):
return hash((self.x, self.y))
def __eq__(self, other):
if isinstance(other, Point):
return other.x == self.x and other.y == self.y
else:
return False
def __add__(self, other):
if isinstance(other, Vector):
return Point(self.x + other.x, self.y + other.y)
else:
raise ValueError("Cannot add Point and {}".format(type(other)))
def __sub__(self, other):
if isinstance(other, Vector):
return Point(self.x - other.x, self.y - other.y)
else:
raise ValueError("Cannot subtract Point and {}".format(type(other)))
class Vector:
def __init__(self, x=0.0, y=0.0):
self.x = x
self.y = y
@property
def magnitude(self):
return math.sqrt(self.x**2 + self.y**2)
@property
def direction(self):
        # Return the unit/directional vector for this vector
return Vector(self.x/self.magnitude, self.y/self.magnitude)
def dot_product(self, other):
# X1*X2+Y1*Y2+Z1*Z2......
return self.x * other.x + self.y * other.y
def angle(self, other=None):
# Angle between this vector and (optionally) other
angle = math.degrees(math.atan2(self.y, self.x)) + 180
        # atan2 returns -pi (-180 deg) to pi (180 deg); adding 180 shifts the range to 0-360 deg
if other:
angle = angle - other.angle()
if angle < 0:
angle = 360 + angle
return angle
def rotate(self, angle):
theta = math.radians(angle)
x = self.x * math.cos(theta) - self.y * math.sin(theta)
y = self.x * math.sin(theta) + self.y * math.cos(theta)
self.x, self.y = x, y
# def alt_angle(self, other):
# angle = math.acos(
# self.dot_product(other)/(self.magnitude * other.magnitude)
# )
# return math.degrees(angle)
def nearest_integer(self):
# Return the nearest vector with only integer components
        return Vector(int(round(self.x)), int(round(self.y)))
def reduce(self):
"""
Given a vector remove all common divisors so that it's a "integer unit vector"
"""
if self.x == 0 and self.y == 0:
x, y = self.x, self.y
elif self.x == 0:
x = self.x
y = self.y / abs(self.y)
elif self.y == 0:
x = self.x / abs(self.x)
y = self.y
else:
x, y = self.x, self.y
while True:
for i in range(1, min(abs(x), abs(y)) + 1):
if x % i == 0 and y % i == 0 and i != 1:
                        x, y = x // i, y // i  # exact integer division keeps the components ints for the next range() pass
break
else:
break
return Vector(x, y)
def __repr__(self):
return "Vector({},{})".format(self.x, self.y)
def __hash__(self):
return hash((self.x, self.y))
def __eq__(self, other):
if isinstance(other, Vector):
return self.x == other.x and self.y == other.y
return False
def __add__(self, other):
if isinstance(other, Vector):
return Vector(self.x + other.x, self.y + other.y)
else:
return super().__add__(other)
def __mul__(self, other):
if isinstance(other, Vector):
return self.dot_product(other)
else:
return super().__mul__(other)
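# A quick sanity sketch of reduce() (values follow from the divisor loop above):
#   Vector(4, -6).reduce()  -> Vector(2, -3)
#   Vector(0, -7).reduce()  -> Vector(0, -1)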
| 3,622 |
questionbank/invites/tests/test_forms.py
|
SyafiqTermizi/questionbank
| 1 |
2025383
|
import pytest
from questionbank.invites.forms import InviteForm
pytestmark = pytest.mark.django_db
def test_invite_form_invalid(user, group):
    # Initializing the form with a user that already exists
form = InviteForm(data={
'username': user.username,
'email': user.email,
'roles': [group]
})
    # form should not be valid because the user already exists
    assert not form.is_valid()
    # form should contain username and email errors
assert form.errors['username'] == ['Username already exist']
assert form.errors['email'] == ['Email already exist']
def test_invite_form_valid(user, specialty, group):
    # Initializing the form with a user that doesn't exist
form = InviteForm(data={
'username': 'test',
'email': '<EMAIL>',
'roles': [group],
'specialty': specialty.id
})
    # form should be valid because the user doesn't exist yet
    assert form.is_valid()
    # form should not contain any errors
assert not form.errors
| 1,008 |
nonbonded/tests/test_nonbonded.py
|
SimonBoothroyd/nonbonded
| 5 |
2025322
|
def test_main_import():
# Ensure that the main package is importable.
import nonbonded
assert nonbonded is not None
| 130 |
opentargets/version.py
|
richardbrunt-tessella/opentargets-py
| 0 |
2024927
|
__pkgname__ = 'opentargets'
__author__ = 'OpenTargets Core Team <<EMAIL>>'
__author_email__ = '<EMAIL>'
__license__ = 'Apache License, Version 2.0'
__homepage__ = 'https://github.com/opentargets/opentargets-py'
__version__ = '3.1.15'
__api_major_version__ = '3'
__description__ = 'Client for Open Targets REST API at api.opentargets.io'
| 337 |
day4.py
|
WayneLiang/Python-lesson
| 0 |
2025877
|
import os
import numpy as np
import matplotlib.pyplot as plt
data_path = './data/bikeshare/'
data_filenames = ['2017-q1_trip_history_data.csv', '2017-q2_trip_history_data.csv',
'2017-q3_trip_history_data.csv', '2017-q4_trip_history_data.csv']
# Output directory for results
output_path = './output'
if not os.path.exists(output_path):
os.makedirs(output_path)
# Histogram parameters
hist_range = (0, 180)
n_bins = 12
def collect_and_process_data():
"""
    Step 1+2: data acquisition and processing
"""
year_duration_member_type_list = []
for data_filename in data_filenames:
data_file = os.path.join(data_path, data_filename)
data_arr = np.loadtxt(data_file, delimiter=',', dtype='str', skiprows=1)
        # Strip the surrounding double quotes
        # Ride duration
duration_col = np.core.defchararray.replace(data_arr[:, 0], '"', '')
duration_col = duration_col.reshape(-1, 1)
        # Member type
member_type_col = np.core.defchararray.replace(data_arr[:, -1], '"', '')
member_type_col = member_type_col.reshape(-1, 1)
duration_member_type_arr = np.concatenate([duration_col, member_type_col], axis=1)
year_duration_member_type_list.append(duration_member_type_arr)
year_duration_member_type_arr = np.concatenate(year_duration_member_type_list, axis=0)
member_arr = year_duration_member_type_arr[year_duration_member_type_arr[:, 1] == 'Member']
casual_arr = year_duration_member_type_arr[year_duration_member_type_arr[:, 1] == 'Casual']
year_member_duration = member_arr[:, 0].astype('float') / 1000 / 60
year_casual_duration = casual_arr[:, 0].astype('float') / 1000 / 60
return year_member_duration, year_casual_duration
def analyze_data(year_member_duration, year_casual_duration):
"""
    Step 3: data analysis
"""
m_duration_hist, m_bin_edges = np.histogram(year_member_duration, range=hist_range, bins=n_bins)
c_duration_hist, c_bin_edges = np.histogram(year_casual_duration, range=hist_range, bins=n_bins)
    print('Member histogram counts: {}, bin edges: {}'.format(m_duration_hist, m_bin_edges))
    print('Casual histogram counts: {}, bin edges: {}'.format(c_duration_hist, c_bin_edges))
def save_and_show_results(year_member_duration, year_casual_duration):
"""
    Step 4: present and save the results
"""
fig = plt.figure(figsize=(10, 5))
ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2, sharey=ax1)
    # Member histogram
ax1.hist(year_member_duration, range=hist_range, bins=n_bins)
ax1.set_xticks(range(0, 181, 15))
ax1.set_title('Member')
ax1.set_ylabel('Count')
    # Casual (non-member) histogram
ax2.hist(year_casual_duration, range=hist_range, bins=n_bins)
ax2.set_xticks(range(0, 181, 15))
ax2.set_title('Casual')
ax2.set_ylabel('Count')
plt.tight_layout()
plt.savefig(os.path.join(output_path, 'type_histogram.png'))
plt.show()
def main():
"""
    Main function
"""
    # Step 1 + 2: data acquisition and processing
year_member_duration, year_casual_duration = collect_and_process_data()
    # Step 3: data analysis
analyze_data(year_member_duration, year_casual_duration)
save_and_show_results(year_member_duration, year_casual_duration)
if __name__ == '__main__':
main()
| 3,139 |
app/api/v2/models/users.py
|
ogambakerubo/DeliverIT2
| 0 |
2025365
|
# DeliverIT2/app/api/v2/models/users.py
import os
import psycopg2
from psycopg2.extras import RealDictCursor
from datetime import datetime
from passlib.hash import sha256_crypt
DATABASE = os.environ.get("DATABASE", '')
USER = os.environ.get("USER", '')
PASSWORD = os.environ.get("PASSWORD", '')
HOST = os.environ.get("HOST", '')
PORT = os.environ.get("PORT", '')
class Users:
'''This class creates a blueprint for the user object'''
def __init__(self):
# create a database connection
try:
self.conn = psycopg2.connect(database=DATABASE,
user=USER,
password=PASSWORD,
host=HOST,
port=PORT)
self.conn.autocommit = True
self.cur = self.conn.cursor(cursor_factory=RealDictCursor)
print('message: Database Connection Successful')
except psycopg2.DatabaseError as err:
print('message: Something went wrong {}'.format(err))
def create_schemas(self):
# create user schemas
roles_table = "\
CREATE TABLE IF NOT EXISTS roles (\
role_id serial PRIMARY KEY,\
role_name VARCHAR(255) NOT NULL UNIQUE)"
# roles_table schema
self.cur.execute(roles_table)
roles_test_table = "CREATE TABLE IF NOT EXISTS roles_test (LIKE roles)"
# roles_test schema
self.cur.execute(roles_test_table)
users_table = "\
CREATE TABLE IF NOT EXISTS users (\
user_id serial PRIMARY KEY,\
role_id integer NOT NULL,\
username VARCHAR(255) NOT NULL UNIQUE,\
password VARCHAR(255) NOT NULL,\
email VARCHAR(255) NOT NULL UNIQUE,\
date_created VARCHAR(255) NOT NULL,\
date_changed VARCHAR(255),\
FOREIGN KEY(role_id) REFERENCES roles(role_id) ON UPDATE CASCADE ON DELETE CASCADE)"
# users_table schema
self.cur.execute(users_table)
users_test_table = "CREATE TABLE IF NOT EXISTS users_test (LIKE users)"
# users_test_table schema
self.cur.execute(users_test_table)
unsub_table = "\
CREATE TABLE IF NOT EXISTS unsub (\
user_id serial PRIMARY KEY,\
role_id integer NOT NULL,\
username VARCHAR(255) NOT NULL UNIQUE,\
password VARCHAR(255) NOT NULL,\
email VARCHAR(255) NOT NULL UNIQUE,\
date_created VARCHAR(255) NOT NULL,\
date_unsubbed VARCHAR(255) NOT NULL,\
FOREIGN KEY(role_id) REFERENCES roles(role_id) ON UPDATE CASCADE ON DELETE CASCADE)"
# unsub_table schema
self.cur.execute(unsub_table)
unsub_test_table = "CREATE TABLE IF NOT EXISTS unsub_test (LIKE unsub)"
# users_test_table schema
self.cur.execute(unsub_test_table)
print('message: tables created successfully')
def add_role(self):
# creates a new role of admin or regular user
self.regular = "regular"
self.admin = "admin"
# add role query
query = """ INSERT INTO roles(role_id, role_name)
VALUES(1, '{}'),(2, '{}') ON CONFLICT (role_id) DO NOTHING""".format(
self.regular,
self.admin
)
self.cur.execute(query, (self.regular, self.admin))
print('message: new role created')
def add_user(self, username, email, password):
# create a new regular user
self.role_id = 1 # regular user role id
self.username = ''.join(username.lower().split())
self.email = ''.join(email.lower().split())
self.password = <PASSWORD>5<PASSWORD>(str(password))#hash the password
self.date_created = str(datetime.now())
# check username query
checkusername_query = """ SELECT * from users WHERE username = '{}'""".format(
self.username)
self.cur.execute(checkusername_query)
if self.cur.rowcount > 0:
return "message: Username already in use, please try again"
# check email query
checkemail_query = """ SELECT * from users WHERE email = '{}'""".format(
self.email)
self.cur.execute(checkemail_query)
if self.cur.rowcount > 0:
return "message: Email already in use, please try again"
# add user query
query = """ INSERT INTO users(
role_id,
username,
password,
email,
date_created)
VALUES('{}', '{}', '{}', '{}', '{}')""".format(
self.role_id,
self.username,
self.password,
self.email,
self.date_created
)
self.cur.execute(query, (self.username, self.password, self.email))
return "message: new user created"
def get_user_by_email(self, email, password):
# retrieve a single user by their email address
self.password_candidate = password
query = """ SELECT * from users WHERE email = '{}'""".format(email)
self.cur.execute(query)
if self.cur.rowcount > 0:
# get stored hash
user_data = self.cur.fetchone()
self.actual_password = user_data['password']
# compare passwords
if sha256_crypt.verify(self.password_candidate, self.actual_password):
# passed
return user_data
else:
# failed
return None
else:
return None
def add_admin(self, username, email, password):
# create an admin
self.role_id = 2 # admin role id
self.username = ''.join(username.lower().split())
self.email = ''.join(email.lower().split())
self.password = <PASSWORD>(str(password))
self.date_created = str(datetime.now())
# check username query
checkusername_query = """ SELECT * from users WHERE username = '{}'""".format(
self.username)
self.cur.execute(checkusername_query)
if self.cur.rowcount > 0:
return "message: Username already in use, please try again"
# check email query
checkemail_query = """ SELECT * from users WHERE email = '{}'""".format(
self.email)
self.cur.execute(checkemail_query)
if self.cur.rowcount > 0:
return "message: Email already in use, please try again"
# add admin query
query = """ INSERT INTO users(
role_id,
username,
password,
email,
date_created)
VALUES('{}', '{}', '{}', '{}', '{}')""".format(
self.role_id,
self.username,
self.password,
self.email,
self.date_created
)
self.cur.execute(query, (self.username, self.password, self.email))
return "message: new admin created"
def get_admin_by_email(self, email, password):
# retrieve a single user by their email address
self.password_candidate = password
query = """ SELECT * from users WHERE email = '{}'""".format(email)
self.cur.execute(query)
if self.cur.rowcount > 0:
# get stored hash
user_data = self.cur.fetchone()
self.actual_password = user_data['password']
# compare passwords
if sha256_crypt.verify(self.password_candidate, self.actual_password):
# passed
return user_data
else:
# failed
return None
else:
return None
def user_access(self, email):
#retrieve user by email
query = """ SELECT * from users WHERE email = '{}'""".format(email)
self.cur.execute(query)
if self.cur.rowcount > 0:
user_data = self.cur.fetchone()
return user_data
return None
def update_user(self, username, email, password):
#update user information
self.username = ''.join(username.lower().split())
self.email = email
self.password = <PASSWORD>(str(password))
self.date_changed = str(datetime.now())
# check username query
checkusername_query = """ SELECT * from users WHERE username = '{}'""".format(
self.username)
self.cur.execute(checkusername_query)
user_data = self.cur.fetchone()
if self.cur.rowcount > 0:
if self.username == user_data['username']:
return None
else:
return "message: Username already in use, please try again"
# add admin query
query = """ UPDATE users SET(
username,
password,
date_changed)=
('{}', '{}', '{}') WHERE email = '{}'""".format(
self.username,
self.password,
self.date_changed,
self.email
)
self.cur.execute(query, (self.username, self.password, self.email))
return "message: user updated"
def get_all_users(self):
#get all users and their details
get_all_users_query = """ SELECT user_id, username, email, date_created, date_changed
FROM users WHERE role_id = '1'"""
self.cur.execute(get_all_users_query)
if self.cur.rowcount > 0:
all_users = self.cur.fetchall()
return all_users
| 9,546 |
SeptemberCodingChallenge/Day-17-Robot-Bounded-In-Circle.py
|
muskaan-codes/leetcoding-challenges
| 0 |
2025568
|
class Solution(object):
def isRobotBounded(self, instructions):
"""
:type instructions: str
:rtype: bool
"""
self.action = [[-1, 0], [0, -1], [1, 0], [0, 1]]
self.index = 0
self.currLocation = [50, 50]
def turnLeft():
if self.index != 0:
self.index -= 1
else:
self.index = 3
def turnRight():
if self.index != 3:
self.index += 1
else:
self.index = 0
def goStraight():
c, r = self.action[self.index]
self.currLocation = [self.currLocation[0]+c, self.currLocation[1]+r]
for i in instructions:
if i == 'L':
turnLeft()
elif i == 'R':
turnRight()
else:
goStraight()
        for x in self.currLocation:  # the robot has already left the bounded plane here
if x < 0 or x > 100:
return False
if self.currLocation == [50, 50] or self.index != 0:
return True
else:
return False
| 1,207 |
pyqt/pyqt5-master/src/windows/gitanim.py
|
Ding-zhenke/Dcount-s-notebook
| 0 |
2022748
|
'''
Load a GIF animation
using QMovie
'''
import sys
from PyQt5.QtWidgets import QApplication, QLabel ,QWidget
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QMovie
class LoadingGif(QWidget):
def __init__(self):
super().__init__()
self.label = QLabel("",self)
self.setFixedSize(128,128)
self.setWindowFlags(Qt.Dialog | Qt.CustomizeWindowHint)
self.movie = QMovie('./images/loading.gif')
self.label.setMovie(self.movie)
self.movie.start()
if __name__ == "__main__":
app = QApplication(sys.argv)
form = LoadingGif()
form.show()
sys.exit(app.exec_())
| 618 |
test.py
|
FlantasticDan/mac-remote-command
| 0 |
2024640
|
from mac_remote.ping import EndlessPing
if __name__ == '__main__':
EndlessPing('https://mac-commander.deta.dev/')
| 118 |
src/face_check/settings/utils.py
|
tarvitz/face-check
| 0 |
2025215
|
"""
Very straightforward and simple utilities
to make working with settings easier.
"""
import os
import ast
import pkg_resources
PROJECT_ROOT_DIR = os.path.dirname(os.path.dirname(__file__))
def _get_literal_from_env(env_key, fallback):
"""
reads environment variable and tries to covert it to python literal
:param str env_key:
:param object fallback: fallback value
:rtype: object
:return: python literal
"""
try:
value = ast.literal_eval(os.environ.get(env_key))
return value
except ValueError:
return fallback
def get_env_bool(env_key, fallback):
"""
reads boolean literal from environment.
    Please note that 0, [], {}, '' are treated as False
:param str env_key: key to read
:param bool fallback: fallback value
:rtype: bool
:return: environment value typed in bool
"""
assert isinstance(fallback, bool), "fallback should be bool instance"
return bool(_get_literal_from_env(env_key, fallback))
def get_env_int(env_key, fallback):
"""
    reads an integer literal from environment.
:param str env_key: key to read
:param int fallback: fallback value
:rtype: int
    :return: environment value typed in int
"""
assert isinstance(fallback, int), "fallback should be int instance"
return _get_literal_from_env(env_key, fallback)
def get_env_string(env_key, fallback):
"""
    reads a string value from environment (does not use literal evaluation,
    since the environment always returns a string value).
:param str env_key: key to read
:param str fallback: fallback value
:rtype: str
:return: environment value typed in string
"""
assert isinstance(fallback, str), "fallback should be str instance"
return os.environ.get(env_key) or fallback
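# Usage sketch (the environment keys shown here are hypothetical):
#   os.environ['FACE_CHECK_DEBUG'] = '1'
#   get_env_bool('FACE_CHECK_DEBUG', False)    # -> True
#   get_env_int('FACE_CHECK_WORKERS', 4)       # -> 4 when the key is unset
#   get_env_string('FACE_CHECK_NAME', 'face-check')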
def rel(path, base_dir=PROJECT_ROOT_DIR):
return os.path.join(base_dir, path)
def requires(dependencies, *, validator=all):
"""
Checks if package has dependencies according to `validator` logic
:param list[str] | tuple[str] dependencies: list of dependencies without
its versions, for example: raven, requests, wheel, setuptools, etc
:param callable validator: boolean sequence validator, by default
it's :py:func:`all`. Recommended to use:
- :py:func:`all`
- :py:func:`any`
:return: decorator
"""
#: a bit hacky, however setuptools is a root dependency anyway
distribution = pkg_resources.get_distribution('setuptools')
has_resource = distribution.has_resource
def decorator(func):
def wrapper(*args, **kwargs):
have_all_dependencies = validator(map(has_resource, dependencies))
if have_all_dependencies:
return func(*args, **kwargs)
return
return wrapper
return decorator
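# Usage sketch for requires() (the decorated function below is hypothetical):
#   @requires(['raven'])
#   def capture_exception(exc):
#       ...  # body runs only when the raven dependency is available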
| 2,948 |
PyRods/examples/file_move.py
|
kaldrill/irodspython
| 0 |
2023301
|
# Copyright (c) 2013, University of Liverpool
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# Author : <NAME>
#
from irods import *
if __name__ == "__main__":
status, myEnv = getRodsEnv()
conn, errMsg = rcConnect(myEnv.rodsHost, myEnv.rodsPort,
myEnv.rodsUserName, myEnv.rodsZone)
status = clientLogin(conn)
path1 = myEnv.rodsHome + '/testmove.txt'
tmp_coll = myEnv.rodsHome + "/testMove"
status = createCollection(conn, tmp_coll)
path2 = tmp_coll + "/testmove2.txt"
f = irodsOpen(conn, path1, 'w')
f.write("Test Move")
f.close()
f = irodsOpen(conn, path1, 'r')
print "Content before move:", f.read()
print
f.close()
#status = f.move(path2)
status = irodsMove(conn, path1, path2)
f = irodsOpen(conn, path2, 'r')
print "Content after move:", f.read()
print
f.close()
status = deleteCollection(conn, tmp_coll)
#f.delete()
conn.disconnect()
| 1,617 |
lokii/utils.py
|
dorukerenaktas/lok
| 1 |
2024305
|
def print_start(curr: int, total: int) -> None:
completed = curr / total
print('|{}|'.format(
''.join([
'#' if i / 100 <= completed else '_'
for i in range(100)
])
), end='\r', flush=True)
def print_process(curr: int, total: int) -> None:
completed = curr / total
print('|{}|'.format(
''.join([
'#' if i / 100 <= completed else '_'
for i in range(100)
])
), end='\r', flush=True)
| 487 |
snakypy/zshpower/prompt/sections/ember.py
|
williamcanin/zshpower
| 10 |
2024715
|
from contextlib import suppress
from os import getcwd
from os.path import join
from snakypy.helpers.files import read_json
from snakypy.zshpower.config.base import Base
from snakypy.zshpower.prompt.sections.utils import (
Color,
Version,
element_spacing,
separator,
symbol_ssh,
)
from snakypy.zshpower.utils.catch import get_key, verify_objects
class Ember(Version, Base):
def __init__(self, *args):
super(Ember, self).__init__()
self.args: tuple = args
self.key = "ember"
self.shorten = "ember-"
self.finder = {
"extensions": [],
"folders": ("node_modules",),
"files": [
join("node_modules", "ember-cli", "package.json"),
"ember-cli-build.js",
],
}
def get_version(self, space_elem: str = " "):
enable = get_key(self.args[0], self.key, "version", "enable")
symbol = symbol_ssh(get_key(self.args[0], self.key, "symbol"), self.shorten)
color = (
get_key(self.args[0], self.key, "color")
if get_key(self.args[0], "general", "color", "enable") is True
else "negative"
)
prefix_color = get_key(self.args[0], self.key, "prefix", "color")
prefix_text = element_spacing(get_key(self.args[0], self.key, "prefix", "text"))
micro_version_enable = get_key(
self.args[0], self.key, "version", "micro", "enable"
)
if enable is True and verify_objects(getcwd(), data=self.finder) is True:
with suppress(FileNotFoundError):
parsed = read_json(self.finder["files"][0])
if "version" not in parsed:
return ""
version = f"{parsed['version']}{space_elem}"
# # Using subprocess
# command = run(
# f"""grep '"version":' {self.files[1]} | cut -d\\" -f4""",
# capture_output=True,
# shell=True,
# text=True,
# )
# version = command.stdout.replace("\n", "").strip()
version = version.replace("\n", "").strip()
prefix = f"{Color(prefix_color)}{prefix_text}{Color().NONE}"
if micro_version_enable is True:
version = f"{'{0[0]}.{0[1]}.{0[2]}'.format(version.split('.'))}"
else:
version = f"{'{0[0]}.{0[1]}'.format(version.split('.'))}"
return str(
(
f"{separator(self.args[0])}{prefix}"
f"{Color(color)}{symbol}"
f"{version}{space_elem}{Color().NONE}"
)
)
return ""
def __str__(self):
return self.get_version()
| 2,863 |
kenning/modelwrappers/frameworks/tensorflow.py
|
antmicro/edge-ai-tester
| 20 |
2025170
|
import numpy as np
import tensorflow as tf
from pathlib import Path
from kenning.core.model import ModelWrapper
from kenning.core.dataset import Dataset
class TensorFlowWrapper(ModelWrapper):
def __init__(
self,
modelpath: Path,
dataset: Dataset,
from_file: bool,
inputspec: tf.TensorSpec):
"""
Creates the TensorFlow model wrapper.
TensorFlow models require input shape specification in a form of
TensorSpec to serialize the model to ONNX.
Parameters
----------
modelpath : Path
The path to the model
dataset : Dataset
The dataset to verify the inference
from_file : bool
True if the model should be loaded from file
inputspec : tf.TensorSpec
Specification of the input tensor dimensionality and type (used for
ONNX conversion)
"""
self.inputspec = inputspec
super().__init__(modelpath, dataset, from_file)
def load_model(self, modelpath):
tf.keras.backend.clear_session()
if hasattr(self, 'model') and self.model is not None:
del self.model
self.model = tf.keras.models.load_model(str(modelpath))
print(self.model.summary())
def save_model(self, modelpath):
self.model.save(modelpath)
def preprocess_input(self, X):
return np.array(X, dtype='float32')
def run_inference(self, X):
return self.model.predict(X)
def get_framework_and_version(self):
return ('tensorflow', tf.__version__)
def save_to_onnx(self, modelpath):
import tf2onnx
modelproto, _ = tf2onnx.convert.from_keras(
self.model,
input_signature=self.inputspec,
output_path=modelpath
)
def convert_input_to_bytes(self, inputdata):
return inputdata.tobytes()
def convert_output_from_bytes(self, outputdata):
result = []
singleoutputsize = self.numclasses * np.dtype(np.float32).itemsize
for ind in range(0, len(outputdata), singleoutputsize):
arr = np.frombuffer(
outputdata[ind:ind + singleoutputsize],
dtype=np.float32
)
result.append(arr)
return result
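# Instantiation sketch (paths, shapes and the dataset object are hypothetical;
# concrete wrappers in kenning subclass this class and supply real values):
#   spec = (tf.TensorSpec((1, 224, 224, 3), tf.float32, name='input_1'),)
#   wrapper = TensorFlowWrapper(Path('model.h5'), dataset, True, spec)
#   preds = wrapper.run_inference(wrapper.preprocess_input(X))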
| 2,334 |
metric/textio/logfile.py
|
dgketchum/MT_Rsense
| 5 |
2025798
|
import os, time
class logfile:
"""
    log files may be used to have your scripts record progress so they
    can be verified and troubleshot
    this simple class has just three methods:
    __init__() opens or creates a new logfile (automatic)
    entry() adds timestamped entries to the logfile
    close() closes the logfile.
"""
def __init__(self, logfile_path, overwrite=True):
""" initializes the logfile with a header and correct writing mode"""
if overwrite or not os.path.exists(logfile_path):
fhandle = open(logfile_path, 'w+')
else:
fhandle = open(logfile_path, 'a')
fhandle.write('-----------------------------------------------------\n')
fhandle.write('Initialized: ' + time.strftime('%Y-%b-%d-%H:%M:%S') + '\n')
fhandle.write('-----------------------------------------------------\n')
self.fhandle = fhandle
return
def entry(self, entry):
""" creates an entry in log file"""
timestamp = time.strftime('%Y-%b-%d-%H:%M:%S')
print(entry)
self.fhandle.write('{0}: {1}\n'.format(timestamp, entry))
return
def close(self):
"""Closes the logfile"""
self.entry("Program Terminated")
self.fhandle.close()
return
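# Usage sketch (the logfile path below is hypothetical):
#   log = logfile('/tmp/metric_run.log', overwrite=True)
#   log.entry('starting METRIC run')
#   log.close()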
| 1,328 |
dynamic_validation/tests/test_templatetags.py
|
imtapps/django-dynamic-validation
| 0 |
2023305
|
from django.utils import unittest
from django.template import Context, Template, TemplateSyntaxError
import mock
__all__ = ('DynamicViolationTagTests', )
class DynamicViolationTagTests(unittest.TestCase):
@mock.patch('dynamic_validation.models.Violation.objects.get_by_trigger_model')
def test_violations_for_adds_violation_to_context(self, get_by_trigger_model):
template = Template("""
{% load dynamic_validation_tags %}
{% violations_for validation_object as violations %}
{% for violation in violations %}
{{ violation }}
{% endfor %}
""")
validation_object = mock.sentinel.validation_object
get_by_trigger_model.return_value = ['one', 'two', 'three']
result = template.render(Context(dict(validation_object=validation_object)))
get_by_trigger_model.assert_called_once_with(validation_object)
self.assertTrue("one" in result)
self.assertTrue("two" in result)
self.assertTrue("three" in result)
@mock.patch('dynamic_validation.models.ViolationsWrapper')
@mock.patch('dynamic_validation.models.Violation.objects.get_by_trigger_model')
def test_violation_tag_wraps_query_results_in_violations_wrapper(self, get_by_object, wrapper_class):
validation_object = mock.sentinel.validation_object
template = Template("""
{% load dynamic_validation_tags %}
{% violations_for validation_object as violations %}
""")
context = dict(validation_object=validation_object)
template.render(Context(context))
wrapper_class.assert_called_once_with(get_by_object.return_value)
self.assertEqual(context['violations'], wrapper_class.return_value)
@mock.patch('dynamic_validation.models.Violation.objects.get_by_trigger_model')
def test_violations_for_tag_can_resolve_callable_variable_for_violation_object(self, get_by_trigger_model):
template = Template("""
{% load dynamic_validation_tags %}
{% violations_for get_validation_obj as violations %}
{% for violation in violations %}
{{ violation }}
{% endfor %}
""")
validation_object = mock.sentinel.validation_object
def get_validation_object():
return validation_object
get_by_trigger_model.return_value = ['one', 'two', 'three']
result = template.render(Context(dict(get_validation_obj=get_validation_object)))
get_by_trigger_model.assert_called_once_with(validation_object)
self.assertTrue("one" in result)
self.assertTrue("two" in result)
self.assertTrue("three" in result)
def test_calling_template_tag_without_var_name_raises_template_syntax_error(self):
with self.assertRaises(TemplateSyntaxError):
Template("""
{% load dynamic_validation_tags %}
{% violations_for validation_object %}
""")
@mock.patch('dynamic_validation.models.Violation.objects.get_by_trigger_model')
def test_calling_template_tag_with_silent_indicator_wont_blow_up(self, get_by_trigger_model):
template = Template("""
{% load dynamic_validation_tags %}
{% violations_for get_validation_obj as violations silent_indicator %}
{% for violation in violations %}
{{ violation }}
{% endfor %}
""")
validation_object = mock.sentinel.validation_object
def get_validation_object():
return validation_object
violation_one = mock.Mock(rule=mock.Mock(dynamic_fields={'silent': True}))
violation_one.__str__ = mock.Mock(return_value="Joel")
violation_two = mock.Mock(rule=mock.Mock(dynamic_fields={'silent': False}))
violation_two.__str__ = mock.Mock(return_value="Matt")
violation_three = mock.Mock(rule=mock.Mock(dynamic_fields={'silent': False}))
violation_three.__str__ = mock.Mock(return_value="Jarrod")
get_by_trigger_model.return_value = [violation_one, violation_two, violation_three]
result = template.render(Context(dict(get_validation_obj=get_validation_object, silent_indicator=True)))
get_by_trigger_model.assert_called_once_with(validation_object)
self.assertTrue("Matt" in result)
self.assertTrue("Jarrod" in result)
self.assertTrue("Joel" not in result)
def test_calling_template_tag_without_validation_object_raises_template_syntax_error(self):
with self.assertRaises(TemplateSyntaxError):
Template("""
{% load dynamic_validation_tags %}
{% violations_for as validation_object %}
""")
@mock.patch('dynamic_validation.models.Violation.objects.get_by_trigger_model', mock.MagicMock())
def test_returns_empty_string_when_template_variable_does_not_exist(self):
template = Template("""
{% load dynamic_validation_tags %}
{% violations_for get_validation_obj as violations %}
""")
result = template.render(Context({}))
self.assertEqual('', result.strip())
| 5,210 |
brewtils/queues.py
|
scott-taubman/brewtils
| 7 |
2025701
|
# -*- coding: utf-8 -*-
"""This module currently exists to maintain backwards compatibility."""
import warnings
from brewtils.pika import PikaClient
__all__ = ["PikaClient"]
warnings.warn(
"This module has been migrated to brewtils.pika and will be removed in a future "
"release. Please import directly from the new module.",
DeprecationWarning,
stacklevel=2,
)
| 382 |
python/download_images_from_url.py
|
RenanMsV/VR-QR-Codes-List
| 0 |
2025535
|
import urllib.request
lines = open('urls.txt').readlines()
class AppURLopener(urllib.request.FancyURLopener):
version = "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.69 Safari/537.36"
urllib._urlopener = AppURLopener()
urllib._urlopener.retrieve("https://www.hypergridbusiness.com/wp-content/uploads/2015/10/Alian-Cardboard-147x150.jpg", "images/bar.jpg")
for line in lines:
print("Downloading: images/{} {}".format(line.split('/')[-1].strip(), line))
urllib._urlopener.retrieve(line, "images/{}".format(line.split('/')[-1].strip()))
| 590 |
python/programmers/challenges/9-graph/1-furthest-node.py
|
bum12ark/algorithm
| 1 |
2025763
|
"""
* Furthest Node
There is a graph with n nodes, each numbered from 1 to n.
We want to count how many nodes are furthest away from node 1.
The furthest nodes are those whose shortest path from node 1 uses the most edges.
Given the node count n and a 2-D array vertex describing the edges, write a
solution function that returns how many nodes are furthest from node 1.
Example
Input: n = 6, vertex = [[3, 6], [4, 3], [3, 2], [1, 3], [1, 2], [2, 4], [5, 2]]
Output: 3
"""
import collections
def solution(n, vertex):
    # BFS from node 1; the answer is the number of nodes at maximum distance
    graph = collections.defaultdict(list)
    for a, b in vertex:
        graph[a].append(b)
        graph[b].append(a)
    distance = {1: 0}
    queue = collections.deque([1])
    while queue:
        node = queue.popleft()
        for neighbor in graph[node]:
            if neighbor not in distance:
                distance[neighbor] = distance[node] + 1
                queue.append(neighbor)
    farthest = max(distance.values())
    return sum(1 for d in distance.values() if d == farthest)
if __name__ == '__main__':
print(solution(6, [[3, 6], [4, 3], [3, 2], [1, 3], [1, 2], [2, 4], [5, 2]]))
| 532 |
txdav/common/datastore/podding/migration/sync_metadata.py
|
eventable/CalendarServer
| 1 |
2024884
|
##
# Copyright (c) 2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twext.enterprise.dal.record import Record, fromTable
from twext.enterprise.dal.syntax import Parameter, Delete
from twisted.internet.defer import inlineCallbacks
from txdav.common.datastore.sql_tables import schema
"""
Module that manages store-level metadata objects used during the migration process.
"""
class CalendarMigrationRecord(Record, fromTable(schema.CALENDAR_MIGRATION)):
"""
@DynamicAttrs
L{Record} for L{schema.CALENDAR_MIGRATION}.
"""
@classmethod
def deleteremotes(cls, txn, homeid, remotes):
return Delete(
From=cls.table,
Where=(cls.calendarHomeResourceID == homeid).And(
cls.remoteResourceID.In(Parameter("remotes", len(remotes)))
),
).on(txn, remotes=remotes)
class CalendarObjectMigrationRecord(Record, fromTable(schema.CALENDAR_OBJECT_MIGRATION)):
"""
@DynamicAttrs
L{Record} for L{schema.CALENDAR_OBJECT_MIGRATION}.
"""
pass
class AttachmentMigrationRecord(Record, fromTable(schema.ATTACHMENT_MIGRATION)):
"""
@DynamicAttrs
L{Record} for L{schema.ATTACHMENT_MIGRATION}.
"""
pass
| 1,771 |
ttkthemes-3.2.2/tests/test_utils.py
|
Zsl-w/caigou
| 0 |
2024930
|
"""
Author: RedFantom
License: GNU GPLv3
Copyright (c) 2017-2018 RedFantom
"""
from unittest import TestCase
from ttkthemes import _utils as utils
import os
class TestUtils(TestCase):
def assertPathEquals(self, a, b):
if hasattr(os.path, 'samefile'):
self.assertTrue(os.path.samefile(a, b))
else:
# On windows, os.path.normcase lowercases because 'ASD' and 'asd'
# should be treated equally
self.assertEqual(os.path.normcase(a), os.path.normcase(b))
def test_temporary_chdir(self):
dir1 = os.getcwd()
with utils.temporary_chdir(utils.get_temp_directory()):
dir2 = os.getcwd()
dir3 = os.getcwd()
self.assertPathEquals(dir1, dir3)
self.assertPathEquals(dir2, utils.get_temp_directory())
with self.assertRaises(RuntimeError):
with utils.temporary_chdir(utils.get_temp_directory()):
raise RuntimeError()
dir4 = os.getcwd()
self.assertPathEquals(dir1, dir4)
def test_get_file_directory(self):
directory = utils.get_file_directory()
self.assertIsInstance(directory, str)
self.assertTrue(os.path.exists(directory))
def test_get_temp_directory(self):
self.assertTrue(os.path.exists(utils.get_temp_directory()))
def test_get_themes_directory(self):
themes = [
"aquativo",
"black",
"blue",
"clearlooks",
"elegance",
"keramik",
"kroc",
"plastik",
"radiance",
"winxpblue",
]
folders = os.listdir(utils.get_themes_directory())
for theme in themes:
self.assertTrue(theme in folders)
| 1,761 |
Facial Keypoints Detection/3. Facial keypoint detection.py
|
vkmavani/Facial-Keypoints-Detection
| 1 |
2024886
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import load_model
import cv2
# In[2]:
image = cv2.imread('images/obamas.jpg')
image_RGB = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
fig = plt.figure(figsize=(9,9))
plt.imshow(image_RGB)
# In[3]:
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
faces = face_cascade.detectMultiScale(image, 1.2, 2)
image_with_detections = image_RGB.copy()
print(image_with_detections.shape)
for (x,y,w,h) in faces:
cv2.rectangle(image_with_detections, (x,y), (x+w, y+h), (255,0,0), 3)
fig = plt.figure(figsize=(9,9))
plt.imshow(image_with_detections)
# In[4]:
model = load_model('Model.h5')
# In[5]:
def denormalize_keypoints(keypoints):
keypoints = keypoints*50+100
return keypoints
def show_all_keypoints(image, predicted_key_pts):
plt.figure(figsize=(20,10))
predicted_key_pts = predicted_key_pts.astype(float).reshape(-1,2)
predicted_key_pts = denormalize_keypoints(predicted_key_pts)
plt.imshow(image, cmap='gray')
plt.scatter(predicted_key_pts[:, 0], predicted_key_pts[:, 1], s=20, marker='.', c='g')
plt.show()
# In[6]:
image_copy = np.copy(image)
for (x,y,w,h) in faces:
roi = image_copy[y-50:y+h+50, x-50:x+w+50]
roi = cv2.cvtColor(roi, cv2.COLOR_RGB2GRAY)
roi = roi / 255.0
new_w = 224
new_h = 224
roi_plot = cv2.resize(roi, (new_w, new_h))
roi = np.reshape(roi_plot,(1, new_w, new_h, 1))
prediction = model.predict(roi)
show_all_keypoints(roi_plot, prediction)
| 1,668 |
pywinrt/winsdk/windows/devices/printers/__init__.py
|
pywinrt/python-winsdk
| 3 |
2025280
|
# WARNING: Please don't edit this file. It was generated by Python/WinRT v1.0.0-beta.4
import enum
import winsdk
_ns_module = winsdk._import_ns_module("Windows.Devices.Printers")
try:
import winsdk.windows.foundation
except Exception:
pass
try:
import winsdk.windows.foundation.collections
except Exception:
pass
try:
import winsdk.windows.storage.streams
except Exception:
pass
class IppAttributeErrorReason(enum.IntEnum):
REQUEST_ENTITY_TOO_LARGE = 0
ATTRIBUTE_NOT_SUPPORTED = 1
ATTRIBUTE_VALUES_NOT_SUPPORTED = 2
ATTRIBUTE_NOT_SETTABLE = 3
CONFLICTING_ATTRIBUTES = 4
class IppAttributeValueKind(enum.IntEnum):
UNSUPPORTED = 0
UNKNOWN = 1
NO_VALUE = 2
INTEGER = 3
BOOLEAN = 4
ENUM = 5
OCTET_STRING = 6
DATE_TIME = 7
RESOLUTION = 8
RANGE_OF_INTEGER = 9
COLLECTION = 10
TEXT_WITH_LANGUAGE = 11
NAME_WITH_LANGUAGE = 12
TEXT_WITHOUT_LANGUAGE = 13
NAME_WITHOUT_LANGUAGE = 14
KEYWORD = 15
URI = 16
URI_SCHEMA = 17
CHARSET = 18
NATURAL_LANGUAGE = 19
MIME_MEDIA_TYPE = 20
class IppResolutionUnit(enum.IntEnum):
DOTS_PER_INCH = 0
DOTS_PER_CENTIMETER = 1
IppAttributeError = _ns_module.IppAttributeError
IppAttributeValue = _ns_module.IppAttributeValue
IppIntegerRange = _ns_module.IppIntegerRange
IppPrintDevice = _ns_module.IppPrintDevice
IppResolution = _ns_module.IppResolution
IppSetAttributesResult = _ns_module.IppSetAttributesResult
IppTextWithLanguage = _ns_module.IppTextWithLanguage
Print3DDevice = _ns_module.Print3DDevice
PrintSchema = _ns_module.PrintSchema
| 1,610 |
setup.py
|
anthill-platform/anthill-game-controller
| 0 |
2025427
|
from setuptools import setup, find_namespace_packages
DEPENDENCIES = [
"anthill-common>=0.2.5"
]
setup(
name='anthill-game-controller',
package_data={
"anthill.game.controller": ["anthill/game/controller/sql", "anthill/game/controller/static"]
},
version='0.2.8',
    description='Game servers hosting & matchmaking service for Anthill platform',
author='desertkun',
license='MIT',
author_email='<EMAIL>',
url='https://github.com/anthill-platform/anthill-game-controller',
namespace_packages=["anthill"],
include_package_data=True,
packages=find_namespace_packages(include=["anthill.*"]),
zip_safe=False,
install_requires=DEPENDENCIES
)
| 718 |
aiohttp_r3/router.py
|
iceb0y/aiohttp-r3
| 3 |
2024261
|
import asyncio
from aiohttp import web
from aiohttp_r3 import _r3
from yarl import unquote
METHOD_STR_TO_INT = {
'GET': _r3.METHOD_GET,
'POST': _r3.METHOD_POST,
'PUT': _r3.METHOD_PUT,
'DELETE': _r3.METHOD_DELETE,
'PATCH': _r3.METHOD_PATCH,
'HEAD': _r3.METHOD_HEAD,
'OPTIONS': _r3.METHOD_OPTIONS,
}
METHOD_ALL = (_r3.METHOD_GET | _r3.METHOD_POST | _r3.METHOD_PUT |
_r3.METHOD_DELETE | _r3.METHOD_PATCH | _r3.METHOD_HEAD |
_r3.METHOD_OPTIONS)
class R3Router(web.UrlDispatcher):
def __init__(self):
super().__init__()
self.tree = _r3.R3Tree()
def add_route(self, method, path, handler, *, name=None, expect_handler=None):
route = super().add_route(
method, path, handler, name=name, expect_handler=expect_handler)
if method == '*':
method_int = METHOD_ALL
else:
method_int = METHOD_STR_TO_INT[method]
self.tree.insert_route(method_int, path.encode(), route)
return route
def freeze(self):
super().freeze()
self.tree.compile()
@asyncio.coroutine
def resolve(self, request):
route, params = self.tree.match_route(METHOD_STR_TO_INT[request._method],
request.rel_url.raw_path.encode())
if route:
match_dict = {k.decode(): unquote(v.decode()) for k, v in params}
return web.UrlMappingMatchInfo(match_dict, route)
result = yield from super().resolve(request)
return result
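# Usage sketch (the handler below is hypothetical, in the aiohttp style of this era):
#   @asyncio.coroutine
#   def hello(request):
#       return web.Response(text=request.match_info['name'])
#   app = web.Application(router=R3Router())
#   app.router.add_route('GET', '/hello/{name}', hello)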
| 1,556 |
insights/combiners/du.py
|
lhuett/insights-core
| 121 |
2025889
|
"""
Disk Usage
==========
Combiners for gathering information from du parsers.
"""
from insights import combiner
from insights.parsers.du import DiskUsageDir
@combiner(DiskUsageDir)
class DiskUsageDirs(dict):
"""
Combiner for the :class:`insights.parsers.du.DiskUsageDir` parser.
The parser is multioutput, one parser instance for each directory disk
usage. This combiner puts all of them back together and presents them as a
dict where the keys are the directory names and the space usage are the
values.
Sample input data for du commands as parsed by the parsers::
# Output of the command:
# /bin/du -s -k /var/log
553500 /var/log
# Output of the command:
# /bin/du -s -k /var/lib/pgsql
519228 /var/lib/pgsql
Examples:
>>> type(disk_usage_dirs)
<class 'insights.combiners.du.DiskUsageDirs'>
>>> sorted(disk_usage_dirs.keys())
['/var/lib/pgsql', '/var/log']
>>> disk_usage_dirs['/var/lib/pgsql']
519228
"""
def __init__(self, du_dirs):
super(DiskUsageDirs, self).__init__()
for du in du_dirs:
self.update(du)
| 1,183 |
game/animations.py
|
bpetrikovics/Poing
| 0 |
2023529
|
from color import Color
from interfaces import IMovable, IAnimation, IScene
class FadeOut(IAnimation):
""" Applies fadeout effect on a ColorableMixin object, from a given color """
COLOR_SPEED = 180
def __init__(self, color: Color):
print(f"Animation.FadeOut: starting from {color}")
self.target = None
self.color = color
self.color_r = color.r
self.color_g = color.g
self.color_b = color.b
def update(self, dt):
self.color_r = int(max(self.color_r - FadeOut.COLOR_SPEED * dt, 0))
self.color_g = int(max(self.color_g - FadeOut.COLOR_SPEED * dt, 0))
self.color_b = int(max(self.color_b - FadeOut.COLOR_SPEED * dt, 0))
out_color = Color(self.color_r, self.color_g, self.color_b)
self.target.set_color(out_color)
def is_finished(self) -> bool:
return self.color_r == 0 and self.color_g == 0 and self.color_b == 0
def set_target(self, target: IMovable):
print(f"Animation.FadeOut: got target={target}")
self.target = target
def __repr__(self):
return f"<FadeOut color={self.color}>"
class BallBounceOff(IAnimation):
""" Bounce animation to be played when the pad misses the ball, should be used with FadeOut """
ACCEL_Y = 15
def __init__(self, scene: IScene):
print(f"Animation.BallBounceOff: scene is {scene}")
self.target = None
self.scene = scene
def update(self, dt: int):
self.speed_y += BallBounceOff.ACCEL_Y * dt
self.target.set_speed(self.speed_x, int(self.speed_y))
def is_finished(self) -> bool:
return self.target.y >= self.scene.height
def set_target(self, target: IMovable):
self.target = target
(self.speed_x, self.speed_y) = target.get_speed()
print(
f"Animation.BallBounceOff: got target={target}, initial object speed is (dx={self.speed_x}, dy={self.speed_y})")
self.speed_x *= -1
self.speed_y = -5
target.set_speed(self.speed_x, self.speed_y)
def __repr__(self):
return f"<BallBounceOff>"
class Flash(IAnimation):
""" Quick flash animation when ball bounces off a wall """
COLOR_SPEED = 250
def __init__(self, flash_color: Color, speed: int = COLOR_SPEED):
print(f"Animation.Flash: color={flash_color}, speed={speed}")
self.target = None
self.speed = speed
self.target_color = None
self.flash_color = flash_color
def update(self, dt):
# We'll mutate the flash_color towards the direction of the desired target color
self.flash_color.r = min(self.flash_color.r + int(self.speed * dt), 255)
self.flash_color.g = min(self.flash_color.g + int(self.speed * dt), 255)
self.flash_color.b = min(self.flash_color.b + int(self.speed * dt), 255)
self.target.set_color(self.flash_color)
def is_finished(self) -> bool:
return self.flash_color.r == self.target_color.r and self.flash_color.g == self.target_color.g and self.flash_color.b == self.target_color.b
def set_target(self, target: IMovable):
self.target = target
self.target_color = target.get_color()
self.target.set_color(self.flash_color)
print(f"Animation.Flash: got target={target}, target_color={self.target_color})")
def __repr__(self):
return f"<Flash color={self.target_color}>"
| 3,424 |
exercism/40.sublist/sublist.py
|
fenilgandhi/100_Days_of_Python
| 0 |
2023161
|
SUBLIST = 1
SUPERLIST = 2
EQUAL = 3
UNEQUAL = 4
def check_lists(first_list, second_list):
first_list = "#".join(map(str, first_list))
second_list = "#".join(map(str, second_list))
ainb = (first_list in second_list)
bina = (second_list in first_list)
    if ainb and bina:
return EQUAL
elif ainb:
return SUBLIST
elif bina:
return SUPERLIST
else:
return UNEQUAL
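# Examples (results follow from the string-join containment check above):
#   check_lists([1, 2], [1, 2, 3])  -> SUBLIST
#   check_lists([1, 2, 3], [2, 3])  -> SUPERLIST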
| 394 |
src/pypkg/config.py
|
zondo/pypkg
| 0 |
2025766
|
"""
Configuration stuff.
"""
# TODO: use or remove this
import configparser as cp
from pathlib import Path
from . import __program__
def init_config(*files, suffix="cfg"):
"""
Return package configuration settings.
"""
filenames = []
configname = f"{__program__}.{suffix}"
# Add internal config file.
dirname = Path(__file__).parent
filenames.append(dirname / f"config.{suffix}")
# Add config file in user home directory.
filenames.append(Path.home() / f".{configname}")
# Add package config file in current directory.
filenames.append(configname)
# Add other specific files.
filenames.extend(files)
    # Create parser and make option names case-sensitive
    # (optionxform must be set before read() for it to take effect).
    parser = cp.ConfigParser()
    parser.optionxform = str
    parser.read(filenames)
return parser
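# Usage sketch (section and option names here are hypothetical):
#   config = init_config()
#   verbose = config.get('main', 'Verbose', fallback='no')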
| 825 |
Data Helper Scripts/CenterCrops.py
|
TankyFranky/Snow_Removal_GAN_ELEC825_Final_Project
| 1 |
2023894
|
import os
from PIL import Image  # required for Image.open below
"""
Helper Script. Creates 256x256 center cropped images from the snow100K dataset. Based on the GenerateCrops.py script
"""
crop_size = 256 # crop size
num_samples = 1 # number of crops per image
counter = 0
for gt, synth in zip(os.listdir("all/gt/"), os.listdir("all/synthetic/")): # loops over both directories at same time
#for realistic in os.listdir("Dataset/Snow100K/realistic/"):
    # the above two file locations are where the original data is (relative path)
# grab/open image
gt_img = Image.open("Dataset/Snow100K/test/Snow100K-L/gt/" + gt)
synth_img = Image.open("Dataset/Snow100K/test/Snow100K-L/synthetic/" + synth)
#realistic_img = Image.open("Dataset/Snow100K/realistic/" + realistic)
# get image sizes
x_gt, y_gt = gt_img.size
x_synth, y_synth = synth_img.size
#x_realistic, y_realistic = realistic_img.size
#close image to avoid errors
# synth_img.close()
# # remove images less than 256x256
# if (x_synth < 256):
# counter = counter + 1
#os.remove("Dataset/Snow100K/test/Snow100K-L/synthetic/" + synth)
#print(counter)
#spot check they are the same (just in case)
# if(x_gt != x_synth or y_gt != y_synth or gt != synth):
# print("image size missmatch")
# break
# perform center crop on each image
    for i in range(num_samples):  # run for num_samples on each image
x1 = (x_synth - crop_size)/2
y1 = (y_synth - crop_size)/2
save_gt = gt_img.crop((x1, y1, x1 + crop_size, y1 + crop_size))
save_synth = synth_img.crop((x1, y1, x1 + crop_size, y1 + crop_size))
# save them bad bois
save_gt.save("Dataset/Snow100K/realistic_crop/"+gt+"_"+str(i)+".jpg")
save_synth.save("Dataset/Snow100K/center_crop_small_val/synthetic/"+synth+"_"+str(i)+".jpg")
# you will need the following file structure for output
"""
/center crop
/gt
/synthetic
"""
| 2,039 |
kattis/k_rotatecut.py
|
ivanlyon/exercises
| 0 |
2023157
|
'''
Remove alternating quarter-lengths from input string
Status: Accepted
'''
###############################################################################
def main():
"""Read input and print output"""
for _ in range(int(input())):
rotated = False
rotations, text = input().split()
rotations = int(rotations)
while rotations:
quarter = (len(text) >> 2)
if quarter == 0:
break
if rotated:
text = text[:-quarter]
else:
text = text[quarter:]
rotated = not rotated
rotations -= 1
print(text)
###############################################################################
if __name__ == '__main__':
main()
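# Worked example (hypothetical input "2 elephant"): quarter = 8 >> 2 = 2,
# cut from the front -> "ephant"; then quarter = 6 >> 2 = 1, cut from the
# back -> "ephan", which is printed.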
| 784 |
binary_search_tree/binary_search_tree_ex1.py
|
RafaSzl/Algorithms
| 0 |
2025629
|
"""
As a senior backend engineer at Jovian, you are tasked with developing a fast in-memory data structure to manage
profile information (username, name and email) for 100 million users.
It should allow the following operations to be performed efficiently:
1. Insert the profile information for a new user.
2. Find the profile information of a user, given their username
3. Update the profile information of a user, given their username
4. List all the users of the platform, sorted by username
You can assume that usernames are unique.
Along the way, we will also solve several other questions related to binary trees and binary search trees
that are often asked in coding interviews and assessments.
Problem:
We need to create a data structure which can store 100 million records and perform insertion,
search, update and list operations efficiently.
Input:
The key inputs to our data structure are user profiles, which contain the username, name and email of a user.
A Python class would be a great way to represent the information for a user. A class is a blueprint for
creating objects. Everything in Python is an object belonging to some class.
"""
class User:
def __init__(self, username, name, email):
self.username = username
self.name = name
self.email = email
print('User created')
# `self` here refers to the actual object (instance) being operated on
def introduce(self, guest_name):
print("Hi {}, I'm {}. Contact me at {}".format(guest_name, self.name, self.email))
user2 = User('przemek', '<NAME>', '<EMAIL>')
user2.introduce('David')
# print() and the str() built-in use __str__ to produce the string representation
# of an object, while the repr() built-in uses __repr__.
# We can also express our desired data structure as a Python class UserDatabase with four methods:
# insert, find, update and list_all.
class UserDatabase:
    def __init__(self):
        self.list_of_users = []

    def insert(self, user):
        # Compare by username (usernames are unique), not by object identity.
        if any(u.username == user.username for u in self.list_of_users):
            print('This username already exists in the list of users')
        else:
            self.list_of_users.append(user)

    def find(self, username):
        for user in self.list_of_users:
            if user.username == username:
                return user
        print('User is not in the users list')
        return None

    def update(self, user):
        target = self.find(user.username)
        if target is None:
            return
        target.name, target.email = user.name, user.email
        print('Username: ' + target.username + ', Name: ' + target.name + ', Email: ' + target.email)

    def list_all(self):
        return self.list_of_users
# Example user input
aakash = User('aakash', '<NAME>', '<EMAIL>')
biraj = User('biraj', '<NAME>', '<EMAIL>')
hemanth = User('hemanth', '<NAME>', '<EMAIL>')
jadhesh = User('jadhesh', '<NAME>', '<EMAIL>')
siddhant = User('siddhant', '<NAME>', '<EMAIL>')
sonaksh = User('sonaksh', '<NAME>', '<EMAIL>')
vishal = User('vishal', '<NAME>', '<EMAIL>')
users = [aakash, biraj, hemanth, jadhesh, siddhant, sonaksh, vishal]
# Create an instance of the database class and insert all users.
database = UserDatabase()
for i in users:
database.insert(i)
print()
print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')
print()
database.find('sonaksh')
database.update(User(username='siddhant', name='<NAME>', email='<EMAIL>'))
# database.list_all()
# for i in range(len(users)):
# print(database.list_of_users[i].__dict__['name'])
# print(database.list_of_users[6].__dict__['name'])
# A simple approach: store the User instances in a list sorted by username.
# Insert: loop through the list and add the new user at a position that keeps the list sorted
# Find: loop through the list and find the user object whose username matches the query
# Update: loop through the list, find the user instance matching the query and update the details
# List: return the list of user objects
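# A minimal sketch of the sorted-list variant described above, using bisect
# (assumes unique usernames; the class and names below are illustrative,
# not part of the original exercise):
import bisect

class SortedUserDatabase:
    def __init__(self):
        self.users = []  # User objects, kept sorted by username
        self.keys = []   # parallel list of usernames, for bisect lookups

    def insert(self, user):
        i = bisect.bisect_left(self.keys, user.username)
        if i < len(self.keys) and self.keys[i] == user.username:
            return  # username already present
        self.keys.insert(i, user.username)
        self.users.insert(i, user)

    def find(self, username):
        i = bisect.bisect_left(self.keys, username)
        if i < len(self.keys) and self.keys[i] == username:
            return self.users[i]
        return None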
| 4,245 |
Exercicios-mundo-2/desafio062aa.py
|
talitadeoa/CEV-Exercicios-Python
| 0 |
2025861
|
# Redo challenge 61, adding an option to keep showing more terms of the progression
pt = int(input('What is the first term of the AP? '))
r = int(input('And what is the common difference? '))
t = pt
count = 1
qtd = 10  # total number of terms to show
vai = 1   # how many more terms the user wants
print('\n------ AP Generator ------ \n')
while vai != 0:
    while count <= qtd:
        print(f'{t} -> ', end='')
        t += r
        count += 1
    print('PAUSE')
    vai = int(input('\nHow many more terms do you want? Enter [0] to stop \n'))
    qtd += vai
print(f'\nOK, end of the program: {qtd} terms were shown')
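# Example run (illustrative): first term 1, difference 2 shows
# 1 -> 3 -> 5 -> 7 -> 9 -> 11 -> 13 -> 15 -> 17 -> 19 -> PAUSE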
| 558 |
art/__init__.py
|
alonhare/adversarial-robustness-toolbox
| 2 |
2025073
|
import os
import json
_folder = os.path.expanduser('~')
if not os.access(_folder, os.W_OK):
_folder = '/tmp'
_folder = os.path.join(_folder, '.art')
_config_path = os.path.expanduser(os.path.join(_folder, 'config.json'))
if os.path.exists(_config_path):
try:
with open(_config_path) as f:
_config = json.load(f)
except ValueError:
_config = {}
if not os.path.exists(_folder):
try:
os.makedirs(_folder)
except OSError:
# Log warning here
pass
if not os.path.exists(_config_path):
# Generate default config
_config = {'DATA_PATH': os.path.join(_folder, 'data')}
try:
with open(_config_path, 'w') as f:
f.write(json.dumps(_config, indent=4))
except IOError:
# Log warning here
pass
# Fall back to the default data path if the config file was missing the key
# or could not be parsed; otherwise DATA_PATH would be left undefined.
if 'DATA_PATH' not in _config:
    _config['DATA_PATH'] = os.path.join(_folder, 'data')
DATA_PATH = _config['DATA_PATH']
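# Consumers can then import the resolved path, e.g. (illustrative):
#     from art import DATA_PATH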
| 874 |
scripts/456.py
|
eglaubauf/egRedshiftTools
| 28 |
2025612
|
import hou

# Set the default shape & color for the Redshift light node types.
for type_name in ("rslight", "rslighties", "rslightportal",
                  "rslightsun", "rslightdome::2.0"):
    node_type = hou.nodeType(hou.objNodeTypeCategory(), type_name)
    node_type.setDefaultColor(hou.Color(1, 1, 0))
    node_type.setDefaultShape('light')
| 904 |
nmmn/astro.py
|
rsnemmen/nemmen
| 16 |
2025451
|
"""
Astrophysical routines
=========================
"""
import numpy
import scipy.optimize
def dist2z(d):
"""
Converts luminosity distance to redshift by solving the equation
'd-z=0'.
Input is assumed float.
"""
import cosmolopy
# x here is the unknown redshift
f = lambda x: d-cosmolopy.distance.luminosity_distance(x,**cosmolopy.fidcosmo)
z = scipy.optimize.fsolve(f, 0.01)
return z
def mjy(lognu,ll,dist,llerr=None):
"""
Converts log(nu/Hz), log(nu Lnu [erg/s]), error in log(nuLnu) to
log(lambda/micron), log(Fnu/mJy), error in log(Fnu).
The input units are CGS.
Usage:
If you have errors in the flux:
>>> lamb,fnu,ferr=mjy(xdata,ydata,dist,yerr)
If you do not have errors in the flux:
>>> lamb,fnu=mjy(xdata,ydata,dist)
:param dist: distance in Mpc
"""
import uncertainties.unumpy as unumpy
c=29979245800. # speed of light in CGS
dist=dist*3.085677581e24 # Mpc -> cm
nu=10**lognu
lamb=c/nu*1e4 # cm -> micron
    if llerr is not None:
lllerr=unumpy.uarray(ll,llerr)
else:
lllerr=ll
lnuerr=10**lllerr/nu
fluxerr=lnuerr/(1e-26*4.*numpy.pi*dist**2) # Lnu (erg/s/Hz) -> Fnu (mJy)
    if llerr is not None:
fluxerr=unumpy.log10(fluxerr)
return numpy.log10(lamb),unumpy.nominal_values(fluxerr),unumpy.std_devs(fluxerr)
else:
return numpy.log10(lamb),numpy.log10(fluxerr)
def arcsec2pc(d=15.,a=1.):
"""
Given the input angular size and distance to the object, computes
the corresponding linear size in pc.
:param d: distance in Mpc
:param a: angular size in arcsec
:returns: linear size in pc
"""
# convert arcsec to radians
a=a*4.848e-6
# convert distance to pc instead of Mpc
d=d*1e6
return d*numpy.tan(a)
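# Example: with the defaults (d=15 Mpc, a=1 arcsec) this gives ~72.7 pc.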
def freq(T):
"""
Convert array of periods in days to frequencies in Hz.
"""
return 1./T/86400.
def period(freq):
"""
Convert array of frequencies to periods.
"""
return 1./freq
class Constants:
"""
Defines a set of useful constants in CGS.
::
const=nmmn.astro.Constants()
E=mass*const.c**2
"""
def __init__(self):
self.G = 6.673E-8
self.c = 29979245800
self.solarmass = 1.99e33
self.year = 31556926
| 2,204 |
hw_asr/augmentations/wave_augmentations/Fade.py
|
ivan-gorin/asr_project_template
| 0 |
2025775
|
import torchaudio
from torch import Tensor
import random
from hw_asr.augmentations.base import AugmentationBase
from hw_asr.augmentations.random_apply import RandomApply
class Fade(AugmentationBase):
def __init__(self, p=0.5, fade_shape="linear"):
self._aug = RandomApply(torchaudio.transforms.Fade(fade_shape=fade_shape), p=p)
def __call__(self, data: Tensor):
length = data.shape[-1]
self._aug.augmentation.fade_in_len = random.randint(0, length // 2)
self._aug.augmentation.fade_out_len = random.randint(0, length // 2)
return self._aug(data)
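# Illustrative usage (assumes a mono waveform tensor of shape [1, time]):
#     import torch
#     fade = Fade(p=1.0, fade_shape="exponential")
#     augmented = fade(torch.randn(1, 16000))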
| 598 |
aux/system/device/network.py
|
bischjer/auxiliary
| 0 |
2024259
|
from aux.protocols import installers
from aux.device.base import Device
class NetworkDevice(Device):
"""Generic network device
"""
def __init__(self, scriptengine, address, protocols):
"""
@param scriptengine: see :Device.__init__:
@param address: the network address of the device (usually hostname or ip)
@param protocols: dictionary of protocol names and configurations
this Device should be configured with
"""
Device.__init__(self, address, scriptengine)
self.address = address
        for protocol, config in protocols.items():
# Find protocol installers for requested protocols and install on
# this device with given configuration.
installer = getattr(installers, protocol)
installer(self, **config)
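# Illustrative usage (the protocol name and config are hypothetical; the
# available installers depend on aux.protocols):
#     device = NetworkDevice(engine, "192.168.0.10", {"ssh": {"port": 22}})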
| 856 |
algorithm/matrix/spiral_matrix.py
|
sekilas13/Python
| 79 |
2024693
|
# This program prints a matrix in spiral order.
# The problem is solved recursively.
# The matrix must satisfy the conditions below:
# i) it must be one- or two-dimensional
# ii) every row must have the same number of columns
from collections.abc import Iterable
def check_matrix(matrix):
if matrix and isinstance(matrix, Iterable):
if isinstance(matrix[0], Iterable):
prev_len = 0
for row in matrix:
if prev_len == 0:
prev_len = len(row)
result = True
else:
result = prev_len == len(row)
else:
result = True
else:
result = False
return result
def spiral_print(a):
if check_matrix(a) and len(a) > 0:
mat_row = len(a)
if isinstance(a[0], Iterable):
mat_col = len(a[0])
        else:
            # One-dimensional input: just print the elements.
            for dat in a:
                print(dat, end=" ")
            return
        # print the top row, left to right
        for i in range(0, mat_col):
            print(a[0][i], end=" ")
        # print the right column, top to bottom
        for i in range(1, mat_row):
            print(a[i][mat_col - 1], end=" ")
        # print the bottom row, right to left
        if mat_row > 1:
            for i in range(mat_col - 2, -1, -1):
                print(a[mat_row - 1][i], end=" ")
        # print the left column, bottom to top
        for i in range(mat_row - 2, 0, -1):
            print(a[i][0], end=" ")
remain_mat = [row[1 : mat_col - 1] for row in a[1 : mat_row - 1]]
if len(remain_mat) > 0:
spiral_print(remain_mat)
else:
return
    else:
        print("invalid matrix")
        return
if __name__ == "__main__":
a = ([1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12])
spiral_print(a)
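    # Expected output: 1 2 3 4 8 12 11 10 9 5 6 7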
| 1,804 |