max_stars_repo_path (string, 4-182) | max_stars_repo_name (string, 6-116) | max_stars_count (int64, 0-191k) | id (string, 7) | content (string, 100-10k) | size (int64, 100-10k) |
---|---|---|---|---|---|
apollon/commands/apollon_onsets.py
|
bader28/apollon
| 0 |
2171822
|
# Licensed under the terms of the BSD-3-Clause license.
# Copyright (C) 2019 <NAME>
# <EMAIL>
import argparse
import multiprocessing as mp
import sys
from .. import onsets
def _parse_cml(argv):
parser = argparse.ArgumentParser(description='Apollon onset detection engine')
parser.add_argument('--amplitude', action='store_true',
help='Detect onsets based on local extrema in the time domain signal.')
parser.add_argument('--entropy', action='store_true',
help='Detect onsets based on time domain entropy maxima.')
parser.add_argument('--flux', action='store_true',
help='Detect onsets based on spectral flux.')
parser.add_argument('-o', '--outpath', action='store',
help='Output file path.')
parser.add_argument('filepath', type=str, nargs=1)
return parser.parse_args(argv)
def _amp(a):
print('Amplitude')
return a
def _entropy(a):
print('Entropy')
return a
def _flux(a):
print('Flux')
return a
def main(argv=None):
    if argv is None:
        argv = sys.argv[1:]  # drop the program name before parsing
    args = _parse_cml(argv)
detectors = {'amplitude': _amp,
'entropy': _entropy,
'flux': _flux}
methods = [func for name, func in detectors.items() if getattr(args, name)]
if len(methods) == 0:
print('At least one detection method required. Aborting.')
return 1
with mp.Pool(processes=3) as pool:
results = [pool.apply_async(meth, (i,)) for i, meth in enumerate(methods)]
out = [res.get() for res in results]
return out
if __name__ == '__main__':
sys.exit(main())
| 1,712 |
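A minimal, self-contained sketch of the dispatch pattern used in the script above: boolean argparse flags select detector callables via `getattr`, and the selected detectors run in a process pool through `apply_async`. The stub detectors and flag names here are illustrative only, not the apollon API.

```python
import argparse
import multiprocessing as mp


def _amp(i):
    return ("amplitude", i)


def _flux(i):
    return ("flux", i)


def run_selected(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument("--amplitude", action="store_true")
    parser.add_argument("--flux", action="store_true")
    args = parser.parse_args(argv)
    detectors = {"amplitude": _amp, "flux": _flux}
    # keep only the detectors whose flag was given on the command line
    methods = [func for name, func in detectors.items() if getattr(args, name)]
    with mp.Pool(processes=max(len(methods), 1)) as pool:
        # apply_async returns immediately; .get() collects each result
        results = [pool.apply_async(meth, (i,)) for i, meth in enumerate(methods)]
        return [res.get() for res in results]


if __name__ == "__main__":  # guard required so pool workers can re-import this module
    print(run_selected(["--amplitude", "--flux"]))  # [('amplitude', 0), ('flux', 1)]
```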
Table/migrations/0001_initial.py
|
KarryBanana/-_ckr-zfy
| 0 |
2172616
|
# Generated by Django 3.0.3 on 2020-08-13 08:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='File',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('docname', models.CharField(blank=True, max_length=100)),
('docintro', models.CharField(blank=True, max_length=300)),
('doctitle', models.CharField(blank=True, max_length=100)),
('doctext', models.TextField()),
('createtime', models.DateTimeField(default=django.utils.timezone.now)),
('lasttime', models.DateTimeField(auto_now=True)),
('stat', models.IntegerField(default=0)),
('admindoc', models.IntegerField(default=0)),
('deletetime', models.DateTimeField(auto_now=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 1,328 |
datascience_utilities/json_to_csv.py
|
jattenberg/datascience-utilities
| 19 |
2169598
|
#!/usr/local/bin/python
"""
Copyright (c) 2013 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
import pandas as pd
from .utils import option_parser
def get_parser():
parser = option_parser("""consume json data and emit it back as a csv""")
parser.add_option(
"-O",
"--orient",
action="store",
dest="orient",
default="records",
help="""Indication of expected JSON string format.\n
default is `records`.
The set of possible orients is:\n
'split' : dict like {index -> [index], columns -> [columns], data -> [values]}\n
'records' : list like [{column -> value}, ... , {column -> value}]\n
'index' : dict like {index -> {column -> value}}\n
'columns' : dict like {column -> {index -> value}}\n
'values' : just the values array
""",
)
parser.add_option(
"-i",
"--index",
action="store_true",
dest="index",
help="add a column <index> with the row number",
)
parser.add_option(
"-L",
"--lines",
action="store_true",
dest="lines",
help="read line-delimited json",
)
parser.add_option(
"-C",
"--columns",
action="store_true",
dest="columns",
help="print the column names and exit"
)
return parser
def main():
(options, args) = get_parser().parse_args()
input = open(options.filename, "r") if options.filename else sys.stdin
output = open(options.out, "w") if options.out else sys.stdout
df = pd.read_json(input, orient=options.orient, lines=options.lines)
if options.columns:
output.write(options.delim.join(df.columns.values))
return
df.to_csv(
output,
sep=options.delim,
index=options.index,
index_label="index" if options.index else False,
)
if __name__ == "__main__":
main()
| 2,945 |
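To illustrate what the `--orient` and `--lines` options above control, here is a small, self-contained pandas round trip on toy data (illustration only, not part of the utility itself):

```python
import io

import pandas as pd

# orient='records': a list of {column -> value} objects
records_json = '[{"a": 1, "b": "x"}, {"a": 2, "b": "y"}]'
df = pd.read_json(io.StringIO(records_json), orient="records")

# line-delimited JSON: one object per line, read with lines=True
lines_json = '{"a": 1, "b": "x"}\n{"a": 2, "b": "y"}\n'
df_lines = pd.read_json(io.StringIO(lines_json), orient="records", lines=True)

# emit CSV, mirroring the -i/--index behaviour of the script
print(df.to_csv(index=True, index_label="index"))
print(df_lines.to_csv(index=False))
```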
model/contact.py
|
olgakos/python_traning
| 0 |
2172153
|
# -*- coding: utf-8 -*-
from sys import maxsize
# This is the Contact class from assignment No. 3
class Contact():
def __init__(self, lastname=None, firstname=None, id=None,
#address=None, email=None, email2=None,
home=None, mobile=None, work=None, phone2=None,
all_phones_from_home_page=None):
self.lastname = lastname
self.firstname = firstname
#self.address = address
#self.email = email
#self.email2 = email2
self.home = home
self.mobile = mobile
self.work = work
self.phone2 = phone2
self.all_phones_from_home_page=all_phones_from_home_page
self.id = id
def __repr__(self):
return "%s:%s:%s" % (self.id, self.lastname, self.firstname)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) and self.lastname == other.lastname and self.firstname == other.firstname
# unit 4_11 (10-05)
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize
| 1,080 |
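A short usage sketch of the helpers above, assuming the class is importable as `model.contact` (the layout suggested by the file path): `id_or_max` sends contacts without an id to the end of a sort, and `__eq__` skips the id when either side lacks one.

```python
from model.contact import Contact  # import path assumed from the file location above

stored = [Contact(id="12", lastname="Ivanova", firstname="Olga"),
          Contact(id="3", lastname="Petrov", firstname="Ivan")]
expected = [Contact(id=None, lastname="Petrov", firstname="Ivan"),
            Contact(id=None, lastname="Ivanova", firstname="Olga")]

# sort both lists the same way, then compare element-wise via Contact.__eq__
assert sorted(stored, key=Contact.id_or_max) == sorted(expected, key=Contact.id_or_max)
```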
apricotlib/uniprot_proteome_table.py
|
malvikasharan/APRICOT
| 5 |
2171946
|
#!/usr/bin/env python
# Description = Download UniProt based proteome data for a taxonomy id
import sys
try:
from urllib.request import urlopen
except ImportError:
print('Python package urllib is missing. Please install/update.\n')
sys.exit(0)
def format_uniprot_table(proteome_table, uniprot_link):
'''Downloads protein information
table from UniProt database for
the selected taxonomy id'''
try:
response = urlopen(uniprot_link)
for entry in str(response.read()).split('\\n'):
if not entry == "'" and not entry == '"':
if not entry.startswith(
"b'Entry") and not entry.startswith('b"Entry'):
proteome_table.write("%s\n" % '\t'.join(
list(entry.split('\\t'))))
print('"\nDownloaded protein information using UniProt link: %s\n"' % (
uniprot_link))
except:
print(
"UniProt entry is apparently deleted, please check: %s"
% uniprot_link)
| 1,035 |
pygame Bouncing Rectangle.py
|
matthew-e-brown/Grade-11-Pygame
| 0 |
2172244
|
import pygame, math, sys, time
pygame.init()
##Define some colours
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
YELLOW = (255, 255, 0)
BLUE = (0, 0, 255)
SKYBLUE = (150, 215, 255)
LIGHTGREEN = (75, 245, 125)
WN_WIDTH = 1000
WN_HEIGHT = 800
size = (WN_WIDTH, WN_HEIGHT)
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Bouncing Rectangle")
clock = pygame.time.Clock()
##Define some Fonts
CS17 = pygame.font.Font('C:/Windows/Fonts/comic.ttf', 17)
##Define some Sounds
#boing1 = pygame.mixer.Sound("C:/Users/MA316BR/Downloads/159376__greenhourglass__boing1.wav")
#boing1.set_volume(0.3)
#def soundBoing():
#pygame.mixer.Sound.play(boing1, loops = 0, maxtime = 0, fade_ms = 0)
##-------------------------
## Rectangle Size
rectSizeX, rectSizeY = (75, 75)
## Rectangle starting pos
rect_x, rect_y = (50, 50)
## Rect change amount
rectChangeX, rectChangeY = (5, 5)
## Gravity
grav = 0.5
slide = 0.5
##--- Main Loop -----
pygame.mixer.init(frequency=22050, size=-16, channels=1, buffer=4069)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT: pygame.quit(), sys.exit()
rectChangeY += grav
if grav == 0:
rectChangeX += slide
rect_x += rectChangeX
rect_y += rectChangeY
if rect_y > ((WN_HEIGHT - rectSizeY)+13) or rect_y < 13:
grav = 0
rectChangeY = 0
testText = CS17.render("TEST, You're supposed to have stopped by now // Y", True, BLACK)
screen.blit(testText, [0,0])
elif rect_y > (WN_HEIGHT - rectSizeY) or rect_y < 0:
#soundBoing()
rectChangeY = rectChangeY * (-1)
if rect_x > ((WN_WIDTH - rectSizeX) + 13) or rect_x < -12:
slide = 0
rectChangeX = 0
testText = CS17.render("TEST, You're supposed to have stopped by now // X", True, BLACK)
screen.blit(testText, [0,0])
elif rect_x > (WN_WIDTH - rectSizeX) or rect_x < 0:
#soundBoing()
rectChangeX = rectChangeX * (-1)
## Draw the rect, my boyyo
screen.fill(WHITE)
pygame.draw.rect(screen, SKYBLUE, [rect_x, rect_y, rectSizeX, rectSizeY])
clock.tick(100)
pygame.display.flip() ## FPS
| 2,297 |
src/extract_api_permission_mapping.py
|
FlyingWithJerome/Malware_Detector
| 8 |
2171535
|
'''
extract api permission mapping from PScout output
'''
import csv
import os
import os.path
from utilities import get_data_directory
DANGEROUS_PERMISSION = []
def load_dangerous_permissions():
file_location = get_data_directory("permission_metadata", "dangerous_permission_list.txt")
with open(file_location) as input_file:
return input_file.read().split()
DANGEROUS_PERMISSION = load_dangerous_permissions()
def get_list_of_apis(source_str):
results = []
for lines in source_str:
try:
lines = lines.strip("<>")
[module, rest] = lines.split(": ")
[return_value, func_def] = rest.split(" ")
func_def = func_def.rstrip(")")
[func_name, arguments] = func_def.split("(")
module = "/".join(module.split("."))
results.append(["/".join((module, func_name)), return_value, arguments])
except ValueError:
print lines
return results
def parse_pscout_output(filename, api_lvl="API_22"):
output_location = get_data_directory("training_data", api_lvl)
if not os.path.exists(output_location):
os.mkdir(output_location)
output_file = os.path.join(output_location, api_lvl+"_parsed_api.csv")
with open(filename) as pscout_input, open(output_file, "w") as output:
raw_content = split_file = pscout_input.read()
split_file = raw_content.split("\n")
pscout_input.seek(0)
line_numbers = []
for line_num, line in enumerate(pscout_input):
if line.startswith("Permission:"):
line_numbers.append(line_num)
line_numbers.append(len(split_file))
results = []
for i in range(len(line_numbers)-1):
permission_res = get_list_of_apis(split_file[line_numbers[i]+2:line_numbers[i+1]])
for index in range(len(permission_res)):
permission_res[index] = [split_file[line_numbers[i]].split(".")[-1],] + permission_res[index]
results += permission_res
out_writer = csv.writer(output)
out_writer.writerow(["Permission", "Function Name", "Return Value", "Arguments"])
out_writer.writerows(results)
if __name__ == "__main__":
parse_pscout_output("/Users/jeromemao/Desktop/EECS600/project/data/pscout_results/API_21/publishedapimapping.txt", "API_21")
| 2,412 |
minmax.py
|
kaylabollinger/PythonMidterm2022
| 0 |
2172396
|
import numpy as np
def compute(indep,dep):
"""
Computes/prints the (fake) minimum and maximum dependent variable values.
Parameters:
-indep: ndarray
independent variable values stored in 2D array: data point on axis 0, independent variable on axis 1
-dep: ndarray
dependent variable values stored in 1D array: data point on axis 0
Returns:
None
Notes:
Prints fake results.
"""
fake_results = np.random.rand(2)
print('Minimum of Dependent Variable: '+str(min(fake_results)))
print('Maximum of Dependent Variable: '+str(max(fake_results)))
print('\n')
| 597 |
unet3d/metrics.py
|
dweiss044/multiclass_tissue_segmentation
| 1 |
2172460
|
from functools import partial
from itertools import product
from keras import backend as K
import tensorflow as tf
def dice_coefficient(y_true, y_pred, smooth=1.):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
def dice_coefficient_loss(y_true, y_pred):
return -dice_coefficient(y_true, y_pred)
def weighted_dice_coefficient(y_true, y_pred, axis=(-3, -2, -1), smooth=0.00001):
"""
Weighted dice coefficient. Default axis assumes a "channels first" data structure
:param smooth:
:param y_true:
:param y_pred:
:param axis:
:return:
"""
return K.mean(2. * (K.sum(y_true * y_pred,
axis=axis) + smooth/2)/(K.sum(y_true,
axis=axis) + K.sum(y_pred,
axis=axis) + smooth))
def weighted_dice_coefficient_loss(y_true, y_pred):
return -weighted_dice_coefficient(y_true, y_pred)
def label_wise_dice_coefficient(y_true, y_pred, label_index):
return dice_coefficient(y_true[:, label_index], y_pred[:, label_index])
def get_label_dice_coefficient_function(label_index):
f = partial(label_wise_dice_coefficient, label_index=label_index)
f.__setattr__('__name__', 'label_{0}_dice_coef'.format(label_index))
return f
def weighted_bce(alpha=0.9):
def _loss(y_true, y_pred):
# weight positives stronger than negatives --> 9:1, alpha = 0.9
weights = (y_true * alpha/(1.-alpha)) + 1.
bce = K.binary_crossentropy(y_true, y_pred)
weighted_bce = K.mean(bce * weights)
return weighted_bce
return _loss
def categorical_crossentropy_loss(y_true, y_pred):
return K.categorical_crossentropy(y_true, y_pred, axis=1)
def w_categorical_crossentropy(target, output, weights, axis=1):
"""Categorical crossentropy between an output tensor and a target tensor.
# Arguments
target: A tensor of the same shape as `output`.
output: A tensor resulting from a softmax
# Returns
Loss tensor
"""
# scale preds so that the class probas of each sample sum to 1
output /= tf.reduce_sum(output, axis, True)
# manual computation of crossentropy
_epsilon = tf.convert_to_tensor(1e-7, output.dtype.base_dtype)
output = tf.clip_by_value(output, _epsilon, 1. - _epsilon)
target_channels_last = tf.transpose(target, [0,2,3,4,1])
w = target_channels_last*tf.constant(weights, dtype = target.dtype.base_dtype)
w = tf.reduce_sum(w, axis = -1)
w = tf.expand_dims(w, 1)
return - tf.reduce_sum(target * tf.log(output) * w, axis)
def w_categorical_crossentropy_loss(weights):
def _loss(y_true, y_pred):
return w_categorical_crossentropy(y_true, y_pred, weights)
return _loss
def weighted_cce(weights):
# weights must broadcast to [B,C,H,W,D]
weights = K.reshape(K.variable(weights),(1,len(weights),1,1,1))
def _loss(y_true, y_pred):
return K.mean(K.categorical_crossentropy(y_true, y_pred, axis=1) * weights)
return _loss
def w_categorical_crossentropy_old(weights):
def _loss(y_true,y_pred):
nb_cl = len(weights)
final_mask = K.zeros_like(y_pred[:,0])
y_pred_max = K.max(y_pred, axis=1, keepdims=True)
y_pred_max_mat = K.cast(K.equal(y_pred, y_pred_max),'float32')
for c_p, c_t in product(range(nb_cl),range(nb_cl)):
final_mask += (weights[c_t, c_p] * y_pred_max_mat[:, c_p, :, :, :] * y_true[:, c_t, :, :, :])
return K.categorical_crossentropy(y_true, y_pred, axis=1) * final_mask
return _loss
'''
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
"""Categorical crossentropy between an output tensor and a target tensor.
# Arguments
target: A tensor of the same shape as `output`.
output: A tensor resulting from a softmax
(unless `from_logits` is True, in which
case `output` is expected to be the logits).
from_logits: Boolean, whether `output` is the
result of a softmax, or is a tensor of logits.
axis: Int specifying the channels axis. `axis=-1`
corresponds to data format `channels_last`,
and `axis=1` corresponds to data format
`channels_first`.
# Returns
Output tensor.
# Raises
ValueError: if `axis` is neither -1 nor one of
the axes of `output`.
"""
output_dimensions = list(range(len(output.get_shape())))
if axis != -1 and axis not in output_dimensions:
raise ValueError(
'{}{}{}'.format(
'Unexpected channels axis {}. '.format(axis),
'Expected to be -1 or one of the axes of `output`, ',
'which has {} dimensions.'.format(len(output.get_shape()))))
# Note: tf.nn.softmax_cross_entropy_with_logits
# expects logits, Keras expects probabilities.
if not from_logits:
# scale preds so that the class probas of each sample sum to 1
output /= tf.reduce_sum(output, axis, True)
# manual computation of crossentropy
_epsilon = _to_tensor(epsilon(), output.dtype.base_dtype)
output = tf.clip_by_value(output, _epsilon, 1. - _epsilon)
return - tf.reduce_sum(target * tf.log(output), axis)
else:
return tf.nn.softmax_cross_entropy_with_logits(labels=target,
logits=output)
'''
dice_coef = dice_coefficient
dice_coef_loss = dice_coefficient_loss
| 5,730 |
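For intuition, `dice_coefficient` above computes the soft Dice score `(2*sum(A*B) + s) / (sum(A) + sum(B) + s)` over flattened tensors. A small NumPy check of the same formula (illustration only, independent of the Keras/TensorFlow code):

```python
import numpy as np


def soft_dice(y_true, y_pred, smooth=1.0):
    y_true = np.asarray(y_true, dtype=float).ravel()
    y_pred = np.asarray(y_pred, dtype=float).ravel()
    intersection = np.sum(y_true * y_pred)
    return (2.0 * intersection + smooth) / (np.sum(y_true) + np.sum(y_pred) + smooth)


# identical masks score 1.0; disjoint masks score (0 + 1) / (2 + 2 + 1) = 0.2
print(soft_dice([1, 1, 0, 0], [1, 1, 0, 0]))  # 1.0
print(soft_dice([1, 1, 0, 0], [0, 0, 1, 1]))  # 0.2
```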
lib/interface/__init__.py
|
ThiagoAciole/Sistema-de-Cadastro
| 0 |
2171752
|
def leiaint(msg):
while True:
try:
n=int(input(msg))
except (ValueError, TypeError):
print("\033[31mERRO: Por Favor,Digite um Numero Inteiro Valido.\033[m ")
continue
except (KeyboardInterrupt):
print("\033[31m Usuario Preferiu não Digitar\033[m ")
return 0
else:
return n
def linha( tam=42):
return "-"*tam
def cabeçalho(msg):
print(linha())
print(msg.center(42))
print(linha())
def menu(lista):
cabeçalho("MENU PRINCIPAL")
c=1
for item in lista:
print(f"\033[33m{c}\033[m - \033[34m {item}\033[m")
c+=1
print(linha())
opc=leiaint("\033[32mSua Opção: \033[m ")
return opc
| 741 |
scripts/train_UniqueDET.py
|
TonyChouZJU/py-faster-rcnn-batch
| 0 |
2171341
|
import _init_paths
from tools.train_net import train
import os
import numpy as np
import caffe
from datafactory.imdb import IMDB
from datafactory.load import load_data_with_boxes
from tools.train_net_with_boxes import get_training_roidb, train_net
import fast_rcnn.config as fconfig
from fast_rcnn.config import cfg_from_file
from configuration.config import def_cfg
import sys
import pprint
gpu_id = 0
solver = '/home/zyb/VirtualDisk500/exhdd/Recognition-master/models/UniqueDET/solver_debug.prototxt'
max_iters = 100000
size = 224
imdb_name = 'UniqueDET'
out = 'out'
cfg = '/home/zyb/VirtualDisk500/exhdd/Recognition-master/experiments/cfgs/faster_rcnn_end2end.yml'
pretrained_model = '/home/zyb/VirtualDisk500/exhdd/Recognition-master/pretrained_models/VGG_CNN_M_1024.v2.caffemodel'
if __name__ == '__main__':
def_cfg('UDET')
cfg_from_file(cfg)
pprint.pprint(fconfig.cfg)
caffe.set_mode_gpu()
caffe.set_device(gpu_id)
# setup the dataset's path
dataset = os.path.join('..', 'data', imdb_name)
# load pixel mean
pixel_means = None
if os.path.exists(os.path.join(dataset, 'mean.npy')):
pixel_means = np.load(os.path.join(dataset, 'mean.npy'))
fconfig.cfg.PIXEL_MEANS = pixel_means
print 'Loaded mean.npy: {}'.format(pixel_means)
else:
print 'Cannot find mean.npy and we will use default mean.'
imdb = IMDB()
imdb.get_roidb(load_data_with_boxes, dataset=dataset)
roidb = get_training_roidb(imdb)
np.random.seed(fconfig.cfg.RNG_SEED)
caffe.set_random_seed(fconfig.cfg.RNG_SEED)
train_net(solver, roidb, out,
pretrained_model=pretrained_model, max_iters=max_iters)
| 1,686 |
__init__.py
|
jacobtomlinson/skill-words
| 0 |
2172128
|
from random import sample
import nltk
from nltk.corpus import swadesh, wordnet
from opsdroid.matchers import match_regex
from opsdroid.skill import Skill
class WordsHelp(Skill):
@match_regex(r"help scrabble: (.*) (.*)", case_sensitive=False)
async def help_scrabble(self, message):
"""Opsdroid will help you with scrabble."""
scrabble_letters = message.regex.group(1)
board_letter = message.regex.group(2)
puzzle_letters = nltk.FreqDist(scrabble_letters)
wordlist = nltk.corpus.words.words()
await message.respond("Please give a second, I'm thinking...")
words = [
word
for word in wordlist
if len(word) >= 4
and board_letter in word
and nltk.FreqDist(word) <= puzzle_letters
]
if len(words) > 5:
words = sample(words, 5)
if not words:
reply = "Sorry, I can't help you. You better replace some letters."
else:
reply = "Hmm... How about: {}".format(words)
await message.respond(reply)
@match_regex(r"define: (.*)", case_sensitive=False)
async def define(self, message):
"""Opsdroid will define a word and show you how it's used."""
term = message.regex.group(1)
try:
synset = wordnet.synsets(term)
word = str(term) + str(synset[0])[-7:-2]
definition = wordnet.synset(word).definition()
examples = wordnet.synset(word).examples()
synonyms = wordnet.synset(word).lemma_names()
await message.respond(
"Definition of the word '{}': {} \n"
"Synonyms: {} \n"
"You can use this word like such: {}".format(
term, definition, str(synonyms).replace("_", " "), examples
)
)
except nltk.corpus.reader.wordnet.WordNetError:
await message.respond("Sorry, I can't find anything about that word.")
@match_regex(r"translate: (.*) from: (.*) to: (.*)", case_sensitive=False)
async def translate(self, message):
"""Opsdroid with translate a word from one language to another."""
term = message.regex.group(1)
from_language = message.regex.group(2)
to_language = message.regex.group(3)
_dictionary = dict()
languages_dict = {
"spanish": "es",
"belorussian": "be",
"bulgarian": "bg",
"catalan": "cs",
"czech": "cs",
"german": "de",
"english": "en",
"french": "fr",
"croatian": "hr",
"italian": "it",
"latin": "la",
"macedonian": "mk",
"dutch": "nl",
"polish": "pl",
"portuguese": "pt",
"romanian": "ro",
"russian": "ru",
"slovak": "sk",
"slovenian": "sl",
"serbian": "sr",
"ukrainian": "uk",
}
entries = swadesh.entries(
[
languages_dict.get(from_language, "english"),
languages_dict.get(to_language, "english"),
]
)
for word in entries:
_word = word[0].split(", ")
if len(_word) > 1:
_dictionary[_word[0]] = word[1]
_dictionary[_word[1]] = word[1]
else:
_dictionary[word[0]] = word[1]
translation = _dictionary.get(
term, "Sorry, I can't find the " "translation for that word :("
)
await message.respond(
"The {} word '{}' in {} is: {}".format(
from_language, term, to_language, translation
)
)
| 3,774 |
mayan/apps/common/managers.py
|
eshbeata/open-paperless
| 2,743 |
2171710
|
from __future__ import unicode_literals
from django.apps import apps
from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
class ErrorLogEntryManager(models.Manager):
def register(self, model):
ErrorLogEntry = apps.get_model(
app_label='common', model_name='ErrorLogEntry'
)
model.add_to_class('error_logs', GenericRelation(ErrorLogEntry))
| 421 |
poketech/pokequest/migrations/0001_initial.py
|
sagarmanchanda/PokeQuest
| 0 |
2172034
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Attack',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('damage', models.IntegerField(default=0)),
('unlocked', models.BooleanField(default=False)),
('defensive', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='Player',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('absent', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='Pokemon',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('health', models.IntegerField(default=100)),
('unlocked', models.BooleanField(default=False)),
('player', models.ForeignKey(to='pokequest.Player')),
],
),
migrations.AddField(
model_name='attack',
name='poke',
field=models.ForeignKey(to='pokequest.Pokemon'),
),
]
| 1,659 |
pylocator/vtksurface.py
|
nipy/PyLocator
| 5 |
2170658
|
import vtk
from events import EventHandler
from vtkutils import vtkmatrix4x4_to_array, array_to_vtkmatrix4x4
class VTKSurface(vtk.vtkActor):
"""
CLASS: VTKSurface
DESCR: Handles a .vtk structured points file.
"""
def set_matrix(self, registration_mat):
print "VTKSurface.set_matrix(", registration_mat, ")!!"
#print "calling SetUserMatrix(", array_to_vtkmatrix4x4(registration_mat) , ")"
mat = array_to_vtkmatrix4x4(registration_mat)
mat.Modified()
mat2xform = vtk.vtkMatrixToLinearTransform()
mat2xform.SetInput(mat)
print "calling SetUserTransform(", mat2xform, ")"
self.SetUserTransform(mat2xform) # see vtk Prop3d docs
self.Modified()
# how do we like update the render tree or somethin..
self.renderer.Render()
def __init__(self, filename, renderer):
self.renderer = renderer
reader = vtk.vtkStructuredPointsReader()
#reader.SetFileName('/home/mcc/src/devel/extract_mri_slices/braintest2.vtk')
reader.SetFileName(filename)
# we want to move this from its (.87 .92 .43) esque position to something more like 'the center'
# how to do this?!?
# ALTERNATIVELY: we want to use vtkInteractorStyleTrackballActor
# somewhere instead of the interactor controlling the main window and 3 planes
imagedata = reader.GetOutput()
#reader.SetFileName(filename)
cf = vtk.vtkContourFilter()
cf.SetInput(imagedata)
# ???
cf.SetValue(0, 1)
deci = vtk.vtkDecimatePro()
deci.SetInput(cf.GetOutput())
deci.SetTargetReduction(.1)
deci.PreserveTopologyOn()
smoother = vtk.vtkSmoothPolyDataFilter()
smoother.SetInput(deci.GetOutput())
smoother.SetNumberOfIterations(100)
# XXX try to call SetScale directly on actor..
#self.scaleTransform = vtk.vtkTransform()
#self.scaleTransform.Identity()
#self.scaleTransform.Scale(.1, .1, .1)
#transformFilter = vtk.vtkTransformPolyDataFilter()
#transformFilter.SetTransform(self.scaleTransform)
#transformFilter.SetInput(smoother.GetOutput())
#cf.SetValue(1, 2)
#cf.SetValue(2, 3)
#cf.GenerateValues(0, -1.0, 1.0)
#deci = vtk.vtkDecimatePro()
#deci.SetInput(cf.GetOutput())
#deci.SetTargetReduction(0.8) # decimate_value
normals = vtk.vtkPolyDataNormals()
#normals.SetInput(transformFilter.GetOutput())
normals.SetInput(smoother.GetOutput())
normals.FlipNormalsOn()
"""
tags = vtk.vtkFloatArray()
tags.InsertNextValue(1.0)
tags.InsertNextValue(0.5)
tags.InsertNextValue(0.7)
tags.SetName("tag")
"""
lut = vtk.vtkLookupTable()
lut.SetHueRange(0, 0)
lut.SetSaturationRange(0, 0)
lut.SetValueRange(0.2, 0.55)
contourMapper = vtk.vtkPolyDataMapper()
contourMapper.SetInput(normals.GetOutput())
contourMapper.SetLookupTable(lut)
###contourMapper.SetColorModeToMapScalars()
###contourMapper.SelectColorArray("tag")
self.contours = vtk.vtkActor()
self.contours.SetMapper(contourMapper)
#if (do_wireframe):
#self.contours.GetProperty().SetRepresentationToWireframe()
#elif (do_surface):
self.contours.GetProperty().SetRepresentationToSurface()
self.contours.GetProperty().SetInterpolationToGouraud()
self.contours.GetProperty().SetOpacity(1.0)
self.contours.GetProperty().SetAmbient(0.1)
self.contours.GetProperty().SetDiffuse(0.1)
self.contours.GetProperty().SetSpecular(0.1)
self.contours.GetProperty().SetSpecularPower(0.1)
# XXX arbitrarily setting scale to this
#self.contours.SetScale(.1, .1,.1)
renderer.AddActor(self.contours)
# XXX: mcc will this work?!?
print "PlaneWidgetsXYZ.set_image_data: setting EventHandler.set_vtkactor(self.contours)!"
EventHandler().set_vtkactor(self.contours)
#writer = vtk.vtkSTLWriter()
#writer.SetFileTypeToBinary()
#writer.SetFileName('/home/mcc/src/devel/extract_mri_slices/braintest2.stl')
#writer.SetInput(normals.GetOutput())
#writer.Write()
######################################################################
######################################################################
######################################################################
| 4,669 |
Backend/venv/Lib/site-packages/quorum/daemon.py
|
calvin44/Final-Project
| 1 |
2171351
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Flask Quorum
# Copyright (C) 2008-2012 Hive Solutions Lda.
#
# This file is part of Hive Flask Quorum.
#
# Hive Flask Quorum is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Hive Flask Quorum is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Hive Flask Quorum. If not, see <http://www.gnu.org/licenses/>.
__author__ = "<NAME> <<EMAIL>>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2012 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "GNU General Public License (GPL), Version 3"
""" The license for the module """
import os
import sys
import time
import atexit
import signal
class Daemon:
"""
A generic daemon class that provides the general
daemon capabilities. In order to inherit the daemon
capabilities override the run method.
"""
pidfile = None
""" The path to the file that will hold the
pid of the created daemon """
stdin = None
""" The file path to the file to be used
as the standard input of the created process """
stdout = None
""" The file path to the file to be used
as the standard output of the created process """
stderr = None
""" The file path to the file to be used
as the standard error of the created process """
def __init__(self, pid_file, stdin = "/dev/null", stdout = "/dev/null", stderr = "/dev/null"):
"""
Constructor of the class.
@type pidfile: String
@param pidfile: The path to the pid file.
@type stdin: String
@param stdin: The file path to the file to be used
as the standard input of the created process.
@type stdout: String
@param stdout: The file path to the file to be used
as the standard output of the created process.
@type stderr: String
@param stderr: The file path to the file to be used
as the standard error of the created process.
"""
self.pidfile = pid_file
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
def daemonize(self, register = True):
"""
Do the UNIX double-fork magic, see Stevens "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177).
This is considered the main method for the execution
of the daemon strategy.
@type register: bool
@param register: If a cleanup function should be register for
the at exit operation.
@see: http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork() #@UndefinedVariable
if pid > 0: sys.exit(0)
except OSError, error:
sys.stderr.write(
"first fork failed: %d (%s)\n" % (error.errno, error.strerror)
)
sys.exit(1)
# decouples the current process from parent environment
# should create a new group of execution
os.chdir("/")
os.setsid() #@UndefinedVariable
os.umask(0)
try:
# runs the second for and then exits from
# the "second" parent process
pid = os.fork() #@UndefinedVariable
if pid > 0: sys.exit(0)
except OSError, error:
sys.stderr.write(
"second fork failed: %d (%s)\n" % (error.errno, error.strerror)
)
sys.exit(1)
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = file(self.stdin, "r")
so = file(self.stdout, "a+")
se = file(self.stderr, "a+", 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# write pidfile, updating the data in it
# this should mark the process as running
register and atexit.register(self.cleanup)
register and signal.signal(signal.SIGTERM, self.cleanup_s)
pid = str(os.getpid())
file(self.pidfile, "w+").write("%s\n" % pid)
def start(self, register = True):
try:
# checks for a pidfile to check if the daemon
# already runs, in such case retrieves the pid
# of the executing daemon
pid_file = file(self.pidfile, "r")
pid_contents = pid_file.read().strip()
pid = int(pid_contents)
pid_file.close()
except IOError:
pid = None
# in case the pid value is loaded, prints an error
# message to the standard error and exists the current
# process (avoids duplicated running)
if pid:
message = "pidfile %s already exists, daemon already running ?\n"
sys.stderr.write(message % self.pidfile)
sys.exit(1)
# daemonizes the current process and then starts
# the daemon structures (runs the process)
self.daemonize(register = register)
self.run()
def stop(self):
try:
# checks for a pidfile to check if the daemon
# already runs, in such case retrieves the pid
# of the executing daemon
pid_file = file(self.pidfile, "r")
pid_contents = pid_file.read().strip()
pid = int(pid_contents)
pid_file.close()
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist, daemon not running ?\n"
sys.stderr.write(message % self.pidfile)
return
try:
while True:
os.kill(pid, signal.SIGTERM) #@UndefinedVariable
time.sleep(0.1)
except OSError, error:
error = str(error)
if error.find("No such process") > 0:
pid_exists = os.path.exists(self.pidfile)
pid_exists and os.remove(self.pidfile)
else:
sys.exit(1)
def restart(self):
"""
Restarts the daemon process stopping it and
then starting it "again".
"""
self.stop()
self.start()
def cleanup(self):
"""
Performs a cleanup operation in the current daemon
releasing all the structures locked by it.
"""
self.delete_pid()
def cleanup_s(self, signum, frame):
"""
Cleanup handler for the signal handler, this handler
takes extra arguments required by the signal handler
caller.
@type signum: int
@param signum: The identifier of the signal that has
just been raised.
@type frame: Object
@param frame: The object containing the current program
frame at the time of the signal raise.
"""
self.cleanup()
def delete_pid(self):
"""
Removes the current pid file in case it exists in the
current file system.
No error will be raised in case no pid file exists.
"""
pid_exists = os.path.exists(self.pidfile)
pid_exists and os.remove(self.pidfile)
def run(self):
"""
You should override this method when you subclass
daemon. It will be called after the process has been
daemonized by start or restart methods.
"""
pass
| 8,038 |
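The intended way to use the class above, per the `run` docstring, is to subclass it and override `run`. A minimal sketch, assuming the module is importable as `quorum.daemon`; the pid file path is illustrative, and since the module as written uses Python 2 constructs (`file()`, `except E, error`), the sketch assumes a Python 2 interpreter.

```python
import time

from quorum.daemon import Daemon  # import path assumed from the file location above


class Worker(Daemon):

    def run(self):
        # the daemonized process loops here; replace with real work
        while True:
            time.sleep(60)


if __name__ == "__main__":
    Worker("/tmp/worker.pid").start()  # or .stop() / .restart()
```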
register.py
|
AkiaCode/Hiyobot-slashcommand
| 0 |
2171192
|
import requests
url = "https://discord.com/api/v8/applications/<app id>/guilds/<guild id>/commands"
json = {
"name": "히요비정보",
"description": "히요비에서 해당작품 정보를 가져옵니다.",
"options": [
{
"name": "번호",
"description": "작품번호",
"type": 3,
"required": True,
}
],
}
headers = {"Authorization": "Bot <token>"}
res = requests.post(url, headers=headers, json=json)
print(res.text, res.status_code)
| 465 |
lightgbm_new.py
|
raph-m/safe_driver_prediction
| 0 |
2172523
|
import lightgbm as lgb
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import scipy as scp
import csv
from sklearn import metrics  # used by gini() below
from sklearn.model_selection import StratifiedKFold
def gini(y, pred):
fpr, tpr, thr = metrics.roc_curve(y, pred, pos_label=1)
g = 2 * metrics.auc(fpr, tpr) -1
return g
num_boost_round = 1
train_master = pd.read_csv('train.csv')
test_master = pd.read_csv('test.csv')
# train_master.describe()
np.random.seed(3)
model_scores = {}
# Drop binary columns with almost all zeros.
# Why now? Just follow along for now. We have a lot of experimentation to be done
train = train_master.drop(['ps_ind_10_bin', 'ps_ind_11_bin', 'ps_ind_13_bin'], axis=1)
test = test_master.drop(['ps_ind_10_bin', 'ps_ind_11_bin', 'ps_ind_13_bin'], axis=1)
# Drop calculated features
# But WHY???
# Because we are assuming that tree can generate any complicated function
# of base features and calculated features add no more information
# Is this assumption valid? Results will tell
calc_columns = [s for s in list(train_master.columns.values) if '_calc' in s]
train = train.drop(calc_columns, axis=1)
test = test.drop(calc_columns, axis=1)
# Get categorical columns for encoding later
categorical_columns = [s for s in list(train_master.columns.values) if '_cat' in s]
target_column = 'target'
# Replace missing values with NaN
train = train.replace(-1, np.nan)
test = test.replace(-1, np.nan)
# Initialize DS to store validation fold predictions
y_val_fold = np.empty(len(train))
# Initialize DS to store test predictions with aggregate model and individual models
y_test = np.zeros(len(test))
y_test_model_1 = np.zeros(len(test))
y_test_model_2 = np.zeros(len(test))
y_test_model_3 = np.zeros(len(test))
n_splits = 5
folds = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=7)
import numpy as np
from sklearn import metrics
def encode_cat_features(train_df, test_df, cat_cols, target_col_name, smoothing=1):
prior = train_df[target_col_name].mean()
probs_dict = {}
for c in cat_cols:
probs = train_df.groupby(c, as_index=False)[target_col_name].mean()
probs['counts'] = train_df.groupby(c, as_index=False)[target_col_name].count()[[target_col_name]]
probs['smoothing'] = 1 / (1 + np.exp(-(probs['counts'] - 1) / smoothing))
probs['enc'] = prior * (1 - probs['smoothing']) + probs['target'] * probs['smoothing']
probs_dict[c] = probs[[c, 'enc']]
return probs_dict
for fold_number, (train_ids, val_ids) in enumerate(folds.split(train.drop(['id', target_column], axis=1), train[target_column])):
X = train.iloc[train_ids]
X_val = train.iloc[val_ids]
X_test = test
# Encode categorical variables using training fold
encoding_dict = encode_cat_features(X, X_val, categorical_columns, target_column)
for c, encoding in encoding_dict.items():
X = pd.merge(X, encoding[[c, 'enc']], how='left', on=c, sort=False, suffixes=('', '_' + c))
X = X.drop(c, axis=1)
X = X.rename(columns={'enc': 'enc_' + c})
X_test = pd.merge(X_test, encoding[[c, 'enc']], how='left', on=c, sort=False, suffixes=('', '_' + c))
X_test = X_test.drop(c, axis=1)
X_test = X_test.rename(columns={'enc': 'enc_' + c})
X_val = pd.merge(X_val, encoding[[c, 'enc']], how='left', on=c, sort=False, suffixes=('', '_' + c))
X_val = X_val.drop(c, axis=1)
X_val = X_val.rename(columns={'enc': 'enc_' + c})
# Seperate target column and remove id column from all
y = X[target_column]
X = X.drop(['id', target_column], axis=1)
X_test = X_test.drop('id', axis=1)
y_val = X_val[target_column]
X_val = X_val.drop(['id', target_column], axis=1)
# # Upsample data in training folds
# ids_to_duplicate = pd.Series(y == 1)
# X = pd.concat([X, X.loc[ids_to_duplicate]], axis=0)
# y = pd.concat([y, y.loc[ids_to_duplicate]], axis=0)
# # Again Upsample (total increase becomes 4 times)
# X = pd.concat([X, X.loc[ids_to_duplicate]], axis=0)
# y = pd.concat([y, y.loc[ids_to_duplicate]], axis=0)
# Shuffle after concatenating duplicate rows
# We cannot use inbuilt shuffles since both dataframes have to be shuffled in sync
shuffled_ids = np.arange(len(X))
np.random.shuffle(shuffled_ids)
X = X.iloc[shuffled_ids]
y = y.iloc[shuffled_ids]
# Feature Selection goes here
# TODO
# Define parameters of GBM as explained before for 3 trees
params_1 = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': 'auc',
'max_depth': 3,
'learning_rate': 0.05,
'feature_fraction': 1,
'bagging_fraction': 1,
'bagging_freq': 10,
'verbose': 0,
'scale_pos_weight': 4
}
params_2 = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': 'auc',
'max_depth': 4,
'learning_rate': 0.05,
'feature_fraction': 0.9,
'bagging_fraction': 0.9,
'bagging_freq': 2,
'verbose': 0,
'scale_pos_weight': 4
}
params_3 = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': 'auc',
'max_depth': 5,
'learning_rate': 0.05,
'feature_fraction': 0.3,
'bagging_fraction': 0.7,
'bagging_freq': 10,
'verbose': 0,
'scale_pos_weight': 4
}
# Create appropriate format for training and evaluation data
lgb_train = lgb.Dataset(X, y)
lgb_eval = lgb.Dataset(X_val, y_val, reference=lgb_train)
# Create the 3 classifiers with 1000 rounds and a window of 100 for early stopping
clf_1 = lgb.train(params_1, lgb_train, num_boost_round=num_boost_round,
valid_sets=lgb_eval, early_stopping_rounds=100, verbose_eval=50)
clf_2 = lgb.train(params_2, lgb_train, num_boost_round=num_boost_round,
valid_sets=lgb_eval, early_stopping_rounds=100, verbose_eval=50)
clf_3 = lgb.train(params_3, lgb_train, num_boost_round=num_boost_round,
valid_sets=lgb_eval, early_stopping_rounds=100, verbose_eval=50)
# Predict raw scores for validation ids
# At each fold, 1/10th of the training data get scores
y_val_fold[val_ids] = (clf_1.predict(X_val, raw_score=True) +
clf_2.predict(X_val, raw_score=True) +
clf_3.predict(X_val, raw_score=True)) / 3
# Predict and average over folds, raw scores for test data
y_test += (clf_1.predict(X_test, raw_score=True) +
clf_2.predict(X_test, raw_score=True) +
clf_3.predict(X_test, raw_score=True)) / (3 * n_splits)
y_test_model_1 += clf_1.predict(X_test, raw_score=True) / n_splits
y_test_model_2 += clf_2.predict(X_test, raw_score=True) / n_splits
y_test_model_3 += clf_3.predict(X_test, raw_score=True) / n_splits
# Display fold predictions
# Gini requires only order and therefore raw scores need not be scaled
print("Fold %2d : %.9f" % (fold_number + 1, gini(y_val, y_val_fold[val_ids])))
# Display aggregate predictions
# Gini requires only order and therefore raw scores need not be scaled
print("Average score over all folds: %.9f" % gini(train_master[target_column], y_val_fold))
temp = y_test
# Scale the raw scores to range [0.0, 1.0]
temp = np.add(temp, abs(min(temp)))/max(np.add(temp, abs(min(temp))))
df = pd.DataFrame(columns=['id', 'target'])
df['id']=test_master['id']
df['target']=temp
df.to_csv('benchmark__0_283.csv', index=False, float_format="%.9f")
| 7,693 |
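The `encode_cat_features` helper above implements smoothed target (mean) encoding: each category's mean target is blended with the global prior according to how many rows support it. A tiny, self-contained pandas illustration of that blend (toy data, simplified to one column):

```python
import numpy as np
import pandas as pd

toy = pd.DataFrame({"cat": ["a", "a", "a", "b"], "target": [1, 0, 1, 1]})
prior = toy["target"].mean()                                   # global positive rate: 0.75

stats = toy.groupby("cat")["target"].agg(["mean", "count"])
smoothing = 1.0 / (1.0 + np.exp(-(stats["count"] - 1) / 1.0))  # more rows -> weight closer to 1
encoded = prior * (1.0 - smoothing) + stats["mean"] * smoothing

# 'a' (3 rows) stays near its own mean (~0.68); 'b' (1 row) is pulled back toward the prior (0.875)
print(encoded)
```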
tests/make_test_plot.py
|
lgbouma/aesthetic
| 0 |
2171928
|
import numpy as np, matplotlib.pyplot as plt
from aesthetic.plot import (
savefig, set_style, set_style_scatter, set_style_grid, format_ax
)
x = np.linspace(0,10,1000)
y = (x/100)**3 + 5*np.sin(x)
set_style()
fig, ax = plt.subplots()
ax.plot(x, y)
ax.plot(x, y+3)
ax.plot(x, y+6)
ax.set_xlabel('x')
ax.set_ylabel('y')
savefig(fig, '../results/plot_standard.png')
set_style_scatter()
fig, ax = plt.subplots()
ax.scatter(x[::50], y[::50])
ax.scatter(x[::50], y[::50]+3)
ax.scatter(x[::50], y[::50]+6)
ax.set_xlabel('x')
ax.set_ylabel('y')
savefig(fig, '../results/plot_scatter.png')
set_style_grid()
fig, ax = plt.subplots()
ax.plot(x, y)
ax.plot(x, y+3)
ax.plot(x, y+6)
ax.set_xlabel('x')
ax.set_ylabel('y')
savefig(fig, '../results/plot_grid.png')
| 756 |
SphinxReport/Logger_test.py
|
Tim-HU/sphinx-report
| 1 |
2172336
|
from .Logger import warn, debug, info
from . import Logger
import multiprocessing
Logger.basicConfig(
level=Logger.DEBUG,
format='%(asctime)s %(levelname)s %(message)s',
filename = "logger_test.log" )
def test_logging( args ):
msg = args[0]
info( msg )
if __name__ == "__main__":
#
# call from main process to make sure works
#
info( "starting" )
#
# call from child processes in pool
#
pool = multiprocessing.Pool(processes=4) # start 4 worker processes
function_parameters = list()
for a in range(200):
function_parameters.append(("message #%3d" % a,))
pool.map(test_logging, function_parameters)
print(Logger.getCounts())
| 732 |
ExpandDicts.py
|
baliyanvinay/Python-Interview-Preparation
| 1 |
2172465
|
# From a given nested dict, normalize it into dict.
def normalize_dict(input_dict):
'''
Expanding nested dicts into normalized dict using recursion
'''
result = {}
for key, val in input_dict.items():
if isinstance(val, dict):
result.update(normalize_dict(val))
else:
result[key] = val
return result
sample_dict = {
"key1": "val1",
"key2": {
"key2_1": "val2_1",
"key2_2": {
"key2_2_1": "val2_2_1",
"key2_2_2": "val2_2_2",
},
"key2_3": "val2_3"
},
"key3": "val3",
"key4": "val4"
}
output_dict = normalize_dict(sample_dict)
print(output_dict)
## Expected Output
# output_dict = {
# "key1": "val1",
# "key2_1": "val2_1",
# "key2_2_1": "val2_2_1",
# "key2_2_2": "val2_2_2",
# "key2_3": "val2_3",
# "key3": "val3",
# "key4": "val4"
# }
| 965 |
tests/test_basic.py
|
grivet/tf-vrouter
| 0 |
2171989
|
#!/usr/bin/python
import os
import sys
sys.path.append(os.getcwd())
sys.path.append(os.getcwd() + '/lib/')
from imports import * # noqa
# anything with *test* will be assumed by pytest as a test
# The vrouter_test_fixture is passed as an argument to the test
class TestBasic(unittest.TestCase):
@classmethod
def setup_class(cls):
ObjectBase.setUpClass()
# do auto cleanup and auto idx allocation for vif and nh
ObjectBase.set_auto_features(cleanup=True, vif_idx=True, nh_idx=True)
@classmethod
def teardown_class(cls):
ObjectBase.tearDownClass()
def setup_method(self, method):
ObjectBase.setUp(method)
def teardown_method(self, method):
ObjectBase.tearDown()
def test_vif(self):
vif = VirtualVif(name="tap_1", ipv4_str="192.168.3.11",
mac_str="de:ad:be:ef:00:02")
vif.sync()
self.assertEqual("tap_1", vif.get_vif_name())
def test_vif_v6(self):
vmi = VirtualVif(name="tap_2", ipv4_str="192.168.3.11",
mac_str="de:ad:be:ef:00:02",
ipv6_str="2001:0db8:85a3:0000:0000:8a2e:0370:7334")
vmi.sync()
self.assertEqual("tap_2", vmi.get_vif_name())
def test_encap_nh(self):
# add the virtual vif
vif = VirtualVif(name="tap_3", ipv4_str="192.168.3.11",
mac_str="de:ad:be:ef:00:02")
# add encap nexthop
nh = EncapNextHop(encap_oif_id=vif.idx(),
encap="de ad be ef 00 02 de ad be ef 00 01 08 00")
# sync all objects
ObjectBase.sync_all()
# check if virtual vif and encap nh got added
self.assertEqual("tap_3", vif.get_vif_name())
self.assertEqual(nh.idx(), nh.get_nh_idx())
def test_tunnel_nh(self):
# add fabric vif
vmi = FabricVif(name="en0", ipv4_str="192.168.1.1",
mac_str="de:ad:be:ef:00:02")
# add tunnel nh
nh = TunnelNextHopV4(
encap_oif_id=vmi.idx(),
encap="de ad be ef 00 02 de ad be ef 00 01 08 00",
tun_sip="1.1.1.1",
tun_dip="1.1.1.2",
nh_flags=constants.NH_FLAG_TUNNEL_VXLAN)
ObjectBase.sync_all()
# check if fabric vif and tunnel nh got added
self.assertEqual("en0", vmi.get_vif_name())
self.assertEqual(nh.idx(), nh.get_nh_idx())
def test_rt(self):
# add virtual vif
vmi = VirtualVif(name="tap_5", ipv4_str="192.168.1.1",
mac_str="de:ad:be:ef:00:02")
# add encap nh 1
nh1 = EncapNextHop(encap_oif_id=vmi.idx(),
encap="de ad be ef 00 02 de ad be ef 00 01 08 00",
nh_family=constants.AF_BRIDGE)
# add encap nh 2
nh2 = EncapNextHop(encap_oif_id=vmi.idx(),
encap="de ad be ef 00 02 de ad be ef 00 01 08 00")
# add bridge route
bridge_rt = BridgeRoute(
vrf=0,
mac_str="de:ad:be:ef:00:02",
nh_idx=nh1.idx())
# add inet route
inet_rt = InetRoute(
vrf=0,
prefix="192.168.1.1",
prefix_len=32,
nh_idx=nh2.idx())
# sync all objects
ObjectBase.sync_all()
# Query the objects back
self.assertEqual("tap_5", vmi.get_vif_name())
self.assertEqual(nh1.idx(), nh1.get_nh_idx())
self.assertEqual(nh2.idx(), nh2.get_nh_idx())
self.assertEqual(nh2.idx(), inet_rt.get_rtr_nh_idx())
def test_flow(self):
flow1 = InetFlow(sip='1.1.1.4', dip='2.2.2.4', sport=1136, dport=0,
proto=constants.VR_IP_PROTO_ICMP, flow_nh_idx=23,
src_nh_idx=23, flow_vrf=3, rflow_nh_idx=28)
flow1.sync(resp_required=True)
self.assertGreater(flow1.get_fr_index(), 0)
def test_flow_sync_and_add_reverse_flow(self):
flow1 = InetFlow(sip='1.1.1.5', dip='2.2.2.5', sport=1136, dport=0,
proto=constants.VR_IP_PROTO_ICMP, flow_nh_idx=23,
src_nh_idx=23, flow_vrf=3, rflow_nh_idx=28)
flow1.sync_and_add_reverse_flow()
self.assertGreater(flow1.get_fr_index(), 0)
def test_dropstats(self):
# add virtual vif
vmi = VirtualVif(
name="tap_10",
ipv4_str="1.1.1.10",
mac_str="de:ad:be:ef:00:02")
vmi.sync()
self.assertEqual("tap_10", vmi.get_vif_name())
# create an invalid unicast ARP pkt which should get dropped in vrouter
arp = ArpPacket(src="de:ad:be:ef:00:02", dst="de:ad:be:ef:00:00")
pkt = arp.get_packet()
pkt.show()
vmi.send_packet(pkt)
# get the dropstats
drop_stats = DropStats()
self.assertEqual(1, drop_stats.get_vds_invalid_arp())
def test_flow_and_link_flow(self):
# create flow1
flow1 = InetFlow(sip='1.1.1.6', dip='2.2.2.6', sport=1136, dport=0,
proto=constants.VR_IP_PROTO_ICMP, flow_nh_idx=23,
src_nh_idx=23, flow_vrf=3, rflow_nh_idx=28)
# create flow2
flow2 = InetFlow(sip='2.2.2.6', dip='1.1.1.6', sport=1136, dport=0,
proto=constants.VR_IP_PROTO_ICMP, flow_nh_idx=28,
src_nh_idx=28, flow_vrf=3, rflow_nh_idx=23)
# sync and link both flows
flow1.sync_and_link_flow(flow2)
self.assertGreater(flow1.get_fr_index(), 0)
self.assertGreater(flow2.get_fr_index(), 0)
def test_vxlan(self):
# Add vif
vif = VirtualVif(
name="tap_6",
mac_str="de:ad:be:ef:00:02",
ipv4_str=None)
# Add nexthop
nh = EncapNextHop(
encap_oif_id=vif.idx(),
encap="de ad be ef 00 02 de ad be ef 00 01 08 00",
nh_family=constants.AF_BRIDGE)
# Add vxlan
vxlan = Vxlan(
vxlan_idx=4,
vxlan_nhid=nh.idx())
ObjectBase.sync_all()
self.assertEqual(vxlan.idx(), vxlan.get_vxlan_idx())
# Delete vxlan
vxlan.delete()
self.assertNotIn(vxlan.__obj_id__, ObjectBase.__obj_dict__)
def test_mirror(self):
# Add vif
vif = VirtualVif(
name="tap_7",
mac_str="de:ad:be:ef:00:02",
ipv4_str=None)
# Add nexthop
nh = EncapNextHop(
encap_oif_id=vif.idx(),
encap="de ad be ef 00 02 de ad be ef 00 01 08 00",
nh_family=constants.AF_BRIDGE)
# Add mirror
mirr = Mirror(
idx=4,
nh_idx=nh.idx())
ObjectBase.sync_all()
self.assertEqual(mirr.idx(), mirr.get_mirr_idx())
# Delete mirror
mirr.delete()
self.assertNotIn(mirr, ObjectBase.__obj_dict__)
def test_mpls(self):
# Add vif
vif = VirtualVif(
name="tap_8",
mac_str="de:ad:be:ef:00:02",
ipv4_str=None)
# Add nexthop
nh = EncapNextHop(
encap_oif_id=vif.idx(),
encap="de ad be ef 00 02 de ad be ef 00 01 08 00",
nh_family=constants.AF_BRIDGE)
# Add mpls
mpls = Mpls(
mr_label=4,
mr_nhid=nh.idx())
ObjectBase.sync_all()
self.assertEqual(mpls.label(), mpls.get_mr_label())
# Delete mpls
mpls.delete()
self.assertNotIn(mpls, ObjectBase.__obj_dict__)
| 7,573 |
tests/urls.py
|
ShreeshaRelysys/django-ipam
| 99 |
2171593
|
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^', include('django_ipam.urls', namespace='ipam')),
url(r'^admin/', admin.site.urls),
]
| 194 |
setup.py
|
speedcell4/torchglyph
| 11 |
2169232
|
from setuptools import setup, find_packages
name = 'torchglyph'
setup(
name=name,
version='0.3.0',
packages=[package for package in find_packages() if package.startswith(name)],
url=f'https://speedcell4.github.io/torchglyph',
license='MIT',
author='speedcell4',
author_email='<EMAIL>',
description='Data Processor Combinators for Natural Language Processing',
install_requires=[
'tqdm',
'numpy',
'einops',
'torchrua>=0.3.0',
'requests',
'tabulate',
'aku',
],
extras_require={
'dev': [
'pytest',
'hypothesis',
],
'ctx': [
'transformers',
],
'docs': [
'mkdocs',
'mkdocs-alabaster',
]
}
)
| 800 |
Script/Commands/On_Ready/ready_loop.py
|
iocaeaniqa/Clash-Of-Clans-Discord-Bot
| 0 |
2171601
|
# Called when the bot is ready to be used
import asyncio
import datetime
import sqlite3
import threading
import time
import discord
import flask
from Data.Constants.import_const import Login, Ids, Main_bot, Useful
from Script.import_emojis import Emojis
if Main_bot:
discord_token = Login["discord"]["token"]
else:
discord_token = Login["discord"]["beta"]
async def ready_loop(self):
support_server = self.get_guild(Ids["Support_server"])
member_role = discord.utils.get(support_server.roles, name="Member")
for member in support_server.members:
if (member_role not in member.roles) and (not member.bot):
await member.add_roles(member_role)
if Main_bot:
status_channel = self.get_channel(Ids["Status_channel"])
msg = await status_channel.send(f"{Emojis['Yes']} Connected")
await msg.edit(content=f"{Emojis['Yes']} Connected `{msg.created_at.replace(microsecond=0).isoformat(sep=' ')}` UTC-0")
clash_info = self
def thread_weekly_stats():
while True:
date = datetime.datetime.now()
monday = datetime.date.today() + datetime.timedelta(days=(7 - date.weekday()))
monday = datetime.datetime(monday.year, monday.month, monday.day)
diff = monday - date
time.sleep(diff.seconds + diff.days * 24 * 3600)
print("Weekly Stats", datetime.datetime.now())
# ===== WEEKLY STATS =====
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
class WeeklyStatsBot(discord.Client):
def __init__(self):
super().__init__()
async def on_ready(self):
channel = self.get_channel(Ids["Weekly_stats_channel"])
old_servers_count = 0
async for message in channel.history(limit=None):
if message.is_system():
await message.delete()
if message.pinned:
old_servers_count = int(message.content)
await message.delete()
break
msg = await channel.send(str(len(clash_info.guilds)))
await msg.pin()
diff_servers_count = len(clash_info.guilds) - old_servers_count
diff_servers_count = "%+d" % diff_servers_count
await channel.send(f"Evolution of number of servers this week : {diff_servers_count}")
await self.logout()
weekly_stats_bot = WeeklyStatsBot()
try:
loop.run_until_complete(weekly_stats_bot.start(discord_token))
except KeyboardInterrupt:
loop.run_until_complete(weekly_stats_bot.close())
finally:
loop.close()
thread = threading.Thread(target=thread_weekly_stats)
thread.start()
def thread_monthly_users():
while True:
date = datetime.datetime.now()
if date.month < 12:
day1 = datetime.datetime(date.year, date.month + 1, 1)
else:
day1 = datetime.datetime(date.year + 1, 1, 1)
diff = day1 - date
time.sleep(diff.seconds + diff.days * 24 * 3600 + 3600) # 1h00 instead of 0h00 to avoid conflicts with WeeklyStats
print("Monthly Users Stats", datetime.datetime.now())
# ===== MONTHLY USERS =====
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
class MonthlyUsersBot(discord.Client):
def __init__(self):
super().__init__()
async def on_ready(self):
connection = sqlite3.connect(Useful["secure_folder_path"] + "Modifiable.sqlite")
cursor = connection.cursor()
cursor.execute("SELECT COUNT(*) FROM BotUsage")
nb_monthly_users = cursor.fetchone()[0]
text = f"Monthly users : {nb_monthly_users}"
channel = self.get_channel(Ids["Monthly_stats_channel"])
await channel.send(text)
if len(str(date.month)) == 1:
month = f"0{date.month}"
else:
month = str(date.month)
w = f"""CREATE TABLE IF NOT EXISTS BotUsage_{date.year}_{month} AS SELECT * FROM BotUsage"""
cursor.execute(w)
cursor.execute("DELETE FROM BotUsage")
connection.commit()
await self.logout()
monthly_users_bot = MonthlyUsersBot()
try:
loop.run_until_complete(monthly_users_bot.start(discord_token))
except KeyboardInterrupt:
loop.run_until_complete(monthly_users_bot.close())
finally:
loop.close()
thread = threading.Thread(target=thread_monthly_users)
thread.start()
def thread_webhooks_app():
app = flask.Flask(__name__)
@app.route('/topgg_webhook', methods=['post'])
def topgg_webhook():
if (flask.request.remote_addr != "172.16.17.32") or ("Authorization" not in list(flask.request.headers.keys())) or (flask.request.headers["Authorization"] != Login["topgg"]["authorization"]):
authorization = None if "Authorization" not in list(flask.request.headers.keys()) else flask.request.headers["Authorization"]
print(f"Unauthorized :\nIP = {flask.request.remote_addr}\nAuthorization = {authorization}")
return flask.Response(status=401)
def run_bot(voter_id):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
class WebhooksBot(discord.Client):
def __init__(self):
super().__init__()
async def on_ready(self):
import json
from Script.import_functions import create_embed
from Data.Constants.useful import Useful
from Data.Variables.import_var import Votes
user = clash_info.get_user(voter_id)
votes_channel = self.get_channel(Ids["Votes_channel"])
if user.id not in list(Votes.keys()):
Votes[user.id] = 1
else:
Votes[user.id] += 1
json_text = json.dumps(Votes, sort_keys=True, indent=4)
def_votes = open(f"{Useful['secure_folder_path']}votes.json", "w")
def_votes.write(json_text)
def_votes.close()
vote_copy = dict(Votes)
vote = {}
for member_id, member_votes in vote_copy.items():
member = clash_info.get_user(int(member_id))
vote[member.mention] = member_votes
vote = sorted(vote.items(), key=lambda t: t[1])
text = ""
for user_vote_tuple in vote:
text += f"{user_vote_tuple[0]} has voted {user_vote_tuple[1]} times\n"
embed = create_embed(f"{user} has voted for Clash INFO", text, votes_channel.guild.me.color, "", votes_channel.guild.me.avatar_url)
await votes_channel.send(embed=embed)
await self.logout()
webhooks_bot = WebhooksBot()
try:
loop.run_until_complete(webhooks_bot.start(discord_token))
except KeyboardInterrupt:
loop.run_until_complete(webhooks_bot.close())
finally:
loop.close()
import threading
thread = threading.Thread(target=run_bot, kwargs={"voter_id": int(flask.request.get_json()["user"])})
thread.start()
return flask.Response(status=200)
app.run(host="0.0.0.0", port=8080)
thread = threading.Thread(target=thread_webhooks_app, args=())
thread.start()
print("Connected")
nb_guilds = len(self.guilds)
act = discord.Activity(type=discord.ActivityType.watching, name=f"{nb_guilds:,} servers")
await self.change_presence(status=discord.Status.online, activity=act)
return
| 8,600 |
hedgehogsRestApi/analytics/models.py
|
rmarathay/hedgehogs_rcos
| 9 |
2172037
|
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.core.validators import MinValueValidator, MaxValueValidator
from django.contrib.postgres.fields import ArrayField
# Create your models here.
class CompanyFundamentalsTable(models.Model):
c_id = models.UUIDField(blank=True, null=True)
indicator = models.TextField(blank=True, null=True)
day = models.DateField(blank=True, null=True)
value = models.TextField(blank=True, null=True)
ticker = models.TextField(blank=True, null=True)
class Meta:
db_table = 'fundamentals_sample'
class CompanyInfoTable(models.Model):
company_id = models.TextField(primary_key=True)
ticker = models.TextField(blank=True, null=True)
ticker_id = models.TextField(blank=True, null=True)
class Meta:
db_table = 'company_info_table'
class EndOfDayDataTable(models.Model):
primary_key = models.AutoField(primary_key=True)
symbol = models.CharField(max_length=7, blank=True, null=True)
date = models.DateField(blank=True, null=True)
open = models.FloatField(blank=True, null=True)
high = models.FloatField(blank=True, null=True)
low = models.FloatField(blank=True, null=True)
close = models.FloatField(blank=True, null=True)
volume = models.IntegerField(blank=True, null=True)
class Meta:
db_table = 'end_of_day_data_table'
class EodCompanyRelation(models.Model):
company_info = models.OneToOneField(
CompanyInfoTable,
on_delete = models.CASCADE,
primary_key = True,
)
def __str__(self):
return str(self.company_info.ticker)
| 1,671 |
src/python/utilities/PhycasUpdateCheck.py
|
plewis/phycas
| 3 |
2171067
|
#!/usr/bin/env python
import os
from subprocess import Popen, PIPE
def runPhycasUpdateChecker(outstream, update_url, branch_string, revision_string):
phycas_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
phycas_path
dot_svn_path = os.path.join(phycas_path, ".svn")
underSVN = os.path.exists(dot_svn_path)
if underSVN:
try:
svnStatOut = "" #Popen(["svn", "status", "-u", phycas_path], stdout=PIPE).communicate()[0]
except:
outstream.warning('Could not run "svn status" command on directory %s to see if it is up-to-date' % phycas_path)
return
if not "*" in svnStatOut:
outstream.verbose_info("Your copy of phycas is up-to-date")
return
print "runPhycasUpdateChecker() not implemented"
| 826 |
pynegf/mpi.py
|
gpenazzi/pynegf
| 0 |
2171494
|
import logging
_HAS_MPI = False
try:
import mpi4py
_HAS_MPI = True
except ModuleNotFoundError:
logging.info('Module mpi4py not found. MPI support has been disabled.')
_HAS_MPI = False
def has_mpi():
"""
Returns:
bool: whether mpi is supported or not.
"""
return _HAS_MPI
def get_world_comm():
"""
Returns the world communicator if mpi support is enabled.
Otherwise, returns None.
"""
if _HAS_MPI:
from mpi4py import MPI
return MPI.COMM_WORLD
return None
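# Minimal usage sketch (variable names are illustrative only, not part of this module):
#
#   comm = get_world_comm()
#   rank = comm.Get_rank() if comm is not None else 0
#   size = comm.Get_size() if comm is not None else 1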
| 541 |
pizza/forms.py
|
mamalmaleki/django-forms
| 1 |
2171040
|
from django import forms
from .models import Pizza, Size
# class PizzaForm(forms.Form):
# topping1 = forms.CharField(label='Topping 1', max_length=100)
# topping2 = forms.CharField(label='Topping 1', max_length=100)
# size=forms.ChoiceField(label='size', choices=[
# ('Small', 'Small'), ('Medium', 'Medium'), ('Large', 'Large')
# ])
class PizzaForm(forms.ModelForm):
class Meta:
model = Pizza
fields = ('topping1', 'topping2', 'size')
labels = {'topping1': 'Topping 1', 'topping2': 'Topping 2'}
class MultiplePizzaForm(forms.Form):
number = forms.IntegerField(min_value=2, max_value=12)
| 649 |
conopy/viewlinks.py
|
sshmakov/conopy
| 5 |
2172459
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
import datetime
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
import xlsxwriter
import conopy.util as util
class LinksMenu(QMenu):
sections = None
win = None
view = None
def __init__(self, win, parent=None):
super().__init__(parent)
self.win = win
if not win:
#print("No focused window")
return
self.view = util.focusItemView(self.win)
if not self.view:
#print("No focused item view")
return
index = self.view.currentIndex()
if not index.isValid():
return
self.row = index.row()
model = self.view.model()
#self.headers = [ str(model.headerData(col, Qt.Horizontal)).upper() for col in range(model.columnCount()) ]
self.headers = []
for col in range(model.columnCount()):
d = model.headerData(col, Qt.Horizontal, Qt.EditRole)
if d is None:
d = model.headerData(col, Qt.Horizontal, Qt.DisplayRole)
self.headers.append(str(d).upper())
self.roles = win.fieldRoles if 'fieldRoles' in dir(win) else {} # { role: fieldName }
self.roles = { str(r).upper():str(self.roles[r]).upper() for r in self.roles }
#print('headers',self.headers)
#print('roles',self.roles)
iniFile = util.nearFile('.','data/links.ini')
ini = QSettings(iniFile, QSettings.IniFormat)
ini.setIniCodec("utf-8")
ini.beginGroup('Links')
self.sections = ini.value('Sections')
ini.endGroup()
if self.sections is None:
return
if type(self.sections) != type([]):
self.sections = [self.sections]
#print(self.sections)
rhset = set(self.headers).union(set(self.roles))
for s in self.sections:
ini.beginGroup(s)
t = ini.value('Title')
if not t:
t = s
params = ini.value("Params")
if params is None:
params = []
if type(params) != type([]):
params = [params]
exeIni = ini.value("Ini")
ini.endGroup()
upar = [ p.upper() for p in params]
#print('sect',s,'params',upar)
if not set(upar).issubset(rhset):
#print('not added')
continue
a = self.addAction(t)
a.params = params
a.exeIni = util.nearFile(iniFile,exeIni)
a.iniFile = iniFile
a.section = s
a.win = win
#print('added')
self.triggered.connect(self.exeAction)
def isValid(self):
return self.win and self.view and self.sections
def exeAction(self, a):
model = self.view.model()
#print(2, a.params, a.exeIni)
values = {}
for p in a.params:
par = str(p).upper()
if not par in self.headers:
if par in self.roles:
par = self.roles[par]
try:
col = self.headers.index(par)
values[p] = model.index(self.row, col).data(Qt.DisplayRole)
except:
#print(str(sys.exc_info()[1]))
#print(a.params)
return
#print(3, values)
w = util.mainWindow.runIni(a.exeIni)
w.clearParamValues()
for v in values:
w.setParamValue(v, values[v])
def showMenu(win):
menu = LinksMenu(win)
if menu.isValid():
menu.exec(QCursor.pos())
| 3,413 |
tracardi/process_engine/destination/rabbitmq_connector.py
|
Tracardi/tracardi
| 153 |
2172499
|
from typing import List
from .connector import Connector
from kombu import Connection
from ...domain.event import Event
from ...domain.profile import Profile
from ...domain.session import Session
from ...service.rabbitmq.queue_config import QueueConfig
from ...service.rabbitmq.queue_publisher import QueuePublisher
from ...service.rabbitmq.rabbit_configuration import RabbitConfiguration
class RabbitMqConnector(Connector):
async def run(self, data, delta, profile: Profile, session: Session, events: List[Event]):
credentials = self.resource.credentials.test if self.debug is True else self.resource.credentials.production
configuration = RabbitConfiguration(**credentials)
if 'queue' not in self.destination.destination.init:
raise ValueError("Missing queue config.")
settings = QueueConfig(**self.destination.destination.init['queue'])
with Connection(configuration.uri, connect_timeout=configuration.timeout) as conn:
queue_publisher = QueuePublisher(conn, queue_config=settings)
queue_publisher.publish(data)
| 1,104 |
pages/migrations/0047_auto_20171120_0305.py
|
JoshZero87/site
| 4 |
2172062
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-11-20 03:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pages', '0046_micrositepage'),
]
operations = [
migrations.AddField(
model_name='micrositepage',
name='accent_border_color',
field=models.CharField(blank=True, help_text='6 digit CSS color code.', max_length=6, null=True),
),
migrations.AddField(
model_name='micrositepage',
name='show_accent_border',
field=models.BooleanField(default=False, help_text='Show solid accent border at top of page.'),
),
]
| 748 |
federatedml/nn/homo_nn/zoo/dnn.py
|
peiyong86/FATE
| 1 |
2172122
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.nn.homo_nn.backend.tf_keras.layers import has_builder, DENSE, DROPOUT
from federatedml.nn.homo_nn.backend.tf_keras.nn_model import KerasNNModel
from federatedml.nn.homo_nn.zoo import nn
def is_dnn_supported_layer(layer):
return has_builder(layer) and layer in {DENSE, DROPOUT}
def build_nn_model(input_shape, nn_define, loss, optimizer, metrics,
is_supported_layer=is_dnn_supported_layer) -> KerasNNModel:
return nn.build_nn_model(input_shape=input_shape,
nn_define=nn_define,
loss=loss,
optimizer=optimizer,
metrics=metrics,
is_supported_layer=is_supported_layer,
default_layer=DENSE)
| 1,425 |
SeismicMesh/plots/simpplot.py
|
WPringle/SeismicMesh
| 0 |
2172685
|
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d as a3
from .. import geometry
def plot_tets(points, cells, hold_on=False):
"""
visualize tetrahedral mesh
"""
axes = a3.Axes3D(plt.figure())
tri = geometry.get_facets(cells)
vts = points[tri, :]
tri = a3.art3d.Poly3DCollection(vts)
tri.set_alpha(0.2)
tri.set_color("grey")
axes.add_collection3d(tri)
axes.plot(points[:, 0], points[:, 1], points[:, 2], "ko")
axes.set_axis_off()
if hold_on is not False:
plt.show()
return None
def plot_facets(points, facets, color="red", marker="gx", hold_on=False):
"""
visualize the facets
"""
axes = a3.Axes3D(plt.figure())
vts2 = points[facets, :]
tri2 = a3.art3d.Poly3DCollection(vts2)
tri2.set_alpha(0.2)
tri2.set_color(color)
axes.add_collection3d(tri2)
axes.plot(points[:, 0], points[:, 1], points[:, 2], marker)
axes.set_axis_off()
if hold_on is not False:
plt.show()
return None
| 1,011 |
ml/evaluation.py
|
ztstroud/learnz
| 0 |
2172412
|
import numpy as np
def evaluate(labels, predictions, *metrics):
"""
Runs all of the given metrics on the labels and predictions.
:param labels: the ground truth
:param predictions: predicted labels
:param metrics: the metrics to run
:return: the results of the given metrics
"""
return [metric(labels, predictions) for metric in metrics]
def accuracy(labels, predictions):
"""
Determines the accuracy of the given predictions on the given labels.
:param labels: the ground truth
:param predictions: predicted labels
:return: the accuracy of the given predictions on the given labels
"""
correct = np.equal(labels, predictions)
correct_count = np.count_nonzero(correct)
return correct_count / len(labels)
def error(labels, predictions):
"""
Determines the error of the given predictions on the given labels.
:param labels: the ground truth
:param predictions: predicted labels
:return: the error of the given predictions on the given labels
"""
return 1 - accuracy(labels, predictions)
def precision_on(value):
"""
Returns a function that can be used to determine the precision of a set of
predictions on a set of labels. The function should be called with the
labels as the first parameter and the predictions as the second parameter.
For example:
precision_on_one = precision_on(1)
precision = precision_on_one(labels, predictions)
:param value: the value to find the precision of
:return: a function that determines the precision of given labels and predictions
"""
return lambda labels, predictions: _precision(value, labels, predictions)
def _precision(value, labels, predictions):
"""
Determines the precision of the given predictions on the given value.
Precision is defined as the number of correct guesses out of the number of
guesses. If 'a' was predicted 100 times, but only 25 of those were correct
the precision would be 0.25.
:param value: the value to find the precision of
:param labels: the ground truth
:param predictions: predicted labels
:return: the recall of the predictions for the given value
"""
correct = np.equal(labels, predictions)
guesses = np.equal(predictions, value)
if np.count_nonzero(guesses) == 0:
return 0
return np.count_nonzero(correct & guesses) / np.count_nonzero(guesses)
def recall_on(value):
"""
Returns a function that can be used to determine the recall of a set of
predictions on a set of labels. The function should be called with the
labels as the first parameter and the predictions as the second parameter.
For example:
recall_on_one = recall_on(1)
recall = recall_on_one(labels, predictions)
:param value: the value to find the recall of
:return: a function that determines the recall of given labels and predictions
"""
return lambda labels, predictions: _recall(value, labels, predictions)
def _recall(value, labels, predictions):
"""
Determines the recall of the given labels and predictions on the given value
:param value: the value to calculate recall for
:param labels: the ground truth
:param predictions: predicted labels
:return: the recall of the predictions for the given value
"""
correct = np.equal(labels, predictions)
with_value = np.equal(labels, value)
if np.count_nonzero(with_value) == 0:
return 0
return np.count_nonzero(correct & with_value) / np.count_nonzero(with_value)
def fscore_on(value):
"""
Returns a function that can be used to determine the F1 score of a set of
predictions on a set of labels. The function should be called with the
labels as the first parameter and the predictions as the second parameter.
For example:
fscore_on_one = fscore_on(1)
fscore = fscore_on_one(labels, predictions)
:param value: the value to find the fscore of
:return: a function that determines the fscore of given labels and predictions
"""
return lambda labels, predictions: _fscore(value, labels, predictions)
def _fscore(value, labels, predictions):
"""
Determines the F1 score of the given labels and predictions on the given value.
:param value: the value to calculate the fscore of
:param labels: the ground truth
:param predictions: predicted labels
:return: the F1 score of the predictions for the given value
"""
precision = _precision(value, labels, predictions)
recall = _recall(value, labels, predictions)
if precision == 0 or recall == 0:
return 0
return 2 * ((precision * recall) / (precision + recall))
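# Minimal usage sketch (hypothetical data, not part of the module's API):
#
#   import numpy as np
#   labels = np.array([1, 0, 1, 1, 0])
#   predictions = np.array([1, 0, 0, 1, 1])
#   acc, prec_1, rec_1 = evaluate(labels, predictions,
#                                 accuracy, precision_on(1), recall_on(1))
#   # acc = 0.6, prec_1 = 2/3, rec_1 = 2/3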
| 4,755 |
vidsitu_code/evl_vsitu.py
|
TheShadow29/VidSitu
| 37 |
2172509
|
"""
Evaluation for Vsitu
"""
import torch
from torch import nn
from torch.nn import functional as F
import pickle
from pathlib import Path
from utils.trn_utils import (
progress_bar,
move_to,
synchronize,
is_main_process,
compute_avg_dict,
get_world_size,
)
from vidsitu_code.evl_fns import EvlFn_Vb, EvalFnCap, EvlFn_EvRel
from vidsitu_code.seq_gen import SeqGenCustom
class EvalB(nn.Module):
def __init__(self, cfg, comm, device):
super().__init__()
self.cfg = cfg
self.full_cfg = cfg
self.comm = comm
self.device = device
self.met_keys = ["Per_Ev_Top_1", "Per_Ev_Top_5", "recall_macro_1_th_9"]
self.after_init()
return
def after_init(self):
self.evl_met = EvlFn_Vb(self.cfg, self.comm, self.met_keys)
self.evl_fn = self.evl_met.simple_acc
self.compute_loss = False
return
def forward_one_batch(self, mdl, inp):
mdl_out = mdl(inp)["mdl_out"]
mdl_out_probs = F.softmax(mdl_out, dim=-1)
mdl_probs_sorted, mdl_ixs_sorted = mdl_out_probs.sort(dim=-1, descending=True)
# label_lst10 = inp["label_tensor10"]
ann_lst = inp["vseg_idx"]
topk_save = 5
def get_dct(pred_vbs, pred_scores, ann_idx):
pred_vbs_out = []
pred_scores_out = []
assert len(pred_vbs) == 5
assert len(pred_scores) == 5
# assert len(tgt_vbs10) == 5
# iterate over Ev1-5
for pvb, pvs in zip(pred_vbs, pred_scores):
pvb_used = pvb[:topk_save]
pvb_str = [self.comm.vb_id_vocab.symbols[pv] for pv in pvb_used]
pred_vbs_out.append(pvb_str)
pvb_score = pvs[:topk_save]
pred_scores_out.append(pvb_score)
return {
"pred_vbs_ev": pred_vbs_out,
"pred_scores_ev": pred_scores_out,
"ann_idx": ann_idx,
}
out_dct_lst = [
get_dct(pred_vbs, pred_scores, ann_idx)
for pred_vbs, pred_scores, ann_idx in zip(
mdl_ixs_sorted.tolist(), mdl_probs_sorted.tolist(), ann_lst.tolist(),
)
]
return out_dct_lst
def forward(self, model, loss_fn, dl, dl_name, rank=0, pred_path=None, mb=None):
fname = Path(pred_path) / f"{dl_name}_{rank}.pkl"
model.eval()
model.to(self.device)
loss_keys = loss_fn.loss_keys
val_losses = {k: [] for k in loss_keys}
nums = []
results = []
for batch in progress_bar(dl, parent=mb):
batch = move_to(batch, self.device)
b = next(iter(batch.keys()))
nums.append(batch[b].size(0))
torch.cuda.empty_cache()
if self.compute_loss:
with torch.no_grad():
out = model(batch)
out_loss = loss_fn(out, batch)
for k in out_loss:
val_losses[k].append(out_loss[k].detach().cpu())
results += self.forward_one_batch(model, batch)
pickle.dump(results, open(fname, "wb"))
nums = torch.tensor(nums).float()
if self.compute_loss:
val_loss = compute_avg_dict(val_losses, nums)
synchronize()
if is_main_process():
curr_results = results
world_size = get_world_size()
for w in range(1, world_size):
tmp_file = Path(pred_path) / f"{dl_name}_{w}.pkl"
with open(tmp_file, "rb") as f:
tmp_results = pickle.load(f)
curr_results += tmp_results
tmp_file.unlink()
with open(fname, "wb") as f:
pickle.dump(curr_results, f)
if self.full_cfg.only_test:
task_type = self.full_cfg.task_type
if task_type == "vb":
spl = "test_verb"
elif task_type == "vb_arg":
spl = "test_srl"
elif task_type == "evrel":
spl = "test_evrel"
else:
raise NotImplementedError
else:
spl = "valid"
out_acc = self.evl_fn(fname, split_type=spl)
val_acc = {
k: torch.tensor(v).to(self.device)
for k, v in out_acc.items()
if k in self.met_keys
}
synchronize()
if is_main_process():
if self.compute_loss:
return val_loss, val_acc
else:
dummy_loss = {k: torch.tensor(0.0).to(self.device) for k in loss_keys}
return dummy_loss, val_acc
else:
return (
{k: torch.tensor(0.0).to(self.device) for k in loss_keys},
{k: torch.tensor(0.0).to(self.device) for k in self.met_keys},
)
class EvalB_Gen(EvalB):
def after_init(self):
self.in_met_keys = ["cider", "bleu", "rouge"]
self.met_keys = ["cider", "rouge", "lea", "MacroVb_cider", "MacroArg_cider"]
self.evl_met = EvalFnCap(
self.cfg, self.comm, self.in_met_keys, read_val_file=True
)
self.evl_fn = self.evl_met.eval_cap_mets
self.compute_loss = False
def forward_one_batch(self, mdl, inp):
if self.cfg.num_gpus > 1:
seq_gen = SeqGenCustom(
[mdl.module], tgt_dict=self.comm.gpt2_hf_tok, **self.cfg.gen
)
out_sents = mdl.module.forward_gen(inp, seq_gen)
else:
seq_gen = SeqGenCustom(
[mdl], tgt_dict=self.comm.gpt2_hf_tok, **self.cfg.gen
)
out_sents = mdl.forward_gen(inp, seq_gen)
ann_lst = inp["vseg_idx"]
wvoc = self.comm.gpt2_hf_tok
def conv_seq_to_srl(inp_seq: str, ann_idx):
inp_tok_lst = inp_seq.split(" ")
if "." not in inp_tok_lst[0]:
return {}
vb = inp_tok_lst[0]
ix = 1
vb_dct = {"vb_id": vb}
curr_str_lst = []
curr_arg_name = ""
while ix < len(inp_tok_lst):
if inp_tok_lst[ix] not in self.comm.ag_name_dct.ag_dct_start.values():
curr_str_lst.append(inp_tok_lst[ix])
else:
if ix > 1:
vb_dct[curr_arg_name] = " ".join(curr_str_lst)
curr_arg_name = inp_tok_lst[ix].split("<", 1)[1].rsplit(">", 1)[0]
curr_str_lst = []
ix += 1
vb_dct[curr_arg_name] = " ".join(curr_str_lst)
return vb_dct
ev_lst = [f"Ev{ix}" for ix in range(1, 6)]
def get_dct(out_sent, ann_idx):
out_vb_dct = {}
for ev_ix, ev_in in enumerate(ev_lst):
assert len(out_sent[ev_ix]) == 1
out_sent_toks = wvoc.decode(
out_sent[ev_ix][0], skip_special_tokens=True
)
out_vb_dct[ev_in] = conv_seq_to_srl(out_sent_toks, ann_idx)
out_dct = {"ann_idx": ann_idx, "vb_output": out_vb_dct}
return out_dct
out_dct_lst = [
get_dct(pred_sent, ann_idx)
for pred_sent, ann_idx in zip(out_sents.tolist(), ann_lst.tolist(),)
]
return out_dct_lst
class EvalB_Acc(EvalB):
def after_init(self):
self.met_keys = ["Macro_Top_1", "Top_1"]
self.evl_met = EvlFn_EvRel(self.cfg, self.comm, self.met_keys)
self.evl_fn = self.evl_met.simple_acc_evrel
self.compute_loss = True
def forward_one_batch(self, mdl, inp):
mdl_out = mdl(inp)["mdl_out"]
mdl_out_probs = F.softmax(mdl_out, dim=-1)
mdl_probs_sorted, mdl_ixs_sorted = mdl_out_probs.sort(dim=-1, descending=True)
ann_lst = inp["vseg_idx"]
def get_dct(pred_vbs, pred_scores, ann_idx):
pred_vbs_out = []
pred_scores_out = []
assert len(pred_vbs) == 4
assert len(pred_scores) == 4
# iterate over Ev1-5
for pvb, pvs in zip(pred_vbs, pred_scores):
pvb_used = [pvb_i[0] for pvb_i in pvb]
pvb_str = [self.comm.evrel_dct_opp[pv] for pv in pvb_used]
pred_vbs_out.append(pvb_str)
pvb_score = [pvs_i[0] for pvs_i in pvs]
pred_scores_out.append(pvb_score)
return {
"pred_evrels_ev": pred_vbs_out,
"pred_scores_ev": pred_scores_out,
"ann_idx": ann_idx,
}
out_dct_lst = [
get_dct(pred_vbs, pred_scores, ann_idx)
for pred_vbs, pred_scores, ann_idx in zip(
mdl_ixs_sorted.tolist(), mdl_probs_sorted.tolist(), ann_lst.tolist(),
)
]
return out_dct_lst
| 8,950 |
src/assay/combine.py
|
lmsac/GproDIA
| 2 |
2172425
|
import itertools
import numpy as np
from assay.modseq import stringify_modification
from pepmass.glycomass import GlycanNode
class AssayCombiner():
def __init__(self, group_key=None):
if group_key is None:
group_key = glycopeptide_group_key()
self.group_key = group_key
def combine(self, *assays, return_generator=False):
return self.remove_redundant(
itertools.chain.from_iterable(assays),
return_generator=return_generator
)
def remove_redundant(self, assays, return_generator=False):
result = (
self.combine_replicates(spectra)
for spectra in self.group_replicates(assays)
)
result = (x for x in result if x is not None)
if not return_generator:
result = list(result)
return result
def group_replicates(self, assays):
def get_key(assay):
return tuple(
str(k(assay) if callable(k) else assay.get(k, None))
for k in self.group_key
)
return (
list(v)
for k, v in itertools.groupby(
sorted(assays, key=get_key),
key=get_key
)
)
def combine_replicates(self, spectra):
if len(spectra) == 0:
return None
return spectra[0]
class BestReplicateAssayCombiner(AssayCombiner):
def __init__(self,
group_key=None,
score='score', higher_score_better=True):
super(BestReplicateAssayCombiner, self) \
.__init__(group_key=group_key)
self.score = score
self.higher_score_better = higher_score_better
def combine_replicates(self, spectra):
if len(spectra) == 0:
return None
score = [
spec['metadata'][self.score]
for spec in spectra
]
if self.higher_score_better:
index = np.argmax(score)
else:
index = np.argmin(score)
return spectra[index]
def glycopeptide_group_key(use_glycan_struct=True, use_glycan_site=True,
within_run=False):
if use_glycan_struct:
glycan_key = 'glycanStruct'
else:
def glycan_key(x):
x = x.get('glycanStruct', None)
return x and GlycanNode \
.from_str(x) \
.composition_str()
group_key = [
'peptideSequence',
lambda x: stringify_modification(x.get('modification', None)),
glycan_key,
'precursorCharge'
]
if use_glycan_site:
group_key.append('glycanSite')
if within_run:
def filename(x):
metadata = x.get('metadata', None)
if metadata is not None:
return metadata.get('file', None)
return None
group_key.insert(0, filename)
return group_key
| 3,108 |
hydro_flat.py
|
GEODE-Lab/compositeDEM
| 1 |
2172293
|
import sys
import osr
from demLib.spatial import Raster, Vector
from demLib.parser import HydroParser
'''
Script to flatten noisy lake surfaces in a raster DEM (.tif) using a boundary shapefile of the lakes.
usage: python hydro_flat.py [-h] [-mt MULTI_TILE_FILE] [-p PERCENTILE] [-minp MIN_PIXELS] [--verbose]
raster_infile raster_outfile hydro_shpfile
positional arguments:
raster_infile Input raster file name
raster_outfile Output raster file name
hydro_shpfile Shapefile of water bodies
optional arguments:
-h, --help show this help message and exit
--multi_tile_file MULTI_TILE_FILE, -mt MULTI_TILE_FILE
Shapefile with lakes spanning multiple tiles with stats as attributes (default: none)
--percentile PERCENTILE, -p PERCENTILE
Percentile value for final elevation of flat surface (default: 10)
--min_pixels MIN_PIXELS, -minp MIN_PIXELS
Minimum number of raster pixels inside a feature below which no flattening is desired
(default: 25)
--verbose, -v Display verbosity (default: False)
'''
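# Example invocation (hypothetical file names, flags as listed in the usage above):
#   python hydro_flat.py dem.tif dem_flat.tif lakes.shp -p 10 -minp 25 -v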
def main(raster_name,
out_raster_name,
hydro_file,
multi_tile_file,
pctl=10,
min_pixels=25,
verbose=False):
"""
Main function to run hydro flattening
:param raster_name: Raster filename with full path
:param out_raster_name: The output file to write the final raster
:param hydro_file: Shapefile of water body boundaries
:param multi_tile_file: Output file from multi_tile_hydro_attr.py,
file must contain stat attributes, geometry_id, and geometry
:param pctl: Percentile value to substitute (default: 10)
:param min_pixels: Number of minimum pixels for extraction (default: 25)
:param verbose: Display verbosity (Default: False)
:return: None
"""
# initialize objects
raster = Raster(filename=raster_name,
get_array=True)
raster_spref = osr.SpatialReference()
res = raster_spref.ImportFromWkt(raster.metadata['spref'])
hydro_vector = Vector(filename=hydro_file)
raster_bounds = raster.get_bounds(bounds_vector=True)
if multi_tile_file != 'none':
multi_tile_vec = Vector(multi_tile_file)
else:
multi_tile_vec = None
if verbose:
sys.stdout.write('Raster bounds vector: {}\n'.format(raster_bounds))
# find intersecting tile features
hydro_vector_reproj = hydro_vector.reproject(destination_spatial_ref=raster_spref,
_return=True)
if verbose:
sys.stdout.write(hydro_vector_reproj.__repr__())
sys.stdout.write("\n")
intersect_vector = hydro_vector_reproj.get_intersecting_vector(raster_bounds)
if verbose:
sys.stdout.write(intersect_vector.__repr__())
sys.stdout.write("\n")
# replace values by percentile
result = raster.vector_extract(intersect_vector,
pctl=pctl,
replace=True,
min_pixels=min_pixels)
if multi_tile_vec is not None:
multi_tile_vec_attr_keys = list(multi_tile_vec.attributes[0])
percentiles = sorted(list(int((key.split('_')[1]).strip()) for key in multi_tile_vec_attr_keys
if 'pctl' in key))
diff_from_val = list(abs(val - pctl) for val in percentiles)
nearest_idx = diff_from_val.index(min(diff_from_val))
pctl_attr = 'pctl_{}'.format(str(percentiles[nearest_idx]))
geom_idx_list = []
for multi_geom_idx in range(multi_tile_vec.nfeat):
for intersect_geom_idx in range(intersect_vector.nfeat):
if intersect_vector.attributes[intersect_geom_idx]['orig_id'] == \
multi_tile_vec.attributes[multi_geom_idx]['orig_id']:
geom_idx_list.append(multi_geom_idx)
break
if verbose:
sys.stdout.write("Found {} multi-tile geometries\n".format(str(len(geom_idx_list))))
if len(geom_idx_list) > 0:
for idx, geom_idx in enumerate(geom_idx_list):
if verbose:
sys.stdout.write("Processing multi-tile geometry {} of {}\n".format(str(idx + 1),
str(len(geom_idx_list))))
multi_vec = Vector(spref_str=raster_spref.ExportToWkt(),
geom_type=3,
in_memory=True)
multi_geom = Vector.get_osgeo_geom(multi_tile_vec.wktlist[geom_idx])
multi_vec.add_feat(multi_geom)
result = raster.vector_extract(multi_vec,
pctl=pctl,
min_pixels=min_pixels,
replace=True,
replace_val=multi_tile_vec.attributes[geom_idx][pctl_attr])
# write to disk
if verbose:
sys.stdout.write('\nWriting raster file: {}\n'.format(out_raster_name))
raster.write_raster(outfile=out_raster_name)
raster = tile_vector = tiles_vector = intersect_vector = None
if __name__ == '__main__':
args = HydroParser().parser.parse_args()
if args.verbose:
sys.stdout.write('\nHydro-flattening - {}\n'.format(args.raster_infile))
main(args.raster_infile,
args.raster_outfile,
args.hydro_shpfile,
args.multi_tile_file,
args.percentile,
args.min_pixels,
args.verbose)
if args.verbose:
sys.stdout.write('\n----------------------------------------------\n Done!\n')
| 6,000 |
code/Examples/RJObject_GalaxyField/display.py
|
tripathi/DNestD3SB
| 0 |
2172210
|
from pylab import *
import os
# Piecewise linear stretch
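# After normalisation to [0, 1], values above 0.1 are compressed by a factor of 20,
# e.g. the maximum (1.0) maps to 0.1 + 0.05*(1.0 - 0.1) = 0.145.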
def stretch(x):
y = x.copy()
y = (y - y.min())/(y.max() - y.min())
y[y > 0.1] = 0.1 + 0.05*(y[y > 0.1] - 0.1)
return y
saveFrames = False # For making movies
if saveFrames:
os.system('rm Frames/*.png')
posterior_sample = atleast_2d(loadtxt('posterior_sample.txt'))
data = loadtxt('Data/test_image.txt')
sig = loadtxt('Data/test_sigma.txt')
ion()
hold(False)
for i in range(0, posterior_sample.shape[0]):
img = posterior_sample[i, 0:200**2].reshape((200, 200))
subplot(1, 2, 1)
imshow(-stretch(img), cmap='gray')
title('Model {i}'.format(i=i))
gca().set_xticks([-0.5, 99.5, 199.5])
gca().set_yticks([-0.5, 99.5, 199.5])
gca().set_xticklabels(['-1', '0', '1'])
gca().set_yticklabels(['1', '0', '-1'])
subplot(1, 2, 2)
sigma = sqrt(sig**2 + posterior_sample[i,-2]**2)
imshow(-(img - data)/sigma, cmap='gray')
title('Standardised Residuals')
gca().set_xticks([-0.5, 99.5, 199.5])
gca().set_yticks([-0.5, 99.5, 199.5])
gca().set_xticklabels(['-1', '0', '1'])
gca().set_yticklabels(['1', '0', '-1'])
draw()
if saveFrames:
savefig('Frames/' + '%0.4d'%(i+1) + '.png', bbox_inches='tight')
print('Frames/' + '%0.4d'%(i+1) + '.png')
ioff()
show()
| 1,231 |
genetic_algorithm.py
|
Yairmendo/genetic_algorithm
| 0 |
2172087
|
import random
modelo = [1,2,3,4,5,5,4,3,2,1]
largo = 10
num = 20
pressure = 3
mutation_chance = 0.2
# Randomly create the characteristics (DNA) of each individual
def individual(min,max):
return[random.randint(min, max) for i in range(largo)]
# Generate the desired population (num)
def crearPoblacion():
return[individual(1,9) for i in range(num)]
# Compare each characteristic of the individual with its counterpart in the model and count the matches
def calcularFitness(individual):
fitness = 0
for i in range(len(individual)):
if individual[i] == modelo[i]:
fitness += 1
return fitness
def selection_and_reproduction(population):
# list of (fitness, individual) tuples for all individuals
puntuados = [ (calcularFitness(i), i) for i in population]
#print('Puntuados:\n{}'.format(puntuados))
# List sorted from lowest to highest fitness
puntuados = [i[1] for i in sorted(puntuados)]
#print('Puntuados2:\n{}'.format(puntuados))
population = puntuados
# selection of the best-scoring individuals (count = pressure)
selected = puntuados[(len(puntuados)-pressure):]
#print('selected:\n{}'.format(selected))
# reproduction: for each remaining element (population - selected):
# 1. two individuals are picked at random from the selected ones
# 2. a random number (punto) of characteristics is taken from the first individual (the beginning)
# 3. the remaining characteristics are taken from the second individual (the end)
# 4. an element of the population is replaced.
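# e.g. with largo = 10 and punto = 4 (hypothetical value), the new individual keeps
# characteristics 0-3 from padre[0] and 4-9 from padre[1]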
for i in range(len(population)-pressure):
punto = random.randint(1,largo-1)
padre = random.sample(selected, 2)
population[i][:punto] = padre[0][:punto]
population[i][punto:] = padre[1][punto:]
#print('Punto: {}\nPadres:\n{}\nNuevo individuo:\n{}'.format(punto, padre, population[i]))
return population
def mutation(population):
for i in range(len(population)-pressure):
# Randomly choose which individuals undergo a mutation.
if random.random() <= mutation_chance:
# choose a random position in the list of characteristics
punto = random.randint(0,largo-1)
# generate a new characteristic at random
nuevo_valor = random.randint(1,9)
# If the new value equals the existing value at the mutation
# point, random values are generated until it changes, then the
# new value is inserted.
while nuevo_valor == population[i][punto]:
nuevo_valor = random.randint(1,9)
population[i][punto] = nuevo_valor
return population
def main():
print("\n\Modelo: %s\n"%(modelo))
population = crearPoblacion()
print("Población Inicial:\n%s"%(population))
for i in range(100):
population = selection_and_reproduction(population)
population = mutation(population)
print("\nPoblación Final:\n%s"%(population))
print("\n\n")
if __name__ == '__main__':
main()
| 3,125 |
tests/core/integration/integration_test.py
|
jebabi/controllerx
| 0 |
2168689
|
import pytest
from core import integration as integration_module
from core.controller import Controller
from tests.utils import IntegrationMock, fake_controller, hass_mock
def test_get_integrations(fake_controller):
integrations = integration_module.get_integrations(fake_controller)
integrations.sort(key=lambda integration: integration.name)
integration_names = [i.name for i in integrations]
assert integration_names == sorted(["z2m", "zha", "deconz"])
| 477 |
examples/dashboard_with_module.py
|
NishantBaheti/graphpkg
| 1 |
2172557
|
import random
import datetime
import matplotlib
#matplotlib.use('Agg')
from graphpkg.live.dashboard import LiveDashboard
# plt.style.use('')
count1 = 0
cluster = 0.30
def func1():
return datetime.datetime.now(), [random.randrange(1, 10) , random.randrange(1, 10) ]
def func2():
return random.randrange(1, 100), [random.randrange(1, 10000) , random.randrange(1, 100), random.randrange(1, 100)]
def func3(*args):
#print(args)
return random.randrange(1, args[0]), [random.randrange(1, args[0]), random.randrange(1, 100)]
if __name__ == "__main__":
conf = {
"dashboard": "DASHBOARD1",
"plots": {
"trend": [
{
"func_for_data": func1,
"fig_spec": (3,3,(1,2)),
"interval": 500,
"title" : "trend plot1"
},
{
"func_for_data": func1,
"fig_spec": (3, 3, (4, 5)),
"interval" : 500,
"title" : "trend plot2"
},
{
"func_for_data": func1,
"fig_spec": (3, 3, (7, 8)),
"interval": 500,
"title": "trend plot3"
},
],
"scatter": [
{
"fig_spec" : (3, 3, 3),
"func_for_data" : func3,
"func_args": (1000,),
"interval" : 1000,
"title" : "other scatter plot",
"window": 500
},
{
"fig_spec" : (3, 3, 6),
"func_for_data" : func2,
"interval": 500,
"title" : "some scatter plot",
"window": 1000
},
{
"fig_spec": (3, 3, 9),
"func_for_data": func3,
"func_args": (1000,),
"interval" : 1000,
"title" : "other other scatter plot",
"window": 500
}
]
}
}
dash = LiveDashboard(config=conf)
dash.start()
matplotlib.pyplot.show()
| 2,294 |
virtool_workflow/abc/__init__.py
|
BlakeASmith/virtool-workflow
| 0 |
2167724
|
from .runtime.runtime import AbstractWorkflowEnvironment
__all__ = [
"AbstractWorkflowEnvironment",
]
| 107 |
applications/laboratorio/controllers/api.py
|
pedrofrancoribeiro/labteste
| 0 |
2172355
|
#coding: utf-8
def call():
"""
webservice that can return xml-rpc, soap and json
exposes services. for example:
http://..../[app]/default/call/jsonrpc
decorate with @services.jsonrpc the functions to expose
supports xml, json, xmlrpc, jsonrpc, amfrpc, rss, csv
"""
return service()
| 314 |
z/scripts/extra.py
|
iluminite/argh-examples
| 2 |
2169427
|
import argh
from argh.decorators import arg
from z.cli import cmd, load, dump
def l():
p = argh.ArghParser()
argh.set_default_command(p, load)
p.dispatch()
def d():
p = argh.ArghParser()
argh.set_default_command(p, dump)
p.dispatch()
def main():
p = argh.ArghParser()
p.add_commands([cmd, load, dump])
p.dispatch()
if __name__ == '__main__':
main()
| 399 |
litefeel/pycommon/math.py
|
litefeel/pycommon
| 1 |
2172048
|
"some function for math"
import math
from typing import SupportsFloat
def round(n: SupportsFloat) -> int:
return math.floor(float(n) + 0.5)
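# Note: this rounds halves up, e.g. round(2.5) -> 3, whereas Python 3's built-in
# round() uses banker's rounding and gives round(2.5) -> 2.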
| 159 |
observation/management/commands/delete_observations.py
|
bartromgens/waarnemingkaart
| 0 |
2172311
|
from django.core.management.base import BaseCommand
from observation.models import Observation
class Command(BaseCommand):
def handle(self, *args, **options):
Observation.objects.all().delete()
| 209 |
blog/forms.py
|
nuttawatso/Projectcs_momotalk
| 0 |
2172621
|
from django import forms
from .models import Posts
from .models import Comment
class PostsForm(forms.ModelForm):
class Meta:
widgets = {}
model = Posts
path = forms.CharField(required=False)
fields = {'title','category','description','picture','pic_name' }
widgets = {
'picture': forms.HiddenInput(),
'pic_name': forms.HiddenInput(),
'title' : forms.TextInput(attrs={'style': 'border-color:darkgoldenrod; border-radius: 35px;', 'placeholder':'หัวข้อโพสต์'})
}
labels = {
'title':'',
'category':'',
'description':'',
}
description = forms.CharField(label ="", widget = forms.Textarea(
attrs ={
'class':'form-control',
'placeholder':'รายละเอียด',
'rows':3,
}))
class CommentForm(forms.ModelForm):
class Meta:
widgets = {}
model = Comment
fields = {'content','picture','pic_name'}
widgets = {'picture': forms.HiddenInput(),'pic_name': forms.HiddenInput()}
content = forms.CharField(label ="", widget = forms.Textarea(
attrs ={
'class':'form-control',
'placeholder':'Comment ',
'rows':5,
'cols':9,
}))
| 1,333 |
docbook/patients/forms.py
|
dhruvs19/docbook
| 0 |
2172510
|
from django import forms
from django.forms import TextInput
from django.contrib.auth.models import User
from django.forms import ModelForm, widgets, DateField
from .models import Diagnosis, Patients
class PatientsForm(ModelForm):
GROUPS = (
('','Blood Group'),
('A+', 'A+'),
('A-', 'A-'),
('B+', 'B+'),
('B-', 'B-'),
('O+', 'O+'),
('O-', 'O-'),
('AB+', 'AB+'),
('AB-', 'AB-'),
)
BloodGroup = forms.ChoiceField(required=True, choices = GROUPS)
class Meta:
model = Patients
fields = ['ProfilePicture', 'FirstName', 'LastName', 'Address', 'PhoneNumber', 'DOB', 'BloodGroup']
widgets = {
'DOB': widgets.DateInput(attrs = { 'type': 'date' }),
'ProfilePicture': widgets.FileInput(),
}
labels = {
"ProfilePicture": "Change Profile Picture",
"FirstName": "<NAME>",
"LastName": "<NAME>",
"Phonenumber": "Phone number",
"DOB": "Date of Birth",
"BloodGroup": "Select Blood Group"
}
def __init__(self, *args, **kwargs):
super(PatientsForm, self).__init__(*args, **kwargs)
for visible in self.visible_fields():
visible.field.widget.attrs['class'] = 'form-control floating'
if visible.field.widget.input_type == "select":
visible.field.widget.attrs['class'] = "form-select form-control floating"
class DiagnosisForm(ModelForm):
TestTypes = (
('','Select Diagnosis Type'),
('X-Ray','X-Ray'),
('Complete Blood Count','Complete blood count'),
('Vitamin D Test', 'Vitamin D Test'),
('PULS (Protein Unstable Lesion Signature Test) Cardiac Test' ,'PULS (Protein Unstable Lesion Signature Test) Cardiac Test'),
('ABPM','ABPM')
)
DiagnosisName = forms.ChoiceField(required=True, choices = TestTypes)
class Meta:
model = Diagnosis
fields = ['DiagnosisName', 'Document']
widgets = {
'Document': widgets.FileInput(),
}
labels = {
"Document" : "Upload Diagnosis Document"
}
def __init__(self, *args, **kwargs):
super(DiagnosisForm, self).__init__(*args, **kwargs)
for visible in self.visible_fields():
visible.field.widget.attrs['class'] = 'form-control floating'
if visible.field.widget.input_type == "select":
visible.field.widget.attrs['class'] = "form-select form-control floating"
| 2,595 |
mcoding_bot/cogs/starboard.py
|
Endercheif/mCodingBot
| 4 |
2172070
|
from __future__ import annotations
from typing import TYPE_CHECKING, TypeVar
import asyncpg
from pincer import Client
import pincer
from pincer.objects import (
MessageReactionAddEvent, MessageReactionRemoveEvent, UserMessage, Embed
)
from pincer.utils.types import MissingType, APINullable
from mcoding_bot.database import Star, Message
if TYPE_CHECKING:
from mcoding_bot.bot import Bot
_T = TypeVar("_T")
def _obj_or_none(obj: APINullable[_T]) -> _T | None:
if isinstance(obj, MissingType):
return None
return obj
async def _orig_message(msg_id: int) -> Message | None:
if (message := await Message.exists(sb_msg_id=msg_id)):
return message
return await Message.exists(id=msg_id)
def embed_message(
msg: UserMessage, points: int, bot: Bot
) -> tuple[str, Embed]:
embed = Embed(
description=_obj_or_none(msg.content) or "",
color=bot.theme,
).set_author(
icon_url=msg.author.get_avatar_url(), # type: ignore
name=msg.author.username, # type: ignore
url="https://pincermademe.dothis",
).add_field(
"",
f"[Go to Message](https://discord.com/channels/"
f"{bot.config.mcoding_server}/{msg.channel_id}/{msg.id})"
)
if (attachments := _obj_or_none(msg.attachments)) is not None and len(attachments) > 0:
embed.set_image(attachments[0].url)
embed.description = embed.description or f"*{attachments[0].filename}*"
elif not embed.description:
embed.description = "*nothing*"
return f"⭐ **{points} |** <#{msg.channel_id}>", embed
async def _refresh_message(bot: Bot, message: Message):
# get starcount
points = await Star.fetch_query().where(message_id=message.id).count()
message.last_known_star_count = points
# get action
action: bool | None = None
if points >= bot.config.required_stars:
action = True
elif points == 0:
action = False
# get the starboard message
orig = await bot.cache.gof_message(message.id, message.channel_id)
if not orig:
return
sbmsg = None
if message.sb_msg_id is not None:
sbmsg = await bot.cache.gof_message(
message.sb_msg_id, bot.config.starboard_id
)
if not sbmsg:
message.sb_msg_id = None
# update
if sbmsg is None and action is True:
starboard = await bot.cache.gof_channel(bot.config.starboard_id)
if not starboard:
return
content, embed = embed_message(orig, points, bot)
sbmsg = await starboard.send(
pincer.objects.Message(
content=content,
embeds=[embed],
)
)
assert sbmsg
await sbmsg.react("⭐")
message.sb_msg_id = sbmsg.id
elif sbmsg is not None:
if action is False:
await sbmsg.delete()
message.sb_msg_id = None
else:
content, embed = embed_message(orig, points, bot)
await sbmsg.edit(
content=content,
embeds=[embed],
)
await message.save()
class Starboard:
def __init__(self, client: Bot):
self.client = client
self.refreshing: set[int] = set()
async def refresh_message(self, message: Message):
if message.id in self.refreshing:
return
self.refreshing.add(message.id)
try:
await _refresh_message(self.client, message)
finally:
self.refreshing.remove(message.id)
@Client.event
async def on_message_reaction_add(self, event: MessageReactionAddEvent):
if (member := _obj_or_none(event.member)) is None:
return
if (user := _obj_or_none(member.user)) is None:
user = await self.client.cache.gof_user(event.user_id)
if not user:
return
if bool(user.bot):
return
if event.emoji.name != "⭐":
return
orig = await _orig_message(event.message_id)
if not orig:
obj = await self.client.cache.gof_message(
event.message_id, event.channel_id
)
if not obj:
return
if (author := _obj_or_none(obj.author)) is None:
return
orig = await Message(
id=event.message_id,
channel_id=event.channel_id,
author_id=author.id,
).create()
assert self.client.bot is not None
if (
orig.author_id == self.client.bot.id
and orig.channel_id == self.client.config.starboard_id
):
# prevents old starboard messages from reposting
return
if orig.author_id == event.user_id:
# no self stars
return
try:
await Star(message_id=orig.id, user_id=event.user_id).create()
except asyncpg.UniqueViolationError:
# forgiveness, not permission
# besides, Star.exists() is async so it might fail anyways
pass
await self.refresh_message(orig)
@Client.event
async def on_message_reaction_remove(
self, event: MessageReactionRemoveEvent
):
orig = await _orig_message(event.message_id)
if not orig:
return
await Star.delete_query().where(
message_id=orig.id, user_id=event.user_id
).execute()
await self.refresh_message(orig)
setup = Starboard
| 5,552 |
watersheds/ws_anisotropic_distance_transform.py
|
constantinpape/watersheds
| 0 |
2170893
|
import vigra
import numpy as np
from wsdt import group_seeds_by_distance, iterative_inplace_watershed
def signed_anisotropic_dt(
pmap,
threshold,
anisotropy,
preserve_membrane_pmaps
):
binary_membranes = (pmap >= threshold).astype('uint32')
distance_to_membrane = vigra.filters.distanceTransform(
binary_membranes,
pixel_pitch = [anisotropy, 1., 1.])
if preserve_membrane_pmaps:
# Instead of computing a negative distance transform within the thresholded membrane areas,
# Use the original probabilities (but inverted)
membrane_mask = binary_membranes.astype(np.bool)
distance_to_membrane[membrane_mask] = -pmap[membrane_mask]
else:
# Save RAM with a sneaky trick:
# Use distanceTransform in-place, despite the fact that the input and output don't have the same types!
# (We can just cast labeled as a float32, since uint32 and float32 are the same size.)
distance_to_nonmembrane = binary_membranes.view('float32')
vigra.filters.distanceTransform(
binary_membranes,
background=False,
out=distance_to_nonmembrane,
pixel_pitch = [anisotropy, 1., 1.])
# Combine the inner/outer distance transforms
distance_to_nonmembrane[distance_to_nonmembrane>0] -= 1
distance_to_membrane[:] -= distance_to_nonmembrane
return distance_to_membrane
def anisotropic_seeds(
distance_to_membrane,
anisotropy,
sigma_seeds,
group_seeds
):
seeds = np.zeros_like(distance_to_membrane, dtype = 'uint32')
seed_map = vigra.filters.gaussianSmoothing(distance_to_membrane, (1. / anisotropy, 1., 1.) )
for z in range(distance_to_membrane.shape[0]):
seeds_z = vigra.analysis.localMaxima(seed_map[z], allowPlateaus=True, allowAtBorder=True, marker=np.nan)
if group_seeds:
seeds_z = group_seeds_by_distance( seeds_z, distance_to_membrane[z])
else:
seeds_z = vigra.analysis.labelMultiArrayWithBackground(seeds_z)
seeds[z] = seeds_z
return seeds
def ws_anisotropic_distance_transform(
pmap,
threshold,
anisotropy,
sigma_seeds,
sigma_weights = 0.,
min_segment_size = 0,
preserve_membrane_pmaps = True,
grow_on_pmap = True,
group_seeds = False
):
"""
Watershed on anisotropic distance transform of a 3d probability map.
@params:
pmap: probability map, 3d numpy.ndarray of type float32.
threshold: threshold for pixels that are considered in distance transform.
anisotropy: anisotropy factor along the z axis.
sigma_seeds: smoothing factor for distance transform used for finding seeds.
sigma_weights: smoothing factor for height map used for the watershed (default 0.).
min_segment_size: size filter for resulting segments (default 0 -> no size filtering).
preserve_membrane_pmaps: keep the inverted membrane probabilities inside thresholded membrane areas instead of a negative distance transform (default: True).
grow_on_pmap: grow on the probability map instead of distance transform (default: True).
group_seeds: use heuristics to group adjacent seeds (default: False).
@returns:
fragments: numpy.ndarray of type uint32
n_labels: number of labels
"""
# make sure we are in 3d and that first axis is z
assert pmap.ndim == 3
shape = pmap.shape
assert shape[0] < shape[1] and shape[0] < shape[2]
distance_to_membrane = signed_anisotropic_dt(pmap, threshold, anisotropy, preserve_membrane_pmaps)
seeds = anisotropic_seeds(distance_to_membrane, anisotropy, sigma_seeds, group_seeds)
if grow_on_pmap:
hmap = pmap
else:
hmap = distance_to_membrane
# Invert the DT: Watershed code requires seeds to be at minimums, not maximums
hmap[:] *= -1
if sigma_weights != 0.:
hmap = vigra.filters.gaussianSmoothing(hmap, ( 1. / sigma_weights ) )
offset = 0
for z in range(shape[0]):
max_z = iterative_inplace_watershed(hmap[z], seeds[z], min_segment_size, None)
seeds[z] -= 1
seeds[z] += offset
# TODO make sure that this does not cause a label overlap by one between adjacent slices
offset += max_z
return seeds, offset
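# Minimal usage sketch (hypothetical shapes and parameter values; assumes a 3d
# float32 probability map with z as the first, anisotropic axis):
#
#   import numpy as np
#   pmap = np.random.rand(20, 512, 512).astype('float32')
#   fragments, n_labels = ws_anisotropic_distance_transform(
#       pmap, threshold=0.5, anisotropy=10., sigma_seeds=2.)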
| 4,362 |
common/migrations/0010_apisettings.py
|
exenin/Django-CRM
| 2 |
2171142
|
# Generated by Django 2.1.5 on 2019-02-13 13:09
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accounts', '0006_auto_20190212_1708'),
('common', '0009_document_shared_to'),
]
operations = [
migrations.CreateModel(
name='APISettings',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=1000)),
('apikey', models.CharField(default='<KEY>', max_length=16)),
('created_on', models.DateTimeField(auto_now_add=True)),
('created_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='settings_created_by', to=settings.AUTH_USER_MODEL)),
('lead_assigned_to', models.ManyToManyField(related_name='lead_assignee_users', to=settings.AUTH_USER_MODEL)),
('tags', models.ManyToManyField(blank=True, to='accounts.Tags')),
],
),
]
| 1,168 |
simphys/links.py
|
euzeb73/simu-phys
| 0 |
2171624
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 28 14:16:59 2020
@author: jimen
"""
import numpy as np
import pygame
from .functions import norm
from .linksforms import LinkForm, SpringForm
#######
# Linktypes #### TO REMOVE!
# 0 no link
# 1 LinkRigid
# 10 LinkcsteF
# 100 LinkSpring
# 1000 etc...
# TODO remove the arrays and use pygame vec2 instead
class Link():
def __init__(self, m1, m2):
'''Basic link'''
self.mass1 = m1
self.mass2 = m2
# register the link and the mass number for this link
self.mass1.linklist.append((self, 1))
self.mass2.linklist.append((self, 2))
self.rigid = False
self.linkForm = LinkForm(self)
self.init_dict()
self.update()
def init_dict(self):
self.dico = dict()
self.dico['type'] = Link
self.dico['visible'] = False
def update(self):
pass
def draw(self, screen):
self.linkForm.draw(screen)
class LinkRigid(Link):
def __init__(self, m1, m2):
''' Class written for ONE mass at each end of the rod and no more.
TODO: generalisation... '''
self.linktype = 1
self.length = norm(m1.OM-m2.OM)
self.force1 = pygame.math.Vector2((0, 0))
self.force2 = pygame.math.Vector2((0, 0))
super().__init__(m1, m2)
self.linkForm.visible = True
self.rigid = True
self.correctCI() # readjust the velocities if they are not compatible with a rigid rod
self.update()
def init_dict(self):
self.dico = dict()
self.dico['type'] = LinkRigid
self.dico['visible'] = True
def correctCI(self):
'''Recompute vG and omega from v1 and v2;
removes inconsistencies in the velocities.
TODO: avoid this by initialising the forces once from the initial conditions '''
# for now we do not do it
pass
m1 = self.mass1.m
m2 = self.mass2.m
mT = m1+m2
x1 = self.mass1.OM
x2 = self.mass2.OM
v1 = self.mass1.v
v2 = self.mass2.v
xG = m1*x1/mT+m2*x2/mT
vG = m1*v1/mT+m2*v2/mT
u = pygame.math.Vector2.normalize(x2-x1) # uM1M2
# directly orthogonal unit vector
uortho = pygame.math.Vector2(-u[1], u[0])
# "Corrige" les vitesses
self.mass1.v += (-v1.dot(u)+vG.dot(u))*u
self.mass2.v += (-v2.dot(u)+vG.dot(u))*u
v1 = self.mass1.v
v2 = self.mass2.v
# Compute omega
w = (v2.dot(uortho)-v1.dot(uortho))/(norm(x1-xG)+norm(x2-xG))
# Correct the velocities in the other direction
self.mass1.v += (-v1.dot(uortho)+vG.dot(uortho)-norm(x1-xG)*w)*uortho
self.mass2.v += (-v2.dot(uortho)+vG.dot(uortho)+norm(x2-xG)*w)*uortho
v1 = self.mass1.v
v2 = self.mass2.v
# Recompute vG and w
self.vG = m1*v1/mT+m2*v2/mT
self.w = (v2.dot(uortho)-v1.dot(uortho))/(norm(x1-xG)+norm(x2-xG))
def update(self):
pass
# NOT very useful apparently, if the forces are computed correctly
m1 = self.mass1.m
m2 = self.mass2.m
mT = m1+m2
x1 = self.mass1.OM
x2 = self.mass2.OM
taille = norm(x2-x1)
# The excess to remove is split between the two masses in proportion to the other's mass
u = pygame.math.Vector2.normalize(x2-x1) # uM1M2
self.mass1.OM = x1+(m2*(taille-self.length)/mT)*u
self.mass2.OM = x2-(m1*(taille-self.length)/mT)*u
class LinkCsteF(Link):
def __init__(self, m1, m2, F=[0,0]):
'''Link with a constant force'''
self.linktype = 10
self.force1 = pygame.math.Vector2(F)
self.force2 = -self.force1
super().__init__(m1, m2)
self.rigid = False
def init_dict(self):
self.dico = dict()
self.dico['type'] = LinkCsteF
self.dico['visible'] = False
self.dico['force'] = self.force1
class LinkSpring(Link):
def __init__(self, m1, m2, k=1, l0=1):
'''Spring link'''
self.linktype = 100
self.k = k
self.l0 = l0
super().__init__(m1, m2)
self.linkForm = SpringForm(self)
self.rigid = False
def init_dict(self):
self.dico = dict()
self.dico['type'] = LinkSpring
self.dico['visible'] = True
self.dico['k'] = self.k
self.dico['l0'] = self.l0
def update(self):
x1 = self.mass1.OM
x2 = self.mass2.OM
l = norm(x1-x2)
# unit vector in the M2M1 direction
uM2M1 = pygame.math.Vector2.normalize(x1-x2)
self.force1 = -self.k*(l-self.l0)*uM2M1 # force on mass 1
self.force2 = -self.force1 # Newton
| 4,907 |
Modulo 2/042.py
|
thiago19maciel/Exercicios-em-Python
| 1 |
2171757
|
# Redo CHALLENGE 35 on triangles,
# adding the feature of showing what type of triangle will be formed:
a = float(input('LADO A: '))
b = float(input('LADO B: '))
c = float(input('LADO C: '))
# – EQUILATERAL: all sides equal
if a == b == c:
print('3 LADOS IGUAIS. VOCE TEM UM EQUILÁTERO')
# – ISOSCELES: two sides equal, one different
if (a == b != c) or (a == c != b) or (b == c != a):
print('2 LADOS IGUAIS E UM DIFERENTE. VOCE TEM UM ISÓSCELES')
# – SCALENE: all sides different
if a != b != c != a:
print('TODOS OS LADOS DIFERENTES. VOCE TEM UM ESCALENO')
| 591 |
Google-Foobar-Challenge/bunny_prisoner_locating.py
|
AbhiSaphire/Codechef.Practice
| 27 |
2172237
|
# Bunny - Prisoner - Locating solved in 3 hours and 40 mins
# Task was to return element at a certain place in a given pattern
# 16
# 11 17
# 07 12 18
# 04 08 13 19
# 02 05 09 14 20
# 01 03 06 10 21
# Tried solving this question by making a binary tree (it was quite lengthy), then my brother came up with this simple arithmetic progression solution.
# First we find the diagonal corner for the searched element, then move along the diagonal, reducing by 1 at a time, to reach the searched element.
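# Worked example: solution(3, 2) -> diagonal_difference = 1, element_row = 3 + 1 = 4,
# value = 4 * 5 // 2 - 1 = 9, which matches the entry in column 3, row 2 of the pattern above.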
def solution(x, y):
diagonal_difference = y - 1
element_row = x + diagonal_difference
value = element_row * (element_row + 1) // 2
value -= diagonal_difference
return str(value)
print(solution(10, 10))
print(solution(10000, 10000))
print(solution(1, 1))
print(solution(1, 10000))
| 776 |
test.py
|
fractalego/morph_classifier
| 0 |
2172117
|
from model import NameClassifier as Model
if __name__ == '__main__':
model = Model.load('model.nn')
file = open('data/test_names.txt')
lines = file.readlines()
tot_names = float(len(lines))
num_classified_names = 0
for line in lines:
if model.classify(line):
num_classified_names += 1
print('Names classified correctly from the test set: %.1f%%' % (num_classified_names/tot_names*100))
file.close()
file = open('data/test_non_names.txt')
lines = file.readlines()
tot_non_names = float(len(lines))
num_classified_non_names = 0
for line in lines:
if not model.classify(line) :
num_classified_non_names += 1
print('Non names classified correctly from the test set: %.1f%%' % (num_classified_non_names/tot_non_names*100))
file.close()
| 837 |
tmp/mnist_peer_params.py
|
YLJALDC/spacy_ray_example
| 0 |
2172549
|
import copy
import typer
import ray
import time
from timeit import default_timer as timer
from datetime import timedelta
import ml_datasets
from thinc_worker import ThincWorker
from thinc.api import Model
from thinc.types import Floats2d
# This config data is passed into the workers, so that they can then
# create the objects.
CONFIG = {
"optimizer": {
"@optimizers": "Adam.v1",
"learn_rate": 0.001
},
"train_data": {
"@datasets": "mnist_train_batches.v1",
"worker_id": None,
"num_workers": None,
"batch_size": None
},
"dev_data": {
"@datasets": "mnist_dev_batches.v1",
"batch_size": None
}
}
def make_model(n_hidden: int, depth: int, dropout: float) -> Model[Floats2d, Floats2d]:
from thinc.api import chain, clone, Relu, Softmax
return chain(
clone(Relu(nO=n_hidden, dropout=dropout), depth),
Softmax()
)
def main(
n_hidden: int = 256,
depth: int = 2,
dropout: float = 0.2,
n_iter: int = 10,
batch_size: int = 64,
n_epoch: int=10,
quorum: int=1,
n_workers: int=2
):
model = make_model(n_hidden, depth, dropout)
CONFIG["train_data"]["batch_size"] = batch_size
CONFIG["dev_data"]["batch_size"] = batch_size
if quorum is None:
quorum = n_workers
ray.init(lru_evict=True)
workers = []
print("Add workers and model")
Worker = ray.remote(ThincWorker)
for i in range(n_workers):
config = copy.deepcopy(CONFIG)
config["train_data"]["worker_id"] = i
config["train_data"]["num_workers"] = n_workers
worker = Worker.remote(
config,
rank=i,
num_workers=n_workers,
ray=ray
)
ray.get(worker.add_model.remote(model))
workers.append(worker)
for worker in workers:
ray.get(worker.set_proxy.remote(workers, quorum))
for worker in workers:
ray.get(worker.sync_params.remote())
print("Train")
for i in range(n_epoch):
start = timer()
for worker in workers:
ray.get(worker.train_epoch.remote())
todo = list(workers)
while todo:
time.sleep(1)
todo = [w for w in workers if ray.get(w.is_running.remote())]
end = timer()
duration = timedelta(seconds=int(end - start))
grads_usage = [ray.get(w.get_percent_grads_used.remote()) for w in workers]
print(duration, i, ray.get(workers[0].evaluate.remote()), grads_usage)
if __name__ == "__main__":
typer.run(main)
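# Example invocation (typer derives the option names from the parameters above,
# converting underscores to dashes; the values shown are illustrative):
#   python mnist_peer_params.py --n-workers 2 --batch-size 64 --n-epoch 10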
| 2,577 |
AER/Scripts/DataProcess/DataReader.py
|
LeBenchmark/Interspeech2021
| 48 |
2171485
|
import os, glob
import pandas as pd
import numpy as np
import json
import random
class DataReader():
'''
Read Data Set based on a json file.
'''
def __init__(self, jsonPath, targetFunc=None, onlineFeat=False, resampleTarget=False):
super(DataReader, self).__init__()
self.jsonPath = jsonPath
self.DatasetsPath = os.path.dirname(jsonPath)
        self.dataReaderType = "Classic" # Allows different ways of reading data (through a setter function) for different intended purposes, e.g. classic train-dev-test feat->annot, end-to-end learning (wav->annot), autoencoders (feat->feat), or other mappings such as feat1->feat2, ...
self.targetFunc = targetFunc
self.onlineFeat = onlineFeat
self.resampleTarget = resampleTarget
self.cuda = False
with open(jsonPath, 'r') as jsonFile:
self.data = json.load(jsonFile)
def getModelFeat(self, featModelPath, normalised=False, maxDur=29.98, cuda=False):
import fairseq
model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([featModelPath])
self.featModel = model[0]
self.cuda = cuda
if cuda: self.featModel = self.featModel.cuda()
self.featModel.eval()
self.layerNormed = normalised
self.maxDur = maxDur
def getPartition(self, partition):
dataPart = {}
self.partition = partition
for ID, sample in self.data.items():
if sample["partition"] == partition:
dataPart[ID] = sample
return dataPart
def keepOneOnly(self):
blackKeys = []
for i, key in enumerate(self.dataPart.keys()):
if i==0: continue
blackKeys.append(key)
[self.dataPart.pop(key) for key in blackKeys]
def limitData(self, limit=1):
self.dataPart = self.getPartition(self.partition)
dataPart = {}
whiteKeys = random.sample(self.dataPart.keys(), limit)
for i, key in enumerate(whiteKeys):
dataPart[key] = self.dataPart[key].copy()
self.dataPart = dataPart
def setDatasetClassic(self, partition, feat, annot):
        # pass annot as None if there is no annotation -> the target will then be the same as feat; other formats can be defined with other kinds of setDataset funcs
self.dataReaderType = "Classic"
self.dataPart = self.getPartition(partition)
self.feat = feat
self.annot = annot
def setDatasetFeatOnly(self, partition, feat):
        # feature-only reading: __getitem__ returns (ID, inputs) and no annotation target is used
self.dataReaderType = "FeatOnly"
self.dataPart = self.getPartition(partition)
self.feat = feat
def setDatasetAnnotOnly(self, partition, annot):
        # annotation-only reading: __getitem__ returns (ID, targets) and no feature input is used
self.dataReaderType = "AnnotOnly"
self.dataPart = self.getPartition(partition)
self.annot = annot
def inputReader(self, ID):
if self.onlineFeat:
fullPath = os.path.join(self.DatasetsPath, self.dataPart[ID]["path"])
if "MFB" in self.feat:
from DataProcess.MelFilterBank import getFeatsFromWav
# print("fullPath", fullPath)
inputs = getFeatsFromWav(fullPath, winlen=0.025, winstep=0.01)
if "wav2vec2" in self.feat:
from DataProcess.wav2vec2 import getFeatsFromAudio
inputs = getFeatsFromAudio(fullPath, self.featModel, self.maxDur, self.layerNormed, cuda=self.cuda)
if "standardized" in self.feat:
inputs = (inputs - np.mean(inputs)) / np.std(inputs)
else:
feats = self.dataPart[ID]["features"]
feat = feats[self.feat]
path = feat["path"]
dimension = feat["dimension"][-1]
headers = ["feat_"+str(i) for i in range(dimension)]
inputs = self.csvReader(path, headers)
return inputs
def targetReader(self, ID):
annots = self.dataPart[ID]["annotations"]
annot = annots[self.annot]
path = annot["path"]
headers = annot["headers"]
targets = self.csvReader(path, headers)
if not self.targetFunc is None: targets = self.targetFunc(targets)
return targets
def csvReader(self, filePath, headers, standardize=False):
fullPath = os.path.join(self.DatasetsPath, filePath) #filePath.replace("./", "")
# print(fullPath)
df = pd.read_csv(fullPath)
outs = []
for header in headers:
out = df[header].to_numpy()
if standardize: out = (out - out.mean(axis=0)) / out.std(axis=0)
out = np.expand_dims(out, axis=1)
outs.append(out)
outs = np.concatenate(outs, 1)
return outs
@staticmethod
def getMeanStd(vector):
# print("vector.shape",vector.shape)
mean = np.mean(vector, 0)
std = np.std(vector, 0)
out = np.array([mean, std]).reshape(vector.shape[1], -1)
# print("out.shape",out.shape)
return out
def setTargetMeanStd(self):
self.targetFunc = DataReader.getMeanStd
def __len__(self):
return len(self.dataPart)
def __getitem__(self, idx):
ID = list(self.dataPart.keys())[idx]
if self.dataReaderType == "Classic":
inputs = self.inputReader(ID)
targets = self.targetReader(ID)
if self.resampleTarget:
from Utils.Funcs import reshapeMatrix
targets = reshapeMatrix(targets, len(inputs))
# print(inputs.shape, targets.shape)
return inputs, targets
elif self.dataReaderType == "FeatOnly":
inputs = self.inputReader(ID)
return ID, inputs
elif self.dataReaderType == "AnnotOnly":
targets = self.targetReader(ID)
return ID, targets
else:
print("Please set the dataset first")
# dataset = DataReader("/Users/sinaalisamir/Documents/Datasets/RECOLA_46_P/data.json")
# dataset.setDatasetClassic("dev", "MFB", "gs_arousal_0.01")
# feat, tar = dataset[0]
# print(feat.shape, tar.shape)
| 6,380 |
run.py
|
chevah/txghserf
| 3 |
2170078
|
"""
Sample entry point for the server.
"""
from txghserf.server import CONFIGURATION, resource
# Keep the linter quiet about the unused import.
resource
def handle_event(event):
"""
Custom code handling events.
"""
print event
CONFIGURATION['callback'] = handle_event
| 257 |
test/vanilla/low-level/Expected/AcceptanceTests/HttpLowLevel/httpinfrastructurelowlevel/rest/http_client_failure/__init__.py
|
cfculhane/autorest.python
| 35 |
2172438
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._request_builders_py3 import build_head400_request
from ._request_builders_py3 import build_get400_request
from ._request_builders_py3 import build_options400_request
from ._request_builders_py3 import build_put400_request
from ._request_builders_py3 import build_patch400_request
from ._request_builders_py3 import build_post400_request
from ._request_builders_py3 import build_delete400_request
from ._request_builders_py3 import build_head401_request
from ._request_builders_py3 import build_get402_request
from ._request_builders_py3 import build_options403_request
from ._request_builders_py3 import build_get403_request
from ._request_builders_py3 import build_put404_request
from ._request_builders_py3 import build_patch405_request
from ._request_builders_py3 import build_post406_request
from ._request_builders_py3 import build_delete407_request
from ._request_builders_py3 import build_put409_request
from ._request_builders_py3 import build_head410_request
from ._request_builders_py3 import build_get411_request
from ._request_builders_py3 import build_options412_request
from ._request_builders_py3 import build_get412_request
from ._request_builders_py3 import build_put413_request
from ._request_builders_py3 import build_patch414_request
from ._request_builders_py3 import build_post415_request
from ._request_builders_py3 import build_get416_request
from ._request_builders_py3 import build_delete417_request
from ._request_builders_py3 import build_head429_request
except (SyntaxError, ImportError):
from ._request_builders import build_head400_request # type: ignore
from ._request_builders import build_get400_request # type: ignore
from ._request_builders import build_options400_request # type: ignore
from ._request_builders import build_put400_request # type: ignore
from ._request_builders import build_patch400_request # type: ignore
from ._request_builders import build_post400_request # type: ignore
from ._request_builders import build_delete400_request # type: ignore
from ._request_builders import build_head401_request # type: ignore
from ._request_builders import build_get402_request # type: ignore
from ._request_builders import build_options403_request # type: ignore
from ._request_builders import build_get403_request # type: ignore
from ._request_builders import build_put404_request # type: ignore
from ._request_builders import build_patch405_request # type: ignore
from ._request_builders import build_post406_request # type: ignore
from ._request_builders import build_delete407_request # type: ignore
from ._request_builders import build_put409_request # type: ignore
from ._request_builders import build_head410_request # type: ignore
from ._request_builders import build_get411_request # type: ignore
from ._request_builders import build_options412_request # type: ignore
from ._request_builders import build_get412_request # type: ignore
from ._request_builders import build_put413_request # type: ignore
from ._request_builders import build_patch414_request # type: ignore
from ._request_builders import build_post415_request # type: ignore
from ._request_builders import build_get416_request # type: ignore
from ._request_builders import build_delete417_request # type: ignore
from ._request_builders import build_head429_request # type: ignore
__all__ = [
"build_head400_request",
"build_get400_request",
"build_options400_request",
"build_put400_request",
"build_patch400_request",
"build_post400_request",
"build_delete400_request",
"build_head401_request",
"build_get402_request",
"build_options403_request",
"build_get403_request",
"build_put404_request",
"build_patch405_request",
"build_post406_request",
"build_delete407_request",
"build_put409_request",
"build_head410_request",
"build_get411_request",
"build_options412_request",
"build_get412_request",
"build_put413_request",
"build_patch414_request",
"build_post415_request",
"build_get416_request",
"build_delete417_request",
"build_head429_request",
]
| 4,785 |
Library_Management/library_management/api/urls.py
|
pankesh18/web_dev_for_info_system
| 0 |
2172500
|
from django.urls import path
from . import views
urlpatterns = [
path('getdata',views.getData)
]
| 106 |
lib/clckwrkbdgr/test/test_pyshell.py
|
umi0451/dotfiles
| 2 |
2172633
|
import os, sys, platform
import time
import unittest
unittest.defaultTestLoader.testMethodPrefix = 'should'
import contextlib
try: # pragma: no cover
from pathlib2 import Path
except ImportError: # pragma: no cover
from pathlib import Path
import clckwrkbdgr.fs
import clckwrkbdgr.pyshell as pyshell
from clckwrkbdgr.pyshell import sh
pyshell._unmonkeypatch_sys_exit()
class TestUtils(unittest.TestCase):
def should_expand_lists_in_args(self):
self.assertEqual(list(pyshell.expand_lists([])), [])
self.assertEqual(list(pyshell.expand_lists(['a', 'b', 1, None])), ['a', 'b', 1, None])
self.assertEqual(list(pyshell.expand_lists(['a', ['b', 'c'], 1, None])), ['a', 'b', 'c', 1, None])
class TestReturnCode(unittest.TestCase):
def should_convert_returncode_to_string(self):
self.assertEqual(str(pyshell.ReturnCode(0)), '0')
self.assertEqual(repr(pyshell.ReturnCode(0)), 'ReturnCode(0)')
def should_treat_returncode_as_bool(self):
rc = pyshell.ReturnCode(0)
self.assertTrue(bool(rc))
self.assertTrue(rc)
rc = pyshell.ReturnCode()
self.assertTrue(bool(rc))
self.assertTrue(rc)
rc = pyshell.ReturnCode(1)
self.assertFalse(bool(rc))
self.assertFalse(rc)
def should_treat_returncode_as_int(self):
rc = pyshell.ReturnCode(0)
self.assertEqual(int(rc), 0)
self.assertEqual(rc, 0)
rc = pyshell.ReturnCode()
self.assertEqual(int(rc), 0)
self.assertEqual(rc, 0)
rc = pyshell.ReturnCode(-1)
self.assertEqual(int(rc), -1)
self.assertEqual(rc, -1)
def should_compare_returncodes(self):
self.assertEqual(pyshell.ReturnCode(-1), -1)
self.assertEqual(pyshell.ReturnCode(-1), False)
self.assertEqual(pyshell.ReturnCode(0), 0)
self.assertEqual(pyshell.ReturnCode(0), True)
self.assertNotEqual(pyshell.ReturnCode(-1), 0)
self.assertNotEqual(pyshell.ReturnCode(-1), True)
self.assertNotEqual(pyshell.ReturnCode(0), -1)
self.assertNotEqual(pyshell.ReturnCode(0), False)
@contextlib.contextmanager
def TempArgv(*args): # TODO mocks
try:
old_argv = sys.argv[:]
sys.argv[:] = list(args)
yield
finally:
sys.argv = old_argv
@contextlib.contextmanager
def TempEnviron(var, value): # pragma: no cover: TODO mocks or move to separate module?
old_value = None
try:
if var in os.environ:
old_value = os.environ[var]
if value is None:
if var in os.environ:
del os.environ[var]
else:
os.environ[var] = value
yield
finally:
if old_value is None:
if var in os.environ:
del os.environ[var]
else:
os.environ[var] = old_value
class TestPyShell(unittest.TestCase): # TODO mocks
def should_get_args(self):
with TempArgv('progname', '-a', '--arg', 'value'):
self.assertEqual(sh.ARGS(), ('-a', '--arg', 'value'))
self.assertEqual(sh.ARG(0), 'progname')
self.assertEqual(sh.ARG(1), '-a')
self.assertEqual(sh.ARG(4), '')
def should_chdir(self):
with clckwrkbdgr.fs.CurrentDir('.'):
expected = Path('.').resolve().parent
sh.cd('..')
actual = Path('.').resolve()
self.assertEqual(actual, expected)
def should_chdir_back(self):
with clckwrkbdgr.fs.CurrentDir('.'):
original = Path('.').resolve()
parent = original.parent
sh.cd('..')
sh.cd('-')
actual = Path('.').resolve()
self.assertEqual(actual, original)
sh.cd('-')
actual = Path('.').resolve()
self.assertEqual(actual, parent)
def should_get_environment_variables(self):
with TempEnviron('MY_VAR', 'my_value'):
self.assertEqual(sh['MY_VAR'], 'my_value')
with TempEnviron('MY_VAR', None):
self.assertEqual(sh['MY_VAR'], '')
def should_run_command(self):
rc = sh.run('true')
self.assertEqual(rc, 0)
rc = sh.run('false')
self.assertEqual(rc, 1)
def should_nohup(self):
start = time.time()
rc = sh.run('sleep', '4', nohup=True)
stop = time.time()
self.assertTrue(stop - start < 3)
self.assertIsNone(rc)
def should_suppress_output(self):
sh.run('echo', 'you should not see this!', stdout=None)
def should_collect_output(self):
output = sh.run('echo', 'test', stdout=str)
self.assertEqual(output, 'test')
def should_collect_stderr(self):
with TempEnviron('LC_ALL', 'C'):
output = sh.run('cat', 'definitely missing file', stdout=str, stderr='stdout')
output = output.replace('/usr/bin/', '')
output = output.replace('/bin/', '')
output = output.replace("'", '')
expected = "cat: cannot open definitely missing file" if platform.system() == 'AIX' else "cat: definitely missing file: No such file or directory"
self.assertEqual(output, expected)
def should_suppress_stderr(self):
output = sh.run('cat', 'definitely missing file', stdout=str, stderr=None)
self.assertEqual(output, '')
def should_use_parentheses_to_run_command(self):
output = sh('echo', 'test', stdout=str)
self.assertEqual(output, 'test')
def should_feed_stdin(self):
output = sh.run('cat', stdin='foo\nbar', stdout=str)
self.assertEqual(output, 'foo\nbar')
def should_exit(self):
with self.assertRaises(SystemExit) as e:
sh.exit(1)
self.assertEqual(e.exception.code, 1)
with self.assertRaises(SystemExit) as e:
sh.exit(0)
self.assertEqual(e.exception.code, 0)
def should_exit_with_code_from_last_command(self):
sh('false')
with self.assertRaises(SystemExit) as e:
sh.exit()
self.assertEqual(e.exception.code, 1)
sh('true')
with self.assertRaises(SystemExit) as e:
sh.exit()
self.assertEqual(e.exception.code, 0)
| 5,368 |
src/tenykshi/main.py
|
nijotz/tenyks-contrib
| 0 |
2170519
|
from datetime import date
import random
from tenyks.client import Client, run_client
class TenyksHi(Client):
direct_only = True
hellos = ['hi', 'hello', 'hola', 'sup', 'sup?', 'hey', 'heyy', 'heyyy',
'yo']
insults = ['shutup', 'stop.', 'you\'re being annoying',
'I\'m trying to write code']
def __init__(self, *args, **kwargs):
self.hello_counts = {}
super(TenyksHi, self).__init__(*args, **kwargs)
def handle(self, data, match, filter_name):
if any([item == data['payload'] for item in self.hellos]):
if data['nick'] not in self.hello_counts:
self.hello_counts[data['nick']] = {}
if date.today() not in self.hello_counts[data['nick']]:
self.hello_counts[data['nick']][date.today()] = 0
self.hello_counts[data['nick']][date.today()] += 1
hello_count = self.hello_counts[data['nick']][date.today()]
if hello_count < 5:
self.send('{nick}: {word}'.format(
nick=data['nick'], word=random.choice(self.hellos)), data)
elif hello_count >= 5 and hello_count <= 10:
self.send('{nick}: {word}'.format(
nick=data['nick'], word=random.choice(self.insults)), data)
elif hello_count == 11:
self.send('{nick}: I\'m ignoring you.'.format(
nick=data['nick']), data)
def main():
run_client(TenyksHi)
if __name__ == '__main__':
main()
| 1,535 |
BioClients/pubchem/ftp/pubchem_ftp_assay_fetch.py
|
jeremyjyang/BioClients
| 10 |
2171679
|
#!/usr/bin/env python
#############################################################################
### pubchem_assay_fetch.py - from input AIDs, fetch full dataset
#############################################################################
import os,sys,re,time,getopt,gzip,zipfile
from ... import pubchem
PROG=os.path.basename(sys.argv[0])
ASSAY_DATA_DIR='/home/data/pubchem/bioassay/csv/data'
#############################################################################
def ErrorExit(msg):
print >>sys.stderr,msg
sys.exit(1)
#############################################################################
def MergeListIntoHash(hsh,lst):
for x in lst:
if not hsh.has_key(x):
hsh[x]=1
else:
hsh[x]+=1
return
#############################################################################
def ExtractSIDs(fpath_csv_gz):
try:
f=gzip.open(fpath_csv_gz)
except:
    print >>sys.stderr, 'ERROR: could not open %s'%(fpath_csv_gz)
return []
ftxt=f.read()
f.close()
sids_this=pubchem.ftp.Utils.ExtractOutcomes(ftxt,None,False)
return sids_this
#############################################################################
if __name__=='__main__':
usage='''
%(PROG)s - fetch assay csvs from AIDs (from local mirror)
required:
--i AIDFILE .......... AIDs (CSV w/ AID 1st ok)
or
--aids AIDS .......... list of AIDs (comma separated)
and
--odir ODIR .......... dir for output files
options:
--o OFILE ............ SIDs extracted from assay CSVs
--keep_dirtree ....... output in directory tree (as in PubChem FTP)
--v .................. verbose
--h .................. this help
'''%{'PROG':PROG}
ifile=None; odir=None; verbose=0; ofile=None; aidslist=None; keep_dirtree=False;
opts,pargs = getopt.getopt(sys.argv[1:],'',['h','v','vv','odir=','i=','o=','aids=','keep_dirtree'])
if not opts: ErrorExit(usage)
for (opt,val) in opts:
if opt=='--h': ErrorExit(usage)
elif opt=='--i': ifile=val
elif opt=='--odir': odir=val
    elif opt=='--o': ofile=val
elif opt=='--aids': aidslist=val
elif opt=='--keep_dirtree': keep_dirtree=True
elif opt=='--vv': verbose=2
elif opt=='--v': verbose=1
else: ErrorExit('Illegal option: %s'%val)
if not odir:
    ErrorExit('--odir required\n'+usage)
if not os.access(ASSAY_DATA_DIR,os.R_OK):
ErrorExit('cannot find: %s'%ASSAY_DATA_DIR)
aids=[];
if aidslist:
aids=map(lambda x:int(x),(re.split(r'[\s,]',aidslist.strip())))
elif ifile:
faids=file(ifile)
if not faids:
ErrorExit('cannot open: %s\n%s'%(ifile,usage))
i=0;
while True:
line=faids.readline()
if not line: break
line=line.strip()
if not line: continue
i+=1
try:
field=re.sub('[,\s].*$','',line) ## may be addl field[s]
aid=int(field)
aids.append(aid)
except:
print >>sys.stderr,'cannot parse aid: "%s"'%line
continue
print >>sys.stderr,'aids read: %d'%len(aids)
else:
    ErrorExit('--i or --aids required\n'+usage)
aids.sort()
sids={}
t0=time.time()
n_out=0; n_not_found=0;
for aid in aids:
if verbose:
print >>sys.stderr, '%d:\t'%(aid),
is_found=False
for fname_zip in os.listdir(ASSAY_DATA_DIR):
if not re.search('\.zip',fname_zip): continue
aid_from=re.sub(r'^([\d]+)_([\d]+)\.zip$',r'\1',fname_zip)
aid_to=re.sub(r'^([\d]+)_([\d]+)\.zip$',r'\2',fname_zip)
try:
aid_from=int(aid_from)
aid_to=int(aid_to)
except:
print >>sys.stderr, 'ERROR: cannot parse AIDs from fname_zip: "%s"'%fname_zip
continue
if aid<aid_from or aid>aid_to:
continue
if verbose:
print >>sys.stderr, '(%s)'%(fname_zip),
fpath_zip=ASSAY_DATA_DIR+'/'+fname_zip
try:
zf=zipfile.ZipFile(fpath_zip,'r')
except:
print >>sys.stderr, 'ERROR: cannot read fpath_zip: "%s"'%fpath_zip
continue
flist_csv_gz=zf.namelist()
zf.close()
for fpath_csv_gz in flist_csv_gz:
aid_this=None
if not re.search('\.csv\.gz',fpath_csv_gz): continue
try:
if re.search(r'/',fpath_csv_gz):
txt=re.sub(r'^.*/(\d*)\.csv\.gz',r'\1',fpath_csv_gz)
else:
txt=re.sub(r'\.csv\.gz','',fpath_csv_gz)
aid_this=int(txt)
except:
print >>sys.stderr, 'cannot parse AID: "%s"'%fpath_csv_gz
continue
if aid==aid_this:
zf=zipfile.ZipFile(fpath_zip,'r')
cwd=os.getcwd()
os.chdir(odir)
zf.extract(fpath_csv_gz)
if not keep_dirtree:
d,f = os.path.split(fpath_csv_gz)
os.rename(fpath_csv_gz,f)
n_out+=1
is_found=True
os.chdir(cwd)
zf.close()
if ofile:
MergeListIntoHash(sids,ExtractSIDs(odir+"/"+fpath_csv_gz))
break
if not is_found:
n_not_found+=1
if verbose:
print >>sys.stderr, '\t[%s]'%(is_found)
if ofile:
fout_sids=open(ofile,"w+")
if not fout_sids:
print >>sys.stderr, 'cannot open: %s'%ofile
else:
for sid in sids.keys():
fout_sids.write("%d\n"%sid)
fout_sids.close()
print >>sys.stderr, '%s: input AIDs: %d'%(PROG,len(aids))
print >>sys.stderr, '%s: output assay csv datafiles: %d'%(PROG,n_out)
print >>sys.stderr, '%s: assays not found: %d'%(PROG,n_not_found)
if ofile:
print >>sys.stderr, '%s: output SIDs: %d'%(PROG,len(sids.keys()))
print >>sys.stderr, ('%s: total elapsed time: %s'%(PROG,time.strftime('%Hh:%Mm:%Ss',time.gmtime(time.time()-t0))))
| 5,629 |
game/Background.py
|
senhordaluz/jc1-python
| 0 |
2172289
|
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 20 19:18:21 2017
@author: <NAME>
"""
import pygame
from game import my
from game import camera
my.FASES = ['Primeira Fase', 'Segunda Fase', 'Terceira Fase', 'Boss']
my.FASE = pygame.sprite.Group()
def Inicializa_Fase():
"""Carrega todos os elementos basicos da fase no
grupo de sprites salvo em my.FASE"""
background = Imagem_de_Fundo()
portalCima = Portal('cima')
portalBaixo = Portal('baixo')
portalEsquerda = Portal('esquerda')
portalDireita = Portal('direita')
class Imagem_de_Fundo(pygame.sprite.Sprite):
"""Classe para instanciar o sprite do plano de fundo"""
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.fase = 1
self.image = pygame.image.load(self._get_image())
self.image = pygame.transform.scale(self.image, my.SIZE)
self.rect = self.image.get_rect()
self.rect.left, self.rect.top = [0,0]
self.add(my.FASE)
def update(self):
"""Roda a cada frame do jogo"""
##self.proxima_fase()
pass
def _get_image(self):
"""Retorna local do arquivo do mapa"""
if self.fase == 1:
mapa = ''.join([my.ARTE_MAPAS_PATH, 'fundo01.jpg'])
elif self.fase == 2:
mapa = ''.join([my.ARTE_MAPAS_PATH, 'fundo02.jpg'])
elif self.fase == 3:
mapa = ''.join([my.ARTE_MAPAS_PATH, 'fundo03.jpg'])
elif self.fase == 4:
mapa = ''.join([my.ARTE_MAPAS_PATH, 'fundoB1.jpg'])
else:
mapa = ''.join([my.ARTE_MAPAS_PATH, 'fundo01.jpg'])
return mapa
def _troca_fase(self):
"""Troca o arquivo de imagem do fundo da tela para o
da próxima fase"""
self.image = pygame.image.load(self._get_image())
self.image = pygame.transform.scale(self.image, my.SIZE)
def proxima_fase(self):
if self.fase == 4:
self.fase = 1
else:
self.fase += 1
self._troca_fase()
class Portal(pygame.sprite.Sprite):
"""
Classe para instanciar os portais da fase
tipo: cima, baixo, esquerda, direita
"""
def __init__(self, tipo):
pygame.sprite.Sprite.__init__(self)
self.fase = 1
self.tipo = tipo
self.spritesheet = camera.SpriteSheet(self._get_image_sheet())
self._posiciona_portal()
self.image = self.spritesheet.image
self.image = pygame.transform.scale(self.image, self.rect.size)
self.add(my.FASE)
def update(self):
"""Roda a cada frame do jogo"""
self._troca_sprite()
self.image = self.spritesheet.image
self.image = pygame.transform.scale(self.image, self.rect.size)
def _get_image_sheet(self):
"""Retorna local do arquivo do portal"""
if self.fase == 1:
if self.tipo == 'baixo':
portal = ''.join([my.ARTE_PORTAL_PATH, 'vermelho_baixo.png'])
elif self.tipo == 'cima':
portal = ''.join([my.ARTE_PORTAL_PATH, 'vermelho_cima.png'])
elif self.tipo == 'esquerda':
portal = ''.join([my.ARTE_PORTAL_PATH, 'vermelho_esquerda.png'])
elif self.tipo == 'direita':
portal = ''.join([my.ARTE_PORTAL_PATH, 'vermelho_direita.png'])
elif self.fase == 2:
if self.tipo == 'baixo':
portal = ''.join([my.ARTE_PORTAL_PATH, 'azul_baixo.png'])
elif self.tipo == 'cima':
portal = ''.join([my.ARTE_PORTAL_PATH, 'azul_cima.png'])
elif self.tipo == 'esquerda':
portal = ''.join([my.ARTE_PORTAL_PATH, 'azul_esquerda.png'])
elif self.tipo == 'direita':
portal = ''.join([my.ARTE_PORTAL_PATH, 'azul_direita.png'])
elif self.fase == 3:
if self.tipo == 'baixo':
portal = ''.join([my.ARTE_PORTAL_PATH, 'cinza_baixo.png'])
elif self.tipo == 'cima':
portal = ''.join([my.ARTE_PORTAL_PATH, 'cinza_cima.png'])
elif self.tipo == 'esquerda':
portal = ''.join([my.ARTE_PORTAL_PATH, 'cinza_esquerda.png'])
elif self.tipo == 'direita':
portal = ''.join([my.ARTE_PORTAL_PATH, 'cinza_direita.png'])
return portal
def _posiciona_portal(self):
"""Salva o rect com a posição do portal em tela
além da posição a ser cortada no spritesheet
"""
if self.tipo == 'cima':
self.rect = pygame.Rect(362,0,80,50)
self.spritesheet.rect = pygame.Rect(0,0,59,30)
elif self.tipo == 'baixo':
self.rect = pygame.Rect(362,550,80,50)
self.spritesheet.rect = pygame.Rect(0,0,59,30)
elif self.tipo == 'esquerda':
self.rect = pygame.Rect(0,280,50,80)
self.spritesheet.rect = pygame.Rect(0,0,30,59)
elif self.tipo == 'direita':
self.rect = pygame.Rect(748,265,50,80)
self.spritesheet.rect = pygame.Rect(0,0,30,59)
def _troca_sprite(self):
"""Troca o sprite de animação do portal"""
if self.tipo == 'cima' or self.tipo == 'baixo':
if self.spritesheet.rect.x >= 413:
self.spritesheet.rect.x = 0
else:
self.spritesheet.rect.x += 59
elif self.tipo == 'esquerda' or self.tipo == 'direita':
if self.spritesheet.rect.y >= 413:
self.spritesheet.rect.y = 0
else:
self.spritesheet.rect.y += 59
| 5,887 |
scripts/start-temscripting-test.py
|
christian-at-ceos/temscript
| 2 |
2170558
|
from temscript import GetInstrument
# for testing on the Titan microscope PC
print("Starting Test...")
instrument = GetInstrument()
gun = instrument.Gun
illumination = instrument.Illumination
projection = instrument.Projection
vacuum = instrument.Vacuum
illuminationMode = illumination.Mode
print("illuminationMode=%s" % illuminationMode)
condenserMode = illumination.CondenserMode
print("condenserMode=%s" % condenserMode)
htValue = gun.HTValue
print("HT1=%s" % htValue)
cameraLength = projection.CameraLength
print("cameraLength=%s" % cameraLength)
magnification = projection.Magnification
print("magnification=%s" % magnification)
projectionMode = projection.Mode
print("projectionMode=%s" % projectionMode)
projectionSubMode = projection.SubMode
print("projectionSubMode=%s" % projectionSubMode)
stemMagnification = illumination.StemMagnification
print("stemMagnification=%s" % stemMagnification)
beamBlanked = illumination.BeamBlanked
print("beamBlanked=%s" % beamBlanked)
illuminationMode = illumination.Mode
print("illuminationMode=%s" % illuminationMode)
illuminatedArea = illumination.IlluminatedArea
print("illuminatedArea=%s" % illuminatedArea)
dfMode = illumination.DFMode
print("dfMode=%s" % dfMode)
spotSizeIndex = illumination.SpotSizeIndex
print("spotSizeIndex=%s" % spotSizeIndex)
condenserMode = illumination.CondenserMode
print("condenserMode=%s" % condenserMode)
#convergenceAngle = illumination.ConvergenceAngle
#print("convergenceAngle=%s" % convergenceAngle)
print("Done.")
| 1,514 |
rally/rally-plugins/octavia/octavia-create-loadabalancer-listeners-pools-members.py
|
cloud-bulldozer/browbeat
| 19 |
2169835
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
import io
from rally.common import sshutils
from rally_openstack import consts
from rally_openstack.scenarios.vm import utils as vm_utils
from rally_openstack.scenarios.neutron import utils as neutron_utils
from rally_openstack.scenarios.octavia import utils as octavia_utils
from octaviaclient.api import exceptions
from rally.task import scenario
from rally.task import types
from rally.task import validation
LOG = logging.getLogger(__name__)
@types.convert(image={"type": "glance_image"}, flavor={"type": "nova_flavor"})
@validation.add("image_valid_on_flavor", flavor_param="flavor", image_param="image")
@validation.add("required_services", services=[consts.Service.NEUTRON,
consts.Service.NOVA,
consts.Service.OCTAVIA])
@validation.add("required_platform", platform="openstack", users=True)
@validation.add("required_contexts", contexts=["network"])
@scenario.configure(context={"cleanup@openstack": ["octavia", "neutron", "nova"],
"keypair@openstack": {}, "allow_ssh@openstack": None},
name="BrowbeatPlugin.OctaviaCreateLoadbalancerListenersPoolsMembers",
platform="openstack")
class OctaviaCreateLoadbalancerListenersPoolsMembers(vm_utils.VMScenario,
neutron_utils.NeutronScenario,
octavia_utils.OctaviaBase):
def create_clients(self, num_clients, image, flavor, user, user_data_file, **kwargs):
_clients = []
for i in range(num_clients):
            userdata = None
            try:
userdata = io.open(user_data_file, "r")
kwargs["userdata"] = userdata
LOG.info("Added user data")
except Exception as e:
LOG.info("couldn't add user data %s", e)
LOG.info("Launching Client : {}".format(i))
server = self._boot_server(
image,
flavor,
key_name=self.context["user"]["keypair"]["name"],
**kwargs)
if hasattr(userdata, 'close'):
userdata.close()
            network_name = None
            for net in server.addresses:
network_name = net
break
if network_name is None:
return False
# IP Address
_clients.append(
str(server.addresses[network_name][0]["addr"]))
LOG.info(_clients)
return _clients
def run(self, image, flavor, user, lb_algorithm, protocol, protocol_port,
jump_host_ip, num_pools, num_clients, vip_subnet_id, user_data_file,
router_create_args=None, network_create_args=None,
subnet_create_args=None, **kwargs):
network = self._create_network(network_create_args or {})
subnet = self._create_subnet(network, subnet_create_args or {})
kwargs["nics"] = [{"net-id": network['network']['id']}]
subnet_id = subnet['subnet']['id']
_clients = self.create_clients(num_clients, image,
flavor, user, user_data_file,
**kwargs)
max_attempts = 10
LOG.info("Creating a load balancer")
lb = self.octavia.load_balancer_create(
subnet_id=vip_subnet_id,
admin_state=True)
lb_id = lb["id"]
LOG.info("Waiting for the lb {} to be active".format(lb_id))
self.octavia.wait_for_loadbalancer_prov_status(lb)
time.sleep(90)
for _ in range(num_pools):
listener_args = {
"name": self.generate_random_name(),
"loadbalancer_id": lb_id,
"protocol": protocol,
"protocol_port": protocol_port,
"connection_limit": -1,
"admin_state_up": True,
}
LOG.info("Creating a listener for lb {}".format(lb_id))
attempts = 0
# Retry to avoid HTTP 409 errors like "Load Balancer
# is immutable and cannot be updated"
while attempts < max_attempts:
try:
listener = self.octavia.listener_create(json={"listener": listener_args})
break
except exceptions.OctaviaClientException as e:
# retry for 409 return code
if e.code == 409:
attempts += 1
time.sleep(120)
self.octavia.wait_for_loadbalancer_prov_status(lb)
continue
break
LOG.info(listener)
time.sleep(30)
LOG.info("Waiting for the lb {} to be active, after listener_create"
.format(lb_id))
self.octavia.wait_for_loadbalancer_prov_status(lb)
LOG.info("Creating a pool for lb {}".format(lb_id))
attempts = 0
# Retry to avoid HTTP 409 errors like "Load Balancer
# is immutable and cannot be updated"
while attempts < max_attempts:
try:
# internally pool_create will wait for active state
pool = self.octavia.pool_create(
lb_id=lb["id"],
protocol=protocol,
lb_algorithm=lb_algorithm,
listener_id=listener["listener"]["id"],
admin_state_up=True)
break
except exceptions.OctaviaClientException as e:
# retry for 409 return code
if e.code == 409:
attempts += 1
time.sleep(120)
continue
break
time.sleep(60)
for client_ip in _clients:
member_args = {
"address": client_ip,
"protocol_port": protocol_port,
"subnet_id": subnet_id,
"admin_state_up": True,
"name": self.generate_random_name(),
}
LOG.info("Adding member : {} to the pool {} lb {}"
.format(client_ip, pool["id"], lb_id))
attempts = 0
# Retry to avoid "Load Balancer is immutable and cannot be updated"
while attempts < max_attempts:
try:
self.octavia.member_create(pool["id"],
json={"member": member_args})
break
except exceptions.OctaviaClientException as e:
# retry for 409 return code
if e.code == 409:
attempts += 1
time.sleep(120)
self.octavia.wait_for_loadbalancer_prov_status(lb)
LOG.info("member_create exception: Waiting for the lb {} to be active"
.format(lb_id))
continue
break
time.sleep(30)
LOG.info("Waiting for the lb {} to be active, after member_create"
.format(lb_id))
self.octavia.wait_for_loadbalancer_prov_status(lb)
protocol_port = protocol_port + 1
# ssh and ping the vip
lb_ip = lb["vip_address"]
LOG.info("Load balancer IP: {}".format(lb_ip))
port = 80
jump_ssh = sshutils.SSH(user, jump_host_ip, 22, None, None)
# check for connectivity
self._wait_for_ssh(jump_ssh)
for i in range(num_pools):
for j in range(num_clients):
cmd = "curl -s {}:{}".format(lb_ip, port)
attempts = 0
while attempts < max_attempts:
test_exitcode, stdout_test, stderr = jump_ssh.execute(cmd, timeout=60)
LOG.info("cmd: {}, stdout:{}".format(cmd, stdout_test))
if test_exitcode != 0 and stdout_test != 1:
LOG.error("ERROR with HTTP response {}".format(cmd))
attempts += 1
time.sleep(30)
else:
LOG.info("cmd: {} succesful".format(cmd))
break
port = port + 1
| 9,139 |
tools/wr_chart.py
|
probably-not-porter/pokedex
| 12 |
2172050
|
# Here is my Python equivalent of a type chart.
# Normal = 0 and so forth
matrix = [
# NOR FIR WAT ELE GRA ICE FIG POI GRO FLY PSY BUG ROC GHO DRA DAR STE FAI
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 ,0.5, 0 , 1 , 1 ,0.5, 1 ], # NOR
[ 1 ,0.5,0.5, 1 , 2 , 2 , 1 , 1 , 1 , 1 , 1 , 2 ,0.5, 1 ,0.5, 1 , 2 , 1 ], # FIR
[ 1 , 2 ,0.5, 1 ,0.5, 1 , 1 , 1 , 2 , 1 , 1 , 1 , 2 , 1 ,0.5, 1 , 1 , 1 ], # WAT
[ 1 , 1 , 2 ,0.5,0.5, 1 , 1 , 1 , 0 , 2 , 1 , 1 , 1 , 1 ,0.5, 1 , 1 , 1 ], # ELE
[ 1 ,0.5, 2 , 1 ,0.5, 1 , 1 ,0.5, 2 ,0.5, 1 ,0.5, 2 , 1 ,0.5, 1 ,0.5, 1 ], # GRA
[ 1 ,0.5,0.5, 1 , 2 ,0.5, 1 , 1 , 2 , 2 , 1 , 1 , 1 , 1 , 2 , 1 ,0.5, 1 ], # ICE
[ 2 , 1 , 1 , 1 , 1 , 2 , 1 ,0.5, 1 ,0.5,0.5,0.5, 2 , 0 , 1 , 2 , 2 ,0.5], # FIG
[ 1 , 1 , 1 , 1 , 2 , 1 , 1 ,0.5,0.5, 1 , 1 , 1 ,0.5,0.5, 1 , 1 , 0 , 2 ], # POI
[ 1 , 2 , 1 , 2 ,0.5, 1 , 1 , 2 , 1 , 0 , 1 ,0.5, 2 , 1 , 1 , 1 , 2 , 1 ], # GRO
[ 1 , 1 , 1 ,0.5, 2 , 1 , 2 , 1 , 1 , 1 , 1 , 2 ,0.5, 1 , 1 , 1 ,0.5, 1 ], # FLY
[ 1 , 1 , 1 , 1 , 1 , 1 , 2 , 2 , 1 , 1 ,0.5, 1 , 1 , 1 , 1 , 0 ,0.5, 1 ], # PSY
[ 1 ,0.5, 1 , 1 , 2 , 1 ,0.5,0.5, 1 ,0.5, 2 , 1 , 1 ,0.5, 1 , 2 ,0.5,0.5], # BUG
[ 1 , 2 , 1 , 1 , 1 , 2 ,0.5, 1 ,0.5, 2 , 1 , 2 , 1 , 1 , 1 , 1 ,0.5, 1 ], # ROC
[ 0 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 2 , 1 , 1 , 2 , 1 ,0.5, 1 , 1 ], # GHO
[ 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 2 , 1 ,0.5, 0 ], # DRA
[ 1 , 1 , 1 , 1 , 1 , 1 ,0.5, 1 , 1 , 1 , 2 , 1 , 1 , 2 , 1 ,0.5, 1 ,0.5], # DAR
[ 1 ,0.5,0.5,0.5, 1 , 2 , 1 , 1 , 1 , 1 , 1 , 1 , 2 , 1 , 1 , 1 ,0.5, 2 ], # STE
[0.5, 1 , 1 , 1 , 1 , 1 , 2 ,0.5, 1 , 1 , 1 , 1 , 1 , 1 , 2 , 2 ,0.5, 1 ] # FAI
]
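# Usage sketch (added for illustration; the index constants below are assumptions
# mirroring the column comments above, they are not part of the original chart):
NOR, FIR, WAT, ELE, GRA = 0, 1, 2, 3, 4
print(matrix[FIR][GRA])  # Fire attacking Grass -> 2 (super effective)
print(matrix[WAT][FIR])  # Water attacking Fire -> 2 (super effective)
print(matrix[ELE][GRA])  # Electric attacking Grass -> 0.5 (not very effective)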
| 1,714 |
app/comments/models.py
|
jking6884/RESTapi
| 0 |
2172445
|
from marshmallow_jsonapi import Schema, fields
from marshmallow import validate
from app.basemodels import db, CRUD_MixIn
class Comments(db.Model, CRUD_MixIn):
id = db.Column(db.Integer, primary_key=True)
author = db.Column(db.String(250), nullable=False)
body = db.Column(db.Text, nullable=False)
author_url = db.Column(db.String(250), nullable=False)
created_on = db.Column(db.Date, nullable=False)
approved = db.Column(db.Boolean, nullable=False)
def __init__(self, author, body, author_url, created_on, approved, ):
self.author = author
self.body = body
self.author_url = author_url
self.created_on = created_on
self.approved = approved
class CommentsSchema(Schema):
not_blank = validate.Length(min=1, error='Field cannot be blank')
# add validate=not_blank in required fields
id = fields.Integer(dump_only=True)
author = fields.String(validate=not_blank)
body = fields.String(validate=not_blank)
author_url = fields.URL(validate=not_blank)
created_on = fields.Date(required=True)
approved = fields.Boolean(required=True)
# self links
def get_top_level_links(self, data, many):
if many:
self_link = "/comments/"
else:
self_link = "/comments/{}".format(data['id'])
return {'self': self_link}
class Meta:
type_ = 'comments'
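# Usage sketch (added; assumes an application/database context and at least one stored row):
#   comment = Comments.query.first()
#   payload = CommentsSchema().dump(comment)  # JSON:API-style dict including the "self" link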
| 1,455 |
chemfiles/misc.py
|
ezavod/chemfiles.py
| 0 |
2172132
|
# -*- coding=utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import warnings
from .clib import _get_c_library
class ChemfilesWarning(UserWarning):
"""Warnings from the Chemfiles runtime."""
pass
class ChemfilesError(BaseException):
"""Exception class for errors in chemfiles"""
pass
# Store a reference to the last warnings callback, to prevent Python from
# garbage-collecting it.
_CURRENT_CALLBACK = None
def set_warnings_callback(function):
"""
Call `function` on every warning event. The callback should take a string
message and return nothing.
    By default, warnings are sent to the Python `warnings` module.
"""
from .ffi import chfl_warning_callback
def callback(message):
try:
function(message.decode("utf8"))
except Exception as e:
message = "exception raised in warning callback: {}".format(e)
warnings.warn(message, ChemfilesWarning)
global _CURRENT_CALLBACK
_CURRENT_CALLBACK = chfl_warning_callback(callback)
_get_c_library().chfl_set_warning_callback(_CURRENT_CALLBACK)
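# Usage sketch (added for illustration; any callable taking a single str works):
#   set_warnings_callback(lambda message: print("chemfiles:", message))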
def add_configuration(path):
"""
Read configuration data from the file at ``path``.
    By default, chemfiles reads configuration from any file named `.chemfilesrc`
in the current directory or any parent directory. This function can be used
to add data from another configuration file.
This function will fail if there is no file at ``path``, or if the file is
    incorrectly formatted. Data from the new configuration file will overwrite
any existing data.
"""
_get_c_library().chfl_add_configuration(path.encode("utf8"))
def _last_error():
"""Get the last error from the chemfiles runtime."""
return _get_c_library().chfl_last_error().decode("utf8")
def _clear_errors():
"""Clear any error message saved in the chemfiles runtime."""
return _get_c_library().chfl_clear_errors()
def _set_default_warning_callback():
set_warnings_callback(
# We need to set stacklevel=4 to get through the lambda =>
        # adaptor => C++ code => Python binding => user code
lambda message: warnings.warn(message, ChemfilesWarning, stacklevel=4)
)
| 2,251 |
shader.py
|
ultradr3mer/PythonGl
| 0 |
2170082
|
import glm
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL import arrays  # for arrays.GLfloatArray used in insert_uniform (assumed missing import)
import game
class Shader:
def __init__(self, vertex_file, fragment_file):
vertex_shader_handle = glCreateShader(GL_VERTEX_SHADER)
fragment_shader_handle = glCreateShader(GL_FRAGMENT_SHADER)
with open(vertex_file, "r") as f:
content = f.read()
glShaderSource(vertex_shader_handle, content)
with open(fragment_file, "r") as f:
content = f.read()
glShaderSource(fragment_shader_handle, content)
glCompileShader(vertex_shader_handle)
if glGetError() != 0:
raise Exception(glGetShaderInfoLog(vertex_shader_handle))
glCompileShader(fragment_shader_handle)
if glGetError() != 0:
raise Exception(glGetShaderInfoLog(fragment_shader_handle))
shader_program_handle = glCreateProgram()
glAttachShader(shader_program_handle, vertex_shader_handle)
glAttachShader(shader_program_handle, fragment_shader_handle)
glLinkProgram(shader_program_handle)
if glGetProgramiv(shader_program_handle, GL_LINK_STATUS) == GL_FALSE:
log = glGetProgramInfoLog(shader_program_handle)
raise Exception(log)
self.shader_program_handle = shader_program_handle
def create_vertex_attribute_object(self, mesh):
vao_handle = glGenVertexArrays(1)
glBindVertexArray(vao_handle)
game.Game.read_error_log()
# normal_index = glGetAttribLocation(self.shader_program_handle, "in_normal")
position_index = glGetAttribLocation(self.shader_program_handle, "in_position")
# tangent_index = glGetAttribLocation(self.shader_program_handle, "in_tangent")
texture_index = glGetAttribLocation(self.shader_program_handle, "in_texture")
if position_index != -1:
glEnableVertexAttribArray(position_index)
glBindBuffer(GL_ARRAY_BUFFER, mesh.verticeBufferId)
glVertexAttribPointer(position_index, 3, GL_FLOAT, GL_FALSE, 0, None)
if texture_index != -1:
glEnableVertexAttribArray(texture_index)
glBindBuffer(GL_ARRAY_BUFFER, mesh.textureCoordBufferId)
glVertexAttribPointer(texture_index, 2, GL_FLOAT, GL_FALSE, 0, None)
glBindVertexArray(0)
return vao_handle
def insert_uniform(self, uniform_name, value):
location = glGetUniformLocation(self.shader_program_handle, uniform_name)
if isinstance(value, glm.mat4):
glUniformMatrix4fv(location, 1, GL_FALSE, glm.value_ptr(value))
return
if isinstance(value, tuple):
if len(value) == 4:
glUniform4fv(location, 1, arrays.GLfloatArray(value))
return
| 2,786 |
setup.py
|
NickolaiBeloguzov/robust-json
| 6 |
2172071
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import setuptools
with open("README.md", "r") as f:
    long_desc = f.read()
setuptools.setup(
name="robust-json", # or robust_json
version="1.2.7",
author="<NAME>",
author_email="<EMAIL>",
packages=setuptools.find_packages(),
install_requires=["jsonpath_ng", "pathlib2"],
    long_description=long_desc,
long_description_content_type="text/markdown",
url="https://github.com/NickolaiBeloguzov/robust-json",
description="Robust and easy-to-use framework for working with JSON",
license="Apache 2.0",
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Operating System :: OS Independent",
],
python_requires=">=3.8",
include_package_data=True,
)
| 1,429 |
scripts/05_nlcd92_to_tracts.py
|
snmarkley1/HHUUD10
| 3 |
2172186
|
#####################################################################################
#####################################################################################
#### --------------------------------------------------------------------------- ####
#### ASSIGNING 1992 NLCD CATEGORIES to 2010 TRACT GEOGRAPHIES ####
#### --------------------------------------------------------------------------- ####
#####################################################################################
#####################################################################################
### RUN in ArcGIS PRO 2.8.1
##################################
## PREPARE WORKSPACE ##
##################################
## Import packages
import arcpy # need ArcGIS license
from arcpy import env
import os, zipfile, urllib # for downloading, unzipping files
from urllib import request
## Set workspace
base = "D:/HHUUD10"
env.workspace = base
## Set preferences
env.outputCoordinateSystem = arcpy.SpatialReference("USA Contiguous Albers Equal Area Conic") # coordinate system in use
env.extent = "MAXOF" # for raster operations
env.qualifiedFieldNames = False # good for joins
# Create temp folder
arcpy.management.CreateFolder(base, "temp")
path = os.path.join(base, "temp") # create path
# Establish Map
aprx = arcpy.mp.ArcGISProject("CURRENT")
# Create GDB
arcpy.management.CreateFileGDB(os.path.join(base, "gis_files"), "nlcd92.gdb")
############################################################
## DOWNLOAD/UNZIP 1992 NLCD CATEGORIES ##
############################################################
## Create list of URLs--available via the USGS (https://water.usgs.gov/GIS/metadata/usgswrd/XML/nlcde92.xml#stdorder)
urls = ["https://water.usgs.gov/GIS/dsdl/nlcde/nlcde92_1.zip",
"https://water.usgs.gov/GIS/dsdl/nlcde/nlcde92_2.zip",
"https://water.usgs.gov/GIS/dsdl/nlcde/nlcde92_3.zip",
"https://water.usgs.gov/GIS/dsdl/nlcde/nlcde92_4.zip"]
## List of output names
outputs = ["nlcd92_1", "nlcd92_2", "nlcd92_3", "nlcd92_4"]
## Run Loop downloading and unzipping raster files
for i, j in zip(urls, outputs):
zip_path, _ = urllib.request.urlretrieve(i, j) # retrieve files from URLs
with zipfile.ZipFile(zip_path, "r") as f:
f.extractall(path) # unzip files to temp folder created above
## NOTE: The above block of code can sometimes spit back errors. Re-running it from the top a second time worked for us.
############################################################
## RECLASSIFY & CONDUCT ZONAL HISTOGRAM ##
############################################################
## Change workspace
env.workspace = path
## Grab rasters in list
rasters = ["nlcde92_1/nlcde1.tif", "nlcde92_2/nlcde2.tif", "nlcde92_3/nlcde3.tif", "nlcde92_4/nlcde4.tif"]
outfolder = os.path.join(base, "gis_files", "nlcd92.gdb")
## Reclassify into 3-class Rasters (simplifies following step)
for r in rasters:
output = os.path.join(outfolder, "nlcd" + r[15:16] + "_recl") # make name (e.g.) "nlcd1_recl"
arcpy.gp.Reclassify_sa(r, "value", '11 12 1;21 22 2;23 3; 25 84 4;85 2;86 99 4', output, "NODATA") # for codes, see below:
## 1992 NLCD Codes Specified in Reclassify Step (source: https://water.usgs.gov/GIS/metadata/usgswrd/XML/nlcde92.xml#stdorder):
## ---- Water (1) ---- ##
# 11 - Open Water
# 12 - Perennial Ice/Snow
## ---- "Developed" (2) ---- ##
# 21 - Low Intensity Residential
# 22 - High Intensity Residential
# 23 - Commercial/Industrial/Transportation
# 85 - Urban/Recreational Grasses
## ---- Other (3) ---- ##
# All other numbers thru 99
## Prepare Zonal Histogram
env.workspace = outfolder # change workspace to gdb just created
rasters = arcpy.ListRasters() # rasters created above
t10 = os.path.join(base, "gis_files/database1.gdb/t10") # grab t10 polygon from database1.gdb
## Do Zonal Histogram (output as tables in tables folder)
for r in rasters:
output = r[:5] + "_zh" # outputs: rast1_zh, rast2_zh, etc.
arcpy.sa.ZonalHistogram(t10, "GISJOIN", r, output, "") # zonal histogram
## DELETE TEMP FOLDER
arcpy.management.Delete(path)
## Clear shapefiles from map display
for m in aprx.listMaps():
for lyr in m.listLayers("nlcd*"):
m.removeLayer(lyr)
## Clear tables from map display
for m in aprx.listMaps():
for tab in m.listTables("nlcd*"):
m.removeTable(tab)
| 4,599 |
jupyterlab2pymolpysnips/FileInput/loadPDBfile.py
|
MooersLab/pymolpysnips
| 0 |
2172283
|
"""
cmd.do('load ${1:my.pdb};')
"""
cmd.do('load my.pdb;')
# Description: Load a pdb file in the current directory.
# Source: placeHolder
| 142 |
selia/views/list_views/sampling_event_items.py
|
IslasGECI/selia
| 0 |
2172646
|
from irekua_database.models import SamplingEvent
from django.views.generic.detail import SingleObjectMixin
from django.utils.translation import gettext as _
from irekua_database.models import Item
from irekua_filters.items import items
from irekua_permissions.items import (
items as item_permissions)
from selia.views.list_views.base import SeliaListView
class ListSamplingEventItemsView(SeliaListView, SingleObjectMixin):
template_name = 'selia/list/sampling_event_items.html'
list_item_template = 'selia/list_items/item.html'
help_template = 'selia/help/sampling_event_items.html'
filter_form_template = 'selia/filters/item.html'
empty_message = _('No items are registered in this sampling event')
filter_class = items.Filter
search_fields = items.search_fields
ordering_fields = items.ordering_fields
def has_view_permission(self):
user = self.request.user
return item_permissions.list(user, sampling_event=self.object)
def has_create_permission(self):
user = self.request.user
return item_permissions.create(user, sampling_event=self.object)
def get(self, request, *args, **kwargs):
self.object = self.get_object(queryset=SamplingEvent.objects.all())
return super().get(request, *args, **kwargs)
def get_initial_queryset(self):
return Item.objects.filter(
sampling_event_device__sampling_event=self.object)
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context['sampling_event'] = self.object
context['collection'] = self.object.collection
return context
| 1,679 |
fora/views/admintopics.py
|
WayStudios/fora
| 0 |
2172599
|
# fora
# class AdminTopicsView
# Xu [<EMAIL>] Copyright 2015
from fora.core.adminview import AdminView
from fora.core.topic import Topic
from fora.core.thread import Thread
from pyramid.renderers import render_to_response
from pyramid.httpexceptions import (
HTTPFound
)
class AdminTopicsView(AdminView):
""" This class contains the topics administration view of fora.
"""
identity = None
def __init__(self, request):
template = '%(path)s/topics.pt' % {'path': AdminView.path['templates']}
super(AdminTopicsView, self).__init__(request = request,
template = template,
actions = {
'retrieve_topics': self.retrieve_topics,
'retrieve_topic': self.retrieve_topic,
'delete_topic': self.delete_topic
})
if 'identity' in request.matchdict:
self.identity = request.matchdict['identity']
def prepare_template(self):
if not self.moderator.is_guest():
if self.activity == 'view':
topic = Topic.get_topic_by_uuid(self.identity)
thread = Thread.get_thread_by_uuid(topic.initial_thread())
self.value['topic'] = {
'uuid': topic.uuid(),
'subject': thread.subject(),
'content': thread.content(),
'create_date': topic.create_date().strftime('%Y-%m-%d %H:%M:%S'),
'update_date': topic.update_date().strftime('%Y-%m-%d %H:%M:%S')
}
self.template = '%(path)s/topics/view.pt' % {'path': AdminView.path['templates']}
elif self.activity == 'create':
self.template = '%(path)s/topics/create.pt' % {'path': AdminView.path['templates']}
elif self.activity == 'edit':
self.template = '%(path)s/topics/edit.pt' % {'path': AdminView.path['templates']}
else:
self.exception = HTTPFound(self.request.route_url("admin_portal"))
super(AdminTopicsView, self).prepare_template()
def retrieve_topics(self):
value = {
'status': True,
'entries': []
}
topics = Topic.get_topics()
for id in topics:
thread = Thread.get_thread_by_uuid(uuid = topics[id].initial_thread())
value['entries'].append({
'identity': topics[id].uuid(),
'id': topics[id].id(),
'author': thread.author(),
'subject': thread.subject(),
'is_archived': topics[id].is_archived(),
'is_deleted': topics[id].is_deleted(),
'create_date': topics[id].create_date().strftime('%Y-%m-%d %H:%M:%S'),
'update_date': topics[id].update_date().strftime('%Y-%m-%d %H:%M:%S')
})
self.response = render_to_response(renderer_name = 'json',
value = value,
request = self.request)
def retrieve_topic(self):
value = {
'status': True,
'entry': {}
}
self.response = render_to_response(renderer_name = 'json',
value = value,
request = self.request)
def delete_topic(self):
value = {
'status': True
}
self.response = render_to_response(renderer_name = 'json',
value = value,
request = self.request)
| 3,817 |
python/paddle/fluid/tests/unittests/asp/test_asp_optimize_dynamic.py
|
RangeKing/Paddle
| 8 |
2172367
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.contrib.sparsity.asp import ASPHelper
import numpy as np
class MyLayer(paddle.nn.Layer):
def __init__(self):
super(MyLayer, self).__init__()
self.conv1 = paddle.nn.Conv2D(
in_channels=3, out_channels=2, kernel_size=3, padding=2)
self.linear1 = paddle.nn.Linear(1352, 32)
self.linear2 = paddle.nn.Linear(32, 32)
self.linear3 = paddle.nn.Linear(32, 10)
def forward(self, img):
hidden = self.conv1(img)
hidden = paddle.flatten(hidden, start_axis=1)
hidden = self.linear1(hidden)
hidden = self.linear2(hidden)
prediction = self.linear3(hidden)
return prediction
class TestASPDynamicOptimize(unittest.TestCase):
def setUp(self):
self.layer = MyLayer()
self.place = paddle.CPUPlace()
if core.is_compiled_with_cuda():
self.place = paddle.CUDAPlace(0)
self.optimizer = paddle.optimizer.SGD(
learning_rate=0.01, parameters=self.layer.parameters())
def test_is_supported_layers(self):
program = paddle.static.default_main_program()
names = [
'embedding_0.w_0', 'fack_layer_0.w_0', 'conv2d_0.w_0',
'conv2d_0.b_0', 'conv2d_1.w_0', 'conv2d_1.b_0', 'fc_0.w_0',
'fc_0.b_0', 'fc_1.w_0', 'fc_1.b_0', 'linear_2.w_0', 'linear_2.b_0'
]
ref = [
False, False, True, False, True, False, True, False, True, False,
True, False
]
for i, name in enumerate(names):
self.assertTrue(
ref[i] == ASPHelper._is_supported_layer(program, name))
paddle.incubate.asp.set_excluded_layers(['fc_1', 'conv2d_0'])
ref = [
False, False, False, False, True, False, True, False, False, False,
True, False
]
for i, name in enumerate(names):
self.assertTrue(
ref[i] == ASPHelper._is_supported_layer(program, name))
paddle.incubate.asp.reset_excluded_layers()
ref = [
False, False, True, False, True, False, True, False, True, False,
True, False
]
for i, name in enumerate(names):
self.assertTrue(
ref[i] == ASPHelper._is_supported_layer(program, name))
def test_decorate(self):
param_names = [param.name for param in self.layer.parameters()]
self.optimizer = paddle.incubate.asp.decorate(self.optimizer)
program = paddle.static.default_main_program()
for name in param_names:
mask_var = ASPHelper._get_program_asp_info(program).mask_vars.get(
name, None)
if ASPHelper._is_supported_layer(program, name):
self.assertTrue(mask_var is not None)
else:
self.assertTrue(mask_var is None)
def test_asp_training(self):
self.optimizer = paddle.incubate.asp.decorate(self.optimizer)
paddle.incubate.asp.prune_model(self.layer)
imgs = paddle.to_tensor(
np.random.randn(32, 3, 24, 24),
dtype='float32',
place=self.place,
stop_gradient=False)
labels = paddle.to_tensor(
np.random.randint(
10, size=(32, 1)),
dtype='float32',
place=self.place,
stop_gradient=False)
loss_fn = paddle.nn.MSELoss(reduction='mean')
output = self.layer(imgs)
loss = loss_fn(output, labels)
loss.backward()
self.optimizer.step()
self.optimizer.clear_grad()
for param in self.layer.parameters():
if ASPHelper._is_supported_layer(
paddle.static.default_main_program(), param.name):
mat = param.numpy()
self.assertTrue(
paddle.fluid.contrib.sparsity.check_sparsity(
mat.T, n=2, m=4))
def test_asp_training_with_amp(self):
self.optimizer = paddle.incubate.asp.decorate(self.optimizer)
paddle.incubate.asp.prune_model(self.layer)
imgs = paddle.to_tensor(
np.random.randn(32, 3, 24, 24),
dtype='float32',
place=self.place,
stop_gradient=False)
labels = paddle.to_tensor(
np.random.randint(
10, size=(32, 1)),
dtype='float32',
place=self.place,
stop_gradient=False)
loss_fn = paddle.nn.MSELoss(reduction='mean')
scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
with paddle.amp.auto_cast(enable=True):
output = self.layer(imgs)
loss = loss_fn(output, labels)
scaled = scaler.scale(loss)
scaled.backward()
scaler.minimize(self.optimizer, scaled)
self.optimizer.clear_grad()
for param in self.layer.parameters():
if ASPHelper._is_supported_layer(
paddle.static.default_main_program(), param.name):
mat = param.numpy()
self.assertTrue(
paddle.fluid.contrib.sparsity.check_sparsity(
mat.T, n=2, m=4))
if __name__ == '__main__':
unittest.main()
| 6,065 |
astropop/plot_utils/skyview.py
|
rudnerlq/astropop
| 3 |
2172275
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Skyview helper for default plots"""
import numpy as np
from astropy.wcs.utils import proj_plane_pixel_scales
from astroquery.skyview import SkyView
from astropy.coordinates import SkyCoord
from reproject import reproject_interp
from astropy import units as u
def get_dss_image(shape, wcs, survey='DSS'):
'''Use astroquery SkyView to get a DSS image projected to wcs and shape.'''
shape = np.array(shape)
platescale = proj_plane_pixel_scales(wcs)
ra, dec = wcs.wcs_pix2world(*(shape/2), 0)
sk = SkyCoord(ra, dec, unit=('degree', 'degree'), frame='icrs')
    im = SkyView.get_images(sk, survey=survey, coordinates='ICRS',
                            width=shape[0]*platescale[0]*u.degree,
                            height=shape[1]*platescale[1]*u.degree)[0]
im = reproject_interp(im[0], output_projection=wcs, shape_out=shape)[0]
return im
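# Hypothetical usage sketch (not part of the original module): given a FITS image
# with a celestial WCS, fetch a DSS cutout reprojected onto the same pixel grid.
# Requires network access to SkyView; 'field.fits' is a placeholder path.
#   from astropy.io import fits
#   from astropy.wcs import WCS
#   hdu = fits.open('field.fits')[0]
#   dss = get_dss_image(hdu.data.shape, WCS(hdu.header))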
| 937 |
api.py
|
seekheart/bank_rate_scraper
| 0 |
2172645
|
from flask import Flask, jsonify
from flask_cors import CORS
from engines import BankRateEngine
app = Flask(__name__)
CORS(app)
bank_rate_engine = BankRateEngine()
@app.route('/rates', methods=['GET'])
def get_rates():
return jsonify(bank_rate_engine.find_all())
if __name__ == '__main__':
app.run(
host='0.0.0.0',
port=3000,
debug=False,
threaded=True
)
| 405 |
objectModel/Python/tests/cdm/projection/test_projection_fk.py
|
MiguelSHS/microsoftCDM
| 1 |
2171516
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
import os
import unittest
from typing import List
from cdm.objectmodel import CdmCorpusDefinition, CdmManifestDefinition, CdmEntityDefinition
from cdm.utilities import ResolveOptions, AttributeResolutionDirectiveSet
from tests.cdm.projection.attribute_context_util import AttributeContextUtil
from tests.common import async_test
from tests.utilities.projection_test_utils import ProjectionTestUtils
class ProjectionFKTest(unittest.TestCase):
res_opts_combinations = [
[],
['referenceOnly'],
['normalized'],
['structured'],
['referenceOnly', 'normalized'],
['referenceOnly', 'structured'],
['normalized', 'structured'],
['referenceOnly', 'normalized', 'structured']
]
# The path between TestDataPath and TestName.
tests_subpath = os.path.join('Cdm', 'Projection', 'TestProjectionFK')
@async_test
async def test_entity_attribute(self):
test_name = 'test_entity_attribute'
entity_name = 'SalesEntityAttribute'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name)
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, \
self.tests_subpath, entity_name, res_opt)
@async_test
async def test_entity_attribute_proj(self):
test_name = 'test_entity_attribute_proj'
entity_name = 'SalesEntityAttribute'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name)
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, \
self.tests_subpath, entity_name, res_opt)
@async_test
async def test_source_with_EA(self):
test_name = 'test_source_with_EA'
entity_name = 'SalesSourceWithEA'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name)
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, \
self.tests_subpath, entity_name, res_opt)
@async_test
async def test_source_with_EA_proj(self):
test_name = 'test_source_with_EA_proj'
entity_name = 'SalesSourceWithEA'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name)
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, \
self.tests_subpath, entity_name, res_opt)
@async_test
async def test_group_FK(self):
test_name = 'test_group_FK'
entity_name = 'SalesGroupFK'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name)
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, \
self.tests_subpath, entity_name, res_opt)
@async_test
async def test_group_FK_proj(self):
test_name = 'test_group_FK_proj'
entity_name = 'SalesGroupFK'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name)
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, \
self.tests_subpath, entity_name, res_opt)
@async_test
async def test_nested_FK_proj(self):
test_name = 'test_nested_FK_proj'
entity_name = 'SalesNestedFK'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name)
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, \
self.tests_subpath, entity_name, res_opt)
@async_test
async def test_polymorphic(self):
test_name = 'test_polymorphic'
entity_name = 'PersonPolymorphicSource'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name)
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, \
self.tests_subpath, entity_name, res_opt)
@async_test
async def test_polymorphic_proj(self):
test_name = 'test_polymorphic_proj'
entity_name = 'PersonPolymorphicSource'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name)
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, \
self.tests_subpath, entity_name, res_opt)
@async_test
async def test_polymorphic_FK_proj(self):
test_name = 'test_polymorphic_FK_proj'
entity_name = 'PersonPolymorphicSourceFK'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name)
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, \
self.tests_subpath, entity_name, res_opt)
@async_test
async def test_array_source(self):
test_name = 'test_array_source'
entity_name = 'SalesArraySource'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name)
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, \
self.tests_subpath, entity_name, res_opt)
@async_test
async def test_array_source_proj(self):
test_name = 'test_array_source_proj'
entity_name = 'SalesArraySource'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name)
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, \
self.tests_subpath, entity_name, res_opt)
@async_test
async def test_foreign_key(self):
test_name = 'test_foreign_key'
entity_name = 'SalesForeignKey'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name)
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, \
self.tests_subpath, entity_name, res_opt)
@async_test
async def test_foreign_key_proj(self):
test_name = 'test_foreign_key_proj'
entity_name = 'SalesForeignKey'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name)
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, \
self.tests_subpath, entity_name, res_opt)
@async_test
async def test_foreign_key_always(self):
test_name = 'test_foreign_key_always'
entity_name = 'SalesForeignKeyAlways'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name)
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, \
self.tests_subpath, entity_name, res_opt)
@async_test
async def test_composite_key_proj(self):
self.maxDiff = None
test_name = 'test_composite_key_proj'
entity_name = 'SalesCompositeKey'
corpus = ProjectionTestUtils.get_local_corpus(self.tests_subpath, test_name)
for res_opt in self.res_opts_combinations:
await ProjectionTestUtils.load_entity_for_resolution_option_and_save(self, corpus, test_name, \
self.tests_subpath, entity_name, res_opt)
| 8,220 |
substring/substr.py
|
Yurimahendra/latihan-big-data
| 0 |
2171861
|
# count how many times substring x occurs in string s
def cekSubString(x, s) :
    # check whether x occurs in s at all
    status = x in s
    # initial value of the counter
    sum = 0
    for i in range(len(s)) :
        # every time x is found at position i, increase the counter
        if s[i:i+len(x)] == x :
            sum += 1
    return (status, sum)
st = input("enter a string : ")
sst = input("enter a substring : ")
print(cekSubString(sst, st))
| 409 |
src/cloud_tasks_deferred/wsgi.py
|
grktsh/python-cloud-tasks-deferred
| 3 |
2170198
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import six
from cloud_tasks_deferred import deferred
logger = logging.getLogger(__name__)
def application(environ, start_response):
"""A WSGI application that processes deferred invocations."""
def abort(status):
start_response(status, [('Content-Type', 'text/plain')])
return []
if environ['REQUEST_METHOD'] != 'POST':
return abort('405 Method Not Allowed')
if environ.get('CONTENT_TYPE') != 'application/octet-stream':
return abort('415 Unsupported Media Type')
if not any(key.upper() == 'HTTP_X_APPENGINE_TASKNAME' for key in environ):
logger.error(
'Detected an attempted XSRF attack. '
'The header "X-AppEngine-Taskname" was not set.'
)
return abort('403 Forbidden')
headers = [
k + ':' + v
for k, v in six.iteritems(environ)
if k.upper().startswith('HTTP_X_APPENGINE_')
]
logger.log(deferred._DEFAULT_LOG_LEVEL, ', '.join(headers))
content_length = int(environ.get('CONTENT_LENGTH', 0))
data = environ['wsgi.input'].read(content_length)
try:
deferred.run(data)
except deferred.SingularTaskFailure:
logger.debug('Failure executing task, task retry forced')
return abort('408 Request Timeout')
except deferred.PermanentTaskFailure:
logger.exception('Permanent failure attempting to execute task')
except Exception:
return abort('500 Internal Server Error')
start_response('204 No Content', [])
return []
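# Hypothetical local smoke test (not part of the original module): serve the handler
# with the stdlib reference WSGI server so deferred-task POST requests can be exercised.
if __name__ == '__main__':
    from wsgiref.simple_server import make_server
    make_server('127.0.0.1', 8080, application).serve_forever()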
| 1,654 |
src/test/page/baidu_result_page.py
|
Anduin-Zhu/Test_framework
| 0 |
2172610
|
# -*- coding:utf-8 -*-
__author__ = '朱永刚'
from selenium.webdriver.common.by import By
from test.page.baidu_main_page import BaiDuMainPage
class BaiDuResultPage(BaiDuMainPage):
loc_result_links = (By.XPATH, '//div[contains(@class, "result")]/h3/a')
@property
def result_links(self):
return self.find_elements(*self.loc_result_links)
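# Hypothetical usage sketch: assumes BaiDuMainPage wraps a selenium WebDriver and that
# the page object is constructed from a driver instance (constructor signature assumed).
#   from selenium import webdriver
#   driver = webdriver.Chrome()
#   page = BaiDuResultPage(driver)
#   for link in page.result_links:
#       print(link.text)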
| 355 |
original_tests/testread.py
|
zhanghuiying2319/Master
| 0 |
2170964
|
import numpy as np, os,sys, matplotlib.pyplot as plt
def tester():
with open('test_withBoundaries_new.npy','rb') as f:
a = np.load(f,allow_pickle=True)
b = np.load(f,allow_pickle=True)
c = np.load(f,allow_pickle=True)
print(a.shape)
print(b.shape)
print(c.shape)
lb = np.zeros((0,9))
sizes=np.zeros( (a.shape[0],2))
mom1 = 0
mom2 = 0
mom1a = 0
mom2a = 0
mom1asr = 0
mom2asr = 0
for i in range(a.shape[0]):
print('a[i].shape', a[i].shape, np.mean(a[i]))
print(b[i][0], b[i][1])
sizes[i,0]=b[i][0]
sizes[i,1]=b[i][1]
print('c[i].shape',c[i].shape, c[i])
lb=np.concatenate( (lb, np.expand_dims(c[i],0)))
print(type(a[i]),type(b[i]),type(c[i]))
#print(a)
#print(b)
#print(np.linalg.norm(a-b))
mom1+= np.mean(a[i]) / a.shape[0]
mom2+= np.mean(a[i] * a[i]) /a.shape[0]
#ar = (sizes[i,0]*sizes[i,1])**0.5
#mom1a+= np.mean( ar) / a.shape[0]
#mom2a+= np.mean(ar * ar) /a.shape[0]
aspectr = min(sizes[i,0], sizes[i,1] ) / float( max(sizes[i,0], sizes[i,1] ) )
mom1asr+= np.mean( aspectr) / a.shape[0]
mom2asr+= np.mean(aspectr * aspectr) /a.shape[0]
print('mean std',mom1, (mom2-mom1*mom1)**0.5 )
hsizep = np.percentile(sizes[:,0], q= [ i*10 for i in range(11) ])
wsizep = np.percentile(sizes[:,1], q= [ i*10 for i in range(11) ])
print(sizes.shape[0]) # 4977
print(sizes[:,0])
print(hsizep)
print(wsizep)
#mean std 0.11091382547154618 0.14565146317287114
for c in range(9):
print('c',c,np.sum(lb[:,c]))
#print('area stats ',mom1a, (mom2a-mom1a*mom1a)**0.5 )
print('aspect stats ',mom1asr, (mom2asr-mom1asr*mom1asr)**0.5 )
def tester2():
with open('testread.npy','rb') as f:
d = np.load(f,allow_pickle=True)
print(type(d), d.shape)
if __name__=='__main__':
tester()
| 1,858 |
src/chat/models.py
|
MoizAK/Django_ChatApplication
| 0 |
2171682
|
from django.db import models
from django.contrib.auth import get_user_model
User = get_user_model()
'''
Model for storing chat messages passed through the server in the database.
'''
class Message(models.Model):
author = models.ForeignKey(User, related_name = 'author_messages', on_delete = models.CASCADE)
    # CASCADE deletes all of a user's messages when that user is deleted
content = models.TextField()
timestamp = models.DateTimeField(auto_now_add = True)
def __str__(self):
return self.author.username
def last_10_msg(self):
return Message.objects.order_by('-timestamp').all()[:10]
| 616 |
code/abc107_b_02.py
|
KoyanagiHitoshi/AtCoder
| 3 |
2172196
|
h,w=map(int,input().split())
a=[[j for j in input()] for i in range(h)]
b=[]
for x in a:
if "#" in x:b.append(x)
c=[]
for y in zip(*b):
if "#" in y:c.append(y)
for a in zip(*c):print("".join(a))
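# Trace on a small input (following the code above):
#   4 4
#   ##.#
#   ....
#   ##.#
#   .#.#
# row 2 contains no '#' and is dropped; then the all-'.' third column is dropped via
# the zip(*b) transpose, so the printed grid is:
#   ###
#   ###
#   .##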
| 202 |
examples/Cshape.py
|
ion-g-ion/code-paper-tt-iga
| 0 |
2171108
|
import torch as tn
import torchtt as tntt
import matplotlib.pyplot as plt
import tt_iga
import numpy as np
import datetime
import matplotlib.colors
import pandas as pd
tn.set_default_dtype(tn.float64)
Np = 8
Ns = [40,20,80]
deg = 2
nl = 8
qtt = True
# B-splines
Ns = np.array([40,20,80])-deg+1
baza1 = tt_iga.bspline.BSplineBasis(np.linspace(0,1,Ns[0]),deg)
baza2 = tt_iga.bspline.BSplineBasis(np.linspace(0,1,Ns[1]),deg)
baza3 = tt_iga.bspline.BSplineBasis(np.linspace(0,1,Ns[2]),deg)
Basis = [baza1,baza2,baza3]
N = [baza1.N,baza2.N,baza3.N]
# Parameter space basis
var = 0.05
Basis_param = [tt_iga.lagrange.LagrangeLeg(nl,[-var,var])]*Np
# B-spline basis for the radius perturbation
bspl = tt_iga.bspline.BSplineBasis(np.linspace(0,1,Np-2+3),2)
def interface_func(t1,tp):
return tn.einsum('ij,ji->j',tn.tensor(bspl(t1)[1:-1,:]),tn.tensor(tp))
line = lambda t,a,b: t*(b-a)+a
damp = lambda x: 1 # -4*x*(x-1)
# parametrization
w = 1
h = 0.5
r = 2
xparam = lambda t : (2-h+line(t[:,1],0,interface_func(t[:,2],t[:,3:])*damp(t[:,2])+h))*tn.cos(1.5*np.pi+0.25*np.pi*t[:,2])
yparam = lambda t : (2-h+line(t[:,1],0,interface_func(t[:,2],t[:,3:])*damp(t[:,2])+h))*tn.sin(1.5*np.pi+0.25*np.pi*t[:,2])
zparam = lambda t : w*t[:,0]
# instantiate the GeometryMapping object. It is used for interpolating, evaluating and computing the discrete operators corresponding to a parameter dependent geometry
geom = tt_iga.Geometry(Basis+Basis_param)
# interpolate the geometry parametrization
geom.interpolate([xparam, yparam, zparam])
# compute the mass matrix in TT
tme = datetime.datetime.now()
Mass_tt = geom.mass_interp(eps=1e-11)
tme = datetime.datetime.now() -tme
print('Time mass matrix ',tme.total_seconds())
# if tn.cuda.is_available():
# tme = datetime.datetime.now()
# Stt = geom.stiffness_interp( eps = 1e-9, qtt = True, verb=True, device = tn.device('cuda:0'))
# tme = datetime.datetime.now() -tme
# print('Time stiffness matrix GPU',tme.total_seconds())
# dct['time stiff GPU'] = tme.total_seconds()
tme = datetime.datetime.now()
Stt = geom.stiffness_interp( eps = 1e-9, qtt = qtt, verb=True, device = None)
tme = datetime.datetime.now() -tme
print('Time stiffness matrix ',tme.total_seconds())
# projection operators for enforcing the BCs
Pin_tt, Pbd_tt = tt_iga.projectors.get_projectors(N,[[1,1],[0,0],[1,1]])
Pin_tt = Pin_tt ** tntt.eye([nl]*Np)
Pbd_tt = Pbd_tt ** tntt.eye([nl]*Np)
# right hand side. Zero since we solve the homogeneous equation.
f_tt = tntt.zeros(Stt.N)
# interpolate the excitation and compute the corresponding tensor
u0 = 1
extitation_dofs = tt_iga.Function(Basis).interpolate(lambda t: t[:,0]*0+u0)
tmp = np.zeros(N)
tmp[:,-1,:] = extitation_dofs[:,-1,:].full()
g_tt = Pbd_tt @ (tntt.TT(tmp) ** tntt.ones([nl]*Np))
# assemble the system matrix
M_tt = Pin_tt@Stt@Pin_tt + Pbd_tt
rhs_tt = Pin_tt @ (Mass_tt @ f_tt - Stt @ Pbd_tt @ g_tt) + g_tt
M_tt = M_tt.round(1e-11)
# solve the system
eps_solver = 1e-7
tme_amen = datetime.datetime.now()
dofs_tt = tntt.solvers.amen_solve(M_tt, rhs_tt, x0 = tntt.ones(rhs_tt.N), eps = eps_solver, nswp = 50, preconditioner = 'c', verbose = False)
tme_amen = (datetime.datetime.now() -tme_amen).total_seconds()
print('Time solver', tme_amen)
if tn.cuda.is_available():
tme_amen_gpu = datetime.datetime.now()
dofs_tt = tntt.solvers.amen_solve(M_tt.cuda(), rhs_tt.cuda(), x0 = tntt.ones(rhs_tt.N).cuda(), eps = eps_solver, nswp = 50, preconditioner = 'c', verbose = False).cpu()
tme_amen_gpu = (datetime.datetime.now() -tme_amen_gpu).total_seconds()
print('Time solver GPU', tme_amen_gpu)
# report rank and memory statistics
print('Rank matrix',np.mean(M_tt.R))
print('Rank rhs',np.mean(rhs_tt.R))
print('Rank solution',np.mean(dofs_tt.R))
print('Memory stiff [MB]',tntt.numel(Stt)*8/1e6)
print('Memory mass [MB]',tntt.numel(Mass_tt)*8/1e6)
print('Memory system mat [MB]',tntt.numel(M_tt)*8/1e6)
print('Memory rhs [MB]',tntt.numel(rhs_tt)*8/1e6)
print('Memory solution [MB]',tntt.numel(dofs_tt)*8/1e6)
# check the error for the case Theta = 0 (cylinder capacitor)
fspace = tt_iga.Function(Basis+Basis_param)
fspace.dofs = dofs_tt
u_val = fspace([tn.linspace(0,1,8),tn.linspace(0,1,128),tn.linspace(0,1,128)]+[tn.tensor([0.0]) for i in range(Np)]).full()
x,y,z = geom([tn.linspace(0,1,8),tn.linspace(0,1,128),tn.linspace(0,1,128)]+[tn.tensor([0.0]) for i in range(Np)])
r = tn.sqrt(x.full()**2+y.full()**2)
a = u0/np.log(2/(2-h))
b = u0-a*np.log(2)
u_ref = a*tn.log(r)+b
err = tn.max(tn.abs(u_val-u_ref))
print('\nMax err %e\n\n'%(err))
random_params4plot = [2*var*(tn.rand((1))-0.5) for i in range(Np)]
u_val = fspace([tn.tensor([0.5]), tn.linspace(0,1,128), tn.linspace(0,1,128)]+random_params4plot).full()
x,y,z = geom([tn.tensor([0.5]), tn.linspace(0,1,128), tn.linspace(0,1,128)]+random_params4plot)
plt.figure()
fig = geom.plot_domain(random_params4plot, [(0,1),(0,1),(0.0,1)], surface_color=None, wireframe = False, alpha=0.1, n=64, frame_color = 'k')
ax = fig.gca()
C = u_val.numpy().squeeze()
norm = matplotlib.colors.Normalize(vmin=C.min(),vmax=C.max())
C = plt.cm.jet(norm(C))
C[:,:,-1] = 1
ax.plot_surface(x.numpy().squeeze(), y.numpy().squeeze(), z.numpy().squeeze(), edgecolors=None, linewidth=0, facecolors = C, antialiased=True, rcount=256, ccount=256, alpha=0.5)
fig.gca().set_xlabel(r'$x_1$', fontsize=14)
fig.gca().set_ylabel(r'$x_2$', fontsize=14)
fig.gca().set_zlabel(r'$x_3$', fontsize=14)
fig.gca().view_init(45, -60)
fig.gca().zaxis.set_rotate_label(False)
fig.gca().set_xticks([0,0.5,1,1.5])
fig.gca().set_yticks([-2,-1.5,-1])
fig.gca().set_zticks([0,0.5,1])
fig.gca().tick_params(axis='both', labelsize=14)
fig.gca().set_box_aspect(aspect = (1.5,1,1))
plt.figure()
fig = geom.plot_domain([tn.tensor([0.0])]*Np, [(0,1),(0,1),(0.0,1)], surface_color='blue', wireframe = False, alpha=0.1, n=64, frame_color = 'k')
for i in range(5): geom.plot_domain([2*var*(tn.rand((1))-0.5) for i in range(Np)],[(0,1),(0,1),(0.0,1)], fig = fig, surface_color=None, wireframe = False, alpha=0.1, n=64, frame_color = 'r')
fig.gca().set_xlabel(r'$x_1$', fontsize=14)
fig.gca().set_ylabel(r'$x_2$', fontsize=14)
fig.gca().set_zlabel(r'$x_3$', fontsize=14)
fig.gca().view_init(45, -60)
fig.gca().zaxis.set_rotate_label(False)
fig.gca().set_xticks([0,0.5,1,1.5])
fig.gca().set_yticks([-2,-1.5,-1])
fig.gca().set_zticks([0,0.5,1])
fig.gca().tick_params(axis='both', labelsize=14)
fig.gca().set_box_aspect(aspect = (1.5,1,1))
| 6,458 |
tool/dump/gdb_scripts/metafs.py
|
so931/poseidonos
| 1 |
2172691
|
import gdb
import sys
import os
current_path = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(1, current_path)
sys.path.insert(1, current_path + "/../")
import core_dump_lib
import gdb_lib
def show_metafs_mbr(metafsPtr):
print("- mbr info")
requestString = 'p ((pos::MetaFs*)' + metafsPtr + ').mgmt.sysMgr.mbrMgr.mbr.content'
output = gdb.execute(requestString + '.isNPOR', to_string=True)
output = output.strip(',\n ')
print("isNPOR: " + output.split('=')[1].strip())
output = gdb.execute(requestString + '.mbrSignature', to_string=True)
output = output.strip(',\n ')
print("mbrSignature: " + output.split('=')[1].strip())
output = gdb.execute(requestString + '.mfsEpochSignature', to_string=True)
output = output.strip(',\n ')
print("mfsEpochSignature: " + output.split('=')[1].strip())
output = gdb.execute(requestString + '.geometry.volumeInfo.totalFilesystemVolumeCnt', to_string=True)
output = output.strip(',\n ')
partitionCnt = int(output.strip(',\n ').split('=')[1].strip())
for idx in range(partitionCnt):
result = ""
output = gdb.execute(requestString + '.geometry.mediaPartitionInfo._M_elems[' + str(idx) + ']', to_string=True)
output = output.strip(',\n ').split('\n')
output = [i.split('=') for i in output]
for i in range(1, len(output) - 1):
result += output[i][0].strip() + ": " + output[i][1].strip(", ") + " "
print(result)
def print_metafs_config(configName):
config = gdb.execute('p pos::debugInfo.metaFsService.configManager.' + configName, to_string=True)
config = config.split('=')
print(configName + ": " + config[1].strip())
def show_metafs_config():
print_metafs_config("mioPoolCapacity_")
print_metafs_config("mpioPoolCapacity_")
print_metafs_config("writeMpioEnabled_")
print_metafs_config("writeMpioCapacity_")
print_metafs_config("directAccessEnabled_")
print_metafs_config("timeIntervalInMillisecondsForMetric_")
def get_metafs_ptr_list():
metafsList = gdb.execute('p pos::debugInfo->metaFsService->fileSystems', to_string=True)
metafsList = metafsList.split('\n')
metafsPtrList = []
# array
for item in metafsList:
if "_M_elems =" in item:
first = item.split('=')[1].strip(',\n {}')
if len(first) > 0:
count = 0
addrList = first.split(',')
for addr in addrList:
metafsPtrList.append(addr.strip(',\n '))
count += 1
return metafsPtrList
def show_array_list():
arrayList = gdb.execute('p pos::debugInfo->metaFsService->arrayNameToId', to_string=True)
arrayList = arrayList.split('\n')
count = 0
# unordered_map
for item in arrayList:
if "[" in item:
first = item.split('=')[0].strip()[1:][:-1]
second = item.split('=')[1].strip(',\n ')
print(str(count) + ": arrayName: " + first + ", arrayId: " + second)
count += 1
def show_inode_info(inodePtr):
fieldList = gdb.execute('p ((pos::MetaFileInode*)' + inodePtr + ').data.basic.field', to_string=True)
fieldList = fieldList.split('\n')
fd = 0
fileName = ""
for line in fieldList:
if "fd =" in line:
fd = line.split('=')[1].strip(',\n ')
elif "_M_elems =" in line:
fileName = line.split('=')[1].strip(',\n ').split(',')[0]
print("fd: " + str(fd) + ", fileName: " + fileName)
def show_volume_info(volumePtr):
volumeType = gdb.execute('p ((pos::MetaVolume*)' + volumePtr + ').volumeType', to_string=True)
volumeType = volumeType.split('=')[1].strip(',\n ')
print("- volume ptr: " + volumePtr + ", volumeType: " + volumeType)
inodePtrList = gdb.execute('p ((pos::MetaVolume*)' + volumePtr + ').inodeMgr.fd2InodeMap', to_string=True)
inodePtrList = inodePtrList.split('\n')
for line in inodePtrList:
if "[" in line:
inodeAddr = line.split('=')[1].strip(',\n ')
show_inode_info(inodeAddr)
print("- extent allocator")
output = gdb.execute('p ((pos::MetaVolume*)' + volumePtr + ').inodeMgr.extentAllocator.freeList', to_string=True)
output = output.split('\n')
print("free list")
extentStr = ""
for item in output:
if "startLpn =" in item:
extentStr = "startLpn: " + item.strip(", ").split(" = ")[1]
elif "count =" in item:
extentStr += ", count: " + item.strip(", ").split(" = ")[1]
print(extentStr)
def show_metafs_io_info(metafsPtr):
print("- meta io manager")
output = gdb.execute('p ((pos::MetaFs*)' + metafsPtr + ').io.ioMgr.mioHandlerCount', to_string=True)
output = output.split('=')
print("meta thread count: " + output[1].strip())
def get_metafs_info_str(metafsPtr):
requestStr = "p ((pos::MetaFs*)" + metafsPtr + ")"
arrayId = gdb.execute(requestStr + '.arrayId_', to_string=True)
arrayId = arrayId.split('=')[1].strip(',\n ')
arrayName = gdb.execute(requestStr + '.arrayName_._M_dataplus._M_p', to_string=True)
arrayName = arrayName.split('\"')[1].strip(',\n ')
result = "arrayId: " + arrayId + ", arrayName: " + arrayName
return result
def show_status():
print("##### metafs information #####")
print("\n# metafs config")
show_metafs_config()
print("\n# metafs ptr list (metaFsService->fileSystems)")
metafsPtrList = get_metafs_ptr_list()
count = 0
for metafsPtr in metafsPtrList:
if metafsPtr != "0x0":
print(str(count) + ": " + metafsPtr)
count += 1
count = 0
for metafsPtr in metafsPtrList:
if metafsPtr != "0x0":
arrayStr = get_metafs_info_str(metafsPtr)
print("\n# metafs ptr " + str(count) + ": " + metafsPtr + ", " + arrayStr)
show_metafs_io_info(metafsPtr)
show_metafs_mbr(metafsPtr)
count += 1
volumeContainerList = gdb.execute('p ((pos::MetaFs*)' + metafsPtr + ').ctrl.volMgr.volContainer.volumeContainer', to_string=True)
volumeContainerList = volumeContainerList.split('\n')
for metaVolume in volumeContainerList:
if "get() =" in metaVolume:
volumeAddr = metaVolume.split('=')
volumeAddr = volumeAddr[1].strip(',\n {}')
show_volume_info(volumeAddr)
print("- if you want to see ctrl.cxtList: " + "p ((pos::MetaFs*)" + metafsPtr + ").ctrl.cxtList")
| 6,549 |
utils/request_utils.py
|
Rudi9719/booksearch-web
| 0 |
2172514
|
#!/usr/bin/env python
from api.error import Error
from frameworks.bottle import request, response
def validate_value(name, value, enforced_type=str, max=None, min=None, choices=None, step=None):
if value is None:
return None
# Check type
try:
if enforced_type == str:
value_type = type(value)
if value_type == unicode:
value = value.encode("utf-8")
else:
value = str(value)
elif enforced_type == int:
value = int(value)
elif enforced_type == bool:
if type(value) != bool:
value = value.lower() in ["true"]
elif enforced_type == float:
value = float(value)
except:
Error.raise_bad_request(response, "Unexpected type {0} for field {1}. Expected {2}.".format(type(value).__name__, name, enforced_type.__name__), field=name)
# Check step
if enforced_type == float and step is not None:
if value % step != 0:
Error.raise_bad_request(response, "Expected type {0} with incorrect precision for field {1}.".format(type(value).__name__, name), field=name)
# Check max
if max:
if enforced_type == str and len(value) > max or (enforced_type == int or enforced_type == float) and value > max:
Error.raise_bad_request(response, "Value for field {0} too long. Must be {1} or less.".format(name, max), field=name)
# Check min
if min:
if enforced_type == str and len(value) < min or (enforced_type == int or enforced_type == float) and value < min:
Error.raise_bad_request(response, "Value for field {0} too short. Must be {1} or more.".format(name, min), field=name)
# Check choices
if choices:
if not value in choices:
Error.raise_bad_request(response, "Value for field {0} not a valid option.".format(name), field=name)
return value
def get_values(name, source, enforced_type=str, required=True, max=None, min=None, default=None, choices=None):
# Get values
values = None
if source:
if source == request.params:
values = source.getall(name)
else:
values = source.get(name)
# Check default
if values is None and default is not None:
values = default
# Check required
if required and (values is None or len(values) == 0) :
Error.raise_bad_request(response, "Missing required field {0}".format(name), field=name)
# Process values
processed_values = list()
if values:
for value in values:
processed_values.append(validate_value(name, value, enforced_type=enforced_type, max=max, min=min, choices=choices))
return processed_values
def get_value(name, source, enforced_type=str, required=True, max=None, min=None, default=None, choices=None, step=None):
# Get value
value = source.get(name) if source else None
if value is None and default is not None:
value = default
# Check required
if value is None and required:
Error.raise_bad_request(response, "Missing required field {0}".format(name), field=name)
return validate_value(name, value, enforced_type=enforced_type, max=max, min=min, choices=choices, step=step)
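# Illustrative sketch (hypothetical field names): outside of a live request the helpers
# can be exercised with a plain dict as the source. Valid values are coerced to the
# enforced type; violations go through Error.raise_bad_request instead of returning.
#   get_value('page_size', {'page_size': '10'}, enforced_type=int, max=100)  # -> 10
#   get_value('sort', {'sort': 'asc'}, choices=['asc', 'desc'])              # -> 'asc'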
| 3,278 |
shiva_deployer/tests/test_commandline.py
|
erikrose/shiva
| 2 |
2171111
|
"""Tests for the shiva commandline program"""
from nose.tools import eq_, assert_raises
from nose.util import src
from shiva_deployer.commandline import inner_main, wake_pickle
from shiva_deployer.exceptions import ShouldNotDeploy
def test_subcommand_success():
"""Make sure shiva finds and delegates to a Deployment instance method and
extracts the result, when all goes well.
"""
status, output = inner_main([src(__file__),
'--shiva-subcommand',
'get_lock_name'])
eq_(wake_pickle(output), 'harvey')
eq_(status, 0)
def test_subcommand_should_not_deploy():
"""If a shiva subcommand raises ShouldNotDeploy, it should get pickled and
printed.
"""
status, output = inner_main([src(__file__),
'--shiva-subcommand',
'check_out'])
assert_raises(ShouldNotDeploy, wake_pickle, output)
eq_(status, 0)
class Deployment(object):
def __init__(self, argv):
pass
def get_lock_name(self):
return 'harvey'
def check_out(self):
raise ShouldNotDeploy
| 1,161 |
scripts/data_convert/msmarco/add_doc2query_pass.py
|
gitter-badger/FlexNeuART
| 101 |
2171549
|
#!/usr/bin/env python
#
# Copyright 2014+ Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Adding predicted query fields for the MS MARCO *PASSAGE* collection.
https://github.com/castorini/docTTTTTquery
It reads all the predictions into memory
"""
import argparse
import json
from tqdm import tqdm
from flexneuart.text_proc.parse import SpacyTextParser
from flexneuart.io.stopwords import read_stop_words, STOPWORD_FILE
from flexneuart.io import jsonl_gen, FileWrapper
from flexneuart.config import SPACY_MODEL
from flexneuart.config import DOCID_FIELD, TEXT_FIELD_NAME
DOC2QUERY_FIELD_TEXT = 'doc2query_text'
DOC2QUERY_FIELD_TEXT_UNLEMM = 'doc2query_text_unlemm'
parser = argparse.ArgumentParser(description='Add doc2query fields to the existing JSONL data entries')
parser.add_argument('--input', metavar='input JSONL file', help='input JSONL file (can be compressed)',
type=str, required=True)
parser.add_argument('--output', metavar='output JSONL file', help='output JSONL file (can be compressed)',
type=str, required=True)
parser.add_argument('--target_fusion_field', metavar='target fusion field',
help='the name of the target field that will store concatenation of the lemmatized doc2query text and the original lemmatized text',
type=str, required=True)
parser.add_argument('--predictions_path',
required=True, metavar='doc2query predictions',
help='File containing predicted queries for passage data: one per each passage.')
args = parser.parse_args()
print(args)
stop_words = read_stop_words(STOPWORD_FILE, lower_case=True)
print(stop_words)
nlp = SpacyTextParser(SPACY_MODEL, stop_words, keep_only_alpha_num=True, lower_case=True)
doc_id_prev = None
predicted_queries = []
target_fusion_field = args.target_fusion_field
for line in tqdm(FileWrapper(args.predictions_path), desc='reading predictions'):
line = line.strip()
if line:
predicted_queries.append(line)
print(f'Read predictions for {len(predicted_queries)} passages')
pass_qty = 0
with FileWrapper(args.output, 'w') as outf:
for doce in tqdm(jsonl_gen(args.input), desc='adding doc2query fields'):
doc_id = doce[DOCID_FIELD]
text, text_unlemm = nlp.proc_text(predicted_queries[pass_qty])
doce[target_fusion_field] = doce[TEXT_FIELD_NAME] + ' ' + text
doce[DOC2QUERY_FIELD_TEXT] = text
doce[DOC2QUERY_FIELD_TEXT_UNLEMM] = text_unlemm
pass_qty += 1
outf.write(json.dumps(doce) + '\n')
if pass_qty != len(predicted_queries):
raise Exception(f'Mismatch in the number of predicted queries: {len(predicted_queries)} ' +
f' and the total number of passages: {pass_qty}')
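# Example invocation (hypothetical file names; the flags are the ones defined above):
#   python add_doc2query_pass.py \
#       --input pass_input.jsonl.gz --output pass_with_doc2query.jsonl.gz \
#       --target_fusion_field text_doc2query --predictions_path predicted_queries.txt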
| 3,331 |
plugins/grpc/client/kaldi_serve/__init__.py
|
zhaoyi2/kaldi-based-asr-server
| 79 |
2171639
|
from kaldi_serve.core import KaldiServeClient
from kaldi_serve.kaldi_serve_pb2 import RecognitionAudio, RecognitionConfig
| 122 |
main.py
|
samomar/spotify-desktop-overlay-temp-fix
| 0 |
2172559
|
import pyautogui
import cv2
from PIL import Image
im_spotify_logo = cv2.imread('spotify_logo.png')
spotify_logo = Image.fromarray(im_spotify_logo)
pyautogui.FAILSAFE = True
screen_size_x, screen_size_y = pyautogui.size()
def find_spotify_logo(x=15, y=15):
try:
settings_down_arrow = pyautogui.locateCenterOnScreen("settings_down_arrow.png", grayscale=True, confidence=.7)
if settings_down_arrow:
return True
spotify_x, spotify_y = pyautogui.locateCenterOnScreen(spotify_logo.resize((x, y)), grayscale=True,
confidence=.6)
pyautogui.moveTo(spotify_x, spotify_y)
pyautogui.doubleClick(spotify_x, spotify_y)
search = pyautogui.locateCenterOnScreen('search.png', grayscale=True, confidence=.7)
if search:
return True
else:
x += 5
y += 5
find_spotify_logo(x, y)
except TypeError:
x += 5
y += 5
print(x, y)
find_spotify_logo(x, y)
find_spotify_logo()
if find_spotify_logo():
pass
def find_and_click_settings_down_arrow():
try:
settings_down_arrow = pyautogui.locateCenterOnScreen("settings_down_arrow.png", grayscale=True, confidence=.7)
pyautogui.click(settings_down_arrow[0], settings_down_arrow[1])
settings_x, settings_y = pyautogui.locateCenterOnScreen('spotify_settings_button.png', grayscale=True,
confidence=.7)
if settings_x:
pyautogui.moveTo(settings_x, settings_y)
pyautogui.click(settings_x, settings_y)
else:
find_and_click_settings_down_arrow()
except TypeError:
find_and_click_settings_down_arrow()
find_and_click_settings_down_arrow()
def main_settings():
try:
locate_big_settings_text = pyautogui.locateCenterOnScreen("big_settings.png", grayscale=True, confidence=.7)
if locate_big_settings_text:
def show_desktop_overlay_option():
pyautogui.scroll(-2000)
locate_show_desktop_overlay_option = pyautogui.locateCenterOnScreen("show_desktop_overlay_option.png",
grayscale=True,
confidence=.7)
if locate_show_desktop_overlay_option:
pass
else:
show_desktop_overlay_option()
show_desktop_overlay_option()
else:
main_settings()
except TypeError:
main_settings()
main_settings()
def off_button():
try:
locate_off_buttons = pyautogui.locateAllOnScreen("off_button.png")
last_off_button = list(i for i in locate_off_buttons)[-1] # Finds the last visible deactivated switch.
pyautogui.moveTo(last_off_button[0] + 5, last_off_button[1] + 10)
pyautogui.click(last_off_button[0] + 5, last_off_button[1] + 10)
pyautogui.moveTo(last_off_button[0] + 10, last_off_button[1] + 20)
pyautogui.click(last_off_button[0] + 10, last_off_button[1] + 20)
except TypeError:
off_button()
off_button()
def go_back():
try:
back_arrow = pyautogui.locateCenterOnScreen('back_arrow.png', grayscale=True, confidence=.7)
pyautogui.moveTo(back_arrow[0], back_arrow[1])
pyautogui.click(back_arrow[0], back_arrow[1])
except TypeError:
go_back()
go_back()
| 3,601 |
grammpy/exceptions/NotRuleException.py
|
PatrikValkovic/grammpy
| 1 |
2170685
|
#!/usr/bin/env python
"""
:Author <NAME>
:Created 03.08.2017 10:04
:Licence MIT
Part of grammpy
"""
from .GrammpyException import GrammpyException
class NotRuleException(GrammpyException, TypeError):
"""
Passed something else than Rule class
"""
def __init__(self, rule):
super().__init__()
self.object = rule
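# Illustrative sketch: the exception keeps the offending object so callers can report
# exactly what was passed in place of a Rule subclass.
#   try:
#       raise NotRuleException(42)
#   except NotRuleException as exc:
#       print(exc.object)  # 42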
| 346 |
src/Solver.py
|
caiozanatelli/Simplex
| 7 |
2172416
|
from IOUtils import IOUtils
from LinearProgramming import LinearProgramming
from Simplex import Simplex
import logging
import sys
#LP_DIR_IN = "../tests/toys/teste6.txt"
RES_DIR_OUT = "conclusao.txt"
LOG_DIR = "log_simplex.txt"
if __name__ == '__main__':
# Setting logger to register all the operations made in the Simplex Algorithm
logging.basicConfig(filename = LOG_DIR, level = logging.DEBUG, format='%(message)s', filemode='w')
logging.getLogger()
if len(sys.argv) < 2:
print("No input has been set. Aborting program.")
exit(0)
# Get the input file through a parameter
input_file = sys.argv[1]
# Reading the input
io = IOUtils(input_file, RES_DIR_OUT)
alg_mode, rows, cols, input_matrix = io.read_input()
# Solving the linear programming through Simplex Algorithm
simplex = Simplex(rows, cols, input_matrix, io)
simplex.solve(alg_mode)
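# Example invocation (hypothetical input path; the single positional argument is the
# linear-programming input file read via sys.argv[1] above):
#   python Solver.py ../tests/toys/teste6.txt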
| 916 |
Day 17 Surrounded Regions.py
|
akhildhiman7/June-LeetCoding-Challenge
| 2 |
2170858
|
'''
Given a 2D board containing 'X' and 'O' (the letter O), capture all regions surrounded by 'X'.
A region is captured by flipping all 'O's into 'X's in that surrounded region.
Example:
X X X X
X O O X
X X O X
X O X X
After running your function, the board should be:
X X X X
X X X X
X X X X
X O X X
Explanation:
Surrounded regions shouldn’t be on the border, which means that any 'O' on the border of the board are not flipped to 'X'. Any 'O' that is not on the border and it is not connected to an 'O' on the border will be flipped to 'X'. Two cells are connected if they are adjacent cells connected horizontally or vertically.
'''
from typing import List
class Solution:
def solve(self, board: List[List[str]]) -> None:
"""
Do not return anything, modify board in-place instead.
"""
def helper(board, i, j, rows, cols):
if i < 0 or i >= rows or j >= cols or j < 0 or board[i][j] != 'O':
return
board[i][j] = '-'
helper(board, i+1, j, rows, cols)
helper(board, i, j+1, rows, cols)
helper(board, i-1, j, rows, cols)
helper(board, i, j-1, rows, cols)
rows = len(board)
if rows <= 1:
return board
cols = len(board[0])
for i in range(rows):
if board[i][0] == 'O':
helper(board, i, 0, rows, cols)
if board[i][cols-1] == 'O':
helper(board, i, cols-1, rows, cols)
for i in range(cols):
if board[0][i] == 'O':
helper(board, 0, i, rows, cols)
if board[rows-1][i] == 'O':
helper(board, rows-1, i, rows, cols)
for i in range(rows):
for j in range(cols):
if board[i][j] == '-': board[i][j] = 'O'
elif board[i][j] == 'O': board[i][j] = 'X'
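# Illustrative driver (not part of the original solution): running solve() on the board
# from the problem statement above flips only the enclosed region.
if __name__ == '__main__':
    board = [list('XXXX'), list('XOOX'), list('XXOX'), list('XOXX')]
    Solution().solve(board)
    print([''.join(row) for row in board])  # ['XXXX', 'XXXX', 'XXXX', 'XOXX']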
| 1,857 |
src/fr/tagc/rainet/core/execution/processing/catrapid/split_fasta_from_fragments.py
|
TAGC-Brun/RAINET-RNA
| 0 |
2172588
|
#!/usr/bin/python2.7
import os
import sys
import glob
from fr.tagc.rainet.core.util.subprocess.SubprocessUtil import SubprocessUtil
# requires fastaq 3.11.0 to be installed
##### THIS SCRIPT SHOULD BE RUN FROM COMMAND LINE
###############################################################################
# output files will be written in same folder as input
# input fasta file must only contain one sequence
# inputFragmentsResult is output file from running catRAPID omics fragments (webserver automatically fragments if above 1200nt)
###############################################################################
inputFolder = sys.argv[1] #"/home/diogo/Documents/RAINET_data/catRAPID/webserver_results/HOTAIR_fragments/"
inputFasta = inputFolder + sys.argv[2] #"ENST00000424518_HOTAIR_001.fa"
inputFragmentsResult = inputFolder + sys.argv[3] #"ENST00000424518_HOTAIR_001.tsv"
###############################################################################
# function to read a output file from catRAPID omics (fragmenting RNA) and retrieve all the fragment coordinates
def read_input_fragments_result( input_fragments_result):
# example
#sp|Q9Y6V7|DDX49_HUMAN HOTAIR-001.cdna_1_253-404 -0.75 0.14 0.14 NO yes 1 - 1.23
#sp|A0AV96|RBM47_HUMAN HOTAIR-001.cdna_1_261-405 -0.12 0.50 0.99 NO yes 1 - 1.61
setOfCoordinates = set()
with open( input_fragments_result, "r") as inFile:
for line in inFile:
if line.startswith("#"):
continue
spl = line.split("\t")
try:
spl2 = spl[0].split(" ")
spl3 = spl2[1].split("_")
coords = spl3[2]
setOfCoordinates.add( coords)
except IndexError as e:
print e
print "Failed to split line"
print line
print "expected format includes coodinates (fragmented transcript) such as: sp|A0AV96|RBM47_HUMAN HOTAIR-001.cdna_1_261-405 -0.12 0.50 0.99 NO yes 1 - 1.61"
raise Exception
print "Found %s fragments" % len( setOfCoordinates)
return setOfCoordinates
# Launches commands to clip fasta file based on given set of coordinates
def clip_fasta(input_fasta, set_of_coordinates):
#e.g. fastaq add_indels --delete HOTAIR:1-219 --delete HOTAIR:416-9999999 test.fa tgest.fa
### replace ":" in fasta file to "_"
modInputFile = input_fasta.replace(".fa","") + ".modified"
cmd = "sed 's/:/_/g' %s | sed 's/ /_/g' > %s" % ( input_fasta, modInputFile)
# os.system(cmd)
SubprocessUtil.run_command( cmd)
### Get sequence header
name = ""
with open( modInputFile, "r") as inFile:
for line in inFile:
if ">" in line:
name = line.strip()[1:]
break
if len( name) < 1:
print "Failure to detect name of fasta sequence"
raise Exception
### Clip fasta sequence for each set of coordinates
for coord in set_of_coordinates:
start, end = coord.split("-")
outFile = input_fasta + "_" + start + "-" + end
# delete for start of sequence to start of wanted coordinate
tag1 = name + ":1-" + start
# deleted from end of wanted coordinate to end of sequence
tag2 = name + ":" + end + "-999999"
command = "fastaq add_indels --delete %s --delete %s %s %s" % ( tag1, tag2, modInputFile, outFile)
SubprocessUtil.run_command( command)
# os.system( command)
### Change fasta headers
#e.g. fastaq enumerate_names ENST00000424518_HOTAIR_001.fa_2288-2422 ENST00000424518_HOTAIR_001.fa_2288-2422_renamed --suffix ENST00000424518_HOTAIR_001.fa_2288-2422
newFastas = glob.glob( inputFolder + "/*.fa_*")
for fasta in newFastas:
fileName = fasta.split("/")[-1]
command = "fastaq enumerate_names %s %s --suffix %s" % ( fileName, fileName + "_renamed", fileName)
SubprocessUtil.run_command( command)
### Concatenate files into a multi fasta
newFileName = input_fasta + ".all_fragments.fa"
command = "cat *renamed* > %s" % ( newFileName)
SubprocessUtil.run_command( command)
# ### convert into oneline format (for catRAPID)
# command = "fastaq to_fasta -l 0 %s %s" % ( newFileName, newFileName + ".nospace")
# SubprocessUtil.run_command( command)
# command = "sed -e ':a' -e 'N' -e '$!ba' -e 's/\n/ /g' %s | sed 's/>/\n>/g' > %s" % ( newFileName + ".nospace", newFileName + ".oneline")
# SubprocessUtil.run_command( command)
### clean up
command = "rm *.fa_*"
SubprocessUtil.run_command( command)
setOfCoordinates = read_input_fragments_result( inputFragmentsResult)
clip_fasta( inputFasta, setOfCoordinates)
print "FINISHED!"
| 4,964 |
StockAnalytics-master/StockAnalytics-master/SP500Prediction/Prototype_Prediction/Dashboard/Prediction_SP500_dashboard.py
|
iVibudh/stock-prediction
| 0 |
2171916
|
import pandas as pd
import psycopg2
import dash
import plotly.graph_objs as go
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
# Connect to database
conn = psycopg2.connect(host='localhost', port=5432, database='postgres')
##### Dashboard layout #####
# Dash Set up
app = dash.Dash()
# Obtain the dropdown list
query = """ select ticker, companyname
from stock.stockmeta
where indexcomponent = 'S&P 500'
order by 2; """
shares_list = pd.io.sql.read_sql(query, conn)
shares_list['display'] = shares_list['ticker'] + '\t' + \
shares_list['companyname']
company_choice = [{'label': row['display'], 'value': row['ticker']}
for index, row in shares_list.iterrows()]
# Plot Graph on Tab 1
def get_sp500():
query_train = """select tradedate, closeprice as sp500
from stock.stockprice
where ticker='^GSPC' and
date_part('year', tradedate) >= 2019
order by 1; """
query_pred = """select tradedate, sp500_pred
from stock.pred_proto_sp500
where date_part('year', tradedate) >= 2019
order by 1; """
df_train = pd.io.sql.read_sql(query_train, conn)
df_pred = pd.io.sql.read_sql(query_pred, conn)
data = []
# Add real world data
data.append(go.Scatter(x=df_train['tradedate'], y=df_train['sp500'],
name='S&P 500', line={'color':'royalblue'}))
# Add prediction data
data.append(go.Scatter(x=df_pred['tradedate'], y=df_pred['sp500_pred'],
name='Prediction',
line={'color': 'orange'}))
# Layout for the visualization
layout = {'xaxis':{'title':'Date','rangeslider':{'visible':False}},
'yaxis':{'title':'Index'},
'hovermode':False}
return {'data':data, 'layout':layout}
# Base Layout
app.layout = html.Div([
dcc.Tabs(id='dashboard-tabs', value='sp500-only', children=[
dcc.Tab(label='S&P 500 Prediction', value='sp500-only', children=[
html.Div([
html.Br(),
html.H2('S&P 500 Prediction',
style={'width':'70%', 'text-align':'center',
'margin':'auto'}), # Position 0, title
dcc.Graph(id='sp500-vis', figure=get_sp500())
# Position 1, visualization
]) # Close Div
]), # 1st Tab
dcc.Tab(label='S&P 500 Stocks Growth', value='sp500-stocksgrowth',
children=[
html.Br(),
html.H2('Stock Future Growth vs. S&P 500 Future Growth',
style={'width':'70%', 'text-align':'center',
'margin':'auto'}), # Position 0, title
html.Br(),
html.Div([
dcc.Dropdown(id='tab2-dropdown',
options=company_choice,
value=[],
multi=True,
style={}
)], style={'width':'50%'}),
dcc.Graph(id='sp500-stocksgrowth-vis')
# Position 2, visualization
]) # 2nd Tab
]) # Close Tabs
], style={'width':'70%', 'margin':'auto'}) # Close Base Div
@app.callback(Output('sp500-stocksgrowth-vis','figure'),
[Input('tab2-dropdown','value')])
def generate_tab2_graph(tickers):
query_sp500 = """
select tradedate,
sp500_pred/first_value(sp500_pred)
over(order by tradedate) -1 as growth
from stock.pred_proto_sp500
where tradedate > (select max(tradedate)
from stock.stockprice)
and date_part('dow', tradedate)
between 1 and 5; """
df_sp500 = pd.io.sql.read_sql(query_sp500, conn)
data = [go.Scatter(x=df_sp500['tradedate'], y=df_sp500['growth'],
name='S&P 500 Growth')]
for ticker in tickers:
query_curr = """select tradedate,
closeprice/first_value(closeprice)
over(order by tradedate) -1 as growth
from stock.pred_proto_staging
where ticker = '{}' and
tradedate > (select max(tradedate)
from stock.stockprice)
and date_part('dow', tradedate)
between 1 and 5;""".format(ticker)
df_stock= pd.io.sql.read_sql(query_curr, conn)
data.append(go.Scatter(x=df_stock['tradedate'],
y=df_stock['growth'],
line=dict(dash='dash'),
name=ticker+' Growth'))
layout = {'xaxis':{'title':'Date'},
'yaxis':{'title':'% Change', 'tickformat':'.0%'},
'hovermode':False}
return {'data':data, 'layout':layout}
if __name__ == '__main__':
app.run_server(debug=True, port=8050)
| 4,382 |
notes/algo-ds-practice/problems/list/merge_sorted_lists.py
|
Anmol-Singh-Jaggi/interview-notes
| 6 |
2171491
|
def merge(head_a, head_b):
if head_b is None:
return head_a
if head_a is None:
return head_b
if head_b.data < head_a.data:
head_a, head_b = head_b, head_a
head_a.next = merge(head_a.next, head_b)
return head_a
def merge_iterative(head_a, head_b):
# A dummy node whose next value would be the head of the merged list.
# We could have done without it too but it would have complicated the code slightly.
pre_head = ListNode()
tail = pre_head
while True:
if head_b is None:
tail.next = head_a
break
if head_a is None:
tail.next = head_b
break
if head_a.data < head_b.data:
tail.next = head_a
head_a = head_a.next
else:
tail.next = head_b
head_b = head_b.next
tail = tail.next
return pre_head.next
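# Minimal usage sketch (illustration only): ListNode is assumed by the functions above
# but is not defined in this file, so a bare-bones version is given here.
class ListNode:
    def __init__(self, data=0, next=None):
        self.data = data
        self.next = next
def _from_list(values):
    head = None
    for v in reversed(values):
        head = ListNode(v, head)
    return head
def _to_list(head):
    out = []
    while head is not None:
        out.append(head.data)
        head = head.next
    return out
if __name__ == '__main__':
    print(_to_list(merge(_from_list([1, 3, 5]), _from_list([2, 4, 6]))))            # [1, 2, 3, 4, 5, 6]
    print(_to_list(merge_iterative(_from_list([1, 3, 5]), _from_list([2, 4, 6]))))  # [1, 2, 3, 4, 5, 6]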
| 900 |
pwncat/commands/sessions.py
|
Mitul16/pwncat
| 1,454 |
2170651
|
#!/usr/bin/env python3
from rich import box
from rich.table import Table
import pwncat
from pwncat.util import console
from pwncat.commands import Complete, Parameter, CommandDefinition
class Command(CommandDefinition):
"""
Interact and control active remote sessions. This command can be used
to change context between sessions or kill active sessions which were
established with the `connect` command.
"""
PROG = "sessions"
ARGS = {
"--list,-l": Parameter(
Complete.NONE,
action="store_true",
help="List active connections",
),
"--kill,-k": Parameter(
Complete.NONE,
action="store_true",
help="Kill an active session",
),
"session_id": Parameter(
Complete.NONE,
type=int,
help="Interact with the given session",
nargs="?",
),
}
LOCAL = True
def run(self, manager: "pwncat.manager.Manager", args):
if args.list or (not args.kill and args.session_id is None):
table = Table(title="Active Sessions", box=box.MINIMAL_DOUBLE_HEAD)
table.add_column("ID")
table.add_column("User")
table.add_column("Host ID")
table.add_column("Platform")
table.add_column("Type")
table.add_column("Address")
for session_id, session in manager.sessions.items():
ident = str(session_id)
kwargs = {"style": ""}
if session is manager.target:
ident = "*" + ident
kwargs["style"] = "underline"
table.add_row(
str(ident),
session.current_user().name,
str(session.hash),
session.platform.name,
str(type(session.platform.channel).__name__),
str(session.platform.channel),
**kwargs,
)
console.print(table)
return
if args.session_id is None:
console.log("[red]error[/red]: no session id specified")
return
# check if a session with the provided ``session_id`` exists or not
if args.session_id not in manager.sessions:
console.log(f"[red]error[/red]: {args.session_id}: no such session!")
return
session = manager.sessions[args.session_id]
if args.kill:
channel = str(session.platform.channel)
session.close()
console.log(f"session-{args.session_id} ({channel}) closed")
return
manager.target = session
console.log(f"targeting session-{args.session_id} ({session.platform.channel})")
| 2,813 |