max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | size |
---|---|---|---|---|---|
Minion Labour Shifts/minion_labour_shifts.py
|
raghavpatnecha/Foobar
| 2 |
2025090
|
def answer(data, n):
    # Remove every value that occurs more than n times in the list.
    data_set = set(data)
    for d in data_set:
        if data.count(d) > n:
            data = [a for a in data if a != d]
    return data

data = [1, 2, 2, 3, 3, 3, 4, 5, 5]
n = 1
print(answer(data, n))  # -> [1, 4]
| 226 |
anc/stim_src_v1.3/glue/cirq/stimcirq/_stim_to_cirq_circuit_conversion.py
|
Strilanc/stim-paper
| 1 |
2025993
|
import functools
from typing import Callable, Dict, List, Tuple, Union, Iterator
import cirq
import stim
@functools.lru_cache(maxsize=1)
def stim_to_cirq_gate_table() -> Dict[str, Union[Tuple, cirq.Gate, Callable[[float], cirq.Gate]]]:
return {
"R": cirq.ResetChannel(),
"I": cirq.I,
"X": cirq.X,
"Y": cirq.Y,
"Z": cirq.Z,
"H_XY": cirq.SingleQubitCliffordGate.from_xz_map(x_to=(cirq.Y, False), z_to=(cirq.Z, True)),
"H": cirq.H,
"H_YZ": cirq.SingleQubitCliffordGate.from_xz_map(x_to=(cirq.X, True), z_to=(cirq.Y, False)),
"SQRT_X": cirq.X**0.5,
"SQRT_X_DAG": cirq.X**-0.5,
"SQRT_Y": cirq.Y**0.5,
"SQRT_Y_DAG": cirq.Y**-0.5,
"S": cirq.S,
"S_DAG": cirq.S**-1,
"SWAP": cirq.SWAP,
"ISWAP": cirq.ISWAP,
"ISWAP_DAG": cirq.ISWAP**-1,
"XCX": cirq.PauliInteractionGate(cirq.X, False, cirq.X, False),
"XCY": cirq.PauliInteractionGate(cirq.X, False, cirq.Y, False),
"XCZ": cirq.PauliInteractionGate(cirq.X, False, cirq.Z, False),
"YCX": cirq.PauliInteractionGate(cirq.Y, False, cirq.X, False),
"YCY": cirq.PauliInteractionGate(cirq.Y, False, cirq.Y, False),
"YCZ": cirq.PauliInteractionGate(cirq.Y, False, cirq.Z, False),
"CX": cirq.CNOT,
"CY": cirq.Y.controlled(1),
"CZ": cirq.CZ,
"DEPOLARIZE1": lambda arg: cirq.DepolarizingChannel(arg, 1),
"DEPOLARIZE2": lambda arg: cirq.DepolarizingChannel(arg, 2),
"X_ERROR": lambda arg: cirq.X.with_probability(arg),
"Y_ERROR": lambda arg: cirq.Y.with_probability(arg),
"Z_ERROR": lambda arg: cirq.Z.with_probability(arg),
"DETECTOR": (),
"OBSERVABLE_INCLUDE": (),
"TICK": (),
}
def _translate_flattened_operation(
op: Tuple[str, List, float],
get_next_measure_id: Callable[[], int]) -> Iterator[cirq.Operation]:
name, targets, arg = op
handler = stim_to_cirq_gate_table().get(name)
if handler is not None:
if isinstance(handler, cirq.Gate):
gate = handler
elif handler == ():
return
else:
gate = handler(arg)
for q in targets:
if isinstance(q, tuple) and q[0] == "rec":
raise NotImplementedError("Measurement record.")
m = gate.num_qubits()
for k in range(0, len(targets), m):
yield gate(*[cirq.LineQubit(q) for q in targets[k:k+m]])
return
if name == "M" or name == "MR":
for t in targets:
if isinstance(t, int):
q = t
inv = False
elif t[0] == "inv":
q = t[1]
inv = True
else:
raise NotImplementedError("Unrecognized measurement target.")
q = cirq.LineQubit(q)
yield cirq.measure(q, key=str(get_next_measure_id()), invert_mask=(True,) if inv else ())
if name == "MR":
yield cirq.ResetChannel().on(q)
return
if name == "E":
yield cirq.PauliString({cirq.LineQubit(q): k for k, q in targets}).with_probability(arg)
return
raise NotImplementedError(f"Unsupported gate: {name}")
def stim_circuit_to_cirq_circuit(circuit: stim.Circuit) -> cirq.Circuit:
"""Converts a stim circuit into an equivalent cirq circuit.
Qubit indices are turned into cirq.LineQubit instances. Measurements are
keyed by their ordering (e.g. the first measurement is keyed "0", the second
is keyed "1", etc).
Not all circuits can be converted:
- ELSE_CORRELATED_ERROR instructions are not supported.
Not all circuits can be converted with perfect 1:1 fidelity:
- DETECTOR annotations are discarded.
- OBSERVABLE_INCLUDE annotations are discarded.
- MR ops decompose into separate measurement and reset ops.
Args:
circuit: The stim circuit to convert into a cirq circuit.
Returns:
The converted circuit.
Examples:
>>> import stimcirq
>>> import stim
>>> print(stimcirq.stim_circuit_to_cirq_circuit(stim.Circuit('''
... H 0
... CNOT 0 1
... X_ERROR(0.25) 0
... M !1 0
... ''')))
0: ───H───@───X[prob=0.25]───M('1')───
│
1: ───────X───!M('0')─────────────────
"""
_next_measure_id = 0
def get_next_measure_id() -> int:
nonlocal _next_measure_id
_next_measure_id += 1
return _next_measure_id - 1
full_circuit = cirq.Circuit()
current_tick = cirq.Circuit()
for op in circuit.flattened_operations():
if op[0] == 'TICK':
if len(current_tick):
full_circuit += current_tick
current_tick = cirq.Circuit()
else:
full_circuit += cirq.Moment()
else:
current_tick += _translate_flattened_operation(op, get_next_measure_id)
full_circuit += current_tick
return full_circuit
| 5,083 |
studip_sync/helpers.py
|
PHKA-ZIM/StudIPSync
| 0 |
2025868
|
import json
import os
class ConfigError(Exception):
pass
class JSONConfig(object):
def __init__(self, config_path=None):
super(JSONConfig, self).__init__()
try:
config_file = open(config_path)
except FileNotFoundError:
raise ConfigError("Config file missing! Run 'studip-sync --init' to create a new "
"config file")
if config_file:
self.config = json.load(config_file)
else:
self.config = None
self._check()
def _check(self):
pass
@staticmethod
def save_config(path, config):
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, "w") as config_file:
print("Writing new config to '{}'".format(path))
json.dump(config, config_file, ensure_ascii=False, indent=4)
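# Illustrative usage sketch (path and values are placeholders, not part of the
# original module):
#   JSONConfig.save_config("/tmp/studip-sync/config.json", {"username": "example"})
#   cfg = JSONConfig("/tmp/studip-sync/config.json")  # raises ConfigError if the file is missing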
| 876 |
mantrid/tests/loadbalancer.py
|
epio/mantrid
| 30 |
2026047
|
from unittest import TestCase
from ..loadbalancer import Balancer
from ..actions import Empty, Unknown, Redirect, Spin, Proxy
class BalancerTests(TestCase):
"Tests the main load balancer class itself"
def test_resolution(self):
"Tests name resolution"
balancer = Balancer(None, None, None, None)
balancer.hosts = {
"localhost": [
"empty",
{"code": 402},
False,
],
"local.ep.io": [
"spin",
{},
True,
],
"http://ep.io": [
"redirect",
{"redirect_to": "https://www.ep.io"},
True,
],
"ep.io": [
"proxy",
{"backends": ["0.0.0.0:0"]},
True,
],
}
# Test direct name resolution
self.assertEqual(
balancer.resolve_host("localhost").__class__,
Empty,
)
self.assertEqual(
balancer.resolve_host("local.ep.io").__class__,
Spin,
)
self.assertEqual(
balancer.resolve_host("ep.io").__class__,
Redirect,
)
self.assertEqual(
balancer.resolve_host("ep.io", "https").__class__,
Proxy,
)
# Test subdomain resolution
self.assertEqual(
balancer.resolve_host("subdomain.localhost").__class__,
Unknown,
)
self.assertEqual(
balancer.resolve_host("subdomain.local.ep.io").__class__,
Spin,
)
self.assertEqual(
balancer.resolve_host("subdomain.ep.io").__class__,
Redirect,
)
self.assertEqual(
balancer.resolve_host("multi.level.subdomain.local.ep.io").__class__,
Spin,
)
# Test nonexistent base name
self.assertEqual(
balancer.resolve_host("i-love-bees.com").__class__,
Unknown,
)
| 2,066 |
official/audio/tacotron2/generate_hdf5.py
|
mindspore-ai/models
| 77 |
2025069
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
''' generate hdf5 file '''
import os
import argparse
import random
import h5py
from tqdm import tqdm
import numpy as np
import librosa
from src.utils.audio import load_wav, melspectrogram
from src.hparams import hparams as hps
from src.text import text_to_sequence
from src.utils import audio
random.seed(0)
def files_to_list(fdir):
''' collect text and filepath to list'''
f_list = []
with open(os.path.join(fdir, 'metadata.csv'), encoding='utf-8') as f:
for line in f:
parts = line.strip().split('|')
wav_path = os.path.join(fdir, 'wavs', '%s.wav' % parts[0])
f_list.append([wav_path, parts[1]])
return f_list
def get_mel_text_pair(filename_and_text):
'''preprocessing mel and text '''
filename, text = filename_and_text[0], filename_and_text[1]
text += '~'
text = get_text(text)
mel = produce_mel_features(filename)
print(mel.shape)
return (text, mel)
def get_text(text):
'''encode text to sequence'''
return text_to_sequence(text, hps.text_cleaners)
def get_mel(filename):
'''extract mel spectrogram'''
wav = load_wav(filename)
trim_wav, _ = librosa.effects.trim(
wav, top_db=60, frame_length=2048, hop_length=512)
wav = np.concatenate(
(trim_wav,
np.zeros(
(5 * hps.hop_length),
np.float32)),
0)
mel = melspectrogram(wav).astype(np.float32)
return mel
def produce_mel_features(filename):
'''produce Mel-Frequency features'''
wav, fs = librosa.load(filename, sr=22050)
wav = librosa.resample(wav, fs, 16000)
# Pad the end of the audio to a multiple of hop_size, keeping the time resolution aligned between audio and mel-spectrogram
wav = audio.wav_padding(wav, hps)
assert len(wav) % hps.hop_size == 0
# Pre-emphasize
preem_wav = audio.preemphasis(wav, hps.preemphasis, hps.preemphasize)
# Compute the mel scale spectrogram from the wav
mel_spectrogram = audio.mel_spectrogram(preem_wav, hps).astype(np.float32)
mel = (mel_spectrogram + hps.max_abs_value) / (2 * hps.max_abs_value)
return mel.astype(np.float32)
def generate_hdf5(fdir):
'''generate hdf5 file'''
f_list = files_to_list(fdir)
random.shuffle(f_list)
max_text, max_mel = 0, 0
for idx, filename_and_text in tqdm(enumerate(f_list)):
text, mel = get_mel_text_pair(filename_and_text)
max_text = max(max_text, len(text))
max_mel = max(max_mel, mel.shape[1])
with h5py.File('ljdataset.hdf5', 'a') as hf:
hf.create_dataset('{}_mel'.format(idx), data=mel)
hf.create_dataset('{}_text'.format(idx), data=text)
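def read_pair_example(path='ljdataset.hdf5', idx=0):
    '''Illustrative sketch (not part of the original script): read one
    (text, mel) pair back using the "{idx}_text"/"{idx}_mel" dataset names
    written by generate_hdf5 above.'''
    with h5py.File(path, 'r') as hf:
        text = hf['{}_text'.format(idx)][()]  # encoded text sequence
        mel = hf['{}_mel'.format(idx)][()]    # float32 mel spectrogram
    return text, mel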
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--data_path',
type=str,
default='',
help='Path to LJSpeech-1.1')
args = parser.parse_args()
generate_hdf5(args.data_path)
| 3,524 |
ialgebra/interpreters/__init__.py
|
huashen218/pytorch-ialgebra
| 2 |
2024575
|
from .interpreter import *
from .gradcam import *
from .gradsaliency import *
from .GuidedBackpropGrad import *
from .GuidedBackpropSmoothGrad import *
from .mask import *
from .smoothgrad import *
| 198 |
Tools/PostProcessing/plot_timestep_duration.py
|
hklion/WarpX
| 0 |
2025608
|
#!/usr/bin/env python3
import argparse
import re
import matplotlib.pyplot as plt
import numpy as np
def extract_data(filename):
regex_step = re.compile(
r"STEP [0-9]* ends.*\n.* Avg\. per step = ([0-9]*[.])?[0-9]+ s", re.MULTILINE)
string_data = []
print("Processing " + filename + " ...", end='')
with open(filename) as f:
text = f.read()
string_data = [s.group(0) for s in regex_step.finditer(text)]
regex_real = re.compile(
r" -?[\d.]+(?:e-?\d+)?", re.MULTILINE)
time_data = np.zeros([len(string_data), 6])
for i, ss in enumerate(string_data):
numbers = regex_real.findall(ss)
time_data[i,:] = np.array(numbers)
print("...done!")
return time_data
def plot_timestep_duration(time_data, name):
fig_name = name + "_ts_duration.png"
print("Generating " + fig_name + "...", end='')
plt.rcParams.update({'font.size': 20})
plt.rcParams['axes.linewidth'] = 3
f, ax = plt.subplots(figsize=(12,6))
ax.set_ylabel("timestep duration [s]")
ax.set_xlabel("step [#]")
ax.semilogy(time_data[:,0], time_data[:,4])
ax.spines['bottom'].set_color('gray')
ax.spines['top'].set_visible(False)
ax.spines['left'].set_color('gray')
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(fig_name, transparent=False, dpi=300)
print("...done!")
def plot_cumulative_duration(time_data, name):
fig_name = name + "_cumulative_duration.png"
print("Generating " + fig_name + "...", end='')
plt.rcParams.update({'font.size': 20})
plt.rcParams['axes.linewidth'] = 3
f, ax = plt.subplots(figsize=(12,6))
ax.set_ylabel("cumulative duration [s]")
ax.set_xlabel("step [#]")
ax.plot(time_data[:,0], np.cumsum(time_data[:,4]))
ax.spines['bottom'].set_color('gray')
ax.spines['top'].set_visible(False)
ax.spines['left'].set_color('gray')
ax.spines['right'].set_visible(False)
plt.tight_layout()
plt.savefig(fig_name, transparent=False, dpi=300)
print("...done!")
def do_plot_timestep_duration():
parser = argparse.ArgumentParser(description='Generates plots of timestep duration from WarpX standard output logs')
parser.add_argument('file_name', metavar='file_name', type=str, nargs=1,
help='the name of the WarpX output log to process')
args = parser.parse_args()
log_file_name = args.file_name[0]
time_data = extract_data(log_file_name)
plot_timestep_duration(time_data, log_file_name)
plot_cumulative_duration(time_data, log_file_name)
if __name__ == "__main__":
do_plot_timestep_duration()
| 2,674 |
plot.py
|
Picovoice/voice-activity-benchmark
| 4 |
2026113
|
#
# Copyright 2021 Picovoice Inc.
#
# You may not use this file except in compliance with the license.
# A copy of the license is located in the "LICENSE" file accompanying this source.
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
import os
import matplotlib.pyplot as plt
from matplotlib.ticker import (AutoMinorLocator, MultipleLocator)
from engine import Engines
def plot_roc_chart():
engine_true_detects = dict([(e.value, [1]) for e in Engines])
engine_false_alarms = dict([(e.value, [1]) for e in Engines])
for engine in Engines:
engine = engine.value
with open(os.path.join(os.path.dirname(__file__), 'benchmark_%s.csv' % engine), 'r') as f:
for line in f.readlines():
true_detect, false_alarm = [float(x) for x in line.strip('\n').split(', ')]
engine_true_detects[engine].append(true_detect)
engine_false_alarms[engine].append(false_alarm)
engine_true_detects[engine].append(0)
engine_false_alarms[engine].append(0)
fig, ax = plt.subplots()
for engine in Engines:
engine = engine.value
ax.plot(engine_false_alarms[engine], engine_true_detects[engine])
ax.plot(ax.get_xlim(), ax.get_ylim(), ls="--", c=".3")
ax.legend([e.value for e in Engines])
ax.set_title('ROC for VAD Engines')
ax.set_xlabel("False Positive Rate")
ax.set_ylabel("True Positive Rate")
ax.set_ylim(0, 1)
ax.set_xlim(0, 1)
ax.xaxis.set_major_locator(MultipleLocator(0.1))
ax.yaxis.set_major_locator(MultipleLocator(0.1))
ax.xaxis.set_minor_locator(AutoMinorLocator(2))
ax.yaxis.set_minor_locator(AutoMinorLocator(2))
ax.grid(which='major', color='#CCCCCC', linestyle='--')
ax.grid(which='minor', color='#CCCCCC', linestyle=':')
fig.tight_layout()
plt.show()
if __name__ == '__main__':
plot_roc_chart()
| 2,158 |
scripts/plot.py
|
UCR-Robotics/hdcp_planning
| 18 |
2026321
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
from hexmap import HexMap
from turtlebot import TurtleBot
def visualization(directory, center, debugging_mode):
# plot trajectory once finished
_, ax = plt.subplots(1)
ax.set_aspect('equal')
# map boundary
X_min = -10 - center[0]
X_max = 10 - center[0]
Y_min = -10 - center[1]
Y_max = 10 - center[1]
# load and plot hex map
hexmap = HexMap(radius=1.0)
hexmap.plot_map(ax, directory, debugging_mode)
# load and plot trajectory
TurtleBot.load_and_plot_trajectory(directory, debugging_mode)
plt.xlim(X_min - 1, X_max + 1)
plt.ylim(Y_min - 1, Y_max + 1)
plt.show()
if __name__ == '__main__':
directory = "../data/hdcp_e"
robot_init_coordinate = [-9, -9]
debugging_mode = True
visualization(directory, robot_init_coordinate, debugging_mode)
| 880 |
apps/vendors/admin.py
|
ExpoAshique/ProveBanking__s
| 0 |
2025894
|
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from import_export.admin import ExportMixin, ImportExportModelAdmin
from locations.models import Location
from .models import (Vendor, VendorLocation, VendorCategories, VendorRoles,
PortfolioItem, ProcurementContact, KindLabel, ClientReference, Invoice, ClientQueue,
CertVerification)
from .resources import VendorResource
class VendorAdmin(ExportMixin, admin.ModelAdmin):
exclude = ('phone', 'story', 'founded', 'industries',
'avg_score')
resource_class = VendorResource
list_display = ('name', 'website', 'slug', 'proven_score', 'potential_proven_score', 'client_score',
'potential_client_score', 'company_score', 'web_score', 'modified_on', 'sync_clearbit', )
readonly_fields = ('slug', 'proven_score', 'potential_proven_score', 'client_score', 'potential_client_score', 'company_score', 'web_score', 'clearbit_data')
search_fields = ('name', )
class VendorSearchTerm(Vendor):
class Meta:
proxy = True
class HasSearchKeyworkFilter(admin.SimpleListFilter):
title = _('Search keywords?')
parameter_name = 'has_search_keywords'
def lookups(self, request, model_admin):
return (
('with', 'with search keywords'),
('without', 'without search keywords'),
)
def queryset(self, request, queryset):
value = self.value()
if value == 'with':
return queryset.exclude(search_keywords=[])
elif value == 'without':
return queryset.filter(search_keywords=[])
return queryset
class VendorSearchTermAdmin(admin.ModelAdmin):
list_filter = (HasSearchKeyworkFilter,)
fields = ('name', 'search_keywords')
def has_delete_permission(self, request, obj=None):
return False
def has_add_permission(self, request):
return False
class ProcurementAdmin(admin.ModelAdmin):
def get_form(self, request, obj=None, **kwargs):
form = super(ProcurementAdmin, self).get_form(request, obj, **kwargs)
form.base_fields['locations'].queryset = Location.objects.filter(kind=Location.KIND_COUNTRY)
return form
class ClientReferenceAdmin(admin.ModelAdmin):
model = ClientReference
list_display = ('id', 'vendor', 'client', 'email', 'weighted_value')
readonly_fields = ('weighted_value',)
class InvoiceAdmin(admin.ModelAdmin):
model = Invoice
list_display = ('uuid', 'reference', 'date_verified')
class CertVerificationAdmin(admin.ModelAdmin):
model = CertVerification
admin.site.register(Vendor, VendorAdmin)
admin.site.register(ProcurementContact, ProcurementAdmin)
admin.site.register(VendorLocation)
admin.site.register(VendorCategories)
admin.site.register(VendorRoles)
admin.site.register(PortfolioItem)
admin.site.register(KindLabel)
admin.site.register(VendorSearchTerm, VendorSearchTermAdmin)
admin.site.register(ClientReference, ClientReferenceAdmin)
admin.site.register(Invoice, InvoiceAdmin)
admin.site.register(ClientQueue)
admin.site.register(CertVerification, CertVerificationAdmin)
| 3,174 |
openapy/io.py
|
edge-minato/openapy
| 0 |
2025351
|
import pkgutil
import sys
from pathlib import Path
def read_file(path: Path) -> str:
with path.open(mode="r") as f:
return f.read()
def write_file(path: Path, content: str) -> None:
with path.open(mode="w") as f:
f.write(content)
def exit_with_error(msg: str) -> None:
print(msg)
sys.exit(1)
def get_resource(package_name: str, filename: Path) -> str:
try:
resource = pkgutil.get_data(package_name, str(filename))
if resource is None:
raise FileNotFoundError(f"ERROR: Failed to load {filename}")
return resource.decode("utf-8")
except Exception:
raise Exception(f"ERROR: Failed to load {filename}")
def get_my_resource(filename: str) -> str:
return get_resource(__package__, Path("examples").joinpath(filename))
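# Illustrative usage (the filename is a placeholder): get_my_resource("template.py")
# returns the text of <this package>/examples/template.py bundled with the package,
# or raises if pkgutil cannot locate it.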
| 810 |
macromodel.py
|
ks8/conformation
| 0 |
2024555
|
""" Run Schrodinger's MacroModel conformational search tools (LMOD and MCMM). """
import os
from conformation.create_logger import create_logger
from conformation.macromodel import macromodel, Args
if __name__ == '__main__':
args = Args().parse_args()
logger = create_logger(name='train', save_dir=args.save_dir)
args.save(os.path.join(args.save_dir, "args.json"))
macromodel(args, logger)
| 419 |
hal_hw_interface/src/hal_hw_interface/tests/test_fixtures.py
|
zultron/hal_ros_control
| 39 |
2024456
|
# -*- coding: utf-8 -*-
# Test keys and values
keys1 = dict(pin1=True, pin2=0.009, pin3=-9)
# More test keys and values, an overlapping set
keys2 = dict(pin1=False, pin2=1.88e42, pin4=0)
class TestFixtures:
def test_mock_comp_obj_fixture(self, mock_comp_obj, mock_objs):
# Test hal.component returns mock_comp_obj
assert mock_objs["hal_comp"]() is mock_comp_obj
# Set each pin (with out-of-band method) and check
for name, value in keys1.items():
mock_comp_obj.set_pin(name, value)
assert mock_comp_obj[name] == value
# Recheck
for name, value in keys1.items():
assert mock_comp_obj[name] == value
# Set each pin and check
for name, value in keys2.items():
mock_comp_obj[name] = value
assert mock_comp_obj[name] == value
# Recheck everything
pins = keys1.copy()
pins.update(keys2)
for name, value in pins.items():
assert mock_comp_obj[name] == value
# Default case
assert mock_comp_obj["bogus"] == 0xDEADBEEF
def test_mock_rospy_fixture(self, mock_rospy, mock_objs):
# Test mock rospy.get_param()
gp = mock_objs["rospy_get_param"]
gp.set_key("foo", 1)
gp.set_key("bar", 2)
assert gp("foo") == 1
assert gp("bar") == 2
gp.set_key("baz", 3)
assert gp("foo") == 1
assert gp("bar") == 2
assert gp("baz") == 3
# Test rospy.Rate() returns expected object
assert mock_objs["rospy_Rate"]() is mock_objs["rospy_Rate_obj"]
# Test rospy.is_shutdown() returns True values, then False
found_false = False
for i in range(10):
val = mock_objs["rospy_is_shutdown"]()
print("iter {} val {}".format(i, val))
if val is False:
found_false = True
if val is True:
break
else:
raise Exception("is_shutdown never returned True")
if not found_false:
raise Exception("is_shutdown never returned False")
# Test returned objects
for name in ("Subscriber", "Publisher", "Service"):
method = mock_objs["rospy_{}".format(name)]
obj = mock_objs["rospy_{}_obj".format(name)]
assert method() == obj
def test_mock_redis_client_obj(self, mock_redis_client_obj, mock_objs):
# Test redis_store.ConfigClient() returns object
assert mock_objs["redis_store"]() is mock_redis_client_obj
# Set each param (with out-of-band method) and check
for name, value in keys1.items():
mock_redis_client_obj.set_key(name, value)
assert mock_redis_client_obj.get_param(name) == value
# Recheck
for name, value in keys1.items():
assert mock_redis_client_obj.get_param(name) == value
# Set each param and check
for name, value in keys2.items():
mock_redis_client_obj.set_param(name, value)
assert mock_redis_client_obj.get_param(name) == value
# Recheck everything
params = keys1.copy()
params.update(keys2)
for name, value in params.items():
assert mock_redis_client_obj.get_param(name) == value
# Default case
assert mock_redis_client_obj.get_param("bogus") == 0
| 3,398 |
0x2.py
|
windring/Spider4TwtterIMG
| 0 |
2025892
|
# -*- coding: UTF-8 -*-
import unittest
from selenium import webdriver
from bs4 import BeautifulSoup
import os
import time
import urllib.request
class seleniumTest(unittest.TestCase):
def setUp(self):
print("start")
self.driver=webdriver.PhantomJS()
self.driver.implicitly_wait(20)
self.driver.set_window_size('1280', '768')
self.driver.maximize_window()
self.endselector="stream-end-inner"
self.cardselector=".js-stream-item.stream-item.stream-item"
self.imgselector={"src": True, "data-aria-label-part": True}
self.username="ruru2333"
def testEle(self):
driver=self.driver
username=self.username
endselector=self.endselector
cardselector=self.cardselector
imgselector=self.imgselector
if os.path.isdir("./"+username)==False:
os.mkdir("./"+username)
driver.get("https://twitter.com/"+username+"/media")
len1=0
len2=0
count=0
while(1):
driver.execute_script('window.scrollTo(0,document.body.scrollHeight)')
print(time.time())
soup = BeautifulSoup(driver.page_source, "html.parser")
soup_re = soup.findAll("img",imgselector )
if(soup_re and len(soup_re)>len2):
for imgs in soup_re[len2-1:len(soup_re)]:
print(imgs['src'])
u = urllib.request.urlopen(imgs['src'] + ":large")
data = u.read()
f = open("./"+username+"/"+str(count)+"-"+imgs['src'].split('/')[4], "wb")
f.write(data)
f.close()
count+=1
print(count)
len1 = len(driver.find_elements_by_css_selector(cardselector))
if(len(soup_re)>len2):
len2=len(soup_re)
print(len1,len2)
if driver.find_element_by_class_name(endselector).is_displayed():
#driver.save_screenshot("./"+username+"/end.png")
print("last:",count)
break
def tearDown(self):
self.driver.close()
print('End')
if __name__ == "__main__":
unittest.main()
| 2,213 |
tests/unittests/test_obj_query.py
|
cooomma/mayday-ticketing-bot
| 4 |
2025824
|
import unittest
from mayday.objects.query import Query
USER_ID = 123456789
USERNAME = 'testcase'
CATEGORY = 1
class Test(unittest.TestCase):
def test_query_init(self):
query = Query(user_id=USER_ID, username=USERNAME, category_id=CATEGORY)
expect = dict(
category=1,
dates=list(),
prices=list(),
quantities=list(),
status=1,
username=USERNAME,
user_id=USER_ID
)
self.assertDictEqual(query.to_dict(), expect)
def test_query_dict_to_obj(self):
query = dict(
category=1,
dates=[503, 504],
prices=[1, 2],
quantities=[2, 3],
status=1,
username=USERNAME,
user_id=USER_ID
)
obj = Query(user_id=USER_ID, username=USERNAME, category_id=CATEGORY).to_obj(query)
assert obj.dates == query['dates']
assert obj.prices == query['prices']
assert obj.quantities == query['quantities']
assert obj.status == query['status']
assert obj.category == query['category']
assert obj.to_dict() == query
def test_query_update_field(self):
query = Query(user_id=USER_ID, username=USERNAME, category_id=CATEGORY)
query.update_field('category', 1)
assert isinstance(query.category, int)
assert query.category == 1
query.update_field('dates', 503)
assert isinstance(query.dates, list)
assert query.dates == [503]
query.update_field('dates', 504)
assert isinstance(query.dates, list)
assert query.dates == [503, 504]
query.update_field('dates', 505)
assert isinstance(query.dates, list)
assert query.dates == [503, 504, 505]
query.update_field('dates', 510)
assert isinstance(query.dates, list)
assert query.dates == [503, 504, 505, 510]
query.update_field('dates', 511)
assert isinstance(query.dates, list)
assert query.dates == [503, 504, 505, 510, 511]
query.update_field('prices', 1)
assert isinstance(query.prices, list)
assert query.prices == [1]
query.update_field('prices', 2)
assert isinstance(query.prices, list)
assert query.prices == [1, 2]
query.update_field('prices', 3)
assert isinstance(query.prices, list)
assert query.prices == [1, 2, 3]
query.update_field('prices', 4)
assert isinstance(query.prices, list)
assert query.prices == [1, 2, 3, 4]
query.update_field('quantities', 1)
assert isinstance(query.quantities, list)
assert query.quantities == [1]
query.update_field('quantities', 2)
assert isinstance(query.quantities, list)
assert query.quantities == [1, 2]
query.update_field('quantities', 3)
assert isinstance(query.quantities, list)
assert query.quantities == [1, 2, 3]
query.update_field('quantities', 4)
assert isinstance(query.quantities, list)
assert query.quantities == [1, 2, 3, 4]
query.update_field('status', 1)
assert isinstance(query.status, int)
assert query.status == 1
query.update_field('status', 2)
assert isinstance(query.status, int)
assert query.status == 2
# Remove
query.update_field('quantities', 4, remove=True)
assert isinstance(query.quantities, list)
assert query.quantities == [1, 2, 3]
query.update_field('quantities', 3, remove=True)
assert isinstance(query.quantities, list)
assert query.quantities == [1, 2]
query.update_field('quantities', 2, remove=True)
assert isinstance(query.quantities, list)
assert query.quantities == [1]
query.update_field('dates', 511, remove=True)
assert isinstance(query.dates, list)
assert query.dates == [503, 504, 505, 510]
query.update_field('dates', 510, remove=True)
assert isinstance(query.dates, list)
assert query.dates == [503, 504, 505]
query.update_field('dates', 505, remove=True)
assert isinstance(query.dates, list)
assert query.dates == [503, 504]
query.update_field('dates', 504, remove=True)
assert isinstance(query.dates, list)
assert query.dates == [503]
query.update_field('dates', 503, remove=True)
assert isinstance(query.dates, list)
assert query.dates == list()
query.update_field('prices', 4, remove=True)
assert isinstance(query.prices, list)
assert query.prices == [1, 2, 3]
query.update_field('prices', 3, remove=True)
assert isinstance(query.prices, list)
assert query.prices == [1, 2]
query.update_field('prices', 2, remove=True)
assert isinstance(query.prices, list)
assert query.prices == [1]
query.update_field('prices', 1, remove=True)
assert isinstance(query.prices, list)
assert query.prices == list()
def test_query_to_human_readable(self):
sample_query = dict(
category=1,
dates=[503, 504],
prices=[1, 2],
quantities=[2, 3],
status=1,
username=USERNAME,
user_id=USER_ID
)
query = Query(user_id=USER_ID, username=USERNAME, category_id=CATEGORY).to_obj(sample_query)
query_string = query.to_human_readable()
assert query_string['category'] == '原價轉讓'
assert query_string['dates'] == '5.3(Fri), 5.4(Sat)'
assert query_string['prices'] == '$1180座位, $880座位'
assert query_string['quantities'] == '2, 3'
assert query_string['status'] == '待交易'
def test_query_validation(self):
query = Query(user_id=USER_ID, username=USERNAME, category_id=1)
assert query.validate()
| 5,942 |
tests/test_interfaces.py
|
ZipFile/layer-enforcer
| 1 |
2026199
|
from pytest import mark
from layer_enforcer.interfaces import Layer, Match
def test_layer_repr():
assert repr(Layer("test")) == repr("test")
@mark.parametrize(
["chains", "submodules", "expected"],
[
([], set(), False),
([("a", "b")], set(), True),
([], {"x", "y"}, True),
([("a", "b")], {"x", "y"}, True),
],
)
def test_match_bool(chains, submodules, expected, domain):
assert bool(Match("test", domain, chains, submodules)) == expected
| 495 |
setup.py
|
904labs/semanticizest
| 32 |
2025412
|
#!/usr/bin/env python
from setuptools import setup
import os.path
# Get __version__ from source
dist_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(dist_dir, 'semanticizest/_version.py')) as versionpy:
exec(versionpy.read())
def readme():
try:
with open(os.path.join(dist_dir, 'README.rst')) as f:
return f.read()
except IOError:
return ""
def requirements():
with open(os.path.join(dist_dir, "requirements.txt")) as f:
return f.readlines()
setup(
name="semanticizest",
description="Semanticizer NG",
long_description=readme(),
packages=["semanticizest", "semanticizest.parse_wikidump"],
url="https://github.com/semanticize/semanticizest",
version=__version__,
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Text Processing",
],
install_requires=requirements(),
include_package_data=True,
test_suite='nose.collector',
tests_require=['nose'],
)
| 1,184 |
coffemachine_project/coffemachine/machine/migrations/0001_initial.py
|
Dejna93/django-coffee-machine
| 0 |
2026390
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-12-18 12:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Coffee',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('coffee_type', models.CharField(choices=[(b'espresso', b'Espresso'), (b'americano', b'Americano'), (b'latte', b'Latte')], max_length=15)),
('beans', models.CharField(max_length=15)),
('coffee_quantity', models.IntegerField()),
('size', models.IntegerField(choices=[(120, b'Normal'), (240, b'Large')])),
('extra_quantity', models.IntegerField(blank=True, null=True)),
('contains_milk', models.BooleanField(default=False)),
('time_preparing', models.IntegerField()),
],
),
]
| 1,067 |
python/nltk/tokenizer_stemmer.py
|
initrunlevel0/sns
| 0 |
2025737
|
from __future__ import division
import nltk, re, pprint
# Open Gutenberg
text = open("2554.txt", "r").read()
# TOKENIZE
# Definition: splitting a piece of text into particular units.
# It is not necessarily split per word, since the split is meant to separate semantic units.
# http://nlp.stanford.edu/IR-book/html/htmledition/tokenization-1.html
# nltk.word_tokenize: splits tokens on a word basis
tokens = nltk.word_tokenize(text)
# Vocabulary
# Get list of vocabulary inside
words = [w.lower() for w in tokens]
vocab = sorted(set(words))
# STEMMER
# Definition: obtaining the base form (stem) of a word.
# NLTK provides the following stemmers/lemmatizer:
# * nltk.PorterStemmer()
# * nltk.LancasterStemmer()
# * nltk.WordNetLemmatizer()
porter = nltk.PorterStemmer()
lancaster = nltk.LancasterStemmer()
stem_porter = [porter.stem(t) for t in tokens]
stem_lancaster = [lancaster.stem(t) for t in tokens]
print(stem_lancaster)
| 943 |
tests/integration_tests/test_suite.py
|
raineydavid/mindsdb
| 0 |
2026193
|
from run_example import run_example
from generated_data_tests import *
import multiprocessing
import os
# Run the generated data tests
for backend in ['ludwig', 'lightwood']:
test_one_label_prediction_wo_strings(backend)
test_timeseries(backend)
test_multilabel_prediction(backend)
test_one_label_prediction(backend)
# Run the CI tests
os.system('cd ..; cd ci_tests; python3 full_test.py')
# Run the example datasets
datasets = [{
'name':'default_of_credit',
'sample':True,
'expect_accuracy_above':72
},{
'name':'imdb_movie_review',
'sample':False,
'expect_accuracy_above':83
},{
'name':'cifar_100',
'sample':True,
'expect_accuracy_above': 40 # For full dataset: 69
}]
for dataset in datasets:
dataset_name = dataset['name']
res = run_example(dataset_name, sample=dataset['sample'])
acc = res['accuracy']
ex_acc = dataset['expect_accuracy_above']
if acc < ex_acc:
print('\n\n\n============WARNING===============\n\n\n')
print(f'Expected an accuracy above {ex_acc} for dataset {dataset_name}.')
print(f'Got accuracy of {acc} instead.')
print('\n\n\n==================================\n\n\n')
else:
print('\n\n\n============SUCCESS===============\n\n\n')
print(f'Example dataset {dataset_name}, ran with success')
print(f'Got accuracy of {acc} !')
print('\n\n\n==================================\n\n\n')
#with multiprocessing.Pool(max(len(datasets),6)) as pool:
# pool.map(run_example,datasets)
| 1,546 |
youtube_dl/extractor/gametrailers.py
|
zoogaezee/youtubeDL
| 5 |
2026344
|
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_age_limit,
url_basename,
)
class GametrailersIE(InfoExtractor):
_VALID_URL = r'http://www\.gametrailers\.com/videos/view/[^/]+/(?P<id>.+)'
_TEST = {
'url': 'http://www.gametrailers.com/videos/view/gametrailers-com/116437-Just-Cause-3-Review',
'md5': 'f28c4efa0bdfaf9b760f6507955b6a6a',
'info_dict': {
'id': '2983958',
'ext': 'mp4',
'display_id': '116437-Just-Cause-3-Review',
'title': 'Just Cause 3 - Review',
'description': 'It\'s a lot of fun to shoot at things and then watch them explode in Just Cause 3, but should there be more to the experience than that?',
},
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
title = self._html_search_regex(
r'<title>(.+?)\|', webpage, 'title').strip()
embed_url = self._proto_relative_url(
self._search_regex(
r'src=\'(//embed.gametrailers.com/embed/[^\']+)\'', webpage,
'embed url'),
scheme='http:')
video_id = url_basename(embed_url)
embed_page = self._download_webpage(embed_url, video_id)
embed_vars_json = self._search_regex(
r'(?s)var embedVars = (\{.*?\})\s*</script>', embed_page,
'embed vars')
info = self._parse_json(embed_vars_json, video_id)
formats = []
for media in info['media']:
if media['mediaPurpose'] == 'play':
formats.append({
'url': media['uri'],
'height': media['height'],
'width': media['width'],
})
self._sort_formats(formats)
return {
'id': video_id,
'display_id': display_id,
'title': title,
'formats': formats,
'thumbnail': info.get('thumbUri'),
'description': self._og_search_description(webpage),
'duration': int_or_none(info.get('videoLengthInSeconds')),
'age_limit': parse_age_limit(info.get('audienceRating')),
}
| 2,294 |
colorsample.py
|
CYBARZ/black-white-image
| 0 |
2025183
|
import cv2 as cv, cv2
import os
import numpy as np
import argparse
from os.path import isfile, join
def color(input_image_path):
#make main folder
try:
if not os.path.exists('data'):
os.makedirs('data')
except OSError:
print ('Error: Creating directory of data')
try:
if not os.path.exists('data/colored'):
os.makedirs('data/colored')
except OSError:
print ('Error: Creating directory of data')
print("yes it is working")
caffemodel = "model\colorization.caffemodel"
prototxt = "model\colorization_deploy_v2.prototxt"
kernel = "model\pts_in_hull.npy"
print("models called successfully")
inp = input_image_path
# Network input size
W_in = 224
H_in = 224
imshowSize = (640, 480)
# Create network graph and load weights
net = cv.dnn.readNetFromCaffe(prototxt, caffemodel)
# load cluster centers
pts_in_hull = np.load(kernel)
# populate cluster centers as 1x1 convolution kernel
pts_in_hull = pts_in_hull.transpose().reshape(2, 313, 1, 1)
net.getLayer(net.getLayerId('class8_ab')).blobs = [pts_in_hull.astype(np.float32)]
net.getLayer(net.getLayerId('conv8_313_rh')).blobs = [np.full([1, 313], 2.606, np.float32)]
print(inp)
# Read the input image in BGR format
frame = cv.imread(inp)
# convert it to rgb format
frame = frame[:, :, [2, 1, 0]]
# Scale the image to handle the variations in intensity
img_rgb = (frame * 1.0 / 255).astype(np.float32)
# convert to Lab color space
img_lab = cv.cvtColor(img_rgb, cv.COLOR_RGB2Lab)
# pull out L channel
img_l = img_lab[:, :, 0]
(H_orig, W_orig) = img_rgb.shape[:2] # original image size
# resize image to network input size
img_rs = cv.resize(img_rgb, (W_in, H_in)) # resize image to network input size
img_lab_rs = cv.cvtColor(img_rs, cv.COLOR_RGB2Lab)
img_l_rs = img_lab_rs[:, :, 0]
# subtract 50 for mean-centering
img_l_rs -= 50
# Set the input for forwarding through the openCV DNN module
net.setInput(cv.dnn.blobFromImage(img_l_rs))
# Inference on network
ab_dec = net.forward('class8_ab')[0, :, :, :].transpose((1, 2, 0)) # this is our result
# Get the a and b channels
(H_out, W_out) = ab_dec.shape[:2]
# Resize to original size
ab_dec_us = cv.resize(ab_dec, (W_orig, H_orig))
# concatenate with original image i.e. L channel
img_lab_out = np.concatenate((img_l[:, :, np.newaxis], ab_dec_us), axis=2)
# convert to BGR space from Lab space
img_bgr_out = cv.cvtColor(img_lab_out, cv.COLOR_Lab2BGR)
# Clip and then rescale to 0-255
img_bgr_out = 255 * np.clip(img_bgr_out, 0, 1)
img_bgr_out = np.uint8(img_bgr_out)
# concatenate input and output image to display
con = np.hstack([frame, img_bgr_out])
print("everything")
cv.imwrite(os.path.join('data', 'colored', 'out_' + os.path.basename(inp)), img_bgr_out)  # save into the data/colored directory created above
cv.imshow("colored output",img_bgr_out)
#os.remove(input_image_path)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 3,136 |
yocto/poky/meta/lib/oeqa/runtime/smart.py
|
libreswitch/libreswitch
| 16 |
2026091
|
import unittest
import re
import os
from oeqa.oetest import oeRuntimeTest, skipModule
from oeqa.utils.decorators import *
from oeqa.utils.httpserver import HTTPService
def setUpModule():
if not oeRuntimeTest.hasFeature("package-management"):
skipModule("Image doesn't have package management feature")
if not oeRuntimeTest.hasPackage("smart"):
skipModule("Image doesn't have smart installed")
if "package_rpm" != oeRuntimeTest.tc.d.getVar("PACKAGE_CLASSES", True).split()[0]:
skipModule("Rpm is not the primary package manager")
class SmartTest(oeRuntimeTest):
@skipUnlessPassed('test_smart_help')
def smart(self, command, expected = 0):
command = 'smart %s' % command
status, output = self.target.run(command, 1500)
message = os.linesep.join([command, output])
self.assertEqual(status, expected, message)
self.assertFalse("Cannot allocate memory" in output, message)
return output
class SmartBasicTest(SmartTest):
@testcase(716)
@skipUnlessPassed('test_ssh')
def test_smart_help(self):
self.smart('--help')
@testcase(968)
def test_smart_version(self):
self.smart('--version')
@testcase(721)
def test_smart_info(self):
self.smart('info python-smartpm')
@testcase(421)
def test_smart_query(self):
self.smart('query python-smartpm')
@testcase(720)
def test_smart_search(self):
self.smart('search python-smartpm')
@testcase(722)
def test_smart_stats(self):
self.smart('stats')
class SmartRepoTest(SmartTest):
@classmethod
def setUpClass(self):
self.repolist = []
self.repo_server = HTTPService(oeRuntimeTest.tc.d.getVar('DEPLOY_DIR', True), oeRuntimeTest.tc.target.server_ip)
self.repo_server.start()
@classmethod
def tearDownClass(self):
self.repo_server.stop()
for i in self.repolist:
oeRuntimeTest.tc.target.run('smart channel -y --remove '+str(i))
@testcase(1143)
def test_smart_channel(self):
self.smart('channel', 1)
@testcase(719)
def test_smart_channel_add(self):
image_pkgtype = self.tc.d.getVar('IMAGE_PKGTYPE', True)
deploy_url = 'http://%s:%s/%s' %(self.target.server_ip, self.repo_server.port, image_pkgtype)
pkgarchs = self.tc.d.getVar('PACKAGE_ARCHS', True).replace("-","_").split()
for arch in os.listdir('%s/%s' % (self.repo_server.root_dir, image_pkgtype)):
if arch in pkgarchs:
self.smart('channel -y --add {a} type=rpm-md baseurl={u}/{a}'.format(a=arch, u=deploy_url))
self.repolist.append(arch)
self.smart('update')
@testcase(969)
def test_smart_channel_help(self):
self.smart('channel --help')
@testcase(970)
def test_smart_channel_list(self):
self.smart('channel --list')
@testcase(971)
def test_smart_channel_show(self):
self.smart('channel --show')
@testcase(717)
def test_smart_channel_rpmsys(self):
self.smart('channel --show rpmsys')
self.smart('channel --disable rpmsys')
self.smart('channel --enable rpmsys')
@testcase(1144)
@skipUnlessPassed('test_smart_channel_add')
def test_smart_install(self):
self.smart('remove -y psplash-default')
self.smart('install -y psplash-default')
@testcase(728)
@skipUnlessPassed('test_smart_install')
def test_smart_install_dependency(self):
self.smart('remove -y psplash')
self.smart('install -y psplash-default')
@testcase(723)
@skipUnlessPassed('test_smart_channel_add')
def test_smart_install_from_disk(self):
self.smart('remove -y psplash-default')
self.smart('download psplash-default')
self.smart('install -y ./psplash-default*')
@testcase(725)
@skipUnlessPassed('test_smart_channel_add')
def test_smart_install_from_http(self):
output = self.smart('download --urls psplash-default')
url = re.search('(http://.*/psplash-default.*\.rpm)', output)
self.assertTrue(url, msg="Couldn't find download url in %s" % output)
self.smart('remove -y psplash-default')
self.smart('install -y %s' % url.group(0))
@testcase(729)
@skipUnlessPassed('test_smart_install')
def test_smart_reinstall(self):
self.smart('reinstall -y psplash-default')
@testcase(727)
@skipUnlessPassed('test_smart_channel_add')
def test_smart_remote_repo(self):
self.smart('update')
self.smart('install -y psplash')
self.smart('remove -y psplash')
@testcase(726)
def test_smart_local_dir(self):
self.target.run('mkdir /tmp/myrpmdir')
self.smart('channel --add myrpmdir type=rpm-dir path=/tmp/myrpmdir -y')
self.target.run('cd /tmp/myrpmdir')
self.smart('download psplash')
output = self.smart('channel --list')
for i in output.split("\n"):
if ("rpmsys" != str(i)) and ("myrpmdir" != str(i)):
self.smart('channel --disable '+str(i))
self.target.run('cd $HOME')
self.smart('install psplash')
for i in output.split("\n"):
if ("rpmsys" != str(i)) and ("myrpmdir" != str(i)):
self.smart('channel --enable '+str(i))
self.smart('channel --remove myrpmdir -y')
self.target.run("rm -rf /tmp/myrpmdir")
@testcase(718)
def test_smart_add_rpmdir(self):
self.target.run('mkdir /tmp/myrpmdir')
self.smart('channel --add myrpmdir type=rpm-dir path=/tmp/myrpmdir -y')
self.smart('channel --disable myrpmdir -y')
output = self.smart('channel --show myrpmdir')
self.assertTrue("disabled = yes" in output, msg="Failed to disable rpm dir")
self.smart('channel --enable myrpmdir -y')
output = self.smart('channel --show myrpmdir')
self.assertFalse("disabled = yes" in output, msg="Failed to enable rpm dir")
self.smart('channel --remove myrpmdir -y')
self.target.run("rm -rf /tmp/myrpmdir")
@testcase(731)
@skipUnlessPassed('test_smart_channel_add')
def test_smart_remove_package(self):
self.smart('install -y psplash')
self.smart('remove -y psplash')
| 6,293 |
homeworkpal_project/project_admin/managers.py
|
luiscberrocal/homeworkpal
| 0 |
2022864
|
from django.db.models import Manager, Sum, QuerySet
from employee.models import CompanyGroupEmployeeAssignment
__author__ = 'lberrocal'
class ProjectManagerQuerySet(QuerySet):
def sum_regular_hours(self):
return self.filter().annotate(total_regular_hours=Sum('maximo_time_registers__regular_hours'))
class ProjectManager(Manager):
def get_queryset(self):
return ProjectManagerQuerySet(self.model, using=self._db)
def sum_regular_hours(self):
return self.get_queryset().sum_regular_hours()
class ProjectMemberManager(Manager):
def assigned_to_project(self, project):
return self.get_queryset().filter(project=project).select_related('employee', 'employee__user')
# def unassigned_to_project(self, project, company_group=None):
# if company_group:
# group_assignments = CompanyGroupEmployeeAssignment.objects.group_members(company_group)
# pks = self._get_employees_pk_list(group_assignments)
# return self.get_queryset().filter(employee__pk__in=pks).exclude(project=project,).select_related('employee', 'employee__user')
# else:
# return self.get_queryset().exclude(project=project,).select_related('employee', 'employee__user')
def _get_employees_pk_list(self, group_assignments):
pks = list()
for group_assignment in group_assignments:
pks.append(group_assignment.employee.pk)
return pks
| 1,462 |
python/common.py
|
Jacopo47/ssd-project
| 0 |
2024778
|
"""
Common functions and a colormap for the line charts.
"""
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import base64
import io
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
# ---------------------------- read from sqlite database
def load_orders(db, cust):
sql = "SELECT time, quant FROM ordini WHERE customer IN ({})".format(cust)
engine = create_engine('sqlite:///' + db)
df_all_orders = pd.read_sql(sql, engine, index_col='time')
if df_all_orders.size == 0:
raise CustomerNotFound()
return df_all_orders
def load_stock_data(db, tickers, start_date, end_date):
"""
Loads the stock data for the specified ticker symbols, and for the specified date range.
:param db: Full path to database with stock data.
:param tickers: A list with ticker symbols.
:param start_date: The start date.
:param end_date: The end date.
:return: A list of time-indexed dataframe, one for each ticker, ordered by date.
"""
SQL = "SELECT * FROM Quotes WHERE TICKER IN ({}) AND Date >= '{}' AND Date <= '{}'" \
.format(tickers, start_date, end_date)
engine = create_engine('sqlite:///' + db)
df_all = pd.read_sql(SQL, engine, index_col='Date', parse_dates='Date')
df_all = df_all.round(2)
result = []
for ticker in tickers.split(","):
df_ticker = df_all.query("Ticker == " + ticker)
result.append(df_ticker)
return result
# ------------------------------ Accuracy metrics
def forecast_accuracy(forecast, actual):
mape = np.mean(np.abs(forecast - actual) / np.abs(actual)) # MAPE
me = np.mean(forecast - actual) # ME
mae = np.mean(np.abs(forecast - actual)) # MAE
mpe = np.mean((forecast - actual) / actual) # MPE
rmse = np.mean((forecast - actual) ** 2) ** .5 # RMSE
corr = np.corrcoef(forecast, actual)[0, 1] # corr
mins = np.amin(np.hstack([forecast[:, None],
actual[:, None]]), axis=1)
maxs = np.amax(np.hstack([forecast[:, None],
actual[:, None]]), axis=1)
minmax = 1 - np.mean(mins / maxs) # minmax
return ({'mape': mape, 'me': me, 'mae': mae,
'mpe': mpe, 'rmse': rmse,
'corr': corr, 'minmax': minmax})
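# Illustrative usage sketch (the arrays below are placeholder data):
#   acc = forecast_accuracy(np.array([2.1, 2.9, 4.2]), np.array([2.0, 3.0, 4.0]))
#   acc['mape'], acc['rmse']  # mean absolute percentage error, root mean squared error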
def get_orders(db, customers):
SQL = "SELECT * FROM ordini WHERE customer IN ({})" \
.format(customers)
engine = create_engine('sqlite:///' + db)
df_all_orders = pd.read_sql(SQL, engine, index_col='id')
result = []
for cust in customers.split(","):
df_order = df_all_orders.query("customer == " + cust)
result.append(df_order)
return result
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def get_figure(fig):
"""
Converts a figure (as created e.g. with matplotlib or seaborn) to a png image and this
png subsequently to a base64 string, and returns the encoded bytes.
"""
buf = io.BytesIO()
fig.savefig(buf, format='png')
return base64.b64encode(buf.getbuffer())
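# Illustrative usage sketch (assumes a matplotlib figure created elsewhere, e.g.
# fig, ax = plt.subplots(); ax.plot(range(10))):
#   encoded = get_figure(fig)        # base64-encoded bytes of the PNG rendering
#   html = '<img src="data:image/png;base64,{}">'.format(encoded.decode('ascii'))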
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Custom colormap that is used with line charts
COLOR_MAP = [
'blue', 'orange', 'green', 'red', 'purple', 'brown', 'pink', 'gray', 'olive', 'cyan',
'darkblue', 'darkorange', 'darkgreen', 'darkred', 'rebeccapurple', 'darkslategray',
'mediumvioletred', 'dimgray', 'seagreen', 'darkcyan', 'deepskyblue', 'yellow',
'lightgreen', 'lightcoral', 'plum', 'lightslategrey', 'lightpink', 'lightgray',
'lime', 'cadetblue'
]
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
class CustomerNotFound(Exception):
"""Base class for other exceptions"""
pass
| 3,966 |
MPCGUACC-0.0.2/StereoCameraCalibrator/list_generator.py
|
hyu1834/-Stereoscopic-Point-Cloud-Generation-Using-Array-of-Commodity-Cameras
| 1 |
2025162
|
import os
import sys
def get_images_paths(base_path):
images_path = []
for path in os.listdir(base_path):
full_path = os.path.join(base_path, path)
if not os.path.isfile(full_path):
continue
extension = os.path.splitext(os.path.basename(path))[1]
if not extension == ".bmp":
continue
images_path.append(full_path)
return sorted(images_path)
base_path = sys.argv[1]
# get all image from calibration folder
left_images_path = get_images_paths(os.path.join(base_path, "L_calibration"))
right_images_path = get_images_paths(os.path.join(base_path, "R_calibration"))
# Generate list for Left images
with open(os.path.join(base_path, "left_images.txt"), 'w') as f:
for path in left_images_path:
f.write("%s\n"%path)
with open(os.path.join(base_path, "right_images.txt"), 'w') as f:
for path in left_images_path:
f.write("%s\n"%path)
with open(os.path.join(base_path, "all_images.txt"), 'w') as f:
for image1, image2 in zip(left_images_path, right_images_path):
f.write("%s\n"%image1)
f.write("%s\n"%image2)
| 1,045 |
test/components/core/TestKrakken.py
|
AaronWxy/TestCommon
| 0 |
2023374
|
from src.components.core.Krakken import Krakken
from src.components.core.Logger import Logger
def main():
# test 1
krakken = Krakken("host1", "4.1.0", "4.1.2")
assert len(krakken.hosts) == 1
assert krakken.hosts[0] == "host1"
assert krakken.ip_map.get(krakken.hosts[0]) is None
assert krakken.version == "4.1.0"
assert krakken.content_version == "4.1.2"
assert krakken.variant == ""
assert krakken.suite == ""
assert krakken.config == ""
assert isinstance(krakken, Krakken)
assert isinstance(krakken.logger, Logger)
# test 2
krakken = Krakken("host1,host2,host3", "4.1.0", ips="1.1.1.1,1.1.1.2,1.1.1.3", variant="AWS", suite="Beta", test_config="config/config2.ini")
assert len(krakken.hosts) == 3
assert krakken.hosts[0] == "host1"
assert krakken.ip_map.get(krakken.hosts[0]) == "1.1.1.1"
assert krakken.ip_map.get(krakken.hosts[2]) == "1.1.1.3"
assert krakken.version == "4.1.0"
assert krakken.content_version == "4.1.0"
assert krakken.variant == "AWS"
assert krakken.suite == "Beta"
assert krakken.config == "config/config2.ini"
assert isinstance(krakken, Krakken)
assert isinstance(krakken.logger, Logger)
if __name__ == "__main__":
main()
| 1,250 |
tests/test_role.py
|
gantsign/ansible-role-dockbarx-launcher
| 2 |
2025832
|
from testinfra.utils.ansible_runner import AnsibleRunner
testinfra_hosts = AnsibleRunner('.molecule/ansible_inventory').get_hosts('all')
def test_dockbarx_config(Sudo, Command):
# Need to --set-home when using sudo for gconftool-2 to work
output = Command.check_output("sudo %s --set-home gconftool-2 --get %s",
'--user=test_usr',
'/apps/dockbarx/launchers')
assert 'test-app;/usr/share/applications/test-app.desktop' in output
| 510 |
Edit_color_tags.py
|
ImageJ-scripts/ij-macros
| 0 |
2026094
|
# Bootstrap to extend modules search path #
from sys import path
import os.path
from java.lang.System import getProperty
jython_scripts = os.path.join(getProperty('user.home'), 'Jython_scripts')
path.append(jython_scripts)
#=========================================#
from IBPlib.ij.Colortags import Colortags
# GUI for editing the user color tags to be used in ColorMerger
t = Colortags()
t.edit()
| 400 |
src/py/flwr/client/numpy_client.py
|
g-pichler/flower
| 0 |
2026261
|
# Copyright 2020 Adap GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Flower client app."""
from abc import ABC, abstractmethod
from typing import Dict, List, Tuple
import numpy as np
from flwr.common import (
Code,
Config,
EvaluateIns,
EvaluateRes,
FitIns,
FitRes,
Metrics,
ParametersRes,
PropertiesIns,
PropertiesRes,
Scalar,
Status,
parameters_to_weights,
weights_to_parameters,
)
from .client import Client
EXCEPTION_MESSAGE_WRONG_RETURN_TYPE_FIT = """
NumPyClient.fit did not return a tuple with 3 elements.
The returned values should have the following type signature:
Tuple[List[np.ndarray], int, Dict[str, Scalar]]
Example
-------
model.get_weights(), 10, {"accuracy": 0.95}
"""
EXCEPTION_MESSAGE_WRONG_RETURN_TYPE_EVALUATE = """
NumPyClient.evaluate did not return a tuple with 3 elements.
The returned values should have the following type signature:
Tuple[float, int, Dict[str, Scalar]]
Example
-------
0.5, 10, {"accuracy": 0.95}
"""
class NumPyClient(ABC):
"""Abstract base class for Flower clients using NumPy."""
def get_properties(self, config: Config) -> Dict[str, Scalar]:
"""Returns a client's set of properties.
Parameters
----------
config : Config
Configuration parameters requested by the server.
This can be used to tell the client which parameters
are needed along with some Scalar attributes.
Returns
-------
properties : Dict[str, Scalar]
A dictionary mapping arbitrary string keys to values of type
bool, bytes, float, int, or str. It can be used to communicate
arbitrary property values back to the server.
"""
@abstractmethod
def get_parameters(self) -> List[np.ndarray]:
"""Return the current local model parameters.
Returns
-------
parameters : List[numpy.ndarray]
The local model parameters as a list of NumPy ndarrays.
"""
@abstractmethod
def fit(
self, parameters: List[np.ndarray], config: Dict[str, Scalar]
) -> Tuple[List[np.ndarray], int, Dict[str, Scalar]]:
"""Train the provided parameters using the locally held dataset.
Parameters
----------
parameters : List[numpy.ndarray]
The current (global) model parameters.
config : Dict[str, Scalar]
Configuration parameters which allow the
server to influence training on the client. It can be used to
communicate arbitrary values from the server to the client, for
example, to set the number of (local) training epochs.
Returns
-------
parameters : List[numpy.ndarray]
The locally updated model parameters.
num_examples : int
The number of examples used for training.
metrics : Dict[str, Scalar]
A dictionary mapping arbitrary string keys to values of type
bool, bytes, float, int, or str. It can be used to communicate
arbitrary values back to the server.
"""
@abstractmethod
def evaluate(
self, parameters: List[np.ndarray], config: Dict[str, Scalar]
) -> Tuple[float, int, Dict[str, Scalar]]:
"""Evaluate the provided weights using the locally held dataset.
Parameters
----------
parameters : List[np.ndarray]
The current (global) model parameters.
config : Dict[str, Scalar]
Configuration parameters which allow the server to influence
evaluation on the client. It can be used to communicate
arbitrary values from the server to the client, for example,
to influence the number of examples used for evaluation.
Returns
-------
loss : float
The evaluation loss of the model on the local dataset.
num_examples : int
The number of examples used for evaluation.
metrics : Dict[str, Scalar]
A dictionary mapping arbitrary string keys to values of
type bool, bytes, float, int, or str. It can be used to
communicate arbitrary values back to the server.
Warning
-------
The previous return type format (int, float, float) and the
extended format (int, float, float, Dict[str, Scalar]) have been
deprecated and removed since Flower 0.19.
"""
class NumPyClientWrapper(Client):
"""Wrapper which translates between Client and NumPyClient."""
def __init__(self, numpy_client: NumPyClient) -> None:
self.numpy_client = numpy_client
def get_properties(self, ins: PropertiesIns) -> PropertiesRes:
"""Return the current client properties."""
if hasattr(self.numpy_client, "get_properties"):
properties = self.numpy_client.get_properties(ins.config)
else:
properties = {}
return PropertiesRes(
status=Status(code=Code.OK, message="Success"),
properties=properties,
)
def get_parameters(self) -> ParametersRes:
"""Return the current local model parameters."""
parameters = self.numpy_client.get_parameters()
parameters_proto = weights_to_parameters(parameters)
return ParametersRes(parameters=parameters_proto)
def fit(self, ins: FitIns) -> FitRes:
"""Refine the provided weights using the locally held dataset."""
# Deconstruct FitIns
parameters: List[np.ndarray] = parameters_to_weights(ins.parameters)
# Train
results: Tuple[List[np.ndarray], int, Metrics] = self.numpy_client.fit(
parameters, ins.config
)
if not (
len(results) == 3
and isinstance(results[0], list)
and isinstance(results[1], int)
and isinstance(results[2], dict)
):
raise Exception(EXCEPTION_MESSAGE_WRONG_RETURN_TYPE_FIT)
# Return FitRes
parameters_prime, num_examples, metrics = results
parameters_prime_proto = weights_to_parameters(parameters_prime)
return FitRes(
parameters=parameters_prime_proto,
num_examples=num_examples,
metrics=metrics,
)
def evaluate(self, ins: EvaluateIns) -> EvaluateRes:
"""Evaluate the provided parameters using the locally held dataset."""
parameters: List[np.ndarray] = parameters_to_weights(ins.parameters)
results: Tuple[float, int, Metrics] = self.numpy_client.evaluate(
parameters, ins.config
)
if not (
len(results) == 3
and isinstance(results[0], float)
and isinstance(results[1], int)
and isinstance(results[2], dict)
):
raise Exception(EXCEPTION_MESSAGE_WRONG_RETURN_TYPE_EVALUATE)
# Return EvaluateRes
loss, num_examples, metrics = results
return EvaluateRes(
loss=loss,
num_examples=num_examples,
metrics=metrics,
)
| 7,777 |
main.py
|
qiliux/hexo-jupyter-notebook
| 19 |
2026171
|
"""
jupyter convert
"""
from __future__ import print_function
import sys
import re
from nbconvert import HTMLExporter
def main(jupyter_file):
"""
convert jupyter file to html
    :param jupyter_file: jupyter file path
"""
html_exporter = HTMLExporter()
html_exporter.template_file = 'full'
restr = "%s" % (str(html_exporter.from_filename(jupyter_file)[0]))
template = """
<iframe id='ipynb' marginheight="0" frameborder="0" width='924px' srcdoc="%s" style="scrolling:no;">
</iframe>
<script>
$("#ipynb").load( function() {
console.log($("#ipynb").contents().find("body").find("#notebook"));
document.getElementById('ipynb').height=$("#ipynb").contents().find("#notebook").height()+100;
})
</script>
""" % restr.replace("\"", "'")
# print(sys.version)
# template = '2341'
print(re.sub(r'<a.*?\/a>', '', template))
if __name__ == "__main__":
    main(sys.argv[1])
| 886 |
alipay/aop/api/domain/AnttechDataServiceBlockchainContractQueryModel.py
|
antopen/alipay-sdk-python-all
| 213 |
2025988
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AnttechDataServiceBlockchainContractQueryModel(object):
def __init__(self):
self._at_tenant_name = None
self._block_chain_id = None
self._block_hash = None
self._contract_hash = None
self._end_timestamp = None
self._page_no = None
self._page_size = None
self._start_timestamp = None
@property
def at_tenant_name(self):
return self._at_tenant_name
@at_tenant_name.setter
def at_tenant_name(self, value):
self._at_tenant_name = value
@property
def block_chain_id(self):
return self._block_chain_id
@block_chain_id.setter
def block_chain_id(self, value):
self._block_chain_id = value
@property
def block_hash(self):
return self._block_hash
@block_hash.setter
def block_hash(self, value):
self._block_hash = value
@property
def contract_hash(self):
return self._contract_hash
@contract_hash.setter
def contract_hash(self, value):
self._contract_hash = value
@property
def end_timestamp(self):
return self._end_timestamp
@end_timestamp.setter
def end_timestamp(self, value):
self._end_timestamp = value
@property
def page_no(self):
return self._page_no
@page_no.setter
def page_no(self, value):
self._page_no = value
@property
def page_size(self):
return self._page_size
@page_size.setter
def page_size(self, value):
self._page_size = value
@property
def start_timestamp(self):
return self._start_timestamp
@start_timestamp.setter
def start_timestamp(self, value):
self._start_timestamp = value
def to_alipay_dict(self):
params = dict()
if self.at_tenant_name:
if hasattr(self.at_tenant_name, 'to_alipay_dict'):
params['at_tenant_name'] = self.at_tenant_name.to_alipay_dict()
else:
params['at_tenant_name'] = self.at_tenant_name
if self.block_chain_id:
if hasattr(self.block_chain_id, 'to_alipay_dict'):
params['block_chain_id'] = self.block_chain_id.to_alipay_dict()
else:
params['block_chain_id'] = self.block_chain_id
if self.block_hash:
if hasattr(self.block_hash, 'to_alipay_dict'):
params['block_hash'] = self.block_hash.to_alipay_dict()
else:
params['block_hash'] = self.block_hash
if self.contract_hash:
if hasattr(self.contract_hash, 'to_alipay_dict'):
params['contract_hash'] = self.contract_hash.to_alipay_dict()
else:
params['contract_hash'] = self.contract_hash
if self.end_timestamp:
if hasattr(self.end_timestamp, 'to_alipay_dict'):
params['end_timestamp'] = self.end_timestamp.to_alipay_dict()
else:
params['end_timestamp'] = self.end_timestamp
if self.page_no:
if hasattr(self.page_no, 'to_alipay_dict'):
params['page_no'] = self.page_no.to_alipay_dict()
else:
params['page_no'] = self.page_no
if self.page_size:
if hasattr(self.page_size, 'to_alipay_dict'):
params['page_size'] = self.page_size.to_alipay_dict()
else:
params['page_size'] = self.page_size
if self.start_timestamp:
if hasattr(self.start_timestamp, 'to_alipay_dict'):
params['start_timestamp'] = self.start_timestamp.to_alipay_dict()
else:
params['start_timestamp'] = self.start_timestamp
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AnttechDataServiceBlockchainContractQueryModel()
if 'at_tenant_name' in d:
o.at_tenant_name = d['at_tenant_name']
if 'block_chain_id' in d:
o.block_chain_id = d['block_chain_id']
if 'block_hash' in d:
o.block_hash = d['block_hash']
if 'contract_hash' in d:
o.contract_hash = d['contract_hash']
if 'end_timestamp' in d:
o.end_timestamp = d['end_timestamp']
if 'page_no' in d:
o.page_no = d['page_no']
if 'page_size' in d:
o.page_size = d['page_size']
if 'start_timestamp' in d:
o.start_timestamp = d['start_timestamp']
return o
| 4,667 |
at/chat.py
|
ygalustov/at
| 2 |
2024828
|
"""
The purpose of this module is to handle sending commands and receiving responses from
a serial port in a separate thread.
"""
import threading
import queue
import serial
import at
class ChatError(Exception):
"""ChatThrad exception class, inherits from the built-in Exception class."""
def __init__(self, error_str=None):
"""Constructs a new object and sets the error."""
if error_str:
self.err_str = 'Chat error: {}'.format(error_str)
else:
self.err_str = 'Chat error'
Exception.__init__(self, self.err_str)
class Chat():
"""Simplifies the process of sending AT commands to a modem and waiting for a response."""
def __init__(self, port):
"""Create a new object and open the specified serial port."""
self._rx_q = queue.Queue()
self._tx_q = queue.Queue()
self._closed = False
self._thread = ChatThread(self._rx_q, self._tx_q, port)
self._thread.start()
def _raise_thread_errors(self):
"""Iterate through the information that was sent back from the thread
and raise any Exceptions that were encountered.
"""
while not self._rx_q.empty():
item = self._rx_q.get()
if isinstance(item, Exception):
raise item
self.close()
def _read(self, block=True, timeout_s=None):
"""Read a response line as a string. Return None if block is False
and there is no data to read.
"""
if self._closed:
raise ChatError("Port is closed.")
if self._thread.is_closed():
if self._rx_q.empty():
self.close()
raise ChatError('Thread closed unexpectedly.')
else:
self._raise_thread_errors()
if not block and self._rx_q.empty():
return None
item = self._rx_q.get(block, timeout_s)
if isinstance(item, str):
return item
else:
raise ChatError(str(item))
def _write(self, seq):
"""Write the string or bytes seq to the serial port."""
if self._closed:
raise ChatError("Port is closed.")
if self._thread.is_closed():
if self._rx_q.empty():
self.close()
raise ChatError('Thread closed unexpectedly.')
else:
self._raise_thread_errors()
self._tx_q.put(seq)
def send_cmd(self, cmd, timeout_s=5):
"""Send a command to the serial port and wait for a response that is either
an OK or an ERROR. The cmd parameter can be a string or a dict from the at
module. Any responses that arrive before the OK or ERROR will be returned
along with the final response as part of a tuple: (result, [responses]).
"""
responses = []
if self._closed:
raise ChatError("Port is closed.")
if isinstance(cmd, str):
self._write(cmd)
elif isinstance(cmd, dict):
cmd_str = at.encode_command(cmd)
self._write(cmd_str)
while True:
try:
line = self._read(True, timeout_s)
except queue.Empty:
raise ChatError('Command timed out ({} seconds).'.format(timeout_s))
if line:
res = at.parse_string(line)
if res[at.AT_TYPE_KEY] == at.AT_TYPE_VALUE_RESPONSE:
if res[at.AT_RESPONSE_KEY] == at.AT_RSP_OK or res[at.AT_ERROR_KEY]:
return (res, responses)
else:
responses.append(res)
def close(self):
"""Close the serial port."""
if self._closed:
raise ChatError("Port is already closed.")
self._closed = True
self._thread.close()
def is_closed(self):
"""Return True if the thread was manually closed or closed due to an error."""
return self._closed
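# Illustrative usage sketch (the serial port name and AT command below are
# placeholders/assumptions, not values defined by this module):
#
#     chat = Chat('/dev/ttyACM0')
#     result, responses = chat.send_cmd('AT+CFUN?')
#     chat.close()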
class ChatThread(threading.Thread):
"""Creates a simple thread for interacting with a serial port."""
DEFAULT_BAUDRATE = 115200
DEFAULT_TIMEOUT_S = 0.5
CR_LF_BYTES = b'\r\n'
CR_LF_STR = '\r\n'
NULL_BYTE = b'\x00'
def __init__(self, rx_queue, tx_queue, port, baudrate=DEFAULT_BAUDRATE):
"""Create a new object but do not start the thread."""
super(ChatThread, self).__init__()
self.daemon = True
self._rx_q = rx_queue
self._tx_q = tx_queue
self._port = port
self._baudrate = baudrate
self._closed = False
self._stop = threading.Event()
def _term_and_encode(self, seq):
"""Ensure that seq terminates with <CR><LF> and convert to bytes if necessary."""
if isinstance(seq, str):
if seq.endswith(self.CR_LF_STR):
return seq.encode()
else:
return "".join((seq, self.CR_LF_STR)).encode()
else:
if seq.endswith(self.CR_LF_BYTES):
return seq
else:
return b''.join((seq, self.CR_LF_BYTES))
def run(self):
"""Interact with the serial port until the semaphore is set.
NOTE: Automatically decodes received bytes into strings.
"""
ser = None
try:
ser = serial.Serial(self._port, self._baudrate, timeout=self.DEFAULT_TIMEOUT_S)
while not self._stop.is_set():
while not self._tx_q.empty():
tx_item = self._tx_q.get()
ser.write(self._term_and_encode(tx_item))
line = ser.readline()
if line and line != self.NULL_BYTE:
self._rx_q.put(line.decode())
except serial.SerialException as err:
self._rx_q.put(err)
finally:
if ser:
ser.close()
self.close()
def close(self):
"""Set the semaphore to instruct the thread to close."""
self._stop.set()
self._closed = True
def is_closed(self):
"""Return True if the thread was manually closed or closed due to an error."""
return self._closed
| 6,193 |
mail.py
|
T1duS/ccextractor-web
| 19 |
2024865
|
"""
ccextractor-web | mail.py
Author : <NAME>
Email : saurabh.shrivastava54+ccextractorweb[at]gmail.com
Link : https://github.com/saurabhshri
"""
import requests
from flask import current_app as app
from logger import Logger
from config_parser import general_config
mail_logger = Logger(log_level=general_config['LOG_LEVEL'],
dir=general_config['LOG_FILE_DIR'],
filename="mail",
format='[%(asctime)s] [%(name)s] [%(levelname)s] [%(pathname)s#L%(lineno)d] | %(message)s\n\n',
console_level='DEBUG')
mail_log = mail_logger.get_logger("mail")
def get_api_url(domain):
return "https://api.mailgun.net/v3/{domain}/messages".format(domain=domain)
def send_simple_message(receiver, subject, body):
api_key = app.config['MAILGUN_PVT_API_KEY']
domain = app.config['EMAIL_DOMAIN']
sender = 'CCExtractor Web <no-reply@{domain}>'.format(domain=domain)
response = requests.post(get_api_url(domain),
auth=("api", api_key),
data={"from": sender,
"to": receiver,
"subject": subject,
"text": body})
    if response.status_code != 200:
mail_log.debug('\n[{response}] \nTO : {to}, \nfrom : {sender}, \nsubject : {subject}, \ntext: {text}'.format(response=response,
to=receiver,
sender=sender,
subject=subject,
text=body))
else:
mail_log.info('\n[{response}] \nTO : {to}, \nfrom : {sender}, \nsubject : {subject}, \ntext: {text}'.format(response=response,
to=receiver,
sender=sender,
subject=subject,
text=body))
return response
| 2,657 |
app/SASconnector.py
|
StefanEkstromFFCG/FlightPlanner
| 0 |
2025493
|
import swat
host = 'https://Flightplanner-forefront.saasnow.com:8443'
port_number = "5570"
user = "stefan.ekstrom"
password = "<PASSWORD>"
conn = swat.CAS(host, username=user, password=password)
print(conn.serverstatus())
conn.close()
# NOTE: serverstatus() cannot be queried after close(); the CAS session is terminated.
# MAKE A REQUEST AGAINST THE REST API INSTEAD!!!
| 304 |
sitemessage/settings.py
|
furins/django-sitemessage
| 49 |
2025092
|
from django.conf import settings
# Module name to search sitemessage preferences in.
APP_MODULE_NAME = getattr(settings, 'SITEMESSAGE_APP_MODULE_NAME', 'sitemessages')
# Whether to register builtin message types.
INIT_BUILTIN_MESSAGE_TYPES = getattr(settings, 'SITEMESSAGE_INIT_BUILTIN_MESSAGE_TYPES', True)
# Priority for messages sent by Django Email backend (sitemessage.backends.EmailBackend).
EMAIL_BACKEND_MESSAGES_PRIORITY = getattr(settings, 'SITEMESSAGE_EMAIL_BACKEND_MESSAGES_PRIORITY', None)
# Messenger type alias for messages sent with `schedule_email` shortcut.
SHORTCUT_EMAIL_MESSENGER_TYPE = getattr(settings, 'SITEMESSAGE_SHORTCUT_EMAIL_MESSENGER_TYPE', 'smtp')
# Message type alias to be used for messages sent with `schedule_email` shortcut.
SHORTCUT_EMAIL_MESSAGE_TYPE = getattr(settings, 'SITEMESSAGE_SHORTCUT_EMAIL_MESSAGE_TYPE', None)
# Site URL to use in messages.
SITE_URL = getattr(settings, 'SITEMESSAGE_SITE_URL', None)
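# Illustrative override in a project's settings.py (the URL is a placeholder only):
#
#     SITEMESSAGE_SITE_URL = 'https://example.com'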
| 955 |
Utils.py
|
rajaram5/EJP-RD_standards_and_tools_documentation
| 0 |
2025545
|
import chevron
class Utils:
def get_standards_body(self, standards):
html_body = ""
table_body = ""
for standard in standards:
name = standard.TITLE
fair_info = standard.FAIRNESS_INFO
table_body = table_body + "<br> <br>\n" + self.get_table_body(name, fair_info, 'templates/table.mustache')
with open('templates/standards-page.mustache', 'r') as f:
html_body = chevron.render(f, {'tableContent': table_body})
return html_body
def get_standards_tables(self, standards):
tables = {}
for standard in standards:
name = standard.TITLE
fair_info = standard.FAIRNESS_INFO
table_body = self.get_table_body(name, fair_info, 'templates/table-for-image.mustache')
tables[name] = table_body
return tables
def get_table_body(self, standard_name, fairness_info, template):
table_body = None
is_findable = " "
if fairness_info.IS_FINDABLE:
is_findable = "X"
is_accessible = " "
if fairness_info.IS_ACCESSIBLE:
is_accessible = "X"
is_interoperable = " "
if fairness_info.IS_INTEROPERABLE:
is_interoperable = "X"
is_reusable = " "
if fairness_info.IS_REUSABLE:
is_reusable = "X"
is_for_machines = " "
if fairness_info.IS_FOR_MACHINES:
is_for_machines = "X"
is_for_humans = " "
if fairness_info.IS_FOR_HUMANS:
is_for_humans = "X"
is_for_catalogue = " "
if fairness_info.IS_FOR_CATALOGUE:
is_for_catalogue = "X"
is_for_database = " "
if fairness_info.IS_FOR_DATABASE:
is_for_database = "X"
is_for_data_record = " "
if fairness_info.IS_FOR_DATA_RECORD:
is_for_data_record = "X"
with open(template, 'r') as f:
table_body = chevron.render(f, {'isFindable': is_findable, 'isAccessible': is_accessible,
'isInteroperable': is_interoperable, 'isReusable': is_reusable,
'isForHuman': is_for_humans, 'isForMachines': is_for_machines,
'isForCatalog': is_for_catalogue, 'isForDatabases': is_for_database,
'isForRecord': is_for_data_record, 'standardName': standard_name})
return table_body
| 2,573 |
blendsite/template.py
|
akloster/blender-asyncio
| 54 |
2026070
|
from jinja2 import Environment, FileSystemLoader
from aiohttp import web
def render_to_string(template_name, context={}):
    # NOTE: FileSystemLoader requires a search path; 'templates' is an assumed default.
    env = Environment(loader=FileSystemLoader('templates'))
template = env.get_template(template_name)
return template.render(**context)
def render_to_response(template_name, context={}):
data = render_to_string(template_name, context)
return web.Response(text=data)
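# Illustrative usage sketch inside an aiohttp handler (the handler and template
# name are assumptions for illustration only):
#
#     async def index(request):
#         return render_to_response('index.html', {'title': 'Blendsite'})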
| 396 |
main.py
|
akash1729/label_reader
| 1 |
2026294
|
import cv2
import numpy as np
import math
import pytesseract
from boundbox import BoundBox
def display(img, keep_size=False):
if not keep_size:
img = cv2.resize(img, (500, 500))
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
hed_model_path = "models/hed_pretrained_bsds.caffemodel"
hed_prototext_path = "models/deploy.prototxt"
# set the path to frozen_east_text_detection.pb
east_detector_path = "models/frozen_east_text_detection.pb"
def reduce_noise(img):
# we use fastNlMeansDenoisingColored to reduce the noise
noise_reduced_image = cv2.fastNlMeansDenoisingColored(img, None, 10, 10, 7, 21)
return noise_reduced_image
def find_hed(image):
class CropLayer():
def __init__(self, params, blobs):
self.xstart = 0
self.xend = 0
self.ystart = 0
self.yend = 0
def getMemoryShapes(self, inputs):
input_shape, target_shape = inputs[0], inputs[1]
batch_size, num_channels = input_shape[0], input_shape[1]
height, width = target_shape[2], target_shape[3]
self.ystart = (input_shape[2] - target_shape[2]) // 2
self.xstart = (input_shape[3] - target_shape[3]) // 2
self.yend = self.ystart + height
self.xend = self.xstart + width
return [[batch_size, num_channels, height, width]]
def forward(self, inputs):
return [inputs[0][:, :, self.ystart:self.yend, self.xstart:self.xend]]
Height, Width = image.shape[:2]
cv2.dnn_registerLayer('Crop', CropLayer)
net = cv2.dnn.readNet(hed_prototext_path, hed_model_path)
blob = cv2.dnn.blobFromImage(image, scalefactor=1.0, size=(Width, Height),
mean=(104.00698793, 116.66876762, 122.67891434),
swapRB=False, crop=False)
net.setInput(blob)
hed = net.forward()
hed = cv2.resize(hed[0, 0], (Width, Height))
hed = (255 * hed).astype("uint8")
return hed
def find_biggest_rect_contours(image):
contours, hierarchy = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
max_area = 0
epsilon = 0.03
for i in contours:
area = cv2.contourArea(i)
#we check for contours with area greater than 100 because we don't need very small ones
if area > 100:
peri = cv2.arcLength(i, True)
            #here the value of epsilon should be tuned properly to match your environment
approx = cv2.approxPolyDP(i, epsilon*peri, True)
if area > max_area and len(approx) == 4:
rectangle = approx
max_area = area
#reshape the numpy array of the rectangle
rect = rectangle.reshape(4, 2)
return rect
def decode(scores, geometry, scoreThresh):
detections = []
confidences = []
############ CHECK DIMENSIONS AND SHAPES OF geometry AND scores ############
assert len(scores.shape) == 4, "Incorrect dimensions of scores"
assert len(geometry.shape) == 4, "Incorrect dimensions of geometry"
assert scores.shape[0] == 1, "Invalid dimensions of scores"
assert geometry.shape[0] == 1, "Invalid dimensions of geometry"
assert scores.shape[1] == 1, "Invalid dimensions of scores"
assert geometry.shape[1] == 5, "Invalid dimensions of geometry"
assert scores.shape[2] == geometry.shape[2], "Invalid dimensions of scores and geometry"
assert scores.shape[3] == geometry.shape[3], "Invalid dimensions of scores and geometry"
height = scores.shape[2]
width = scores.shape[3]
for y in range(0, height):
# Extract data from scores
scoresData = scores[0][0][y]
x0_data = geometry[0][0][y]
x1_data = geometry[0][1][y]
x2_data = geometry[0][2][y]
x3_data = geometry[0][3][y]
anglesData = geometry[0][4][y]
for x in range(0, width):
score = scoresData[x]
# If score is lower than threshold score, move to next x
if(score < scoreThresh):
continue
# Calculate offset
offsetX = x * 4.0
offsetY = y * 4.0
angle = anglesData[x]
# Calculate cos and sin of angle
cosA = math.cos(angle)
sinA = math.sin(angle)
h = x0_data[x] + x2_data[x]
w = x1_data[x] + x3_data[x]
# Calculate offset
offset = ([offsetX + cosA * x1_data[x] + sinA * x2_data[x], offsetY - sinA * x1_data[x] + cosA * x2_data[x]])
# Find points for rectangle
p1 = (-sinA * h + offset[0], -cosA * h + offset[1])
p3 = (-cosA * w + offset[0], sinA * w + offset[1])
center = (0.5*(p1[0]+p3[0]), 0.5*(p1[1]+p3[1]))
detections.append((center, (w,h), -1*angle * 180.0 / math.pi))
confidences.append(float(score))
# Return detections and confidences
return [detections, confidences]
def east_text_detector(image):
# for east text detector to work we need to resize the image to multiples of 32
(H, W) = image.shape[:2]
new_height = 320
new_width = 320
# before resizing we store the original size ratios
width_ratio = W / float(new_height)
height_ratio = H / float(new_width)
resized_image = cv2.resize(image, (new_width, new_height))
# set two output layers to the network of boxes and scores
output_layers = ["feature_fusion/Conv_7/Sigmoid", "feature_fusion/concat_3"]
net = cv2.dnn.readNet(east_detector_path)
# construct a blob from the image and then perform a forward pass
blob = cv2.dnn.blobFromImage(resized_image, 1.0, (new_width, new_height), (123.68, 116.78, 103.94), swapRB=True, crop=False)
net.setInput(blob)
output = net.forward(output_layers)
scores = output[0]
geometry = output[1]
    # set minimum confidence of each text box we find, you can fine tune it accordingly but for now we will go with 0.2
min_confidence = 0.2
[boxes, confidences] = decode(scores, geometry, min_confidence)
# we need a threshold for NMS. We use 0.4 as default
nms_threshold = 0.4
indices = cv2.dnn.NMSBoxesRotated(boxes, confidences, min_confidence, nms_threshold)
box_list = []
for i in indices:
# we iterate through each index for finding each valid boxes from the list of all boxes
box = boxes[i[0]]
# xc, yc is the center of the box
(xc, yc) = box[0]
# l and b is the length and breadth of the box
l, b = box[1][0], box[1][1]
# find angle of box in radians
angle = math.radians(box[2])
"""
in the test images that I used the angle was not very accurate. Since we have already made the image
straight using hed we can go with angle 0. But if you have a better east model with correct angle then use the
proper angle. Since i had more accuracy on zero angle i went with that
"""
angle = 0
# create box using the data we have
box = BoundBox.from_center(xc, yc, l, b, angle)
# change the ratio of the coordinates of the box to match that of the original image.
# here we make use of the ratios we saved before resizing the image
box.change_ratio(width_ratio, height_ratio)
# insert the box to the list that contains all the boxes
box_list.append(box)
return box_list
def main():
img = cv2.imread("/home/wasp/WorkingDirectory/label_reader/output_images/1_original.jpg")
noise_reduced_image = reduce_noise(img)
# now we will resize the image to a smaller size like 500, 500
new_h, new_w = 500, 500
noise_reduced_image_resized = cv2.resize(noise_reduced_image, (new_h, new_w))
height, width = img.shape[:2]
    # we keep the original ratios of the image to calculate the bounding box sizes
height_ratio = height/new_h
width_ratio = width/new_w
hed = find_hed(noise_reduced_image_resized)
    rect = find_biggest_rect_contours(hed)
reshaped_rect = np.zeros((4, 2), dtype="int32")
reshaped_rect[:, 0] = rect[:, 0] * width_ratio
reshaped_rect[:, 1] = rect[:, 1] * height_ratio
box = BoundBox.box_from_array(reshaped_rect)
image = box.perspective_wrap(noise_reduced_image)
box_list = east_text_detector(image)
    # now we have a list of all the text boxes with their text values, the next step is to join
    # the ones that are similar; for that we will use BoundBox.merge_box
merged_box = BoundBox.merge_box(box_list, dx=1.2)
text_fields = []
for m_box in merged_box:
        # we will scale the box slightly to make it a little bigger than the text.
        # this will be useful when we use the OCR; we increase the size to 1.008 times the original box
m_box.scale_box(1.008, 1.008)
# we will crop out the image for the bound box. crop image function will take an image as input
# and returns the cropped image as per the dimensions of the bounding box
cropped = m_box.crop_image(image)
# now we can run the image to string function of pytesseract
text = pytesseract.image_to_string(cropped)
# we will set the text we found inside the box
m_box.text_value = text
text_fields.append(text)
# for k in merged_box:
# k.draw_box(image)
#
# display(image)
main()
| 9,428 |
tests/test_script_runner.py
|
CS-SI/pytest-executable
| 8 |
2025855
|
# Copyright 2020 CS Systemes d'Information, http://www.c-s.fr
#
# This file is part of pytest-executable
# https://www.github.com/CS-SI/pytest-executable
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ScriptRunner."""
import re
import subprocess
from pathlib import Path
import pytest
from pytest_executable.script_runner import ScriptExecutionError, ScriptRunner
from . import ROOT_DATA_DIR
DATA_DIR = ROOT_DATA_DIR / "runners"
def test_error_with_missing_setting(tmp_path):
"""Test error when a placeholder cannot be replaced."""
error_msg = "in .*tests/data/runners/nproc.sh: 'nproc' is undefined"
with pytest.raises(ValueError, match=error_msg):
ScriptRunner(DATA_DIR / "nproc.sh", {}, tmp_path)
def test_error_with_unreadable_script(tmp_path):
"""Test error when the script is not readable."""
error_msg = "cannot read the script .*/bin/bash"
with pytest.raises(TypeError, match=error_msg):
ScriptRunner(Path("/bin/bash"), {}, tmp_path)
def test_execution_with_setting(tmp_path):
"""Test script execution with placeholder replaced."""
script_path = DATA_DIR / "nproc.sh"
runner = ScriptRunner(script_path, {"nproc": "100"}, tmp_path)
runner.run()
_assertions(tmp_path / script_path.name, "echo 100", "100", "")
def test_execution_with_timeout(tmp_path):
"""Test script execution with timeout."""
# with enough time
script_path = DATA_DIR / "timeout.sh"
runner = ScriptRunner(script_path, {"timeout": "2s"}, tmp_path)
runner.run()
# without enough time
runner = ScriptRunner(script_path, {"timeout": "0.1s"}, tmp_path)
error_msg = (
r"Command '\['/usr/bin/env', 'bash', 'timeout\.sh'\]' timed out after "
".* seconds"
)
with pytest.raises(subprocess.TimeoutExpired, match=error_msg):
runner.run()
def test_execution_error(tmp_path):
"""Test error when the script execution fails."""
error_msg = "execution failure, see the stdout and stderr files in /"
script_path = DATA_DIR / "error.sh"
runner = ScriptRunner(script_path, {}, tmp_path)
with pytest.raises(ScriptExecutionError, match=error_msg):
runner.run()
_assertions(
tmp_path / script_path.name,
"ls non-existing-file",
"",
"ls: (?:cannot access )?'?non-existing-file'?: No such file or directory",
)
def _assertions(runner_path, script, stdout, stderr_regex):
# check the content of the script, stdout and stderr files
with runner_path.open() as file_:
assert file_.read().strip() == script
with (runner_path.with_suffix(".sh.stdout")).open() as file_:
assert file_.read().strip() == stdout
with (runner_path.with_suffix(".sh.stderr")).open() as file_:
assert re.match(stderr_regex, file_.read())
| 3,326 |
call_alphavantage_api/__init__.py
|
JoranSlingerland/StockTracker
| 1 |
2026360
|
"""Call alphavantage API"""
# pylint: disable=logging-fstring-interpolation
import json
import logging
import time
import requests
from shared_code import get_config
def main(payload: str) -> str:
"""Get data from API"""
url = payload
api_key = get_config.get_api_key()
errorcounter = 0
    url = f"{url}&apikey={api_key}"
    while True:
        logging.info(f"Calling API: {url}")
data = requests.get(url)
if data.status_code != 200:
logging.error(f"Error: {data.status_code}")
logging.info("Retrying in 30 seconds")
errorcounter += 1
time.sleep(30)
if errorcounter > 3:
logging.error("Too many errors, exiting. Error: {data.status_code}")
raise Exception(f"Error: {data.status_code}")
continue
key = "Note"
keys = data.json()
if key in keys.keys():
logging.warning("To many api calls, Waiting for 60 seconds")
time.sleep(60)
errorcounter += 1
if errorcounter > 3:
logging.critical("Too many api calls, Exiting.")
raise Exception("Too many api calls, Exiting.")
continue
return data.json()
| 1,263 |
social/models/board.py
|
Mangeneh/akkaskhooneh-backend
| 7 |
2026442
|
from django.db import models
from authentication.models import User
from social.models import Posts
class Board(models.Model):
owner = models.ForeignKey(User, on_delete=models.CASCADE)
name = models.CharField(max_length=20)
class Meta:
unique_together = ('owner', 'name')
class BoardContains(models.Model):
board = models.ForeignKey(Board, on_delete=models.CASCADE)
post = models.ForeignKey(Posts, on_delete=models.CASCADE)
| 457 |
libs/config.py
|
Anshit01/GLUG-NITH-Discord-Bot
| 0 |
2023335
|
import json
config_path = "config/config.json"
strings_path = "config/strings.json"
with open(config_path) as config_file, \
open(strings_path) as strings_file:
config_json = json.loads(config_file.read())
strings_json = json.loads(strings_file.read())
def get_config(key):
return config_json[key]
def get_string(key):
return strings_json[key]
| 374 |
sense/surface/dubois95.py
|
pygeo/sense
| 1 |
2026395
|
"""
implements the Dubois95 model
as described in Ulaby (2014), Chapter 10.6
"""
import numpy as np
from . scatter import SurfaceScatter
class Dubois95(SurfaceScatter):
def __init__(self, eps, ks, theta, lam=None):
"""
Parameters
----------
lam : float
wavelength in meter
"""
super(Dubois95, self).__init__(eps, ks, theta)
self.lam = lam
assert self.lam is not None
self.vv, self.hh = self._calc_sigma()
self.hv = None
def _calc_sigma(self):
lam = self.lam*100.
return self._vv(lam, self.ks), self._hh(lam, self.ks)
def _hh(self, lam, ks):
"""
lam : float
wavelength in cm
"""
a = (10.**-2.75)*(np.cos(self.theta)**1.5)/(np.sin(self.theta)**5.)
c = 10.**(0.028*np.real(self.eps)*np.tan(self.theta))
d = ((ks*np.sin(self.theta))**1.4)*lam**0.7
return a*c*d
def _vv(self, lam, ks):
""" eq. 10.41b """
b = 10.**(-2.35)*((np.cos(self.theta)**3.) / (np.sin(self.theta)**3.))
c = 10.**(0.046*np.real(self.eps)*np.tan(self.theta))
d = (ks*np.sin(self.theta))**1.1*lam**0.7
return b*c*d
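# Illustrative usage sketch (the parameter values below are arbitrary assumptions):
#
#     import numpy as np
#     model = Dubois95(eps=15.0 - 3.0j, ks=0.4, theta=np.deg2rad(35.0), lam=0.056)
#     print(model.vv, model.hh)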
| 1,222 |
reagent/models/mlp_scorer.py
|
yuanyuan0057/ReAgent
| 1 |
2025852
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import itertools
from dataclasses import field
from typing import List
import reagent.types as rlt
import torch
from reagent.core.configuration import resolve_defaults
from reagent.models.base import ModelBase
from torch import nn
EPS = 1e-12
class MLPScorer(ModelBase):
@resolve_defaults
def __init__(
self,
input_dim: int,
layer_sizes: List[int] = field(default_factory=list), # noqa: B008
output_dim: int = 1,
concat: bool = False,
log_transform: bool = False,
) -> None:
super().__init__()
# Mix Linear layers with ReLU layers, except for the last one.
inputs = [input_dim] + layer_sizes
outputs = layer_sizes + [output_dim]
fc_layers = [nn.Linear(ind, outd) for ind, outd in zip(inputs, outputs)]
relu_layers = [nn.ReLU(inplace=True)] * len(fc_layers)
all_layers = list(itertools.chain.from_iterable(zip(fc_layers, relu_layers)))[
:-1
] # drop last relu layer
self.concat = concat
self.log_transform = log_transform
self.mlp = nn.Sequential(*all_layers)
def forward(self, obs):
if self.log_transform:
obs = rlt.FeatureData(
float_features=obs.float_features.clip(EPS).log(),
candidate_docs=rlt.DocList(
float_features=obs.candidate_docs.float_features.clip(EPS).log(),
),
)
return self.mlp(self._concat_features(obs)).squeeze(-1)
def _concat_features(self, obs):
if self.concat:
return obs.concat_user_doc()
else:
return obs.candidate_docs.float_features.float()
def input_prototype(self):
# Sample config for input
batch_size = 2
state_dim = 5
num_docs = 3
candidate_dim = 4
        return rlt.FeatureData(
float_features=torch.randn((batch_size, state_dim)),
candidate_docs=rlt.DocList(
float_features=torch.randn(batch_size, num_docs, candidate_dim)
),
)
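# Illustrative usage sketch (dimensions are assumptions; with concat=False the
# scorer consumes only the candidate features, so input_dim must equal the
# candidate feature dimension):
#
#     scorer = MLPScorer(input_dim=4, layer_sizes=[16, 8])
#     scores = scorer(scorer.input_prototype())  # shape: (batch_size, num_docs)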
| 2,182 |
urbansounds/train.py
|
mapattacker/bioacoustics
| 0 |
2024522
|
"""refactored from https://www.kaggle.com/prajaktaparate14/audio-classification"""
import json
from time import time
import numpy as np
import pandas as pd
from sklearn.metrics import classification_report, confusion_matrix
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.layers import Activation, Dense, Dropout, Flatten
from tensorflow.keras.models import Sequential, load_model
from tensorflow.keras.optimizers import Adam
class train:
def __init__(self, input_shape, num_labels,
model_path="./model",
mapping_path="mapping.json"):
"""
Args:
input_shape (int): number of features
num_labels (int): number of classes
"""
self.input_shape = input_shape
self.num_labels = num_labels
self.model_path = model_path
        self.mapping_path = mapping_path
def model_arch(self):
"""define NN architecture in keras"""
model = Sequential()
### first Layer
model.add(Dense(1024, input_shape=(self.input_shape,)))
model.add(Activation('relu'))
model.add(Dropout(0.2))
### second Layer
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.2))
### third Layer
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dropout(0.2))
### final Layer
model.add(Dense(self.num_labels))
model.add(Activation('softmax'))
model.compile(
loss='categorical_crossentropy',
metrics=['accuracy'],
optimizer='adam')
return model
def start_train(self, X_train, X_test, y_train, y_test, epochs, batch_size):
"""start training"""
model = self.model_arch()
checkpointer = ModelCheckpoint(filepath=self.model_path,
verbose=1,
save_best_only=True)
start = time()
model.fit(X_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(X_test, y_test),
callbacks=[checkpointer])
elapsed_time = round((time()-start)/60, 2)
print(f'Training completed in time: {elapsed_time} min')
test_accuracy = model.evaluate(X_test, y_test, verbose=0)
print(f'Accuracy: {test_accuracy[1]}')
    def evaluate(self, X_test, y_test, cf_gradient=True):
        """get evaluation reports.
        Returns:
(str): classification report. Use print().
(df): confusion matrix. Do not use print().
"""
model = load_model(self.model_path)
with open(self.mapping_path) as f:
mapping = json.load(f)
classes = [mapping[i] for i in mapping]
prediction = model.predict(X_test)
y_pred = np.argmax(prediction, axis=1)
y_test_orig = np.argmax(y_test, axis=1)
# classification report
class_report = classification_report(y_test_orig, y_pred, target_names=classes)
# confusion matrix
cfm = confusion_matrix(y_test_orig, y_pred)
conf_matrix = pd.DataFrame(cfm, columns=classes, index=classes)
if cf_gradient:
conf_matrix = conf_matrix.style.background_gradient(cmap='coolwarm')
return class_report, conf_matrix
if __name__ == "__main__":
    # NOTE: X_train, X_test, y_train, y_test are assumed to be prepared by an earlier feature-extraction step
    epochs=20
batch_size=32
input_shape=len(X_train[0])
num_labels=y_train.shape[1]
t = train(input_shape, num_labels)
t.start_train(X_train, X_test, y_train, y_test, epochs, batch_size)
    class_r, conf_m = t.evaluate(X_test, y_test, True)
print(class_r)
conf_m
| 3,745 |
bin/filter_assemblies.py
|
czbiohub/sc2-ngs-analysis
| 0 |
2026333
|
#!/usr/bin/env python3
import argparse
import subprocess
import pandas as pd
from Bio import SeqIO
parser = argparse.ArgumentParser()
parser.add_argument('--max_n', type=int)
parser.add_argument('--min_len', type=int)
parser.add_argument('--stats')
parser.add_argument('--fasta')
parser.add_argument('--vcf')
parser.add_argument('--out_prefix')
args = parser.parse_args()
stats_df = pd.read_csv(args.stats, sep="\t")
filtered_rows = []
for _, row in stats_df.iterrows():
if row["n_missing"] <= args.max_n and row["n_actg"] >= args.min_len:
filtered_rows.append(row)
if filtered_rows:
filtered_rows = pd.DataFrame(filtered_rows)
else:
filtered_rows = pd.DataFrame(columns=stats_df.columns)
filtered_rows.to_csv(args.out_prefix + ".stats.tsv", sep="\t", index=False)
samples_to_keep = set(filtered_rows["sample_name"])
def filtered_seqs():
for seq in SeqIO.parse(args.fasta, "fasta"):
if seq.id in samples_to_keep:
yield seq
SeqIO.write(filtered_seqs(), args.out_prefix + ".fa", "fasta")
if args.vcf:
with open(args.out_prefix + ".vcf", "w") as f:
subprocess.run(["bcftools", "view",
"-s", ",".join(samples_to_keep),
"-c", "1",
args.vcf],
stdout=f)
| 1,306 |
src/mannkendall/mk_stats.py
|
mannkendall/Python
| 1 |
2025066
|
# -*- coding: utf-8 -*-
"""
Copyright (c) 2020 MeteoSwiss, contributors of the original matlab version of the code listed in
ORIGINAL_AUTHORS.
Copyright (c) 2020 MeteoSwiss, contributors of the Python version of the code listed in AUTHORS.
Distributed under the terms of the BSD 3-Clause License.
SPDX-License-Identifier: BSD-3-Clause
This file contains the core statistical routines for the package.
"""
# Import the required packages
from datetime import datetime
import numpy as np
from scipy.interpolate import interp1d
from scipy.stats import norm
# Import from this package
from . import mk_tools as mkt
def std_normal_var(s, var_s):
""" Compute the normal standard variable Z.
From Gilbert (1987).
Args:
s (int): S statistics of the Mann-Kendall test computed from the S_test.
        var_s (float): variance of the time series taking into account the ties in values and time.
It should be computed by Kendall_var().
Returns:
float: S statistic weighted by the variance.
"""
# First some sanity checks.
# Be forgiving if I got a float ...
if isinstance(s, float) and s.is_integer():
s = int(s)
if not isinstance(s, (int)):
raise Exception('Ouch ! Variable s must be of type int, not: %s' % (type(s)))
if not isinstance(var_s, (int, float)):
raise Exception('Ouch ! Variable var_s must be of type float, not: %s' % (type(s)))
# Deal with the case when s is 0
if s == 0:
return 0.0
# Deal with the other cases.
return (s - np.sign(s))/var_s**0.5
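# Worked example: with s = 14 and var_s = 92.0 the statistic is
# (14 - 1) / sqrt(92.0) ~= 1.355 (values chosen purely for illustration).
#
#     z = std_normal_var(14, 92.0)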
def sen_slope(obs_dts, obs, k_var, alpha_cl=90.):
""" Compute Sen's slope.
Specifically, this computes the median of the slopes for each interval::
(xj-xi)/(j-i), j>i
The confidence limits are computed with an interpolation that is important if the number of data
point is small, such as for yearly averages for a 10 year trend.
Args:
obs_dts (ndarray of datetime.datetime): an array of observation times. Must be 1-D.
obs (ndarray of floats): the data array. Must be 1-D.
k_var (float): Kendall variance, computed with Kendall_var.
        alpha_cl (float, optional): the desired confidence limit, in %. Defaults to 90.
Return:
(float, float, float): Sen's slope, lower confidence limit, upper confidence limit.
Note:
The slopes are returned in units of 1/s.
"""
# Start with some sanity checks
if not isinstance(alpha_cl, (float, int)):
        raise Exception('Ouch ! alpha_cl should be of type float or int, not: %s' % (type(alpha_cl)))
if alpha_cl > 100 or alpha_cl < 0:
raise Exception('Ouch ! confidence must be 0<=alpha_cl<=100, not: %f' % (float(alpha_cl)))
if not isinstance(k_var, (int, float)):
raise Exception('Ouch ! The variance must be of type float, not: %s' % (type(k_var)))
l = len(obs)
# Let's compute the slope for all the possible pairs.
d = np.array([item for i in range(0, l-1)
for item in list((obs[i+1:l] - obs[i])/mkt.dt_to_s(obs_dts[i+1:l] - obs_dts[i]))])
# Let's only keep the values that are valid
d = d[~np.isnan(d)]
# Let's compute the median slope
slope = np.nanmedian(d)
# Apply the confidence limits
cconf = -norm.ppf((1-alpha_cl/100)/2) * k_var**0.5
# Note: because python starts at 0 and not 1, we need an additional "-1" to the following
# values of m_1 and m_2 to match the matlab implementation.
m_1 = (0.5 * (len(d) - cconf)) - 1
m_2 = (0.5 * (len(d) + cconf)) - 1
# Let's setup a quick interpolation scheme to get the best possible confidence limits
f = interp1d(np.arange(0, len(d), 1), np.sort(d), kind='linear',
fill_value=(np.sort(d)[0], np.sort(d)[-1]),
assume_sorted=True, bounds_error=False)
lcl = f(m_1)
ucl = f(m_2)
return (float(slope), float(lcl), float(ucl))
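# Illustrative usage sketch (synthetic yearly data; the Kendall variance below is
# a placeholder that would normally come from Kendall_var()):
#
#     import numpy as np
#     from datetime import datetime
#     dts = np.array([datetime(2000 + i, 1, 1) for i in range(10)])
#     obs = np.arange(10.0)
#     slope, lcl, ucl = sen_slope(dts, obs, 125.0)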
def s_test(obs, obs_dts):
""" Compute the S statistics (Si) for the Mann-Kendall test.
From Gilbert (1987).
Args:
obs (ndarray of floats): the observations array. Must be 1-D.
obs_dts (ndarray of datetime.datetime): a list of observation datetimes.
Returns:
(float, ndarray): S, n.
S (float) = double sum on the sign of the difference between data pairs
(Si).
n (ndarray of int) = number of valid data in each year of the time series
"""
# If the user gave me a list ... be nice and deal with it.
if isinstance(obs, list) and np.all([isinstance(item, (float, int)) for item in obs]):
obs = np.array(obs)
# Idem for the obs_dts
if isinstance(obs_dts, list) and np.all([isinstance(item, datetime) for item in obs_dts]):
obs_dts = np.array(obs_dts)
# Some sanity checks first
for item in [obs, obs_dts]:
if not isinstance(item, np.ndarray):
raise Exception('Ouch ! I was expecting some numpy.ndarray, not: %s' % (type(item)))
if np.ndim(item) != 1:
raise Exception('Ouch ! The numpy.ndarray must have 1 dimensions, not: %i' %
(np.ndim(item)))
if len(item) != len(obs):
raise Exception('Ouch ! obs and obs_dts should have the same length !')
# Check that I was indeed given proper datetimes !
if np.any([not isinstance(item, datetime) for item in obs_dts]):
raise Exception('Ouch ! I need proper datetime.datetime entities !')
# Find the limiting years
obs_years = np.array([item.year for item in obs_dts])
min_year = np.min(obs_years)
max_year = np.max(obs_years)
# An array to keep track of the number of valid data points in each season
n = np.zeros(max_year - min_year + 1) * np.nan
# Create a vector to keep track of the results
sij = np.zeros(max_year - min_year + 1) * np.nan
for (yr_ind, year) in enumerate(range(min_year, max_year+1)):
        # How many valid points do I have:
n[yr_ind] = np.count_nonzero(~np.isnan(obs[obs_years == year]))
# Compute s for that year, by summing the signs for the differences with all the upcoming
# years
sij[yr_ind] = np.nansum([np.sign(item - obs[obs_years == year])
for yr2 in range(year+1, max_year+1)
for item in obs[obs_years == yr2]])
return (np.nansum(sij), n)
| 6,475 |
cfb-cursos/checkbutton.py
|
joseluizbrits/sobre-python
| 0 |
2022772
|
from tkinter import *
app = Tk()
app.title('BLOCO')
app.geometry('500x300')
vfutebol = IntVar()
vvolei = IntVar()
vbasquete = IntVar()
quadro = Frame(app, borderwidth=1, relief='solid')
quadro.place(x=10, y=10, width=300, height=100)
cb_futebol = Checkbutton(quadro, text='Futebol', variable=vfutebol, onvalue=1, offvalue=0)
cb_futebol.pack(side=LEFT)
cb_volei = Checkbutton(quadro, text='Vôlei', variable=vvolei, onvalue=1, offvalue=0)
cb_volei.pack(side=LEFT)
cb_basquete = Checkbutton(quadro, text='Basquete', variable=vbasquete, onvalue=1, offvalue=0)
cb_basquete.pack(side=LEFT)
app.mainloop()
| 608 |
cohesity_management_sdk/models/pattern_request_body.py
|
sachinthakare-cohesity/management-sdk-python
| 0 |
2026132
|
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
import cohesity_management_sdk.models.supported_pattern
class PatternRequestBody(object):
"""Implementation of the 'Pattern Request Body.' model.
Specifies details about the pattern which has to be saved.
Attributes:
application_data_type (int): Specifies the data type for which
            supported patterns can be fetched.
application_id (long|int): Specifies AWB Application ID.
user_pattern (SupportedPattern): Specifies details of the pattern
available for search available in an application such as Pattern
Finder within Analytics Work Bench.
"""
# Create a mapping from Model property names to API property names
_names = {
"application_data_type":'applicationDataType',
"application_id":'applicationId',
"user_pattern":'userPattern'
}
def __init__(self,
application_data_type=None,
application_id=None,
user_pattern=None):
"""Constructor for the PatternRequestBody class"""
# Initialize members of the class
self.application_data_type = application_data_type
self.application_id = application_id
self.user_pattern = user_pattern
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
application_data_type = dictionary.get('applicationDataType')
application_id = dictionary.get('applicationId')
user_pattern = cohesity_management_sdk.models.supported_pattern.SupportedPattern.from_dictionary(dictionary.get('userPattern')) if dictionary.get('userPattern') else None
# Return an object of this model
return cls(application_data_type,
application_id,
user_pattern)
| 2,338 |
eny.py
|
ICT4H/dcs-web
| 1 |
2026190
|
# coding=utf-8
from Crypto.Cipher import AES
import base64
import os
def encryption(privateInfo):
BLOCK_SIZE = 16
PADDING = '{'
pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) *PADDING
EncodeAES = lambda c, s: base64.b64encode(c.encrypt(pad(s)))
secret = '<KEY>'
print 'encryptionkey:', secret
cipher = AES.new(secret)
encoded = EncodeAES(cipher, privateInfo)
print 'Encryptedstring:', encoded
encryption('<EMAIL>')
| 461 |
client/concli.py
|
lzubiaur/debugconsole
| 2 |
2024820
|
#!/usr/bin/env python
"""
" Copyright (c) 2013 <NAME>
"
" http://www.pix2d.com/
"
" Permission is hereby granted, free of charge, to any person obtaining a copy
" of this software and associated documentation files (the "Software"), to deal
" in the Software without restriction, including without limitation the rights
" to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
" copies of the Software, and to permit persons to whom the Software is
" furnished to do so, subject to the following conditions:
"
" The above copyright notice and this permission notice shall be included in
" all copies or substantial portions of the Software.
"
" THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
" IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
" FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
" AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
" LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
" OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
" THE SOFTWARE.
"""
import os,sys,socket
import base64
import argparse
import plistlib
import ConfigParser
from struct import *
def main(argv=None):
# load config file
config = ConfigParser.ConfigParser()
config.readfp(open('concli.conf'))
argv = (argv or sys.argv)[1:]
parser = argparse.ArgumentParser(usage=("usage: %(prog)s [plist | --plist | -p]"))
parser.add_argument("--plist", "-p",
dest="plist_path",
type=unicode,
help="Sprites sheet plist path")
parser.add_argument("--json", "-j",
dest="json_path",
type=unicode,
help="level's json data")
options, args = parser.parse_known_args(argv)
if options.plist_path:
UpdateSpritesCmd(options.plist_path,config).run()
if options.json_path:
ReloadLevelCmd(options.json_path,config).run()
return 0
class Command(object):
msg = ''
config = None
sock = None
PROTO_VERSION = 1
def __init__(self,config):
self.config = config
# create an INET, STREAMing socket
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def send(self):
# Request header
header = pack('!IB',len(self.msg),self.PROTO_VERSION)
# Construct the request message (header + content)
packet = '{}{}'.format(header,self.msg)
packet_len = len(packet)
# send the message header and content
total_sent = 0
while True:
sent = self.sock.send(packet[total_sent:])
if sent == 0:
raise RuntimeError("Can't send message. Please ensure that the remote application is running.")
total_sent = total_sent + sent
if total_sent == packet_len:
break
print '{} byte(s) sent'.format(total_sent)
def recv(self):
resp = ''
while True:
chunk = self.sock.recv(4096)
if chunk == '':
break # EOF
resp = resp + chunk
return resp
def run(self):
port = self.config.getint("usbmuxd","console.port")
addr = self.config.get("usbmuxd","console.ip")
# Connect to the remote device
code = self.sock.connect_ex((addr, port))
if not code == 0:
print "Can't connect to device ({}). Please check that the usbmuxd proxy is running if the device is attached via usb.".format(os.strerror(code))
return
# Send the command message
self.send();
# Done sending
# self.sock.shutdown(socket.SHUT_WR) # breaks receiving response on mac os
# Read the response
# resp = self.recv() # now implemented using makefile().read
resp = self.sock.makefile().read()
# close the connection
self.sock.close()
# Print the response message
print 'Response: ' + resp
class ReloadLevelCmd(Command):
def __init__(self,json_path,config):
super(ReloadLevelCmd,self).__init__(config)
# base64 encode the json file
with open(json_path, 'r') as f:
json_base64 = base64.encodestring(f.read())
f.closed
self.msg = """-- Lua script
data = [[
{}]]
v = pix2d_console.Command:new()
v:reloadLevel(data)
""".format(json_base64).encode('utf-8')
class UpdateSpritesCmd(Command):
def __init__(self,plist_path,config):
super(UpdateSpritesCmd,self).__init__(config)
# base64 encode the plist file
with open(plist_path, 'r') as f:
plist_base64 = base64.encodestring(f.read())
f.closed
# Load the plist file
plist = plistlib.readPlist(plist_path)
# base64 encode the texture file
texture_path = "{0}/{1}".format(os.path.dirname(plist_path), plist['metadata']['textureFileName'])
with open(texture_path, 'r') as f:
sprite_base64 = base64.encodestring(f.read())
f.closed
        # The Lua script below is formatted with the base64-encoded file contents
self.msg = """-- Lua script
texture = [[
{}]]
plist = [[
{}]]
v = pix2d_console.Command:new()
v:updateSpriteFrames(plist,texture)
v:reloadLevel()
""".format(sprite_base64,plist_base64).encode('utf-8')
if __name__ == "__main__":
sys.exit(main())
| 5,431 |
djvue/defaults.py
|
abahnihi/djvue
| 13 |
2024704
|
from django.conf import settings
def _get(key, default):
return getattr(settings, key, default)
VV_LOCALE_PATH = _get("DJVUE_VV_LOCALE_PATH", "djvue.vv_locale.vv_locale")
| 179 |
tests/test_pdfnode.py
|
bupt-ipcr/expdf
| 0 |
2025276
|
#!/usr/bin/env python
# coding=utf-8
"""
@author: <NAME>
@create time: 1970-01-01 08:00
@edit time: 2020-05-10 10:29
@FilePath: /expdf/tests/test_pdfnode.py
@desc: Tests that the Graph module works correctly
"""
from expdf.pdfnode import PDFNode, LocalPDFNode
import json
class TestPDFNode:
def setup(self):
PDFNode.clear_nodes()
def teardown(self):
PDFNode.clear_nodes()
def test_new(self):
"""测试新建node是否表现正常"""
n0 = PDFNode('title0', refs=['ta', 'tb'])
        # Creating an object with the same title as n0 and the same refs should not raise
has_except = False
try:
n0 = PDFNode('title0', refs=['ta', 'tb'])
except Exception:
has_except = True
assert not has_except, "Unexpected error in creating PDFNode instance"
        # Using different refs should raise an error
has_except = False
try:
n0copy = PDFNode('title0', [])
except Exception as e:
assert str(e) == "Can't instantiate PDFNode with same title but different refs"
has_except = True
assert has_except, "Create two instance of PDFNode with different refs has no error"
        # Omitting refs should not raise an error
has_except = False
try:
n0copy = PDFNode('title0')
except Exception:
has_except = True
assert not has_except, "Unexpected error in creating PDFNode instance"
        # n0 and n0copy should be the same object
assert n0 is n0copy
        # Every node that was referenced should have been created
nodes = PDFNode.nodes()
assert nodes == [n0, PDFNode('ta'), PDFNode('tb')]
def test_relations(self):
"""测试节点之间祖先/子孙关系是否正常"""
n0 = PDFNode('title0', refs=['ta', 'tb'])
na, nb = PDFNode('ta'), PDFNode('tb')
        # The ancestors of n0 should be ta and tb
assert n0.actients == {na, nb}
        # The ta object should have posterities but no children
assert na.children == set()
assert na.posterities == {n0}
def test_instances(self):
"""测试记录所有实例的instances是否正常工作
instances的key应该是小写并去除 非字母数字下划线 字符后的结果
"""
n0 = PDFNode('A collaborative distributed strategy for multi-agent reinforcement learning through consensus + innovations.')
        # Verify that the recorded key is lowercase with special characters removed
assert list(PDFNode.instances.keys()) == [
'acollaborativedistributedstrategyformultiagentreinforcementlearningthroughconsensusinnovations']
        # Verify that the node's own title is unchanged
assert n0.title == 'A collaborative distributed strategy for multi-agent reinforcement learning through consensus + innovations.'
        # Verify that a node differing only in punctuation is not created as a new node
n0new = PDFNode(
'?A collaborative distributed strategy for multi-agent reinforcement learning through consensus + innovations')
assert n0new == n0
        # Verify that a node differing in letters or digits is created as a new node
n1 = PDFNode(
'A new collaborative distributed strategy for multi-agent reinforcement learning through consensus + innovations')
assert n0 != n1
def test_get_json(self):
"""test method get_json"""
PDFNode('title0', ['title2'])
PDFNode('title1', ['title2'])
PDFNode('title3', ['title0'])
pdf_info = json.loads(PDFNode.get_json())
# posterities is a set and convert to list may have different order
assert pdf_info == [
{'node_key': 'title0', 'title': 'title0', 'local': False, 'actients': [
{'node_key': 'title2', 'title': 'title2'}], 'posterities': [{'node_key': 'title3', 'title': 'title3'}]},
{'node_key': 'title2', 'title': 'title2', 'local': False, 'actients': [], 'posterities': [
{'node_key': 'title0', 'title': 'title0'}, {'node_key': 'title1', 'title': 'title1'}]},
{'node_key': 'title1', 'title': 'title1', 'local': False, 'actients': [
{'node_key': 'title2', 'title': 'title2'}], 'posterities': []},
{'node_key': 'title3', 'title': 'title3', 'local': False, 'actients': [
{'node_key': 'title0', 'title': 'title0'}], 'posterities': []}
] or pdf_info == [
{'node_key': 'title0', 'title': 'title0', 'local': False, 'actients': [
{'node_key': 'title2', 'title': 'title2'}], 'posterities': [{'node_key': 'title3', 'title': 'title3'}]},
{'node_key': 'title2', 'title': 'title2', 'local': False, 'actients': [], 'posterities': [
{'node_key': 'title1', 'title': 'title1'}, {'node_key': 'title0', 'title': 'title0'}]},
{'node_key': 'title1', 'title': 'title1', 'local': False, 'actients': [
{'node_key': 'title2', 'title': 'title2'}], 'posterities': []},
{'node_key': 'title3', 'title': 'title3', 'local': False, 'actients': [
{'node_key': 'title0', 'title': 'title0'}], 'posterities': []}
]
class TestLocalPDFNode:
@classmethod
def setup_class(cls):
PDFNode.clear_nodes()
@classmethod
def teardown_class(cls):
PDFNode.clear_nodes()
def test_override(self):
"""测试新建node是否表现正常"""
n0 = LocalPDFNode('title0', refs=['ta', 'tb'])
has_except = False
try:
LocalPDFNode('title0', refs=['ta', 'tc'])
except Exception:
has_except = True
assert not has_except, "Unexpected error in override LocalPDFNode instance"
assert n0.actients == {PDFNode('ta'), PDFNode('tc')}
assert PDFNode.nodes() == [PDFNode('title0'), PDFNode('ta'), PDFNode('tb'), PDFNode('tc')]
| 5,411 |
lliregistration_back/foundation/management/commands/email_when_payment_due.py
|
ydang5/final-project-back
| 0 |
2026307
|
from django.core.management.base import BaseCommand, CommandError
from foundation.models import LLIStudentData
from django.utils import timezone
from django.core.mail import send_mail
class Command(BaseCommand):
    help = 'Email staff about students whose payments are due to expire within 7 days.'
def process_students(self, students):
now = timezone.now().date()
student_expired_payment_arr = []
for student in students:
print(student.payment_valid_date)
print(now)
target_date = student.payment_valid_date
delta = target_date - now
print(delta.days+1)
print()
if delta.days +1 <= 7:
if student.was_payment_valid_date_dealt == False:
print('send email to student with id:', student.student_id)
student_expired_payment_arr.append(student)
self.email_staff(student_expired_payment_arr)
def email_staff(self, students):
email_message = ""
for student in students:
student_first_name = student.first_name
student_last_name = student.last_name
student_id = str(student.student_id)
student_payment_expire_date = str(student.payment_valid_date)
email_message +="Student :"+ student_first_name+" "+student_last_name+" with student id: "+student_id+" payment is going to be expired on "+student_payment_expire_date +"\n"
send_mail(
'Subject here',
email_message,
'<EMAIL>',
['<EMAIL>'],
fail_silently=False,
)
def handle(self, *args, **options):
        students = LLIStudentData.objects.all()
        self.process_students(students)
        self.stdout.write(self.style.SUCCESS('Successfully processed payment due dates.'))
| 1,790 |
setup.py
|
danleonard-nj/swagger-gen
| 0 |
2025872
|
from setuptools import setup, find_packages
VERSION = '0.1.1'
DESCRIPTION = 'Swagger UI for Flask apps'
LONG_DESCRIPTION = 'Automatically generate Swagger UI documentation for a Flask app. Batteries included.'
setup(
name="swagger-gen",
version=VERSION,
author="<NAME>",
author_email="<EMAIL>",
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
package_data={'swagger_gen': ['./resources/swagger.pkl']},
packages=find_packages(),
install_requires=['flask'],
keywords=['python', 'swagger-gen'],
classifiers=[
"Development Status :: 3 - Alpha",
"Programming Language :: Python :: 3",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
]
)
| 763 |
exercicios_curso_em_video/Exercicio 102.py
|
Sposigor/Caminho_do_Python
| 1 |
2025376
|
from math import factorial
def fatorial(x, show=False):
x_fact = factorial(x)
    print(f'The factorial of {x} is {x_fact}.')
if show:
        print('The calculation of this factorial is:')
for c in range(x, 0, -1):
print(f'{c}', end='')
if c > 1:
print(' . ',end='')
else:
print(' = ',end='')
print(x_fact)
else:
        print('Calculation not displayed.')
x = int(input('Enter an integer to compute its factorial: '))
fatorial(x, show=True)
| 537 |
muteria/statistics/main.py
|
muteria/muteria
| 5 |
2026009
|
from __future__ import print_function
import os
import shutil
import logging
from jinja2 import Template
import webbrowser
import muteria.common.mix as common_mix
import muteria.common.fs as common_fs
import muteria.common.matrices as common_matrices
import muteria.statistics.algorithms as stats_algo
import muteria.controller.explorer as fd_structure
ERROR_HANDLER = common_mix.ErrorHandler
def get_subsuming_elements(matrix_file):
mat = common_matrices.ExecutionMatrix(filename=matrix_file)
elem_to_tests = mat.query_active_columns_of_rows()
equiv, subs_clusters = stats_algo.getSubsumingMutants(\
elem_to_tests, clustered=True)
return equiv, subs_clusters
#~ def get_subsuming_elements()
class StatsComputer(object):
@staticmethod
def merge_lmatrix_into_right(lmatrix_file, rmatrix_file):
if not os.path.isfile(rmatrix_file):
shutil.copy2(lmatrix_file, rmatrix_file)
else:
lmatrix = common_matrices.ExecutionMatrix(filename=lmatrix_file)
rmatrix = common_matrices.ExecutionMatrix(filename=rmatrix_file)
rmatrix.update_with_other_matrix(lmatrix, override_existing=True, \
ask_confirmation_with_exist_missing=True)
rmatrix.serialize()
#~ def merge_lmatrix_into_right()
@staticmethod
def merge_lexecoutput_into_right(lexecoutput_file, rexecoutput_file):
if not os.path.isfile(rexecoutput_file):
shutil.copy2(lexecoutput_file, rexecoutput_file)
else:
lexecoutput = common_matrices.OutputLogData(\
filename=lexecoutput_file)
rexecoutput = common_matrices.OutputLogData(\
filename=rexecoutput_file)
rexecoutput.update_with_other(lexecoutput, override_existing=True,\
ask_confirmation_with_exist_missing=True)
rexecoutput.serialize()
    #~ def merge_lexecoutput_into_right()
@staticmethod
def compute_stats(config, explorer, checkpointer):
# get the matrix of each test criterion
coverages = {}
total_to = {}
number_of_testcases = None
for c in config.ENABLED_CRITERIA.get_val():
if explorer.file_exists(fd_structure.CRITERIA_MATRIX[c]):
mat_file = explorer.get_existing_file_pathname(\
fd_structure.CRITERIA_MATRIX[c])
mat = common_matrices.ExecutionMatrix(filename=mat_file)
row2collist = mat.query_active_columns_of_rows()
cov = len([k for k,v in row2collist.items() if len(v) > 0])
tot = len(row2collist)
coverages[c.get_str()] = 'n.a.' if tot == 0 else \
'{:.2f}'.format(cov * 100.0 / tot)
total_to[c.get_str()] = tot
if number_of_testcases is None:
number_of_testcases = len(mat.get_nonkey_colname_list())
# JSON
out_json = {}
out_json['TOTAL EXECUTION TIME (s)'] = \
checkpointer.get_execution_time()
out_json['NUMBER OF TESTCASES'] = number_of_testcases
out_json['CRITERIA'] = {}
for c in coverages:
out_json['CRITERIA'][c] = {'coverage': coverages[c],
'# test objectives': total_to[c]}
common_fs.dumpJSON(out_json, explorer.get_file_pathname(\
fd_structure.STATS_MAIN_FILE_JSON), \
pretty=True)
# HTML
template_file = os.path.join(os.path.dirname(\
os.path.abspath(__file__)), 'summary_report.html')
report_file = explorer.get_file_pathname(\
fd_structure.STATS_MAIN_FILE_HTML)
def format_execution_time(exec_time):
n_day = int(exec_time // (24 * 3600))
exec_time = exec_time % (24 * 3600)
n_hour = int(exec_time // 3600)
exec_time %= 3600
n_minutes = int(exec_time // 60)
exec_time %= 60
n_seconds = int(round(exec_time))
res = ""
for val, unit in [(n_day, 'day'), (n_hour, 'hour'), \
                              (n_minutes, 'minute'), (n_seconds, 'second')]:
if val > 0:
s = ' ' if val == 1 else 's '
res += str(val) + ' ' + unit + s
return res
#~ def format_execution_time()
total_exec_time = format_execution_time(\
checkpointer.get_execution_time())
rendered = Template(open(template_file).read()).render( \
{
'total_execution_time': total_exec_time,
'number_of_testcases': number_of_testcases,
'coverages':coverages,
'total_to':total_to,
})
with open(report_file, 'w') as f:
f.write(rendered)
try:
webbrowser.get()
webbrowser.open('file://' + report_file,new=2)
except Exception as e:
logging.warning("webbrowser error: "+str(e))
#~ def compute_stats()
#~ class DataHandling
| 5,608 |
spotfinder/command_line/find_active_area.py
|
dperl-sol/cctbx_project
| 155 |
2025702
|
from __future__ import absolute_import, division, print_function
from six.moves import range
# LIBTBX_SET_DISPATCHER_NAME distl.find_active_area
import os, sys
from iotbx.detectors.npy import NpyImage
from spotfinder.core_toolbox import find_active_area
#special import of pickled NumPy array: CXI/CSPad data file
def ImageFactory(filename):
if os.path.isfile(filename):
I = NpyImage(filename)
I.readHeader()
return I
class graph_tracker:
def has_one(self,graph):
for key in graph.keys():
if len(graph[key])==1:
self.key = key
self.item_sink = graph[key][0]
return True
return False
def prune(self,graph):
for key in graph.keys():
try:
graph[key].remove(self.item_sink)
except ValueError: pass
def run_one(path, display):
image = ImageFactory(path)
image.read()
data = image.linearintdata
PC = find_active_area(data)
sources = []; sinks = []
for x in range(0,len(PC),2):
if PC[x]>=0:
sources.append((PC[x],PC[x+1]))
else:
sinks.append((-PC[x],-PC[x+1]))
print(len(sources),len(sinks))
assert len(sources)==len(sinks)
graph = {}
final_graph = {}
for src in sources:
item_sinks = [i for i in sinks if i[0]>src[0] and i[1]>src[1]]
graph[src]=item_sinks
G = graph_tracker()
while G.has_one(graph):
print(G.key, G.item_sink)
final_graph[G.key]=G.item_sink
del graph[G.key]
G.prune(graph)
assert len(graph)==0
if __name__ == "__main__":
for arg in sys.argv[1:]:
run_one(arg, display=True)
| 1,548 |
homepage/migrations/0010_auto_20171117_2008.py
|
nitinkedia7/IntelligentFacultyPortal
| 0 |
2026106
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-17 14:38
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('homepage', '0009_auto_20171117_1953'),
]
operations = [
migrations.AlterModelOptions(
name='course',
options={'ordering': ['year'], 'verbose_name_plural': 'Courses'},
),
]
| 445 |
bin/bck_rsync.py
|
rboman/progs
| 2 |
2025746
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copies the rsync directories from garfield to my Dropbox.
# This script can be run from anywhere.
import os, subprocess, shutil
pars = {}
pars['destdir'] = '/hdd2/boman/Dropbox/Backups/Repositories'
pars['srcdir'] = '/hdd2/boman/Backups/rsync'
pars['folders'] = ['web' ] #['SVN', 'SVN2', 'web']
def tar_folder(fname):
bname = os.path.basename(fname)
arcname = '%s.tar.bz2' % bname
if os.path.isfile(arcname):
print('rm %s' % arcname)
os.remove(arcname)
parallel = False
with open(os.devnull, 'w') as FNULL:
parallel = not subprocess.call(["which", "pbzip2"], stdout=FNULL, stderr=subprocess.STDOUT)
# parallel compression if possible
if parallel:
# parallel (requires 'pbzip2') - ?pxz does not always work ... and is very slow...
cmd = ['tar', '-I', 'pbzip2', '-cf', arcname, fname]
else:
print('\tinfo: use "apt-get install pbzip2" for parallel compression')
cmd = ['tar', '-cjf', arcname, fname]
#print cmd
print(" ".join(cmd))
subprocess.check_call(cmd)
    # TODO: use shutil.make_archive instead? (see the sketch after this function)
return arcname
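def tar_folder_shutil(fname):
    # Sketch of the shutil-based alternative hinted at in the TODO above (an
    # assumption, not called by main): shutil.make_archive performs the tar +
    # bzip2 step in pure Python, at the cost of losing the parallel pbzip2 path.
    bname = os.path.basename(fname)
    return shutil.make_archive(bname, 'bztar', root_dir='.', base_dir=fname)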
def main(pars):
destdir = pars['destdir']
srcdir = pars['srcdir']
# check whether both folders exist
if not os.path.isdir(destdir):
raise Exception('destdir does not exist (%s)!' % destdir)
if not os.path.isdir(srcdir):
raise Exception('srcdir does not exist (%s)!' % srcdir)
# change dir to srcdir
print('changing dir to %s' % srcdir)
os.chdir(srcdir)
for folder in pars['folders']:
if not os.path.isdir(folder):
raise Exception('folder does not exist (%s)!' % folder)
# build archive
arcname = tar_folder(folder)
# copy archive to destination
target = os.path.join(destdir, arcname)
print('cp %s %s' % (arcname, target))
shutil.copy2(arcname, target)
if __name__=="__main__":
main(pars)
| 2,007 |
src/event_based.py
|
gimmie7/projekt2
| 0 |
2026221
|
import argparse
import datetime
import numpy
from em340.em340 import Em340
from em340.measurement import Measurement
from helper.scheduler import PeriodicScheduler
from helper.csvhelper import CsvHelper
# Settings and default values
port = 'COM4' # serial port
parser = argparse.ArgumentParser()
parser.add_argument("--p", help="serial port where your modbus device is plugged in, e.g. COM4")
args = parser.parse_args()
if args.p:
port = args.p
# Inizialize Em340
em340 = Em340(port)
print('run script with the following settings: port={0}'.format(port))
active_devices = []
active_power_total = 0
devices = numpy.genfromtxt('data_samples/devices.csv',delimiter=',',dtype=None,usecols=(0,1,2,3,4),encoding='UTF-8')
def guess(measurement: Measurement):
    """Guess which device was switched on or off by matching the change in
    total active power against the known device powers (25 W tolerance)."""
    global active_devices, active_power_total, devices
difference = 1000000
guessed_device = None
for device in devices:
difference_temp = 0
if active_power_total < measurement.p: # device added
difference_temp = abs(active_power_total + device[1] - measurement.p)
else: # device removed
difference_temp = abs(active_power_total - device[1] - measurement.p)
if difference_temp < 25 and difference_temp < difference:
guessed_device = device[0]
difference = difference_temp
    if guessed_device is not None:
if measurement.p > active_power_total:
if guessed_device not in active_devices:
active_devices.append(guessed_device)
else:
if guessed_device in active_devices:
active_devices.remove(guessed_device)
active_power_total = measurement.p
return guessed_device
def read():
global active_power_total, active_devices
measurement = em340.read_phase1()
print(str(measurement.p) + ' ' + str(measurement.s))
if abs(active_power_total - measurement.p) > 25:
guessed_device = guess(measurement)
        if guessed_device is not None:
print(active_devices)
# Periodic reading/writing
periodic_scheduler = PeriodicScheduler()
periodic_scheduler.setup(5, read) # it executes the event just once
periodic_scheduler.run() # starts the scheduler
| 2,230 |
regression.py
|
ethangoan/regression
| 2 |
2025881
|
'''
A regression example using TensorFlow library.
Author: <NAME>
Modified from original source by <NAME>
Project: https://github.com/aymericdamien/TensorFlow-Examples/
MIT Licence
'''
import tensorflow as tf
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def forward_pass(X, W_1, b_1, W_2, b_2, W_o, b_o):
    # Forward pass through a small two-hidden-layer tanh network
    A_1 = tf.nn.tanh(tf.multiply(W_1, X) + b_1)
    A_2 = tf.nn.tanh(tf.matmul(W_2, A_1) + b_2)
    return tf.matmul(W_o, A_2) + b_o
rng = np.random
# Parameters
learning_rate = 0.01
training_epochs = 1000
display_step = 50
# Training Data
train_X = np.load('x.npy').ravel()
train_Y = np.load('y.npy').ravel()
test_X = np.load('x_s.npy').ravel()
test_Y = np.load('y_test.npy').ravel()
n_samples = train_X.shape[0]
# tf Graph Input
X = tf.placeholder("float64")
Y = tf.placeholder("float64")
hidden_layer_dims = [20, 20]
# Set model weights
#first hidden layer
W_1 = tf.Variable(rng.randn(hidden_layer_dims[0], 1),
name="weight_1", dtype=tf.float64)
b_1 = tf.Variable(rng.randn(hidden_layer_dims[0], 1),
name="bias_1", dtype=tf.float64)
#second hidden layer
W_2 = tf.Variable(rng.randn(hidden_layer_dims[1], hidden_layer_dims[0]),
name="weight_2", dtype=tf.float64)
b_2 = tf.Variable(rng.randn(hidden_layer_dims[1], 1),
name="bias_2", dtype=tf.float64)
#output layer
W_o = tf.Variable(rng.randn(1, hidden_layer_dims[1]),
name="weight_o", dtype=tf.float64)
b_o = tf.Variable(rng.randn(1, 1),
name="bias_o", dtype=tf.float64)
# Construct the model's predicted output
pred = forward_pass(X, W_1, b_1, W_2, b_2, W_o, b_o)
# Mean squared error
cost = tf.reduce_sum(tf.pow(pred-Y, 2))/(2*n_samples)
# Gradient descent
# Note, minimize() knows to modify W and b because Variable objects are
# trainable=True by default
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
# Start training
with tf.Session() as sess:
# Run the initializer
sess.run(init)
# Fit all training data
for epoch in range(training_epochs):
for (x, y) in zip(train_X, train_Y):
sess.run(optimizer, feed_dict={X: x, Y: y})
# Display logs per epoch step
if (epoch+1) % display_step == 0:
c = sess.run(cost, feed_dict={X: train_X, Y:train_Y})
print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c))
print("Optimization Finished!")
training_cost = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
print("Training cost=", training_cost, '\n')
print("Testing... (Mean square loss Comparison)")
testing_cost = sess.run(
tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * test_X.shape[0]),
feed_dict={X: test_X, Y: test_Y}) # same function as cost above
print("Testing cost=", testing_cost)
print("Absolute mean square loss difference:", abs(
training_cost - testing_cost))
plt.plot(test_X, test_Y, 'b', label='Testing data')
y_nn = sess.run(forward_pass(test_X, W_1, b_1, W_2, b_2, W_o, b_o))
plt.plot(test_X,
y_nn.ravel(), #the ravel just makes it one dimensional
'r', label='Fitted line')
plt.legend()
plt.savefig('regression_nn.png')
#save the regression data
np.save('x_nn', test_X)
np.save('y_nn', y_nn)
| 3,481 |
driveanon/driveanon.py
|
tjcrone/driveanon
| 5 |
2026136
|
import io
import requests
def _get_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def _is_folder(response):
if 'P3P' in response.headers:
return True
else:
return False
def _get_response(blob_id):
session = requests.Session()
url = 'https://drive.google.com/open'
response = session.get(url, params = { 'id' : blob_id })
if _is_folder(response):
return response
url = "https://docs.google.com/uc?export=download"
response = session.get(url, params = { 'id' : blob_id }, stream = True,)
token = _get_token(response)
if token:
params = { 'id' : blob_id, 'confirm' : token }
response = session.get(url, params = params, stream = True)
return response
def open(blob_id):
""" Read a file from Google Drive into memory. Returns an open (BytesIO) file-like object. """
response = _get_response(blob_id)
return io.BytesIO(response.content)
def save(blob_id, filename = None, overwrite = False):
""" Save a file from Google Drive to disk."""
# get response
response = _get_response(blob_id)
# parse filename
if not filename:
filename = response.headers['Content-Disposition'].split('=')[1].split('"')[1]
# check if filename is a file
from pathlib import Path
p = Path(filename)
if p.is_file() and not overwrite:
raise FileExistsError('File exists: %s' % filename)
# write file
import builtins
with builtins.open(filename, 'wb') as w:
w.write(response.content)
return filename
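if __name__ == "__main__":
    # Usage sketch (not part of the original module). The blob id below is a
    # placeholder; replace it with the id portion of a shareable Google Drive
    # link before uncommenting the calls.
    example_blob_id = "REPLACE_WITH_A_REAL_BLOB_ID"
    # data = open(example_blob_id)                   # in-memory BytesIO object
    # print(len(data.getvalue()), "bytes read")
    # path = save(example_blob_id, overwrite=True)   # writes to disk, returns the filename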
| 1,658 |
Day_01/position.py
|
Uklusi/AdventOfCode2016
| 0 |
2022644
|
from copy import copy
class Position:
def __init__(self, x=0, y=0, orientation=0):
self.x = x
self.y = y
self.orientation = orientation
def turnRight(self):
self.orientation = (self.orientation + 1) % 4
def turnLeft(self):
self.orientation = (self.orientation - 1) % 4
def turn(self, direction):
if direction in ["R", "r", "1", 1]:
self.turnRight()
elif direction in ["L", "l", "-1", -1]:
self.turnLeft()
elif direction in ["0", 0]:
pass
else:
raise("DirectionError")
def move(self, n, direction=None):
if direction is None:
direction = self.orientation
if direction == 0:
self.y += n
elif direction == 1:
self.x += n
elif direction == 2:
self.y -= n
elif direction == 3:
self.x -= n
else:
raise("DirectionError")
def __add__(self, other):
return Position(self.x + other.x, self.y + other.y, self.orientation)
def current(self):
return (self.x, self.y)
def __hash__(self):
return hash((self.x, self.y))
def __eq__(self, other):
return (self.x, self.y) == (other.x, other.y)
def copy(self):
return copy(self)
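if __name__ == "__main__":
    # Minimal usage sketch (not part of the original file): start at the origin
    # facing north (orientation 0), turn and move, then read the coordinates.
    p = Position()
    p.turn("R")         # now facing east
    p.move(3)           # x += 3
    p.turnLeft()        # facing north again
    p.move(2)           # y += 2
    print(p.current())  # (3, 2)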
| 1,357 |
tricolour/stokes.py
|
smasoka/tricolour
| 1 |
2026152
|
# -*- coding: utf-8 -*-
import numba
import numpy as np
"""
Enumeration of stokes, linear and circular correlations used in
Measurement Set 2.0 as per Stokes.h in casacore:
https://casacore.github.io/casacore/classcasacore_1_1Stokes.html
the rest are left unimplemented.
"""
STOKES_TYPES = {
'I': 1, # stokes unpolarised
'Q': 2, # stokes linear
'U': 3, # stokes linear
'V': 4, # stokes circular
'RR': 5, # right-right circular
'RL': 6, # right-left cross-circular
'LR': 7, # left-right cross-circular
'LL': 8, # left-left circular
'XX': 9, # parallel linear
'XY': 10, # XY cross linear
'YX': 11, # YX cross linear
'YY': 12 # parallel linear
}
# Correlation dependencies required for reconstructing stokes values
# (corr1, corr2, a, s1, s2). stokes = a*(s1*corr1 + s2*corr2)
stokes_deps = {
'I': [('XX', 'YY', 0.5 + 0.0j, 1, 1), ('RR', 'LL', 0.5 + 0.0j, 1, 1)],
'Q': [('XX', 'YY', 0.5 + 0.0j, 1, -1), ('RL', 'LR', 0.5 + 0.0j, 1, 1)],
'U': [('XY', 'YX', 0.5 + 0.0j, 1, 1), ('RL', 'LR', 0.0 - 0.5j, 1, -1)],
'V': [('XY', 'YX', 0.0 - 0.5j, 1, -1), ('RR', 'LL', 0.5 + 0.0j, 1, -1)]
}
# Convert to numeric stokes types
stokes_deps = {k: [(STOKES_TYPES[c1], STOKES_TYPES[c2], a, s1, s2)
for (c1, c2, a, s1, s2) in deps]
for k, deps in stokes_deps.items()}
def stokes_corr_map(corr_types):
"""
Produces a map describing how to combine visibility correlations
in order to form a stokes parameter.
Parameters
----------
corr_ids : list of integers
List of correlation types as defined in `casacore <stokes>_`_
.. _stokes: https://casacore.github.io/casacore/classcasacore_1_1Stokes.html
Returns
-------
dict
Correlation map with schema :code:`{ stokes: (c1, c2, a, s1, s2)}`
.. code-block:: python
stokes = a*(s1*vis[:,:,c1] + s2*vis[:,:,c2])
""" # noqa
corr_type_set = set(corr_types)
corr_maps = {}
for stokes, deps in stokes_deps.items():
for (corr1, corr2, alpha, sign1, sign2) in deps:
# If both correlations are available as dependencies
# we can generate this stokes parameter
if len(corr_type_set.intersection((corr1, corr2))) == 2:
c1 = corr_types.index(corr1)
c2 = corr_types.index(corr2)
corr_maps[stokes] = (c1, c2, alpha, sign1, sign2)
return corr_maps
@numba.jit(nopython=True, nogil=True, cache=True)
def unpolarised_intensity(vis, stokes_unpol, stokes_pol):
r"""
Derives the unpolarised intensity from visibilities
and tuples describing how to derive stokes parameters
from visibility correlations.
.. math::
        I - \sqrt{Q^2 + U^2 + V^2}
``stokes_unpol`` and ``stokes_pol`` can be derived from
:func:`stokes_corr_map`.
Parameters
----------
vis: :class:`numpy.ndarray`
Visibilities of shape :code:`(row, chan, corr)`
stokes_unpol: tuple
Tuple with schema :code:`(c1,c2,a,s1,s2)` describing
how to derive unpolarised stokes parameters (I):
1. ``c1`` -- First correlation index
2. ``c2`` -- Second correlation index
3. ``a`` -- alpha, multiplier
4. ``s1`` -- First correlation sign
5. ``s2`` -- Second correlation sign
stokes_pol: tuple
Tuple with schema :code:`(c1,c2,a,s1,s2)` describing
how to derive polarised stokes parameters (Q,U,V):
Returns
-------
:class:`numpy.ndarray`
Unpolarised intensities of shape :code:`(row, chan, 1)`.
"""
if not len(stokes_unpol) == 1:
raise ValueError("There should be exactly one entry "
"for unpolarised stokes (stokes_unpol)")
if not len(stokes_pol) > 0:
raise ValueError("No entries for polarised stokes (stokes_pol)")
# Only one output correlation -- unpolarised intensity
out_vis = np.empty(vis.shape[:2] + (1,), vis.dtype)
for r in range(vis.shape[0]):
for f in range(vis.shape[1]):
# Polarised intensity (Q,U,V)
pol = 0
for (c1, c2, a, s1, s2) in stokes_pol:
value = a * (s1 * vis[r, f, c1] +
s2 * vis[r, f, c2])
# uncalibrated data may have a substantial amount of power in
# the imaginary
pol += np.abs(value)**2
# use absolute to be certain
# Unpolarised intensity (I)
unpol = 0
for (c1, c2, a, s1, s2) in stokes_unpol:
value = a * (s1 * vis[r, f, c1] +
s2 * vis[r, f, c2])
# uncalibrated data may have a substantial amount of power in
# the imaginary
unpol += np.abs(value)
# use absolute to be certain
# I - sqrt(Q^2 + U^2 + V^2)
out_vis[r, f, 0] = unpol - np.sqrt(pol)
return out_vis
@numba.jit(nopython=True, nogil=True, cache=True)
def polarised_intensity(vis, stokes_pol):
r"""
Derives the polarised intensity from visibilities
and tuples describing how to derive stokes parameters
from visibility correlations.
.. math::
        \sqrt{Q^2 + U^2 + V^2}
``stokes_pol`` can be derived from :func:`stokes_corr_map`.
Parameters
----------
vis: :class:`numpy.ndarray`
Visibilities of shape :code:`(row, chan, corr)`
stokes_pol: tuple
Tuple with schema :code:`(c1,c2,a,s1,s2)` describing
how to derive polarised stokes parameters (Q,U,V):
1. ``c1`` -- First correlation index
2. ``c2`` -- Second correlation index
3. ``a`` -- alpha, multiplier
4. ``s1`` -- First correlation sign
5. ``s2`` -- Second correlation sign
Returns
-------
:class:`numpy.ndarray`
        Polarised intensities of shape :code:`(row, chan, 1)`.
"""
# Only one output correlation -- polarised intensity
out_vis = np.empty(vis.shape[:2] + (1,), vis.dtype)
for r in range(vis.shape[0]):
for f in range(vis.shape[1]):
# Polarised intensity (Q,U,V)
pol = 0
for (c1, c2, a, s1, s2) in stokes_pol:
value = a * (s1 * vis[r, f, c1] +
s2 * vis[r, f, c2])
# uncalibrated data may have a substantial amount of power in
# the imaginary
pol += np.abs(value)**2
# use absolute to be certain
# sqrt(Q^2 + U^2 + V^2)
out_vis[r, f, 0] = np.sqrt(pol)
return out_vis
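if __name__ == "__main__":
    # Usage sketch (not part of the original module): build the correlation map
    # for a linear-feed correlation ordering (XX, XY, YX, YY, using the casacore
    # codes from STOKES_TYPES above) and inspect which stokes parameters it can
    # reconstruct; expect entries for 'I', 'Q', 'U' and 'V'.
    linear_corrs = [STOKES_TYPES[c] for c in ('XX', 'XY', 'YX', 'YY')]
    print(stokes_corr_map(linear_corrs))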
| 6,745 |
Advent2020/14.py
|
SSteve/AdventOfCode
| 0 |
2024881
|
import re
from collections import defaultdict
from itertools import combinations
from typing import List
TEST = """mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
mem[8] = 11
mem[7] = 101
mem[8] = 0"""
TEST2 = """mask = 000000000000000000000000000000X1001X
mem[42] = 100
mask = 00000000000000000000000000000000X0XX
mem[26] = 1"""
maskRegex = re.compile(r"mask = (.*)")
memRegex = re.compile(r"mem\[(\d+)\] = (\d+)")
def Part1(program: List[str]) -> int:
orMask = 0
andMask = 2**36 - 1
memory = defaultdict(int)
for instruction in program:
match = maskRegex.match(instruction)
if match:
orString = ''
andString = ''
for char in match[1]:
if char == 'X':
orString += '0'
andString += '1'
else:
orString += char
andString += char
orMask = int(orString, 2)
andMask = int(andString, 2)
else:
match = memRegex.match(instruction)
value = (int(match[2]) | orMask) & andMask
memory[int(match[1])] = value
return sum(memory[key] for key in memory)
def Part2(program: List[str]) -> int:
memory = defaultdict(int)
orMask = 0
memoryBits = []
for instruction in program:
match = maskRegex.match(instruction)
if match:
memoryBits = []
orString = ''
memoryString = ''
for char in match[1]:
if char == 'X':
# We'll start off by setting all the floating bits so put
# a '1' in the or mask.
orString += '1'
memoryString += '1'
else:
orString += char
memoryString += '0'
orMask = int(orString, 2)
memoryValue = int(memoryString, 2)
for bit in range(36):
bitValue = 2 ** bit
if memoryValue & bitValue:
memoryBits.append(bitValue)
else:
match = memRegex.match(instruction)
# Set all the overwritten bits and floating bits in the address.
address = int(match[1]) | orMask
value = int(match[2])
# Write to the address with all floating bits set.
memory[address] = value
# Go through all bit combinations and turn off the address bits
# in that combination.
for maskCount in range(len(memoryBits)):
for bitValues in combinations(memoryBits, maskCount + 1):
bitMask = 2 ** 36 - 1 - sum(bitValues)
maskedAddress = address & bitMask
memory[maskedAddress] = value
return sum(memory[key] for key in memory)
if __name__ == "__main__":
testPart1 = Part1(TEST.splitlines())
assert testPart1 == 165
testPart2 = Part2(TEST2.splitlines())
assert testPart2 == 208
with open("14.txt", "r") as infile:
instructions = infile.read().splitlines()
part1 = Part1(instructions)
print(f"Part 1: {part1}")
part2 = Part2(instructions)
print(f"Part 2: {part2}")
| 3,232 |
school/FRC_2412_CODE/python_10/Garchomp/this doesn't work/threadingpygame.py
|
Xe/code
| 7 |
2024197
|
import threading
import pygame_joy_test
class joyThread(threading.Thread):
def __init__(self, name="pygame test thread"):
threading.Thread.__init__(self)
self.setName(name)
def run(self):
self.xy=[pygame_joy_test.jx, pygame_joy_test.jy]
def getxy(self):
return self.xy
n = joyThread()
n.run()
| 351 |
tests/parsing/test_grammar.py
|
qbecb1zen/booleano
| 4 |
2025959
|
# -*- coding: utf-8 -*-
# Copyright (c) 2009 by <NAME> <http://gustavonarea.net/>.
# This file is part of Booleano <http://code.gustavonarea.net/booleano/>.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, distribute with
# modifications, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# ABOVE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
# IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Except as contained in this notice, the name(s) of the above copyright
# holders shall not be used in advertising or otherwise to promote the sale,
# use or other dealings in this Software without prior written authorization.
"""
Test suite for the grammar configurations.
"""
from __future__ import unicode_literals
from nose.tools import eq_, ok_, assert_raises
from booleano.parser import Grammar
from booleano.exc import GrammarError
class TestDefaultGrammar(object):
"""Tests for the grammar with the default properties, at least initially."""
def setUp(self):
self.grammar = Grammar()
# Token handling stuff
def test_default_tokens(self):
"""All the tokens must have an initial value."""
# Logical connectives:
eq_(self.grammar.get_token("not"), "~")
eq_(self.grammar.get_token("and"), "&")
eq_(self.grammar.get_token("or"), "|")
eq_(self.grammar.get_token("xor"), "^")
# Relational operators
eq_(self.grammar.get_token("eq"), "==")
eq_(self.grammar.get_token("ne"), "!=")
eq_(self.grammar.get_token("lt"), "<")
eq_(self.grammar.get_token("gt"), ">")
eq_(self.grammar.get_token("le"), "<=")
eq_(self.grammar.get_token("ge"), ">=")
# Set operators
eq_(self.grammar.get_token("belongs_to"), u"∈")
eq_(self.grammar.get_token("is_subset"), u"⊂")
eq_(self.grammar.get_token("set_start"), "{")
eq_(self.grammar.get_token("set_end"), "}")
eq_(self.grammar.get_token("element_separator"), ",")
# Grouping marks
eq_(self.grammar.get_token("string_start"), '"')
eq_(self.grammar.get_token("string_end"), '"')
eq_(self.grammar.get_token("group_start"), "(")
eq_(self.grammar.get_token("group_end"), ")")
# Function call stuff
eq_(self.grammar.get_token("arguments_start"), "(")
eq_(self.grammar.get_token("arguments_end"), ")")
eq_(self.grammar.get_token("arguments_separator"), ",")
# Numeric stuff
eq_(self.grammar.get_token("positive_sign"), "+")
eq_(self.grammar.get_token("negative_sign"), "-")
eq_(self.grammar.get_token("decimal_separator"), ".")
eq_(self.grammar.get_token("thousands_separator"), ",")
# Miscellaneous
eq_(self.grammar.get_token("identifier_spacing"), "_")
eq_(self.grammar.get_token("namespace_separator"), ":")
def test_get_all_tokens(self):
eq_(self.grammar.get_all_tokens(),
self.grammar.default_tokens
)
def test_setting_existing_token(self):
self.grammar.set_token("negative_sign", "!")
eq_(self.grammar.get_token("negative_sign"), "!")
def test_requesting_non_existing_token(self):
assert_raises(GrammarError, self.grammar.get_token, "non_existing")
def test_setting_non_existing_token(self):
assert_raises(GrammarError, self.grammar.set_token, "non_existing", "-")
# Setting handling stuff
def test_default_settings(self):
eq_(self.grammar.get_setting("superset_right_in_is_subset"), True)
eq_(self.grammar.get_setting("set_right_in_contains"), True)
eq_(self.grammar.get_setting("optional_positive_sign"), True)
def test_setting_existing_setting(self):
self.grammar.set_setting("set_right_in_contains", False)
eq_(self.grammar.get_setting("set_right_in_contains"), False)
def test_requesting_non_existing_setting(self):
assert_raises(GrammarError, self.grammar.get_setting, "non_existing")
def test_setting_non_existing_setting(self):
assert_raises(GrammarError, self.grammar.set_setting, "non_existing",
None)
# Custom generator handling stuff
def test_no_custom_generators_by_default(self):
"""There must not be custom generators by default."""
eq_(self.grammar.get_custom_generator("operation"), None)
eq_(self.grammar.get_custom_generator("string"), None)
eq_(self.grammar.get_custom_generator("number"), None)
def test_setting_existing_generator(self):
mock_generator = lambda: None
self.grammar.set_custom_generator("number", mock_generator)
eq_(self.grammar.get_custom_generator("number"), mock_generator)
def test_requesting_non_existing_generator(self):
assert_raises(GrammarError, self.grammar.get_custom_generator,
"non_existing")
def test_setting_non_existing_generator(self):
mock_generator = lambda: None
assert_raises(GrammarError, self.grammar.set_custom_generator,
"non_existing", mock_generator)
#
class TestEarlyCustomizedGrammar(object):
"""
Tests for the grammar customized from the beginning (i.e., in the
constructor).
"""
def test_custom_tokens(self):
"""The tokens can be customized using the keyword arguments."""
grammar = Grammar(eq="=", ne="<>")
# Checking the two new tokens:
eq_(grammar.get_token("eq"), "=")
eq_(grammar.get_token("ne"), "<>")
# Everything else must have not changed:
eq_(grammar.get_token("not"), "~")
eq_(grammar.get_token("and"), "&")
eq_(grammar.get_token("or"), "|")
eq_(grammar.get_token("xor"), "^")
eq_(grammar.get_token("lt"), "<")
eq_(grammar.get_token("gt"), ">")
eq_(grammar.get_token("le"), "<=")
eq_(grammar.get_token("ge"), ">=")
eq_(grammar.get_token("belongs_to"), u"∈")
eq_(grammar.get_token("is_subset"), u"⊂")
eq_(grammar.get_token("set_start"), "{")
eq_(grammar.get_token("set_end"), "}")
eq_(grammar.get_token("element_separator"), ",")
eq_(grammar.get_token("string_start"), '"')
eq_(grammar.get_token("string_end"), '"')
eq_(grammar.get_token("group_start"), "(")
eq_(grammar.get_token("group_end"), ")")
eq_(grammar.get_token("arguments_start"), "(")
eq_(grammar.get_token("arguments_end"), ")")
eq_(grammar.get_token("arguments_separator"), ",")
eq_(grammar.get_token("positive_sign"), "+")
eq_(grammar.get_token("negative_sign"), "-")
eq_(grammar.get_token("decimal_separator"), ".")
eq_(grammar.get_token("thousands_separator"), ",")
eq_(grammar.get_token("identifier_spacing"), "_")
eq_(grammar.get_token("namespace_separator"), ":")
def test_settings(self):
settings = {'superset_right_in_is_subset': False,
'set_right_in_contains': None}
grammar = Grammar(settings)
# Checking the new settings:
eq_(grammar.get_setting("superset_right_in_is_subset"), False)
eq_(grammar.get_setting("set_right_in_contains"), None)
# Everything else must have not changed:
eq_(grammar.get_setting("optional_positive_sign"), True)
def test_generators(self):
mock_generator = lambda: None
generators = {'string': mock_generator}
grammar = Grammar(None, generators)
# Checking the new generators:
eq_(grammar.get_custom_generator("string"), mock_generator)
# Everything else must have not changed:
eq_(grammar.get_custom_generator("operation"), None)
eq_(grammar.get_custom_generator("number"), None)
def test_get_all_tokens(self):
grammar = Grammar(eq="=", ne="<>")
expected = grammar.default_tokens.copy()
expected['eq'] = '='
expected['ne'] = '<>'
eq_(grammar.get_all_tokens(),
expected
)
| 8,872 |
wsdot_traffic/util.py
|
pferate/wsdot_traveltimes
| 0 |
2024875
|
import json
import re
from datetime import datetime, timedelta
def bytes2json(bytes_in):
return json.loads(bytes_in.decode('utf-8'))
def clean_js_timestamp(raw_js_datestring=None):
# test_string = '/Date(1431113400000-0700)/'
    date_pattern = r'Date\((\d*)(\d{3})([-+]\d{2})00\)'
matches = re.search(date_pattern, raw_js_datestring)
timestamp, millisecs, tz_offset = matches.groups()
offset = timedelta(hours=int(tz_offset))
# print(offset)
# dt_obj = datetime.utcfromtimestamp(int(timestamp)) + offset
dt_obj = datetime.utcfromtimestamp(int(timestamp))
return dt_obj
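if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): parse the sample
    # JavaScript timestamp from the comment above. Note that the timezone offset
    # is extracted but not applied, so the result is a naive UTC datetime.
    print(clean_js_timestamp('/Date(1431113400000-0700)/'))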
| 608 |
msvlm/msspectrum/io/hdf5.py
|
plpla/msvlm
| 3 |
2026361
|
from __future__ import print_function, division, absolute_import, unicode_literals
__author__ = '<NAME>'
import h5py as h
import json
from numpy import float
from ..spectrum import Spectrum
from ..utils import _is_mz_equal, _is_mz_precision_equal, _is_metadata_type_dict
def hdf5_save(file_name, spectra, compression_type="gzip", compression_opts=4):
"""
Saves a list of spectra to a HDF5 file.
Parameters:
-----------
file_name: str
The path to the output file.
spectra: list of Spectrum
The list of spectra to save.
compression_type: str, default="gzip"
The type of compression to use for compressing the datasets. Refer to H5py's documentation.
compression_opts: variable, default=4
The compression options for the specified compression type. Refer to H5py's documentation. In the case of gzip
compression, this specifies the level of compression used from 0 to 9.
Note:
-----
* The spectra must have identical m/z values.
* The spectra must have the same m/z precision.
"""
if not _is_mz_equal(spectra[0].mz_values, spectra):
raise ValueError("The spectra m/z values must be identicial to save to HDF5.")
if not _is_mz_precision_equal(spectra[0].mz_precision, spectra):
raise ValueError("The spectra m/z precisions must be equal to save to HDF5.")
if not _is_metadata_type_dict(spectra):
raise ValueError("The spectra metadata must be of type dict or None to save to HDF5.")
file = h.File(file_name, "w")
n_spectra = len(spectra)
n_mz_values = len(spectra[0])
file.create_dataset("precision", data=spectra[0].mz_precision)
file.create_dataset("mz", data=spectra[0].mz_values, compression=compression_type,
compression_opts=compression_opts)
spectra_intensity_dataset = file.create_dataset("intensity",
shape=(n_spectra, n_mz_values),
dtype=float,
chunks=(1, n_mz_values),
compression=compression_type,
compression_opts=compression_opts)
for i, spectrum in enumerate(spectra):
spectra_intensity_dataset[i] = spectrum.intensity_values
dt = h.special_dtype(vlen=str)
spectra_metadata_dataset = file.create_dataset("metadata", shape=(n_spectra,), dtype=dt)
for i, spectrum in enumerate(spectra):
spectra_metadata_dataset[i] = json.dumps(spectrum.metadata)
file.close()
def hdf5_load(file_name, metadata=True):
"""
Loads spectra from a HDF5 file.
Parameters:
-----------
file_name: str
The path to the file to load.
metadata: boolean
Defaults to True. Boolean to check if we load the metadata along with the spectrum data.
Returns:
-------
spectra: list of Spectrum
The list of spectra extracted form the file.
"""
file = h.File(file_name, "r")
mz_precision = file["precision"][...]
mz_values = file["mz"][...]
spectra_intensity_dataset = file["intensity"]
if metadata and "metadata" in file:
spectra_metadata_dataset = file['metadata']
else:
spectra_metadata_dataset = [None] * spectra_intensity_dataset.shape[0]
spectra = []
for spectrum_intensity_values, spectrum_metadata in zip(spectra_intensity_dataset, spectra_metadata_dataset):
        try:
            spectrum_metadata = json.loads(spectrum_metadata.decode("utf-8")) if spectrum_metadata is not None else None
        except AttributeError:
            spectrum_metadata = json.loads(spectrum_metadata) if spectrum_metadata is not None else None
spectra.append(Spectrum(mz_values=mz_values, intensity_values=spectrum_intensity_values,
mz_precision=mz_precision, metadata=spectrum_metadata))
file.close()
return spectra
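if __name__ == "__main__":
    # Illustrative round-trip sketch (not part of the original module). The
    # Spectrum keyword arguments mirror those used in hdf5_load above; the m/z
    # precision and metadata values are placeholders.
    import numpy as np
    mz = np.arange(100.0, 110.0, 0.1)
    spectra = [Spectrum(mz_values=mz,
                        intensity_values=np.random.rand(len(mz)),
                        mz_precision=1,
                        metadata={"sample": i})
               for i in range(3)]
    hdf5_save("example_spectra.h5", spectra)
    print(len(hdf5_load("example_spectra.h5")), "spectra reloaded")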
| 4,041 |
wagtail/wagtailadmin/rich_text.py
|
Girbons/wagtail
| 1 |
2025484
|
from __future__ import absolute_import, unicode_literals
import json
from django.conf import settings
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.forms import Media, widgets
from django.utils.module_loading import import_string
from wagtail.utils.widgets import WidgetWithScript
from wagtail.wagtailadmin.edit_handlers import RichTextFieldPanel
from wagtail.wagtailcore.rich_text import DbWhitelister, expand_db_html
class HalloRichTextArea(WidgetWithScript, widgets.Textarea):
def get_panel(self):
return RichTextFieldPanel
def render(self, name, value, attrs=None):
if value is None:
translated_value = None
else:
translated_value = expand_db_html(value, for_editor=True)
return super(HalloRichTextArea, self).render(name, translated_value, attrs)
def render_js_init(self, id_, name, value):
return "makeHalloRichTextEditable({0});".format(json.dumps(id_))
def value_from_datadict(self, data, files, name):
original_value = super(HalloRichTextArea, self).value_from_datadict(data, files, name)
if original_value is None:
return None
return DbWhitelister.clean(original_value)
@property
def media(self):
return Media(js=[
static('wagtailadmin/js/vendor/hallo.js'),
static('wagtailadmin/js/hallo-bootstrap.js'),
static('wagtailadmin/js/hallo-plugins/hallo-wagtaillink.js'),
static('wagtailadmin/js/hallo-plugins/hallo-hr.js'),
static('wagtailadmin/js/hallo-plugins/hallo-requireparagraphs.js'),
])
DEFAULT_RICH_TEXT_EDITORS = {
'default': {
'WIDGET': 'wagtail.wagtailadmin.rich_text.HalloRichTextArea'
}
}
def get_rich_text_editor_widget(name='default'):
editor_settings = getattr(settings, 'WAGTAILADMIN_RICH_TEXT_EDITORS', DEFAULT_RICH_TEXT_EDITORS)
editor = editor_settings[name]
options = editor.get('OPTIONS', None)
if options is None:
return import_string(editor['WIDGET'])()
return import_string(editor['WIDGET'])(options=options)
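# Example configuration sketch (illustrative only, not part of this module): the
# WAGTAILADMIN_RICH_TEXT_EDITORS setting maps editor names to widget paths, with
# an optional OPTIONS dict passed to the widget constructor. The 'custom' entry
# and its widget path below are hypothetical.
#
# WAGTAILADMIN_RICH_TEXT_EDITORS = {
#     'default': {
#         'WIDGET': 'wagtail.wagtailadmin.rich_text.HalloRichTextArea',
#     },
#     'custom': {
#         'WIDGET': 'myapp.widgets.MyRichTextArea',
#         'OPTIONS': {'toolbar': 'minimal'},
#     },
# }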
| 2,137 |
cupyimg/numpy/core/numeric.py
|
FX196/cupyimg
| 39 |
2024813
|
"""Implementations of functions from the NumPy API via upfirdn.
"""
import cupy
__all__ = ["convolve", "correlate"]
def convolve(a, v, mode="full", *, xp=None):
"""
Returns the discrete, linear convolution of two one-dimensional sequences.
The convolution operator is often seen in signal processing, where it
models the effect of a linear time-invariant system on a signal [1]_. In
probability theory, the sum of two independent random variables is
distributed according to the convolution of their individual
distributions.
If `v` is longer than `a`, the arrays are swapped before computation.
Parameters
----------
a : (N,) array_like
First one-dimensional input array.
v : (M,) array_like
Second one-dimensional input array.
mode : {'full', 'valid', 'same'}, optional
'full':
By default, mode is 'full'. This returns the convolution
at each point of overlap, with an output shape of (N+M-1,). At
the end-points of the convolution, the signals do not overlap
completely, and boundary effects may be seen.
'same':
Mode 'same' returns output of length ``max(M, N)``. Boundary
effects are still visible.
'valid':
Mode 'valid' returns output of length
``max(M, N) - min(M, N) + 1``. The convolution product is only given
for points where the signals overlap completely. Values outside
the signal boundary have no effect.
Returns
-------
out : ndarray
Discrete, linear convolution of `a` and `v`.
See Also
--------
scipy.signal.fftconvolve : Convolve two arrays using the Fast Fourier
Transform.
scipy.linalg.toeplitz : Used to construct the convolution operator.
polymul : Polynomial multiplication. Same output as convolve, but also
accepts poly1d objects as input.
Notes
-----
The main difference in functionality as compared to NumPy is that this
version only operates using np.float32, np.float64, np.complex64 and
np.complex128.
The discrete convolution operation is defined as
.. math:: (a * v)[n] = \\sum_{m = -\\infty}^{\\infty} a[m] v[n - m]
It can be shown that a convolution :math:`x(t) * y(t)` in time/space
is equivalent to the multiplication :math:`X(f) Y(f)` in the Fourier
domain, after appropriate padding (padding is necessary to prevent
circular convolution). Since multiplication is more efficient (faster)
than convolution, the function `scipy.signal.fftconvolve` exploits the
FFT to calculate the convolution of large data-sets.
References
----------
.. [1] Wikipedia, "Convolution",
https://en.wikipedia.org/wiki/Convolution
Examples
--------
Note how the convolution operator flips the second array
before "sliding" the two across one another:
>>> from cupyimg.numpy import convolve
>>> convolve(cupy.array([1, 2, 3]), cupy.array([0, 1, 0.5]))
array([0. , 1. , 2.5, 4. , 1.5])
Only return the middle values of the convolution.
Contains boundary effects, where zeros are taken
into account:
>>> convolve(cupy.array([1, 2, 3]), cupy.array([0, 1, 0.5]), 'same')
array([1. , 2.5, 4. ])
The two arrays are of the same length, so there
is only one position where they completely overlap:
>>> convolve(cupy.array([1, 2, 3]),cupy.array([0, 1, 0.5]), 'valid')
array([2.5])
"""
from fast_upfirdn.cupy import convolve1d
a = cupy.array(a, copy=False, ndmin=1)
v = cupy.array(v, copy=False, ndmin=1)
if len(a) < len(v):
v, a = a, v
if len(a) == 0:
raise ValueError("a cannot be empty")
if len(v) == 0:
raise ValueError("v cannot be empty")
if mode == "full":
offset = 0
size = len(a) + len(v) - 1
crop = False
elif mode == "same":
offset = (len(v) - 1) // 2 # needed - 1 here to match NumPy
size = len(a)
crop = True
elif mode == "valid":
offset = len(v) - 1
size = len(a) - len(v) + 1
crop = True
else:
raise ValueError("unrecognized mode: {}".format(mode))
out = convolve1d(v, a, offset=offset, mode="constant", cval=0, crop=crop)
return out[:size]
def correlate(a, v, mode="valid", *, xp=None):
"""
Cross-correlation of two 1-dimensional sequences.
This function computes the correlation as generally defined in signal
processing texts::
c_{av}[k] = sum_n a[n+k] * conj(v[n])
with a and v sequences being zero-padded where necessary and conj being
the conjugate.
Parameters
----------
a, v : array_like
Input sequences.
mode : {'valid', 'same', 'full'}, optional
Refer to the `convolve` docstring. Note that the default
is 'valid', unlike `convolve`, which uses 'full'.
old_behavior : bool
`old_behavior` was removed in NumPy 1.10. If you need the old
behavior, use `multiarray.correlate`.
Returns
-------
out : ndarray
Discrete cross-correlation of `a` and `v`.
See Also
--------
convolve : Discrete, linear convolution of two one-dimensional sequences.
Notes
-----
The main difference in functionality vs. NumPy is that this version only
operates using np.float32, np.float64, np.complex64 and np.complex128.
The definition of correlation above is not unique and sometimes correlation
may be defined differently. Another common definition is::
c'_{av}[k] = sum_n a[n] conj(v[n+k])
which is related to ``c_{av}[k]`` by ``c'_{av}[k] = c_{av}[-k]``.
Examples
--------
>>> from cupyimg.numpy import correlate
>>> correlate(cupy.array([1, 2, 3]), cupy.array([0, 1, 0.5]))
array([3.5])
>>> correlate(cupy.array([1, 2, 3]), cupy.array([0, 1, 0.5]), "same")
array([2. , 3.5, 3. ])
>>> correlate(cupy.array([1, 2, 3]), cupy.array([0, 1, 0.5]), "full")
array([0.5, 2. , 3.5, 3. , 0. ])
Using complex sequences:
>>> correlate(cupy.array([1+1j, 2, 3-1j]), cupy.array([0, 1, 0.5j]), 'full')
array([ 0.5-0.5j, 1.0+0.j , 1.5-1.5j, 3.0-1.j , 0.0+0.j ])
Note that you get the time reversed, complex conjugated result
when the two input sequences change places, i.e.,
``c_{va}[k] = c^{*}_{av}[-k]``:
>>> correlate(cupy.array([0, 1, 0.5j]), cupy.array([1+1j, 2, 3-1j]), 'full')
array([ 0.0+0.j , 3.0+1.j , 1.5+1.5j, 1.0+0.j , 0.5+0.5j])
"""
v = v[::-1]
if v.dtype.kind == "c":
v = cupy.conj(v)
return convolve(a, v, mode=mode, xp=xp)
| 6,721 |
main.py
|
omkarshelar/simple-url-shortner
| 0 |
2026401
|
from flask import Flask, jsonify, render_template, redirect, request, session, url_for, flash, send_from_directory
from models import add_link, is_short_link_valid, get_link, create_user, login_user, check_user_present, increment_count, get_analysis,delete_link, activeToggle
from forms import ShortenForm, SignupForm, LoginForm
import random, string, bcrypt, os
app = Flask(__name__)
app.secret_key = 'any random string'
host = 'http://127.0.0.1:5000/s/' #Change this to the URL you are using.
@app.route('/', methods=['GET','POST'])
def index():
if 'username' not in session:
#flash("Please login to continue")
return redirect(url_for('login'))
form = ShortenForm()
if form.validate_on_submit():
if(form.short_link.data):
short_link = form.short_link.data
else:
short_link = ''.join(random.choices(string.ascii_letters + string.digits, k=5))
long_link = form.link.data
while(not is_short_link_valid(short_link)):
short_link = ''.join(random.choices(string.ascii_letters + string.digits, k=5))
if not (long_link.startswith("http://") or long_link.startswith("https://")):
long_link = 'http://'+long_link
owner = session['id']
if add_link(short_link, long_link, owner):
# return jsonify(short_link = short_link)
short_link = host+short_link
# print("Short Link:"+short_link)
return render_template('result.html', short_link=short_link)
else:
return '<h1>Server Error</h1>', 500
return render_template('index.html',form=form)
@app.route('/s/<short_link>/', methods=['GET'])
def resolve(short_link):
increment_count(short_link)
long_link = get_link(short_link)
if(long_link is not None):
return redirect(long_link, code=302)
else:
return render_template('404.html')
@app.route('/checkAvailable',methods=['GET'])
def checkAvailabe():
sl = request.args.get('link')
if(sl and len(sl)>4 and len(sl)<11):
if(is_short_link_valid(sl)):
msg = {"available":"true"}
else:
msg = {"available":"false"}
else:
msg = {"available":"false"}
return jsonify(msg)
@app.route('/signup/', methods=['GET', 'POST'])
def signup():
form = SignupForm()
print(form.validate_on_submit())
if form.validate_on_submit():
username = form.name.data
password = form.password.data
password1 = form.password_conf.data
if password != password1:
return '<h1>Password do not match!</h1>'
if check_user_present(username):
flash('User already present')
return redirect(url_for('login'))
hashed_pwd = bcrypt.hashpw(password.encode('utf8'), bcrypt.gensalt())
if bcrypt.checkpw(password.encode('utf8'), hashed_pwd):
res = create_user(username, hashed_pwd)
username,_,user_id = login_user(username)
if res:
session['id'] = user_id
session['username'] = username
flash('Signup Success!')
return redirect(url_for('index'))
else:
flash('Signup Failed!')
return redirect(url_for('signup'))
return render_template('signup.html',form=form)
@app.route('/analysis/', methods=['GET'])
def analysis():
if 'username' not in session:
flash("Please login to continue")
return redirect(url_for('login'))
user_id = session['id']
res = get_analysis(user_id)
print(res)
return render_template('analysis.html', res=res, host=host)
@app.route('/login/', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
username = form.name.data
password = form.password.data
user_name, user_pwd_hash, user_id = login_user(username)
if user_name is None:
flash('User not present')
return redirect(url_for('login'))
elif not bcrypt.checkpw(password.encode('utf8'), user_pwd_hash):
flash('Incorrect Password!')
return redirect(url_for('login'))
else:
session['id'] = user_id
session['username'] = username
#flash('Login Success!')
return redirect(url_for('index'))
return render_template('login.html',form=form)
@app.route('/logout/', methods=['GET'])
def logout():
session.pop('username', None)
session.pop('id', None)
return redirect(url_for('login'))
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(app.root_path, 'static'),'favicon.png')
@app.route('/static/<filename>')
def send(filename):
    return send_from_directory(os.path.join(app.root_path, 'static'), filename)
@app.route('/delete/<short_link>')
def delete(short_link):
if delete_link(short_link):
flash('Link Deleted')
else:
flash("Delete failed")
return redirect(url_for('analysis'))
@app.route('/activateToggle',methods=['GET'])
def activateToggle():
short_link = request.args.get('sl')
status = request.args.get('status')
flag, new_status = activeToggle(short_link, status)
if(new_status and flag):
return jsonify({"status":1,"new_status":new_status})
else:
return jsonify({"status":0,"new_status":new_status})
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=5000)
| 4,882 |
main_single.py
|
jhoon-oh/FL_BABU
| 10 |
2025039
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import os
import copy
import pickle
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from tqdm import tqdm
from utils.options import args_parser
from utils.train_utils import get_data, get_model
if __name__ == '__main__':
# parse args
args = args_parser()
args.device = torch.device('cuda:{}'.format(args.gpu) if torch.cuda.is_available() and args.gpu != -1 else 'cpu')
base_dir = './save/{}/{}_single_{}/{}/'.format(args.dataset, args.model, args.opt, args.results_save)
algo_dir = 'blr_{}_hlr{}_bm{}_hm_{}'.format(args.body_lr, args.head_lr, args.body_m, args.head_m)
if not os.path.exists(os.path.join(base_dir, algo_dir)):
os.makedirs(os.path.join(base_dir, algo_dir), exist_ok=True)
# set dataset
dataset_train, dataset_test = get_data(args, env='single')
train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=128, shuffle=True, num_workers=4)
test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=128, num_workers=4)
dataloaders = {'train': train_loader,
'test': test_loader}
# build a model
net_glob = get_model(args)
# Basically, He uniform
if args.results_save=='xavier_uniform':
nn.init.xavier_uniform_(net_glob.linear.weight, gain=nn.init.calculate_gain('relu'))
elif args.results_save=='xavier_normal':
nn.init.xavier_normal_(net_glob.linear.weight, gain=nn.init.calculate_gain('relu'))
elif args.results_save=='kaiming_uniform':
nn.init.kaiming_uniform_(net_glob.linear.weight, nonlinearity='relu')
elif args.results_save=='kaiming_normal':
        nn.init.kaiming_normal_(net_glob.linear.weight, nonlinearity='relu')
elif args.results_save=='orthogonal':
nn.init.orthogonal_(net_glob.linear.weight, gain=nn.init.calculate_gain('relu'))
elif args.results_save=='not_orthogonal':
nn.init.uniform_(net_glob.linear.weight, a=0.45, b=0.55)
net_glob.linear.weight.data = net_glob.linear.weight.data / torch.norm(net_glob.linear.weight.data, dim=1, keepdim=True)
nn.init.zeros_(net_glob.linear.bias)
# set optimizer
body_params = [p for name, p in net_glob.named_parameters() if not 'linear' in name]
head_params = [p for name, p in net_glob.named_parameters() if 'linear' in name]
if args.opt == 'SGD':
optimizer = torch.optim.SGD([{'params': body_params, 'lr': args.body_lr, 'momentum': args.body_m},
{'params': head_params, 'lr': args.head_lr, 'momentum': args.head_m}],
weight_decay=5e-4)
elif args.opt == 'RMSProp':
optimizer = torch.optim.RMSprop([{'params': body_params, 'lr': args.body_lr, 'momentum': args.body_m},
{'params': head_params, 'lr': args.head_lr, 'momentum': args.head_m}],
weight_decay=5e-4)
elif args.opt == 'ADAM':
optimizer = torch.optim.Adam([{'params': body_params, 'lr': args.body_lr, 'betas': (args.body_m, 1.11*args.body_m)},
{'params': head_params, 'lr': args.head_lr, 'betas': (args.head_m, 1.11*args.head_m)}],
weight_decay=5e-4)
# set scheduler
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
[80, 120],
gamma=0.1,
last_epoch=-1)
# set criterion
criterion = nn.CrossEntropyLoss()
# training
results_log_save_path = os.path.join(base_dir, algo_dir, 'results.csv')
results_model_save_path = os.path.join(base_dir, algo_dir, 'best_model.pt')
train_loss_list = []
train_acc_list = []
test_loss_list = []
test_acc_list = []
for epoch in tqdm(range(args.epochs)):
net_glob.train()
train_loss = 0
train_correct = 0
train_data_num = 0
for i, data in enumerate(dataloaders['train']):
image = data[0].type(torch.FloatTensor).to(args.device)
label = data[1].type(torch.LongTensor).to(args.device)
pred_label = net_glob(image)
loss = criterion(pred_label, label)
optimizer.zero_grad()
loss.backward()
optimizer.step()
pred_label = torch.argmax(pred_label, dim=1)
train_loss += loss.item()
train_correct += (torch.sum(pred_label==label).item())
train_data_num += label.shape[0]
net_glob.eval()
test_loss = 0
test_correct = 0
test_data_num = 0
for i, data in enumerate(dataloaders['test']):
image = data[0].type(torch.FloatTensor).to(args.device)
label = data[1].type(torch.LongTensor).to(args.device)
pred_label = net_glob(image)
loss = criterion(pred_label, label)
pred_label = torch.argmax(pred_label, dim=1)
test_loss += loss.item()
test_correct += (torch.sum(pred_label==label).item())
test_data_num += label.shape[0]
train_loss_list.append(train_loss/len(dataloaders['train']))
train_acc_list.append(train_correct/train_data_num)
test_loss_list.append(test_loss/len(dataloaders['test']))
test_acc_list.append(test_correct/test_data_num)
res_pd = pd.DataFrame(data=np.array([train_loss_list, train_acc_list, test_loss_list, test_acc_list]).T,
columns=['train_loss', 'train_acc', 'test_loss', 'test_acc'])
res_pd.to_csv(results_log_save_path, index=False)
if (test_correct/test_data_num) >= max(test_acc_list):
torch.save(net_glob.state_dict(), results_model_save_path)
scheduler.step()
| 6,111 |
goss/common/utils.py
|
wxnacy/gos
| 0 |
2024527
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: wxnacy(<EMAIL>)
"""
"""
def get_current_module_path():
"""获取当前模块的路径"""
import goss as _module
module_path = _module.__path__[0]
return module_path
| 221 |
python_module/test/unit/module/test_batchnorm.py
|
WestCityInstitute/MegEngine
| 2 |
2026384
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine as mge
from megengine.core import tensor
from megengine.module import BatchNorm1d, BatchNorm2d
from megengine.test import assertTensorClose
def test_batchnorm():
nr_chan = 8
data_shape = (3, nr_chan, 4)
momentum = 0.9
bn = BatchNorm1d(nr_chan, momentum=momentum)
running_mean = np.zeros((1, nr_chan, 1), dtype=np.float32)
running_var = np.ones((1, nr_chan, 1), dtype=np.float32)
data = tensor()
for i in range(3):
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
mean = np.mean(np.mean(xv, axis=0, keepdims=True), axis=2, keepdims=True)
xv_transposed = np.transpose(xv, [0, 2, 1]).reshape(
(data_shape[0] * data_shape[2], nr_chan)
)
var_biased = np.var(xv_transposed, axis=0).reshape((1, nr_chan, 1))
sd = np.sqrt(var_biased + bn.eps)
var_unbiased = np.var(xv_transposed, axis=0, ddof=1).reshape((1, nr_chan, 1))
running_mean = running_mean * momentum + mean * (1 - momentum)
running_var = running_var * momentum + var_unbiased * (1 - momentum)
data.set_value(xv)
yv = bn(data)
yv_expect = (xv - mean) / sd
assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
assertTensorClose(
running_mean.reshape(-1), bn.running_mean.numpy().reshape(-1), max_err=5e-6
)
assertTensorClose(
running_var.reshape(-1), bn.running_var.numpy().reshape(-1), max_err=5e-6
)
# test set 'training' flag to False
mean_backup = bn.running_mean.numpy()
var_backup = bn.running_var.numpy()
bn.training = False
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
data.set_value(xv)
yv1 = bn(data)
yv2 = bn(data)
assertTensorClose(yv1.numpy(), yv2.numpy(), max_err=0)
assertTensorClose(mean_backup, bn.running_mean.numpy(), max_err=0)
assertTensorClose(var_backup, bn.running_var.numpy(), max_err=0)
yv_expect = (xv - running_mean) / np.sqrt(running_var + bn.eps)
assertTensorClose(yv_expect, yv1.numpy(), max_err=5e-6)
def test_batchnorm2d():
nr_chan = 8
data_shape = (3, nr_chan, 16, 16)
momentum = 0.9
bn = BatchNorm2d(nr_chan, momentum=momentum)
running_mean = np.zeros((1, nr_chan, 1, 1), dtype=np.float32)
running_var = np.ones((1, nr_chan, 1, 1), dtype=np.float32)
data = tensor()
for i in range(3):
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
xv_transposed = np.transpose(xv, [0, 2, 3, 1]).reshape(
(data_shape[0] * data_shape[2] * data_shape[3], nr_chan)
)
mean = np.mean(xv_transposed, axis=0).reshape(1, nr_chan, 1, 1)
var_biased = np.var(xv_transposed, axis=0).reshape((1, nr_chan, 1, 1))
sd = np.sqrt(var_biased + bn.eps)
var_unbiased = np.var(xv_transposed, axis=0, ddof=1).reshape((1, nr_chan, 1, 1))
running_mean = running_mean * momentum + mean * (1 - momentum)
running_var = running_var * momentum + var_unbiased * (1 - momentum)
data.set_value(xv)
yv = bn(data)
yv_expect = (xv - mean) / sd
assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
assertTensorClose(running_mean, bn.running_mean.numpy(), max_err=5e-6)
assertTensorClose(running_var, bn.running_var.numpy(), max_err=5e-6)
# test set 'training' flag to False
mean_backup = bn.running_mean.numpy()
var_backup = bn.running_var.numpy()
bn.training = False
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
data.set_value(xv)
yv1 = bn(data)
yv2 = bn(data)
assertTensorClose(yv1.numpy(), yv2.numpy(), max_err=0)
assertTensorClose(mean_backup, bn.running_mean.numpy(), max_err=0)
assertTensorClose(var_backup, bn.running_var.numpy(), max_err=0)
yv_expect = (xv - running_mean) / np.sqrt(running_var + bn.eps)
assertTensorClose(yv_expect, yv1.numpy(), max_err=5e-6)
def test_batchnorm_no_stats():
nr_chan = 8
data_shape = (3, nr_chan, 4)
bn = BatchNorm1d(8, track_running_stats=False)
data = tensor()
for i in range(4):
if i == 2:
bn.training = False
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
mean = np.mean(np.mean(xv, axis=0, keepdims=True), axis=2, keepdims=True)
var = np.var(
np.transpose(xv, [0, 2, 1]).reshape(
(data_shape[0] * data_shape[2], nr_chan)
),
axis=0,
).reshape((1, nr_chan, 1))
sd = np.sqrt(var + bn.eps)
data.set_value(xv)
yv = bn(data)
yv_expect = (xv - mean) / sd
assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
def test_batchnorm2d_no_stats():
nr_chan = 8
data_shape = (3, nr_chan, 16, 16)
bn = BatchNorm2d(8, track_running_stats=False)
data = tensor()
for i in range(4):
if i == 2:
bn.training = False
xv = np.random.normal(loc=2.3, size=data_shape).astype(np.float32)
xv_transposed = np.transpose(xv, [0, 2, 3, 1]).reshape(
(data_shape[0] * data_shape[2] * data_shape[3], nr_chan)
)
mean = np.mean(xv_transposed, axis=0).reshape(1, nr_chan, 1, 1)
var = np.var(xv_transposed, axis=0).reshape((1, nr_chan, 1, 1))
sd = np.sqrt(var + bn.eps)
data.set_value(xv)
yv = bn(data)
yv_expect = (xv - mean) / sd
assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
| 5,952 |
client.py
|
m-primo/Secure-Local-Server-Chat
| 0 |
2026431
|
import socket
import sys
import time
import hashlib
host = input("Host: ")
port = int(input("Port: "))
password = input("Password: ")
host_k = input("Host Key: ")
host_key = str(hashlib.sha256(str(str(host)+str(password)).encode('utf-8')).hexdigest())
if(host_k == host_key):
s = socket.socket()
s.connect((host, port))
print("Connected to the server.")
while 1:
incoming_message = s.recv(1024)
incoming_message = ((incoming_message.decode()).replace(host_key, ''))
print("Server : ", incoming_message)
print("------------------------------------")
message = input(str(">> "))
message = str(message+host_key).encode()
s.send(message)
print("Message has been sent.")
print("Waiting for any incoming message...")
print("-----------------------------------")
else:
print("Host key is wrong.")
| 966 |
python-client/Main.py
|
ArtemGits/ddos_protection_v0.1
| 2 |
2025385
|
import os
from LSTM_Model import LSTM_Model
from TrainingProcess import TrainingProcess
import configparser
class Main(object):
"""Main class for creates,trains model"""
def __init__(self):
"""__init__ first setups directories and initialization
from config files"""
self.config = configparser.ConfigParser()
self.config.read('config.ini')
self.num_epochs = self.config.getint('DEFAULT', 'NUM_EPOCHS')
self.train_size_per = self.config.getfloat('DEFAULT',
'TRAIN_SIZE_PERCENT')
self.num_features = self.config.getint('DEFAULT', 'NUM_FEATURES')
self.actual_features = self.config.getint('DEFAULT', 'ACTUAL_FEATURES')
self.activation_function = self.config['DEFAULT'][
'ACTIVATION_FUNCTION']
self.loss_function = self.config['DEFAULT']['LOSS_FUNCTION']
self.metrics = self.config['DEFAULT']['METRICS']
self.batch_size_train = self.config.getint('DEFAULT',
'BATCH_SIZE_TRAIN')
self.batch_size_test = self.config.getint('DEFAULT', 'BATCH_SIZE_TEST')
self.cur_path = os.path.dirname(__file__)
self.outputDir = os.path.relpath('../resources/model_resources',
self.cur_path)
self.datasetDir = os.path.relpath('../data/dataset', self.cur_path)
def launch(self):
"""launch func for start proecess of creates and trains process"""
csv_file = self.datasetDir + \
'/Friday-WorkingHours-Afternoon-DDos.pcap_ISCX.csv'
lstm_model = LSTM_Model(self.actual_features, self.num_features, 1,
self.activation_function, self.loss_function,
self.metrics, self.batch_size_train)
model = lstm_model.create_model()
lstm_model.compile_model(model)
tp = TrainingProcess(csv_file, self.num_features, self.train_size_per,
self.batch_size_train, self.batch_size_test,
self.num_epochs, self.outputDir)
dataset = tp.read_data_from_csv()
X_train, Y_train, X_test, Y_test = tp.preprocess(dataset)
history = tp.train(X_train, Y_train, model)
tp.evaluate(X_test, Y_test)
if __name__ == '__main__':
main = Main()
main.launch()
| 2,420 |
fabfile.py
|
RuanJylf/Information
| 1 |
2025229
|
import time
from fabric import env, run
import os
env.user = 'root'
env.hosts = "192.168.127.12"
env.password = "<PASSWORD>"
local_project_dir = os.getcwd()
with open("requirements.txt", "r") as f:
req_list = f.readlines()
requirements = [req.strip() for req in req_list]
virtualenv_dir = "/root/.virtualenvs/flask_py3/bin/"
explain = 'first'
# Create the virtual environment
def make_virtualenv():
run("mkvirtualenv -p python3 flask_py3")
# Upload the project files
def put_dir():
    # Make sure the git command is available
run("rm -rf /root/information")
run("git clone https://github.com/RuanJylf/Information.git")
# Install third-party packages
def install_package():
for i in requirements:
run(virtualenv_dir +"pip install {}".format(i))
# Skip this step if the tables already exist
def database_migrate():
    # Make sure the database exists
run("cd /root/information && " + virtualenv_dir + "python manage.py db init", pty=False)
run("cd /root/information && " + virtualenv_dir + "python manage.py db migrate -m'%s'" % (explain), pty=False)
run("cd /root/information && " + virtualenv_dir + "python manage.py db upgrade", pty=False)
# Skip this step if the data has already been imported
def import_sql():
    # Make sure the test data files are inside the current project
run("cd /root/information/ && mysql -u root -pmysql information < information_info_category.sql", pty=False)
time.sleep(5)
run("cd /root/information/ && mysql -u root -pmysql information < information_info_news.sql", pty=False)
# Start the project
def run_project():
run("cd /root/information/ && " + virtualenv_dir + "gunicorn -w 2 -b 127.0.0.1:5000 manage:app -D --log-level debug --log-file /root/information/logs/log --reload", pty=False)
| 1,554 |
proxy/common/_compat.py
|
zanachka/proxy.py
| 0 |
2026339
|
# -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by <NAME> and contributors.
:license: BSD, see LICENSE for more details.
Compatibility code for using Proxy.py across various versions of Python.
.. spelling::
compat
py
"""
import platform
SYS_PLATFORM = platform.system()
IS_WINDOWS = SYS_PLATFORM == 'Windows'
| 551 |
becpy/data/__init__.py
|
BEC-Trento/becpy
| 0 |
2026355
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Created: 11-2020 - <NAME> <carmelo> <<EMAIL>>
"""
Module docstring
"""
import sys
import numpy as np
from importlib.resources import path
thismodule = sys.modules[__name__]
with path(thismodule, 'hf_scan.npz') as f:
hf_scan = np.load(f)
| 295 |
apex_legends_api/al_api.py
|
johnsturgeon/apex-legends-api
| 6 |
2026158
|
"""
Apex Legends API
| The ApexLegendsAPI wraps the api at: https://apexlegendsapi.com
| Get your API Key Here: https://apexlegendsapi.com
"""
import json
import requests
from deprecated import deprecated
from .al_domain import ALPlayer # noqa E0402
from .al_base import ALPlatform, ALAction, ALHTTPExceptionFromResponse # noqa E0402
class ApexLegendsAPI:
"""
Main class that wraps the API calls
"""
api_version: str = "5"
base_params: dict = {'version': api_version}
base_url: str = "https://api.mozambiquehe.re/bridge"
def __init__(self, api_key: str):
""" Initialize with the API Key """
self.session: requests.Session = requests.Session()
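        # the Authorization header is attached to the shared session, so every request is authenticated automatically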
self.session.headers.update({'Authorization': api_key})
def _make_request(self, additional_params: dict, new_base_url: str = None) -> list:
""" Send the request to the apex legends api """
if new_base_url:
url: str = new_base_url
params: dict = additional_params
else:
url: str = self.base_url
params: dict = dict(self.base_params, **additional_params)
response: requests.Response = self.session.get(url, params=params)
if response.status_code == 200:
try:
response_text = json.loads(response.text)
except ValueError:
response_text = response.text
else:
raise ALHTTPExceptionFromResponse(response)
# sometimes we get a pure dictionary back, let's wrap it in a list for consistency
if isinstance(response_text, dict):
response_text = [response_text]
return response_text
def nametouid(self, player: str, platform: ALPlatform) -> int:
"""
        Retrieve a player's uid given their name and platform
:parameter player: Name of the player
:type player: str
:parameter platform: see [ALPlatform] for all types
:type platform: ALPlatform
"""
new_base_url: str = "https://api.mozambiquehe.re/nametouid?"
additional_params = {
'player': player,
'platform': platform.value
}
result = self._make_request(additional_params=additional_params, new_base_url=new_base_url)
assert len(result) == 1
return result[0]['result']
def get_player(self, name: str, platform: ALPlatform, skip_tracker_rank=False) -> ALPlayer:
"""
Retrieve the ALPlayer object populated with data from the api.
NOTE:
Player must exist, method will return None if the player cannot be found
:parameter name: Name of the player
:type name: str
:parameter platform: see ALPlatform for all types
:parameter skip_tracker_rank: if set to True, this will skip fetching the legend ranks
:return: a single player or None if no player is found
"""
basic_player_stats: list = self.basic_player_stats(name, platform, skip_tracker_rank)
assert len(basic_player_stats) == 1
event_info: list = self.events(
player_name=name,
platform=platform,
action=ALAction.INFO
)
events: list = list()
tracked_player: dict
for tracked_player in event_info[0].get('data'):
if name == tracked_player.get('name') and \
platform.value == tracked_player.get('platform'):
events = self.events(
player_name=name,
platform=platform,
action=ALAction.GET
)
return ALPlayer(basic_player_stats_data=basic_player_stats[0], events=events)
def get_player_by_uid(
self, uid: str, platform: ALPlatform, skip_tracker_rank=False
) -> ALPlayer:
"""
Retrieve the ALPlayer object populated with data from the api.
NOTE:
Player must exist, method will return None if the player cannot be found
:parameter uid: UID of the player
:parameter platform: see ALPlatform for all types
:parameter skip_tracker_rank: if set to True, this will skip fetching the legend ranks
:return: a single player or None if no player is found
"""
basic_player_stats: list = self.basic_player_stats_by_uid(uid, platform, skip_tracker_rank)
assert len(basic_player_stats) == 1
event_info: list = self.events_by_uid(
uid=uid,
platform=platform,
action=ALAction.INFO
)
events: list = list()
tracked_player: dict
for tracked_player in event_info[0].get('data'):
if uid == tracked_player.get('uid') and \
platform.value == tracked_player.get('platform'):
events = self.events_by_uid(
uid=uid,
platform=platform,
action=ALAction.GET
)
return ALPlayer(basic_player_stats_data=basic_player_stats[0], events=events)
def add_player_by_uid(self, player_uid: int, platform: ALPlatform) -> list:
"""
Adds the given player's UUID to the list of tracked players
:param player_uid: UUID of the player to add
:type player_uid: int
:param platform: ALPlatform of player to add
:type platform: ALPlatform
"""
return self.events_by_uid(str(player_uid), platform=platform, action=ALAction.ADD)
def basic_player_stats(
self, player_name: str,
platform: ALPlatform,
skip_tracker_rank=False) -> list:
"""
Query the server for the given player / platform and returns a dictionary of their
stats.
More here: https://apexlegendsapi.com/#basic
:param player_name: Player Name to search for
:param platform: (see Platform enum for values)
:param skip_tracker_rank: if set to true, this will not fetch the legend's tracker rank
:return: List of player stats created from response json
"""
params: dict = {'platform': platform.value, 'player': player_name}
if skip_tracker_rank:
params.update({'skipRank': True})
return self._make_request(additional_params=params)
def basic_player_stats_by_uid(
self, uid: str,
platform: ALPlatform,
skip_tracker_rank=False) -> list:
"""
Query the server for the given player / platform and returns a dictionary of their
stats.
More here: https://apexlegendsapi.com/#basic
:param uid: Player UID to search for
:param platform: (see Platform enum for values)
:param skip_tracker_rank: if set to true, this will not fetch the legend's tracker rank
:return: List of player stats created from response json
"""
params: dict = {'platform': platform.value, 'uid': uid}
if skip_tracker_rank:
params.update({'skipRank': True})
return self._make_request(additional_params=params)
@deprecated(reason="use `events` instead")
def match_history(self, player_name: str, platform: ALPlatform, action: ALAction) -> list:
"""
.. deprecated:: 1.1.0
use `events` instead
"""
return self.events(player_name=player_name, platform=platform, action=action)
def events(self, player_name: str, platform: ALPlatform, action: ALAction) -> list:
"""
Query the server for the given player / platform and return a list of their
events
NOTE:
* Match history is only available for supporters
* Match history must be tracked by the server otherwise this will return nothing
* In order to add a player to be tracked, you need to call this passing 'add' action.
:param player_name: Player Name for match history
:param platform: see Platform enum for values
:param action: see Action enum for values
:return: List of history created from response json
"""
params: dict = {
'platform': platform.value,
'player': player_name,
'history': 1,
'action': action.value
}
return self._make_request(additional_params=params)
def events_by_uid(self, uid: str, platform: ALPlatform, action: ALAction) -> list:
"""
Query the server for the given player's UID / platform and return a list of their
events
NOTE:
* Match history is only available for supporters
* Match history must be tracked by the server otherwise this will return nothing
* In order to add a player to be tracked, you need to call this passing 'add' action.
:param uid: Player UID for match history
:param platform: see Platform enum for values
:param action: see Action enum for values
:return: List of history created from response json
"""
params: dict = {
'platform': platform.value,
'uid': uid,
'history': 1,
'action': action.value
}
return self._make_request(additional_params=params)
def get_player_origin(self, player_name: str, show_all_hits: bool = False) -> list:
"""
Query the server for the origin user and returns Origin UID, real username, PID and avatar
for a given username
:param player_name: Player Name for match history
:param show_all_hits: True to 'search' for player (show multiple hits), default False
:return: list of results
"""
new_base_url: str = "https://api.mozambiquehe.re/origin?"
new_base_url += f"&player={player_name}"
if show_all_hits:
new_base_url += "&showAllHits"
return self._make_request(additional_params={}, new_base_url=new_base_url)
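# Usage sketch (illustrative only, not part of the library): assuming a valid API key
# and that ALPlatform defines a PC member, a caller might do roughly:
#   api = ApexLegendsAPI(api_key="YOUR_KEY")
#   player = api.get_player(name="SomePlayer", platform=ALPlatform.PC)
#   stats = api.basic_player_stats("SomePlayer", ALPlatform.PC)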
| 9,952 |
acs/acs/Device/Model/AndroidDevice/Agent/AcsAgent.py
|
wangji1/test-framework-and-suites-for-android
| 0 |
2025291
|
"""
:copyright: (c)Copyright 2015, Intel Corporation All Rights Reserved.
The source code contained or described herein and all documents related
to the source code ("Material") are owned by Intel Corporation or its
suppliers or licensors. Title to the Material remains with Intel Corporation
or its suppliers and licensors. The Material contains trade secrets and
proprietary and confidential information of Intel or its suppliers and
licensors.
The Material is protected by worldwide copyright and trade secret laws and
treaty provisions. No part of the Material may be used, copied, reproduced,
modified, published, uploaded, posted, transmitted, distributed, or disclosed
in any way without Intel's prior express written permission.
No license under any patent, copyright, trade secret or other intellectual
property right is granted to or conferred upon you by disclosure or delivery
of the Materials, either expressly, by implication, inducement, estoppel or
otherwise. Any license under such intellectual property rights must be express
and approved by Intel in writing.
:organization: INTEL MCG PSI
:summary: This file implements the class which handles the ACS agent for versions above KitKat.
This class manages 2 agents (user and system).
:since: 26/01/2015
:author: ssavrimoutou
"""
# flake8: noqa: W503
import time
from acs.Device.Model.AndroidDevice.Agent.IAgent import IAgent
from acs.Device.Common.Common import Global
from acs.UtilitiesFWK.Utilities import AcsConstants
from acs.ErrorHandling.DeviceException import DeviceException
class AcsAgent(IAgent):
"""
Class that will handle Acs embedded agent status for android implementation
This class will be used by Android versions upper than KitKat
"""
def __init__(self, device):
"""
Constructor
        :param device: the device instance this agent is attached to
"""
self._logger = device._logger
self._device = device
self._agentv2_version = None
self._user_package_name = "com.intel.acs.agentv2.user"
self._system_package_name = "com.intel.acs.agentv2.system"
self.is_started = False
@property
def version(self):
"""
Get the ACS Agent Version that has been retrieve through retrieve_version()
:rtype: str
:return: ACS Agents version
"""
if self._agentv2_version is None:
self.update_version()
# return V2, as agent V1 will soon be removed
return self._agentv2_version
def update_version(self):
"""
Get the ACS Agent version deployed on device
:return: None
"""
# Get agent user version
agentv2_user_version = str(self._device.get_apk_version(self._user_package_name)
or AcsConstants.NOT_INSTALLED).replace(" (user)", "")
# Check if version of agents user and system are the same (Intel device only)
if self._device.has_intel_os():
# Get agent user version
agentv2_system_version = str(self._device.get_apk_version(self._system_package_name)
or AcsConstants.NOT_INSTALLED).replace(" (system)", "")
if agentv2_user_version != agentv2_system_version:
self._logger.warning(
"ACS agent user ({0}) and system ({1}) versions are different! ".format(agentv2_user_version,
agentv2_system_version))
self._logger.warning(
"Same version of agents shall be installed on the device else ue commands will not work properly !")
# Store the user agent version
self._agentv2_version = agentv2_user_version
def start(self):
"""
Try to start the Android embedded agent.
:rtype: boolean
:return: True if agent is started, False otherwise
"""
self._logger.debug("Trying to start ACS agent V2 (user) ...")
acs_agent_activity = "com.intel.acs.agentv2.common.framework.ServiceStarterActivity"
cmd_user = "adb shell am start -n {0}/{1}".format(self._user_package_name, acs_agent_activity)
output = self._device.run_cmd(cmd_user, self._device.get_uecmd_timeout(), force_execution=True)
# In case of intel device start also system agent
if output[0] == Global.SUCCESS and self._device.has_intel_os():
self._logger.debug("Trying to start ACS agent V2 (system) ...")
cmd_system = "adb shell am start -n {0}/{1}".format(self._system_package_name, acs_agent_activity)
output = self._device.run_cmd(cmd_system, self._device.get_uecmd_timeout(), force_execution=True)
return output[0] == Global.SUCCESS
def stop(self):
"""
Try to stop the Android embedded Service.
:rtype: boolean
:return: True if AcsAgentService is stopped, False otherwise
"""
# stop service
cmd = "adb shell am broadcast -a intel.intent.action.acs.stop_service"
self._device.run_cmd(cmd, self._device.get_uecmd_timeout(), force_execution=True)
time.sleep(0.5)
# kill agent process (user)
cmd = "adb shell am force-stop {0}".format(self._user_package_name)
output = self._device.run_cmd(cmd, 2, force_execution=True)
# kill agent process (system) (Intel device only)
if output[0] == Global.SUCCESS and self._device.has_intel_os():
cmd = "adb shell am force-stop {0}".format(self._system_package_name)
output = self._device.run_cmd(cmd, 2, force_execution=True)
        return output[0] == Global.SUCCESS
def wait_for_agent_started(self, timeout=None):
"""
Wait for acs agent to start before timeout.
If no timeout is set, it will get value of device parameter **acsAgentStartTimeout**.
:type timeout: float
:param timeout: Value before which agent shall be started before.
:rtype: bool
:return: True if agent is started, False otherwise
"""
is_started = False
if not timeout:
timeout = self._device.get_config("acsAgentStartTimeout", 60.0, float)
# check that service is ready
uecmd_phonesystem = self._device.get_uecmd("PhoneSystem")
end_time = time.time() + timeout
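        # poll the device until the agent answers or the timeout expires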
while time.time() < end_time and not is_started:
# wait before checking service start
time.sleep(self._device.get_config("waitBetweenCmd", 5.0, float))
# check that service is ready
try:
is_started = uecmd_phonesystem.is_acs_agent_ready()
except DeviceException:
# No answer from the device
is_started = False
# Update is_started attribute
self.is_started = is_started
return self.is_started
def __is_service_running(self, package_name):
"""
Check if the service of the given package is running
:param package_name: Name of the package to check
:return: boolean True if service is running else False
"""
service_is_running = False
cmd = "adb shell ps | grep {0}".format(package_name)
result, output_msg = self._device.run_cmd(cmd,
self._device.get_uecmd_timeout(),
force_execution=True,
wait_for_response=True)
if result == 0 and "{0}.service".format(package_name) in str(output_msg):
service_is_running = True
return service_is_running
def is_running(self):
"""
Check if agent is running
        :rtype: boolean
        :return: True if Acs agent is running, False otherwise
"""
agent_is_running = self.__is_service_running(self._user_package_name)
if agent_is_running and self._device.has_intel_os():
# In case the device has an Intel os with root access, the system apk shall be installed
# And ACS shall check if the system service is running
            agent_is_running = self.__is_service_running(self._system_package_name)
return agent_is_running
def get_intent_action_cmd(self, is_system=False):
"""
Get intent action command line
:param is_system: boolean to notify that the command is system or user.
By default we consider that the command is user
:return: string containing the intent action command line
"""
intent_action_cmd = "intel.intent.action.acs.cmd.user"
if is_system:
intent_action_cmd = "intel.intent.action.acs.cmd.system"
return intent_action_cmd
| 8,895 |
libs/hermes/hermes.cloudbreak/hermes/cloudbreak/utils.py
|
KAGRA-TW-ML/gw-iaas
| 2 |
2022638
|
import re
import time
from typing import Callable, Optional
from rich.progress import BarColumn, Progress, ProgressBar, TimeElapsedColumn
def snakeify(name: str) -> str:
return re.sub("(?<!^)(?=[A-Z])", "_", name).lower()
class PulsingBarColumn(BarColumn):
def render(self, task) -> ProgressBar:
"""Gets a progress bar widget for a task."""
return ProgressBar(
total=max(0, task.total),
completed=max(0, task.completed),
width=None if self.bar_width is None else max(1, self.bar_width),
pulse=not task.completed,
animation_time=task.get_time(),
style=self.style,
complete_style=self.complete_style,
finished_style=self.finished_style,
pulse_style=self.pulse_style,
)
def wait_for(
callback: Callable,
msg: Optional[str] = None,
timeout: Optional[float] = None,
):
def run():
start_time = time.time()
while True:
result = callback()
if result:
return result
time.sleep(0.1)
if timeout is not None and (time.time() - start_time) > timeout:
raise RuntimeError(f"Timeout {timeout} reached")
if msg:
with Progress(
"[progress.description]{task.description}",
PulsingBarColumn(),
TimeElapsedColumn(),
) as progress:
task_id = progress.add_task(msg, total=1)
result = run()
progress.update(task_id, advance=1)
else:
result = run()
return result
| 1,608 |
tests/features/steps/test_docker_build_found.py
|
Omegaice/smartcontainers
| 6 |
2026473
|
import pytest
from pytest_bdd import given, scenario, then, when
@pytest.mark.skip(reason="Currently not implemented")
@scenario('../docker_build.feature', 'Dockerfile is found')
def test_dockerfile_is_found():
pass
@given('a path containing a Dockerfile')
def a_path_containing_a_dockerfile():
"""a path containing a Dockerfile."""
@when('smart containers is asked to build it')
def smart_containers_is_asked_to_build_it():
"""smart containers is asked to build it."""
@then('it should run Docker build')
def it_should_run_docker_build():
"""it should run Docker build."""
@then('it should parse the file to create a dictionary of the steps')
def it_should_parse_the_file_to_create_a_dictionary_of_the_steps():
"""it should parse the file to create a dictionary of the steps."""
| 805 |
ex057.py
|
felipesch92/PythonExercicios
| 0 |
2024269
|
# Write a program that reads a person's sex, accepting only the values 'M' or 'F'.
# If the value is wrong, ask for the input again until a valid value is given.
s = input('Digite seu sexo: [M/F] ').strip().upper()[0]
while s not in 'MF':
s = input('Dados inválidos, Digite seu sexo: [M/F] ').strip().upper()[0]
print('Sexo {} registrado com sucesso.'.format(s))
| 366 |
src/extensions.py
|
Kim-Ha-Jeong/Capstone_flask
| 3 |
2026153
|
ALLOWED_EXTENSIONS = set(['mp4', 'avi', 'mkv', 'flv', 'wmv', 'mov'])
def allowed_file(value):
return '.' in value and value.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
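# e.g. allowed_file("clip.mp4") -> True, allowed_file("notes.txt") -> False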
| 167 |
Question_81_90/K -means Clustering (Step 3) Adjust Initial Calss.py
|
SHK2018/Gasyori100knock
| 5 |
2025491
|
# -*- coding: utf-8 -*-
import cv2
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
def decrease_color(img):
out = img.copy()
    out = (out // 64) * 64 + 32
return out
# Database
def get_DB(dataset, th=0.5):
# get image paths
data = glob(dataset) # Read all training data
data.sort()
# Set draw figure size
plt.figure(figsize=(19.20, 10.80))
# prepare database
# 13 = (B + G + R) * 4 + tag
db = np.zeros((len(data), 13), dtype=np.int32)
# each image
for i, path in enumerate(data):
img = decrease_color(cv2.imread(path))
# get histogram
for j in range(4):
# count for numbers of pixels
db[i, j] = len(np.where(img[..., 0] == (64 * j + 32))[0])
db[i, j+4] = len(np.where(img[..., 1] == (64 * j + 32))[0])
db[i, j+8] = len(np.where(img[..., 2] == (64 * j + 32))[0])
# get class
if 'akahara' in path:
cls = 0
elif 'madara' in path:
cls = 1
# store class label
db[i, -1] = cls
# for histogram: B(1,4), B(5,8), B(9,12)
img_h = img.copy() // 64
img_h[..., 1] += 4
img_h[..., 2] += 8
plt.subplot(2, int(len(data)/2), i+1)
plt.hist(img_h.ravel(), bins=12, rwidth=0.8)
plt.title(path[15:])
# print(db)
# plt.savefig("Myresult/out84.png", dpi=326)
plt.show()
return db, data
def k_means_step3(db, pdata, th=0.5, Class=2):
tmp_db = db.copy()
# initiate random seed
np.random.seed(4)
for i in range(len(tmp_db)):
# get random label
label = 0
if np.random.random(1,)[0] > th:
label = 1
tmp_db[i, -1] = label
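    # alternate between recomputing each class centroid and reassigning every sample
    # to its nearest centroid, stopping once no label changes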
while True:
num = 0
        # get the gravity center (centroid) of each class
grabity = np.zeros((2, len(tmp_db[0])-1), dtype=np.float32)
for i in range(2):
grabity[i] = np.mean(tmp_db[np.where(tmp_db[..., -1] == i)[0], :len(tmp_db[0])-1], axis=0)
for i in range(len(tmp_db)):
            # get the distance to each class gravity center
dis = np.sqrt(np.sum(np.square(np.abs(grabity - tmp_db[i, :len(tmp_db[0])-1])), axis=1))
# get new label
pred = np.argmin(dis, axis=0)
            # if the new label differs from the old label
if int(tmp_db[i, -1]) != pred:
num += 1
tmp_db[i, -1] = pred
if num < 1:
break
for i in range(db.shape[0]):
print(pdata[i], " Pred:", tmp_db[i, -1])
# get database
trainpath = "dataset/train_*"
trainDB, pdata = get_DB(trainpath)
# clustering
result = k_means_step3(trainDB, pdata, th=0.3)
| 2,768 |
src/tests/test_PhpVersion.py
|
wirecard/extension-release-info-updater
| 0 |
2025605
|
from unittest import TestCase
from src.PhpVersion import PhpVersion
class TestPhpVersion(TestCase):
def setUp(self) -> None:
self.phpVersion = PhpVersion('woocommerce', 'v3.2.1')
def test_get_tested_php_versions(self):
self.assertEquals(self.phpVersion.get_tested_php_versions_from_config(), ['7.2'])
def test_get_compatible_php_versions(self):
self.assertEquals(self.phpVersion.get_compatible_php_versions_from_config(), ['5.6', '7.0', '7.1', '7.2'])
def test_get_compatible_php_versions_from_changelog(self):
self.assertEquals(self.phpVersion.get_compatible_php_versions_from_changelog(), ['5.6', '7.0', '7.1', '7.2'])
def test_set_tested_php_versions_from_changelog(self):
self.assertEquals(self.phpVersion.get_tested_php_versions_from_changelog(), ['7.2'])
| 828 |
chatroom.py
|
gustavogarciautp/Chat---Sistemas-distribuidos
| 0 |
2024965
|
import os
from cliente_socketio import Cliente
import json
import pickle
# Loading the client with the current ip server
if os.path.exists('ip.binary'):
with open('ip.binary', 'rb') as ip:
unpickler = pickle.Unpickler(ip)
client = Cliente(unpickler.load()) # Creates the client
if __name__ == '__main__':
ip_server = input("Digite la dirección ip del servidor: ")
with open('ip.binary', 'wb') as ip:
pickle.dump(ip_server, ip)
# Opens the application
from signup import *
| 489 |
lattice/urls.py
|
aditya2695/Applicatiopn-for-Computation-of-Lattice-Invariants
| 0 |
2026014
|
from django.contrib import admin
from django.urls import path, re_path
from . import views
urlpatterns = [
path('',views.index,name='index'),
path('home',views.index,name='home'),
path('upload',views.upload,name='upload'),
path('cluster',views.cluster,name='cluster'),
path('calculate',views.calculate,name='calculate'),
path('compute2d',views.compute2d,name='compute2d'),
path('compute3d',views.compute3d,name='compute3d'),
path('compareCIFs',views.compareCIFs,name='compareCIFs'),
path('computeDistMatrix',views.computeDistMatrix,name='computeDistMatrix'),
path('upload_file',views.upload_file,name='upload_file'),
path('remove_file',views.remove_file,name='remove_file'),
path('getMediaFiles',views.getMediaFiles,name='getMediaFiles'),
]
| 792 |
setup.py
|
arnabhan/graphletminer
| 2 |
2026428
|
from setuptools import setup, find_packages
with open('README.md') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
setup(
name='GraphletMiner',
version='0.1.0',
description='Graphlet miner: a text pattern analysis library for Python',
long_description=readme,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/arnabhan/graphletminer',
license=license,
install_requires=['nltk','networkx','tqdm'],
packages=find_packages(exclude=('tests', 'docs'))
)
| 539 |
myhood/serializers.py
|
Rodgersouko/neighbourhood
| 1 |
2026270
|
from rest_framework import generics, permissions, serializers, exceptions
from django.contrib.auth.models import User
from rest_framework import serializers
from .models import *
from django.contrib.auth.hashers import make_password
from rest_framework import viewsets
class UserSerializer(serializers.ModelSerializer):
# neighbourhood = serializers.CharField(source='neighbourhood.name')
class Meta:
model = User
fields = ['first_name', 'email', 'is_staff', 'last_name', 'avatar']
def create(self, validated_data):
validated_data['password'] = make_password(
validated_data.get('password'))
return super(UserSerializer, self).create(validated_data)
# Register Serializer
class RegisterSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('first_name', 'email', 'password')
extra_kwargs = {'password': {'write_only': True}}
def create(self, validated_data):
        user = User.objects.create_user(
            validated_data['email'], password=validated_data['password'])
return user
class ChangePasswordSerializer(serializers.Serializer):
model = User
"""
Serializer for password change endpoint.
"""
old_password = serializers.CharField(required=True)
new_password = serializers.CharField(required=True)
############################################################
class NeighbourhoodSerializer(serializers.ModelSerializer):
class Meta:
model = Neighbourhood
fields = ('id', 'name', 'location', 'admin', 'occupantsCount', 'image')
class ProfileSerializer(serializers.ModelSerializer):
class Meta:
model = Profile
fields = ('id', 'name', 'email', 'status', 'image', 'user')
class BusinessSerializer(serializers.ModelSerializer):
class Meta:
model = Business
fields = ('id', 'business_name', 'user', 'neighbourhood',
'business_email', 'business_profile')
class PostSerializer(serializers.ModelSerializer):
class Meta:
model = Post
fields = ('id', 'title', 'user', 'neighbourhood', 'text', 'image')
| 2,150 |
mob_suite/wrappers/__init__.py
|
jrober84/mob-suite
| 1 |
2024091
|
from subprocess import Popen, PIPE, check_call
import os, sys, logging
from mob_suite.blast import BlastRunner
from mob_suite.blast import BlastReader
class mash:
def __init__(self):
return
def run_mash(self, reference_db, input_fasta, table=False, num_threads=1):
if table:
p = Popen(['mash', "dist", "-t", "-p", str(num_threads), reference_db, input_fasta],
stdout=PIPE,
stderr=PIPE)
(stdout, stderr) = p.communicate()
else:
p = Popen(['mash', "dist", "-p", str(num_threads), reference_db, input_fasta],
stdout=PIPE,
stderr=PIPE)
(stdout, stderr) = p.communicate()
if len(str(stderr)) > 0 and str(stderr) != "b''":
logging.info('{}'.format(stderr))
return stdout
def run_mash_screen(self, reference_db, input_fasta, winner_take_all=True, num_threads=1):
if winner_take_all:
p = Popen(['mash', "screen", "-p", str(num_threads), '-w', '-i','0', reference_db, input_fasta],
stdout=PIPE,
stderr=PIPE)
(stdout, stderr) = p.communicate()
else:
p = Popen(['mash', "screen", "-p", str(num_threads), '-i','0', reference_db, input_fasta],
stdout=PIPE,
stderr=PIPE)
(stdout, stderr) = p.communicate()
logging.info(
'{}'.format(stderr))
return stdout
def mashsketch(self, input_fasta, output_path, sketch_ind=True, num_threads=1, kmer_size=21, sketch_size=1000):
if output_path == '':
            output_path = os.path.dirname(input_fasta)
if sketch_ind:
p = Popen(['mash', "sketch",
"-p", str(num_threads),
"-i",
"-o", output_path,
"-k", str(kmer_size),
"-s", str(sketch_size), input_fasta],
stdout=PIPE,
stderr=PIPE)
else:
p = Popen(['mash', "sketch",
"-p", str(num_threads),
"-o", output_path,
"-k", str(kmer_size),
"-s", str(sketch_size), input_fasta],
stdout=PIPE,
stderr=PIPE)
p.wait()
stdout = p.stdout.read()
stderr = p.stderr.read()
class detectCircularity:
### Method adapted from Berokka https://github.com/tseemann/berokka by <NAME>
def __init__(self):
return
def run(self,input_fasta, output_path, logging, min_cov=1, min_ident=1, evalue=1, num_threads=1, min_length=25):
blast_results_file = os.path.join(output_path, 'circularize.blast.txt')
self.run_blast(input_fasta=input_fasta,
output_path=output_path,
blast_results_file=blast_results_file,
logging=logging,
min_cov=min_cov,
min_ident=min_ident,
evalue=evalue,
num_threads=num_threads,
min_length=min_length)
return (self.overhangDetection(blast_results_file,logging))
def run_blast(self,input_fasta,output_path,blast_results_file,logging,min_cov=1,min_ident=1,evalue=1,num_threads=1,min_length=25):
blast_runner = BlastRunner(input_fasta, output_path)
blast_runner.makeblastdb(input_fasta, 'nucl',logging)
blast_runner.run_blast(query_fasta_path=input_fasta, blast_task='megablast', db_path=input_fasta,
db_type='nucl', min_cov=min_cov, min_ident=min_ident, evalue=evalue,
blast_outfile=blast_results_file, num_threads=num_threads, word_size=11,logging=logging)
if os.path.getsize(blast_results_file) == 0:
fh = open(blast_results_file, 'w', encoding="utf-8")
fh.write('')
fh.close()
return dict()
blast_df = BlastReader(blast_results_file,logging).df
blast_df = blast_df.loc[blast_df['length'] >= min_length]
blast_df = blast_df.reset_index(drop=True)
blast_df.to_csv(blast_results_file, sep='\t', header=False, line_terminator='\n', index=False)
def overhangDetection(self,blast_results_file,logging,min_length=25):
if os.path.getsize(blast_results_file) == 0:
return dict()
blast_df = BlastReader(blast_results_file,logging).df.sort_values(['qseqid', 'qstart', 'qend', 'bitscore'], ascending=[True, True, True, False])
circular_contigs = {}
for index, row in blast_df.iterrows():
contig_id_query = row['qseqid']
contig_id_subject = row['sseqid']
contig_start_subject = int(row['sstart'])
contig_end_subject = int(row['send'])
contig_start_query = int(row['qstart'])
contig_end_query = int(row['qend'])
contig_length = int(row['qlen'])
length = int(row['length'])
if contig_id_query != contig_id_subject and contig_id_subject != "ref|{}|".format(contig_id_query):
continue
if contig_start_query != 1 or length < min_length:
continue
if contig_start_query == contig_start_subject and contig_end_query == contig_end_subject:
continue
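            # a hit that starts at the first base of the query and ends at the last base
            # of the contig means the two contig ends overlap, i.e. the contig is circular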
if contig_start_query == 1 and contig_end_subject == contig_length:
circular_contigs[contig_id_query] = 'Circular: Overlap {} bp'.format(length)
return circular_contigs
| 5,707 |
learn/03week/code/workshop3/donations_pkg/user.py
|
tmax818/nucamp_intro_python
| 0 |
2024372
|
def login(database, username, password):
if username in database.keys() and database[username] == password:
print(f"\nWelcome back {username}!")
return username
elif username in database.keys() and database[username] != password:
print("invalid credentials")
return ""
else:
print("who the hell are you?")
return ""
def register(database, username):
if username in database.keys():
print(f"{username} already registered!")
return ""
else:
print(f"Username {username} registered!")
return username
| 600 |
z2/part3/updated_part2_batch/jm/parser_errors_2/778913919.py
|
kozakusek/ipp-2020-testy
| 1 |
2025275
|
from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 778913919
"""
"""
random actions, total chaos
"""
board = gamma_new(5, 5, 2, 2)
assert board is not None
assert gamma_move(board, 1, 2, 2) == 1
assert gamma_move(board, 2, 1, 0) == 1
assert gamma_move(board, 1, 2, 3) == 1
assert gamma_move(board, 1, 0, 1) == 1
assert gamma_busy_fields(board, 1) == 3
assert gamma_move(board, 2, 0, 0) == 1
assert gamma_golden_move(board, 2, 1, 0) == 0
assert gamma_move(board, 2, 0, 4) == 1
assert gamma_move(board, 2, 1, 3) == 0
assert gamma_move(board, 1, 1, 1) == 1
assert gamma_move(board, 1, 3, 1) == 0
assert gamma_move(board, 2, 1, 3) == 0
assert gamma_move(board, 2, 4, 1) == 0
assert gamma_move(board, 1, 1, 3) == 1
assert gamma_move(board, 2, 4, 4) == 0
assert gamma_move(board, 1, 0, 3) == 1
assert gamma_move(board, 2, 3, 4) == 0
assert gamma_move(board, 2, 1, 3) == 0
assert gamma_move(board, 1, 3, 3) == 1
assert gamma_move(board, 1, 0, 3) == 0
assert gamma_move(board, 2, 4, 3) == 0
assert gamma_move(board, 2, 2, 1) == 0
assert gamma_move(board, 1, 1, 2) == 1
assert gamma_free_fields(board, 1) == 14
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_move(board, 1, 4, 3) == 1
assert gamma_busy_fields(board, 2) == 3
assert gamma_move(board, 1, 4, 2) == 1
assert gamma_busy_fields(board, 2) == 3
assert gamma_move(board, 1, 1, 2) == 0
assert gamma_move(board, 1, 2, 4) == 1
assert gamma_move(board, 2, 0, 3) == 0
assert gamma_move(board, 1, 4, 4) == 1
assert gamma_move(board, 1, 2, 4) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 1, 1, 2) == 0
assert gamma_move(board, 1, 0, 0) == 0
assert gamma_move(board, 1, 4, 3) == 0
assert gamma_move(board, 1, 1, 2) == 0
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 1, 1, 0) == 0
assert gamma_move(board, 1, 3, 4) == 1
assert gamma_move(board, 2, 0, 4) == 0
assert gamma_move(board, 1, 0, 3) == 0
assert gamma_move(board, 2, 3, 4) == 0
assert gamma_move(board, 1, 1, 1) == 0
assert gamma_golden_move(board, 1, 0, 1) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 2, 3, 1) == 0
assert gamma_free_fields(board, 2) == 2
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 1, 1, 3) == 0
assert gamma_move(board, 1, 0, 4) == 0
assert gamma_move(board, 2, 0, 4) == 0
assert gamma_move(board, 1, 0, 4) == 0
assert gamma_free_fields(board, 1) == 9
assert gamma_move(board, 2, 0, 4) == 0
assert gamma_move(board, 2, 2, 4) == 0
board224524589 = gamma_board(board)
assert board224524589 is not None
assert board224524589 == ("2.111\n"
"11111\n"
".11.1\n"
"11...\n"
"22...\n")
del board224524589
board224524589 = None
assert gamma_move(board, 1, 4, 1) == 1
assert gamma_busy_fields(board, 1) == 14
board442419720 = gamma_board(board)
assert board442419720 is not None
assert board442419720 == ("2.111\n"
"11111\n"
".11.1\n"
"11..1\n"
"22...\n")
del board442419720
board442419720 = None
assert gamma_move(board, 2, 1, 3) == 0
assert gamma_move(board, 2, 4, 4) == 0
assert gamma_free_fields(board, 2) == 2
assert gamma_move(board, 1, 2, 3) == 0
assert gamma_move(board, 1, 4, 2) == 0
assert gamma_free_fields(board, 1) == 8
assert gamma_move(board, 2, 4, 0) == 0
assert gamma_move(board, 2, 4, 4) == 0
assert gamma_golden_move(board, 2, 3, 4) == 0
assert gamma_move(board, 1, 2, 0) == 1
assert gamma_move(board, 2, 2, 1) == 0
board904994937 = gamma_board(board)
assert board904994937 is not None
assert board904994937 == ("2.111\n"
"11111\n"
".11.1\n"
"11..1\n"
"221..\n")
del board904994937
board904994937 = None
assert gamma_move(board, 1, 1, 3) == 0
assert gamma_move(board, 2, 4, 1) == 0
assert gamma_move(board, 2, 3, 2) == 0
assert gamma_move(board, 1, 3, 3) == 0
assert gamma_move(board, 1, 1, 1) == 0
assert gamma_move(board, 2, 3, 2) == 0
gamma_delete(board)
| 4,086 |
rl_multi_agent/furnmove_eval_experiments/furnmove_grid_marginal_3agents_config.py
|
allenai/cordial-sync
| 28 |
2026195
|
import os
from typing import Optional
from constants import SPLIT_TO_USE_FOR_EVALUATION, ABS_PATH_TO_FINAL_FURNMOVE_CKPTS
from rl_multi_agent.experiments.furnmove_grid_marginal_3agents_config import (
FurnMove3AgentUncoordinatedExperimentConfig,
)
from rl_multi_agent.furnmove_utils import SaveFurnMoveMixin
class EvalConfig(SaveFurnMoveMixin, FurnMove3AgentUncoordinatedExperimentConfig):
@property
def saved_model_path(self) -> Optional[str]:
return os.path.join(
ABS_PATH_TO_FINAL_FURNMOVE_CKPTS,
"furnmove_grid_marginal_3agents_1000000_2020-02-28_00-24-58.dat",
)
def simple_name(self):
return "grid_marginal_3agents"
def get_experiment():
ec = EvalConfig()
ec.episode_init_queue_file_name = "furnmove_episode_start_positions_for_eval__3agents__{}.json".format(
SPLIT_TO_USE_FOR_EVALUATION
)
return ec
| 900 |
events/migrations/0006_auto_20200109_0203.py
|
Ajuajmal/art-fest-event-manager-sattva
| 0 |
2024228
|
# Generated by Django 2.2.9 on 2020-01-08 20:33
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0005_auto_20200109_0201'),
]
operations = [
migrations.AlterField(
model_name='participant',
name='contact',
field=models.CharField(default='', max_length=10, validators=[django.core.validators.RegexValidator(message="Phone number must be entered in the format: '9876543210'. 10 digits .", regex='^[0-9]{10}$')]),
),
migrations.AlterField(
model_name='participant',
name='regnumber',
field=models.CharField(default='', max_length=8, validators=[django.core.validators.RegexValidator(message="reg number must be entered in the format: '12180222'. Up to 8 digits allowed.", regex='^[0-9]{8}$')]),
),
]
| 920 |
hardcoded_shit.py
|
MaxHeller0/celinas-kitchen
| 3 |
2026373
|
import os
client_types = {"Base": 0, "A La Carte": 1, "Standing Order": 2, "Catering": 3}
client_attribute_order = ["id", "client_type", "name", "phone", "address",
"delivery", "hash", "tax_exempt", "tax_exempt", "contact",
"contact_phone", "contact_email", "weekly_money", "monday_salads",
"thursday_salads", "salad_dressings", "monday_hotplates",
"tuesday_hotplates", "thursday_hotplates", "allergies",
"dietary_preferences", "protein", "salad_dislikes",
"salad_loves", "hotplate_likes", "hotplate_dislikes",
"hotplate_loves", "general_notes", "salad_notes",
"hotplate_notes"]
client_type_order = ["Base", "A La Carte", "Standing Order", "Catering"]
client_attributes = {}
client_attributes[0] = sorted(["name", "phone", "general_notes"
],
key=lambda x: client_attribute_order.index(x))
client_attributes[1] = sorted(client_attributes[0]
+ ["address", "delivery",
"allergies", "dietary_preferences"],
key=lambda x: client_attribute_order.index(x))
client_attributes[2] = sorted(client_attributes[0]
+ ["address", "delivery",
"allergies", "dietary_preferences",
"weekly_money", "monday_salads", "thursday_salads",
"salad_dressings", "protein", "salad_dislikes",
"salad_loves", "salad_notes", "hotplate_likes",
"hotplate_dislikes", "hotplate_loves",
"hotplate_notes", "monday_hotplates",
"tuesday_hotplates", "thursday_hotplates"],
key=lambda x: client_attribute_order.index(x))
client_attributes[3] = sorted(client_attributes[0]
+ ["address", "delivery",
"tax_exempt", "contact", "contact_phone", "contact_email"],
key=lambda x: client_attribute_order.index(x))
input_types = {
"default_text": ["address", "monday_salads", "thursday_salads",
"monday_hotplates", "tuesday_hotplates", "thursday_hotplates", "contact",
"contact_email"],
"opinion_text": ["protein", "salad_dislikes", "salad_loves",
"hotplate_likes", "hotplate_dislikes", "hotplate_loves",
"allergies"],
"note_text": ["general_notes", "salad_notes", "hotplate_notes"],
"boolean": ["salad_dressings", "delivery", "tax_exempt"],
"money": ["weekly_money"]
}
try:
# connect to production db if running in AWS
db_config = "mysql+mysqldb://{username}:{password}@{server}:{port}/{db}".format(
username=os.environ["RDS_USERNAME"], password=<PASSWORD>["<PASSWORD>"],
server=os.environ["RDS_HOSTNAME"], port=os.environ["RDS_PORT"],
db=os.environ["RDS_DB_NAME"])
except:
# connect to testing db
db_config = "mysql+mysqldb://admin:jOKb7lRRps&smt1bPeW!$@{server}:3306/ebdb".format(
server="celinas-kitchen-testing.czfoxvxyu3gn.us-east-2.rds.amazonaws.com")
| 3,406 |