max_stars_repo_path (string, length 4-182) | max_stars_repo_name (string, length 6-116) | max_stars_count (int64, 0-191k) | id (string, length 7) | content (string, length 100-10k) | size (int64, 100-10k) |
---|---|---|---|---|---|
models.py
|
lucasmansilla/Multi-Atlas_RCA
| 1 |
2170495
|
import SimpleITK as sitk
import os
import numpy as np
import utils
from elastix import register
from binary_metrics import dsc
class MultiAtlasSegmentation(object):
""" Multi-Atlas Segmentation (MAS) model for segmenting chest X-ray images. """
def __init__(self, atlas_paths, atlas_size, image_metric, label_fusion):
import inspect
import image_metrics
import fusion_methods
# Check inputs
similarity_funcs = dict(inspect.getmembers(image_metrics, inspect.isfunction))
if image_metric not in list(similarity_funcs.keys()):
raise ValueError('Invalid image similarity metric')
fusion_funcs = dict(inspect.getmembers(fusion_methods, inspect.isfunction))
if label_fusion not in list(fusion_funcs.keys()):
raise ValueError('Invalid label fusion method')
self.atlas_paths = atlas_paths
self.atlas_size = atlas_size
self.image_metric_func = similarity_funcs[image_metric]
self.is_similarity_metric = image_metrics.is_sim_metric[image_metric]
self.label_fusion_func = fusion_funcs[label_fusion]
def predict_segmentation(self, image_path, output_dir, parameter_map_lst, remove_tmp_dir=True):
""" Predict the segmentation for a given image using an atlas set. """
# Temporary directory for saving Elastix results
tmp_dir = os.path.join(output_dir, 'tmp')
utils.create_dir(tmp_dir)
# Step 1: Atlas selection
atlas_idxs = self._atlas_selection(image_path)
images_lst = [self.atlas_paths['images'][i] for i in atlas_idxs]
labels_lst = [self.atlas_paths['labels'][i] for i in atlas_idxs]
# Step 2: Registration
result_labels = []
for atlas_image, atlas_label in zip(images_lst, labels_lst):
result_label = register(
image_path, atlas_image, atlas_label, parameter_map_lst, tmp_dir)[1]
result_labels.append(sitk.Cast(result_label, sitk.sitkUInt8))
# Step 3: Label propagation
predicted_label = self.label_fusion_func(result_labels)
# Save the predicted label image
predicted_label_path = os.path.join(
output_dir, os.path.splitext(os.path.basename(image_path))[0] + '_labels.png')
sitk.WriteImage(predicted_label, predicted_label_path)
if remove_tmp_dir:
os.system('rm -rf {0}'.format(tmp_dir))
return predicted_label, predicted_label_path, atlas_idxs
def _atlas_selection(self, image_path):
""" Select the atlas set using an image similarity (or dissimilarity) measure. """
image = utils.read_image_arr(image_path)
scores = []
for atlas_image in self.atlas_paths['images']:
scores.append(self.image_metric_func(image, utils.read_image_arr(atlas_image)))
if self.is_similarity_metric:
# Similarity is higher for more similar images
return np.argsort(scores)[-self.atlas_size:]
else:
# Dissimilarity is lower for more similar images
return np.argsort(scores)[:self.atlas_size]
class SingleAtlasClassifier(object):
""" Single-Atlas Classifier for predicting the accuracy of segmented images using the concept
of Reverse Classification Accuracy (RCA). """
def __init__(self, image_path=None, label_path=None):
self.image_path = image_path
self.label_path = label_path
def predict_dice(self, atlas_paths, output_dir, parameter_map_lst, remove_tmp_dir=True):
""" Predict the Dice score for a given segmentation. """
# Temporary directory for saving Elastix results
tmp_dir = os.path.join(output_dir, 'tmp')
utils.create_dir(tmp_dir)
scores = []
for atlas_image, atlas_label in zip(atlas_paths['images'], atlas_paths['labels']):
predicted_label = register(
atlas_image, self.image_path, self.label_path, parameter_map_lst, tmp_dir)[1]
scores.append(dsc(utils.read_image_itk(atlas_label), predicted_label, True))
if remove_tmp_dir:
os.system('rm -rf {0}'.format(tmp_dir))
return np.max(scores)
def set_atlas_path(self, image_path, label_path):
self.image_path = image_path
self.label_path = label_path
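# Hypothetical usage sketch: the paths, the metric name 'ncc', the fusion name
# 'majority_voting' and param_maps (Elastix parameter maps) are assumptions and
# must match what image_metrics, fusion_methods and elastix actually provide.
# atlas_paths = {'images': ['atlas/im1.png', 'atlas/im2.png'],
#                'labels': ['atlas/lb1.png', 'atlas/lb2.png']}
# mas = MultiAtlasSegmentation(atlas_paths, atlas_size=2,
#                              image_metric='ncc', label_fusion='majority_voting')
# label, label_path, idxs = mas.predict_segmentation('chest_xray.png', 'output', param_maps)
# rca = SingleAtlasClassifier(image_path='chest_xray.png', label_path=label_path)
# dice_estimate = rca.predict_dice(atlas_paths, 'output', param_maps)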
| 4,339 |
uploader/tests.py
|
ADiscordUser/adiscorduser-site
| 2 |
2170235
|
from rest_framework import test, authtoken, reverse, status
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.files import File
from .models import Media
from .serializers import MediaSerializer
import pathlib
class MediaCreationTestCase(test.APITestCase):
def setUp(self):
self.client = test.APIClient()
user = get_user_model().objects.create_user( # type: ignore
username="creation",
email="<EMAIL>",
password="<PASSWORD>"
)
user_auth = authtoken.models.Token.objects.get(user=user) # type: ignore
self.client.credentials(HTTP_AUTHORIZATION="Token " + user_auth.key)
def tearDown(self):
# ensure that the media is deleted from the storage system
Media.objects.all().delete()
def verify(self, response):
# make sure the status code is 201
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
# make sure that the media is actually inserted into the db
inserted_media = Media.objects.get(identifier=response.data["identifier"]) # type: ignore
serialized = MediaSerializer(inserted_media)
self.assertEqual(serialized.data, response.data) # type: ignore
def test_uploads(self):
"""Test media uploads and that its response matches the record in the database."""
examples = pathlib.Path(f"{settings.MEDIA_ROOT}examples/")
for example in examples.iterdir():
with open(example, "rb") as fp:
f = File(fp, name=example.name)
r = self.client.post(reverse.reverse("media-list"), {"media": f})
self.verify(r)
class MediaListTestCase(test.APITestCase):
def setUp(self):
self.client = test.APIClient()
# create multiple users for testing filtered querysets
self.fake1 = get_user_model().objects.create(
username="fake1",
email="<EMAIL>",
password="<PASSWORD>"
)
self.fake2 = get_user_model().objects.create(
username="fake2",
email="<EMAIL>",
password="<PASSWORD>"
)
self.fake1_auth = authtoken.models.Token.objects.get(user=self.fake1).key # type: ignore
self.fake2_auth = authtoken.models.Token.objects.get(user=self.fake2).key # type: ignore
def tearDown(self):
# ensure that the media is deleted from the storage system
Media.objects.all().delete()
def test_pagination(self):
"""Tests that pagination specific keys exist."""
response = self.client.get(reverse.reverse("media-list"), HTTP_AUTHORIZATION="Token " + self.fake1_auth) # it doesn't matter whose token we use
# check status code
self.assertEqual(response.status_code, 200)
# make sure pagination keys are there
self.assertIn("count", response.data) # type: ignore
self.assertIn("next", response.data) # type: ignore
self.assertIn("previous", response.data) # type: ignore
self.assertIn("results", response.data) # type: ignore
def test_results_are_from_user(self):
"""Test that the serialized queryset is filtered by the authenticated user."""
# create media using both users
with open(f"{settings.MEDIA_ROOT}examples/png.png", "rb") as fp:
f = File(fp, name="png.png")
# "fake1" user
Media.objects.create(media=f, user=self.fake1)
# "fake2" user
fp.seek(0)
Media.objects.create(media=f, user=self.fake2)
# make request using both users
fake1_response = self.client.get(reverse.reverse("media-list"), HTTP_AUTHORIZATION="Token " + self.fake1_auth)
fake2_response = self.client.get(reverse.reverse("media-list"), HTTP_AUTHORIZATION="Token " + self.fake2_auth)
# check status codes
self.assertEqual(fake1_response.status_code, 200)
self.assertEqual(fake2_response.status_code, 200)
# check "fake1"
fake1_serialized = MediaSerializer(Media.objects.filter(user=self.fake1), many=True)
self.assertEqual(fake1_serialized.data, fake1_response.data["results"]) # type: ignore
# check "fake2"
fake2_serialized = MediaSerializer(Media.objects.filter(user=self.fake2), many=True)
self.assertEqual(fake2_serialized.data, fake2_response.data["results"]) # type: ignore
class MediaInstanceTestCase(test.APITestCase):
def setUp(self):
self.client = test.APIClient()
self.user = get_user_model().objects.create_user( # type: ignore
username="instance",
email="<EMAIL>",
password="<PASSWORD>"
)
self.user_auth = authtoken.models.Token.objects.get(user=self.user).key # type: ignore
def tearDown(self):
# ensure that the media is deleted from the storage system
Media.objects.all().delete()
def test_media_detail(self):
"""Tests getting details (info) on media."""
with open(f"{settings.MEDIA_ROOT}examples/png.png", "rb") as fp:
f = File(fp, name="png.png")
media = Media.objects.create(media=f, user=self.user)
# fetch media instance using endpoint
response = self.client.get(reverse.reverse("media-detail", args=[media.identifier]))
# check status code
self.assertEqual(response.status_code, 200)
# make sure that the serialized data from the db matches the response
serialized = MediaSerializer(media)
self.assertEqual(serialized.data, response.data) # type: ignore
def test_media_deletion(self):
"""Test that media is deleted."""
with open(f"{settings.MEDIA_ROOT}examples/png.png", "rb") as fp:
f = File(fp, name="png.png")
media = Media.objects.create(media=f, user=self.user)
response = self.client.delete(reverse.reverse("media-detail", args=[media.identifier]), HTTP_AUTHORIZATION="Token " + self.user_auth)
# check status code
self.assertEqual(response.status_code, 204)
# make sure that the media doesn't exist anymore
self.assertFalse(Media.objects.filter(pk=media.identifier).exists())
| 6,263 |
actors/projectile.py
|
Catsuko/Westward
| 3 |
2169781
|
import uuid
class Projectile:
def __init__(self, velocity, key):
self.velocity = velocity
self.key = key
def act(self, tile, root):
destination = tile.neighbour(*self.velocity, root)
root_without_projectile = tile.leave(self, root)
origin = tile.neighbour(0, 0, root_without_projectile)
return destination.enter(self, origin, root_without_projectile)
def attempt(self, action, root, *args):
tile, *_ = args
return tile.leave(self, root)
def interact_with(self, other, origin, tile, root):
return origin.leave(self, other.attempt("damage", root, tile))
def receive(self, other, origin, tile, root):
root = tile.find_in(tile.leave(self, root)).enter(other, origin, root)
return self.interact_with(other, origin.find_in(root), tile.find_in(root), root)
def identifies_with(self, key):
return key is self.key
def unique(self):
return Projectile(self.velocity, self.key + str(uuid.uuid1()))
def print_to(self, x, y, media):
return media.with_actor(x, y, self.key)
| 1,115 |
SciDataTool/GUI/DDataPlotter/Ui_DDataPlotter.py
|
enjoyneer87/SciDataTool
| 0 |
2170092
|
# -*- coding: utf-8 -*-
# File generated according to DDataPlotter.ui
# WARNING! All changes made in this file will be lost!
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
from SciDataTool.GUI.WPlotManager.WPlotManager import WPlotManager
class Ui_DDataPlotter(object):
def setupUi(self, DDataPlotter):
if not DDataPlotter.objectName():
DDataPlotter.setObjectName("DDataPlotter")
DDataPlotter.setEnabled(True)
DDataPlotter.resize(1246, 884)
sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(DDataPlotter.sizePolicy().hasHeightForWidth())
DDataPlotter.setSizePolicy(sizePolicy)
DDataPlotter.setCursor(QCursor(Qt.ArrowCursor))
self.gridLayout = QGridLayout(DDataPlotter)
self.gridLayout.setObjectName("gridLayout")
self.horizontalLayout_2 = QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.is_auto_refresh = QCheckBox(DDataPlotter)
self.is_auto_refresh.setObjectName("is_auto_refresh")
sizePolicy1 = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
sizePolicy1.setHorizontalStretch(0)
sizePolicy1.setVerticalStretch(0)
sizePolicy1.setHeightForWidth(
self.is_auto_refresh.sizePolicy().hasHeightForWidth()
)
self.is_auto_refresh.setSizePolicy(sizePolicy1)
self.is_auto_refresh.setMinimumSize(QSize(0, 24))
self.is_auto_refresh.setMaximumSize(QSize(16777215, 24))
self.is_auto_refresh.setChecked(False)
self.horizontalLayout_2.addWidget(self.is_auto_refresh)
self.b_refresh = QPushButton(DDataPlotter)
self.b_refresh.setObjectName("b_refresh")
self.b_refresh.setEnabled(True)
sizePolicy1.setHeightForWidth(self.b_refresh.sizePolicy().hasHeightForWidth())
self.b_refresh.setSizePolicy(sizePolicy1)
self.b_refresh.setMinimumSize(QSize(0, 0))
self.b_refresh.setMaximumSize(QSize(16777215, 16777215))
self.b_refresh.setLayoutDirection(Qt.LeftToRight)
self.horizontalLayout_2.addWidget(self.b_refresh)
self.gridLayout.addLayout(self.horizontalLayout_2, 1, 1, 1, 1)
self.w_scroll = QScrollArea(DDataPlotter)
self.w_scroll.setObjectName("w_scroll")
sizePolicy2 = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Expanding)
sizePolicy2.setHorizontalStretch(0)
sizePolicy2.setVerticalStretch(0)
sizePolicy2.setHeightForWidth(self.w_scroll.sizePolicy().hasHeightForWidth())
self.w_scroll.setSizePolicy(sizePolicy2)
self.w_scroll.setMinimumSize(QSize(200, 0))
self.w_scroll.setMaximumSize(QSize(400, 16777215))
self.w_scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.w_scroll.setWidgetResizable(True)
self.scrollAreaWidgetContents = QWidget()
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.scrollAreaWidgetContents.setGeometry(QRect(0, 0, 398, 823))
self.lay_scroll = QVBoxLayout(self.scrollAreaWidgetContents)
self.lay_scroll.setObjectName("lay_scroll")
self.lay_scroll.setContentsMargins(0, 0, 0, 0)
self.w_plot_manager = WPlotManager(self.scrollAreaWidgetContents)
self.w_plot_manager.setObjectName("w_plot_manager")
sizePolicy1.setHeightForWidth(
self.w_plot_manager.sizePolicy().hasHeightForWidth()
)
self.w_plot_manager.setSizePolicy(sizePolicy1)
self.w_plot_manager.setMinimumSize(QSize(0, 0))
self.lay_scroll.addWidget(self.w_plot_manager)
self.w_scroll.setWidget(self.scrollAreaWidgetContents)
self.gridLayout.addWidget(self.w_scroll, 0, 1, 1, 1)
self.plot_layout = QVBoxLayout()
self.plot_layout.setObjectName("plot_layout")
self.gridLayout.addLayout(self.plot_layout, 0, 0, 2, 1)
self.retranslateUi(DDataPlotter)
QMetaObject.connectSlotsByName(DDataPlotter)
# setupUi
def retranslateUi(self, DDataPlotter):
DDataPlotter.setWindowTitle(
QCoreApplication.translate("DDataPlotter", "Data Plot", None)
)
self.is_auto_refresh.setText(
QCoreApplication.translate("DDataPlotter", "Auto Refresh", None)
)
self.b_refresh.setText(
QCoreApplication.translate("DDataPlotter", "Refresh", None)
)
# retranslateUi
| 4,893 |
cassiopeia/simulator/TreeSimulator.py
|
YosefLab/SingleCellLineageTracing
| 52 |
2169522
|
"""
Abstract class TreeSimulator, for tree simulation module.
All tree simulators are derived classes of this abstract class, and at a minimum
implement a method called `simulate_tree`.
"""
import abc
from cassiopeia.data import CassiopeiaTree
from cassiopeia.mixins import TreeSimulatorError
class TreeSimulator(abc.ABC):
"""
TreeSimulator is an abstract class that all tree simulators derive from.
A TreeSimulator returns a CassiopeiaTree with at least its tree topology
initialized. The character matrix need not be initialized (this is
accomplished instead using a LineageTracingDataSimulator object). The
branch lengths may be interpretable or not depending on the specific
TreeSimulator.
The purpose of the TreeSimulator is to allow users to perform in silico
simulations of single-cell phylogenies, such as tumor phylogenies, organism
development, etc., providing a ground truth phylogeny and thus a means to
evaluate methodologies for reconstructing and analyzing single-cell
phylogenies.
"""
@abc.abstractmethod
def simulate_tree(self) -> CassiopeiaTree:
"""
Simulate a CassiopeiaTree.
The returned tree will have at least its tree topology initialized.
"""
| 1,267 |
Algorithm/3D Algorithms/3dalgo-pseudo-initial.py
|
sagarchotalia/Pick-and-Place-Robot-Eklavya
| 10 |
2170123
|
# We have the height, breadth and length values from the Python script,
# which we need to use in this algorithm.
# This script also needs to take into account the remaining space left in the target bin. For this, it needs to work with OpenCV
# in order to get the coordinates and the contours, which will give the space left in the bin.
if(binWidth < binHeight and binWidth < binDepth):
packByWidth = True
packByHeight = False
elif(binDepth<binHeight and binDepth<binWidth):
packByWidth = False
packByHeight = False
#this implies that packing by depth is true
elif(binHeight<binDepth and binHeight<binWidth):
packByHeight = True
packByWidth = False
def toPack():
toPack = notPacked
notPacked = {}
# Create a new bin called currentBin and check whether the item toPack[0]
# is able to fit in this bin at position (x,y,z)=(0,0,0).
# if toPack[0] does not fit then rotate it (over the six rotation types) until it fits and pack it
# into this bin at position (0,0,0).
for i in range(1, len(toPack)):  # toPack[0] was packed above when the bin was created
currentItem = toPack[i]
fitted = False
p = 0
for p in range(2):
k = 0
while (k < numberOfItems in currentBin) and (not fitted):
binItem = currentBin[k]
if(packByWidth):
pivot = p
elif(packByHeight):
pass  # compute pivot point p for height
else:
pass  # compute pivot point p for depth
# switch (pivot)
# {
# case 0 : Choose (pivotX, pivotY, pivotZ ) as the back lower right corner of binItem
# break
# case 1 : Choose (pivotX, pivotY, pivotZ ) as the front lower left corner of binItem
# break
# case 2 : Choose (pivotX, pivotY, pivotZ ) as the back Upper left corner of binItem
# break
# }
if (currentItem can be packed in currentBin at position (pivotX, pivotY, pivotZ)):
pack currentItem in currentBin at (pivotX, pivotY, pivotZ)
fitted = True
else:
# try rotating the item over its remaining rotation types
while (currentItem cannot be packed in currentBin at position (pivotX, pivotY, pivotZ)) and (not all rotations for currentItem checked):
rotate(currentItem)
if (currentItem can be packed in currentBin at position (pivotX, pivotY, pivotZ)):
pack currentItem into currentBin at position (pivotX, pivotY, pivotZ)
fitted = True
else:
restore currentItem to original rotation type
if not fitted:
add currentItem to the list notPacked
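# The "six rotation types" mentioned above are the axis permutations of a cuboid.
# A minimal helper sketch, assuming an item is a (width, height, depth) tuple:
from itertools import permutations

def rotations(item):
    # Return the distinct orientations of a (w, h, d) cuboid (at most six).
    w, h, d = item
    return set(permutations((w, h, d)))

# Example: rotations((1, 2, 3)) yields all six orderings of (1, 2, 3).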
| 2,665 |
casparser_isin/mf_isin.py
|
codereverser/casparser-isin
| 3 |
2169632
|
from collections import namedtuple
from decimal import Decimal
import re
import sqlite3
from typing import Optional
from rapidfuzz import process
from .utils import get_isin_db_path
RTA_MAP = {
"CAMS": "CAMS",
"FTAMIL": "FRANKLIN",
"FRANKLIN": "FRANKLIN",
"KFINTECH": "KARVY",
"KARVY": "KARVY",
}
SchemeData = namedtuple("SchemeData", "name isin amfi_code type score")
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
class MFISINDb:
"""ISIN database for (Indian) Mutual Funds."""
connection = None
cursor = None
def __enter__(self):
self.initialize()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def initialize(self):
"""Initialize database."""
self.connection = sqlite3.connect(get_isin_db_path())
self.connection.row_factory = dict_factory
self.cursor = self.connection.cursor()
def close(self):
"""Close database connection."""
if self.cursor is not None:
self.cursor.close()
self.cursor = None
if self.connection is not None:
self.connection.close()
self.connection = None
def run_query(self, sql, arguments, fetchone=False):
self_initialized = False
if self.connection is None:
self.initialize()
self_initialized = True
try:
self.cursor.execute(sql, arguments)
if fetchone:
return self.cursor.fetchone()
return self.cursor.fetchall()
finally:
if self_initialized:
self.close()
def scheme_lookup(self, rta: str, scheme_name: str, rta_code: str):
"""
Lookup scheme details from the database
:param rta: RTA (CAMS, KARVY, FTAMIL)
:param scheme_name: scheme name
:param rta_code: RTA code for the scheme
:return:
"""
if rta_code is not None:
rta_code = re.sub(r"\s+", "", rta_code)
sql = """SELECT name, isin, amfi_code, type from scheme"""
where = ["rta = :rta"]
if re.search(r"fti(\d+)", rta_code, re.I) and rta.upper() in ("CAMS", "FRANKLIN", "FTAMIL"):
# Try searching db for Franklin schemes
where_ = ["rta = :rta", "rta_code = :rta_code"]
args = {"rta": "FRANKLIN", "rta_code": rta_code}
sql_statement = "{} WHERE {}".format(sql, " AND ".join(where_))
results = self.run_query(sql_statement, args)
if len(results) != 0:
return results
args = {"rta": RTA_MAP.get(str(rta).upper(), ""), "rta_code": rta_code}
if (
"hdfc" in scheme_name.lower()
and re.search(r"^h\d+$", rta_code, re.I)
and re.search("dividend|idcw", scheme_name, re.I)
):
# Special case for old HDFC funds with scheme codes of format "H\d+"
if re.search("re-*invest", scheme_name, re.I):
where.append("name LIKE '%reinvest%'")
where.append("rta_code like :rta_code_d")
args.update(rta_code_d=f"{rta_code}%")
else:
where.append("rta_code = :rta_code")
sql_statement = "{} WHERE {}".format(sql, " AND ".join(where))
results = self.run_query(sql_statement, args)
if len(results) == 0 and "rta_code" in args:
args["rta_code"] = args["rta_code"][:-1]
results = self.run_query(sql_statement, args)
return results
def isin_lookup(
self, scheme_name: str, rta: str, rta_code: str, min_score: int = 60
) -> SchemeData:
"""
Return the closest matching scheme from MF isin database.
:param scheme_name: Scheme Name
:param rta: RTA (CAMS, KARVY, KFINTECH)
:param rta_code: Scheme RTA code
:param min_score: Minimum score (out of 100) required from the fuzzy match algorithm
:return: isin and amfi_code code for matching scheme.
:rtype: SchemeData
:raises: ValueError if no scheme is found in the database.
"""
if not (
isinstance(scheme_name, str) and isinstance(rta, str) and isinstance(rta_code, str)
):
raise TypeError("Invalid input")
if rta.upper() not in RTA_MAP:
raise ValueError(f"Invalid RTA : {rta}")
results = self.scheme_lookup(rta, scheme_name, rta_code)
if len(results) == 1:
result = results[0]
return SchemeData(
name=result["name"],
isin=result["isin"],
amfi_code=result["amfi_code"],
type=result["type"],
score=100,
)
elif len(results) > 1:
schemes = {
x["name"]: (x["name"], x["isin"], x["amfi_code"], x["type"]) for x in results
}
key, score, _ = process.extractOne(scheme_name, schemes.keys())
if score >= min_score:
name, isin, amfi_code, scheme_type = schemes[key]
return SchemeData(
name=name, isin=isin, amfi_code=amfi_code, type=scheme_type, score=score
)
raise ValueError("No schemes found")
def nav_lookup(self, isin: str) -> Optional[Decimal]:
"""
Return the NAV of the fund on 31st Jan 2018, used for LTCG computations.
:param isin: Fund ISIN
:return: nav value as a Decimal if available, else return None
"""
sql = """SELECT nav FROM nav20180131 where isin = :isin"""
result = self.run_query(sql, {"isin": isin}, fetchone=True)
if result is not None:
return Decimal(result["nav"])
| 5,833 |
info/admin.py
|
MrRobot100/applibro
| 0 |
2170009
|
from django.contrib import admin
from .models import Informacion, Usuarios, Front
# Register your models here.
admin.site.register(Informacion)
admin.site.register(Usuarios)
admin.site.register(Front)
| 202 |
Python/informix-db-delete.py
|
Shreeeram/Code-dump
| 0 |
2169050
|
#!/usr/bin/python
import os
import sys
import getopt
import time
import informixdb
def usage(server):
print "%s -s <server> -d <database> -w <where clause> -n <num rows> -s <sleep seconds> [-v]" % sys.argv[0]
print " -s : DBSERVERNAME, default %s" % server
print " -d : database name, required"
print " -w : where clause, required"
print " -n : number of rows per transaction, optional, default 10"
print " -s : sleep seconds in between each transaction, optional, default 1"
print " -v : verbose output, optional, default off"
# wt4logbf executes onstat to identify the number of threads waiting for a log buffer
# if more than maxlogbfwt threads waiting on logbf are found we will sleep for sleepSeconds
#
# threads waiting for logbf is an indication that HDR is behind and if we do not throttle
# back our deletes, engine performance will drop
def wt4logbf(maxlogbfwt, sleepSeconds):
# execute onstat -g ath and count the number of threads waiting on logbf
logbfwt = int(os.popen("onstat -g ath | grep logbf | wc -l").readlines()[0])
# sleep sleepSeconds and recheck number of waiting threads
# repeat until number of threads waiting for logbf is below maxlogbfwt
while logbfwt >= maxlogbfwt:
print "max logbf waits reached [%d >= %d] sleeping %d seconds" % (logbfwt, maxlogbfwt, sleepSeconds)
sys.stdout.flush()
time.sleep(sleepSeconds)
logbfwt = int(os.popen("onstat -g ath | grep logbf | wc -l").readlines()[0])
def main():
server = os.getenv("INFORMIXSERVER")
database = None
where = None
numDelPerTransaction = 10
sleepSeconds = 1
verbose = False
# parse command line arguments
try:
opts, args = getopt.getopt(sys.argv[1:], "S:d:w:n:s:v?")
except:
usage(server)
sys.exit(2)
for opt, val in opts:
if opt == "-S":
server = val
if opt == "-d":
database = val
if opt == "-w":
where = val
if opt == "-n":
numDelPerTransaction = int(val)
if opt == "-s":
sleepSeconds = int(val)
if opt == "-v":
verbose = True
if opt == "-?":
usage(server)
sys.exit()
# if the required arguments were not passed display the usage and exit
if (numDelPerTransaction < 1) or (sleepSeconds < 0) or (where is None) or (database is None):
usage(server)
sys.exit()
# sql to select the primary key fields (pkcol1 and pkcol2) from table1 that
# meet the user defined where clause
sqlSelect = """
select
pkcol1,
pkcol2
from
table1
where
%s
""" % (where, )
# sql to delete a row by the primary key of table1
sqlDelete = """
delete from
table1
where
pkcol1 = :1 and
pkcol2 = :2
"""
# connect to the database
try:
dbCon = informixdb.connect("%s@%s" % (database, server), autocommit = False)
except informixdb.DatabaseError, e:
print "unable to connect to %s@%s, %ld" % (database, server, e.sqlcode)
sys.exit(2)
# define select and delete cursors
try:
dbSelectCursor = dbCon.cursor(rowformat = informixdb.ROW_AS_OBJECT, hold=True)
dbDeleteCursor = dbCon.cursor()
except informixdb.DatabaseError, e:
print "unable to define cursors, %ld" % (e.sqlcode, )
sys.exit(2)
# set some session attributes
try:
dbSelectCursor.execute("set lock mode to wait")
dbSelectCursor.execute("set isolation dirty read")
except informixdb.DatabaseError, e:
print "unable to set session attributes, %ld" % (e.sqlcode, )
sys.exit(2)
try:
# select the primary key of all rows in table1 that meet our where clause
dbSelectCursor.execute(sqlSelect)
numRowsInTransaction = 0
totalRows = 0
startTime = time.time()
# for each row that meets our where clause, delete it
# committing the transaction and checking engine load at the user
# defined intervals
for dbRow in dbSelectCursor:
if verbose:
print "deleting row pkcol1 = %ld and pkcol2 = %ld" % (dbRow.pkcol1, dbRow.pkcol2)
# attempt to delete this row
try:
dbDeleteCursor.execute(sqlDelete, (dbRow.pkcol1, dbRow.pkcol2))
numRowsInTransaction = numRowsInTransaction + 1
totalRows = totalRows + 1
except informixdb.DatabaseError, e:
print "unable to delete row pkcol1 = %ld and pkcol2 = %ld, %ld" % (dbRow.pkcol1, dbRow.pkcol2, e.sqlcode)
# if we have met our rows-to-delete-per-transaction limit,
# commit the transaction, sleep and check engine load
if numRowsInTransaction == numDelPerTransaction:
dbCon.commit()
print "deleted %d rows [%f rows/second], sleeping %d seconds" % (totalRows, totalRows / (time.time() - startTime), sleepSeconds)
sys.stdout.flush()
numRowsInTransaction = 0
time.sleep(sleepSeconds)
wt4logbf(2, 30)
# commit the last transaction
dbCon.commit()
print "deleted %d rows" % totalRows
except informixdb.DatabaseError, e:
print "unable to execute %s, %ld" % (sqlSelect, e.sqlcode)
sys.exit(2)
if __name__ == "__main__":
main()
| 4,564 |
Script/Commands/Messages/buildings_th.py
|
iocaeaniqa/Clash-Of-Clans-Discord-Bot
| 0 |
2169652
|
# Sends a message with the maximum level of each main base building for the given TH level
from Data.Constants.clash_of_clans import MainBuildings
from Data.components import Components
from Data.Constants.useful import Useful
from Script.import_functions import create_embed
async def buildings_th_embed(ctx, lvl):
level_th = MainBuildings[lvl]
text_th = ""
for category, buildings in level_th.items():
text_th += f"\n__{category} :__\n"
for building_name, building_max_level in buildings.items():
text_th += f"{building_name} level max : {building_max_level}\n"
embed = create_embed(f"__**TH {lvl} :\n**__", text_th, ctx.guild.me.color, f"buildings_th|{ctx.author.id}", ctx.guild.me.avatar_url)
return embed
async def buildings_th(ctx, lvl):
if lvl > Useful["max_th_lvl"] or lvl < 0:
await ctx.send(f"Town Hall not found\nPlease give a valid TH level : there is no level `{lvl}` TH.", hidden=True)
return
elif lvl == 0:
embed = create_embed("What is your TH level ?", "", ctx.guild.me.color, f"buildings_th|{ctx.author.id}", ctx.guild.me.avatar_url)
await ctx.send(embed=embed, components=Components["buildings_th"])
elif 0 < lvl <= Useful["max_th_lvl"]:
embed = await buildings_th_embed(ctx, lvl)
await ctx.send(embed=embed, components=Components["buildings_th"])
return
| 1,393 |
tests/test_formatting.py
|
MiTo0o/Vestaboard
| 19 |
2170434
|
import pytest
from vestaboard.formatter import Formatter
validCharacters = [
[63, 64, 65, 66, 67, 68, 69, 63, 64, 65, 66, 67, 68, 69, 63, 64, 65, 66, 67, 68, 69, 63],
[64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 64],
[65, 0, 0, 0, 8, 1, 16, 16, 25, 0, 2, 9, 18, 20, 8, 4, 1, 25, 0, 0, 0, 65],
[66, 0, 0, 0, 0, 0, 0, 0, 13, 9, 14, 1, 20, 15, 37, 0, 0, 0, 0, 0, 0, 66],
[67, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 67],
[68, 69, 63, 64, 65, 66, 67, 68, 69, 63, 64, 65, 66, 67, 68, 69, 63, 64, 65, 66, 67, 68]
]
validCharactersResult= {
'characters': validCharacters
}
def test_standard_formatting():
assert Formatter()._standard('Love is all you need') == {'text': 'Love is all you need'}, 'Should return a dict with a "text" key and the passed in value.'
def test_raw_formatting():
assert Formatter()._raw(validCharacters) == validCharactersResult, 'Should return a dict with a "characters" key and the passed in list of lists as the value.'
def test_character_conversion_by_letter():
assert Formatter().convert('test') == [20, 5, 19, 20], 'Should convert by letter into a list.'
def test_character_conversion_with_invalid_characters_fails():
with pytest.raises(Exception):
Formatter().convert('^*^')
def test_character_ignores_case():
Formatter().convert('tHiS Is A sCHEdulEd TESt')
def test_character_conversion_by_word():
assert Formatter().convert('test message', byWord=True) == [[20, 5, 19, 20], [13, 5, 19, 19, 1, 7, 5]], 'Should return a list with nested lists - each nested list should contain the character codes.'
def test_word_conversion_with_invalid_characters_fails():
with pytest.raises(Exception):
Formatter().convert('test message^*', byWord=True)
def test_convert_line_fails_if_too_many_characters():
with pytest.raises(Exception):
Formatter().convertLine('This is too many characters for a line')
def test_convert_line_with_centering():
assert len(Formatter().convertLine('test message')) == 22, 'Should return a list with 22 elements'
assert Formatter().convertLine('test message') == [0, 0, 0, 0, 0, 20, 5, 19, 20, 0, 13, 5, 19, 19, 1, 7, 5, 0, 0, 0, 0, 0], 'Should add padding to reach 22 characters'
def test_convert_line_left_justified():
assert len(Formatter().convertLine('Oh hi!', justify='left')) == 22, 'Should return a list with 22 elements'
assert Formatter().convertLine('Oh hi!', justify='left') == [15, 8, 0, 8, 9, 37, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'Should left justify up to 22 characters'
def test_convert_line_right_justified():
assert len(Formatter().convertLine('Oh hi!', justify='right')) == 22, 'Should return a list with 22 elements'
assert Formatter().convertLine('Oh hi!', justify='right') == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 8, 0, 8, 9, 37], 'Should left justify up to 22 characters'
def test_convert_line_with_22_characters_and_single_digit_code_at_end():
assert Formatter().convertLine(
"THU{0}{0}{0}{0}{0}{0}{63}{64}{65}{66}{67}{68}{69}{0}{0}{0}{0}{0}{0}"
) == [20, 8, 21, 0, 0, 0, 0, 0, 0, 63, 64, 65, 66, 67, 68, 69, 0, 0, 0, 0, 0, 0]
def test_convert_line_with_22_characters_and_double_digit_code_at_end():
assert Formatter().convertLine(
"THU{0}{0}{0}{0}{0}{0}{63}{64}{65}{66}{67}{68}{69}{0}{0}{0}{0}{0}{60}"
) == [20, 8, 21, 0, 0, 0, 0, 0, 0, 63, 64, 65, 66, 67, 68, 69, 0, 0, 0, 0, 0, 60]
def test_valid_characters_should_pass():
assert Formatter()._isValid('abcdefghijklmnopqrstuvwxyz1234567890 !@#$()-+&=;:"%,./?°') == True
def test_with_character_code_at_beginning_of_string():
result = Formatter().convertLine('{23}{1} Test')
expected = [0, 0, 0, 0, 0, 0, 0, 0, 23, 1, 0, 20, 5, 19, 20, 0, 0, 0, 0, 0, 0, 0]
assert result == expected
def test_with_character_code_at_end_of_string():
result = Formatter().convertLine('Test {23}{1}')
expected = [0, 0, 0, 0, 0, 0, 0, 0, 20, 5, 19, 20, 0, 23, 1, 0, 0, 0, 0, 0, 0, 0]
assert result == expected
def test_with_character_code_in_middle_of_text():
result = Formatter().convertLine('Test {23}{1} Test')
expected = [0, 0, 0, 0, 0, 20, 5, 19, 20, 0, 23, 1, 0, 20, 5, 19, 20, 0, 0, 0, 0, 0]
assert result == expected
def test_with_text_between_character_codes():
result = Formatter().convertLine('{48}{3} Test {23}{1}')
expected = [0, 0, 0, 0, 0, 0, 48, 3, 0, 20, 5, 19, 20, 0, 23, 1, 0, 0, 0, 0, 0, 0]
assert result == expected
def test_invalid_characters_should_fail():
assert Formatter()._isValid('^*') == False
assert Formatter()._isValid('{100}') == False
assert Formatter()._isValid('{sldkfn}') == False
assert Formatter()._isValid('{}') == False
def test_regex_finds_valid_character_codes():
actual = Formatter()._getEmbeddedCharCodes('{24}{1}')
expected = ['{24}', '{1}']
assert actual == expected
def test_regex_returns_num_of_extra_characters():
t1 = Formatter()._numCharacterCodes('{13}{2}')
e1 = 5
t2 = Formatter()._numCharacterCodes('{23}{25}{25}')
e2 = 9
t3 = Formatter()._numCharacterCodes('There are no codes')
e3 = 0
assert t1 == e1
assert t2 == e2
assert t3 == e3
def test_formatter_accepts_padding_colors():
t1 = Formatter().convertLine('red', color='red')
e1 = [63, 63, 63, 63, 63, 63, 63, 63, 63, 18, 5, 4, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63]
t2 = Formatter().convertLine('orange', color='orange')
e2 = [64, 64, 64, 64, 64, 64, 64, 64, 15, 18, 1, 14, 7, 5, 64, 64, 64, 64, 64, 64, 64, 64]
t3 = Formatter().convertLine('yellow', color='yellow')
e3 = [65, 65, 65, 65, 65, 65, 65, 65, 25, 5, 12, 12, 15, 23, 65, 65, 65, 65, 65, 65, 65, 65]
assert t1 == e1
assert t2 == e2
assert t3 == e3
def test_formatter_fails_invalid_colors():
with pytest.raises(KeyError):
Formatter().convertLine('error', color='pink')
def test_space_buffer_adds_spaces_where_appropriate():
t1 = Formatter().convertLine('center', justify='center', spaceBuffer=True, color='white')
t2 = Formatter().convertLine('left', justify='left', spaceBuffer=True, color='white')
t3 = Formatter().convertLine('right', justify='right', spaceBuffer=True, color='white')
e1 = [69, 69, 69, 69, 69, 69, 69, 0, 3, 5, 14, 20, 5, 18, 0, 69, 69, 69, 69, 69, 69, 69]
e2 = [12, 5, 6, 20, 0, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]
e3 = [69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 0, 18, 9, 7, 8, 20]
assert t1 == e1, 'Should add spacing on both sides of centered text'
assert t2 == e2, 'Should add spacing to the right side of left-justified text'
assert t3 == e3, 'Should add spacing to the left side of right-justified text'
| 6,789 |
examples/fc_corr.py
|
wmvanvliet/dyconnmap
| 42 |
2169675
|
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
import numpy as np
np.set_printoptions(precision=2, linewidth=256)
from dyconnmap import analytic_signal
from dyconnmap.fc import corr, crosscorr, partcorr
from dyconnmap.fc import Corr
if __name__ == "__main__":
data = np.load("data/eeg_32chans_10secs.npy")
n_channels, n_samples = np.shape(data)
fb = [1.0, 4.0]
fs = 128.0
# Correlation
r = corr(data, fb, fs)
print(r)
# Partial correlation
# pr = partcorr(data, fb, fs)
# print(pr)
ro = Corr(fb, fs)
pp_data = ro.preprocess(data)
r = ro.estimate(pp_data)
print(r)
| 633 |
gallery/migrations/0009_auto_20211208_1043.py
|
JoyWambui/instaphotoz
| 0 |
2169611
|
# Generated by Django 3.2.9 on 2021-12-08 07:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('gallery', '0008_auto_20211208_1041'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='followers',
),
migrations.AddField(
model_name='image',
name='profile',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='gallery.profile'),
),
migrations.AlterField(
model_name='followsystem',
name='follower',
field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='profile_follower', to='gallery.profile'),
),
migrations.AlterField(
model_name='followsystem',
name='following',
field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='profile_following', to='gallery.profile'),
),
]
| 1,108 |
periodicals/migrations/0020_re_save_all.py
|
kingsdigitallab/ncse-django
| 0 |
2169874
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
def resave(apps, schema_editor):
print("WARNING: You need to re-save articles, issues\
and publications")
'''
Article = apps.get_model('periodicals', 'Article')
Issue = apps.get_model('periodicals', 'Issue')
Publication = apps.get_model('periodicals', 'Publication')
for a in Article.objects.all():
a.save()
# Just in case
for i in Issue.objects.all():
i.save()
for p in Publication.objects.all():
p.save()
'''
dependencies = [
('periodicals', '0019_change_meta_options_on_article_issue_page'),
]
operations = [
migrations.RunPython(resave),
]
| 862 |
exponent.py
|
CrazyJ36/python
| 0 |
2170212
|
#!/usr/bin/python3
# Exponentiation is raising one number to the
# power of another.
print(1**2)
| 103 |
tests/test_source.py
|
Grindizer/ptolemy
| 1 |
2169975
|
# -*- coding: utf-8 -*-
import tempfile
import unittest
from mock import Mock, patch, sentinel
from jsonschema.exceptions import ValidationError
from ptolemy.exceptions import InvalidFileError
from ptolemy.source import Source
class SourceTestCase(unittest.TestCase):
def setUp(self):
self.source = Source("file/path")
@patch("ptolemy.source.os.getcwd")
def test_init(self, mock_getcwd):
mock_getcwd.return_value = "/path/"
source = Source("file.yaml")
self.assertEqual(source.file_path, "/path/file.yaml")
self.assertEqual(source.source, None)
def test_compile_with_invalid_file(self):
self.source.file_path = "/this/file/does/not.exist"
with self.assertRaises(InvalidFileError):
self.source.compile()
@patch("ptolemy.source.Source._generate_mapping")
@patch("ptolemy.source.Source._validate")
def test_compile_with_valid_file(
self, mock_validate, mock_generate_mapping
):
mock_mapping = Mock()
mock_mapping.to_json.return_value = sentinel.mapping
mock_generate_mapping.return_value = mock_mapping
with tempfile.NamedTemporaryFile() as f:
self.source.file_path = f.name
mapping = self.source.compile()
self.assertEqual(self.source.source, None)
self.assertEqual(mapping, sentinel.mapping)
def test_validate_with_valid_source(self):
self.source.source = {
"selection": {
"include": [
{
"object-locators": {
"schema-names": ["Test"],
"table-names": ["%"]
}
}
]
}
}
self.source._validate()
def test_validate_with_invalid_source(self):
self.source.source = {
"selection": {
"incorrect-key": []
}
}
with self.assertRaises(ValidationError):
self.source._validate()
@patch("ptolemy.source.Source._get_rules")
@patch("ptolemy.source.Mapping")
def test_generate_mapping(self, mock_Mapping, mock_get_rules):
mock_Mapping.return_value.mapping = {}
mock_get_rules.return_value = sentinel.rules
mapping = self.source._generate_mapping()
self.assertEqual(mapping.mapping, {"rules": sentinel.rules})
def test_get_rules(self):
# This is not a very thorough test. It assumes that Source.source
# is correctly formed, which is fair given the source will have
# previously been validated with Source._validate. More extensive tests
# of this area of code are carried out in the integration tests.
self.source.source = {
"selection": {
"include": [
{
"object-locators": {
"schema-names": ["Test"],
"table-names": ["%"]
}
}
]
}
}
expected_rules = [
{
"object-locator": {
"schema-name": "Test",
"table-name": "%"
},
"rule-action": "include",
"rule-type": "selection"
}
]
rules = self.source._get_rules()
self.assertEqual(rules, expected_rules)
def test_get_object_locations(self):
# See comment in test_get_rules().
object_locators = {
"schema-names": ["s1", "s2"],
"table-names": ["t1", "t2"],
"column-names": ["c1", "c2"]
}
expected_object_locations = [
{"column-name": "c1", "schema-name": "s1", "table-name": "t1"},
{"column-name": "c2", "schema-name": "s1", "table-name": "t1"},
{"column-name": "c1", "schema-name": "s1", "table-name": "t2"},
{"column-name": "c2", "schema-name": "s1", "table-name": "t2"},
{"column-name": "c1", "schema-name": "s2", "table-name": "t1"},
{"column-name": "c2", "schema-name": "s2", "table-name": "t1"},
{"column-name": "c1", "schema-name": "s2", "table-name": "t2"},
{"column-name": "c2", "schema-name": "s2", "table-name": "t2"}
]
object_locations = self.source._get_object_locations(object_locators)
self.assertEqual(object_locations, expected_object_locations)
| 4,539 |
seq2seq/criterion/redundancy_loss.py
|
LinjianLi/Seq2Seq-PyTorch
| 0 |
2170461
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# Copyright (c) 2019 Baidu.com, Inc. All Rights Reserved
#
################################################################################
import torch
from torch.nn.modules.loss import _Loss
class RedundancyLoss(_Loss):
"""
RedundancyLoss
"""
def __init__(self):
super(RedundancyLoss, self).__init__()
def forward(self, A):
"""
forward
"""
I = torch.eye(A.size(1))
if A.is_cuda:
I = I.cuda()
norm = torch.bmm(A, A.transpose(1, 2)) - I
norm = torch.sum(
torch.sum(norm.pow(2), dim=2), dim=1) # ** 0.5
loss = norm.mean()
return loss
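# Usage sketch: the loss is the squared Frobenius norm of (A A^T - I) per sample,
# pushing the rows of each matrix A towards orthonormality. Shapes are assumed.
# A = torch.softmax(torch.randn(4, 8, 16), dim=-1)  # (batch, rows, dim)
# loss = RedundancyLoss()(A)                        # scalar tensor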
| 791 |
DeathSwitch/Messages.py
|
henfredemars/python-personal-projects
| 0 |
2170387
|
#Store the messages to be sent out and export as a variable
from email.mime.text import MIMEText
sara_message_text = """
If you are reading this Sara, it means the second test was successful.
"""
sara_message = MIMEText(sara_message_text)
sara_message.add_header("To","*****<EMAIL>")
sara_message.add_header("Subject","I have some more news")
msgs = [sara_message]
| 373 |
tests/tests/urls.py
|
darkslab/django-optimistic-lock
| 0 |
2170238
|
from django.conf.urls import url, include
from django.contrib import admin
admin.autodiscover()
from . import views
urlpatterns = [
url(r'^form/(?P<pk>.+)/$', views.form),
url(r'^admin/', include(admin.site.urls)),
]
| 228 |
unfairGame/sprite/run_grit_shared.py
|
J3G0/gba-sprite-engine
| 0 |
2169481
|
# Sebastiaan - 21/12/2019
# Avoid grit at all costs
import os
import subprocess
# grit pic1.png pic2.png kul.png '-ftc -pS -gB8 -O shared.c'
PATH_TO_USED_PNG = 'C:/Users/Sebastiaan/Desktop/Schakel_IIW/CPP/Project/unfairGame/sprite/'
GRIT_COMMAND = 'grit.exe'
GRIT_PARAMETERS = ' -ftc -pS -gB8 -O shared.c'
SUBFOLDER = 'Used/'
# Generate files
all_files = os.listdir(PATH_TO_USED_PNG + SUBFOLDER)
files_as_subdirectories = []
for file in all_files:
files_as_subdirectories.append('./' + SUBFOLDER + file)
# Build one argument list: all input PNGs followed by the individual grit flags.
combined_arguments = files_as_subdirectories + GRIT_PARAMETERS.split()
subprocess.call([GRIT_COMMAND] + combined_arguments)
# Concat all .c files when there is no // in front of line
# source https://stackoverflow.com/questions/3964681/find-all-files-in-a-directory-with-extension-txt-in-python
data_file = open("./sprite_data/combined_data.h", "w+")
data_file.write("// Created by Python script (run_grit_shared.py) to avoid using grit \n")
data_file.write("\n")
data_file.write("#ifndef GBA_SPRITE_ENGINE_PROJECT_COMBINED_DATA_H \n")
data_file.write("#define GBA_SPRITE_ENGINE_PROJECT_COMBINED_DATA_H \n")
for file in os.listdir(PATH_TO_USED_PNG):
if file.endswith(".c"):
opened_file = open(file, "r")
for line in opened_file:
if not line.startswith("//") and len(line) > 1:
data_file.write(line)
opened_file.close()
data_file.write("#endif \n")
# Now remove all the unneeded generated files
# This might be dangerous but who cares right?
for file in os.listdir(PATH_TO_USED_PNG):
if file.endswith(".c") or file.endswith(".h"):
os.remove(file)
| 1,653 |
util/polynomial.py
|
gabinewman/x-perts
| 0 |
2169235
|
from term import Term
class Polynomial:
"""This is how numbers and units are contained"""
# store the expression as a list of terms
#Initializing numbers
def __init__(self):
self.terms = []
def add_term(self, num):
i = 0
while i < len(self.terms):
if (num.combineable(self.terms[i])):
self.terms[i] = num.combine(self.terms[i])
return
i += 1
else:
self.terms.append(num)
def remove_index(self, index):
self.terms.pop(index)
def clean(self): # removes terms with a zero coefficient
self.terms = [term for term in self.terms if term.coefficient != 0]
def __str__(self):
self.clean()
if (len(self.terms) == 0):
return ""
retstr = str(self.terms[0])
i = 1
while i < len(self.terms):
retstr += " "
if (self.terms[i].coefficient < 0):
copy = self.terms[i].copy()
copy.coefficient = abs(copy.coefficient)
retstr += "- " + str(copy)
else:
retstr += "+ " + str(self.terms[i])
i += 1
return retstr
exp = Polynomial()
vars1 = {"x":2}
vars2 = {"x":1, "y":1}
vars3 = {"x":2}
vars4 = {"q" : 0}
t = [Term(12, vars1), Term(1, vars2), Term(-13, vars3), Term(6, vars4)]
for term in t:
exp.add_term(term)
print(term)
print(exp)
| 1,516 |
examples/midlife_crisis.py
|
stanford-ssi/pylink
| 31 |
2169595
|
#!/usr/bin/env python
import pylink
from pylink import TaggedAttribute as TA
def _net_salary_usd_per_month(model):
return (model.gross_salary_usd_per_month
* (1.0 - model.tax_rate))
def _expenses_usd_per_month(model):
return (model.rent_usd_per_month
+ model.food_usd_per_month
+ model.other_expenses_usd_per_month)
def _savings_usd_per_month(model):
return model.net_salary_usd_per_month - model.expenses_usd_per_month
class GenericFinancialModel(object):
def __init__(self,
gross_salary_usd_per_month=10e3,
rent_usd_per_month=3e3,
food_usd_per_month=500,
other_expenses_usd_per_month=1e3,
tax_rate=0.4):
self.tribute = {
# calculators
'net_salary_usd_per_month': _net_salary_usd_per_month,
'expenses_usd_per_month': _expenses_usd_per_month,
'savings_usd_per_month': _savings_usd_per_month,
# constants
'gross_salary_usd_per_month': gross_salary_usd_per_month,
'tax_rate': tax_rate,
'rent_usd_per_month': rent_usd_per_month,
'food_usd_per_month': food_usd_per_month,
'other_expenses_usd_per_month': other_expenses_usd_per_month,
}
def _months_to_pay_for_car(model):
return float(model.midlife_crisis_car_usd) / model.savings_usd_per_month
extras = {'midlife_crisis_car_usd': TA(110e3, model='Tesla P100d'),
'months_to_pay_for_car': _months_to_pay_for_car,}
m = pylink.DAGModel([GenericFinancialModel()], **extras)
e = m.enum
print('Savings Rate ($/mo): %3g' % m.savings_usd_per_month)
print('Cost of Midlife Crisis ($): %3g' % m.midlife_crisis_car_usd)
print('Car Model: %s' % m.get_meta(e.midlife_crisis_car_usd)['model'])
print('Months to Pay for Crisis: %d' % round(m.months_to_pay_for_car, 0))
| 1,932 |
python/tests/graph_coloring/test_graph.py
|
memgraph/mage
| 67 |
2169006
|
import pytest
from typing import List, Tuple, Any
from mage.graph_coloring_module import Graph
@pytest.mark.parametrize(
"node, neighs",
[(0, [1, 2]), (1, [0, 3, 4]), (2, [0, 4]), (3, [1, 4]), (4, [1, 2, 3])],
)
def test_correct_get_neighbors(graph: Graph, node: int, neighs: List[int]) -> None:
for n in graph.neighbors(node):
assert n in neighs
for n in neighs:
assert n in graph.neighbors(node)
@pytest.mark.parametrize(
"node, neighs",
[(0, [1, 2]), (1, [0, 3, 4]), (2, [0, 4]), (3, [1, 4]), (4, [1, 2, 3])],
)
def test_correct_get_neighbors_with_mapping(
graph_string_labels: Graph, node: int, neighs: List[int]
) -> None:
for n in graph_string_labels.neighbors(node):
assert n in neighs
for n in neighs:
assert n in graph_string_labels.neighbors(node)
@pytest.mark.parametrize(
"node, weight_nodes",
[
(0, [(1, 2), (2, 1)]),
(1, [(0, 2), (3, 3), (4, 1)]),
(2, [(0, 1), (4, 4)]),
(3, [(1, 3), (4, 1)]),
(4, [(1, 1), (2, 4), (3, 1)]),
],
)
def test_correct_get_weighted_neighbors(
graph: Graph, node: int, weight_nodes: List[Tuple[int, float]]
) -> None:
for n in graph.weighted_neighbors(node):
assert n in weight_nodes
for n in weight_nodes:
assert n in graph.weighted_neighbors(node)
@pytest.mark.parametrize(
"node_1, node_2, weight", [(0, 1, 2), (1, 0, 2), (2, 4, 4), (3, 1, 3), (0, 4, 0)]
)
def test_correct_get_weight(
graph: Graph, node_1: int, node_2: int, weight: float
) -> None:
assert graph.weight(node_1, node_2) == weight
@pytest.mark.parametrize(
"node, label",
[
(0, "0"),
(1, "1"),
(2, "2"),
],
)
def test_correct_get_label(graph_string_labels: Graph, node: int, label: Any) -> None:
assert label == graph_string_labels.label(node)
def test_correct_number_of_nodes(graph: Graph) -> None:
assert 5 == graph.number_of_nodes()
def test_correct_number_of_edges(graph: Graph) -> None:
assert 6 == graph.number_of_edges()
def test_correct_length_of_graph(graph: Graph) -> None:
assert 5 == len(graph)
@pytest.fixture
def graph():
nodes = [0, 1, 2, 3, 4]
adj = {
0: [(1, 2), (2, 1)],
1: [(0, 2), (3, 3), (4, 1)],
2: [(0, 1), (4, 4)],
3: [(1, 3), (4, 1)],
4: [(1, 1), (2, 4), (3, 1)],
}
return Graph(nodes, adj)
@pytest.fixture
def graph_string_labels():
nodes = ["0", "1", "2", "3", "4"]
adj = {
"0": [("1", 2), ("2", 1)],
"1": [("0", 2), ("3", 3), ("4", 1)],
"2": [("0", 1), ("4", 4)],
"3": [("1", 3), ("4", 1)],
"4": [("1", 1), ("2", 4), ("3", 1)],
}
return Graph(nodes, adj)
| 2,737 |
Cosmetics/migrations/0006_auto_20200120_2227.py
|
CPU-sangoma/PlentyPot
| 0 |
2169125
|
# Generated by Django 2.2.6 on 2020-01-20 20:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('profiles', '0027_auto_20191213_2336'),
('Cosmetics', '0005_auto_20191229_0057'),
]
operations = [
migrations.RemoveField(
model_name='cosmeticswork',
name='fifthworkdescription',
),
migrations.RemoveField(
model_name='cosmeticswork',
name='fifthworkpic',
),
migrations.RemoveField(
model_name='cosmeticswork',
name='fourthworkdescription',
),
migrations.RemoveField(
model_name='cosmeticswork',
name='fourthworkpic',
),
migrations.RemoveField(
model_name='cosmeticswork',
name='fstworkdescription',
),
migrations.RemoveField(
model_name='cosmeticswork',
name='fstworkpic',
),
migrations.RemoveField(
model_name='cosmeticswork',
name='secworkdescription',
),
migrations.RemoveField(
model_name='cosmeticswork',
name='secworkpic',
),
migrations.RemoveField(
model_name='cosmeticswork',
name='sixthworkdescription',
),
migrations.RemoveField(
model_name='cosmeticswork',
name='sixthworkpic',
),
migrations.RemoveField(
model_name='cosmeticswork',
name='thirdworkdescription',
),
migrations.RemoveField(
model_name='cosmeticswork',
name='thirdworkpic',
),
migrations.CreateModel(
name='ActualCosWork',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Workpic', models.ImageField(blank=True, null=True, upload_to='Foodshop/Foodmenu', verbose_name='Upload the picture of the first item on your menu')),
('Workdes', models.TextField(blank=True, null=True, verbose_name='a short description of the first picture on your menu')),
('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='actualCosWork', to='profiles.BusinessProfile')),
],
),
]
| 2,441 |
KCM_implementation_01152021/Binary/utils.py
|
MJ1021/kcm-code
| 1 |
2169037
|
import torch, torchvision, matplotlib, random, sys
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset
from sklearn.datasets import make_circles, make_moons
from sklearn.model_selection import train_test_split
from PIL import Image
import model
def mixup_data(inputs, targets, alpha, use_cuda):
B = inputs.size()[0]
if (alpha == 0.0):
lam = np.zeros(B, dtype = np.float32)
else:
lam = np.random.beta(alpha, alpha, size=B).astype(np.float32)
lam = np.minimum(lam, 1.0-lam)
input_dim_length = len(inputs.size())
if input_dim_length >= 2:
shape_tiled_inputs = []; transpose_order = [len(inputs.size())-1]
for l in range(1, input_dim_length):
shape_tiled_inputs.append(inputs.size()[l])
transpose_order.append(l-1)
shape_tiled_inputs.append(1)
shape_tiled_inputs[input_dim_length-1] = 1
lam_inputs = np.tile(lam, shape_tiled_inputs).transpose(transpose_order)
else:
lam_inputs = lam
lam_targets = lam
lam_inputs, lam_targets = torch.from_numpy(lam_inputs), torch.from_numpy(lam_targets)
if np.sum(lam) != 0:
index = torch.randperm(B)
else:
index = torch.tensor(np.arange(B))
if use_cuda:
lam_inputs, lam_targets = lam_inputs.cuda(), lam_targets.cuda()
index = index.cuda()
mixed_inputs = (1.0-lam_inputs) * inputs + lam_inputs * inputs[index]
mixed_targets = (1.0-lam_targets) * targets + lam_targets * targets[index]
return mixed_inputs, mixed_targets
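# Usage sketch (shapes and alpha are assumptions): mixup_data draws one
# Beta(alpha, alpha) coefficient per sample and blends each (input, target) pair
# with a randomly permuted partner, as in standard mixup training.
# inputs = torch.randn(32, 3, 32, 32)
# targets = torch.randint(0, 2, (32,)).float() * 2 - 1  # labels in {-1, +1}
# mixed_x, mixed_y = mixup_data(inputs, targets, alpha=0.2, use_cuda=False)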
class PrepareData_twomoon(Dataset):
def __init__(self, X, y, use_cuda=True):
if not torch.is_tensor(X):
self.X = torch.from_numpy(X)
if use_cuda:
self.X = self.X.type(torch.cuda.FloatTensor)
if not torch.is_tensor(y):
self.y = torch.from_numpy(y)
if use_cuda:
self.y = self.y.type(torch.cuda.FloatTensor)
def __len__(self):
return len(self.X)
def __getitem__(self, idx):
return self.X[idx], self.y[idx]
class PrepareData_cifar10(Dataset):
def __init__(self, X, y, transform, use_cuda, seed_num):
self.X = X
self.y = y
self.transform = transform
self.use_cuda = use_cuda
self.seed_num = seed_num
random.seed(self.seed_num)
def __len__(self):
return len(self.X)
def __getitem__(self, idx):
img, target = self.X[idx], self.y[idx]
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
target = torch.from_numpy(np.array(target, np.float32))
return img, target
class LoadAndSplitData():
def __init__(self, dataset, n_samples, noise, class0, class1, sampling_proportion, ratio_train_to_val, ratio_train_to_test, seed_num):
self.dataset = dataset
self.n_samples = n_samples
self.noise = noise
self.class0 = class0
self.class1 = class1
self.sampling_proportion = sampling_proportion
self.ratio_train_to_val = ratio_train_to_val
self.ratio_train_to_test = ratio_train_to_test
self.seed_num = seed_num
def load_and_split_data(self):
x, y = {}, {}
if self.dataset == 'twomoon':
x['train'], y['train'], x['val'], y['val'], x['test'], y['test'] = self.load_and_split_twomoon()
elif self.dataset == 'cifar10':
x['train'], y['train'], x['val'], y['val'], x['test'], y['test'] = self.load_and_split_cifar10()
return x['train'], y['train'], x['val'], y['val'], x['test'], y['test']
def load_and_split_twomoon(self):
if self.sampling_proportion != 1:
sys.exit("In twomoon dataset, sampling is not required. Please set sampling_proportion be 1.0")
x, y = {}, {}
x['whole'], y['whole'] = make_moons(self.n_samples, noise=self.noise) # numpy.ndarray
y['whole'] = y['whole']*2-1
x['whole'] = (x['whole'] - x['whole'].min(axis=0))/(x['whole'].max(axis=0)-x['whole'].min(axis=0))
# Split the whole dataset into train, val, and test.
ratio_train_to_others = 1.0/(1.0/self.ratio_train_to_val+1.0/self.ratio_train_to_test)
x['train'], x['val'], y['train'], y['val'] = train_test_split(x['whole'], y['whole'],
test_size=1.0/(1.0+ratio_train_to_others),
random_state=self.seed_num)
ratio_val_to_test = self.ratio_train_to_test/self.ratio_train_to_val
x['val'], x['test'], y['val'], y['test'] = train_test_split(x['val'], y['val'],
test_size=1.0/(1.0+ratio_val_to_test),
random_state=self.seed_num)
return x['train'], y['train'], x['val'], y['val'], x['test'], y['test']
def load_and_split_cifar10(self):
cifar10_trainset = torchvision.datasets.CIFAR10('./data', train=True, download=True)
cifar10_testset = torchvision.datasets.CIFAR10('./data', train=False, download=True)
classnames = np.array(['airplane', 'automobile', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck'])
class0_number = np.where(classnames == self.class0)[0][0]
class1_number = np.where(classnames == self.class1)[0][0]
x, y = {}, {}
train_multiclass_inputs = cifar10_trainset.data
train_multiclass_targets = np.asarray(cifar10_trainset.targets)
x['train'], y['train'] = self.multiclass_to_binaryclass(train_multiclass_inputs, train_multiclass_targets, class0_number, class1_number)
# Undersample with the given sampling_proportion
if self.sampling_proportion < 1:
_, x['train'], _, y['train'] = train_test_split(x['train'], y['train'], test_size = self.sampling_proportion, random_state=self.seed_num)
# Split the train dataset into train and val.
x['train'], x['val'], y['train'], y['val'] = train_test_split(x['train'], y['train'], test_size=1.0/(1.0+self.ratio_train_to_val), random_state=self.seed_num)
test_multiclass_inputs = cifar10_testset.data
test_multiclass_targets = np.asarray(cifar10_testset.targets)
x['test'], y['test'] = self.multiclass_to_binaryclass(test_multiclass_inputs, test_multiclass_targets,class0_number, class1_number)
return x['train'], y['train'], x['val'], y['val'], x['test'], y['test']
def multiclass_to_binaryclass(self, multiclass_inputs, multiclass_targets, class0_number, class1_number):
class0_idx, class1_idx = np.where(multiclass_targets==class0_number)[0], np.where(multiclass_targets==class1_number)[0]
binaryclass_inputs = np.concatenate((multiclass_inputs[class0_idx], multiclass_inputs[class1_idx]), axis = 0)
binaryclass_targets = np.concatenate((-np.ones(len(class0_idx)), np.ones(len(class1_idx))), axis = 0)
return binaryclass_inputs, binaryclass_targets
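# Usage sketch (editor's addition, hedged): the argument values below are illustrative only;
# 'twomoon' ignores class0/class1 and requires sampling_proportion == 1.0, as enforced in
# load_and_split_twomoon above.
#
#   loader = LoadAndSplitData(dataset='twomoon', n_samples=2000, noise=0.1,
#                             class0=None, class1=None, sampling_proportion=1.0,
#                             ratio_train_to_val=4.0, ratio_train_to_test=4.0, seed_num=0)
#   x_tr, y_tr, x_val, y_val, x_te, y_te = loader.load_and_split_data()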
class BuildModel():
def __init__(self, dataset, hidden_dims, seed_num):
self.dataset = dataset
self.hidden_dims = hidden_dims
self.seed_num = seed_num
def build_model(self):
if self.dataset == 'twomoon':
net = self.build_twomoon_classifier()
elif self.dataset == 'cifar10':
net = self.build_cifar10_classifier()
return net
def build_twomoon_classifier(self):
return model.twomoon_classifier(self.hidden_dims, self.seed_num)
def build_cifar10_classifier(self):
return model.cifar10_classifier(self.seed_num)
| 8,065 |
WorkStatusForNetTrafficWithDjango/WorkStatusForNetTrafficWithDjango/dashboard/migrations/0027_auto_20161203_0547.py
|
Ayi-/WorkStatusForNetTrafficMonitorAndClassify
| 0 |
2169138
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-12-03 05:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0026_categorytitle'),
]
operations = [
migrations.AlterField(
model_name='categorytitle',
name='word',
field=models.CharField(max_length=50, unique=True),
),
migrations.AlterField(
model_name='wordvocabulary',
name='word',
field=models.CharField(max_length=50, unique=True),
),
]
| 638 |
pandaharvester/harvestercloud/aws_unhealthy_nodes.py
|
dougbenjamin/harvester
| 0 |
2169770
|
# Detect and delete nodes that are stuck
from kubernetes import client, config
from subprocess import Popen, PIPE
config.load_kube_config(config_file='YOUR KUBECONFIG FILE')
apis_api = client.CoreV1Api()
# get running nodes
running_nodes = {}
nodes = apis_api.list_node()
for node in nodes.items:
running_nodes[node.metadata.name] = node.spec.provider_id.split('/')[-1]
# get events with FailedMounts and filter them by known error message
failed_mount_events = apis_api.list_namespaced_event(namespace='default', field_selector='reason=FailedMount')
unhealthy_node_ids = set()
for event in failed_mount_events.items:
node_name = event.source.host
if 'Argument list too long' in event.message and node_name in running_nodes:
unhealthy_node_ids.add(running_nodes[node_name])
# set the node as unhealthy using the AWS CLI
command = '/usr/local/bin/aws autoscaling set-instance-health --instance-id {0} --health-status Unhealthy'
for id in unhealthy_node_ids:
command_with_id = command.format(id)
command_list = command_with_id.split(' ')
p = Popen(command_list, stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = p.communicate()
print('------------------------------------')
print(command_with_id)
    print('return code: {0}'.format(p.returncode))
print('output: {0}'.format(output))
print('err: {0}'.format(err))
print('------------------------------------')
| 1,473 |
german_quiz_app/routes.py
|
baskervilski/german-word-quiz
| 0 |
2169651
|
import flask
from german_quiz_app import app
from german_quiz_app import table
from datetime import datetime as dt
from flask import render_template, request, jsonify, abort, url_for, redirect
import pandas as pd
@app.route("/")
def hello_world():
message = "Hello, world!"
return render_template(
"index.html",
title=message,
date_today=dt.now().strftime("%Y-%m-%d"),
flask_debug=app.debug,
app_version=app.config["APP_VERSION"],
enable_cool_new_feature=True,
)
@app.route("/input")
def input():
return render_template("input.html", api_endpoint=app.config['API_ENDPOINT'])
@app.route("/upload_new", methods=['POST'])
def upload_new():
if not request.json:
abort(400)
table.put_item(Item=request.json)
return request.json, 201
@app.route("/show_dict")
def show_dict():
return pd.DataFrame(table.scan()['Items']).to_html()
| 923 |
bin/bpm/bpm_blowout_sim.py
|
CIGOM-Modelacion/tamoc
| 18 |
2166946
|
"""
Bent Plume Model: Blowout simulation
=====================================
Use the ``TAMOC`` `bent_plume_model` to simulate a subsea accidental
oil spill plume. This script demonstrates the typical steps involved in
running the bent bubble model with petroleum fluids in the ocean.
This simulation uses the ambient data stored in the file
`./test/output/test_BM54.nc`. This dataset is created by the test files in the
`./test` directory. Please be sure that all of the tests pass using ``py.test
-v`` at the command prompt before trying to run this simulation.
"""
# <NAME>, December 2014, Texas A&M University <<EMAIL>>.
from __future__ import (absolute_import, division, print_function)
from tamoc import ambient
from tamoc import dbm
from tamoc import dispersed_phases
from tamoc import bent_plume_model
from datetime import datetime
from netCDF4 import date2num
import numpy as np
if __name__ == '__main__':
# Get the ambient CTD profile data
nc = '../../tamoc/test/output/test_BM54.nc'
try:
# Open the lake dataset as a Profile object if it exists
ctd = ambient.Profile(nc, chem_names='all')
except RuntimeError:
# Tell the user to create the dataset
        print('CTD data not available; run test cases in ./test first.')
        raise
# Insert a constant crossflow velocity
z = ctd.nc.variables['z'][:]
ua = np.zeros(z.shape) + 0.09
data = np.vstack((z, ua)).transpose()
symbols = ['z', 'ua']
units = ['m', 'm/s']
comments = ['measured', 'arbitrary crossflow velocity']
ctd.append(data, symbols, units, comments, 0)
# Jet initial conditions
z0 = 1000.
U0 = 0.
phi_0 = -np.pi / 2.
theta_0 = 0.
D = 0.3
Tj = 273.15 + 35.
Sj = 0.
cj = 1.
chem_name = 'tracer'
# Create the stratified plume model object
bpm = bent_plume_model.Model(ctd)
# Create the gas phase particles
composition = ['methane', 'ethane', 'propane', 'oxygen']
yk = np.array([0.93, 0.05, 0.02, 0.0])
gas = dbm.FluidParticle(composition)
disp_phases = []
# Larger free gas bubbles
mb0 = 5. # total mass flux in kg/s
de = 0.005 # bubble diameter in m
lambda_1 = 0.85
(m0, T0, nb0, P, Sa, Ta) = dispersed_phases.initial_conditions(
ctd, z0, gas, yk, mb0, 2, de, Tj)
disp_phases.append(bent_plume_model.Particle(0., 0., z0, gas, m0, T0,
nb0, lambda_1, P, Sa, Ta, K=1., K_T=1., fdis=1.e-6, t_hyd=0.,
lag_time=False))
# Smaller free gas bubbles
mb0 = 5. # total mass flux in kg/s
de = 0.0005 # bubble diameter in m
lambda_1 = 0.95
(m0, T0, nb0, P, Sa, Ta) = dispersed_phases.initial_conditions(
ctd, z0, gas, yk, mb0, 2, de, Tj)
disp_phases.append(bent_plume_model.Particle(0., 0., z0, gas, m0, T0,
nb0, lambda_1, P, Sa, Ta, K=1., K_T=1., fdis=1.e-6, t_hyd=0.,
lag_time=False))
# Larger oil droplets
oil = dbm.InsolubleParticle(True, True, rho_p=890., gamma=30.,
beta=0.0007, co=2.90075e-9,
k_bio=3.000e-6,
t_bio=86400.)
mb0 = 10. # total mass flux in kg/s
de = 0.005 # bubble diameter in m
lambda_1 = 0.9
(m0, T0, nb0, P, Sa, Ta) = dispersed_phases.initial_conditions(
ctd, z0, oil, yk, mb0, 2, de, Tj)
disp_phases.append(bent_plume_model.Particle(0., 0., z0, oil, m0, T0,
nb0, lambda_1, P, Sa, Ta, K=1., K_T=1., fdis=1.e-6, t_hyd=0.,
lag_time=False))
# Run the simulation
bpm.simulate(np.array([0., 0., z0]), D, U0, phi_0, theta_0,
Sj, Tj, cj, chem_name, disp_phases, track=True, dt_max=60.,
sd_max = 2000.)
# Plot the full suite of model variables
bpm.plot_all_variables(1)
| 3,866 |
config.py
|
KongXuYao99116/axta
| 0 |
2170531
|
# With debug mode on, changes to the project's Python code take effect without restarting,
# but it must be turned off before going to production
DEBUG=True
# When enabled, template changes also take effect without restarting the server
TEMPLATES_AUTO_RELOAD=True
# Admin (back-end) key
# Key stored for the front end
FRONT_USER_ID = "front_user_id"
# Database configuration
# Database connection
DB_USERNAME='root'
DB_PASSWORD="<PASSWORD>"
DB_HOST="127.0.0.1"
DB_PORT="3306"
DB_NAME="bbs"
DB_URL="mysql+pymysql://%s:%s@%s:%s/%s?charset=utf8" % (DB_USERNAME,DB_PASSWORD,DB_HOST,DB_PORT,DB_NAME)
SQLALCHEMY_DATABASE_URI=DB_URL
SQLALCHEMY_COMMIT_ON_TEARDOWN=False # Whether to automatically commit database changes at the end of each request
SQLALCHEMY_POOL_SIZE = 10 # Size of the database connection pool. Defaults to the engine's default (usually 5).
SQLALCHEMY_MAX_OVERFLOW = 5 # Number of connections that may be created beyond the pool size. These extra connections are closed and discarded once returned to the pool, keeping the pool at its configured size.
SQLALCHEMY_POOL_TIMEOUT = 10 # Timeout of the connection pool in seconds. Defaults to 10.
# Enable the next two options during debugging; disable them when deploying
SQLALCHEMY_TRACK_MODIFICATIONS=False # If set to True (the default), Flask-SQLAlchemy tracks object modifications and emits signals. This needs extra memory and can be disabled if not required.
SQLALCHEMY_ECHO=True # If set to True, SQLAlchemy logs all statements to standard error (stderr), which helps debugging; defaults to False.
SECRET_KEY="123456"
# flask-mail
MAIL_SERVER = 'smtp.qq.com'
MAIL_PROT = 587
MAIL_USE_TLS = True
MAIL_USE_SSL = False
MAIL_USERNAME = "<EMAIL>"
MAIL_PASSWORD = "<PASSWORD>" # SMTP authorization code, not the login password
MAIL_DEFAULT_SENDER='<EMAIL>' # Default sender
#MAIL_USE_TLS uses port 587
#MAIL_USE_SSL uses port 465
# QQ mail does not support sending mail over unencrypted connections
# The rich-text editor uploads to Qiniu cloud storage
import os
UEDITOR_UPLOAD_PATH = os.path.join(os.path.dirname(__file__),'images')
UEDITOR_UPLOAD_TO_QINIU = True
UEDITOR_QINIU_ACCESS_KEY = "<KEY>"
UEDITOR_QINIU_SECRET_KEY = "<KEY>"
UEDITOR_QINIU_BUCKET_NAME = "pjbbs"
UEDITOR_QINIU_DOMAIN = "peouv6xac.bkt.clouddn.com"
| 1,474 |
PyBlend/Blender_Forest.py
|
nfb2021/PrismPyTrace
| 0 |
2170210
|
# >>> This file contains the code for the prism forest to be implemented in Blender <<<
# >>> In order for this to function, you need the library 'pyblend_prism.py'. You can get this file from 'https://github.com/nfb2021/PrismPyTrace/tree/main/PyBlend' <<<
# >>> Then, copy the file to 'C:\Program Files\Blender Foundation\Blender x.x\x.x\scripts\modules\pyblend_prism.py' <<<
# >>> Once this is done, open Blender and click on 'General' to open a new work space <<<
# >>> Click on one of the two smaller windows on the right side of the blender main window (Should contain 'scene collection' and 'cube') <<<
# >>> Using the cursor you can adapt the size of each window according to your liking <<<
# >>> Then, after selecting one of the two windows, press 'Shift' + 'F11' to open the editor <<<
# >>> Click on 'open' and select the 'Blender_Forest.py' file <<<
# >>> All code required is written below. Just run the script in Blender using 'Alt' + 'P' <<<
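# >>> Editor's note (assumed format): the loop below skips one header line and then reads semicolon-separated rows of "x;y;z;width;alpha", <<<
# >>> so 'Prism_Forest_Example.txt' is expected to look roughly like this: <<<
# >>> x;y;z;width;alpha <<<
# >>> 0.0;0.0;0.0;2.5;30.0 <<<
# >>> 4.0;-1.5;0.0;1.8;45.0 <<<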
from pyblend_prism import Prism
import os
prism = Prism(20, 100, 60) # initialize class with some standard values
prism.clear_scene()
with open(r'PATH to Prism_Forest_Example.txt', 'r') as f:
lines = f.readlines()
for l, line in enumerate(lines):
if l == 0:
continue
x = float(line.split(';')[0])
y = float(line.split(';')[1])
z = float(line.split(';')[2])
width = float(line.split(';')[3])
alpha = float(line.split(';')[4])
new_loc = (x, y, 0)
prism_obj = prism.define_prism(loc = new_loc, angle = alpha, base_width = width)
prism.link_prism(prism_obj)
| 1,570 |
nia/routes.py
|
keystro/Nia_IoT
| 0 |
2170372
|
from flask import render_template, url_for, session, flash, redirect, request, jsonify, Blueprint
from nia import db, login_manager
from nia.forms import SignupForm, LoginForm, NewdeviceForm
from flask_login import login_user, current_user, logout_user, login_required
from nia.models import User, Device, Telemetry, Methods
import secrets, random
views = Blueprint('views',__name__)
@views.route('/')
@views.route('/index')
def index():
return render_template('index.html')
@views.route('/login', methods=['POST', 'GET'])
def login():
if current_user.is_authenticated:
return redirect(url_for('views.index'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user and user.password == form.password.data:
login_user(user, remember=form.remember.data)
session['email'] = request.form['email']
flash('Login Successful','success')
return redirect(url_for('views.index'))
else:
flash('Login Unsuccessful. Please check your login details', 'danger')
return render_template('login.html', form=form)
@views.route('/register', methods=['POST', 'GET'])
def register():
if current_user.is_authenticated:
return redirect(url_for('views.index'))
form = SignupForm()
if form.validate_on_submit():
user = User(username=form.username.data, email=form.email.data, password= form.password.data)
db.session.add(user)
db.session.commit()
flash('User account has been created successfully','success')
return redirect(url_for('views.login'))
return render_template('register.html', form=form)
@views.route('/logout')
@login_required
def logout():
logout_user()
session.pop('email', None)
return redirect(url_for('views.index'))
@views.route('/account')
@login_required
def account():
if current_user.is_authenticated:
        return render_template('account.html', username=current_user.username)
else:
flash('User Authentication required','danger')
return redirect(url_for('views.login'))
@views.route('/adddevice', methods=['GET','POST'])
@login_required
def adddevice():
if current_user.is_authenticated:
groups = Methods.query.all()
group_list=[(i.devicemethod, i.methodname) for i in groups]
form = NewdeviceForm()
form.devicemethod.choices=group_list
device_key = generate_api_key()
if form.validate_on_submit():
device_data = Device(device_name=form.devicename.data, device_IP=form.deciveIP.data, device_method=form.devicemethod.data, api_key=device_key)
db.session.add(device_data)
db.session.commit()
            flash('Device Channel Creation Successful', 'success')
return device_key
return render_template('devices.html')
else:
flash('User Authentication required','danger')
return redirect(url_for('views.login'))
def generate_api_key():
aphrases=['Alpha','Bravo','Charlie','Delta','Echo','Foxtrot','Golf','Hotel','India','Juliet','Kilo','Lima','Mike','November','Oscar','Papa','Quebec','Romeo','Sierra','Tango','Uniform','Victor','Whiskey','X-ray','Yankee','Zulu']
alist=[]
token = secrets.token_hex(24)
alist.append(token)
tag = random.choice(aphrases)+secrets.token_hex(4)
alist.append(tag)
sep = '/'
key = sep.join(alist)
return key
| 3,465 |
2018-12-17/html_parser.py
|
JiangHongSh/TestGit
| 1 |
2169303
|
# -*- coding: utf-8 -*-
import re
import urllib.parse
from urllib import parse
import requests
import os
from bs4 import BeautifulSoup
from skimage.measure import compare_ssim
import cv2
from PIL import Image
class HtmlParser(object):
def parse(self, page_url, html_cont):
if page_url is None or html_cont is None:
return
soup = BeautifulSoup(html_cont,"html.parser")
new_data = self._get_new_data(page_url,soup)
return new_data
def _get_new_data(self, page_url, soup):
res_data={}
res_data['url']=page_url
title_node=soup.find("h1")
res_data['title']=title_node.get_text()
#summary_node = soup.find('div',class_='vxp-media__summary' or'story-body__inner' or 'vxp-media__summary').findAll('p')
summary_node = soup.find('div',attrs = {"class": ["story-body__inner", "vxp-media__summary"]}).findAll('p')
res_data['summary']=summary_node
img_node = soup.findAll('img')
href = []
for imgs in img_node:
href.append(imgs['src'])
print(imgs['src'])
href = list(set(href))
#res_data['imgs']=href
images = []
root = "D:/images/"
for img in href:
path = root + img.split('/')[-1]
try:
r = requests.get(img)
with open(path, 'wb')as f:
f.write(r.content)
f.close()
except BaseException :
print("爬取失败")
continue
images.append(path)
try:
for img in images :
for imgc in images :
print(img)
print(imgc)
crop_size = (256, 256)
imageA = cv2.imread(img)
imageB = cv2.imread(imgc)
img_newA = cv2.resize(imageA, crop_size, interpolation = cv2.INTER_CUBIC)
img_newB = cv2.resize(imageB, crop_size, interpolation = cv2.INTER_CUBIC)
grayA = cv2.cvtColor(img_newA, cv2.COLOR_BGR2GRAY)
grayB = cv2.cvtColor(img_newB, cv2.COLOR_BGR2GRAY)
(score, diff) = compare_ssim(grayA, grayB, full=True)
print("SSIM: {}".format(score))
a = 0.9
b = 1.0
if(float(score)> 0.9 and float(score)< 1.0) :
print(img)
print(imgc)
os.remove(imgc)
print("remove:"+imgc)
continue
except BaseException:
print("已删除或删除失败")
res_data['imgs'] = images
return res_data
| 2,734 |
Python/file-io.py
|
DevAbanoub/Just_A_Developer
| 3 |
2169587
|
def write_file():
try:
out = open("output.txt", "w")
except OSError as e:
print("Cannot open file: {}", e)
return
out.write("Hi! I'm a line of text in this file!\n")
out.write("Me, too!\n")
out.flush()
out.close()
def read_file():
try:
in_file = open("output.txt", "r")
except OSError as e:
print("Cannot open file to read: {}", e)
return
line = in_file.readline()
while line:
print(line.rstrip('\n'))
line = in_file.readline()
in_file.close()
if __name__ == '__main__':
write_file()
read_file()
| 633 |
Modules/Chronos/UIServer.py
|
carlosfelgarcia/TimeManager
| 0 |
2169992
|
"""Server for multithreaded UI."""
from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
class UIServer(object):
"""Server that handles comunications to different UI clients."""
def __init__(self, main, host='localHost', port=2243, bufferSize=1024, maxThreads=2):
"""Constuctor."""
self.__main = main
self.__bufferSize = bufferSize
self.__serverSocket = socket(AF_INET, SOCK_STREAM)
self.startServer(host, port, maxThreads)
self.__serverSocket.close()
def startServer(self, host, port, maxThreads):
"""Start the server and listen for new clients in the specified port.
:param host: Host name of the server.
:type host: str
:param port: Port that connects the server.
:type port: int
:param maxThreads: Maximum number of clients.
:type maxThreads: int
"""
self.__serverSocket.bind((host, port))
self.__serverSocket.listen(maxThreads)
acceptConnections = Thread(target=self.getConnections)
acceptConnections.start()
acceptConnections.join()
def getConnections(self):
"""Handle the connections of the clients."""
while True:
clientUI, clientUIAddress = self.__serverSocket.accept()
print("{client} has connected.".format(client=clientUIAddress))
Thread(target=self.clientConnection, args=(clientUI,)).start()
def clientConnection(self, client):
"""Recived all the commands from the client."""
while True:
cmd = client.recv(self.__bufferSize).decode('UTF-8')
if cmd == 'quit':
client.close()
break
elif cmd == 'current':
current = str(self.__main.getCurrentTimePerProcess())
client.send(bytes(current, "utf8"))
else:
client.send(bytes("Command no implemented", "utf8"))
| 1,965 |
dockit/views/list.py
|
zbyte64/django-dockit
| 5 |
2169380
|
from dockit.paginator import Paginator
from django.core.exceptions import ImproperlyConfigured
from django.views.generic import list as listview
class MultipleObjectMixin(listview.MultipleObjectMixin):
paginator_class = Paginator
document = None
def get_queryset(self):
"""
        Get the list of items for this view. This must be an iterable, and may
        be a queryset (in which case queryset-specific behavior will be enabled).
"""
if self.queryset is not None:
queryset = self.queryset
if hasattr(queryset, '_clone'):
queryset = queryset._clone()
elif self.document is not None:
queryset = self.document.objects.all()
else:
raise ImproperlyConfigured(u"'%s' must define 'queryset' or 'model'"
% self.__class__.__name__)
return queryset
class BaseListView(MultipleObjectMixin, listview.BaseListView):
pass
class ListView(listview.MultipleObjectTemplateResponseMixin, BaseListView):
pass
| 1,064 |
tests/test_bucket_names.py
|
cariad/startifact
| 0 |
2170049
|
from mock import Mock, patch
from startifact import BucketNames
from startifact.parameters import BucketParameter
def test_get(session: Mock) -> None:
bucket_names = BucketNames("/buckets/staging")
bp = BucketParameter(
name="",
session=session,
value="buck",
)
with patch("startifact.bucket_names.BucketParameter", return_value=bp) as bp_cls:
name = bucket_names.get(session)
bp_cls.assert_called_once_with(name="/buckets/staging", session=session)
assert name == "buck"
| 534 |
src/lib/models/networks/sphere/SphereConv2d.py
|
BlueHorn07/CenterNet
| 0 |
2169095
|
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from .GridGenerator import GridGenerator
class SphereConv2d(nn.Conv2d):
"""
kernel_size: (H, W)
"""
def __init__(self, in_channels: int, out_channels: int, kernel_size=(3, 3),
stride=1, padding=0, dilation=1,
groups: int = 1, bias: bool = True, padding_mode: str = 'zeros'):
super(SphereConv2d, self).__init__(
in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, bias, padding_mode)
self.grid_shape = None
self.grid = None
def genSamplingPattern(self, h, w):
gridGenerator = GridGenerator(h, w, self.kernel_size, self.stride)
LonLatSamplingPattern = gridGenerator.createSamplingPattern()
# generate grid to use `F.grid_sample`
lat_grid = (LonLatSamplingPattern[:, :, :, 0] / h) * 2 - 1
lon_grid = (LonLatSamplingPattern[:, :, :, 1] / w) * 2 - 1
grid = np.stack((lon_grid, lat_grid), axis=-1)
with torch.no_grad():
self.grid = torch.FloatTensor(grid)
self.grid.requires_grad = False
def forward(self, x):
# Generate Sampling Pattern
B, C, H, W = x.shape
if (self.grid_shape is None) or (self.grid_shape != (H, W)):
self.grid_shape = (H, W)
self.genSamplingPattern(H, W)
with torch.no_grad():
grid = self.grid.repeat((B, 1, 1, 1)).to(x.device) # (B, H*Kh, W*Kw, 2)
grid.requires_grad = False
# x = F.grid_sample(x, grid, align_corners=True, mode='nearest') # (B, in_c, H*Kh, W*Kw)
try:
x = F.grid_sample(x, grid, align_corners=True, mode='nearest') # (B, in_c, H*Kh, W*Kw)
except:
print(x.shape)
print(grid.shape)
assert False
# self.weight -> (out_c, in_c, Kh, Kw)
x = F.conv2d(x, self.weight, self.bias, stride=self.kernel_size)
return x # (B, out_c, H/stride_h, W/stride_w)
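# Usage sketch (editor's addition, hedged): SphereConv2d is used like a regular nn.Conv2d on
# equirectangular feature maps; the shapes below are illustrative only.
#
#   conv = SphereConv2d(3, 16, kernel_size=(3, 3), stride=1)
#   out = conv(torch.randn(2, 3, 64, 128))   # -> (2, 16, 64, 128) for stride 1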
| 1,894 |
squares/results.py
|
Vivokas20/SKEL
| 1 |
2170097
|
import contextlib
import os
import re
import sys
import time
from enum import IntEnum
from logging import getLogger
import sqlparse
from rpy2 import robjects
from . import util
from .dsl import interpreter
logger = getLogger('squares')
class ExitCode(IntEnum):
OK = 0
NON_OPTIMAL = 3
ERROR = 1
SQL_FAILED = 2
SQL_FAILED_NON_OPTIMAL = 4
END_SEARCH_SPACE = 5
start_time = time.time()
specification = None
solution = None
solution_found = False
solution_size = None
n_cubes = 0
blocked_cubes = 0
n_attempts = 0
n_rejects = 0
n_fails = 0
n_blocks = 0
exit_code = ExitCode.ERROR
exceeded_max_loc = False
analysis_time = 0
enum_time = 0
init_time = 0
block_time = 0
empty_output = 0
redundant_lines = 0
def handle_sigint(signal, stackframe):
print()
print_results()
exit(exit_code)
def beautifier(sql):
sql = re.sub(r'\.(?=other(\.other)*`)', '_', sql)
sql = re.sub(r"""`(?=([^"'\\]*(\\.|"([^"'\\]*\\.)*[^"'\\]*"))*[^"']*$)""", '', sql) # remove backticks if not inside strings
return sqlparse.format(sql, reindent=True, keyword_case='upper')
def print_results():
global exit_code
logger.info('Statistics:')
if n_cubes:
logger.info('\tGenerated cubes: %d', n_cubes)
logger.info('\tBlocked cubes: %d (%f / generated avg.)', blocked_cubes, blocked_cubes / n_cubes if n_cubes else 0)
logger.info('\tAttempted programs: %d (approx)', n_attempts)
logger.info('\t\tRejected: %d (approx)', n_rejects)
logger.info('\t\tFailed: %d (approx)', n_fails)
logger.info('\t\tEmpty outputs: %d (%.1f%%) (approx)', empty_output, empty_output / n_attempts * 100 if n_attempts else 0)
logger.info('\t\tRedundant lines: %d (approx)', redundant_lines)
logger.info('\tBlocked programs: %d (%f / attempted avg.) (approx)', n_blocks, n_blocks / n_attempts if n_attempts else 0)
logger.info('\tTotal time spent in enumerator init: %f (approx)', init_time)
logger.info('\tTotal time spent in enumerator: %f (approx)', enum_time)
if enum_time != 0:
logger.info('\t\tEnumerated %f programs/s avg. (just enumeration time)', n_attempts / enum_time)
logger.info('\t\tEnumerated %f programs/s avg. (overall)', n_attempts / (time.time() - start_time))
logger.info('\tTotal time spent in evaluation & testing: %f (approx)', analysis_time)
logger.info('\tTotal time spent blocking cubes/programs: %f (approx)', block_time)
if solution:
logger.info(f'Solution found: {solution}')
logger.info(f'Solution size: {solution_size}')
old_cache = util.get_config().cache_ops
util.get_config().cache_ops = True
interp = interpreter.SquaresInterpreter(specification, True)
evaluation = interp.eval(solution, specification.tables)
assert interp.equals(evaluation, 'expected_output')[0] # this call makes it so that the select() appears in the output
util.get_config().cache_ops = old_cache
try:
program = specification.r_init + interp.program
robjects.r(program)
sql_query = robjects.r(f'sink(); sql_render(out, bare_identifier_ok=T)')
except:
logger.error('Error while trying to convert R code to SQL.')
sql_query = None
exit_code = ExitCode.SQL_FAILED if exit_code != ExitCode.NON_OPTIMAL else ExitCode.SQL_FAILED_NON_OPTIMAL
print()
if util.get_config().print_r:
pass
print("------------------------------------- R Solution ---------------------------------------\n")
print(specification.r_init + '\n' + interp.program)
if sql_query is not None:
print()
print("+++++++++++++++++++++++++++++++++++++ SQL Solution +++++++++++++++++++++++++++++++++++++\n")
print(beautifier(str(sql_query)[6:]))
else:
print('Failed to generate SQL query')
else:
if exceeded_max_loc:
exit_code = ExitCode.END_SEARCH_SPACE
if not solution_found:
print("No solution found")
def update_stats(attempts, rejects, fails, blocks, emptys, enum_t, analysis_t, init_t, block_t, redundant):
global n_attempts, n_rejects, n_fails, n_blocks, empty_output, enum_time, analysis_time, init_time, block_time, redundant_lines
n_attempts += attempts
n_rejects += rejects
n_fails += fails
n_blocks += blocks
empty_output += emptys
enum_time += enum_t
analysis_time += analysis_t
init_time += init_t
block_time += block_t
redundant_lines += redundant
def increment_cubes():
global n_cubes
n_cubes += 1
def store_solution(sol, size: int, optimal: bool):
global solution, solution_size, exit_code, solution_found
solution = sol
solution_size = size
exit_code = ExitCode.OK if optimal else ExitCode.NON_OPTIMAL
solution_found = True
| 4,884 |
op_amp_calc.py
|
tomsmoberly/op_amp_calculator
| 0 |
2170044
|
#!/usr/bin/env python3
# Copyright 2020 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
def get_forced_yn(question):
question = question + ' Please enter [y/n] without brackets.\n'
i = input(question)
while (i.lower() != 'y' and i.lower() != 'n'):
i = input('Invalid response. Please try again.\n' + question)
if(i.lower() == 'y'):
return True
else:
return False
def is_float(float_string):
try:
float(float_string)
return True
except ValueError:
return False
def compress_resistance(expanded):
if(expanded >= 1000000):
return str(expanded/1000000) + 'M'
elif(expanded >= 1000):
return str(expanded/1000) + 'K'
return str(expanded)
def expand_resistance(shorthand, r_min, r_max):
if(len(shorthand) == 0):
return -2
mult = 1
if shorthand[-1].lower() == 'm':
mult = 1000000
shorthand = shorthand[:-1]
elif shorthand[-1].lower() == 'k':
mult = 1000
shorthand = shorthand[:-1]
if not is_float(shorthand):
return -1
else:
# print("Min, val, max: ",r_min,(float(shorthand)*mult),r_max)
if(r_min != -2):
if((float(shorthand)*mult) < r_min):
return -2
if(r_max != -2):
if((float(shorthand)*mult) > r_max):
return -2
return float(shorthand)*mult
def get_float_or_nothing(message):
i = input(message + '\n')
while not is_float(i) and len(i) > 0:
i = input('Invalid input. ' + message + '\n')
if len(i) == 0:
return None
return float(i)
def get_nearest_gain_vals(vals, gain, inverting, undershoot, overshoot):
nearest_gain = -1
nearest_rf = -1
nearest_rin = -1
for rf in vals:
for rin in vals:
if inverting:
this_gain = rf/rin
if abs(this_gain - gain) < abs(gain - nearest_gain):
if (not overshoot or this_gain > gain) and (not undershoot or this_gain < gain):
nearest_gain = this_gain
nearest_rf = rf
nearest_rin = rin
else:
this_gain = (1+rf/rin)
if abs((this_gain) - gain) < abs(gain - nearest_gain):
if (not overshoot or this_gain > gain) and (not undershoot or this_gain < gain):
nearest_gain = this_gain
nearest_rf = rf
nearest_rin = rin
return (nearest_gain, nearest_rf, nearest_rin)
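# Worked example (editor's addition): for an inverting target gain of 10 with the illustrative
# resistor subset below, the search finds an exact pair.
#
#   get_nearest_gain_vals([1000, 2200, 10000], 10, inverting=True,
#                         undershoot=False, overshoot=False)
#   # -> (10.0, 10000, 1000), i.e. R_f = 10k, R_in = 1k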
def op_amp_gain_calc():
try:
with open('resistor_values.txt') as fp:
vals = fp.read().splitlines()
except FileNotFoundError:
print('File resistor_values.txt was not found. Aborting.')
return False
i = input('What is the desired gain?\n')
while not is_float(i):
i = input('What is the desired gain?\n')
gain = float(i)
i = input('Is the amp [i]nverting or [n]on-inverting?\n')
while (i.lower() != 'i' and i.lower() != 'n'):
i = input('Invalid response. Please enter one of the letters in brackets to choose.\nIs the amp [i]nverting or [n]on-inverting?\n')
inverting = True if i.lower() == 'i' else False
r_min = input('If you would like to limit the MINIMUM resistor value, enter the min value now (example: 2.2k). Else leave line blank.\n')
r_max = input('If you would like to limit the MAXIMUM resistor value, enter the max value now (example: 680k). Else leave line blank.\n')
r_min = expand_resistance(r_min, -2, -2)
r_max = expand_resistance(r_max, -2, -2)
expanded_vals = list()
for val in vals:
expanded_val = expand_resistance(val, r_min, r_max)
if(expanded_val < 0):
if(expanded_val == -1):
print('Could not expand value and it will not be used:', val)
else:
expanded_vals.append(expanded_val)
    overshoot = get_forced_yn('If an exact match is not possible, does the gain need to be LARGER than the target?')
    undershoot = False
    if not overshoot:
        undershoot = get_forced_yn('If an exact match is not possible, does the gain need to be SMALLER than the target?')
gain, rf, rin = get_nearest_gain_vals(expanded_vals, gain, inverting, undershoot, overshoot)
print('Nearest R_f =', compress_resistance(rf))
print('Nearest R_in =', compress_resistance(rin))
print('Nearest gain =', gain)
i = input('Enter an input voltage to calculate the output voltage with this gain. Leave blank to skip.\n')
while (not is_float(i) and len(i) > 0):
i = input('Invalid input. Enter an input voltage to calculate the output voltage with this nearest gain. Leave blank to skip.\n')
if(len(i) == 0):
return True
input_voltage = float(i)
print('Output voltage =', input_voltage*gain)
return True
def main():
op_amp_gain_calc()
if __name__ == '__main__':
main()
| 5,981 |
terminal_mnist.py
|
sebag90/NeuralNetwork
| 0 |
2168678
|
from nn.network import Network
from sklearn.metrics import accuracy_score
import numpy as np
from sklearn.metrics import confusion_matrix
import tensorflow as tf
from sklearn import metrics
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = np.reshape(x_train, (x_train.shape[0], 28 * 28))
x_test = np.reshape(x_test, (x_test.shape[0], 28 * 28))
y_train = tf.keras.utils.to_categorical(y_train)
x_train = (x_train/255).astype('float32')
x_test = (x_test/255).astype('float32')
net = Network()
net.init(input_dimension=784, loss_function="cross entropy", layers=[
{"units": 128, "activation": "relu", "type":"dense"},
{"units": 64, "activation": "relu", "type":"dense"},
{"units": 10, "activation": "softmax", "type":"dense"}
])
net.fit(x_train, y_train, epochs=10)
y_pred = net.predict(x_test)
y_pred = np.argmax(y_pred, axis=1)
cmatrix = confusion_matrix(y_test, y_pred)
print(cmatrix)
print(f"Accuracy score: {metrics.accuracy_score(y_test, y_pred):10.5}")
| 1,020 |
simple_ml/bayes.py
|
Yangruipis/simple_ml
| 25 |
2169483
|
# -*- coding:utf-8 -*-
from __future__ import division, absolute_import
from simple_ml.base.base_error import *
from simple_ml.evaluation import *
from simple_ml.base.base_model import *
from simple_ml.base.base_enum import *
import numpy as np
__all__ = [
'NaiveBayes',
'BayesMinimumRisk',
'BayesMinimumError'
]
class NaiveBayes(BaseClassifier):
__doc__ = "Naive Bayes Classifier"
def __init__(self):
super(NaiveBayes, self).__init__()
self.is_feature_binary = []
self.prob_array = None
def fit(self, x, y):
super(NaiveBayes, self).fit(x, y)
if self.label_type == LabelType.continuous:
raise LabelTypeError
self.is_feature_binary = list(map(self._is_binary, self.x.T))
self._fit()
@staticmethod
def _is_binary(column):
keys = np.unique(column)
if np.array_equal(keys, np.array([0, 1])):
return True
elif keys.shape[0] == 2:
raise LabelTypeError("Binary label must be 0 or 1")
return False
@staticmethod
def _get_normal_prob(value, mu, sigma2):
cons = np.sqrt(2 * np.pi * sigma2)
return 1 / cons * np.exp(- (value - mu)**2 / (2 * sigma2))
@property
def posterior_prob(self):
return self.prob_array
def _fit(self):
y_values = np.unique(self.y)
        self.prob_array = np.zeros((len(y_values), self.variable_num+1))  # the first column holds the prior probability of each label
        self.continuous_record_dict = {}  # maps positions of continuous features in prob_array to their mean and variance
for i, y in enumerate(y_values):
y_amount = self.y[self.y == y].shape[0]
            self.prob_array[i, 0] = (y_amount + 1) / (self.sample_num + len(y_values))  # Laplace smoothing
for j, is_binary in enumerate(self.is_feature_binary):
                feature_series = self.x[self.y == y, j]  # only 0/1 values here; store P(feature=1) and obtain P(feature=0) as 1 minus it
if is_binary:
self.prob_array[i, j+1] = (np.sum(feature_series) + 1) / (y_amount + len(np.unique(self.x[:, j])))
else:
mu = np.mean(feature_series)
sigma2 = np.var(feature_series)
self.prob_array[i, j+1] = -1
self.continuous_record_dict[(i, j+1)] = (mu, sigma2)
def predict(self, x):
if self.prob_array is None:
raise ModelNotFittedError
super(NaiveBayes, self).predict(x)
return np.array(list(map(self._predict_single_sample, x)))
def _predict_single_sample(self, x):
        # 1. evaluate the continuous-feature entries of prob_array for this sample
p = np.ones(self.prob_array.shape[0])
for i in range(self.prob_array.shape[0]):
            p *= self.prob_array[i, 0]  # multiply in the prior first
for j in range(1, self.prob_array.shape[1]):
if self.prob_array[i, j] == -1:
mu, sigma2 = self.continuous_record_dict[(i, j)]
p[i] *= self._get_normal_prob(x[j-1], mu, sigma2)
else:
if x[j-1] == 1:
p[i] *= self.prob_array[i, j]
else:
p[i] *= (1 - self.prob_array[i, j])
return np.argmax(p)
def score(self, x, y):
super(NaiveBayes, self).score(x, y)
y_predict = self.predict(x)
if self.label_type == LabelType.binary:
return classify_f1(y_predict, y)
else:
return classify_f1_macro(y_predict, y)
def classify_plot(self, x, y, title=""):
classify_plot(self.new(), self.x, self.y, x, y, title=self.__doc__ + title)
def new(self):
return NaiveBayes()
class BayesMinimumError(BaseClassifier):
__doc__ = "Bayes Minimum Error"
def __init__(self):
super(BayesMinimumError, self).__init__()
self._mu = None
self._sigma = None
self._prior = None
self.labels = None
@property
def sigma(self):
return self._sigma
@property
def mu(self):
return self._mu
def fit(self, x, y):
super(BayesMinimumError, self).fit(x, y)
if self.label_type == LabelType.continuous:
raise LabelTypeError
self._get_normal_distribution()
def _get_normal_distribution(self):
_y = np.unique(self.y)
self.labels = _y
self._prior = [len(self.y[self.y == i]) / self.sample_num for i in _y]
self._mu = []
self._sigma = []
for i in _y:
_x = self.x[self.y == i]
self._mu.append(self._get_mu(_x))
self._sigma.append(self._get_sigma(_x))
def _get_probability(self, x):
res = []
for i in range(len(self._mu)):
temp = 1/(np.sqrt(2*np.pi) ** self.variable_num * np.sqrt(np.linalg.det(self._sigma[i])))
temp *= np.exp(-1 / 2 * np.dot(np.dot((x - self._mu[i]), np.linalg.inv(self._sigma[i])), (x - self._mu[i])))
temp *= self._prior[i]
res.append(temp)
return res
@staticmethod
def _get_mu(x):
return np.mean(x, axis=0)
@staticmethod
def _get_sigma(x):
return np.cov(x.T)
def predict(self, x):
if self._mu is None:
raise ModelNotFittedError
super(BayesMinimumError, self).predict(x)
return np.array([self._predict_single(i) for i in x])
def _predict_single(self, x):
res = self._get_probability(x)
return self.labels[np.argmax(res)]
def score(self, x, y):
super(BayesMinimumError, self).score(x, y)
y_predict = self.predict(x)
if self.label_type == LabelType.binary:
return classify_f1(y_predict, y)
else:
return classify_f1_macro(y_predict, y)
def classify_plot(self, x, y, title=""):
classify_plot(self.new(), self.x, self.y, x, y, title=self.__doc__+title)
def new(self):
return BayesMinimumError()
class BayesMinimumRisk(BayesMinimumError):
__doc__ = "Bayes Minimum Risk"
def __init__(self, cost_mat):
"""
        Initialize and store the misclassification cost matrix.
        :param cost_mat: misclassification cost matrix
        Requirements:
        1. m x m, where m is the total number of classes
        2. entry (i, j) is the cost of assigning a sample of class i to class j
        3. rows and columns must be ordered by class value in ascending order,
           i.e. row i corresponds to the i-th label in np.unique(y)
"""
super(BayesMinimumRisk, self).__init__()
self.cost_mat = cost_mat
def fit(self, x, y):
label_num = len(np.unique(y))
if self.cost_mat.shape[0] != label_num or self.cost_mat.shape[1] != label_num:
raise CostMatMismatchError("损失矩阵维度不匹配")
super(BayesMinimumRisk, self).fit(x, y)
def _predict_single(self, x):
prob = self._get_probability(x)
alpha = np.dot(prob, self.cost_mat)
return self.labels[np.argmin(alpha)]
def new(self):
return BayesMinimumRisk(self.cost_mat)
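# Usage sketch (editor's addition, hedged): a binary problem where misclassifying a class-1
# sample as class 0 costs five times more than the opposite error. x_train/y_train and
# x_test are placeholders for the caller's data.
#
#   cost = np.array([[0, 1],
#                    [5, 0]])
#   clf = BayesMinimumRisk(cost)
#   clf.fit(x_train, y_train)
#   y_pred = clf.predict(x_test)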
| 6,898 |
install_jupyter_hub.py
|
Ilyushin/jupyterhub-container-tensorflow2--py3
| 0 |
2170465
|
#!/usr/bin/env python
import os
from subprocess import check_call
import sys
V = os.environ['JUPYTERHUB_VERSION']
pip_install = [
sys.executable, '-m', 'pip', 'install', '--no-cache', '--upgrade',
'--upgrade-strategy', 'only-if-needed',
]
if V == 'master':
req = 'https://github.com/jupyterhub/jupyterhub/archive/master.tar.gz'
else:
version_info = [ int(part) for part in V.split('.') ]
version_info[-1] += 1
upper_bound = '.'.join(map(str, version_info))
vs = '>=%s,<%s' % (V, upper_bound)
req = 'jupyterhub%s' % vs
check_call(pip_install + [req])
| 585 |
Analysis_scripts/qname_ip_map.py
|
hyojoonkim/Meta4
| 0 |
2168219
|
################################################################################
# Python script for qname-ip mapping
# - Description:
# - Author: <NAME> (<EMAIL>)
################################################################################
################################################################################
# Copyright (C) 2020 <NAME> (Princeton University)
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
################################################################################
import argparse, pickle
import os,sys,copy,datetime
#import time,pytz, calendar
import json
#from scapy.all import *
def make_dictionary(input_file):
# dictionary { ip : (set_of_qnames, set_of_int(epochs)) }
ip_qname_map = {}
# Read file
fd = open(input_file, 'r')
entries = fd.readlines()
# Go through file
for e in entries:
epoch, qname, ip_list = "","",""
splitted = e.split("\t")
if len(splitted) == 3:
epoch, qname, ip_list = [str(i) for i in splitted]
else:
print("Weird entry. Print and ignore: ", splitted)
continue
# Get IP list. Make epoch to int.
ip_list = ip_list.rstrip("\n").split(",")
epoch = int(float(epoch))
# Go through IPs
for ip in ip_list:
if ip in ip_qname_map:
val_in_map_tuple = ip_qname_map[ip]
# if qname != val_in_map_tuple[0]:
# print ("Mismatch present: {}:{},{}".format(ip,qname, val_in_map_tuple[0]))
# add this qname to qname set
qname_set = val_in_map_tuple[0]
qname_set.add(qname)
# add this epoch to epoch set
epoch_set = val_in_map_tuple[1]
epoch_set.add(epoch)
# update
val_tuple = (qname_set,epoch_set)
ip_qname_map[ip] = val_tuple
else:
val_tuple = (set([qname]),set([epoch]))
ip_qname_map[ip] = val_tuple
# for e in ip_qname_map:
# print (e,ip_qname_map[e])
return ip_qname_map
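# Example (editor's addition, hedged): a tab-separated input line such as
#   "1581000000.0\texample.com\t93.184.216.34,93.184.216.35\n"
# yields, among others, the entry
#   ip_qname_map['93.184.216.34'] == ({'example.com'}, {1581000000})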
def save_data_as_pickle(data, filename, output_dir):
print ('\nSaving Result: %s\n' %(str(filename) + '.p'))
pickle_fd = open(str(output_dir) + str(filename) + '.p','wb')
pickle.dump(data,pickle_fd)
pickle_fd.close()
def main():
parser = argparse.ArgumentParser(description='Script for qname-ip mapping')
parser.add_argument('-i', dest='input_file', action='store', required=True,
help='tshark script output file')
parser.add_argument('-o', dest='output_pickle', action='store', required=True,
help='Output pickle file')
# Parse
args = parser.parse_args()
# Check number of arguments.
if len(sys.argv[1:])!=4:
print ("\nERROR: Wrong number of parameters. \n")
parser.print_help()
sys.exit(-1)
# Check validity of option values
if os.path.isfile(args.input_file) is False:
print ("\nERROR: Specifid file does not exist. Abort.\n")
parser.print_help()
sys.exit(-1)
# Make dictionary
ip_qname_map = make_dictionary(args.input_file)
save_data_as_pickle(ip_qname_map, args.output_pickle, "./")
if __name__ == '__main__':
main()
| 3,942 |
myapp/migrations/0001_initial.py
|
Ponyhead/CSCI210FinalProject
| 0 |
2170425
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-12-01 04:08
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Division',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
),
migrations.CreateModel(
name='Team',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('titles', models.CharField(max_length=100)),
('division', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='myapp.Division')),
],
),
]
| 1,086 |
eqpy/numpy.py
|
eriknw/eqpy
| 5 |
2169294
|
import sympy
from ._apimappings import numpytosympy
locals().update((key, val) if callable(val) else (key, getattr(sympy, val))
for key, val in numpytosympy.items())
del sympy
| 193 |
rom.py
|
mrsonicblue/peek-scan-2
| 0 |
2170163
|
import hashlib
import zipfile
import zlib
class Rom:
BUF_SIZE = 65536
def __init__(self, path, core):
self.name = path.name
self.path = path
self.stat = path.stat()
self.core = core
self._hashed = False
self._header_crc32 = None
self._header_md5 = None
self._header_sha1 = None
self._noheader_crc32 = None
self._noheader_md5 = None
self._noheader_sha1 = None
def read_chunks(self, f):
# If header exists, first chunk needs to be that big
if self.core.rom_header_size > 0:
yield f.read(self.core.rom_header_size)
while True:
chunk = f.read(Rom.BUF_SIZE)
if not chunk:
break
yield chunk
def open_file(self):
if self.path.suffix == '.zip':
with zipfile.ZipFile(str(self.path)) as zip:
zipfiles = zip.namelist()
if len(zipfiles) == 0:
raise Exception('No files in archive')
with zip.open(zipfiles[0]) as f:
for chunk in self.read_chunks(f):
yield chunk
else:
with open(str(self.path), 'rb') as f:
for chunk in self.read_chunks(f):
yield chunk
def hash(self):
# Some databases skip a portion of the file when hashing
# for some systems. For those systems, we need to calculate
# two sets of hashes.
if self.core.rom_header_size > 0:
self.hash_nonzero_header()
else:
self.hash_zero_header()
def hash_nonzero_header(self):
header_crc32 = 0
header_md5 = hashlib.md5()
header_sha1 = hashlib.sha1()
noheader_crc32 = 0
noheader_md5 = hashlib.md5()
noheader_sha1 = hashlib.sha1()
chunk_iter = iter(self.open_file())
try:
chunk = next(chunk_iter)
header_crc32 = zlib.crc32(chunk, header_crc32)
header_md5.update(chunk)
header_sha1.update(chunk)
except StopIteration:
pass
while True:
try:
chunk = next(chunk_iter)
header_crc32 = zlib.crc32(chunk, header_crc32)
header_md5.update(chunk)
header_sha1.update(chunk)
noheader_crc32 = zlib.crc32(chunk, noheader_crc32)
noheader_md5.update(chunk)
noheader_sha1.update(chunk)
except StopIteration:
break
self._hashed = True
self._header_crc32 = format(header_crc32 & 0xFFFFFFFF, '08x')
self._header_md5 = header_md5.hexdigest()
self._header_sha1 = header_sha1.hexdigest()
self._noheader_crc32 = format(noheader_crc32 & 0xFFFFFFFF, '08x')
self._noheader_md5 = noheader_md5.hexdigest()
self._noheader_sha1 = noheader_sha1.hexdigest()
def hash_zero_header(self):
crc32 = 0
md5 = hashlib.md5()
sha1 = hashlib.sha1()
for chunk in self.open_file():
crc32 = zlib.crc32(chunk, crc32)
md5.update(chunk)
sha1.update(chunk)
self._hashed = True
self._header_crc32 = format(crc32 & 0xFFFFFFFF, '08x')
self._header_md5 = md5.hexdigest()
self._header_sha1 = sha1.hexdigest()
self._noheader_crc32 = self._header_crc32
self._noheader_md5 = self._header_md5
self._noheader_sha1 = self._header_sha1
def crc32(self, with_header=True):
if not self._hashed:
self.hash()
if with_header:
return self._header_crc32
return self._noheader_crc32
def md5(self, with_header=True):
if not self._hashed:
self.hash()
if with_header:
return self._header_md5
return self._noheader_md5
def sha1(self, with_header=True):
if not self._hashed:
self.hash()
if with_header:
return self._header_sha1
return self._noheader_sha1
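# Usage sketch (editor's addition, hedged): `core` is assumed to be an object exposing
# `rom_header_size`, as used throughout the class above.
#
#   from pathlib import Path
#   rom = Rom(Path('roms/example.nes'), core)
#   print(rom.sha1())                   # hash including any header
#   print(rom.sha1(with_header=False))  # hash with the header bytes skipped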
| 4,121 |
main2.py
|
Pooolg/neko-telegram-autopostbot
| 2 |
2169385
|
# -*- coding: utf-8 -*-
import time
import requests
tg_token = "token"
channel = "@example"
def response(r):
if r['ok'] is True:
print('[' + time.ctime(time.time()) + ']', 'OK:', r['ok'], ';#' + str(r['result']['message_id']))
else:
print('[' + time.ctime(time.time()) + ']', 'OK:', r['ok'], '; Error:',r['error_code'],'\n',r['description'],)
def postnekopic():
    picURL = requests.get("https://nekos.life/api/v2/img/neko").json()['url']
    r = requests.get(
        "https://api.telegram.org/bot" + tg_token + "/sendPhoto?chat_id=" + channel + "&photo=" + picURL + "&caption=%23neko").json()
    response(r)
def postbakapic():
    picURL = requests.get("https://nekos.life/api/v2/img/baka").json()['url']
    r = requests.get(
        "https://api.telegram.org/bot" + tg_token + "/sendPhoto?chat_id=" + channel + "&photo=" + picURL + "&caption=%23baka").json()
    response(r)
def postfoxgirlpic():
    picURL = requests.get("https://nekos.life/api/v2/img/fox_girl").json()['url']
    r = requests.get(
        "https://api.telegram.org/bot" + tg_token + "/sendPhoto?chat_id=" + channel + "&photo=" + picURL + "&caption=%23fox_girl").json()
    response(r)
def postpokepic():
    picURL = requests.get("https://nekos.life/api/v2/img/poke").json()['url']
    r = requests.get(
        "https://api.telegram.org/bot" + tg_token + "/sendPhoto?chat_id=" + channel + "&photo=" + picURL + "&caption=%23poke").json()
    response(r)
def postkisspic():
    picURL = requests.get("https://nekos.life/api/v2/img/kiss").json()['url']
    r = requests.get(
        "https://api.telegram.org/bot" + tg_token + "/sendPhoto?chat_id=" + channel + "&photo=" + picURL + "&caption=%23kiss").json()
    response(r)
# The posting functions are defined once above; the loop below only schedules the posts.
while True:
    postnekopic()
    time.sleep(3600)
    postfoxgirlpic()
    time.sleep(3600)
    postpokepic()
    time.sleep(3600)
    postbakapic()
    time.sleep(3600)
    postnekopic()
    time.sleep(100)
    postkisspic()
    time.sleep(3600)
| 2,079 |
aoc/solutions/day8/a.py
|
witchtrash/aoc2021
| 1 |
2170356
|
from aoc.lib.util import Input, get_problem_input
problem_input: Input = get_problem_input()
test_input: Input = get_problem_input(test=True)
def solve(problem_input: Input) -> int:
total = 0
for line in problem_input.lines():
_signals, output = line.split("|")
for s in output.split():
            match len(s):
                case 2 | 3 | 4 | 7:
                    total += 1
return total
def test() -> str:
return str(solve(test_input))
def run() -> str:
return str(solve(problem_input))
| 691 |
logger.py
|
cookster9/cook-crypto-trader
| 0 |
2166152
|
import boto3
from datetime import datetime
import logging
from config import AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY
s3_client = boto3.client('s3',
aws_access_key_id = AWS_ACCESS_KEY_ID,
aws_secret_access_key = AWS_SECRET_ACCESS_KEY,
region_name = 'us-east-2')
def create_logger(logger_name):
logger = logging.getLogger(logger_name)
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler('{}_{}.log'.format(logger_name, datetime.now().strftime('%m%d-%H%M%S')))
fh.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
return logger
def upload_log(logger):
file_name = logger.handlers[0].baseFilename
directory = datetime.now().date().isoformat()
key = "{}/{}".format(directory, file_name.split('/')[-1])
bucket_name = "cook-crypto-trader-logging"
s3_client.upload_file(Filename = file_name, Bucket = bucket_name, Key = key)
def logger(func):
def function_wrapper(*args, **kwargs):
function_name = func.__name__
logger = create_logger(function_name)
logger.info("Now running - {}".format(function_name))
resp = func(logger = logger, *args, **kwargs)
upload_log(logger)
return resp
return function_wrapper
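# Usage sketch (editor's addition): any function decorated with @logger must accept a
# `logger` keyword argument, which the wrapper injects before uploading the log to S3.
#
#   @logger
#   def run_trade(logger=None):
#       logger.info('placing order')
#       return True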
| 1,488 |
src/cirrus/netcdf2postgis.py
|
jairomr/CempaNetcdf2Postgis
| 1 |
2169440
|
import shutil
from glob import glob
from multiprocessing import Pool
from time import time
import numpy as np
import pandas as pd
import xarray as xr
from netCDF4 import Dataset
from cirrus.netCDFtoTIFF import nc2tiff
from cirrus.util.config import is_goias, lats, logger, lons, ormdtype, settings
from cirrus.util.db import engine, save_df_bd
from cirrus.util.hash import data_frame2hash, generate_file_md5
from cirrus.util.functions import ( # isort:skip
exists_in_the_bank,
get_list_nc,
get_time,
save_hash,
)
def netcsf2sql(file_name: str, rootgrp: Dataset, xr_file, force_save_db):
"""_summary_
Args:
file_name (str): _description_
rootgrp (Dataset): _description_
Returns:
bool: _description_
"""
error = False
for name in settings.vars:
try:
tempc = rootgrp.variables[name][:]
logger.info(
f"Processando varivael {name} de {file_name.split('/')[-1]}"
)
vtime, *_ = [
x.flatten()
for x in np.meshgrid(
get_time(file_name), lats, lons, indexing='ij'
)
]
camadas = {}
if len(np.squeeze(tempc)) == 19:
for c, var in enumerate(np.squeeze(tempc), 1):
camadas[f'{name}_{c:02}'] = var.flatten()
nc2tiff(xr_file, name, f'{name}_{c:02}', file_name, c - 1)
else:
camadas = {f'{name}': np.squeeze(tempc).flatten()}
nc2tiff(xr_file, name, name, file_name)
temp_df = pd.DataFrame(
{
'datetime': vtime,
'goias': is_goias['goias'].array,
**camadas,
'point_gid': is_goias.index,
}
)
temp_df = temp_df.dropna(subset=['goias'])
temp_df['datetime'] = pd.to_datetime(temp_df['datetime'])
temp_df = temp_df.drop(['goias'], axis=1) # .set_index('datetime')
df_hash = data_frame2hash(name, temp_df)
if force_save_db or not exists_in_the_bank(df_hash):
logger.info(
f"salvando no banco {file_name.split('/')[0]} {name}"
)
try:
# temp_df.to_sql(
# name, engine, dtype=ormdtype[name], if_exists='append'
# )
save_df_bd(temp_df, name)
save_hash(df_hash)
except Exception:
error = True
                    logger.exception('Error while saving to the database')
else:
                logger.info('Already in the database')
except Exception:
logger.exception('What?!')
error = True
return error
def load_file(args):
"""_summary_
Args:
file (_type_): _description_
Returns:
_type_: _description_
"""
file, force_save_db = args
logger.info(f'force = {force_save_db} in {file}')
file_hash = generate_file_md5(file)
if (not exists_in_the_bank(file_hash)) or settings.IGNOREHASHFILE:
rootgrp = Dataset(file)
xr_file = xr.open_dataset(file)
if not netcsf2sql(file, rootgrp, xr_file, force_save_db):
save_hash(file_hash)
return {'file': file, 'status': 'sucesso'}
else:
            logger.warning('There was an error')
return {'file': file, 'status': 'error'}
else:
        logger.info('File has already been saved to the database')
return {
'file': file,
'status': 'sucesso',
            'mensagem': 'Already saved to the database',
}
def main(force_save_db=False):
"""_summary_"""
main_start = time()
with Pool(settings.N_POOL) as workers:
result = workers.map(
load_file,
[(file, force_save_db) for file in get_list_nc(settings.BINFILES)],
)
# logger.info(result)
logger.info(
        f'Pool size {settings.N_POOL} force_save = {force_save_db}'
)
    logger.info(f'total time = {time() - main_start}s')
    logger.info(f'creating {settings.BIGTIFF}')
with open(settings.BIGTIFF, 'wb') as wfd:
for file in glob(f'{settings.DIRMAP}/*/*/*.map'):
with open(file, 'rb') as fd:
shutil.copyfileobj(fd, wfd)
if __name__ == '__main__':
main()
| 4,442 |
align.py
|
rlorigro/nanopore_assembly_and_polishing_assessment
| 11 |
2166153
|
from modules.align import *
from handlers.FileManager import FileManager
import argparse
def main(ref_sequence_path, reads_sequence_path, max_threads=None, output_dir=None, minimap_preset="map-ont", k=15):
if output_dir is None:
output_dir = "./"
else:
FileManager.ensure_directory_exists(output_dir)
reads_vs_ref_bam_path = align_minimap(output_dir=output_dir,
ref_sequence_path=ref_sequence_path,
reads_sequence_path=reads_sequence_path,
preset=minimap_preset,
max_threads=max_threads,
k=k)
if __name__ == "__main__":
'''
Processes arguments and performs tasks to generate the pileup.
'''
parser = argparse.ArgumentParser()
parser.add_argument(
"--sequences",
type=str,
required=True,
help="file path of FASTQ or FASTA sequence file"
)
parser.add_argument(
"--ref",
type=str,
required=True,
help="FASTA file path of true reference to be compared against"
)
parser.add_argument(
"--output_dir",
type=str,
required=False,
help="desired output directory path (will be created during run time if doesn't exist)"
)
parser.add_argument(
"--minimap_preset",
type=str,
default="map-ont",
choices=["map-ont", "asm5", "asm10", "asm20"],
required=False,
help="which of the minimap alignment presets to use: 'map-ont', 'asm5', 'asm10', 'asm20'"
)
parser.add_argument(
"--k",
type=int,
default=15,
required=False,
help="what size k-mer to use for minimizers"
)
parser.add_argument(
"--max_threads",
type=int,
default=None,
required=False,
help="how many vCPU to allocate for minimap"
)
args = parser.parse_args()
main(reads_sequence_path=args.sequences,
ref_sequence_path=args.ref,
output_dir=args.output_dir,
minimap_preset=args.minimap_preset,
max_threads=args.max_threads,
k=args.k)
| 2,261 |
dadvisor/__init__.py
|
dadvisor/core
| 0 |
2169316
|
import asyncio
import atexit
from dadvisor.nodes import NodeCollector
from dadvisor.containers import ContainerCollector
from dadvisor.analyzer import Analyzer
from dadvisor.inspector import InspectorThread
from dadvisor.log import log
from dadvisor.nodes.node_actions import remove_node
from dadvisor.stats import StatsCollector
from dadvisor.web import get_app, run_app
def run_forever():
""" Starts the program and creates all tasks """
loop = asyncio.new_event_loop()
# Create objects and threads
node_collector = NodeCollector(loop)
container_collector = ContainerCollector()
traffic_analyzer = Analyzer(container_collector, node_collector, loop)
inspector_thread = InspectorThread(node_collector, container_collector, traffic_analyzer)
stats_collector = StatsCollector(node_collector, container_collector)
app = get_app(loop, node_collector, traffic_analyzer, container_collector)
# Start threads
inspector_thread.start()
# Create tasks
loop.create_task(node_collector.set_my_node_stats())
loop.create_task(run_app(app))
loop.create_task(node_collector.run())
loop.create_task(container_collector.run())
loop.create_task(stats_collector.run())
@atexit.register
def on_exit():
log.info('Stopping loop')
remove_node(loop, node_collector.my_node)
log.info('Started and running')
loop.run_forever()
| 1,410 |
hillSketch/code/sketch/exact.py
|
ViskaWei/hill-sketch
| 3 |
2170521
|
import time
import numpy as np
import pandas as pd
from collections import Counter
from code.data.stream import horner_decode
def get_exact_HH(stream):
print(f'=============exact counting HHs==============')
t0=time.time()
exactHH=np.array(Counter(stream).most_common())
t=time.time()-t0
print('exact counting time:{:.2f}'.format(t))
return exactHH[:,0], exactHH[:,1], t
def get_HH_pd(stream,base,ftr_len, dtype):
HH,freq,t=get_exact_HH(stream)
HHfreq=np.vstack((HH,freq))
mat_decode_HH=horner_decode(HH,base,ftr_len, dtype)
assert (mat_decode_HH.min().min()>=0) & (mat_decode_HH.max().max()<=base-1)
dfHH=pd.DataFrame(np.hstack((mat_decode_HH,HHfreq.T)), columns=list(range(ftr_len))+['HH','freq'])
dfHH['rk']=dfHH['freq'].cumsum()
dfHH['ra']=dfHH['rk']/dfHH['rk'].values[-1]
return dfHH
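# Hypothetical usage sketch (assumes `stream` is a 1-D array of Horner-encoded
# integers and that `base`, `ftr_len` and `dtype` match the encoding expected by
# horner_decode):
#   dfHH = get_HH_pd(stream, base=10, ftr_len=8, dtype=np.int64)
#   print(dfHH[['HH', 'freq', 'ra']].head())  # top heavy hitters and cumulative ratio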
| 849 |
src/frame_matcher/frame_comparer.py
|
JacobEkedahl/detect-intros-from-video
| 5 |
2168719
|
# ------------------------------------------------------------------------- #
# Compare two sets of frame hashes with different image similarity methods  #
# ------------------------------------------------------------------------- #
import cv2
import imagehash
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from utils import constants as c
def find_all_matches_hash_intro(hashes_A, hashes_B, intro_B, threshold):
result = []
result_intro = []
for hash_A in hashes_A:
for hash_B in hashes_B:
diff = hash_A["hash"] - hash_B["hash"]
if diff < threshold:
result.append({"count": hash_A["count"], "sec": hash_A["sec"]})
if intro_B is not None and hash_B["sec"] >= intro_B["start"] and hash_B["sec"] <= intro_B["end"]:
result_intro.append({"count": hash_A["count"], "sec": hash_A["sec"]})
break
return (result, result_intro)
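# Hypothetical usage sketch: `hashes_A`/`hashes_B` are lists of dicts with keys
# "hash" (an imagehash.ImageHash), "sec" and "count"; `intro_B` is None or a dict
# with "start"/"end" seconds; `threshold` is the maximum hash (Hamming) distance.
#   hashes_A = [{"hash": imagehash.phash(Image.open("frame_a.png")), "sec": 0.0, "count": 1}]
#   hashes_B = [{"hash": imagehash.phash(Image.open("frame_b.png")), "sec": 0.0, "count": 1}]
#   matches, intro_matches = find_all_matches_hash_intro(hashes_A, hashes_B, None, threshold=12)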
| 877 |
spampy/spam_classifier.py
|
fossabot/spampy
| 1 |
2170345
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os.path
import scipy.io as sio
import numpy as np
from os.path import join, dirname
from sklearn import svm
from spampy import email_processor
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
# Parent directory
parent_directory_path = dirname(__file__)
# Support Vector Machine
linear_svm = svm.SVC(C=0.1, kernel='linear')
linear_svc = LinearSVC()
def load_training_set():
"""
Load training set and return features and labels.
Returns:
Training features and labels.
"""
# Training set
training_set = join(parent_directory_path, 'datasets/spamTrain.mat')
dataset = sio.loadmat(training_set)
X, y = dataset['X'], dataset['y']
return X, y
def load_test_set():
"""
Load test set and return features and labels.
Returns:
Test features and labels.
"""
training_set = join(parent_directory_path, 'datasets/spamTest.mat')
dataset = sio.loadmat(training_set)
Xtest, ytest = dataset['Xtest'], dataset['ytest']
return Xtest, ytest
def train_svm():
"""
Fit SVM with features and labels.
"""
X, y = load_training_set()
linear_svm.fit(X, y.flatten())
def classify_email(email):
"""
Classify spam possibility of given email.
Args:
email (str):
Raw e-mail.
Returns:
Spam or not.
"""
train_svm()
vocablary_dict = email_processor.get_vocablary_dict()
feature_vector = email_processor.feature_vector_from_email(email, vocablary_dict)
double_dimesion_email = np.reshape(feature_vector, (-1, 1899))
spam_prediction = linear_svm.predict(double_dimesion_email)
return spam_prediction
def classify_email_with_enron(email):
"""
Classify spam possibility of given email with enron dataset.
Args:
email (str):
Raw e-mail.
Returns:
Spam or not.
"""
vocablary_dict = email_processor.create_enron_dictionary()
feature_vector = email_processor.feature_vector_from_email(email, vocablary_dict)
double_dimesion_email = np.reshape(feature_vector, (-1, 3000))
    if not os.path.exists('enron_features_matrix.npy') and not os.path.exists('enron_labels.npy'):
features_matrix, labels = email_processor.extract_enron_features()
np.save('enron_features_matrix.npy', features_matrix)
np.save('enron_labels.npy', labels)
else:
features_matrix = np.load('enron_features_matrix.npy')
labels = np.load('enron_labels.npy')
X_train, _, y_train, _ = train_test_split(features_matrix, labels, test_size=0.40)
linear_svc.fit(X_train, y_train)
return linear_svc.predict(double_dimesion_email)
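# Hypothetical usage sketch (requires the bundled .mat datasets and the helpers in
# spampy.email_processor):
#   from spampy import spam_classifier
#   print(spam_classifier.classify_email("Congratulations, you won a free prize!"))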
| 2,728 |
Sentinel1.py
|
geospatial-jeff/cognition-datasources-sentinel1
| 0 |
2168611
|
import os
import utm
from sentinelsat.sentinel import SentinelAPI
from datasources.stac.query import STACQuery
from datasources.stac.item import STACItem
from datasources.sources.base import Datasource
"""Mappings between API and STAC attributes"""
stac_to_api = {
'sar:polarization': lambda n: {'polarisationmode': ' '.join(n['sar:polarization']['eq'])},
'sar:absolute_orbit': lambda n: {'orbitnumber': n['sar:absolute_orbit']['eq']},
'sar:type': lambda n: {'producttype': n['sar:type']['eq']},
'sar:instrument_mode': lambda n: {'sensoroperationalmode': n['sar:instrument_mode']['eq']},
'eo:epsg': lambda n: {'epsg': n['eo:epsg']['eq']},
    'legacy:lastorbitnumber': lambda n: {'lastorbitnumber': n['legacy:lastorbitnumber']['eq']},
    'legacy:swathidentifier': lambda n: {'swathidentifier': n['legacy:swathidentifier']['eq']}
}
api_to_stac = {
'beginposition': lambda n: {'dr:start_datetime': n['beginposition'], 'datetime': n['beginposition']},
    'endposition': lambda n: {'dr:end_datetime': n['endposition']},
'platformname': lambda n: {'sar:platform': n['platformname'] + n['title'][2], 'sar:constellation': n['platformname']},
'instrumentname': lambda n: {"sar:instrument": n['instrumentname']},
'sensoroperationalmode': lambda n: {"sar:instrument_mode": n['sensoroperationalmode']},
'instrumentshortname': lambda n: {"sar:frequency_band": n["instrumentshortname"][4]},
'polarisationmode': lambda n: {"sar:polarization": n["polarisationmode"].split(' ')},
'orbitdirection': lambda n: {"sar:pass_direction": n["orbitdirection"].lower()},
'producttype': lambda n: {"sar:type": n['producttype']},
'link_icon': lambda n: {"asset_thumbnail": {"href": n['link_icon'], "title": "Thumbnail"}},
'link': lambda n: {"asset_analytic": {"href": n['link'], "title": "SAR Asset"}},
'id': lambda n: {"id": n['id']},
'swathidentifier': lambda n: {"legacy:swathidentifier": n['swathidentifier']},
'lastorbitnumber': lambda n: {"legacy:lastorbitnumber": n['lastorbitnumber']}
}
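# Illustration (hypothetical values): each entry maps one attribute of the full
# property dict to its counterpart in the other vocabulary, e.g.
#   api_to_stac['producttype']({'producttype': 'GRD'})    -> {'sar:type': 'GRD'}
#   stac_to_api['sar:type']({'sar:type': {'eq': 'GRD'}})  -> {'producttype': 'GRD'}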
class Sentinel1(Datasource):
tags = ['SAR', 'Satellite', 'Raster']
def __init__(self, manifest):
super().__init__(manifest)
self.api = SentinelAPI(os.getenv('COPERNICUS_USER'), os.getenv('COPERNICUS_PASSWORD'))
self.api.api_url = "https://scihub.copernicus.eu/dhus/"
def search(self, spatial, temporal=None, properties=None, limit=10, **kwargs):
stac_query = STACQuery(spatial, temporal)
query_body = {'area': stac_query.wkt(),
'limit': limit,
'platformname': 'Sentinel-1',
}
if temporal:
query_body.update({'date': stac_query.temporal})
if properties:
api_props = {}
for prop in properties:
api_props.update(stac_to_api[prop](properties))
query_body.update(api_props)
self.manifest.searches.append([self,query_body])
def execute(self, query):
epsg_check = query.pop('epsg') if 'epsg' in list(query) else None
products = self.api.query(**query)
response = self.api.to_geojson(products)
stac_items = []
for feat in response['features']:
stac_props = {}
# Calculate bbox from coords
if feat['geometry']['type'] == 'MultiPolygon':
xcoords = [x[0] for x in feat['geometry']['coordinates'][0][0]]
ycoords = [y[1] for y in feat['geometry']['coordinates'][0][0]]
feat['geometry']['coordinates'] = feat['geometry']['coordinates'][0]
feat['geometry']['type'] = 'Polygon'
else:
xcoords = [x[0] for x in feat['geometry']['coordinates'][0]]
ycoords = [y[1] for y in feat['geometry']['coordinates'][0]]
feat.update({"bbox": [min(xcoords), min(ycoords), max(xcoords), max(ycoords)]})
# Find EPSG of WGS84 UTM zone from centroid of bbox
centroid = [(feat['bbox'][1] + feat['bbox'][3]) / 2, (feat['bbox'][0] + feat['bbox'][2]) / 2]
utm_zone = utm.from_latlon(*centroid)
            epsg = '32' + ('7' if centroid[0] < 0 else '6') + str(utm_zone[2]).zfill(2)
stac_props.update({'eo:epsg': int(epsg)})
if epsg_check:
if int(epsg) != epsg_check:
continue
# Replace properties with STAC properties
for prop in feat['properties']:
if prop in list(api_to_stac):
stac_props.update(api_to_stac[prop](feat['properties']))
feat['properties'] = stac_props
# Move assets from properties to feature
feat.update({"assets": {"analytic": feat['properties'].pop("asset_analytic"),
"thumbnail": feat['properties'].pop("asset_thumbnail")}})
# Update ID
feat.update({"id": stac_props.pop("id")})
# Validate STAC item
STACItem.load(feat)
stac_items.append(feat)
return stac_items
| 5,123 |
src/feature_dtw/notebook.py
|
H00N24/visual-analysis-of-big-time-series-datasets
| 2 |
2169129
|
from typing import Any
import numpy as np
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from tqdm.notebook import tqdm
from .fdtw import FeatureDTWTransformer as _FeatureDTWTransformer
class FeatureDTWTransformer(_FeatureDTWTransformer):
"""Feature DTW transformer
The Feature DTW transformer [1] transforms a set of time series into a feature space
representation using randomly selected prototypes [2].
The `featuredtw.notebook.FeatureDTWTransformer` uses the `tqdm` library to display
the progress bar of distance computation.
Args:
n_components (int, optional): Number of prototypes. Defaults to 30.
copy_prototypes (bool, optional):
If True it copies prototypes as standalone `np.ndarray` or only use indices
from the original X. Defaults to True.
metric (Union[str, Callable[..., float]], optional):
Distance measure used for creating feature vectors. It's possible to use any
            distance measure from `sklearn.metrics` or a standalone callable function.
Defaults to "euclidean".
metric_param (dict, optional):
Parameters for the distance function. Defaults to {}.
random_state (Optional[int], optional): Random state. Defaults to None.
n_jobs (Optional[int], optional):
The number of parallel jobs. Defaults to None.
References:
[1] <NAME>. (2015). Using dynamic time warping distances as features for
improved time series classification. Data Mining and Knowledge Discovery.
30. 10.1007/s10618-015-0418-x.
[2] <NAME>, <NAME>, <NAME>, <NAME>,
Efficient temporal pattern recognition by means of dissimilarity space
embedding with discriminative prototypes, Pattern Recognition, Volume 64, 2017,
Pages 268-276, ISSN 0031-3203, https://doi.org/10.1016/j.patcog.2016.11.013.
(https://www.sciencedirect.com/science/article/pii/S0031320316303739)
"""
    def __pairwise_distances(self, X: Any, Y: Any) -> np.ndarray:
        """Pairwise distance with progress bar
Function for computing distance between two sets of time series with
progress bar from `tqdm`.
Args:
X (array-like, shape (x_sample, x_length)):
Set of time series of an arbitrary length.
Y (array-like, shape (y_sample, y_length)):
Set of time series of an arbitrary length.
Returns:
np.ndarray: Distance matrix of shape (x_samples, y_samples)
"""
if callable(self.metric):
return np.vstack(
[
[self.metric(x, y, **self.metric_param) for y in Y]
for x in tqdm(X, )
]
)
else:
return np.vstack(
[
PAIRWISE_DISTANCE_FUNCTIONS[self.metric](
[x], Y, **self.metric_param
).flatten()
for x in tqdm(X)
]
)
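# Hypothetical usage sketch: the transformer is assumed to expose the scikit-learn
# style fit/transform API inherited from the base FeatureDTWTransformer in .fdtw;
# the data and metric below are placeholders.
#   series = np.random.rand(100, 50)  # 100 series of length 50
#   fdtw = FeatureDTWTransformer(n_components=10, metric="euclidean", random_state=0)
#   features = fdtw.fit_transform(series)  # (100, 10) feature-space representation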
| 3,075 |
pvi/models/logistic_regression.py
|
MattAshman/pvi
| 1 |
2167616
|
import torch
import numpy as np
from torch import distributions, nn, optim
from .base import Model
from pvi.distributions.exponential_family_distributions import \
MultivariateGaussianDistribution
class LogisticRegressionModel(Model, nn.Module):
"""
Logistic regression model with a multivariate Gaussian approximate
posterior.
"""
conjugate_family = None
def __init__(self, include_bias=True, **kwargs):
self.include_bias = include_bias
Model.__init__(self, **kwargs)
nn.Module.__init__(self)
def get_default_nat_params(self):
if self.include_bias:
return {
"np1": torch.tensor([0.]*(self.config["D"] + 1)),
"np2": torch.tensor(
[1.]*(self.config["D"] + 1)).diag_embed(),
}
else:
return {
"np1": torch.tensor([0.] * self.config["D"]),
"np2": torch.tensor([1.] * self.config["D"]).diag_embed(),
}
def get_default_config(self):
return {
"D": None,
"optimiser_class": optim.Adam,
"optimiser_params": {"lr": 1e-3},
"reset_optimiser": True,
"epochs": 100,
"batch_size": 100,
"num_elbo_samples": 1,
"num_predictive_samples": 1,
"print_epochs": 10,
"use_probit_approximation": True,
}
def get_default_hyperparameters(self):
"""
:return: A default set of ε for the model.
"""
return {}
def forward(self, x, q, **kwargs):
"""
Returns the (approximate) predictive posterior distribution of a
Bayesian logistic regression model.
:param x: The input locations to make predictions at.
:param q: The approximate posterior distribution q(θ).
:return: ∫ p(y | θ, x) q(θ) dθ ≅ (1/M) Σ_m p(y | θ_m, x) θ_m ~ q(θ).
"""
if self.config["use_probit_approximation"]:
# Use Probit approximation.
q_loc = q.std_params["loc"]
if self.include_bias:
x_ = torch.cat(
(x, torch.ones(len(x)).to(x).unsqueeze(-1)), dim=1)
else:
x_ = x
x_ = x_.unsqueeze(-1)
            if type(q) is MultivariateGaussianDistribution:
q_cov = q.std_params["covariance_matrix"]
else:
q_scale = q.std_params["scale"]
q_cov = q_scale.diag_embed() ** 2
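            # Probit approximation: with mean m = E_q[θ] and covariance S = Cov_q[θ],
            # p(y=1|x) ≈ sigmoid(xᵀm / sqrt(1 + π·xᵀSx / 8)); the two lines below
            # compute the denominator and the rescaled logits per input location.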
denom = x_.transpose(-1, -2).matmul(q_cov).matmul(x_).reshape(-1)
denom = (1 + np.pi * denom / 8) ** 0.5
logits = q_loc.unsqueeze(-2).matmul(x_).reshape(-1) / denom
return distributions.Bernoulli(logits=logits)
else:
thetas = q.distribution.sample(
(self.config["num_predictive_samples"],))
comp_ = self.likelihood_forward(x, thetas)
comp = distributions.Bernoulli(logits=comp_.logits.T)
mix = distributions.Categorical(torch.ones(len(thetas),).to(x))
return distributions.MixtureSameFamily(mix, comp)
def likelihood_forward(self, x, theta, **kwargs):
"""
Returns the model's likelihood p(y | θ, x).
:param x: Input of shape (*, D).
:param theta: Parameters of shape (*, D + 1).
:return: Bernoulli distribution.
"""
assert len(x.shape) in [1, 2], "x must be (*, D)."
assert len(theta.shape) in [1, 2], "theta must be (*, D)."
if self.include_bias:
x_ = torch.cat((x, torch.ones(len(x)).to(x).unsqueeze(-1)), dim=1)
else:
x_ = x
if len(theta.shape) == 1:
logits = x_.unsqueeze(-2).matmul(theta.unsqueeze(-1))
else:
if len(x.shape) == 1:
x_r = x_.unsqueeze(0).repeat(len(theta), 1)
logits = x_r.unsqueeze(-2).matmul(
theta.unsqueeze(-1)).reshape(-1)
else:
x_r = x_.unsqueeze(0).repeat(len(theta), 1, 1)
theta_r = theta.unsqueeze(1).repeat(1, len(x_), 1)
logits = x_r.unsqueeze(-2).matmul(
theta_r.unsqueeze(-1)).reshape(len(theta), len(x_))
return distributions.Bernoulli(logits=logits)
def conjugate_update(self, data, q, t=None):
"""
:param data: The local data to refine the model with.
:param q: The current global posterior q(θ).
:param t: The the local factor t(θ).
:return: q_new, t_new, the new global posterior and the new local
contribution.
"""
raise NotImplementedError
| 4,725 |
lists/uksic2007/map.py
|
openregister/industrial-classification-data
| 1 |
2170541
|
#!/usr/bin/env python3
import sys
from xlrd import open_workbook
sep = '\t'
fields = ['industrial-classification', 'name']
print(sep.join(fields))
#
# read spreadsheet
#
book = open_workbook(sys.argv[1])
sheet = book.sheet_by_index(0)
ncols = sheet.ncols
for r in range(2, sheet.nrows):
    cells = [str(sheet.cell(r, c).value) for c in range(0, ncols) if sheet.cell(r, c).value != '']
line = sep.join(cells).strip()
if line != '' and sep in line:
print(line)
| 472 |
models/sewrnetv2.py
|
ZAKAUDD/LightNet
| 737 |
2170344
|
import torch
import torch.nn as nn
from functools import partial
from collections import OrderedDict
from modules import IdentityResidualBlock, ASPPInPlaceABNBlock, ABN, InPlaceABNWrapper
class SEWiderResNetV2(nn.Module):
def __init__(self, structure, norm_act=ABN, classes=0, dilation=True, is_se=True,
in_size=(448, 896), aspp_out=512, fusion_out=64, aspp_sec=(12, 24, 36)):
"""
Wider ResNet with pre-activation (identity mapping) and Squeeze & Excitation(SE) blocks
:param structure: (list of int) Number of residual blocks in each of the six modules of the network.
:param norm_act: (callable) Function to create normalization / activation Module.
:param classes: (int) Not `0` for segmentation task
:param dilation: (bool) `True` for segmentation task
:param is_se: (bool) Use Squeeze & Excitation (SE) or not
:param in_size: (tuple of int) Size of the input image
        :param aspp_out: (int) Number of channels of the ASPP output
        :param fusion_out: (int) Number of channels of the low-level fusion branch
:param aspp_sec: (tuple of int) Dilation rate used in ASPP
"""
super(SEWiderResNetV2, self).__init__()
self.structure = structure
self.dilation = dilation
self.classes = classes
if len(structure) != 6:
raise ValueError("Expected a structure with six values")
# Initial layers
self.mod1 = nn.Sequential(OrderedDict([
("conv1", nn.Conv2d(3, 64, 3, stride=1, padding=1, bias=False))
]))
# Groups of residual blocks
in_channels = 64
channels = [(128, 128), (256, 256), (512, 512), (512, 1024), (512, 1024, 2048), (1024, 2048, 4096)]
for mod_id, num in enumerate(structure):
# Create blocks for module
blocks = []
for block_id in range(num):
if not dilation:
dil = 1
stride = 2 if block_id == 0 and 2 <= mod_id <= 4 else 1
else:
if mod_id == 3:
dil = 2
elif mod_id == 4:
dil = 4
elif mod_id == 5:
dil = 8
else:
dil = 1
stride = 2 if block_id == 0 and mod_id == 2 else 1
if mod_id == 4:
drop = partial(nn.Dropout2d, p=0.2)
elif mod_id == 5:
drop = partial(nn.Dropout2d, p=0.3)
else:
drop = None
blocks.append((
"block%d" % (block_id + 1),
IdentityResidualBlock(in_channels, channels[mod_id], norm_act=norm_act,
stride=stride, dilation=dil, dropout=drop, is_se=is_se)
))
# Update channels and p_keep
in_channels = channels[mod_id][-1]
# Create module
if mod_id < 2:
self.add_module("pool%d" % (mod_id + 2), nn.MaxPool2d(3, stride=2, padding=1))
self.add_module("mod%d" % (mod_id + 2), nn.Sequential(OrderedDict(blocks)))
# Pooling and predictor
# self.feat_out = nn.Sequential(OrderedDict([("out_norm", norm_act(in_channels)),
# ("out_down", nn.Conv2d(in_channels, 1024,
# kernel_size=1, stride=1,
# padding=0, bias=True))]))
self.bn_out = norm_act(in_channels)
if classes != 0:
self.stg3_fusion = nn.Conv2d(channels[1][1], fusion_out, kernel_size=1, stride=1, padding=0, bias=False)
self.aspp = nn.Sequential(OrderedDict([("aspp", ASPPInPlaceABNBlock(channels[5][2], aspp_out,
feat_res=(int(in_size[0] / 8), int(in_size[1] / 8)),
up_ratio=2, aspp_sec=aspp_sec))]))
self.score = nn.Sequential(OrderedDict([("conv", nn.Conv2d(aspp_out+fusion_out, classes,
kernel_size=3, stride=1,
padding=1, bias=True)),
("up", nn.Upsample(size=in_size, mode='bilinear'))]))
# +++++++++++++++++++++++++++++++++++++++++++++++++++ #
# channel_shuffle: shuffle channels in groups
# +++++++++++++++++++++++++++++++++++++++++++++++++++ #
@staticmethod
def _channel_shuffle(x, groups):
"""
Channel shuffle operation
:param x: input tensor
:param groups: split channels into groups
:return: channel shuffled tensor
"""
batch_size, num_channels, height, width = x.data.size()
channels_per_group = num_channels // groups
# reshape
x = x.view(batch_size, groups, channels_per_group, height, width)
# transpose
# - contiguous() required if transpose() is used before view().
# See https://github.com/pytorch/pytorch/issues/764
x = torch.transpose(x, 1, 2).contiguous().view(batch_size, -1, height, width)
return x
def forward(self, img):
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# 1. Encoder: feature extraction
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
stg1 = self.mod1(img) # (N, 64, 448, 896) 1/1
stg2 = self.mod2(self.pool2(stg1)) # (N, 128, 224, 448) 1/2 3
stg3 = self.mod3(self.pool3(stg2)) # (N, 256, 112, 224) 1/4 3
stg4 = self.mod4(stg3) # (N, 512, 56, 112) 1/8 Stride=2 6
stg4 = self.mod5(stg4) # (N, 1024, 56, 112) 1/8 dilation=2 3
stg4 = self.mod6(stg4) # (N, 2048, 56, 112) 1/8 dilation=4 1
stg4 = self.mod7(stg4) # (N, 4096, 56, 112) 1/8 dilation=8 1
stg4 = self.bn_out(stg4) # (N, 4096, 56, 112) 1/8
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# 2. Decoder: multi-scale feature fusion
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
if self.classes != 0:
# (N, 4096, H/8, W/8) -> (N, 512, H/4, W/4)
de_stg1 = self.aspp(stg4)[1]
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# 3. Classifier: pixel-wise classification-segmentation
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
net_out = self.score(torch.cat([de_stg1, self.stg3_fusion(stg3)], dim=1))
return net_out
else:
return stg4
if __name__ == '__main__':
import os
import time
from torch.autograd import Variable
pre_weight = torch.load("/zfs/zhang/TrainLog/weights/wrnet_pretrained.pth.tar")
dummy_in = Variable(torch.randn(1, 3, 448, 896), requires_grad=True)
model = SEWiderResNetV2(structure=[3, 3, 6, 3, 1, 1],
norm_act=partial(InPlaceABNWrapper, activation="leaky_relu", slope=0.1),
classes=19, dilation=True, is_se=True, in_size=(448, 896),
aspp_out=512, fusion_out=64, aspp_sec=(12, 24, 36))
model_dict = model.state_dict()
keys = list(pre_weight.keys())
keys.sort()
for k in keys:
if "score" in k:
pre_weight.pop(k)
state = {"model_state": pre_weight}
torch.save(state, "{}sewrnetv2_model.pkl".format("/zfs/zhang/TrainLog/weights/"))
| 7,831 |
jmetal/operator/test/test_mutation.py
|
LuckysonKhaidem/ProjectAlpha
| 1 |
2169721
|
import unittest
from jmetal.core.solution import BinarySolution, FloatSolution, IntegerSolution
from jmetal.operator.mutation import BitFlip, Uniform, SimpleRandom, Polynomial, IntegerPolynomial
class PolynomialMutationTestMethods(unittest.TestCase):
def test_should_constructor_create_a_non_null_object(self):
mutation = Polynomial(1.0)
self.assertIsNotNone(mutation)
def test_should_constructor_create_a_valid_operator(self):
operator = Polynomial(0.5, 20)
self.assertEqual(0.5, operator.probability)
self.assertEqual(20, operator.distribution_index)
def test_should_constructor_raise_an_exception_if_the_probability_is_greater_than_one(self):
with self.assertRaises(Exception):
Polynomial(2)
def test_should_constructor_raise_an_exception_if_the_probability_is_lower_than_zero(self):
with self.assertRaises(Exception):
Polynomial(-12)
def test_should_the_solution_remain_unchanged_if_the_probability_is_zero(self):
operator = Polynomial(0.0)
solution = FloatSolution(2, 1, 0, [-5, -5, -5], [5, 5, 5])
solution.variables = [1.0, 2.0, 3.0]
mutated_solution = operator.execute(solution)
self.assertEqual([1.0, 2.0, 3.0], mutated_solution.variables)
def test_should_the_solution_change__if_the_probability_is_one(self):
operator = Polynomial(1.0)
solution = FloatSolution(2, 1, 0, [-5, -5, -5], [5, 5, 5])
solution.variables = [1.0, 2.0, 3.0]
FloatSolution.lower_bound = [-5, -5, -5]
FloatSolution.upper_bound = [5, 5, 5]
mutated_solution = operator.execute(solution)
self.assertNotEqual([1.0, 2.0, 3.0], mutated_solution.variables)
class BitFlipTestCases(unittest.TestCase):
def test_should_constructor_create_a_non_null_object(self):
solution = BitFlip(1.0)
self.assertIsNotNone(solution)
def test_should_constructor_create_a_valid_operator(self):
operator = BitFlip(0.5)
self.assertEqual(0.5, operator.probability)
def test_should_constructor_raise_an_exception_if_the_probability_is_greater_than_one(self):
with self.assertRaises(Exception):
BitFlip(2)
def test_should_constructor_raise_an_exception_if_the_probability_is_lower_than_zero(self):
with self.assertRaises(Exception):
BitFlip(-12)
def test_should_the_solution_remain_unchanged_if_the_probability_is_zero(self):
operator = BitFlip(0.0)
solution = BinarySolution(number_of_variables=1, number_of_objectives=1)
solution.variables[0] = [True, True, False, False, True, False]
mutated_solution = operator.execute(solution)
self.assertEqual([True, True, False, False, True, False], mutated_solution.variables[0])
def test_should_the_solution_change_all_the_bits_if_the_probability_is_one(self):
operator = BitFlip(1.0)
solution = BinarySolution(number_of_variables=2, number_of_objectives=1)
solution.variables[0] = [True, True, False, False, True, False]
solution.variables[1] = [False, True, True, False, False, True]
mutated_solution = operator.execute(solution)
self.assertEqual([False, False, True, True, False, True], mutated_solution.variables[0])
self.assertEqual([True, False, False, True, True, False], mutated_solution.variables[1])
class UniformMutationTestCases(unittest.TestCase):
def test_should_constructor_create_a_non_null_object(self):
operator = Uniform(0.3)
operator2 = Uniform(0.3, 0.7)
self.assertIsNotNone(operator)
self.assertIsNotNone(operator2)
def test_should_constructor_create_a_valid_operator(self):
operator = Uniform(0.5, 20)
self.assertEqual(0.5, operator.probability)
self.assertEqual(20, operator.perturbation)
def test_should_constructor_raise_an_exception_if_the_probability_is_greater_than_one(self):
with self.assertRaises(Exception):
Uniform(2)
def test_should_constructor_raise_an_exception_if_the_probability_is_lower_than_zero(self):
with self.assertRaises(Exception):
Uniform(-12)
def test_should_the_solution_remain_unchanged_if_the_probability_is_zero(self):
operator = Uniform(0.0, 3.0)
solution = FloatSolution(3, 1, 0, [-5, -5, -5], [5, 5, 5])
solution.variables = [1.0, 2.0, 3.0]
mutated_solution = operator.execute(solution)
self.assertEqual([1.0, 2.0, 3.0], mutated_solution.variables)
def test_should_the_solution_change_if_the_probability_is_one(self):
operator = Uniform(1.0, 3.0)
solution = FloatSolution(3, 1, 0, [-5, -5, -5], [5, 5, 5])
solution.variables = [1.0, 2.0, 3.0]
mutated_solution = operator.execute(solution)
self.assertNotEqual([1.0, 2.0, 3.0], mutated_solution.variables)
def test_should_the_solution_change_between_max_and_min_value(self):
operator = Uniform(1.0, 5)
solution = FloatSolution(4, 1, 0, [-1, 12, -3, -5], [1, 17, 3, -2])
solution.variables = [-7.0, 3.0, 12.0, 13.4]
mutated_solution = operator.execute(solution)
for i in range(solution.number_of_variables):
self.assertGreaterEqual(mutated_solution.variables[i], solution.lower_bound[i])
self.assertLessEqual(mutated_solution.variables[i], solution.upper_bound[i])
class RandomMutationTestCases(unittest.TestCase):
def test_should_constructor_create_a_non_null_object(self):
operator = SimpleRandom(1.0)
self.assertIsNotNone(operator)
def test_should_constructor_create_a_valid_operator(self):
operator = SimpleRandom(0.5)
self.assertEqual(0.5, operator.probability)
def test_should_constructor_raise_an_exception_if_the_probability_is_greater_than_one(self):
with self.assertRaises(Exception):
SimpleRandom(2)
def test_should_constructor_raise_an_exception_if_the_probability_is_lower_than_zero(self):
with self.assertRaises(Exception):
SimpleRandom(-12)
def test_should_the_solution_remain_unchanged_if_the_probability_is_zero(self):
operator = SimpleRandom(0.0)
solution = FloatSolution(3, 1, 0, [-5, -5, -5], [5, 5, 5])
solution.variables = [1.0, 2.0, 3.0]
mutated_solution = operator.execute(solution)
self.assertEqual([1.0, 2.0, 3.0], mutated_solution.variables)
def test_should_the_solution_change_if_the_probability_is_one(self):
operator = SimpleRandom(1.0)
solution = FloatSolution(3, 1, 0, [-5, -5, -5], [5, 5, 5])
solution.variables = [1.0, 2.0, 3.0]
mutated_solution = operator.execute(solution)
self.assertNotEqual([1.0, 2.0, 3.0], mutated_solution.variables)
def test_should_the_solution_change_between_max_and_min_value(self):
operator = SimpleRandom(1.0)
solution = FloatSolution(4, 1, 0, [-1, 12, -3, -5], [1, 17, 3, -2])
solution.variables = [-7.0, 3.0, 12.0, 13.4]
mutated_solution = operator.execute(solution)
for i in range(solution.number_of_variables):
self.assertGreaterEqual(mutated_solution.variables[i], solution.lower_bound[i])
self.assertLessEqual(mutated_solution.variables[i], solution.upper_bound[i])
class IntegerPolynomialMutationTestCases(unittest.TestCase):
def test_should_constructor_create_a_non_null_object(self):
operator = IntegerPolynomial(1.0)
self.assertIsNotNone(operator)
def test_should_constructor_create_a_valid_operator(self):
operator = IntegerPolynomial(0.5, 20)
self.assertEqual(0.5, operator.probability)
self.assertEqual(20, operator.distribution_index)
def test_should_constructor_raise_an_exception_if_the_probability_is_greater_than_one(self):
with self.assertRaises(Exception):
IntegerPolynomial(2)
def test_should_constructor_raise_an_exception_if_the_probability_is_lower_than_zero(self):
with self.assertRaises(Exception):
IntegerPolynomial(-12)
def test_should_the_solution_remain_unchanged_if_the_probability_is_zero(self):
operator = IntegerPolynomial(0.0)
solution = IntegerSolution(2, 1, 0, [-5, -5, -5], [5, 5, 5])
solution.variables = [1, 2, 3]
mutated_solution = operator.execute(solution)
self.assertEqual([1, 2, 3], mutated_solution.variables)
self.assertEqual([True, True, True], [isinstance(x, int) for x in mutated_solution.variables])
def test_should_the_solution_change__if_the_probability_is_one(self):
operator = IntegerPolynomial(1.0)
solution = IntegerSolution(2, 1, 0, [-5, -5, -5], [5, 5, 5])
solution.variables = [1, 2, 3]
mutated_solution = operator.execute(solution)
self.assertNotEqual([1, 2, 3], mutated_solution.variables)
self.assertEqual([True, True, True], [isinstance(x, int) for x in mutated_solution.variables])
if __name__ == '__main__':
unittest.main()
| 9,114 |
xcode.py
|
ctcampbell/veracode-xcode-wrapper
| 0 |
2168945
|
#!/usr/bin/env python
import os
import shlex
import subprocess
from datetime import datetime
import wrapper
wrapper_command = "java -jar /Users/ccampbell/Veracode/Source/vosp-api-wrappers-java-19.2.5.6.jar -action uploadandscan -appname verademo-swift -createprofile false -version '{}' -filepath '{}'"
def build_bca():
archive_file = os.environ["ARCHIVE_PATH"]
try:
output = subprocess.check_output(["vcxcodepkg", "--noui", "-a", archive_file])
print(output)
except subprocess.CalledProcessError as e:
print(e.output)
else:
output_split = output.rsplit(" Path: ", 1)
if len(output_split) == 2:
bca_file = output_split[1][:-1]
date = datetime.utcnow().strftime("%-d %b %Y %H:%M")
command = shlex.split(wrapper_command.format(date, bca_file))
wrapper.run_wrapper(command)
if __name__ == "__main__":
build_bca()
| 926 |
Practice Problems/01-Strings/Intermediate/split_string_pairs.py
|
vishnu-rvn/PyPractice
| 9 |
2170380
|
__author__ = "<NAME>"
def split_string_pairs(test_string):
"""
Problem Statement-
    Given a string, split it into pairs of two characters. If the string has an
    odd length, append _ to the last character to make it a pair. Return
    an empty list if the string is empty.
Input-
a. test_string: str
        The string to be split; it can consist of any characters.
Output-
The function should return the pair as a list.
Example-
    >>> split_string_pairs("abc")
    ['ab', 'c_']
    >>> split_string_pairs("abcd")
['ab', 'cd']
"""
pass
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
from test_strings_beginner import test_one
test_one('test_split_string_pairs')
| 788 |
src/model/struct/data/data_item.py
|
Amourspirit/ooo_uno_tmpl
| 0 |
2170371
|
# coding: utf-8
from dataclasses import dataclass
from typing import Union, List
@dataclass
class DataItem:
name: str
type: str
desc: List[str]
origin: Union[str, None]
origtype: Union[str, None]
| 217 |
output/models/nist_data/atomic/non_positive_integer/schema_instance/nistschema_sv_iv_atomic_non_positive_integer_min_inclusive_4_xsd/__init__.py
|
tefra/xsdata-w3c-tests
| 1 |
2170302
|
from output.models.nist_data.atomic.non_positive_integer.schema_instance.nistschema_sv_iv_atomic_non_positive_integer_min_inclusive_4_xsd.nistschema_sv_iv_atomic_non_positive_integer_min_inclusive_4 import NistschemaSvIvAtomicNonPositiveIntegerMinInclusive4
__all__ = [
"NistschemaSvIvAtomicNonPositiveIntegerMinInclusive4",
]
| 332 |
main.py
|
Garrett-N/stashy
| 0 |
2170322
|
import stashy
from authentication import *
from pprint import pprint
stash = stashy.connect(msbbURL, token=msbbToken)
# grab AWS project
aws = next((item for item in stash.projects.list() if item['key'] == 'AWS'))
aws_repos = stash.projects[aws.get('key')].repos.list()
# I'm only interested in two repos (for now)
| 319 |
pytorch_toolkit/action_recognition/action_recognition/models/backbone/__init__.py
|
morkovka1337/openvino_training_extensions
| 256 |
2170317
|
from collections import namedtuple
from torch import nn
from . import resnet
from . import mobilenetv2
from . import rmnet
Encoder = namedtuple('Encoder', ('model', 'features', 'features_shape'))
def make_encoder(name, input_size=224, input_channels=3, pretrained=None):
"""Make encoder (backbone) with a given name and parameters"""
features_size = input_size // 32
num_features = 2048
if name.startswith('resnet'):
model = getattr(resnet, name)(pretrained=pretrained, num_channels=input_channels)
features = nn.Sequential(*list(model.children())[:-2])
num_features = 512 if int(name[6:]) < 50 else 2048
elif name.startswith('mobilenetv2'):
model = mobilenetv2.MobileNetV2(input_size=input_size, pretrained=None)
features = model.features
num_features = 1280
elif name.startswith('rmnet'):
model = rmnet.RMNetClassifier(1000, pretrained=None)
features = nn.Sequential(*list(model.children())[:-2])
num_features = 512
elif name.startswith('se_res'):
model = load_from_pretrainedmodels(name)(pretrained='imagenet' if pretrained else None)
features = nn.Sequential(*list(model.children())[:-2])
else:
raise KeyError("Unknown model name: {}".format(name))
features_shape = (num_features, features_size, features_size)
return Encoder(model, features, features_shape)
def load_from_pretrainedmodels(model_name):
import pretrainedmodels
return getattr(pretrainedmodels, model_name)
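# Hypothetical usage sketch (backbone name and input size are placeholders):
#   encoder = make_encoder('resnet34', input_size=224, input_channels=3)
#   # encoder.features is an nn.Sequential; encoder.features_shape == (512, 7, 7)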
| 1,533 |
iss/exec/facets.py
|
prise6/smart-iss-posts
| 0 |
2170525
|
import os
import base64
import pandas as pd
import numpy as np
from facets_overview.generic_feature_statistics_generator import GenericFeatureStatisticsGenerator
from iss.init_config import CONFIG
from iss.tools import Tools
SPRITE_NB_LIGNE = 145
SPRITE_NB_COLONNE = 100
TARGET_SIZE_WIDTH = 48*2
TARGET_SIZE_HEIGHT = 27*2
LIMIT = 14499
def request_data(config, db_manager):
sql = """
SELECT
v1.pictures_id,
v1.pictures_x as v1_x,
v1.pictures_y as v1_y,
CAST(v1.label AS CHAR) as v1_label,
v2.pictures_x as v2_x,
v2.pictures_y as v2_y,
CAST(v2.label AS CHAR) as v2_label,
v3.pictures_x as v3_x,
v3.pictures_y as v3_y,
CAST(v3.label AS CHAR) as v3_label,
loc.pictures_timestamp,
loc.pictures_location_text,
loc.pictures_latitude,
loc.pictures_longitude
FROM iss.pictures_embedding AS v1
INNER JOIN iss.pictures_embedding v2
ON v1.pictures_id = v2.pictures_id
AND v2.clustering_type = v1.clustering_type
AND v2.clustering_model_type = v1.clustering_model_type
            AND v2.clustering_model_name = v1.clustering_model_name
AND v2.clustering_version = 2
INNER JOIN iss.pictures_embedding v3
ON v1.pictures_id = v3.pictures_id
AND v3.clustering_type = v1.clustering_type
AND v3.clustering_model_type = v1.clustering_model_type
AND v3.clustering_model_name = v1.clustering_model_name
AND v3.clustering_version = 3
LEFT JOIN iss.pictures_location loc
ON loc.pictures_id = v1.pictures_id
WHERE v1.clustering_version = %s
ORDER BY pictures_id ASC LIMIT %s"""
db_manager.cursor.execute(sql, (1, LIMIT))
results = db_manager.cursor.fetchall()
return pd.DataFrame(results, columns=db_manager.cursor.column_names)
def create_sprite(config, df):
images_array = [Tools.read_np_picture(os.path.join(config.get('directory')['collections'], "%s.jpg" % picture_id), target_size = (TARGET_SIZE_HEIGHT, TARGET_SIZE_WIDTH)) for picture_id in df['pictures_id']]
sprite = np.zeros((TARGET_SIZE_HEIGHT*SPRITE_NB_LIGNE, TARGET_SIZE_WIDTH*SPRITE_NB_COLONNE, 3))
index = 0
for i in range(SPRITE_NB_LIGNE):
for j in range(SPRITE_NB_COLONNE):
sprite[(i*TARGET_SIZE_HEIGHT):(i+1)*TARGET_SIZE_HEIGHT, (j*TARGET_SIZE_WIDTH):(j+1)*TARGET_SIZE_WIDTH, :] = images_array[index]
index += 1
if index >= len(images_array):
break
if index >= len(images_array):
break
img = Tools.display_one_picture(sprite)
return img
def generate_facets(config, df):
proto = GenericFeatureStatisticsGenerator().ProtoFromDataFrames([{'name': 'facets-iss', 'table': df}])
protostr = base64.b64encode(proto.SerializeToString()).decode("utf-8")
HTML_TEMPLATE = """
<script src="https://cdnjs.cloudflare.com/ajax/libs/webcomponentsjs/1.3.3/webcomponents-lite.js"></script>
<link rel="import" href="https://raw.githubusercontent.com/PAIR-code/facets/1.0.0/facets-dist/facets-jupyter.html" >
<facets-overview id="elem"></facets-overview>
<script>
document.querySelector("#elem").protoInput = "{protostr}";
</script>"""
html = HTML_TEMPLATE.format(protostr=protostr)
return html
def generate_facets_dive(config, df, relative_sprite_path):
jsonstr = df.to_json(orient = 'records')
HTML_TEMPLATE = """
<script src="https://cdnjs.cloudflare.com/ajax/libs/webcomponentsjs/1.3.3/webcomponents-lite.js"></script>
<link rel="import" href="https://raw.githubusercontent.com/PAIR-code/facets/1.0.0/facets-dist/facets-jupyter.html">
<facets-dive id="elem" height="600" cross-origin="anonymous" sprite-image-width="{sprite_width}" sprite-image-height="{sprite_height}">
</facets-dive>
<script>
var data = {jsonstr};
var atlas_url = "{atlas_url}";
document.querySelector("#elem").data = data;
document.querySelector("#elem").atlasUrl = atlas_url;
</script>"""
html = HTML_TEMPLATE.format(jsonstr=jsonstr, atlas_url = relative_sprite_path, sprite_width=TARGET_SIZE_WIDTH, sprite_height=TARGET_SIZE_HEIGHT)
return html
def main():
## db manager
db_manager = Tools.create_db_manager(CONFIG)
## request data
df = request_data(CONFIG, db_manager)
## create sprite
sprite = create_sprite(CONFIG, df)
## save sprite
sprite.save(os.path.join(CONFIG.get('directory')['reports'], 'figures', 'sprite_altas.png'), "PNG")
## generate facets
html_facets = generate_facets(CONFIG, df)
with open(os.path.join(CONFIG.get('directory')['reports'], 'facets.html'),'w') as f:
f.write(html_facets)
## generate facets-dive
html_facets_dive = generate_facets_dive(CONFIG, df, './figures/sprite_altas.png')
with open(os.path.join(CONFIG.get('directory')['reports'], 'facets-dive.html'), 'w') as f:
f.write(html_facets_dive)
if __name__ == '__main__':
main()
| 5,108 |
ExamplesPython_3.6/Chapter1/ImageDisplay.py
|
Nixon-Aguado/Feature-Extraction-and-Image-Processing-Book-Examples
| 30 |
2170278
|
'''
Feature Extraction and Image Processing
<NAME> & <NAME>
http://www.southampton.ac.uk/~msn/book/
Chapter 1
ImageDipslay: Loads and displays an image. Shows a surface and prints pixel data
'''
# Set module functions
from ImageUtilities import imageReadL, showImageL, createImageF
from PrintUtilities import printImageRangeL
from PlotUtilities import plotColorSurface, plot3DColorHistogram
'''
Parameters:
pathToDir = Input image directory
imageName = Input image name
'''
pathToDir = "../../Images/Chapter1/Input/"
imageName = "SmoothSquare.png"
# Read image into array
inputImage, width, height = imageReadL(pathToDir + imageName)
# Show input image
showImageL(inputImage)
# Print pixel's values in an image range
printImageRangeL(inputImage, [0, width-1], [0, height-1])
# Create an image to store the z values for surface
outputZ = createImageF(width, height)
# Three float array to store colors of the surface
colorsRGB = createImageF(width, height, 3)
# Set surface and color values
for x in range(0, width):
for y in range(0, height):
pixelValue = float(inputImage[y,x])
outputZ[y,x] = 255 - pixelValue
pointColour = float(inputImage[y,x])/255.0
colorsRGB[y,x] = [pointColour, pointColour, pointColour]
# Plot surface
plotColorSurface(outputZ, colorsRGB, [0,400], 1)
# Plot histogram
plot3DColorHistogram(outputZ, colorsRGB, [0,400])
| 1,397 |
tests/conftest.py
|
franc6/ics_calendar
| 29 |
2168146
|
import pytest
import json
from dateutil import parser as dtparser
from custom_components.ics_calendar.icalendarparser import ICalendarParser
def datetime_hook(pairs):
    result = {}
    for key, value in pairs:
        if isinstance(value, str):
            try:
                result[key] = dtparser.parse(value)
            except ValueError:
                result[key] = value
        else:
            result[key] = value
    return result
@pytest.fixture
def ics_parser():
return ICalendarParser.get_instance("ics")
@pytest.fixture
def icalevents_parser():
return ICalendarParser.get_instance("icalevents")
@pytest.fixture
def parser(which_parser, request):
return request.getfixturevalue(which_parser)
@pytest.fixture()
def calendar_data(fileName):
with open(f"tests/{fileName}") as f:
return f.read().replace("\0", "")
@pytest.fixture()
def expected_data(fileName):
with open(f"tests/{fileName}.expected.json") as f:
return json.loads(f.read(), object_pairs_hook=datetime_hook)
@pytest.helpers.register
def assert_event_list_size(expected, event_list):
assert event_list is not None
assert expected == len(event_list)
@pytest.helpers.register
def compare_event_list(expected, actual):
for e, a in zip(expected, actual):
pytest.helpers.compare_event(e, a)
@pytest.helpers.register
def compare_event(expected, actual):
for key in expected.keys():
assert expected[key] == actual[key]
| 1,468 |
specter/generate_candidates.py
|
zoranmedic/LCR-design
| 0 |
2169228
|
import argparse
import jsonlines
import faiss
import numpy as np
import random
import json
from math import log2
from utils import year_from_id
def get_candidates(paper_embeddings, query_embeddings, dataset, year_cids=None, year=None):
# Load paper embeddings
paper_ids = []
paper_embs = []
with jsonlines.open(paper_embeddings) as reader:
for line in reader:
            if (dataset == 'acl' and year_from_id(line['paper_id']) <= year) or dataset == 'refseer':
paper_ids.append(line['paper_id'])
paper_embs.append(np.array(line['embedding']).astype('float32'))
print(f'Loaded {len(paper_ids)} paper embeddings.')
# Load context embeddings
context_ids = []
context_embs = []
with jsonlines.open(query_embeddings) as reader:
for line in reader:
            if (dataset == 'acl' and line['paper_id'] in year_cids) or dataset == 'refseer':
context_ids.append(line['paper_id'])
context_embs.append(np.array(line['embedding']).astype('float32'))
print(f'Loaded {len(context_ids)} context embeddings.')
    # Stack embeddings into contiguous float32 arrays for FAISS
paper_embs = np.array(paper_embs)
context_embs = np.array(context_embs)
# Setup index on GPU
res = faiss.StandardGpuResources() # use a single GPU
d = paper_embs.shape[1]
index_flat = faiss.IndexFlatL2(d) # IndexFlatIP(d)
gpu_index_flat = faiss.index_cpu_to_gpu(res, 0, index_flat)
# Index paper embeddings
gpu_index_flat.add(paper_embs) # add vectors to the index
k = 1024 # get 1024 nearest neighbors (max for GPU)
D, I = gpu_index_flat.search(context_embs, k) # actual search
    context_candidates = {}
    for cid, scores, neighbours in zip(context_ids, D, I):
citing, cited = cid.split('_')[:2]
neighbours_pids = [paper_ids[i] for i in neighbours]
candidates = [i for i in neighbours_pids if i != citing][:1000] # store top 1000 candidates
context_candidates[cid] = candidates
return context_candidates
def main(args):
cids = set([i['context_id'] for i in json.load(open(args.train_pairs))])
context_candidates = {}
if args.dataset == 'acl':
years = set([year_from_id(i.split('_')[0]) for i in cids])
for year in years:
year_cids = set(cid for cid in cids if year_from_id(cid.split('_')[0]) == year)
            year_candidates = get_candidates(args.paper_embeddings, args.query_embeddings, args.dataset, year_cids, year)
for k in year_candidates:
context_candidates[k] = year_candidates[k]
else:
context_candidates = get_candidates(args.paper_embeddings, args.query_embeddings, args.dataset)
json.dump(context_candidates, open(args.output_file, 'wt'))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('train_pairs')
parser.add_argument('paper_embeddings')
parser.add_argument('query_embeddings')
parser.add_argument('output_file')
parser.add_argument('-dataset', default='acl')
args = parser.parse_args()
main(args)
| 3,165 |
test/test_ezsignbulksendsignermapping_get_object_v1_response.py
|
ezmaxinc/eZmax-SDK-python
| 0 |
2170512
|
"""
eZmax API Definition (Full)
    This API exposes all the functionalities for the eZmax and eZsign applications. # noqa: E501
The version of the OpenAPI document: 1.1.7
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import eZmaxApi
from eZmaxApi.model.common_response import CommonResponse
from eZmaxApi.model.common_response_obj_debug import CommonResponseObjDebug
from eZmaxApi.model.common_response_obj_debug_payload import CommonResponseObjDebugPayload
from eZmaxApi.model.ezsignbulksendsignermapping_get_object_v1_response_all_of import EzsignbulksendsignermappingGetObjectV1ResponseAllOf
from eZmaxApi.model.ezsignbulksendsignermapping_get_object_v1_response_m_payload import EzsignbulksendsignermappingGetObjectV1ResponseMPayload
globals()['CommonResponse'] = CommonResponse
globals()['CommonResponseObjDebug'] = CommonResponseObjDebug
globals()['CommonResponseObjDebugPayload'] = CommonResponseObjDebugPayload
globals()['EzsignbulksendsignermappingGetObjectV1ResponseAllOf'] = EzsignbulksendsignermappingGetObjectV1ResponseAllOf
globals()['EzsignbulksendsignermappingGetObjectV1ResponseMPayload'] = EzsignbulksendsignermappingGetObjectV1ResponseMPayload
from eZmaxApi.model.ezsignbulksendsignermapping_get_object_v1_response import EzsignbulksendsignermappingGetObjectV1Response
class TestEzsignbulksendsignermappingGetObjectV1Response(unittest.TestCase):
"""EzsignbulksendsignermappingGetObjectV1Response unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testEzsignbulksendsignermappingGetObjectV1Response(self):
"""Test EzsignbulksendsignermappingGetObjectV1Response"""
# FIXME: construct object with mandatory attributes with example values
# model = EzsignbulksendsignermappingGetObjectV1Response() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 1,939 |
notebooks/cointegration/Crypto/Alpha Vantage Client.py
|
lamtrinh259/crypto_backend
| 1 |
2169911
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: warrenfulton
"""
'''
This is a client program to authenticate and create calls and requests to the Alpha Vantage financial API
'''
from alpha_vantage.timeseries import TimeSeries
from alpha_vantage.foreignexchange import ForeignExchange
from alpha_vantage.techindicators import TechIndicators
import requests
import pandas as pd
import time
class AVClient:
#initialize client with api key
def __init__(self):
try:
with open("api_key.txt", "r") as f:
                self.key = f.read().strip()
except IOError:
print("IOError: Could Not Find API Key")
self.url = "https://www.alphavantage.co/query?"
#request function to return stock time series
#function and symbol are required
#default interval is 1min for intraday data
#default outputsize and data type are 'compact' and 'pandas' respectively
#returns pandas dataframe of price information
def getTimeSeries(self, function, symbols, interval='1min', outputsize="compact", datatype="pandas"):
#check if datatype is valid and create instance of time series
if datatype != 'pandas' and datatype != 'json' and datatype != 'csv':
            print("Invalid Datatype: Viable options are 'pandas', 'json', 'csv'")
return -1
else:
ts = TimeSeries(key=self.key, output_format=datatype)
#check if outputsize is a valid input
if outputsize != 'compact' and outputsize != 'full':
            print("Invalid Output Size: Viable options are 'compact' and 'full'")
return -1
#determine what time series the user wants and build request
if function == 'intraday':
valid_intervals = ['1min', '5min', '15min', '30min', '60min']
if interval not in valid_intervals:
print("Invalid Interval: Viable options are '1min', '5min', '15min', '30min', '60min'")
return -1
else:
data, meta_data = ts.get_intraday(symbol=symbols, interval=interval, outputsize=outputsize)
elif function == 'daily':
data, meta_data = ts.get_daily(symbol=symbols, outputsize=outputsize)
elif function == 'weekly':
data, meta_data = ts.get_weekly(symbol=symbols)
elif function == 'monthly':
data, meta_data = ts.get_monthly(symbol=symbols)
else:
print("Invalid Function: Viable options are 'intraday', 'daily', 'weekly', and 'monthly'" )
return -1
return data
    #request function to return fundamental data of a company
#required parameters are function and symbol
#returns a dictionary of fundamental data
def getFundamentals(self, function, symbol):
#check if function is valid
valid_functions = ['OVERVIEW', 'INCOME_STATEMENT', 'BALANCE_SHEET', 'CASH_FLOW']
if function not in valid_functions:
print("Invalid Function: Viable options are 'OVERVIEW', 'INCOME_STATEMENT', 'BALANCE_SHEET', 'CASH_FLOW'")
return -1
#build url and data
params = {'function': function,
'symbol': symbol,
'apikey': self.key}
response = requests.get(url=self.url, params=params)
data = response.json()
return data
#Function to grab technical indicators for any equity or currency pair
#parameters are 'function' (string): which technical indicator you want (e.g. 'SMA', 'EMA', 'VWAP', etc.)
#'symbol' (string): the ticker of the equity or currency pair
#'interval' (string): daily, weekly, or monthly data
#'time_period' (int): the time period for the technical indicator (e.g. 10, 50, 100, OR 200 SMA)
#'series_type' (string): open, high, low, or close data
#returns a dictionary of the values over the given interval
def getTechIndicators(self, function, symbol, interval, time_period, series_type):
params = {'function': function,
'symbol': symbol,
'interval': interval,
'time_period': time_period,
'series_type': series_type,
'apikey': self.key}
response = requests.get(url=self.url, params=params)
data = response.json()
return data
#works with any cryptocurrency and/or real currency
    #parameters are 'from_currency' (string), 'to_currency' (string) (e.g. from 'BTC' to 'XRP')
    #returns a dictionary of the given current exchange rate
def getCurrencyExchangeRates(self, from_currency, to_currency):
params = {'function': 'CURRENCY_EXCHANGE_RATE',
'from_currency': from_currency,
'to_currency': to_currency,
'apikey': self.key}
response = requests.get(url=self.url, params=params)
data = response.json()
return data
    #function to grab time series data for forex currency
    #parameters are 'function' (string): daily, weekly, or monthly data
#'from_symbol' (string): ticker of first currency
#'to_symbol' (string): ticker of second currency
#returns a dictionary time series data for the given exchange rate
def getForexTimeSeries(self, function, from_symbol, to_symbol):
if function == 'daily':
function = 'FX_DAILY'
elif function == 'weekly':
function = 'FX_WEEKLY'
elif function == 'monthly':
function = 'FX_MONTHLY'
else:
            print('Invalid Function: valid functions are daily, weekly and monthly.')
params = {'function': function,
'from_symbol': from_symbol,
'to_symbol': to_symbol,
'outputsize': 'full',
'apikey': self.key}
response = requests.get(url=self.url, params=params)
data = response.json()
return data
#works the same as getForexTimeSeries but with crypto tickers
#parameter 'market' (string) specifies which market you want to grab the current price data from (e.g. CNY)
#returns a dictionary of the time series data
def getCryptoTimeSeries(self, function, symbol, market):
if function == 'daily':
function = 'DIGITAL_CURRENCY_DAILY'
elif function == 'weekly':
function = 'DIGITAL_CURRENCY_WEEKLY'
elif function == 'monthly':
function = 'DIGITAL_CURRENCY_MONTHLY'
else:
print("Invalid Function: Valid functions are 'daily', 'weekly', and 'monthly'")
params = {'function': function,
'symbol': symbol,
'market': market,
'apikey': self.key}
response = requests.get(url=self.url, params=params)
data = response.json()
return data
client = AVClient()
data = client.getCryptoTimeSeries('daily', 'BTC', 'USD')
df = pd.DataFrame.from_dict(data)
df.to_csv('Bitcoin.csv')
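# Additional hypothetical examples (symbols are placeholders; a valid Alpha Vantage
# key must be present in api_key.txt):
#   msft = client.getTimeSeries('intraday', 'MSFT', interval='5min')
#   rate = client.getCurrencyExchangeRates('BTC', 'USD')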
| 6,889 |
tests/test.py
|
dermasmid/py-jsoneditor
| 14 |
2169034
|
import sys
import os
import platform
if platform.system() == 'Windows':
separator = '\\'
else:
separator = '/'
sys.path.insert(0, '/'.join(os.path.dirname(os.path.realpath(__file__)).split(separator)[:-1]) + '/jsoneditor')
import jsoneditor
import requests
# Test dict
jsoneditor.editjson(requests.get('https://jsonplaceholder.typicode.com/posts').json())
# Test string
jsoneditor.editjson(requests.get('https://jsonplaceholder.typicode.com/comments').text)
# Test editing
jsoneditor.editjson({'hi': '#466'}, print, {'colorPicker': True}, run_in_thread= True)
# Test urls
jsoneditor.editjson('https://jsonplaceholder.typicode.com/users')
# Test csv
jsoneditor.editjson('test.csv', is_csv=True)
| 716 |
buildscripts/resmokeconfig/__init__.py
|
SunguckLee/real-mongodb
| 4 |
2168964
|
from __future__ import absolute_import
from .suites import NAMED_SUITES
from .loggers import NAMED_LOGGERS
| 112 |
people/migrations/0002_auto_20190502_2253.py
|
s-a-f-e/backend
| 1 |
2167928
|
# Generated by Django 2.2.1 on 2019-05-02 22:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('people', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='person',
name='phone',
field=models.CharField(max_length=20, unique=True),
),
]
| 386 |
test/asana/helpers/test_convert_urls_to_links.py
|
isabella232/SGTM
| 0 |
2169893
|
from html import escape
from src.asana.helpers import convert_urls_to_links, _link
from test.impl.base_test_case_class import BaseClass
class TestConvertUrlsToLinks(BaseClass):
def test_no_urls_returns_original(self):
self.assertEqual("foo", convert_urls_to_links("foo"))
def test_wrap_url_in_a_tag(self):
input_text = "Hey check out https://www.asana.com to work together effortlessly"
expected_output = 'Hey check out <A href="https://www.asana.com">https://www.asana.com</A> to work together effortlessly'
self.assertEqual(expected_output, convert_urls_to_links(input_text))
def test_wrap_prefix_url_in_a_tag(self):
input_text = "https://www.asana.com is our website"
expected_output = (
'<A href="https://www.asana.com">https://www.asana.com</A> is our website'
)
self.assertEqual(expected_output, convert_urls_to_links(input_text))
def test_wrap_suffix_url_in_a_tag(self):
input_text = "Our website is https://www.asana.com"
expected_output = (
'Our website is <A href="https://www.asana.com">https://www.asana.com</A>'
)
self.assertEqual(expected_output, convert_urls_to_links(input_text))
def test_wrap_multiple_urls_in_a_tag(self):
input_text = "Hey check out https://www.asana.com to work together effortlessly. We're hiring at https://asana.com/jobs"
expected_output = 'Hey check out <A href="https://www.asana.com">https://www.asana.com</A> to work together effortlessly. We\'re hiring at <A href="https://asana.com/jobs">https://asana.com/jobs</A>'
self.assertEqual(expected_output, convert_urls_to_links(input_text))
def test_dont_wrap_urls_that_already_are_wrapped_in_a_tag(self):
input_text = 'Hey check out <A href="https://www.asana.com/a/b/123">https://www.asana.com/a/b/123</A> to work together effortlessly'
self.assertEqual(input_text, convert_urls_to_links(input_text))
def test_markdown_wraped_urls_still_get_converted(self):
url = "https://app.asana.com/0/0/12345"
input_text = "Pull Request synchronized with [Asana task]({})".format(url)
self.assertEqual(
'Pull Request synchronized with [Asana task](<A href="{}">{}</A>)'.format(
url, url
),
convert_urls_to_links(input_text),
)
if __name__ == "__main__":
from unittest import main as run_tests
run_tests()
| 2,476 |
xirvik/commands/util.py
|
Tatsh/xirvik-tools
| 4 |
2170355
|
"""Utility functions for CLI commands."""
from os.path import expanduser
from types import FrameType
from typing import Any, Callable, Iterator, Optional, Sequence, Type, Union
import functools
import itertools
import logging
import pathlib
import re
import sys
import warnings
from click.core import ParameterSource
from loguru import logger
import click
import xdg
import yaml
__all__ = ('common_options_and_arguments', 'complete_hosts', 'complete_ports',
'setup_log_intercept_handler', 'setup_logging')
def setup_logging(debug: Optional[bool] = False) -> None:
"""Shared function to enable logging."""
if debug: # pragma: no cover
setup_log_intercept_handler()
logger.enable('')
else:
logger.configure(handlers=(dict(
format='<level>{message}</level>',
level='INFO',
sink=sys.stderr,
),))
def common_options_and_arguments(
func: Callable[..., None]) -> Callable[..., None]:
"""
Shared options and arguments, to be used as a decorator with
click.command().
"""
@click.option('-u', '--username', default=None, help='Xirvik user')
@click.option('-p', '--password', help='<PASSWORD>')
@click.option('-r',
'--max-retries',
type=int,
default=10,
help='Number of retries for each request (passed to client)')
@click.option('-d',
'--debug',
is_flag=True,
help='Enable debug level logging')
@click.option(
'--backoff-factor',
default=5,
type=int,
help=('Back-off factor used when calculating time to wait to retry '
'a failed request'))
@click.option('--netrc',
default=expanduser('~/.netrc'),
help='netrc file path')
@click.option('-C', '--config', help='Configuration file')
@click.option('-H',
'--host',
help='Xirvik host (without protocol)',
shell_complete=complete_hosts)
@functools.wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> None: # pragma: no cover
return func(*args, **kwargs)
return wrapper
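# Hypothetical usage sketch: the decorator sits between click.command() and the
# command function so every command shares the same flags (command name below is
# a placeholder):
#
#   @click.command()
#   @common_options_and_arguments
#   def list_torrents(host, username, password, **kwargs):
#       ...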
def _clean_host(s: str) -> str:
# Attempt to not break IPv6 addresses
if '[' not in s and (re.search(r'[0-9]+\:[0-9]+', s) or s == '::1'):
return s
# Remove brackets and remove port at end
return re.sub(r'[\[\]]', '', re.sub(r'\:[0-9]+$', '', s))
def _read_ssh_known_hosts() -> Iterator[str]:
try:
with open(expanduser('~/.ssh/known_hosts')) as f:
for line in f.readlines():
host_part = line.split()[0]
if ',' in host_part:
yield from (_clean_host(x) for x in host_part.split(','))
else:
yield _clean_host(host_part)
except FileNotFoundError:
pass
def _read_netrc_hosts() -> Iterator[str]:
try:
with open(expanduser('~/.netrc')) as f:
yield from (x.split()[1] for x in f.readlines())
except FileNotFoundError:
pass
def complete_hosts(_: Any, __: Any, incomplete: str) -> Sequence[str]:
"""
Returns a list of hosts from SSH known_hosts and ~/.netrc for completion.
"""
return [
k
for k in itertools.chain(_read_ssh_known_hosts(), _read_netrc_hosts())
if k.startswith(incomplete)
]
def complete_ports(_: Any, __: Any, incomplete: str) -> Sequence[str]:
"""Returns common ports for completion."""
return [k for k in ('80', '443', '8080') if k.startswith(incomplete)]
class InterceptHandler(logging.Handler): # pragma: no cover
"""Intercept handler taken from Loguru's documentation."""
def emit(self, record: logging.LogRecord) -> None:
level: Union[str, int]
# Get corresponding Loguru level if it exists
try:
level = logger.level(record.levelname).name
except ValueError:
level = record.levelno
# Find caller from where originated the logged message
frame: Optional[FrameType] = logging.currentframe()
depth = 2
while frame and frame.f_code.co_filename == logging.__file__:
frame = frame.f_back
depth += 1
logger.opt(depth=depth, exception=record.exc_info).log(
level, record.getMessage())
def setup_log_intercept_handler() -> None: # pragma: no cover
"""Sets up Loguru to intercept records from the logging module."""
logging.basicConfig(handlers=(InterceptHandler(),), level=0)
def command_with_config_file(
config_file_param_name: str = 'config',
default_section: Optional[str] = None) -> Type[click.Command]:
"""
Returns a custom command class that can read from a configuration file
in place of missing arguments.
Parameters
----------
config_file_param_name : str
The name of the parameter given to Click in ``click.option``.
default_section : Optional[str]
Default top key of YAML to read from.
"""
home = pathlib.Path.home()
default_config_file_path = xdg.xdg_config_home() / 'xirvik.yml'
if sys.platform == 'win32': # pragma: no cover
default_config_file_path = (home /
'AppData/Roaming/xirvik-tools/config.yml')
elif sys.platform == 'darwin': # pragma: no cover
default_config_file_path = (
home / 'Library/Application Support/xirvik-tools/config.yml')
class _ConfigFileCommand(click.Command):
def invoke(self, ctx: click.Context) -> Any:
config_file_path = (ctx.params.get(config_file_param_name,
default_config_file_path)
or default_config_file_path)
config_data: Any = {}
try:
with open(config_file_path) as f:
config_data = yaml.safe_load(f)
except FileNotFoundError: # pragma: no cover
pass
if isinstance(config_data, dict):
alt_data = (config_data.get(default_section, {})
if default_section is not None else {})
for param in ctx.params.keys():
if (ctx.get_parameter_source(param) ==
ParameterSource.DEFAULT):
yaml_param = param.replace('_', '-')
if yaml_param in alt_data:
ctx.params[param] = alt_data[yaml_param]
elif yaml_param in config_data:
ctx.params[param] = config_data[yaml_param]
ctx.params[config_file_param_name] = config_file_path
else: # pragma: no cover
warnings.warn(f'Unexpected type in {config_file_path}: ' +
str(type(config_data)))
return super().invoke(ctx)
return _ConfigFileCommand
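# Illustrative usage sketch (annotation added, not part of the original module).
# The command name, its parameters and the 'list-torrents' config section are
# hypothetical placeholders; only the two decorators come from this module.
#
#     @click.command(cls=command_with_config_file('config', 'list-torrents'))
#     @common_options_and_arguments
#     def list_torrents(host, username, password, max_retries, debug,
#                       backoff_factor, netrc, config):
#         """Options omitted on the command line may be filled from xirvik.yml."""
#         setup_logging(debug)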
| 7,045 |
src/vtra/analysis/province_flow_mapping/commune_poi_analysis.py
|
oi-analytics/oia-transport-archive
| 1 |
2167944
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 14 15:41:39 2018
@author: elcok
"""
import geopandas as gpd
import pandas as pd
import os
import igraph as ig
import numpy as np
import sys
from vtra.utils import load_config,extract_value_from_gdf,get_nearest_node,gdf_clip,count_points_in_polygon
from vtra.transport_network_creation import province_shapefile_to_network, add_igraph_time_costs_province_roads
def netrev_edges(region_name,start_points,end_points,graph,save_edges = True,output_path ='',excel_writer =''):
"""
Assign net revenue to roads assets in Vietnam
Inputs are:
start_points - GeoDataFrame of start points for shortest path analysis.
end_points - GeoDataFrame of potential end points for shorest path analysis.
G - iGraph network of the province.
save_edges -
Outputs are:
Shapefile with all edges and the total net reveneu transferred along each edge
GeoDataFrame of total net revenue transferred along each edge
"""
save_paths = []
path_index = 0
for iter_,place in start_points.iterrows():
try:
closest_center = end_points.loc[end_points['OBJECTID']
== place['NEAREST_C_CENTER']]['NEAREST_G_NODE'].values[0]
pos0_i = graph.vs[node_dict[place['NEAREST_G_NODE']]]
pos1_i = graph.vs[node_dict[closest_center]]
if pos0_i != pos1_i:
path = graph.get_shortest_paths(pos0_i,pos1_i,weights='min_cost',output="epath")
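# Note (annotation added): with output="epath", igraph returns a list of
# edge-index lists (one list per target vertex), hence the [0] in the
# aggregations below to pick the single path between pos0_i and pos1_i.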
get_od_pair = (place['NEAREST_G_NODE'],closest_center)
get_path = [graph.es[n]['edge_id'] for n in path][0]
get_dist = sum([graph.es[n]['length'] for n in path][0])
get_time = sum([graph.es[n]['min_time'] for n in path][0])
get_travel_cost = sum([graph.es[n]['min_cost'] for n in path][0])
path_index += 1
save_paths.append(('path_{}'.format(path_index),get_od_pair,get_path,place['netrev'],get_travel_cost,get_dist,get_time))
except Exception:
# skip start points that could not be routed to a commune center
print(iter_)
save_paths_df = pd.DataFrame(save_paths,columns = ['path_index','od_nodes','edge_path','netrev','travel_cost','distance','time'])
save_paths_df.to_excel(excel_writer,province_name,index = False)
excel_writer.save()
del save_paths_df
all_edges = [x['edge_id'] for x in graph.es]
all_edges_geom = [x['geometry'] for x in graph.es]
gdf_edges = gpd.GeoDataFrame(pd.DataFrame([all_edges,all_edges_geom]).T,crs='epsg:4326')
gdf_edges.columns = ['edge_id','geometry']
gdf_edges['netrev'] = 0
for path in save_paths:
gdf_edges.loc[gdf_edges['edge_id'].isin(path[2]),'netrev'] += path[3]
if save_edges == True:
gdf_edges.to_file(os.path.join(output_path,'weighted_edges_district_center_flows_{}.shp'.format(region_name)))
return gdf_edges
if __name__ == '__main__':
data_path,calc_path,output_path = load_config()['paths']['data'],load_config()['paths']['calc'],load_config()['paths']['output']
# provinces to consider
province_list = ['Thanh Hoa','Binh Dinh','Lao Cai']
# province_list = ['<NAME>']
district_committe_names = ['district_people_committee_points_thanh_hoa.shp','district_province_peoples_committee_point_binh_dinh.shp','district_people_committee_points_lao_cai.shp']
shp_output_path = os.path.join(output_path,'flow_mapping_shapefiles')
flow_output_excel = os.path.join(output_path,'flow_mapping_paths','province_roads_district_center_flow_paths.xlsx')
excl_wrtr = pd.ExcelWriter(flow_output_excel)
for prn in range(len(province_list)):
province = province_list[prn]
# set all paths for all input files we are going to use
province_name = province.replace(' ','').lower()
edges_in = os.path.join(data_path,'Roads','{}_roads'.format(province_name),'vietbando_{}_edges.shp'.format(province_name))
nodes_in = os.path.join(data_path,'Roads','{}_roads'.format(province_name),'vietbando_{}_nodes.shp'.format(province_name))
population_points_in = os.path.join(data_path,'Points_of_interest','population_points.shp')
commune_center_in = os.path.join(data_path,'Points_of_interest',district_committe_names[prn])
province_path = os.path.join(data_path,'Vietnam_boundaries','who_boundaries','who_provinces.shp')
commune_path = os.path.join(data_path,'Vietnam_boundaries','boundaries_stats','commune_level_stats.shp')
# path_width_table = os.path.join(data_path,'Roads','road_properties','road_properties.xlsx')
# load provinces and get geometry of the right province
provinces = gpd.read_file(province_path)
provinces = provinces.to_crs({'init': 'epsg:4326'})
province_geom = provinces.loc[provinces.NAME_ENG == province].geometry.values[0]
#clip all to province
prov_pop = gdf_clip(population_points_in,province_geom)
prov_commune_center = gdf_clip(commune_center_in,province_geom)
if 'OBJECTID' not in prov_commune_center.columns.values.tolist():
prov_commune_center['OBJECTID'] = prov_commune_center.index
print (prov_commune_center)
prov_communes = gdf_clip(commune_path,province_geom)
# load nodes and edges
nodes = gpd.read_file(nodes_in)
nodes = nodes.to_crs({'init': 'epsg:4326'})
sindex_nodes = nodes.sindex
# get revenue values for each village
# first create sindex of all villages to count number of villages in commune
prov_pop_sindex = prov_pop.sindex
# create new column in prov_communes with amount of villages
prov_communes['n_villages'] = prov_communes.geometry.apply(lambda x: count_points_in_polygon(x,prov_pop_sindex))
prov_communes['netrev_village'] = (prov_communes['netrevenue']*prov_communes['nfirm'])/prov_communes['n_villages']
commune_sindex = prov_communes.sindex
# give each village a net revenue based on average per village in commune
prov_pop['netrev'] = prov_pop.geometry.apply(lambda x: extract_value_from_gdf(x,commune_sindex,prov_communes,'netrev_village'))
# and use average if commune has no stats
# prov_pop.loc[prov_pop['netrev'] == 0,'netrev'] = prov_pop['netrev'].mean()
# get nearest node in network for all start and end points
prov_pop['NEAREST_G_NODE'] = prov_pop.geometry.apply(lambda x: get_nearest_node(x,sindex_nodes,nodes,'NODE_ID'))
prov_commune_center['NEAREST_G_NODE'] = prov_commune_center.geometry.apply(lambda x: get_nearest_node(x,sindex_nodes,nodes,'NODE_ID'))
# prepare for shortest path routing, we'll use the spatial index of the centers
# to find the nearest center for each population point
sindex_commune_center = prov_commune_center.sindex
prov_pop['NEAREST_C_CENTER'] = prov_pop.geometry.apply(lambda x: get_nearest_node(x,sindex_commune_center,prov_commune_center,'OBJECTID'))
# load network
# G = province_shapefile_to_network(edges_in,path_width_table)
G = province_shapefile_to_network(edges_in)
G = add_igraph_time_costs_province_roads(G,0.019)
nodes_name = np.asarray([x['name'] for x in G.vs])
nodes_index = np.asarray([x.index for x in G.vs])
node_dict = dict(zip(nodes_name,nodes_index))
# get updated edges
edges_updated = netrev_edges(province_name,prov_pop,prov_commune_center,G,save_edges = True,output_path = shp_output_path,excel_writer = excl_wrtr)
| 6,948 |
presto/verify.py
|
pwgbots/presto
| 0 |
2170407
|
# Software developed by <NAME> for the PrESTO project
# Code repository: https://github.com/pwgbots/presto
# Project wiki: http://presto.tudelft.nl/wiki
"""
Copyright (c) 2019 Delft University of Technology
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.contrib.auth.models import User
from django.db.models import Count
from django.shortcuts import render
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from .models import PrestoBadge, LetterOfAcknowledgement
# python modules
import os
import sys
import traceback
# presto modules
from presto.generic import generic_context, warn_user, inform_user
from presto.utils import log_message
# no login required for verification of acknowledgement letters
@method_decorator(csrf_exempt, name='dispatch')
def verify(request, **kwargs):
context = generic_context(request)
# an authentication code in the URL should generate a JavaScript AJAX call
context['hex'] = kwargs.get('hex', '')
context['page_title'] = 'Presto Award Verification'
return render(request, 'presto/verify.html', context)
| 2,248 |
codes_auto/82.remove-duplicates-from-sorted-list-ii.py
|
smartmark-pro/leetcode_record
| 0 |
2169942
|
#
# @lc app=leetcode.cn id=82 lang=python3
#
# [82] remove-duplicates-from-sorted-list-ii
#
None
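# Illustrative sketch of one standard approach (annotation added; the original
# submission body is missing above, so this is not the author's code). A dummy
# head is used so that whole runs of duplicate values can be unlinked.
#
#     class Solution:
#         def deleteDuplicates(self, head):
#             dummy = ListNode(0, head)
#             prev, cur = dummy, head
#             while cur:
#                 if cur.next and cur.next.val == cur.val:
#                     while cur.next and cur.next.val == cur.val:
#                         cur = cur.next          # walk to the end of the duplicate run
#                     prev.next = cur.next        # drop the whole run
#                 else:
#                     prev = cur
#                 cur = cur.next
#             return dummy.next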
# @lc code=end
| 111 |
discernwise/services/train/config.py
|
eeriksp/discernwise
| 3 |
2169300
|
from dataclasses import dataclass, InitVar
from pathlib import Path
from config import Config
@dataclass
class TrainingConfig(Config):
epochs: int = 2
batch_size: int = 32
data_dir_str: InitVar[str] = None # the path of the dataset
def __post_init__(self, img_height: int, img_width: int, model_path_str: str, data_dir_str: str):
super().__post_init__(img_height, img_width, model_path_str)
self.data_dir = Path(data_dir_str).resolve()
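# Illustrative construction sketch (annotation added). The keyword names mirror
# the InitVars consumed by __post_init__ above and in the parent Config; the
# concrete values and paths are placeholders.
#
#     cfg = TrainingConfig(img_height=180, img_width=180,
#                          model_path_str='models/classifier.h5',
#                          data_dir_str='data/train',
#                          epochs=5, batch_size=32)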
| 473 |
main.py
|
BerkayKOCAK/scraper-bot
| 5 |
2169970
|
"""
SCRAPER-BOT CLI
<NAME> - 2020 May
CLI Web page scraper for product prices.
Written with Python 3.7.3
Additional libraries:
*Beautiful Soup 4.9^
*PyInquirer
*PyFiglet
"""
from __future__ import print_function, unicode_literals
from src import scraper, utils, scrape_elements
from PyInquirer import style_from_dict, Token, prompt, Separator
from pprint import pprint
from pyfiglet import Figlet
import asyncio
style1 = style_from_dict({
Token.Separator: '#cc<PASSWORD>',
Token.QuestionMark: '#<PASSWORD>',
Token.Selected: '#<PASSWORD>', # default
Token.Pointer: '#<PASSWORD>',
Token.Instruction: '', # default
Token.Answer: '#<PASSWORD> bold',
Token.Question: '',
})
style2 = style_from_dict({
Token.Separator: '#33FFEC',
Token.QuestionMark: '#6<PASSWORD> bold',
Token.Selected: '#33FFEC', # default
Token.Pointer: '#AF<PASSWORD> bold',
Token.Instruction: '#EC<PASSWORD>', # defaults
Token.Answer: '#AF<PASSWORD> bold',
Token.Question: '#EC<PASSWORD>3',
})
template_vendor_selection = [
{
'type': 'checkbox',
'message': 'Selected Options',
'name': 'vendors', #name of selected items array
'choices': [
Separator(' = Vendors = ')
],
'validate': lambda answer: 'You must choose at least one vendor.' \
if len(answer) == 0 else True
}
]
template_product_selection = [
{
'type': 'checkbox',
'message': 'Selected Options',
'name': 'products',
'choices': [
Separator(' = Products = ')
],
'validate': lambda answer: 'You must choose at least one product.' \
if len(answer) == 0 else True
}
]
def main ():
f = Figlet(font='cyberlarge')
print(f.renderText(' - SCRAPER - '))
print(f.renderText(' * By Berkay * '))
utils.instructions()
try:
utils.vendor_folder_mapping()
vendor_selection = utils.menu_add_vendors(template_vendor_selection)
#TODO - delete outputs completely or take it to a old_outputs folder
except Exception as identifier:
print(" - ERROR AT MAPPING INITIALIZE -")
print(identifier)
exit(0)
while(True):
vendors = prompt(vendor_selection, style=style1)
if(len(vendors['vendors']) != 0):
print("Selected Vendors : "+str(vendors['vendors']))
asyncio.run(utils.timeout(1))
for vendor in vendors['vendors']:
utils.product_file_mapping(vendor)
product_selection = utils.menu_add_products(template_product_selection)
if(len(product_selection[0].get("choices"))>1):
products = prompt(product_selection, style=style2)
if (len(products['products']) != 0):
print("Selected Products : "+str(products['products']))
asyncio.run(utils.timeout(1))
asyncio.run(scraper.scraper_init(vendors['vendors'], products['products']))
else:#maybe throw this during mapping
print("No Product File Found For Vendor : "+str(vendors['vendors']))
break
else:
pass
main()
| 3,289 |
tspvisual/solvers/ts.py
|
bcyran/tsp-visual
| 0 |
2168918
|
from copy import deepcopy
from itertools import product
from math import inf
from tspvisual.solver import Property, Solver, SolverState
from tspvisual.solvers.greedy import GreedySolver
from tspvisual.tsp import TSP, Neighbourhood, Path
class TSSolver(Solver):
"""Tabu Search solver for TSP.
"""
name = "<NAME>"
properties = [
Property('Iterations', 'iterations', int, 100),
Property('Cadence', 'cadence', int, 18),
Property('Neighbourhood', 'neighbourhood', Neighbourhood, 'INVERT'),
Property('Reset threshold', 'reset_threshold', int, 45),
Property('Stop threshold', 'stop_threshold', int, 100),
Property('Run time', 'run_time', int, 0)
]
def __init__(self):
super().__init__()
self.iterations = 1000
self.cadence = 18
self.neighbourhood = Neighbourhood.INVERT
self.reset_threshold = 45
self.stop_threshold = 450
self.run_time = 0
def _setup(self):
"""Sets up instance-specific data structures.
"""
self._tabu = [[0 for _ in range(self.tsp.dimension)]
for _ in range(self.tsp.dimension)]
def solve(self, tsp, steps=True):
# Make sure given argument is of correct type
if not isinstance(tsp, TSP):
raise TypeError('solve() argument has to be of type \'TSP\'')
self.tsp = tsp
self._setup()
# Total iteration number or time for calculating progress
if steps:
total = self.run_time if self.run_time else self.iterations
# Starting path from a greedy solver
greedy_solver = GreedySolver()
cur_path = greedy_solver.result(self.tsp)
# Current minimum path
min_path = deepcopy(cur_path)
# Counter of non-improving iterations since last reset
reset_counter = 0
# Counter of iterations since last improvement
stop_counter = 0
# Start the timer
self._start_timer()
for i in range(self.iterations):
# Yield the solver state
if steps:
# Current iteration number or time
current = i if not self.run_time else self._time_ms()
yield SolverState(self._time(), current / total,
deepcopy(cur_path), deepcopy(min_path))
# Find best neighbour of the current path
cur_path = self._min_neighbour(cur_path)
if cur_path.distance < min_path.distance:
# Keep this neighbour if it's better than current minimum
min_path = deepcopy(cur_path)
reset_counter, stop_counter = 0, 0
else:
# Otherwise increment reset and stop counters
reset_counter += 1
stop_counter += 1
# Terminate search if threshold of iterations is exceeded
if not self.run_time and self.stop_threshold and \
stop_counter >= self.stop_threshold:
break
# Restart with random solution if reset threshold is exceeded
if reset_counter >= self.reset_threshold:
cur_path.shuffle(0, self.tsp.dimension + 1)
cur_path.distance = self.tsp.path_dist(cur_path)
reset_counter = 0
self._update_tabu()
# Terminate search after exceeding specified runtime
if self.run_time and self._time_ms() >= self.run_time:
break
yield SolverState(self._time(), 1, None, deepcopy(min_path), True)
def _min_neighbour(self, path):
"""Finds shortest neighbour of the given path.
:param Path path: Path whose neighbourhood will be searched.
"""
min_neigh = Path(self.tsp.dimension + 1)
min_neigh.distance = inf
best_move = ()
# Iterate through all possible 2-city moves
for i, j in product(range(1, self.tsp.dimension), repeat=2):
# Skip redundant moves
if self.neighbourhood == Neighbourhood.SWAP or \
self.neighbourhood == Neighbourhood.INVERT:
if j <= i:
continue
if self.neighbourhood == Neighbourhood.INSERT:
if abs(i - j) == 1 and i > j:
continue
# Skip tabu moves
if self._tabu[i][j]:
continue
# Perform the move
cur_neigh = deepcopy(path)
cur_neigh.move(self.neighbourhood, i, j)
cur_neigh.distance = self.tsp.path_dist(cur_neigh)
# If resulting path is better than current minimum keep its
# length and move indices
if cur_neigh.distance < min_neigh.distance:
min_neigh, best_move = cur_neigh, (i, j)
# Tabu found move
if best_move:
self._tabu[best_move[0]][best_move[1]] = self.cadence
self._tabu[best_move[1]][best_move[0]] = self.cadence
# In small instances it can happen all neighbours are already on tabu
# list, if that happens we cannot return an empty path
return min_neigh if min_neigh.distance != inf else path
def _update_tabu(self):
"""Updates tabu list by decrementing all non-zero entries.
"""
for i, j in product(range(self.tsp.dimension), repeat=2):
if self._tabu[i][j] > 0:
self._tabu[i][j] -= 1
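# Illustrative driver sketch (annotation added; loading the TSP instance is a
# placeholder). solve() is a generator that yields intermediate SolverState
# objects; the last yielded state is built around the best path found.
#
#     solver = TSSolver()
#     final_state = None
#     for final_state in solver.solve(tsp):   # tsp: a tspvisual.tsp.TSP instance
#         pass                                # consume intermediate SolverState objects
#     # final_state now holds the terminal SolverState for the best path found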
| 5,544 |
examples/compare_filters.py
|
teresaupdyke/ohw21-proj-radar-qc
| 1 |
2169657
|
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import time
from radarqc.csfile import CSFile
from radarqc.dataset import DataSet
from radarqc.filtering import (
NoiseFilter,
PCAFilter,
PreFitPCAFilter,
SpectrumFilter,
)
from radarqc.processing import (
Abs,
CompositeProcessor,
GainCalculator,
Normalize,
SignalProcessor,
)
from radarqc import csfile
from typing import Iterable
def plot_vertical(*images) -> None:
image = np.concatenate(images)
plt.imshow(image, aspect=4)
plt.colorbar()
plt.show()
def _time(fn, *args, **kwargs) -> tuple:
t = time.time()
result = fn(*args, **kwargs)
return result, time.time() - t
def log_timing(t: float, name: str) -> None:
line_length = 80
print("-" * line_length)
print("Time taken for {}: {} sec".format(name, t))
print("-" * line_length)
def create_preprocessor() -> SignalProcessor:
reference_gain = 34.2
return CompositeProcessor(
Abs(), GainCalculator(reference=reference_gain), Normalize()
)
def compare_filters(
spectrum: np.ndarray, filters: Iterable[SpectrumFilter]
) -> None:
for filt in filters:
name = filt.__class__.__name__
filtered, t = _time(filt, spectrum)
log_timing(t, name)
plot_vertical(spectrum, filtered)
def generate_paths(dir: str) -> Iterable[str]:
return glob.glob(os.path.join(dir, "*.cs"))
def main():
base = "../../codar"
paths = generate_paths(base)
preprocess = create_preprocessor()
dataset = DataSet(paths, preprocess)
num_components = 0.8
noise = NoiseFilter(threshold=0.18, window_std=0.02)
pca = PCAFilter(num_components=num_components)
prefit = PreFitPCAFilter(dataset.spectra, num_components)
path = "../../codar/CSS_ASSA_21_06_26_1400.cs"
with open(path, "rb") as f:
cs = csfile.load(f, preprocess)
filters = [prefit, pca, noise]
compare_filters(cs.antenna3, filters)
if __name__ == "__main__":
main()
| 2,042 |
landslide_sentry/cache_manager/file_registry.py
|
ChenyuZhang16/nasa-space-app-sentry
| 1 |
2165834
|
REGISTERED_FILE = []
EAST_TEXT_DETECTOR = {
"key": "M_ALL_006.hdf5",
"download_name": "M_ALL_006.hdf5",
"name": "M_ALL_006.hdf5",
"dir_path": "model",
"url": "https://drive.google.com/u/0/uc?id=1SCPfjFTn3f6-Ofzx1tgV0xnJkixXLhsN&export=download",
"compression_method": None,
"from_google_drive": True,
"google_drive_id": "1SCPfjFTn3f6-Ofzx1tgV0xnJkixXLhsN",
}
REGISTERED_FILE.append(EAST_TEXT_DETECTOR)
EAST_TEXT_DETECTOR = {
"key": "NASA_Landslide_Catalog_2008_2021.html",
"download_name": "NASA_Landslide_Catalog_2008_2021.html",
"name": "NASA_Landslide_Catalog_2008_2021.html",
"dir_path": "HTML",
"url": "https://drive.google.com/u/0/uc?id=1zttpFp3vrHMqCk6jkQOTAX_cFd1NoxIG&export=download",
"compression_method": None,
"from_google_drive": True,
"google_drive_id": "1zttpFp3vrHMqCk6jkQOTAX_cFd1NoxIG&export",
}
REGISTERED_FILE.append(EAST_TEXT_DETECTOR)
EAST_TEXT_DETECTOR = {
"key": "nasa_global_landslide_catalog_point.csv",
"download_name": "nasa_global_landslide_catalog_point.csv",
"name": "nasa_global_landslide_catalog_point.csv",
"dir_path": "data",
"url": "https://maps.nccs.nasa.gov/arcgis/sharing/content/items/eec7aee8d2e040c7b8d3ee5fd0e0d7b9/data",
"compression_method": None,
"from_google_drive": False,
"google_drive_id": None,
}
REGISTERED_FILE.append(EAST_TEXT_DETECTOR)
def getResourceRecord(key: str, registry=REGISTERED_FILE):
"""
Find the file record in the registry with matching key
INPUTS:
key - str: the file (record) key. The complete list of registered keys can be
found in landslide_sentry/cache_manager/file_registry.py
registry - list: list containing the record of registered files.
RETURNS:
dict: record dict with matching key
"""
for entry in registry:
if entry["key"] == key:
return entry
raise ValueError(f"Incorrect file key: {key} is not in mmxai cache registry!")
| 1,995 |
util/latlng_recon_geojson_test.py
|
rpatil524/data
| 0 |
2169862
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for util.latlng_recon_geojson"""
import json
import os
import sys
import unittest
from shapely import geometry
from unittest import mock
sys.path.append(
os.path.dirname(os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))))
from util import latlng_recon_geojson
_SC_COUNTY_GJ_STR = """
{"type": "Polygon", "coordinates": [[[-122.202653, 37.363046], [-122.026107, 37.16681], [-121.575402, 36.893033], [-121.488949, 36.983148], [-121.215406, 36.961248], [-121.23711, 37.157204], [-121.399019, 37.150135], [-121.45575, 37.24944], [-121.409075, 37.380672], [-121.472952, 37.482333], [-122.115161, 37.46628], [-122.202653, 37.363046]]]}
"""
_ZIP_94041_GJ_STR = """
{"type": "Polygon", "coordinates": [[[-122.09562, 37.39428], [-122.096323, 37.393119], [-122.093774, 37.392494], [-122.09255, 37.389938], [-122.09128, 37.38951], [-122.080708, 37.384256], [-122.07758, 37.38254], [-122.07666, 37.38388], [-122.07523, 37.38315], [-122.07612, 37.38201], [-122.072794, 37.380387], [-122.07188, 37.38151], [-122.07051, 37.38089], [-122.068999, 37.382159], [-122.068418, 37.384224], [-122.067774, 37.378295], [-122.06243, 37.37632], [-122.06099, 37.37742], [-122.060203, 37.37959], [-122.059226, 37.380059], [-122.062096, 37.38068], [-122.061869, 37.381343], [-122.05932, 37.3808], [-122.058148, 37.381386], [-122.057883, 37.383031], [-122.057211, 37.384908], [-122.05533, 37.38648], [-122.057857, 37.387535], [-122.06291, 37.38909], [-122.091312, 37.400534], [-122.092117, 37.396977], [-122.093738, 37.397298], [-122.09457, 37.39595], [-122.092358, 37.395033], [-122.093435, 37.393435], [-122.09562, 37.39428]]]}
"""
def _mock_get_gj(place_type, parent_place):
# In this test, we pretend USA has the geoshape of SC County!
if place_type == 'Country':
return {'country/USA': geometry.shape(json.loads(_SC_COUNTY_GJ_STR))}
else:
return {'geoId/06': geometry.shape(json.loads(_ZIP_94041_GJ_STR))}
class LatlngReconGeojsonTest(unittest.TestCase):
@mock.patch('util.latlng_recon_geojson._get_geojsons')
@mock.patch('util.latlng_recon_geojson._get_continent_map')
def test_main(self, mock_cmap, mock_gj):
mock_cmap.return_value = {'country/USA': ['northamerica']}
mock_gj.side_effect = _mock_get_gj
ll2p = latlng_recon_geojson.LatLng2Places()
# Cascal in MTV exists in both "state" (94041) and "country" (SC county)
self.assertEqual(ll2p.resolve(37.391, -122.081),
['geoId/06', 'country/USA', 'northamerica'])
# Zareen's doesn't exist in the "state".
self.assertEqual(ll2p.resolve(37.419, -122.079),
['country/USA', 'northamerica'])
# Bi-rite creamery in SF exists in neither.
self.assertEqual(ll2p.resolve(37.762, -122.426), [])
if __name__ == '__main__':
unittest.main()
| 3,473 |
MLNet-2.0/plotting/clustering/plot.py
|
bt3gl-labs/ML-Classifying-Complex-Networks
| 15 |
2169511
|
#!/usr/bin/env python
__author__ = "<NAME>"
__copyright__ = "Copyright 2014"
__credits__ = ["<NAME>"]
__license__ = "GPL"
__version__ = "2.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
import numpy as np
import pylab as pl
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from itertools import cycle
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import MeanShift, estimate_bandwidth
import os
import json
from constants import FEATURES, INPUT_FOLDER, OUTPUT_FOLDER, TYPE_NOR, FEATURES_WE_WANT,FEATURES_INDEX
from matplotlib.colors import ListedColormap
''' Show below is a logistic-regression classifiers decision boundaries on the iris dataset. The datapoints are colored according to their labels.'''
def get_input_path(number, typen):
return INPUT_FOLDER + 'together' + str(number) + '_train_0.8_' + typen + '_with_outlier.data',INPUT_FOLDER + 'together' \
+ str(number) + '_test_0.8_' + typen +'_with_outlier.data'
def get_output_path(typen, number, feat, folder):
if not os.path.exists(OUTPUT_FOLDER):
os.makedirs(OUTPUT_FOLDER)
outfolder = OUTPUT_FOLDER + folder
if not os.path.exists(outfolder):
os.makedirs(outfolder)
return outfolder + 'set' + number + feat + '_0.8_' + typen
def get_data_here(inputfile, inputfilet, indexx, indexy, indexz):
data = np.loadtxt(inputfile, delimiter = ',')
feat1 = data[:, indexx]
feat2 = data[:, indexy]
feat3 = data[:, indexz]
clas = data[:,-1]
X = []
Y = []
for i, f1 in enumerate(feat1):
if f1 != 0 and feat2[i] != 0 and feat3[i] != 0:
X.append([f1, feat2[i], feat3[i]])
Y.append(clas[i])
X = np.array(X)
Y = np.array(Y)
return X, Y
def get_data_here_entire(inputfile):
data = np.loadtxt(inputfile, delimiter = ',')
X = data[:, :-2]
Y = data[:,-1]
return X, Y
def plotting_kmeans(X, Y, labelx, labely, labelz, outputfile):
np.random.seed(5)
centers = [[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]
estimators = {'k_means_3': KMeans(n_clusters=3), 'k_means_iris_4': KMeans(n_clusters=4), 'k_means_iris_6': KMeans(n_clusters=6)}
fignum = 1
# kmeans
for name, est in estimators.iteritems():
pl.clf()
pl.cla()
fig = pl.figure(fignum)
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=labels.astype(np.float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel(labelx)
ax.set_ylabel(labely)
ax.set_zlabel(labelz)
pl.savefig(outputfile + '_' + name + '_KMEANS.png' , orientation='landscape')
pl.clf()
pl.cla()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
ax.scatter(X[Y==1, 0], X[Y==1, 1], X[Y==1, 2], c="r")
ax.scatter(X[Y==2, 0], X[Y==2, 1], X[Y==2, 2], c='b')
ax.scatter(X[Y==3, 0], X[Y==3, 1], X[Y==3, 2], c='y')
ax.scatter(X[Y==4, 0], X[Y==4, 1], X[Y==4, 2], c='g')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel(labelx)
ax.set_xlabel(labelx)
ax.set_ylabel(labely)
ax.set_zlabel(labelz)
pl.savefig(outputfile + '_GROUND.png' , orientation='landscape')
fignum = fignum + 1
def plotting_aff(X, Y, labelx, labely, labelz, outputfile):
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
pl.figure(1)
pl.clf()
pl.cla()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
pl.plot(X[my_members, 0], X[my_members, 1], col + '.')
pl.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col, markeredgecolor='k', markersize=14)
pl.title('Estimated number of clusters: %d' % n_clusters_)
pl.savefig(outputfile + '_' + labelx + '_' +labely + '_'+labelz + '_aff.png' , orientation='landscape')
def plotting_aff_dbscan(X, labels_true, outputfile):
X = StandardScaler().fit_transform(X)
db = DBSCAN(eps=0.5, min_samples=70).fit(X)
core_samples = db.core_sample_indices_
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f" % metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f" % metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f" % metrics.silhouette_score(X, labels))
pl.clf()
pl.cla()
unique_labels = set(labels)
colors = pl.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
markersize = 6
class_members = [index[0] for index in np.argwhere(labels == k)]
cluster_core_samples = [index for index in core_samples if labels[index] == k]
for index in class_members:
x = X[index]
if index in core_samples and k != -1: markersize = 14
else: markersize = 6
pl.plot(x[0], x[1], 'o', markerfacecolor=col, markeredgecolor='k', markersize=markersize)
pl.title('Estimated number of clusters: %d' % n_clusters_)
pl.savefig(outputfile + '_aff_dbscan.png' , orientation='landscape')
def main():
''' OH MY GOD SO MANY FORS... I HOPE NOBODY SEES THIS'''
# plot config
with open("conf.json") as json_file:
s = json.load(json_file)
pl.rcParams.update(s)
# for each set
for number in range(1, 6):
print '... Starting set ' + str(number) + ' for norm ' + 'xmin'
alread = set()
for nx, axisx in enumerate(FEATURES_WE_WANT):
inputfile, inputfilet = get_input_path(number, 'xmin')
indexx = FEATURES_INDEX[nx]
alread.add(axisx)
for ny, axisy in enumerate(FEATURES_WE_WANT):
if axisy not in alread:
indexy = FEATURES_INDEX[ny]
alread.add(axisy)
for nz, axisz in enumerate(FEATURES_WE_WANT):
if axisz not in alread:
indexz = FEATURES_INDEX[nz]
alread.add(axisz)
print 'Features: ' + axisx + '_' + axisy + '_' + axisz
X, Y = get_data_here(inputfile, inputfilet, indexx, indexy, indexz)
labelx = axisx
labely = axisy
labelz = axisz
# plot kmeans
outputfile = get_output_path('xmin', str(number), axisx + '_' + axisy + '_' + axisz, 'plots_kmeans/')
plotting_kmeans(X, Y, labelx, labely, labelz, outputfile)
# plot clust
outputfile = get_output_path('xmin', str(number), axisx + '_' + axisy + '_' + axisz, 'plots_aff/')
plotting_aff(X, Y, labelx, labely, labelz, outputfile)
# plot clust all
print '\nStarting affinity all ...'
X, Y = get_data_here_entire(inputfile)
outputfile = get_output_path('xmin', str(number), axisx + '_' + axisy + '_' + axisz, 'plots_aff_all/')
plotting_aff(X, Y, labelx, labely, labelz, outputfile)
# plot clust all
print '\nStarting dbsc scan ...'
X, Y = get_data_here_entire(inputfile)
outputfile = get_output_path('xmin', str(number), axisx + '_' + axisy + '_' + axisz, 'plots_aff_dbscan/')
plotting_aff_dbscan(X, Y, outputfile)
print('Done!\n')
if __name__ == '__main__':
main()
| 8,823 |
python/dynamic_programming/knapsack.py
|
rcanepa/cs-fundamentals
| 0 |
2168692
|
"""0/1 Knapsack Problem (0/1 means that items cannot be divided)
Given a bag which can only hold a weight W and a list of items,
each one with a weight Wi and a price Pi, which items should be
put on the bag to maximize the total value of the bag?
Example:
Input:
W = 4
i1 = (W1 = 2, P1 = 1)
i2 = (W2 = 1, P2 = 2)
i3 = (W3 = 3, P3 = 3)
i4 = (W4 = 2, P4 = 3)
Solutions:
i2, i4 => (W = 3, P = 5)
i2, i3 => (W = 4, P = 5)
"""
from collections import namedtuple
def knapsack(max_weight, items):
"""
0 1 2 3 ... w
0 no item
1 item 1
2 item 2
...
n item n
"""
n = len(items) + 1
m = max_weight + 1
dp = [[0] * m for _ in range(n)]
for i_index in range(1, n):
current_item = items[i_index - 1]
for current_weight in range(1, m):
if current_item.w <= current_weight:
dp[i_index][current_weight] = max(
current_item.v + dp[i_index - 1][current_weight - current_item.w],
dp[i_index - 1][current_weight]
)
else:
# item does not fit: carry over the best value achievable without it
dp[i_index][current_weight] = dp[i_index - 1][current_weight]
return dp[n - 1][m - 1]
Item = namedtuple("Item", ["w", "v"]) # w = weight, v = value
if __name__ == "__main__":
max_weight = 7
items = [
Item(1, 1),
Item(3, 4),
Item(4, 5),
Item(5, 7)
]
max_value = knapsack(max_weight, items)
print("Max value = ", max_value)
assert max_value == 9
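# Illustrative extension sketch (annotation added): if the full dp table were
# returned instead of only dp[n - 1][m - 1], the chosen items could be recovered
# by walking backwards -- whenever dp[i][w] != dp[i - 1][w], item i - 1 was taken
# and the remaining capacity shrinks by its weight.
#
#     def chosen_items(dp, items, max_weight):
#         picked, w = [], max_weight
#         for i in range(len(items), 0, -1):
#             if dp[i][w] != dp[i - 1][w]:
#                 picked.append(items[i - 1])
#                 w -= items[i - 1].w
#         return picked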
| 1,599 |
image_labelling_tool/managers.py
|
shril/image-annotation
| 61 |
2168077
|
import datetime
from django.db import models
from django.utils import timezone
class LabelsManager (models.Manager):
def empty(self):
return self.filter(labels_json_str='[]')
def not_empty(self):
return self.exclude(labels_json_str='[]')
def modified_by_user(self, user):
return self.filter(last_modified_by=user)
def locked_by_user(self, user):
return self.filter(locked_by=user)
def unlocked(self):
now = timezone.now()
# a row is unlocked if it was never locked or its lock has already expired
return self.filter(locked_by=None) | self.filter(lock_expiry_datetime__lt=now)
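# Illustrative attachment sketch (annotation added). The real Labels model is
# defined elsewhere in the app; the field names here simply mirror the queries
# used by this manager.
#
#     class Labels(models.Model):
#         labels_json_str = models.TextField(default='[]')
#         locked_by = models.ForeignKey(User, null=True, on_delete=models.SET_NULL,
#                                       related_name='locked_labels')
#         lock_expiry_datetime = models.DateTimeField(null=True)
#         objects = LabelsManager()
#
#     Labels.objects.not_empty()   # images that have at least one label
#     Labels.objects.unlocked()    # labels that are free to be edited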
| 577 |
capstone/house-energy/sim/house_simulator.py
|
FlorianPydde/training-rf-bonsai
| 0 |
2168982
|
import numpy as np
import matplotlib.pyplot as plt
import math
class House():
def __init__(self, K: float = 0.5, C: float = 0.3, Qhvac: float = 9, hvacON: float = 0, occupancy: float = 1, Tin_initial: float = 30):
self.K = K # thermal conductivity
self.C = C # thermal capacity
self.Tin = Tin_initial # Inside Temperature
self.Qhvac = Qhvac # Cooling capacity
self.hvacON = hvacON # control action = 0 (OFF) or 1 (ON)
# 0 (no one in the room) or 1 (somebody in the room)
self.occupancy = occupancy
self.Phvac = Qhvac # Electric power capacity
self.minute = 0
self.hours = 0
self.total_power = 5 # total power consumption
self.max_iterations = 0
plt.close()
def setup_schedule(
self,
custom_t_out: list = [],
max_iterations: int = 24 * 60 // 5,
timestep: int = 5,
schedule_index: int = 0,
starting_hour = 0,
add_forecast_noise = 0,
tset_day_start: int = 7,
tset_day_end: int = 22,
t_set_day:int = 23,
t_set_night:int = 18,
t_mid_point:int = 25,
t_amplitude: int = 5):
""" define the Tset_schedule, Tout_schedule, the length of schedule, timestep
"""
self.add_forecast_noise = add_forecast_noise
self.timestep = max(
1, timestep) # keep in minutes here to keep number of minutes for days consistent
starting_hour_index = self.__time_to_index(starting_hour, timestep)
# an iteration spans 1 day
self.max_iterations = max_iterations
self.occupancy_schedule = np.full(self.max_iterations, 1)
self.Tset_schedule = np.full(self.max_iterations, 25)
a, b = self.__time_to_index(
tset_day_start, timestep), self.__time_to_index(tset_day_end, timestep)
self.Tset_schedule[:a] = t_set_night
self.Tset_schedule[b:] = t_set_night
self.Tset_schedule[a:b] = t_set_day
if len(custom_t_out) > 1:
self.Tout_schedule = list(custom_t_out)
# generate sinus weather
elif schedule_index == 1:
self.Tout_schedule = self.generate_Tout(
t_mid_point=t_mid_point, t_amplitude=t_amplitude, timestep=timestep)
# generate sinus weather add noise
elif schedule_index == 2:
self.Tout_schedule = self.generate_Tout(
t_mid_point=t_mid_point, t_amplitude=t_amplitude, timestep=timestep)
self.Tout_schedule = self.Tout_schedule + \
np.random.normal(0, 0.25, size=len(self.Tout_schedule))
else:
# constant outside weather
self.Tout_schedule = np.full(self.max_iterations, 32)
self.Tset_schedule = np.roll(self.Tset_schedule, -starting_hour_index)
self.Tout_schedule = np.roll(self.Tout_schedule, -starting_hour_index)
self.Tset = self.Tset_schedule[0] # Set Temperature
self.Tout = self.Tout_schedule[0] # Outside temperature
self.T_forecast_1 = self.Tout_schedule[1]  # outside temperature forecast at step i+1
self.T_forecast_2 = self.Tout_schedule[2]  # outside temperature forecast at step i+2
self.T_forecast_3 = self.Tout_schedule[3]  # outside temperature forecast at step i+3
self.T_forecast_4 = self.Tout_schedule[4]  # outside temperature forecast at step i+4
self.T_forecast_5 = self.Tout_schedule[5]  # outside temperature forecast at step i+5
if add_forecast_noise > 0:
self.T_forecast_1 += np.random.normal(0, 0.1)   # forecast noise grows with horizon
self.T_forecast_2 += np.random.normal(0, 0.25)
self.T_forecast_3 += np.random.normal(0, 0.5)
self.T_forecast_4 += np.random.normal(0, 0.75)
self.T_forecast_5 += np.random.normal(0, 1)
# For plotting only
self.time_to_plot = [0]
self.Tin_to_plot = [self.Tin]
self.Tset_to_plot = [self.Tset]
self.Tout_to_plot = [self.Tout]
self.__iter__()
def generate_Tout(self, t_mid_point: int = 25, t_amplitude: int = 5, timestep: int = 5):
"""Use a sinus function to create a change in temperature
"""
weather = [
t_amplitude*math.sin((x-6*60)*math.pi/(12*60))+t_mid_point for x in range(24*60)]
filtered_weather = weather[::timestep]
return filtered_weather
def update_Tout(self, Tout_new):
self.Tout = Tout_new # Update to new outside temperature
def update_Tset(self, Tset_new):
self.Tset = Tset_new # Update to new setpoint temperature
def update_hvacON(self, hvacONnew):
self.hvacON = hvacONnew # update to new hvacON
def update_occupancy(self, occupancy_new):
self.occupancy = occupancy_new # update to new occupancy
def update_Tin(self):
"""Update inside temperation.
Describes the inside temperature evolution as a function of all other variables.
"""
# Note timestep is converted to seconds here, in order to keep units consistent in SI for update.
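# Discrete update implemented below (annotation added):
#   Tin(t+1) = Tin(t) - (dt / C) * [ K * (Tin(t) - Tout) + Qhvac * hvacON ]
# i.e. heat leaks towards the outside temperature at rate K, and the HVAC
# removes Qhvac while it is on, both scaled by the thermal capacity C.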
self.Tin = self.Tin - (self.timestep/60) / self.C * \
(self.K * (self.Tin - self.Tout) + self.Qhvac * self.hvacON)
self.__next__()
self.Tset_to_plot.append(self.Tset)
self.Tin_to_plot.append(self.Tin)
self.Tout_to_plot.append(self.Tout)
self.time_to_plot.append(self.iteration * 5)
def update_time(self):
self.minute, self.hours = self.__index_to_time(self.iteration)
def update_total_power(self):
self.total_power += self.get_Power()
def get_Power(self):
COP = 3
power = self.Phvac * self.hvacON * COP
return power
def show(self):
self.fig, self.ax = plt.subplots(1, 1)
self.ax.clear()
self.ax.plot(self.time_to_plot, self.Tin_to_plot, label='Tin')
self.ax.plot(self.time_to_plot, self.Tset_to_plot, label='Tset')
self.ax.plot(self.time_to_plot, self.Tout_to_plot, label='Tout')
self.ax.set_xlabel('Time [min]')
self.ax.set_ylabel(r'Temperature [$^\circ$C]')
plt.legend()
plt.pause(np.finfo(np.float32).eps)
# print the object nicely
def __str__(self):
string_to_print = []
for key in self.__dict__:
string_to_print.append("{key}='{value}'".format(
key=key, value=self.__dict__[key]))
return ', '.join(string_to_print)
def __repr__(self):
return self.__str__()
def __iter__(self):
self.iteration = 0
return self
def __next__(self):
if self.iteration < self.max_iterations:
self.update_Tset(self.Tset_schedule[self.iteration])
self.T_forecast_1 = self.Tout_schedule[int((self.iteration+1)%self.max_iterations)]
self.T_forecast_2 = self.Tout_schedule[int((self.iteration+2)%self.max_iterations)]
self.T_forecast_3 = self.Tout_schedule[int((self.iteration+3)%self.max_iterations)]
self.T_forecast_4 = self.Tout_schedule[int((self.iteration+4)%self.max_iterations)]
self.T_forecast_5 = self.Tout_schedule[int((self.iteration+5)%self.max_iterations)]
if self.add_forecast_noise > 0:
self.T_forecast_1 += np.random.normal(0, 0.1)
self.T_forecast_2 += np.random.normal(0, 0.25)
self.T_forecast_3 += np.random.normal(0, 0.5)
self.T_forecast_4 += np.random.normal(0, 0.75)
self.T_forecast_5 += np.random.normal(0, 1)
self.update_Tout(self.Tout_schedule[self.iteration])
self.update_occupancy(self.occupancy_schedule[self.iteration])
self.update_time()
self.update_total_power()
self.iteration += 1
else:
raise StopIteration
def __time_to_index(self, hours, timestep=5):
hours_index = int(hours * 60 / timestep)
return hours_index
def __index_to_time(self, iteration, timestep=5):
minute = int(iteration * timestep % 60)
hours = int(iteration*timestep/60 % 24)
return minute, hours
if __name__ == '__main__':
import random
house = House(K=0.5, C=0.3, Qhvac=9)
days = 7
timestep = 5
for episode in range(1):
house.setup_schedule(timestep=timestep,
schedule_index=1,
starting_hour=12,
t_set_day=30,
t_set_night=30,
)
for i in range(house.max_iterations):
# house.update_hvacON(random.randint(0, 1))
house.update_hvacON(1)
house.update_Tin()
print(f'Hour : {house.hours}', f'Minute : {house.minute}', f'Tout : {house.Tout}', f'Tin : {house.Tin}',
f'HvacOn : {house.hvacON}', f'Tset : {house.Tset}', f'Total Power : {house.total_power}')
house.show()
| 8,975 |
Sketches/MPS/BugReports/FixTests/Kamaelia/Tools/AxonVisualiser.py
|
sparkslabs/kamaelia_orig
| 12 |
2169571
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
import sys
from Kamaelia.Visualisation.Axon.AxonVisualiserServer import AxonVisualiserServer, AxonVisualiser, text_to_token_lists
from Kamaelia.UI.GraphicDisplay import PygameDisplay
from Kamaelia.Util.Introspector import Introspector
from Kamaelia.Internet.TCPClient import TCPClient
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.Visualisation.PhysicsGraph import parseArgs as _parseArgs
def parseArgs(args, extraShortArgs="", extraLongArgs=[]):
shortargs = "n" + extraShortArgs
longargs = ["navelgaze","introspect="] + extraLongArgs
dictArgs, optlist, remargs = _parseArgs(args, shortargs, longargs)
if "help" in dictArgs:
dictArgs["help"] += " -n, --navelgaze\n" + \
" Directly wire in an introspector instead of listening on a port\n\n" + \
" --introspect=server:port\n\n" + \
" Plug in an introspector that sends data to 'server' on 'port'\n" + \
" (have fun! - loop back: \"--port=1500 --introspect=127.0.0.1:1500\")\n\n"
else:
for o,a in optlist:
if o in ("-n","--navelgaze"):
dictArgs['navelgaze'] = True
if o in ("--introspect="):
import re
match = re.match(r"^([^:]+):(\d+)$", a)
server=match.group(1)
port=int(match.group(2))
dictArgs['introspect'] = (server,port)
return dictArgs, optlist, remargs
if __name__=="__main__":
from Kamaelia.Util.Console import ConsoleEchoer
dictArgs, optlist, remargs = parseArgs(sys.argv[1:])
if "help" in dictArgs:
print dictArgs["help"]
sys.exit(0)
resolution = dictArgs.get("screensize",(800,600))
doNavelgaze = dictArgs.pop("navelgaze", None)
doIntrospect = dictArgs.pop("introspect", None)
pgd = PygameDisplay(width=resolution[0],height=resolution[1]).activate()
PygameDisplay.setDisplayService(pgd)
if doIntrospect is not None:
(server, port) = doIntrospect
Pipeline( Introspector(),
TCPClient(server, port)
).activate()
if doNavelgaze:
if "serverPort" in dictArgs:
raise ValueError("Makes no sense to navelgaze and use --port option - they're mutually exclusive")
app = Pipeline(
Introspector(),
ConsoleEchoer(forwarder=True),
text_to_token_lists(),
AxonVisualiser(caption="Axon / Kamaelia Visualiser", **dictArgs)
)
else:
app = AxonVisualiserServer(caption="Axon / Kamaelia Visualiser", **dictArgs)
app.run()
| 3,796 |
entertainment_center.py
|
yasseralaa/Movie-Trailer-Website
| 0 |
2166750
|
import media
import fresh_tomatoes
import json
import urllib2
# main function
def main():
# the movie db api
moviedbapi = "http://api.themoviedb.org/3/discover/mo" \
"vie?api_key=<KEY>"
# using urllib to load api content and json.load
# method to extract it's json data
moviedbdata = json.load(urllib2.urlopen(moviedbapi))
# list to add movie objects
movies = []
# iterate throw json array of objects and extract data values with keys
for movie in moviedbdata['results']:
movie_object = media.Movie(
movie['title'],
"http://image.tmdb.org/t/p/w185" + str(movie['poster_path']),
get_trailer(movie['id']),
movie['vote_average'])
movies.append(movie_object)
# calling the "open_movies_page" func from "fresh_tomatoes" module
fresh_tomatoes.open_movies_page(movies)
# consuming trailer api with movie id
def get_trailer(movie_id):
movietrailersapi = "http://api.themoviedb.org/3/movie/" + str(
movie_id) + "/videos?api_key=<KEY>"
moviedata = json.load(urllib2.urlopen(movietrailersapi))
return "https://www.youtube.com/watch?v=" + moviedata['results'][0]['key']
# ensure that main func executing first
if __name__ == '__main__':
main()
| 1,288 |
recommendation/utils/logger.py
|
wikimedia/research-recommendation-api
| 3 |
2168244
|
import logging
import time
from recommendation.utils import configuration
import recommendation
log = logging.getLogger(__name__)
def initialize_logging():
logging.basicConfig(format=configuration.get_config_value('logging', 'format'),
level=logging.WARNING)
log = logging.getLogger(recommendation.__name__)
log.setLevel(logging.getLevelName(configuration.get_config_value('logging', 'level')))
def timeit(method):
"""Decorator for measuring function run times"""
def timed(*args, **kw):
t1 = time.time()
result = method(*args, **kw)
t2 = time.time()
log.debug('%r run time: %2.2f s', method.__name__, t2 - t1)
return result
return timed
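# Illustrative usage sketch (annotation added; the decorated function is a placeholder):
#
#     @timeit
#     def fetch_candidates(source, target):
#         ...  # run time is logged at DEBUG level as "'fetch_candidates' run time: X.XX s"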
| 732 |
samples_to_fingerprints.py
|
skratchdot/media-tools
| 13 |
2169381
|
# -*- coding: utf-8 -*-
# https://scikit-learn.org/stable/modules/generated/sklearn.manifold.TSNE.html
import argparse
import audioread
from lib.audio_utils import *
from lib.cache_utils import *
from lib.collection_utils import *
from lib.io_utils import *
from lib.math_utils import *
from lib.processing_utils import *
import librosa
from multiprocessing import Pool
from multiprocessing.dummy import Pool as ThreadPool
import os
import numpy as np
from pprint import pprint
from skimage.measure import block_reduce
import sys
# input
parser = argparse.ArgumentParser()
parser.add_argument('-in', dest="INPUT_FILE", default="tmp/samples.csv", help="Input file")
parser.add_argument('-dir', dest="AUDIO_DIRECTORY", default="media/sample/", help="Input file")
parser.add_argument('-out', dest="OUTPUT_FILE", default="tmp/features.p", help="Output file")
parser.add_argument('-cellw', dest="CELL_W", default=32, type=int, help="Width of each cell")
parser.add_argument('-cellh', dest="CELL_H", default=32, type=int, help="Height of each cell")
parser.add_argument('-threads', dest="THREADS", default=4, type=int, help="Number of threads")
parser.add_argument('-log', dest="USE_LOG", action="store_true", help="Use log for fingerprint?")
a = parser.parse_args()
# Read files
fieldNames, rows = readCsv(a.INPUT_FILE)
rowCount = len(rows)
rows = addIndices(rows)
rows = prependAll(rows, ("filename", a.AUDIO_DIRECTORY))
# Make sure output dirs exist
makeDirectories(a.OUTPUT_FILE)
# find unique filepaths
print("Matching samples to files...")
filenames = list(set([row["filename"] for row in rows]))
params = [{
"samples": [row for row in rows if row["filename"]==fn],
"filename": fn
} for fn in filenames]
fileCount = len(params)
progress = 0
# Adapted from: https://github.com/kylemcdonald/AudioNotebooks/blob/master/Samples%20to%20Fingerprints.ipynb
def getFingerPrint(y, sr, start, dur, n_fft=2048, hop_length=512, window=None, use_logamp=False):
global a
if len(y) < 1:
return np.zeros((a.CELL_H, a.CELL_W))
# take at most one second
dur = min(dur, 1000)
# analyze just the sample
i0 = int(round(start / 1000.0 * sr))
i1 = int(round((start+dur) / 1000.0 * sr))
y = y[i0:i1]
reduce_rows = 10 # how many frequency bands to average into one
reduce_cols = 1 # how many time steps to average into one
crop_rows = a.CELL_H # limit how many frequency bands to use
crop_cols = a.CELL_W # limit how many time steps to use
if not window:
window = np.hanning(n_fft)
S = librosa.stft(y, n_fft=n_fft, hop_length=hop_length, window=window)
amp = np.abs(S)
if reduce_rows > 1 or reduce_cols > 1:
amp = block_reduce(amp, (reduce_rows, reduce_cols), func=np.mean)
if amp.shape[1] < crop_cols:
amp = np.pad(amp, ((0, 0), (0, crop_cols-amp.shape[1])), 'constant')
amp = amp[:crop_rows, :crop_cols]
if use_logamp:
amp = librosa.amplitude_to_db(amp**2)
amp -= amp.min()
if amp.max() > 0:
amp /= amp.max()
amp = np.flipud(amp) # for visualization, put low frequencies on bottom
return amp
def processFile(p):
global progress
global rowCount
global a
fingerprints = []
# load audio
fn = getAudioFile(p["filename"])
try:
y, sr = loadAudioData(fn)
except audioread.macca.MacError:
y = []
sr = 48000
for sample in p["samples"]:
fingerprint = getFingerPrint(y, sr, sample["start"], sample["dur"], use_logamp=a.USE_LOG)
fingerprints.append({
"index": sample["index"],
"fingerprint": fingerprint
})
progress += 1
printProgress(progress, rowCount)
return fingerprints
print("Processing fingerprints...")
data = []
if a.THREADS == 1:
for p in params:
data.append(processFile(p))
else:
threads = getThreadCount(a.THREADS)
pool = ThreadPool(threads)
data = pool.map(processFile, params)
pool.close()
pool.join()
data = flattenList(data)
data = sorted(data, key=lambda d: d["index"])
fingerprints = [d["fingerprint"] for d in data]
saveCacheFile(a.OUTPUT_FILE, fingerprints, overwrite=True)
print("Done.")
| 4,186 |
app/compute.py
|
sorengoyal/combat-evolved
| 0 |
2169845
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 8 15:41:20 2017
@author: sogoyal
"""
from app.plab.geospatial import Geospatial
import time
'''
Takes in coordinates -
sample coordinate structure -
coordinates = [
[
[
-121.95789277553557,
37.417830946910904
],
[
-121.95595085620879,
37.416510162308874
],
[
-121.95349395275115,
37.41863618802896
],
[
-121.95355296134949,
37.41921561543447
],
[
-121.95789277553557,
37.417830946910904
]
]
]
(latitute, longitude)
Returns the 4 images of seasons with NDVI
'''
def ndviImages(coordinates):
geo = Geospatial('3d42933f4c284a3b8dd2c5200e97da00')
filters = geo.createFilters(coordinates)
graph = []#{"Spring": 0, "Summer": 0, "Fall": 0, "Winter": 0}
images = []
masks = []
maxvalue = 0
for i in range(0,1):
image = geo.getImage(filters[i])
ndvi, mask = geo.computeNDVI(image)
graph.append(ndvi.sum()/mask.sum())
images.append(image)
masks.append(mask)
if(maxvalue < ndvi.max()):
maxvalue = ndvi.max()
for i in range(0,1):
image = images[i]
mask = masks[i]
for m in range(0,image.shape[0]):
for n in range(0,image.shape[1]):
image[m,n] = 255*image[m,n]/maxvalue
geo.writeImageToFile("app/images/file" + str(i) + ".png", ndvi, mask)
| 1,544 |
lstm_with_wordpieces/sequence_utils.py
|
hubertkarbowy/awd_lstm_tensorflow2
| 0 |
2170137
|
import argparse
import json
import tensorflow as tf
from awd_lstm_tensorflow2.ulmfit_tf2_heads import SPMNumericalizer
def pretty_print_tagged_sequences(subword_pieces, labels, intents, limit=5):
for i in range(len(subword_pieces)):
l = list(zip(subword_pieces[i], labels[i]))
print(l)
if intents not in [None, []]:
print(f"---> Intent: {intents[i]}")
if i > limit:
break
def mk_labels(ls_json):
labels_set = set()
for document in ls_json:
all_tagged = document['label']
for tagged in all_tagged:
labels_set.update(tagged['labels'])
label_index = {label:index for index,label in enumerate(sorted(labels_set))}
# index_label = {index:label for index,label in enumerate(sorted(labels_set))}
return label_index
def label_studio_to_tagged_subwords(*, spm_args, label_studio_min_json): # todo: return intents
spm_layer = SPMNumericalizer(name="SPM_layer",
spm_path=spm_args['spm_model_file'],
add_bos=spm_args.get('add_bos') or False,
add_eos=spm_args.get('add_eos') or False,
lumped_sents_separator="")
spmproc = spm_layer.spmproc
ls_json = json.load(open(label_studio_min_json, 'r', encoding='utf-8'))
# tokenize with offsets
nl_texts = [document['text'] for document in ls_json]
spans = [document.get('label') or [] for document in ls_json]
print(f"First 10 texts:")
print(nl_texts[0:10])
# map spans to subwords
tokenized = []
tokenized_pieces = []
tokenized_labels = []
intents = []
for doc_id in range(len(nl_texts)):
# Tensorflow's tokenize_with_offsets is broken with SentencePiece
#token_offsets = list(zip(begins[i].numpy().tolist(), ends[i].numpy().tolist()))
#pieces = [t.decode(encoding='utf-8') for t in spmproc.id_to_string(piece_ids[i]).numpy().tolist()]
curr_tokens = []
curr_pieces = []
curr_entities = []
        i = 0
        entity_end = 0  # if there are no spans, the whole text is tokenized as 'O' below
        for span in spans[doc_id]:
            j = entity_beg = span['start']
            entity_end = span['end']
            label_class = span['labels'][0]  # assume labels don't overlap
            # tokenize everything between the end of the previous span and this one
            res = spmproc.tokenize(nl_texts[doc_id][i:j]).numpy().tolist()
            curr_tokens.extend(res)
            curr_entities.extend(['O'] * len(res))
            # inside the label span
            res = spmproc.tokenize(nl_texts[doc_id][j:entity_end]).numpy().tolist()
            curr_tokens.extend(res)
            curr_entities.extend([label_class] * len(res))
            i = entity_end  # resume after this span on the next iteration
        # from the last label (or the start of the text, if there were no spans) to EOS
        res = spmproc.tokenize(nl_texts[doc_id][entity_end:]).numpy().tolist()
        curr_tokens.extend(res)
        curr_entities.extend(['O'] * len(res))
curr_pieces = [t.decode(encoding='utf-8') for t in spmproc.id_to_string(curr_tokens).numpy().tolist()]
tokenized.append(curr_tokens)
tokenized_pieces.append(curr_pieces)
tokenized_labels.append(curr_entities)
if ls_json[doc_id].get('intent') is not None:
intents.append(ls_json[doc_id].get('intent'))
return tokenized, tokenized_pieces, tokenized_labels, intents
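# Sketch of the alignment performed above on a hypothetical document (the subword
# split shown is illustrative; an actual SentencePiece model may segment differently):
#
#   text  = "book a flight to Boston"
#   spans = [{'start': 17, 'end': 23, 'labels': ['CITY']}]
#   pieces ~ ['▁book', '▁a', '▁flight', '▁to']  -> labelled 'O'
#   pieces ~ ['▁Boston']                        -> labelled 'CITY'
#   (nothing follows the span, so the trailing segment is empty)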
def main(args):
spm_args = {'spm_model_file': args['spm_model_file'], 'add_bos': False, 'add_eos': False}
    token_ids, token_pieces, token_labels, intents = label_studio_to_tagged_subwords(
        spm_args=spm_args,
        label_studio_min_json=args['label_studio_min_json'])
    pretty_print_tagged_sequences(token_pieces, token_labels, intents)
if __name__ == "__main__":
argz = argparse.ArgumentParser()
argz.add_argument('--label-studio-min-json', required=False)
argz.add_argument('--spm-model-file', required=False)
args = vars(argz.parse_args())
main(args)
| 4,014 |
app.py
|
snoop2head/comments_tracker
| 2 |
2168297
|
from daily_crawl import update_comment_collection
from daily_db_compare import send_new_info, new_comment_info_list
# first, update comment collection
update_comment_collection()
# second, update archive collection
# third, send the new comment information to slack chatbot
send_new_info(new_comment_info_list())
| 315 |
stlmeasure.py
|
Mister-SOSA/stl-measure.py
| 0 |
2170420
|
'''A simple, easy-to-use Python module for quickly measuring STL files.'''
from stl import mesh
from termcolor import colored
accepted_units = ['m', 'cm', 'mm', 'in', 'ft']
# Linear conversion factors from metres, the unit the mesh is assumed to be modelled in.
linear_factors = {
    'm': 1.0,
    'cm': 100.0,
    'mm': 1000.0,
    'in': 39.37007874,
    'ft': 3.28084
}
def get_dimensions(obj, units):
    '''Accepts the path to an STL file and the desired units of measurement. Returns the X, Y and Z dimensions of the mesh's bounding box as a dictionary.'''
    try:
        obj = mesh.Mesh.from_file(obj)
    except Exception as e:
        raise ValueError(e)
    if units not in linear_factors:
        raise ValueError(
            'The unit you passed to get_dimensions() "' + colored(units, 'red') + '" was invalid. Possible units are ' +
            ', '.join(accepted_units)
        )
    factor = linear_factors[units]
    # Bounding-box edge lengths along each axis.
    dimension_x = obj.x.max() - obj.x.min()
    dimension_y = obj.y.max() - obj.y.min()
    dimension_z = obj.z.max() - obj.z.min()
    return {
        "x": dimension_x * factor,
        "y": dimension_y * factor,
        "z": dimension_z * factor
    }
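# Example usage (the file name is hypothetical; any STL readable by numpy-stl should
# work). If the model was drawn in metres, this reports the edge lengths in millimetres:
#
#   dims = get_dimensions("part.stl", "mm")
#   print(dims["x"], dims["y"], dims["z"])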
def get_box_volume(obj, units):
    '''Accepts the path to an STL file and the desired units of measurement. Returns the volume of the mesh's bounding box.'''
    try:
        obj = mesh.Mesh.from_file(obj)
    except Exception as e:
        raise ValueError(e)
    if units not in linear_factors:
        raise ValueError(
            'The unit you passed to get_box_volume() "' + colored(units, 'red') + '" was invalid. Possible units are ' +
            ', '.join(accepted_units)
        )
    dimension_x = obj.x.max() - obj.x.min()
    dimension_y = obj.y.max() - obj.y.min()
    dimension_z = obj.z.max() - obj.z.min()
    # Volume scales with the cube of the linear conversion factor, so it is cubed here.
    return dimension_x * dimension_y * dimension_z * linear_factors[units] ** 3
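# Worked check of the cubic conversion: a 1 m x 2 m x 0.5 m bounding box has a volume
# of 1 m^3, which is 100^3 = 1,000,000 cm^3 and 1000^3 = 1e9 mm^3 -- hence the cubed
# factor above, rather than the linear factor used for edge lengths.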
| 3,031 |