max_stars_repo_path: string (length 4-182)
max_stars_repo_name: string (length 6-116)
max_stars_count: int64 (0-191k)
id: string (length 7)
content: string (length 100-10k)
size: int64 (100-10k)
public/scripts/python/test/Suite2/test18.py
jimb245/scriptremote
0
2172079
#
# Multiple locations with content/files,
# same file tag used at different locations.
#

import os
import unittest
import srutil
import srio
import credentials
import filecmp


class Test(unittest.TestCase):

    def runTest(self):
        proj_name = 'TEST(suite2)-Project18'
        job_name = 'Job'
        loc_name1 = 'Location1'
        loc_name2 = 'Location2'
        loc_name3 = 'Location3'
        file_key = 'file'
        user = credentials.SRUSER
        token = credentials.SRTOKEN
        passphrase = '<PASSWORD>'
        path1 = os.getcwd() + '/Suite2/data.txt'
        newpath1 = os.getcwd() + '/Suite2/test18.txt'
        path2 = os.getcwd() + '/Suite2/heatmaps.png'
        newpath2 = os.getcwd() + '/Suite2/test18.png'
        path3 = os.getcwd() + '/Suite2/tiger.svg'
        newpath3 = os.getcwd() + '/Suite2/test18.svg'

        result = srio.SR_start(user, token, proj_name, job_name, passphrase)
        if (result[0] != srio.SR_OK):
            self.fail()

        result = srio.SR_send(loc_name1,
                              data_array=[{'name': 'a1', 'value': 'Hello world 1'}],
                              file_array=[{'key': file_key, 'path': path1}],
                              reply=False)
        if (result[0] != srio.SR_OK):
            self.fail()
        msgid1 = srio.sr_msgid

        result = srio.SR_send(loc_name2,
                              data_array=[{'name': 'a2', 'value': 'Hello world 2'}],
                              file_array=[{'key': file_key, 'path': path2}],
                              reply=False)
        if (result[0] != srio.SR_OK):
            self.fail()
        msgid2 = srio.sr_msgid

        result = srio.SR_send(loc_name3,
                              data_array=[{'name': 'a3', 'value': 'Hello world 3'}],
                              file_array=[{'key': file_key, 'path': path3}],
                              reply=False)
        if (result[0] != srio.SR_OK):
            self.fail()
        msgid3 = srio.sr_msgid

        result = srio.SR_end()
        if (result[0] != srio.SR_END):
            self.fail()

        result = srutil.SR_get_content(loc_name1, msgid1)
        if result[0] != srio.SR_OK:
            self.fail()
        data = result[1]
        if (u'content' not in data) or (data[u'content'] != u'[{"name": "a1", "value": "Hello world 1"}]'):
            self.fail()
        result = srutil.SR_get_file(loc_name1, msgid1, file_key, 'text', newpath1)
        if result[0] != srio.SR_OK:
            self.fail()
        if not filecmp.cmp(path1, newpath1):
            self.fail()

        result = srutil.SR_get_content(loc_name2, msgid2)
        if result[0] != srio.SR_OK:
            self.fail()
        data = result[1]
        if (u'content' not in data) or (data[u'content'] != u'[{"name": "a2", "value": "Hello world 2"}]'):
            self.fail()
        result = srutil.SR_get_file(loc_name2, msgid2, file_key, 'binary', newpath2)
        if result[0] != srio.SR_OK:
            self.fail()
        if not filecmp.cmp(path2, newpath2):
            self.fail()

        result = srutil.SR_get_content(loc_name3, msgid3)
        if result[0] != srio.SR_OK:
            self.fail()
        data = result[1]
        if (u'content' not in data) or (data[u'content'] != u'[{"name": "a3", "value": "Hello world 3"}]'):
            self.fail()
        result = srutil.SR_get_file(loc_name3, msgid3, file_key, 'text', newpath3)
        if result[0] != srio.SR_OK:
            self.fail()
        if not filecmp.cmp(path3, newpath3):
            self.fail()

        srutil.SR_delete_project()
3,358
aml/2_a_marching_cubes.py
davidstutz/aml-improved-shape-completion
9
2172828
import os
import sys
sys.path.insert(1, os.path.realpath('/BS/dstutz/work/shape-completion/code/lib/py/'))
import utils
import argparse
import mcubes


def get_parser():
    """
    Get parser.

    :return: parser
    :rtype: argparse.ArgumentParser
    """

    parser = argparse.ArgumentParser('Read LTSDF file and run marching cubes.')
    parser.add_argument('--input', type=str, help='Input HDF5 file.')
    parser.add_argument('--output', type=str, help='Output directory.')
    parser.add_argument('--n_observations', type=int, default=10,
                        help='Number of observations per model, should be 10.')
    return parser


if __name__ == '__main__':
    parser = get_parser()
    args = parser.parse_args()

    predictions = utils.read_hdf5(args.input)
    print('[Experiments] read ' + args.input)

    utils.makedir(args.output)
    print('[Experiments] created ' + args.output)

    for n in range(predictions.shape[0]):
        k = n % args.n_observations
        off_directory = args.output + '/%d/' % k
        utils.makedir(off_directory)

        off_file = off_directory + '/%d.off' % (n // args.n_observations)
        if not os.path.exists(off_file):
            vertices, triangles = mcubes.marching_cubes(-predictions[n][1].transpose(1, 0, 2), 0)
            mcubes.export_off(vertices, triangles, off_file)
            print('[Experiments] wrote ' + off_file)
1,375
supports/pyload/src/pyload/plugins/downloaders/CzshareCom.py
LuckyNicky/pycrawler
1
2172029
# -*- coding: utf-8 -*-

import re
from datetime import timedelta

from ...core.utils import parse
from ..base.simple_downloader import SimpleDownloader


class CzshareCom(SimpleDownloader):
    __name__ = "CzshareCom"
    __type__ = "downloader"
    __version__ = "1.11"
    __status__ = "testing"

    __pyload_version__ = "0.5"

    __pattern__ = (
        r"https?://(?:www\.)?(czshare|sdilej)\.(com|cz)/(\d+/|download\.php\?).+"
    )
    __config__ = [
        ("enabled", "bool", "Activated", True),
        ("use_premium", "bool", "Use premium account if available", True),
        ("fallback", "bool", "Fallback to free download if premium fails", True),
        ("chk_filesize", "bool", "Check file size", True),
        ("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
    ]

    __description__ = """CZshare.com downloader plugin, now Sdilej.cz"""
    __license__ = "GPLv3"
    __authors__ = [("zoidberg", "<EMAIL>"), ("ondrej", "<EMAIL>")]

    NAME_PATTERN = r'<div class="tab" id="parameters">\s*<p>\s*Cel. n.zev: <a href=.*?>(?P<N>.+?)</a>'
    SIZE_PATTERN = r'<div class="tab" id="category">(?:\s*<p>[^\n]*</p>)*\s*Velikost:\s*(?P<S>[\d .,]+)(?P<U>[\w^_]+)\s*</div>'
    OFFLINE_PATTERN = r'<div class="header clearfix">\s*<h2 class="red">'

    SIZE_REPLACEMENTS = [(" ", "")]
    URL_REPLACEMENTS = [
        (r"http://[^/]*/download.php\?.*?id=(\w+).*", r"http://sdilej.cz/\1/x/")
    ]

    CHECK_TRAFFIC = True

    FREE_URL_PATTERN = r'<a href="(.+?)" class="page-download">[^>]*alt="(.+?)" /></a>'
    FREE_FORM_PATTERN = r'<form action="download\.php" method="post">\s*<img src="captcha\.php" id="captcha" />(.*?)</form>'
    PREMIUM_FORM_PATTERN = r'<form action="/profi_down\.php" method="post">(.*?)</form>'
    FORM_INPUT_PATTERN = r'<input[^>]* name="(.+?)" value="(.+?)"[^>]*/>'
    MULTIDL_PATTERN = r"<p><font color=\'red\'>Z.*?PROFI.</font></p>"
    USER_CREDIT_PATTERN = r'<div class="credit">\s*kredit: <strong>([\d .,]+)(\w+)</strong>\s*</div><!-- .credit -->'

    def out_of_traffic(self):
        #: Check if user logged in
        m = re.search(self.USER_CREDIT_PATTERN, self.data)
        if m is None:
            self.account.relogin()
            self.data = self.load(self.pyfile.url)
            m = re.search(self.USER_CREDIT_PATTERN, self.data)
            if m is None:
                return True

        #: Check user credit
        try:
            credit = parse.bytesize(m.group(1).replace(" ", ""), m.group(2))
            self.log_info(
                self._("Premium download for {} KiB of Credit").format(
                    self.pyfile.size >> 10
                )
            )
            self.log_info(
                self._("User {} has {} KiB left").format(
                    self.account.user, credit >> 10
                )
            )
            if credit < self.pyfile.size:
                self.log_info(
                    self._("Not enough credit to download file: {}").format(
                        self.pyfile.name
                    )
                )
                return True

        except Exception as exc:
            #: let's continue and see what happens...
            self.log_error(
                exc, exc_info=self.pyload.debug > 1, stack_info=self.pyload.debug > 2
            )

        return False

    def handle_premium(self, pyfile):
        try:
            form = re.search(self.PREMIUM_FORM_PATTERN, self.data, re.S).group(1)
            inputs = dict(re.findall(self.FORM_INPUT_PATTERN, form))

        except Exception as exc:
            self.log_error(
                exc, exc_info=self.pyload.debug > 1, stack_info=self.pyload.debug > 2
            )
            self.restart(premium=False)

        #: Download the file, destination is determined by pyLoad
        self.download("http://sdilej.cz/profi_down.php", post=inputs, disposition=True)

    def handle_free(self, pyfile):
        #: Get free url
        m = re.search(self.FREE_URL_PATTERN, self.data)
        if m is None:
            self.error(self._("FREE_URL_PATTERN not found"))

        parsed_url = "http://sdilej.cz" + m.group(1)

        self.log_debug("PARSED_URL:" + parsed_url)

        #: Get download ticket and parse html
        self.data = self.load(parsed_url)
        if re.search(self.MULTIDL_PATTERN, self.data):
            self.retry(
                timedelta(minutes=5).seconds, 12, self._("Download limit reached")
            )

        try:
            form = re.search(self.FREE_FORM_PATTERN, self.data, re.S).group(1)
            inputs = dict(re.findall(self.FORM_INPUT_PATTERN, form))
            pyfile.size = int(inputs["size"])

        except Exception as exc:
            self.log_error(
                exc, exc_info=self.pyload.debug > 1, stack_info=self.pyload.debug > 2
            )
            self.error(self._("Form"))

        #: Get and decrypt captcha
        captcha_url = "http://sdilej.cz/captcha.php"
        inputs["captchastring2"] = self.captcha.decrypt(captcha_url)

        self.data = self.load(parsed_url, post=inputs)

        if "<li>Zadaný ověřovací kód nesouhlasí!</li>" in self.data:
            self.retry_captcha()

        elif re.search(self.MULTIDL_PATTERN, self.data):
            self.retry(
                timedelta(minutes=5).seconds, 12, self._("Download limit reached")
            )

        else:
            self.captcha.correct()

        m = re.search(r"countdown_number = (\d+);", self.data)
        self.set_wait(int(m.group(1)) if m else 50)

        #: Download the file, destination is determined by pyLoad
        self.log_debug("WAIT URL", self.req.last_effective_url)

        m = re.search(r"free_wait.php\?server=(.*?)&(.*)", self.req.last_effective_url)
        if m is None:
            self.error(self._("Download URL not found"))

        self.link = "http://{}/download.php?{}".format(m.group(1), m.group(2))

        self.wait()

    def check_download(self):
        #: Check download
        check = self.scan_download(
            {
                "temp offline": re.compile(r"^Soubor je do.*asn.* nedostupn.*$"),
                "credit": re.compile(r"^Nem.*te dostate.*n.* kredit.$"),
                "multi-dl": re.compile(self.MULTIDL_PATTERN),
                "captcha": "<li>Zadaný ověřovací kód nesouhlasí!</li>",
            }
        )

        if check == "temp offline":
            self.fail(self._("File not available - try later"))

        elif check == "credit":
            self.restart(premium=False)

        elif check == "multi-dl":
            self.retry(
                timedelta(minutes=5).seconds, 12, self._("Download limit reached")
            )

        elif check == "captcha":
            self.retry_captcha()

        return SimpleDownloader.check_download(self)
6,810
WebScraper/prediction.py
Peischlili/ComputerVision_WebScraper
2
2171771
from packageManager import *
from modelBuilder import *
from boundBox import *


# with trained/pretrained yolo3 weights, save the compiled model to a path on local
def init_model(weights_path, new_model_path):
    # instantiate the model and save to variable
    model = make_yolov3_model()
    # load the model weights
    weight_reader = WeightReader(weights_path)
    # set the model weights into the model
    weight_reader.load_weights(model)
    # save the model to file, filename should look like model.h5
    model.save(new_model_path)


# for a given image, predict the presence of the trained category and draw a box around it
def predict_cat(model_h5, image_file):
    # load yolov3 model
    model = load_model(model_h5, compile=False)
    # define the expected input shape for the model
    input_w, input_h = 416, 416
    # define our new photo
    photo_filename = image_file
    # load and prepare image
    image, image_w, image_h = load_image_pixels(photo_filename, (input_w, input_h))
    # make prediction
    yhat = model.predict(image)
    # summarize the shape of the list of arrays
    print([a.shape for a in yhat])
    # define the anchors
    anchors = [[116, 90, 156, 198, 373, 326], [30, 61, 62, 45, 59, 119], [10, 13, 16, 30, 33, 23]]
    # define the probability threshold for detected objects
    class_threshold = 0.8
    boxes = list()
    for i in range(len(yhat)):
        # decode the output of the network
        boxes += decode_netout(yhat[i][0], anchors[i], class_threshold, input_h, input_w)
    # correct the sizes of the bounding boxes for the shape of the image
    correct_yolo_boxes(boxes, image_h, image_w, input_h, input_w)
    # suppress non-maximal boxes
    do_nms(boxes, 0.5)
    # define the labels
    labels = ["Dress"]
    # get the details of the detected objects
    v_boxes, v_labels, v_scores = get_boxes(boxes, labels, class_threshold)
    # summarize what we found and save the labels to a list
    labelList: list = []
    for i in range(len(v_boxes)):
        # each element in the list is: a list of the label and its probability
        labelList.append([v_labels[i], v_scores[i]])
        # print(v_labels[i], v_scores[i])
    # draw what we found
    # draw_boxes(photo_filename, v_boxes, v_labels, v_scores)
    return labelList
2,297
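A minimal usage sketch for the prediction module above. It assumes the repo's helper modules (packageManager, modelBuilder, boundBox) are importable; the module name `prediction`, the weights file `yolov3.weights`, and the image `dress.jpg` are hypothetical placeholders, not confirmed by the repo:

# Hypothetical end-to-end run; file paths are placeholders.
from prediction import init_model, predict_cat

init_model('yolov3.weights', 'model.h5')    # one-time: convert raw weights to a Keras .h5 model
labels = predict_cat('model.h5', 'dress.jpg')
for label, score in labels:
    print(label, score)  # prints one line per detection above class_threshold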
members/admin.py
Joshua-Barawa/Django-IP3
0
2172700
from django.contrib import admin

from .models import *

admin.site.register(Profile)
admin.site.register(Project)
admin.site.register(Prorating)
admin.site.register(Comment)
174
cart/urls.py
masrufjaman/central-shop
1
2172851
from django.urls import path
from . import views

urlpatterns = [
    path('', views.cart, name='cart-cart'),
    path('checkout/', views.checkout, name='cart-checkout'),
]
173
test/test_gvf.py
tristanang/bilayer-clusters
1
2172989
import numpy as np
from bilayer_clusters.gvf import *


def test_gvf1():
    cluster = [np.array([1,1,1,1]), np.array([5,5,5,5,5]), np.array([9,9,9,9])]
    assert gvf_helper(cluster) == 1

    cluster = [np.arange(1,4), np.arange(100,104), np.arange(10000,10004)]
    print(gvf_helper(cluster))

    return True


if __name__ == '__main__':
    test_gvf1()
    print("passed")
374
lms/extractors/ziparchive.py
jungrishi/lms
0
2173582
import fnmatch
import os
import pathlib
from typing import Iterator, List, Set, Text, Tuple
from zipfile import BadZipFile, ZipFile

from lms.extractors.base import Extractor, File
from lms.utils.log import log


GITIGNORE_FILE = pathlib.Path(__file__).parent / 'ignorefiles.txt'


class Ziparchive(Extractor):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.is_zipfile = (
            self.filename is not None
            and self.filename.endswith('.zip')
        )
        if not self.is_zipfile:
            return

        try:
            self.archive = ZipFile(self.to_extract.stream._file)
        except BadZipFile:
            self.is_zipfile = False

    def can_extract(self) -> bool:
        return self.is_zipfile

    @staticmethod
    def _extract(archive: ZipFile, filename: str, dirname: str = '') -> File:
        with archive.open(filename) as current_file:
            log.debug(f'Extracting from archive: {filename}')
            code = current_file.read()
            decoded = code.decode('utf-8', errors='replace').replace('\x00', '')
            filename = filename[len(dirname):]
            return File(path=f'/{filename}', code=decoded)

    def get_files(
        self, archive: ZipFile, filenames: List[Text], dirname: str = '',
    ) -> Iterator[File]:
        unwanted_files = self.get_unwanted_files(filenames)
        yield from (
            self._extract(archive, filename, dirname)
            for filename in filenames
            if (
                filename.startswith(dirname)
                and filename not in unwanted_files
                and filename != dirname
            )
        )

    def get_exercises_by_dirs(
        self, archive: ZipFile, filenames: List[Text],
    ) -> Iterator[Tuple[int, List[File]]]:
        for dirname in filenames:
            if len(dirname.strip(os.path.sep).split(os.path.sep)) == 1:
                # Checking if the dirname is in the first dir in the zipfile
                parent_name, _ = os.path.split(dirname)
                exercise_id, _ = self._clean(parent_name)
                if exercise_id:
                    files = list(self.get_files(archive, filenames, dirname))
                    yield exercise_id, files

    def get_exercise(self, file: ZipFile) -> Iterator[Tuple[int, List[File]]]:
        assert self.filename is not None
        exercise_id, _ = self._clean(self.filename.rpartition('.')[0])
        with file as archive:
            filenames = archive.namelist()
            if exercise_id:
                files = list(self.get_files(archive, filenames))
                yield exercise_id, files
            else:
                yield from self.get_exercises_by_dirs(archive, filenames)

    def get_exercises(self) -> Iterator[Tuple[int, List[File]]]:
        for exercise_id, files in self.get_exercise(self.archive):
            if exercise_id and files and any(file.code for file in files):
                yield (exercise_id, files)

    @staticmethod
    def get_unwanted_files_types() -> Iterator[str]:
        with open(GITIGNORE_FILE, 'r') as file:
            lines = file.read().splitlines()

        yield from (
            line.strip() for line in lines
            if line and not line.strip().startswith('#')
        )

    def get_unwanted_files(self, namelist: List[str]) -> Set:
        unwanted_files = set()
        for pattern in self.get_unwanted_files_types():
            unwanted_files.update(fnmatch.filter(namelist, pattern))
        return unwanted_files
3,543
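The ignore-list mechanics in `get_unwanted_files` above are plain `fnmatch` globbing against the archive's name list; a self-contained sketch (the sample patterns and file names are made up, not from the repo's ignorefiles.txt):

import fnmatch

# Hypothetical patterns as they might appear in an ignore file.
patterns = ['*.pyc', '.DS_Store', '__MACOSX/*']
namelist = ['ex1/solution.py', 'ex1/solution.pyc', '__MACOSX/._solution.py']

unwanted = set()
for pattern in patterns:
    # fnmatch.filter keeps the names that match the glob pattern;
    # note fnmatch's '*' also crosses '/' boundaries.
    unwanted.update(fnmatch.filter(namelist, pattern))

print(unwanted)  # {'ex1/solution.pyc', '__MACOSX/._solution.py'}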
jts_datashape/__init__.py
frictionlessdata/tableschema-datashape
1
2171576
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import warnings

from datashape import dshape

D_TYPES_FIELD_TO_DTYPE = {
    'string': 'string',
    'number': 'float64',
    'integer': 'int',
    'boolean': 'bool',
    'null': 'void',
    'object': 'json',
    'array': 'var',
    'datetime': "datetime[tz='UTC']",
    'date': 'date',
    'time': "time[tz='UTC']",
    'geopoint': 'string',
    'geojson': 'json',
    'any': 'var'
}

D_DTYPE_TO_TYPES_FIELD = {
}

DTYPE_DEFAULT = 'string'
JTS_FIELD_TYPE_DEFAULT = 'string'


class DatashapeNoFormatter(object):
    LINE_FEED = ''
    INDENT = ''
    KEY_VAL_SEP = ''


class DatashapeNormalFormatter(DatashapeNoFormatter):
    LINE_FEED = '\n'
    INDENT = " "
    KEY_VAL_SEP = ' '


DatashapeDefaultFormatter = DatashapeNormalFormatter


def jts_field_to_dtype(field, missing=False):
    """Converts a field of a JSON Table Schema to dtype"""
    if missing:
        s_missing = '?'
    else:
        s_missing = ''
    try:
        typ = field['type']
    except KeyError:
        return DTYPE_DEFAULT
    try:
        fmt = field['format']
    except KeyError:
        fmt = None
    try:
        constraint = field['constraint']
    except KeyError:
        constraint = None
    try:
        return s_missing + D_TYPES_FIELD_TO_DTYPE[typ]
    except KeyError:
        msg = "Can't find type %r - using %r" % (typ, DTYPE_DEFAULT)
        warnings.warn(msg)
        return s_missing + DTYPE_DEFAULT


def jts_to_datashape(schema, missing=False):
    """Converts a JSON Table Schema to a Datashape"""
    return dshape(_jts_to_string_datashape(schema, missing, datashape_formatter=DatashapeNoFormatter))


def _jts_to_string_datashape(schema, missing=False, datashape_formatter=DatashapeDefaultFormatter):
    """Converts a JSON Table Schema to a string that can be converted to a Datashape"""
    line_feed = datashape_formatter.LINE_FEED
    indent = datashape_formatter.INDENT
    key_val_sep = datashape_formatter.KEY_VAL_SEP
    s = 'var * {' + line_feed
    for i, field in enumerate(schema['fields']):
        if i != 0:
            s += ',' + line_feed
        s += indent + "%r:%s%s" % (field['name'], key_val_sep, jts_field_to_dtype(field, missing=missing))
    s += line_feed + '}'
    return s


def dtype_to_jts_fieldtype(dtyp):
    """Converts a dtype to a type of a field of a JSON Table Schema"""
    raise NotImplementedError


def datashape_to_jts(ds):
    """Converts a Datashape to JSON Table Schema"""
    raise NotImplementedError
2,502
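A quick usage sketch for the converter above. The sample schema is made up; the printed output follows from the normal formatter's one-space indent and the type table ('integer' maps to 'int', 'string' to 'string'):

# Hypothetical two-field JSON Table Schema.
schema = {'fields': [{'name': 'id', 'type': 'integer'},
                     {'name': 'title', 'type': 'string'}]}

print(_jts_to_string_datashape(schema))
# var * {
#  'id': int,
#  'title': string
# }

ds = jts_to_datashape(schema)  # same string in compact form, parsed by datashape.dshape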
djangocms_oscar/cms_plugins.py
maikhoepfel/djangocms-oscar
18
2173336
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from django.utils.translation import ugettext_lazy as _

from . import models


class FeaturedProductPlugin(CMSPluginBase):
    model = models.FeaturedProduct
    name = _("Featured product")
    admin_preview = True
    render_template = 'djangocms_oscar/plugins/product.html'

    def render(self, context, instance, placeholder):
        context.update({'instance': instance})
        return context


plugin_pool.register_plugin(FeaturedProductPlugin)
537
My_Automata.py
Zuricho/Easy_Cell_Automata
0
2173201
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 29 16:59:00 2019

@author: Zuricho
"""

import numpy as np
import matplotlib.pyplot as plt
import random


class CellAutomata(object):
    def __init__(self, cells_shape):
        # cells_shape: a tuple giving the size of the canvas.
        self.cells = np.zeros(cells_shape)

        # The border cells of the matrix do not take part in the update.
        real_width = cells_shape[0] - 2
        real_height = cells_shape[1] - 2

        self.cells[1:-1, 1:-1] = np.random.randint(2, size=(real_width, real_height))
        self.timer = 0
        self.mask = np.ones(9)
        self.mask[4] = 0

    def update_state(self):
        # Advance the state by one step.
        buf = np.zeros(self.cells.shape)
        cells = self.cells

        def lifegame():
            # Count the live cells surrounding this cell.
            neighbor = cells[i-1:i+2, j-1:j+2].reshape((-1, ))
            neighbor_num = np.convolve(self.mask, neighbor, 'valid')[0]
            if neighbor_num == 3:
                buf[i, j] = 1
            elif neighbor_num == 2:
                buf[i, j] = cells[i, j]
            else:
                buf[i, j] = 0

        def forestfire():
            neighbor = cells[i-1:i+2, j-1:j+2].reshape((-1, ))
            neighbor_num = np.convolve(self.mask, neighbor, 'valid')[0]
            if neighbor_num >= 1 and cells[i, j] == 0.1:
                # If any nearest neighbor of a green-tree site is burning, it becomes a burning tree.
                buf[i, j] = 1 / (9 * int(random.random() > 0.6) + 1)
            elif neighbor_num < 1 and cells[i, j] == 0.1:
                # With no burning neighbors, a tree ignites with probability f (lightning) at each step.
                buf[i, j] = int(random.random() > 0.999)
            elif cells[i, j] == 1:
                # A burning tree becomes an empty site.
                buf[i, j] = 0.1
            else:
                # On an empty site, a tree grows with probability p.
                buf[i, j] = int(random.random() < 0.5) / 10.0

        for i in range(1, cells.shape[0] - 1):
            for j in range(1, cells.shape[1] - 1):  # bugfix: was shape[0], wrong for non-square grids
                if self.type == "lifegame":
                    lifegame()
                elif self.type == "forestfire":
                    forestfire()

        self.cells = buf
        self.timer += 1

    def plot_state(self):
        # Plot the current state.
        plt.title('Iter :{}'.format(self.timer))
        plt.imshow(self.cells)
        plt.show()

    def update_and_plot(self, n_iter, type_auto):
        # Update the state and plot it; n_iter is the number of update rounds.
        self.type = type_auto
        plt.ion()
        for _ in range(n_iter):
            plt.title('Iter :{}'.format(self.timer))
            plt.imshow(self.cells)
            self.update_state()
            plt.pause(0.2)
        plt.ioff()


if __name__ == '__main__':
    game = CellAutomata(cells_shape=(100, 100))  # initialize the cellular automaton
    game.update_and_plot(20, 'lifegame')  # run the automaton, plotting as it goes
2,802
setup.py
jkozera/zevdocs-update-docsets-lambda
0
2171363
import os
from setuptools import setup, find_packages

setup(
    name="zevdocs-update-docsets-lambda",
    version="0.0.1",
    author="<NAME>",
    author_email="<EMAIL>",
    description=("An AWS Lambda utility to update ZevDocs docsets list"),
    license="MIT",
    url="https://zevdocs.io",
    packages=find_packages(),
    install_requires=['boto3', 'paramiko', 'requests', 'dulwich', 'pypng', 'pyyaml'],
)
415
output/models/ibm_data/valid/d4_3_16/d4_3_16v04_xsd/d4_3_16v04.py
tefra/xsdata-w3c-tests
1
2173108
from dataclasses import dataclass, field
from typing import List, Union
from xsdata.models.datatype import XmlDateTime

__NAMESPACE__ = "http://xstest-tns/schema11_F4_3_16_v04"


@dataclass
class Root:
    """
    :ivar eld_time_union_a:
    :ivar eld_time_union_b:
    :ivar eld_time_union_c: Tests the simpleType dateTime, explicitTimezone
        used in a unions
    """
    class Meta:
        name = "root"
        namespace = "http://xstest-tns/schema11_F4_3_16_v04"

    eld_time_union_a: List[Union[XmlDateTime, str]] = field(
        default_factory=list,
        metadata={
            "name": "eldTimeUnionA",
            "type": "Element",
            "namespace": "",
            "explicit_timezone": "required",
        }
    )
    eld_time_union_b: List[Union[XmlDateTime, int]] = field(
        default_factory=list,
        metadata={
            "name": "eldTimeUnionB",
            "type": "Element",
            "namespace": "",
            "explicit_timezone": "prohibited",
        }
    )
    eld_time_union_c: List[Union[str, int, XmlDateTime]] = field(
        default_factory=list,
        metadata={
            "name": "eldTimeUnionC",
            "type": "Element",
            "namespace": "",
            "explicit_timezone": "optional",
        }
    )
1,286
Command Line Scripts/Interactive Batch Segmentation.py
pme1123/PyRoots
0
2171826
#!/bin/python3

# Interactive script to run a segmentation and medial axis loop using pyroots.
# Steps:
# 1. Copy this script to the directory you want to work in.
#    - images and settings should be in a child directory!
# 2. Open your terminal and navigate to the working directory (ex. with `cd "path_to_directory"`)
# 3. type: `python3 script_name.py`
# If you'll run this a bunch of times, try editing the non-interactive version

#TODO: DEBUG

import pyroots as pr
from os import path, getcwd
import warnings

# Parameters

## method
method = input("What is your analysis method? ['frangi', 'thresholding']").lower()
while method != 'frangi' and method != 'thresholding':
    method = input("Try again: ['frangi', 'thresholding']")

## input directory
root_dir = getcwd()
dir_in = input("What is the target directory in {}? ".format(root_dir))
dir_in = path.join(root_dir, dir_in)
while not path.exists(dir_in):
    dir_in = input("Path {} doesn't exist!\nWhat is the target directory in {}? "\
                   .format(dir_in, root_dir))
    dir_in = path.join(root_dir, dir_in)

## output images directory
dir_out_t = path.join(root_dir, "Pyroots Out")
dir_out = input("Writing output to:\n\t{}.\n\n\tIs this OK? [y, n]"\
                .format(dir_out_t)).upper()
while dir_out != "Y" and dir_out != "N":
    dir_out = input("Try again: [y, n]").upper()
if dir_out == "Y":
    dir_out = dir_out_t
else:
    dir_out = input("Where would you like to write output images\n(in {})?".format(root_dir))
    dir_out = path.join(root_dir, dir_out)

## Output table
tab_out_t = path.join(root_dir, "output {}.txt".format(123))  ####DATE))
tab_out = input("Write data to:\n\n\t{}\n[y, n]".format(tab_out_t)).upper()
while tab_out != "Y" and tab_out != "N":
    tab_out = input("Try again: [y, n]").upper()
if tab_out == "Y":
    tab_out = tab_out_t
else:
    tab_out = input("What would you like to call the output table?")

## Table overwrite
tab_over = False  # bugfix: default when the table doesn't exist yet (was undefined)
q = "N"
while q == "N":
    if path.exists(path.join(root_dir, tab_out)):
        tab_over = input("Table exists. Append? [y, n]").upper()
        while tab_over != "Y" and tab_over != "N":
            tab_over = input("Try again: [y, n]").upper()
        tab_over = tab_over == "Y"
        if tab_over is False:
            q = input("Overwrite existing data? [y, n]").upper()
            while q != "Y" and q != "N":
                q = input("Try again: [y, n]").upper()
        else:
            break  # bugfix: appending, nothing more to ask (loop never exited)
    else:
        break  # bugfix: no existing table, nothing to overwrite (loop never exited)

## Parameters
params = input("What is the name of the parameters file?")
params = path.join(root_dir, params)
while not path.exists(params):
    params = input("Couldn't find {}.\n\nWhat is the name of the parameters file?".format(params))
    params = path.join(root_dir, params)

## extensions
extension_in = input("What is the image type for input images?")
extension_out = ".png"
print("Reading in {}. Writing out {}. To change this, edit the script".format(extension_in, extension_out))

## Multiprocessing threads
threads = int(input("How many cores would you like to use?"))

## Begin loop.
warnings.filterwarnings("ignore")

x = pr.pyroots_batch_loop(dir_in,
                          dir_out=dir_out,
                          extension_in=extension_in,
                          method=method,
                          table_out=tab_out,
                          params=params,
                          save_images=True,
                          table_overwrite=tab_over,
                          threads=threads)
3,466
cronus/cmdb.py
pangealab/cronus
1
2172679
# Local Imports
from cronus import properties
from cronus import profile

# Repo Imports
import logging
import configparser
import requests
import json

# Set Logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def main(args):
    print("Called CMDB...")

    # Get Profile Props
    props = profile.get_props(args.profile)

    # Build Request
    headers = properties.HEADERS
    data = open(args.data, 'rb')
    url = props["server"] + props["cmdb_api"]
    username = props["username"]
    password = props["password"]
    r = requests.post(url, headers=headers, data=data, auth=(username, password), timeout=60)
    print(r.json())
694
ortools/linear_solver/samples/multiple_knapsack_mip.py
AlohaChina/or-tools
1
2173286
#!/usr/bin/env python3
# Copyright 2010-2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# [START program]
"""Solve a multiple knapsack problem using a MIP solver."""
# [START import]
from ortools.linear_solver import pywraplp
# [END import]


def main():
    # [START data]
    data = {}
    data['weights'] = [48, 30, 42, 36, 36, 48, 42, 42, 36, 24, 30, 30, 42, 36, 36]
    data['values'] = [10, 30, 25, 50, 35, 30, 15, 40, 30, 35, 45, 10, 20, 30, 25]
    assert len(data['weights']) == len(data['values'])
    data['num_items'] = len(data['weights'])
    data['all_items'] = range(data['num_items'])

    data['bin_capacities'] = [100, 100, 100, 100, 100]
    data['num_bins'] = len(data['bin_capacities'])
    data['all_bins'] = range(data['num_bins'])
    # [END data]

    # Create the mip solver with the SCIP backend.
    # [START solver]
    solver = pywraplp.Solver.CreateSolver('SCIP')
    if solver is None:
        print('SCIP solver unavailable.')
        return
    # [END solver]

    # Variables.
    # [START variables]
    # x[i, b] = 1 if item i is packed in bin b.
    x = {}
    for i in data['all_items']:
        for b in data['all_bins']:
            x[i, b] = solver.BoolVar(f'x_{i}_{b}')
    # [END variables]

    # Constraints.
    # [START constraints]
    # Each item is assigned to at most one bin.
    for i in data['all_items']:
        solver.Add(sum(x[i, b] for b in data['all_bins']) <= 1)

    # The amount packed in each bin cannot exceed its capacity.
    for b in data['all_bins']:
        solver.Add(
            sum(x[i, b] * data['weights'][i]
                for i in data['all_items']) <= data['bin_capacities'][b])
    # [END constraints]

    # Objective.
    # [START objective]
    # Maximize total value of packed items.
    objective = solver.Objective()
    for i in data['all_items']:
        for b in data['all_bins']:
            objective.SetCoefficient(x[i, b], data['values'][i])
    objective.SetMaximization()
    # [END objective]

    # [START solve]
    status = solver.Solve()
    # [END solve]

    # [START print_solution]
    if status == pywraplp.Solver.OPTIMAL:
        print(f'Total packed value: {objective.Value()}')
        total_weight = 0
        for b in data['all_bins']:
            print(f'Bin {b}')
            bin_weight = 0
            bin_value = 0
            for i in data['all_items']:
                if x[i, b].solution_value() > 0:
                    print(
                        f"Item {i} weight: {data['weights'][i]} value: {data['values'][i]}"
                    )
                    bin_weight += data['weights'][i]
                    bin_value += data['values'][i]
            print(f'Packed bin weight: {bin_weight}')
            print(f'Packed bin value: {bin_value}\n')
            total_weight += bin_weight
        print(f'Total packed weight: {total_weight}')
    else:
        print('The problem does not have an optimal solution.')
    # [END print_solution]


if __name__ == '__main__':
    main()
# [END program]
3,564
a-practical-introduction-to-python-programming-brian-heinold/chapter-04/exercise-10.py
elarabyelaidy19/awesome-reading
31
2172900
# 10. Write a multiplication game program for kids. The program should give the player ten ran-
# domly generated multiplication questions to do. After each, the program should tell them
# whether they got it right or wrong and what the correct answer is.
# Question 1: 3 x 4 = 12
# Right!
# Question 2: 8 x 6 = 44
# Wrong. The answer is 48.
# ...
# ...
# Question 10: 7 x 7 = 49
# Right.

from random import randint

for i in range(1, 11):
    a = randint(1, 100)  # for kids ;)
    b = randint(1, 100)
    print('Question ', i, ': ', a, ' x ', b, ' = ', sep='', end='')
    c = eval(input())
    if a * b == c:
        print('Right!')
    else:
        print('Wrong. The answer is', a * b)
698
venv/Lib/site-packages/binstar_client/utils/notebook/tests/test_base.py
GiovanniConserva/TestDeploy
0
2172583
from os.path import join, dirname
import unittest

from binstar_client.utils.notebook import notebook_url, parse, has_environment
from binstar_client.errors import BinstarError


class ParseTestCase(unittest.TestCase):
    def test_parse(self):
        self.assertEqual(parse("user/notebook-ipynb")[0], 'user')
        self.assertEqual(parse("user/notebook-ipynb")[1], 'notebook-ipynb')
        self.assertIsNone(parse("notebook")[0])
        self.assertEqual(parse("notebook")[1], 'notebook')


class NotebookURLTestCase(unittest.TestCase):
    def test_anaconda_org_installation(self):
        upload_info = {'url': 'http://anaconda.org/darth/deathstart-ipynb'}
        url = 'http://notebooks.anaconda.org/darth/deathstart-ipynb'
        self.assertEqual(notebook_url(upload_info), url)

    def test_anaconda_server_installation(self):
        upload_info = {'url': 'http://custom/darth/deathstart-ipynb'}
        url = 'http://custom/notebooks/darth/deathstart-ipynb'
        self.assertEqual(notebook_url(upload_info), url)


class HasEnvironmentTestCase(unittest.TestCase):
    def data_dir(self, filename):
        test_data = join(dirname(__file__), 'data')
        return join(test_data, filename)

    def test_has_no_environment(self):
        self.assertEqual(False, has_environment(self.data_dir('notebook.ipynb')))

    def test_has_environment(self):
        assert has_environment(self.data_dir('notebook_with_env.ipynb'))

    def test_no_file(self):
        with self.assertRaises(BinstarError):
            has_environment("no-file")


if __name__ == '__main__':
    unittest.main()
1,603
src/models/mobilenet_v2.py
oq-Yuki-po/DeepMetricLearning
0
2171681
from tensorflow import keras
from tensorflow.keras import Model
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D


class BaseMobileNetV2(Model):
    def __init__(self, input_shape):
        super(BaseMobileNetV2, self).__init__()
        weight_decay = 1e-4
        self.base_model = MobileNetV2(input_shape=input_shape,
                                      include_top=False,
                                      weights='imagenet')
        self.base_model.trainable = False
        self.gap = GlobalAveragePooling2D()
        self.dense = Dense(10,
                           kernel_initializer='he_normal',
                           kernel_regularizer=keras.regularizers.l2(weight_decay))

    def call(self, inputs):
        x = self.base_model(inputs)
        x = self.gap(x)
        output = self.dense(x)
        return output
905
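A minimal sketch of how this subclassed model might be driven with standard Keras APIs. The import path, the 32x32 input size, and the training setup are illustrative assumptions, not taken from the repo:

import numpy as np
from tensorflow import keras
from models.mobilenet_v2 import BaseMobileNetV2  # hypothetical import path

# Illustrative 32x32 RGB input; MobileNetV2 imagenet weights accept sizes >= 32.
model = BaseMobileNetV2(input_shape=(32, 32, 3))
model.compile(optimizer='adam',
              # the Dense head has no softmax, so treat its outputs as logits
              loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

# Dummy batch just to show the call signature; real code would use a dataset.
x = np.random.rand(8, 32, 32, 3).astype('float32')
y = np.random.randint(0, 10, size=(8,))
model.fit(x, y, epochs=1)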
CellCycle/ChainModule/Const.py
AQuadroTeam/server_cellsCycle
3
2173371
# Message identifiers
INT = '0'
EXT = '1'
MIN_RANDOM = 1000
MAX_RANDOM = 9999
NO_VERSION = ''
DIE = '666'

# Message indexes
SOURCE_FLAG_INDEX = 0
VERSION_INDEX = 1
PRIORITY_INDEX = 2
RANDOM_INDEX = 3
TARGET_ID_INDEX = 4
TARGET_ADDR_INDEX = 5
TARGET_KEY_INDEX = 6
TARGET_RELATIVE_INDEX = 7
SOURCE_ID_INDEX = 8
NUM_FIELDS = 9

# Priority
DEAD = '5'
RESTORE = '4'
RESTORED = '3'
ADDED = '2'
ADD = '1'
ALIVE = '0'
MEMORY_REQUEST_STARTED = '-1'
MEMORY_REQUEST_FINISHED = '-2'
SCALE_UP = '-3'
SCALE_DOWN = '-4'

# List communication
DEFAULT_ADDR = '127.0.0.1'
CANONICAL_ADDR = ["172.31.20.1", "172.31.20.2", "172.31.20.3", "172.31.20.4", "172.31.20.5"]

# ACK/NACK
NOK = 'NOK'
OK = 'OK'

# Node Address
MEMORY_ADDR = '127.0.0.1:8080'

# Well Known Ports
INT_PORT = '5193'
EXT_PORT = '5194'

# Try a number of times, then stop sending message
TRY_TIMEOUT = 1
TRACKER_TIMEOUT = 1
TRACKER_INFINITE_TIMEOUT = -1

# Writer Timeout, 1 ms
WRITER_TIMEOUT = 0.001
951