hexsha
stringlengths 40
40
| size
int64 6
782k
| ext
stringclasses 7
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
237
| max_stars_repo_name
stringlengths 6
72
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
list | max_stars_count
int64 1
53k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
184
| max_issues_repo_name
stringlengths 6
72
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
list | max_issues_count
int64 1
27.1k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
184
| max_forks_repo_name
stringlengths 6
72
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
list | max_forks_count
int64 1
12.2k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 6
782k
| avg_line_length
float64 2.75
664k
| max_line_length
int64 5
782k
| alphanum_fraction
float64 0
1
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6e1a7e069e83c9928bd2e21f0c69debfb3a517a8
| 723 |
py
|
Python
|
Python/Tipos e comandos/ArvoreDireita.py
|
gianvstheworld/POO
|
f6b9d3d351d703d974a6ca031ee636aea7119367
|
[
"MIT"
] | null | null | null |
Python/Tipos e comandos/ArvoreDireita.py
|
gianvstheworld/POO
|
f6b9d3d351d703d974a6ca031ee636aea7119367
|
[
"MIT"
] | null | null | null |
Python/Tipos e comandos/ArvoreDireita.py
|
gianvstheworld/POO
|
f6b9d3d351d703d974a6ca031ee636aea7119367
|
[
"MIT"
] | null | null | null |
'''
-- Nome: Gianluca Capezzuto Sardinha
-- NUSP: 11876933
-- Exercício 4
'''
def main():
    """Print a right-shifted shrinking star triangle.

    Row k has (n - k) stars preceded by k spaces; a blank line separates
    rows (each row is printed followed by an extra newline).
    """
    stars = leu()
    indent = 0
    while stars > 0:
        # "\n" inside the string plus print's own newline => blank separator line.
        print(" " * indent + "*" * stars + "\n")
        stars -= 1
        indent += 1
def leu():
    """Read and return a non-negative integer from stdin.

    Reprompts on non-integer input or on negative numbers until the user
    supplies a valid value.
    """
    while True:
        try:
            value = int(input('Digite um inteiro: '))
        except ValueError:
            print('O valor digitado deve ser um número inteiro!')
            continue
        if value < 0:
            print('O número deve ser positivo!')
            continue
        return value
# Script entry point: read a size from stdin and draw the triangle.
if __name__ == '__main__':
    main()
| 14.176471 | 65 | 0.426003 |
95367666ad755f362da22f6c63c235c545a9fe06
| 5,160 |
py
|
Python
|
bewerte/gesamt.py
|
jupfi81/NotenManager
|
ee96a41088bb898c025aed7b3c904741cb71d004
|
[
"MIT"
] | null | null | null |
bewerte/gesamt.py
|
jupfi81/NotenManager
|
ee96a41088bb898c025aed7b3c904741cb71d004
|
[
"MIT"
] | null | null | null |
bewerte/gesamt.py
|
jupfi81/NotenManager
|
ee96a41088bb898c025aed7b3c904741cb71d004
|
[
"MIT"
] | null | null | null |
"""
Gehe alle Schüler durch und erstelle eine Zusammenstellung der mündlichen Noten
"""
import configparser
import csv
import os
from statistics import mean
from note import endnote
class Klasse:
    """
    Each pupil (Schueler) is a dictionary entry: the name is the key and the
    value is a dict mapping a grade kind (Art) to the list of recorded grades.
    """
    def __init__(self):
        """Build the pupil list from ./liste.csv and load the config files."""
        self.schueler = dict()
        self.config = configparser.ConfigParser()
        # NOTE(review): configparser.read() does not expand '~'; this path is
        # only found if a literal '~' directory exists in the CWD — confirm
        # whether os.path.expanduser() was intended here.
        self.config.read('~/Projekte/KlassenManager/config/defaultrc')
        self.config.read('klasserc')
        self.header = ["Vorname", "Nachname"]
        self.tabelle = [self.header]
        with open("./liste.csv", encoding='utf-8', mode='r') as datei:
            reader = csv.reader(datei, delimiter=',')
            count = False  # False only while on the CSV header row
            for row in reader:
                if count:
                    name = row[0].strip() + ", " + row[1].strip()
                    self.tabelle.append([row[0], row[1]])
                    self.schueler.update({name: dict()})
                else:
                    count = True  # skip the first (header) row
    def reihenfolge_art(self, kategorie, art):
        """Print the measurements of kind *art* found under *kategorie*.

        Each matching measurement directory is listed with its configured
        'nummer'. Despite the docstring name, no sorting is performed here.
        """
        liste = []
        for messung in os.listdir(kategorie):
            pfad = "./"+kategorie+"/"+messung+"/"
            self.config.read(pfad+"messungrc")
            if self.config['basics']['art'] == art:
                liste.append([pfad, self.config['basics']['nummer']])
        for val in liste:
            print("reihenfolge",val)
    def gesamt_art(self, kategorie, art):
        """Collect every pupil's grades of kind *art* found under *kategorie*,
        storing them in self.schueler[name][art] and extending self.header."""
        print("\nStelle die Noten von", art, "zusammen.")
        for key in self.schueler:
            self.schueler[key].update({art: []})
        # Read the grades from every measurement directory of this kind.
        liste = os.listdir(kategorie)
        for messung in liste:
            pfad = "./"+kategorie+"/"+messung+"/"
            self.config.read(pfad+"messungrc")
            if self.config['basics']['art'] == art:
                print("Bin in Pfad", pfad, "und das sollte nummer", self.config['basics']['nummer'],
                      "sein und öffne", pfad + "noten.csv")
                self.header.append(self.config.get('basics', 'nummer', fallback='NN'))
                with open(pfad+"noten.csv", encoding='utf-8', mode='r') as datei:
                    reader = csv.reader(datei, delimiter=',')
                    count1 = 0  # rows seen (includes the header row)
                    count2 = 0  # pupils actually matched and recorded
                    for row in reader:
                        count1 += 1
                        name = row[0].strip() + ", " + row[1].strip()
                        if name in self.schueler.keys():
                            # A missing/empty grade column is recorded as "".
                            if len(row) >= 3 and row[-1]:
                                self.schueler[name][art].append(float(row[-1].strip()))
                            else:
                                self.schueler[name][art].append("")
                            count2 += 1
                print("Das öffnen hat geklappt und es wurden", count2, "SuS eingetragen.")
                # count1 includes the header row, hence the "+ 1".
                if not count1 == count2 + 1:
                    print("In", pfad, "wurden", count1-count2-1, "viele Namen ausgelassen")
        self.header.append("s"+art)
    def build_tabelle(self):
        """Rebuild self.tabelle: one row per pupil with all grades, a
        quarter-rounded mean per grade kind, and the final grade."""
        self.tabelle = []
        self.header.append("Gesamt")
        self.tabelle.append(self.header)
        for key, schueler in self.schueler.items():
            row = []
            row += key.split(',')
            for art in schueler:
                row += schueler[art]
                # Skip empty-string placeholders when averaging.
                noten = [val for val in schueler[art] if val]
                if noten:
                    # Round the mean to the nearest quarter grade.
                    row.append(round(4*mean(noten), 0)/4)
                else:
                    row.append("")
            row.append(endnote(schueler, 'klasserc'))
            self.tabelle.append(row)
    def write_tabelle(self, ort):
        """Write self.tabelle as a CSV file to the path *ort*."""
        with open(ort, encoding='utf-8', mode='w') as tabelle:
            writer = csv.writer(tabelle, delimiter=',')
            for row in self.tabelle:
                writer.writerow(row)
    def to_string(self):
        """Print all currently stored grade info for every pupil."""
        for key, value in self.schueler.items():
            print(key)
            for art in value:
                print(art, "\t", value[art])
    def process(self):
        """Walk all configured categories and grade kinds, collect the
        grades, and write the summary table to info.csv."""
        kategorien = [val.strip() for val
                      in self.config.get("basics", "Kategorien",
                                         fallback="schriftlich, muendlich").split(",")]
        for kategorie in kategorien:
            for eintrag in self.config["arten"]:
                if self.config["arten"][eintrag] == kategorie:
                    self.gesamt_art(kategorie, eintrag)
        self.build_tabelle()
        self.write_tabelle('info.csv')
if __name__ == "__main__":
    # Build the roster from liste.csv and write the aggregated table to info.csv.
    KLASSE = Klasse()
    KLASSE.process()
    # KLASSE.reihenfolge_art('schriftlich', 'arbeit')
| 40.629921 | 100 | 0.518798 |
95d5821944725b948bccf5c92d5f34e676396e78
| 296 |
py
|
Python
|
test_generate.py
|
corganhejijun/FaceFill
|
6914f3ee680b41161817fe5eafc09b82e59d9113
|
[
"MIT"
] | null | null | null |
test_generate.py
|
corganhejijun/FaceFill
|
6914f3ee680b41161817fe5eafc09b82e59d9113
|
[
"MIT"
] | null | null | null |
test_generate.py
|
corganhejijun/FaceFill
|
6914f3ee680b41161817fe5eafc09b82e59d9113
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from src.util import headFromDir
# Input images and output directory for the face-extraction test run.
IN_DIR = 'datasets/George_W_Bush'
OUT_DIR = 'datasets/George_W_Bush_test'
# dlib 68-point facial landmark model file.
SHAPE_MODEL = "models/shape_predictor_68_face_landmarks.dat"
IMG_SIZE = 128
FACE_SIZE = 64
# NOTE(review): the trailing arguments (10, 30) are positional pass-throughs;
# their meaning is defined in src.util.headFromDir — verify there.
headFromDir(IN_DIR, OUT_DIR, SHAPE_MODEL, IMG_SIZE, FACE_SIZE, 10, 30)
| 26.909091 | 70 | 0.773649 |
c2c50e4a90db3ea286d864d2c80ee6139386b8ac
| 498 |
py
|
Python
|
01_Einfuehrung/while_loop.py
|
Hananja/DQI19-Python
|
63749c49910b5be57d09bb98a5fe728c8fdd5280
|
[
"Unlicense"
] | null | null | null |
01_Einfuehrung/while_loop.py
|
Hananja/DQI19-Python
|
63749c49910b5be57d09bb98a5fe728c8fdd5280
|
[
"Unlicense"
] | null | null | null |
01_Einfuehrung/while_loop.py
|
Hananja/DQI19-Python
|
63749c49910b5be57d09bb98a5fe728c8fdd5280
|
[
"Unlicense"
] | null | null | null |
#
# Computing the maximum of natural numbers
#
print("Bitte positive ganze Zahlen eingeben (negativ beendet): ")
user_input = 0 # smallest non-negative value, so the loop body runs at least once
max_value = 0 # ditto
while user_input >= 0: # a negative number ends the loop
    user_input_text = input("Bitte Zahl eingeben: ")
    user_input = int(user_input_text)
    if max_value < user_input: # new maximum?
        max_value = user_input
        print("neues Maximum gefunden: " + user_input_text)
print("Maximum: " + str(max_value))
| 31.125 | 65 | 0.708835 |
6659dd1eff26b77b402cadc1607acc42eec700a8
| 221 |
py
|
Python
|
comp/microsoft/010_min_steps_to_make_piles_equal_height.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2015-12-16T04:01:03.000Z
|
2015-12-16T04:01:03.000Z
|
comp/microsoft/010_min_steps_to_make_piles_equal_height.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2016-02-09T06:00:07.000Z
|
2016-02-09T07:20:13.000Z
|
comp/microsoft/010_min_steps_to_make_piles_equal_height.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 2 |
2019-06-27T09:07:26.000Z
|
2019-07-01T04:40:13.000Z
|
class Solution:
    def min_steps(self, piles):
        """Return the minimum number of steps to make all piles equal height.

        Sorts *piles* descending in place (preserving the original's side
        effect).  Each position where the height drops below its left
        neighbor forces one extra step for every taller pile before it,
        so the answer is the sum of those position indices.
        """
        piles.sort(reverse=True)
        return sum(idx for idx in range(1, len(piles))
                   if piles[idx] != piles[idx - 1])
| 24.555556 | 40 | 0.479638 |
66934240b0dfef02d2cf125a05388d6e7ff951f6
| 739 |
py
|
Python
|
python_gui_tkinter/Tkinter/TkinterCourse/3_tk_label.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python_gui_tkinter/Tkinter/TkinterCourse/3_tk_label.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python_gui_tkinter/Tkinter/TkinterCourse/3_tk_label.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
# You want the text drawn on top of the image? No problem! We need just one label and use the image and the text option at the same time. By default, if an image is given, it is drawn instead of the text. To get the text as well, you have to use the compound option. If you set the compound option to CENTER the text will be drawn on top of the image:
import tkinter as tk
root = tk.Tk()
# Requires a file "python.png" in the working directory (GIF/PPM/PGM/PNG).
logo = tk.PhotoImage(file="python.png")
explanation = """At present, only GIF and PPM/PGM
formats are supported, but an interface
exists to allow additional image file
formats to be added easily."""
# NOTE(review): pack() returns None, so `w` is None (not the Label widget).
# Harmless here because `w` is never used afterwards.
w = tk.Label(root,
             compound = tk.CENTER,
             text=explanation,
             image=logo).pack(side="right")
root.mainloop()
| 36.95 | 352 | 0.705007 |
66c4b626973f1bd493567b55ac19a4287c5592c8
| 123 |
py
|
Python
|
dataloader/transforms_utils/__init__.py
|
UESTC-Liuxin/SkmtSeg
|
1251de57fae967aca395644d1c70a9ba0bb52271
|
[
"Apache-2.0"
] | 2 |
2020-12-22T08:40:05.000Z
|
2021-03-30T08:09:44.000Z
|
dataloader/transforms_utils/__init__.py
|
UESTC-Liuxin/SkmtSeg
|
1251de57fae967aca395644d1c70a9ba0bb52271
|
[
"Apache-2.0"
] | null | null | null |
dataloader/transforms_utils/__init__.py
|
UESTC-Liuxin/SkmtSeg
|
1251de57fae967aca395644d1c70a9ba0bb52271
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@description:
@author: LiuXin
@contact: [email protected]
@Created on: 2020/11/20 上午11:41
"""
| 15.375 | 31 | 0.642276 |
b01bf5b3d0867b3b59503282b377d9d8dd0217c1
| 633 |
py
|
Python
|
chillow/service/data_writer.py
|
jonashellmann/informaticup21-team-chillow
|
f2e519af0a5d9a9368d62556703cfb1066ebb58f
|
[
"MIT"
] | 3 |
2021-01-17T23:32:07.000Z
|
2022-01-30T14:49:16.000Z
|
chillow/service/data_writer.py
|
jonashellmann/informaticup21-team-chillow
|
f2e519af0a5d9a9368d62556703cfb1066ebb58f
|
[
"MIT"
] | 2 |
2021-01-17T13:37:56.000Z
|
2021-04-14T12:28:49.000Z
|
chillow/service/data_writer.py
|
jonashellmann/informaticup21-team-chillow
|
f2e519af0a5d9a9368d62556703cfb1066ebb58f
|
[
"MIT"
] | 2 |
2021-04-02T14:53:38.000Z
|
2021-04-20T11:10:17.000Z
|
import json
from abc import ABCMeta, abstractmethod
from chillow.model.action import Action
class DataWriter(metaclass=ABCMeta):
    """Abstract base for converting an object to a string."""

    @abstractmethod
    def write(self, action: Action) -> str:
        """Convert an action to its string representation.

        Args:
            action: The action to be converted.

        Returns:
            The action as a string.
        """
        ...
class JSONDataWriter(DataWriter):
    """Converts an object to a JSON string."""

    def write(self, action: Action) -> str:
        """See base class."""
        payload = {"action": action.name}
        return json.dumps(payload)
| 21.827586 | 50 | 0.614534 |
a52aa8026b2f0058d3f2f597bfaa5f1a740a9ecf
| 972 |
py
|
Python
|
deprecated/benchmark/reader/reader_visreader.py
|
hutuxian/FleetX
|
843c7aa33f5a14680becf058a3aaf0327eefafd4
|
[
"Apache-2.0"
] | 170 |
2020-08-12T12:07:01.000Z
|
2022-03-07T02:38:26.000Z
|
deprecated/benchmark/reader/reader_visreader.py
|
hutuxian/FleetX
|
843c7aa33f5a14680becf058a3aaf0327eefafd4
|
[
"Apache-2.0"
] | 195 |
2020-08-13T03:22:15.000Z
|
2022-03-30T07:40:25.000Z
|
deprecated/benchmark/reader/reader_visreader.py
|
hutuxian/FleetX
|
843c7aa33f5a14680becf058a3aaf0327eefafd4
|
[
"Apache-2.0"
] | 67 |
2020-08-14T02:07:46.000Z
|
2022-03-28T10:05:33.000Z
|
"""
Reader using visreader.
"""
from __future__ import division
from __future__ import print_function
from visreader.reader_builder import ReaderBuilder
from visreader.reader_builder import ReaderSetting
THREAD = 8
def _parse_kv(r):
    """
    parse kv data from sequence file for imagenet dataset.
    """
    # cPickle is Python 2 only; this module predates Python 3.
    import cPickle
    _key, raw = r
    record = cPickle.loads(raw)
    return record['image'], record['label']
def train(data_dir, num_threads=THREAD):
    """Build and return a visreader training reader over *data_dir*.

    Uses thread-based workers (no shared memory) and a single pass over
    the data, parsing samples with _parse_kv.
    """
    worker_args = {
        'worker_mode': 'python_thread',
        'use_sharedmem': False,
        'worker_num': num_threads,
    }
    pipeline_settings = {
        'sample_parser': _parse_kv,
        'lua_fname': None,
        'worker_args': worker_args,
    }
    train_setting = ReaderSetting(
        data_dir, sc_setting={'pass_num': 1}, pl_setting=pipeline_settings)
    builder = ReaderBuilder(settings={'train': train_setting},
                            pl_name='imagenet')
    return builder.train()
| 24.923077 | 69 | 0.682099 |
3c5ad137248656d098c911417ce8a217b92134f6
| 416 |
py
|
Python
|
src/onegov/election_day/formats/vote/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/election_day/formats/vote/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/election_day/formats/vote/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from onegov.election_day.formats.vote.default import import_vote_default
from onegov.election_day.formats.vote.internal import import_vote_internal
from onegov.election_day.formats.vote.wabsti import import_vote_wabsti
from onegov.election_day.formats.vote.wabstic import import_vote_wabstic
# Public API of the vote formats package.
__all__ = [
    'import_vote_default',
    'import_vote_internal',
    'import_vote_wabsti',
    'import_vote_wabstic',
]
| 32 | 74 | 0.824519 |
59a7333ce7c2c1042e484f8481e6c8a5a8278bda
| 61 |
py
|
Python
|
BlackJack/run.py
|
jvpersuhn/hb
|
5487abf4fc5b742cf21086ca5b823b915132445d
|
[
"MIT"
] | null | null | null |
BlackJack/run.py
|
jvpersuhn/hb
|
5487abf4fc5b742cf21086ca5b823b915132445d
|
[
"MIT"
] | null | null | null |
BlackJack/run.py
|
jvpersuhn/hb
|
5487abf4fc5b742cf21086ca5b823b915132445d
|
[
"MIT"
] | null | null | null |
from blackJack import BlackJack
# Entry point: create a game instance and start playing ("jogar" = play).
b = BlackJack()
b.jogar()
| 8.714286 | 31 | 0.721311 |
59b8489895e215a7a18aa38c4da5a21b9715b1b9
| 1,005 |
py
|
Python
|
functions/rgb2wb/rgb2wb/handler.py
|
Reyes-fred/SmartCity
|
eee87c70d38ef4168fbdfc4ed0ee531090f283e6
|
[
"Apache-2.0"
] | null | null | null |
functions/rgb2wb/rgb2wb/handler.py
|
Reyes-fred/SmartCity
|
eee87c70d38ef4168fbdfc4ed0ee531090f283e6
|
[
"Apache-2.0"
] | null | null | null |
functions/rgb2wb/rgb2wb/handler.py
|
Reyes-fred/SmartCity
|
eee87c70d38ef4168fbdfc4ed0ee531090f283e6
|
[
"Apache-2.0"
] | null | null | null |
# NOTE: Below the required modules to work with PIL
# python module.
#
#from PIL import Image
#import io
#import sys
#import os
#
import numpy as np
import cv2
import sys
def handle(image_data):
    """handle a request to the function

    Decodes the raw encoded image bytes, converts the image to grayscale,
    and returns it re-encoded as JPEG.

    Args:
        image_data (bytes): raw encoded image bytes (e.g. JPEG/PNG)

    Returns:
        str: stringified JPEG bytes of the grayscale image
    """
    # np.frombuffer replaces np.fromstring, which is deprecated/removed in
    # modern NumPy for binary input.
    np_array = np.frombuffer(image_data, np.uint8)
    image = cv2.imdecode(np_array, cv2.IMREAD_COLOR)
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # ndarray.tobytes() replaces the deprecated .tostring() alias.
    img_str = cv2.imencode('.jpg', gray_image)[1].tobytes()
    return str(img_str)
# NOTE: The next piece of code is able to do the same objective
# (convert a JPG image from RGB to gray scale), however it uses PIL
# python module.
#
# image = Image.open(io.BytesIO(image_data))
# out = image.convert('L')
# out = image.convert('1')
# out.save('/tmp/out.jpg')
# out_file = open("/tmp/out.jpg","r+")
# content = out_file.read()
# out_file.close()
# os.remove("/tmp/out.jpg")
#
# return str(content)
| 25.125 | 67 | 0.662687 |
75d63b790f3271ac34f640557dcf004092d63c94
| 2,354 |
py
|
Python
|
Project Euler Qusetions 11 - 20/Project Euler Question 18.py
|
Clayton-Threm/Coding-Practice
|
6671e8a15f9e797338caa617dae45093f4157bc1
|
[
"MIT"
] | 1 |
2020-02-11T02:03:02.000Z
|
2020-02-11T02:03:02.000Z
|
Project Euler Qusetions 11 - 20/Project Euler Question 18.py
|
Clayton-Threm/Coding-Practice
|
6671e8a15f9e797338caa617dae45093f4157bc1
|
[
"MIT"
] | null | null | null |
Project Euler Qusetions 11 - 20/Project Euler Question 18.py
|
Clayton-Threm/Coding-Practice
|
6671e8a15f9e797338caa617dae45093f4157bc1
|
[
"MIT"
] | null | null | null |
#Project Euler Question 18
#By starting at the top of the triangle below and moving to adjacent numbers on the row below, the maximum total from top to bottom is 23.
#3
#7 4
#2 4 6
#8 5 9 3
#That is, 3 + 7 + 4 + 9 = 23.
#Find the maximum total from top to bottom of the triangle below:
grid = """75
95 64
17 47 82
18 35 87 10
20 04 82 47 65
19 01 23 75 03 34
88 02 77 73 07 63 67
99 65 04 28 06 16 70 92
41 41 26 56 83 40 80 70 33
41 48 72 33 47 32 37 16 94 29
53 71 44 65 25 43 91 52 97 51 14
70 11 33 28 77 73 17 78 39 68 17 57
91 71 52 38 17 14 91 43 58 50 27 29 48
63 66 04 68 89 53 67 30 73 16 69 87 40 31
04 62 98 27 23 09 70 98 73 93 38 53 60 04 23"""

# Parse each row straight to ints: int("04") == 4, so the former chain of
# string replacements (and the fragile row.index()-based in-place mutation
# while iterating) is unnecessary.
grid = [[int(term) for term in row.split()] for row in grid.split("\n")]

# Bottom-up dynamic programming: collapse the triangle one row at a time.
# best[i] holds the maximum sum achievable from position i of the current
# row down to the bottom.
best = list(grid[-1])
for row in grid[-2::-1]:
    best = [value + max(best[i], best[i + 1]) for i, value in enumerate(row)]
highest_sum = best[0]
print(highest_sum, "is the highest sum")
| 24.020408 | 138 | 0.581988 |
75ec1f6ead4540bc1bcce7c1c9fe037146ef7bae
| 2,151 |
py
|
Python
|
python/en/archive/topics/temp/audio/audiosp/pcm2wav/src/audiosp.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
python/en/archive/topics/temp/audio/audiosp/pcm2wav/src/audiosp.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
python/en/archive/topics/temp/audio/audiosp/pcm2wav/src/audiosp.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Audio signal processing, https://en.wikipedia.org/wiki/Audio_signal_processing
Last updated: 2019-06-27 (Thu)
First written: 2019-06-11 (Tue)
Written by Tae-Hyung "T" Kim, Ph.D.
pcm2wav: Linux, Shell Scripting, Python을 써서 .pcm을 .wav로 파일 포맷 변환 (In Korean)
https://aimldl.blog.me/221559323232
"""
import wave
# The parameters are prerequisite information. More specifically,
# channels, bit_depth, sampling_rate must be known to use this function.
class AudioSignalProcessing:
    def __init__(self):
        pass

    # channels, bit_depth and sampling_rate are prerequisite information:
    # raw PCM carries no header, so the caller must already know them.
    def pcm2wav(self, pcm_file, wav_file, channels=1, bit_depth=16, sampling_rate=16000):
        """Wrap the raw samples of *pcm_file* in a WAV container at *wav_file*.

        Raises ValueError if bit_depth is not a multiple of 8.
        """
        if bit_depth % 8 != 0:
            raise ValueError("bit_depth "+str(bit_depth)+" must be a multiple of 8.")
        # Read the raw PCM samples as one binary blob.
        with open(pcm_file, 'rb') as raw:
            frames = raw.read()
        # wave.open is closed manually on purpose — see the module note below
        # about Wave_write lacking __exit__ on older Pythons.
        writer = wave.open(wav_file, 'wb')
        writer.setnchannels(channels)
        writer.setsampwidth(bit_depth // 8)
        writer.setframerate(sampling_rate)
        writer.writeframes(frames)
        writer.close()
# Caution: do not enclose wave.open(...) with the 'with' keyword. That is,
#
# with wave.open( wav_file, 'wb') as obj2write:
# obj2write.setnchannels( channels )
# ...
#
# will cause an error:
#
# AttributeError: Wave_write instance has no attribute '__exit__'
#
# This error can be solved as the following suggestion.
# [SOLVED] AttributeError: Wave_write instance has no attribute '__exit__'
# https://tutel.me/c/programming/questions/46538867/at ̄tributeerror+wave_write+instance+has+no+attribute+39__exit__39
# But the code doens't look neat and more number of lines are required.
| 40.584906 | 123 | 0.654114 |
f956e22c976e12c7a70be3eacc25d3ac81423c4d
| 11,863 |
py
|
Python
|
Transformer/Transformer.py
|
baowj-678/TC
|
4c9bf6bf2202c9930616259d3f3e1a2b0529a6e6
|
[
"MIT"
] | null | null | null |
Transformer/Transformer.py
|
baowj-678/TC
|
4c9bf6bf2202c9930616259d3f3e1a2b0529a6e6
|
[
"MIT"
] | null | null | null |
Transformer/Transformer.py
|
baowj-678/TC
|
4c9bf6bf2202c9930616259d3f3e1a2b0529a6e6
|
[
"MIT"
] | null | null | null |
"""
Transformer 实现
Author: Bao Wenjie
Date: 2021/3/7
"""
import math
from numpy.core.numeric import outer
import torch
import torch.nn as NN
from torch.autograd import Variable
import torch.nn.functional as F
from torch.nn.modules.dropout import Dropout
class TransformerGenerate(NN.Module):
    """ Transformer encoder-decoder with a vocabulary projection head. """
    def __init__(self, d_model, d_ff, max_len, src_seq_len, tgt_seq_len, vocab_size, h=8, dropout=0.1, n_layers=6):
        super(TransformerGenerate, self).__init__()
        # Sinusoidal positional encoding, shared by encoder/decoder inputs.
        self.PosEnc = PositionalEncoding(d_model, max_len)
        self.encoder = Encoder(src_seq_len, d_model, h, dropout, d_ff, n_layers)
        self.decoder = Decoder(tgt_seq_len, d_model, h, dropout, d_ff, n_layers)
        # Projects decoder output to vocabulary logits / probabilities.
        self.linear = NN.Linear(d_model, vocab_size)
        self.softmax = NN.Softmax(dim=-1)
    def forward(self, X, src_mask, tgt_src_mask, tgt_mask, Y=None):
        """
        NOTE(review): forward is not implemented — the body is `pass`, so
        calling this model returns None. Only the intended shapes are noted:

        :X (batch_size, src_seq_len, d_model)
        :src_mask (batch_size, src_seq_len, src_seq_len)
        :tgt_src_mask (batch_size, tgt_seq_len, src_seq_len)
        :tgt_seq_mask (batch_size, src_seq_len, src_seq_len)
        :Y (batch_size, tgt_seq_len, d_model)
        """
        pass
class PositionalEncoding(NN.Module):
    """ Sinusoidal positional encoding ("Attention Is All You Need"). """
    def __init__(self, d_model, max_len=50000):
        """
        Param
        -----
        :d_model 词向量的维度 (must be even)
        :max_len 句子的最大长度
        """
        super(PositionalEncoding, self).__init__()
        assert d_model % 2 == 0
        pos_enc = torch.zeros(max_len, d_model)
        # (max_len, d_model)
        pos = torch.arange(0, max_len, 1).unsqueeze(1)
        # (max_len, 1)
        # 1 / 10000^(2i/d_model) for each pair of dimensions (2i, 2i+1)
        div_term = 1 / torch.pow(10000.0, torch.arange(0, d_model, 2) / d_model)
        # (d_model/2)
        pos_enc[:, 0::2] = torch.sin(pos * div_term)  # even dims: sine
        pos_enc[:, 1::2] = torch.cos(pos * div_term)  # odd dims: cosine
        pos_enc = pos_enc.unsqueeze(0)
        # Registered as a buffer: saved/moved with the module, not a parameter.
        self.register_buffer('pos_enc', pos_enc)
        # (1, max_len, d_model)
    def forward(self, X:torch.tensor):
        """
        Add the positional encoding to X.

        Param
        -----
        :X [batch_size, length, d_model]
        Return
        ------
        :X [batch_size, length, d_model]
        """
        # torch.autograd.Variable is deprecated (a no-op since PyTorch 0.4);
        # slicing the buffer directly is equivalent — buffers never require grad.
        return X + self.pos_enc[:, 0:X.shape[1]]
def attention(query, key, value, mask=None, dropout=None):
    """
    注意力机制 (scaled dot-product attention)

    Param
    -----
    :query (batch_size, seq_len, d_k)
    :key (batch_size, seq_len, d_k)
    :value (batch_size, seq_len, d_v)
    :mask (batch_size, seq_len, seq_len) — True entries are masked out
    :dropout function applied to the attention weights, or None
    Return
    ------
    :output (batch_size, seq_len, d_v)
    :p_attn (batch_size, seq_len, d_k)
    """
    d_k = query.shape[-1]
    score = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
    # (batch_size, seq_len, seq_len)
    if mask is not None:
        # Large negative score so softmax assigns ~0 weight to masked positions.
        score = score.masked_fill(mask, -100)
    p_attn = F.softmax(score, dim=-1)
    if dropout is not None:
        p_attn = dropout(p_attn)
    return torch.matmul(p_attn, value), p_attn
class MultiHeadAttention(NN.Module):
    """ 多头注意力 (multi-head attention) """
    def __init__(self, h, d_model, dropout=0.1) -> None:
        """
        Param
        -----
        :h 头的个数 (number of heads)
        :d_model 模型维度 (must be divisible by h)
        :dropout
        """
        super(MultiHeadAttention, self).__init__()
        assert d_model % h == 0
        self.d_k = d_model // h
        self.h = h
        self.W_O = NN.Linear(self.d_k * h, d_model)
        # NN.ModuleList (not a plain Python list) is required so the per-head
        # projections are registered as submodules: otherwise their parameters
        # are invisible to .parameters(), .to(device) and state_dict().
        self.W_Q = NN.ModuleList(NN.Linear(d_model, self.d_k) for i in range(h))
        self.W_K = NN.ModuleList(NN.Linear(d_model, self.d_k) for i in range(h))
        self.W_V = NN.ModuleList(NN.Linear(d_model, self.d_k) for i in range(h))
        self.dropout = NN.Dropout(dropout)
        self.attn = None  # last attention weights, kept for inspection
    def forward(self, query, key, value, mask=None):
        """
        Param
        -----
        :query (batch_size, seq_len, d_model)
        :key (batch_size, seq_len, d_model)
        :value (batch_size, seq_len, d_model)
        :mask (batch_size, seq_len, seq_len)
        Return
        ------
        :x (batch_size, seq_len, d_model)
        """
        if mask is not None:
            # Broadcast the same mask over the stacked head dimension.
            mask = mask.unsqueeze(dim=0)
            # (1, batch_size, seq_len, seq_len)
        batch_size = query.shape[0]
        # Per-head projections: h tensors of (batch_size, seq_len, d_k/d_v)
        query = [network(query) for network in self.W_Q]
        key = [network(key) for network in self.W_K]
        value = [network(value) for network in self.W_V]
        query = torch.stack(query)
        key = torch.stack(key)
        value = torch.stack(value)
        # (h, batch_size, seq_len, d_k/d_v)
        x, self.attn = attention(query, key, value, mask)
        # (h, batch_size, seq_len, d_v)
        x = x.permute([1, 2, 0, 3])
        # (batch_size, seq_len, h, d_v) -> concat heads
        x = x.reshape(shape=(batch_size, -1, self.h * self.d_k))
        x = self.W_O(x)
        x = self.dropout(x)
        return x
class PositionwiseFeedForward(NN.Module):
    """ Position-wise feed-forward: Linear -> ReLU -> Dropout -> Linear. """
    def __init__(self, d_model, d_ff, dropout=0) -> None:
        """
        Param
        -----
        :d_model 模型(输入)维度 (input/output dimension)
        :d_ff 内部参数维度 (hidden dimension)
        """
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = NN.Linear(d_model, d_ff)
        self.relu = NN.ReLU()
        self.w_2 = NN.Linear(d_ff, d_model)
        self.dropout = NN.Dropout(dropout)
    def forward(self, X):
        """
        Param
        -----
        :X (batch_size, seq_len, d_model)
        Return
        ------
        :X (batch_size, seq_len, d_model)
        """
        hidden = self.dropout(self.relu(self.w_1(X)))
        return self.w_2(hidden)
class Embedding(NN.Module):
    """ Token embedding scaled by sqrt(d_model), as in the Transformer paper. """
    def __init__(self, vocab_size, d_model) -> None:
        """
        Param
        -----
        :vocab_size 词典大小(int)
        :d_model 模型维度(int)
        """
        super(Embedding, self).__init__()
        self.embeddings = NN.Embedding(vocab_size, d_model)
        # Precompute the scale factor once.
        self.sqrt_d_model = math.sqrt(d_model)
    def forward(self, X):
        """
        词向量编码 (embed token ids and scale)

        Param
        -----
        :X [torch.tensor](batch_size, max_seq_len)
        Return
        ------
        :embed [torch.tensor](batch_size, max_seq_len, d_model)
        """
        return self.embeddings(X) * self.sqrt_d_model
class AddNorm(NN.Module):
    """ 残差连接 (residual connection followed by layer normalization) """
    def __init__(self, seq_len, d_model, dropout=0.1) -> None:
        super(AddNorm, self).__init__()
        # Normalizes over the last two dims (seq_len, d_model).
        self.layernorm = NN.LayerNorm((seq_len, d_model))
        self.dropout = NN.Dropout(dropout)
    def forward(self, X, sub_X):
        """Return LayerNorm(X + Dropout(sub_X))."""
        return self.layernorm(X + self.dropout(sub_X))
class EncoderLayer(NN.Module):
    """ One encoder layer: self-attention + feed-forward, each with add&norm. """
    def __init__(self, seq_len, d_model, h, dropout, d_ff) -> None:
        """
        一个编码层 (one encoder layer)

        Param
        -----
        :seq_len 句子长度
        :d_model 模型维度
        :h 头数
        :dropout
        """
        super(EncoderLayer, self).__init__()
        self.self_attn = MultiHeadAttention(h, d_model, dropout)
        self.addnorm_1 = AddNorm(seq_len, d_model, dropout)
        self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout)
        self.addnorm_2 = AddNorm(seq_len, d_model, dropout)
    def forward(self, X, mask):
        """
        Param
        -----
        :X (batch_size, seq_len, d_model)
        :mask (batch_size, seq_len, seq_len)
        Return
        ------
        :X (batch_size, seq_len, d_model)
        """
        X = self.addnorm_1(X, self.self_attn(X, X, X, mask))
        return self.addnorm_2(X, self.feed_forward(X))
class Encoder(NN.Module):
    def __init__(self, seq_len, d_model, h, dropout, d_ff, n_layer=6) -> None:
        """
        编码器 (encoder: a stack of n_layer identical EncoderLayers)

        Param
        -----
        :seq_len 句子长度
        :d_model 模型维度
        :h 头数
        :dropout
        :d_ff 前馈层的内部向量维度
        :n_layer 编码器包含的层数
        """
        super(Encoder, self).__init__()
        # NN.ModuleList instead of a plain Python list so the stacked layers
        # are registered as submodules (their parameters are visible to
        # .parameters(), move with .to(device), and are saved in state_dict).
        self.encoder_layers = NN.ModuleList(
            EncoderLayer(seq_len, d_model, h, dropout, d_ff) for i in range(n_layer))
    def forward(self, X, mask):
        """
        Param
        -----
        :X (batch_size, seq_len, d_model)
        :mask (batch_size, seq_len, seq_len)
        Return
        ------
        :X (batch_size, seq_len, d_model)
        """
        for encoderlayer in self.encoder_layers:
            X = encoderlayer(X, mask)
        return X
class DecoderLayer(NN.Module):
    """ 解码器的子层: masked self-attention, encoder-decoder attention and
    feed-forward, each followed by add&norm. """
    def __init__(self, tgt_len, d_model, h, dropout, d_ff) -> None:
        """
        Param
        -----
        :tgt_len 句子长度 (target length)
        :d_model 模型维度
        :h 头数
        :dropout
        :d_ff
        """
        super(DecoderLayer, self).__init__()
        self.self_attn = MultiHeadAttention(h, d_model, dropout)
        self.addnorm_1 = AddNorm(tgt_len, d_model, dropout)
        self.src_attn = MultiHeadAttention(h, d_model, dropout)
        self.addnorm_2 = AddNorm(tgt_len, d_model, dropout)
        self.feed_forward = PositionwiseFeedForward(d_model, d_ff)
        self.addnorm_3 = AddNorm(tgt_len, d_model, dropout)
    def forward(self, X, M, src_mask, tgt_mask):
        """
        Param
        -----
        :X 训练时的目标语句 (batch_size, tgt_len, d_model)
        :M encoder得到的数据 (batch_size, src_len, d_model)
        :src_mask 对XM进行attention时的mask (batch_size, src_len, src_len)
        :tgt_mask 生成目标语句的mask (batch_size, tgt_len, src_len)
        Return
        ------
        :X (batch_size, tgt_len, d_model)
        """
        X = self.addnorm_1(X, self.self_attn(X, X, X, tgt_mask))
        X = self.addnorm_2(X, self.src_attn(X, M, M, src_mask))
        return self.addnorm_3(X, self.feed_forward(X))
class Decoder(NN.Module):
    """ 解码器 (decoder: a stack of n_layers identical DecoderLayers) """
    def __init__(self, tgt_len, d_model, h, dropout, d_ff, n_layers) -> None:
        super(Decoder, self).__init__()
        # NN.ModuleList instead of a plain Python list so the stacked layers
        # are registered as submodules (their parameters are visible to
        # .parameters(), move with .to(device), and are saved in state_dict).
        self.decoderlayers = NN.ModuleList(
            DecoderLayer(tgt_len, d_model, h, dropout, d_ff) for i in range(n_layers))
    def forward(self, X, M, src_mask, tgt_mask):
        """
        Param
        -----
        :X 训练时的目标语句 (batch_size, tgt_len, d_model)
        :M encoder得到的数据 (batch_size, src_len, d_model)
        :src_mask 对XM进行attention时的mask (batch_size, src_len, src_len)
        :tgt_mask 生成目标语句的mask (batch_size, tgt_len, src_len)
        Return
        ------
        :X (batch_size, tgt_len, d_model)
        """
        for decoderlayer in self.decoderlayers:
            X = decoderlayer(X, M, src_mask, tgt_mask)
        return X
def en_seq_mask(seq_len, tgt_len, src_len):
    """
    encoder的句子mask — build the padding mask (True = masked position).

    Param
    -----
    :seq_len (list) per-sample valid lengths (batch_size)
    :tgt_len (int)
    :src_len (int)
    Return
    ------
    :mask (torch.BoolTensor) (batch_size, tgt_len, src_len)
    """
    mask = torch.ones((len(seq_len), tgt_len, src_len), dtype=torch.bool)
    for sample, length in zip(mask, seq_len):
        rows = min(length, tgt_len)
        cols = min(length, src_len)
        # Unmask the valid top-left region of each sample.
        sample[:rows, :cols] = False
    return mask
def de_seq_mask(tgt_len):
    """
    Build the strictly upper-triangular boolean mask used by the decoder
    (True above the main diagonal, False on and below it).

    Param
    -----
    :tgt_len side length of the square mask (int)

    Return
    ------
    :mask (1, tgt_len, tgt_len)
    """
    ones = torch.ones((1, tgt_len, tgt_len), dtype=torch.bool)
    return torch.triu(ones, diagonal=1)
# tgt_len = 25
# src_len = 23
# d_model = 512
# h = 8
# dropout = 0.1
# d_ff = 2048
# batch_size = 13
# seq_lens = [22, 21, 21, 20, 18, 15, 17, 19, 13, 12, 11, 11, 10]
# M = torch.randn((batch_size, src_len, d_model))
# X = torch.randn((batch_size, tgt_len, d_model))
# src_mask = en_seq_mask(seq_lens, tgt_len, src_len)
# tgt_mask = de_seq_mask(tgt_len)
# model = Decoder(tgt_len, d_model, h, dropout, d_ff, 6)
# model(X, M, src_mask, tgt_mask)
| 29.14742 | 140 | 0.569586 |
9b2833c40162cf572a5fa199c3205e2012dd0082
| 762 |
py
|
Python
|
Hackerrank_problems/Find Merge Point of Two Lists/solution.py
|
gbrls/CompetitiveCode
|
b6f1b817a655635c3c843d40bd05793406fea9c6
|
[
"MIT"
] | 165 |
2020-10-03T08:01:11.000Z
|
2022-03-31T02:42:08.000Z
|
Hackerrank_problems/Find Merge Point of Two Lists/solution.py
|
gbrls/CompetitiveCode
|
b6f1b817a655635c3c843d40bd05793406fea9c6
|
[
"MIT"
] | 383 |
2020-10-03T07:39:11.000Z
|
2021-11-20T07:06:35.000Z
|
Hackerrank_problems/Find Merge Point of Two Lists/solution.py
|
gbrls/CompetitiveCode
|
b6f1b817a655635c3c843d40bd05793406fea9c6
|
[
"MIT"
] | 380 |
2020-10-03T08:05:04.000Z
|
2022-03-19T06:56:59.000Z
|
#
# For your reference:
#
# SinglyLinkedListNode:
# int data
# SinglyLinkedListNode next
#
def findMergeNode(head1, head2):
    """
    Return the data of the node where two singly linked lists merge.

    Each pointer advances one node per step; when it falls off the end of
    its list it restarts at the head of the *other* list.  Both pointers
    therefore travel the same total distance (len1 + len2) before reaching
    the merge point, so they land on it in the same iteration and the loop
    exits there.
    """
    p, q = head1, head2
    while p != q:
        p = p.next if p.next else head2
        q = q.next if q.next else head1
    return q.data
| 26.275862 | 63 | 0.606299 |
fd4cfd5eecac1a59569a106e426a6dc5e3fdb0a8
| 713 |
py
|
Python
|
pages/themes/beginners/sequenceDataTypes/Tasks_and_HW/task6_solution.py
|
ProgressBG-Python-Course/ProgressBG-Python
|
6429833696c2c50d9f902f62cc3a65ca62659c69
|
[
"MIT"
] | null | null | null |
pages/themes/beginners/sequenceDataTypes/Tasks_and_HW/task6_solution.py
|
ProgressBG-Python-Course/ProgressBG-Python
|
6429833696c2c50d9f902f62cc3a65ca62659c69
|
[
"MIT"
] | null | null | null |
pages/themes/beginners/sequenceDataTypes/Tasks_and_HW/task6_solution.py
|
ProgressBG-Python-Course/ProgressBG-Python
|
6429833696c2c50d9f902f62cc3a65ca62659c69
|
[
"MIT"
] | null | null | null |
# (city, distance-in-km) pairs measured from Sofia.
distances_from_sofia = [
    ('Bansko', 97),
    ('Brussels', 1701),
    ('Alexandria', 1403),
    ('Nice', 1307),
    ('Szeged', 469),
    ('Dublin', 2479),
    ('Palermo', 987),
    ('Oslo', 2098),
    ('Moscow', 1779),
    ('Madrid', 2259),
    ('London', 2019)
]

# Keep only the pairs whose distance (second tuple element) is under 1500 km.
selected_distances = [pair for pair in distances_from_sofia if pair[1] < 1500]

# Report the filtered list of distance tuples:
print("Distances bellow 1500 km from Sofia are:")
for city, distance in selected_distances:
    print("{} - {}".format(city, distance))
| 25.464286 | 58 | 0.625526 |
95298f98063454f33325f9c970621df487f01c4a
| 2,832 |
py
|
Python
|
test/test_npu/test_network_ops/test_chunk_copy_contiguous.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-12-02T03:07:35.000Z
|
2021-12-02T03:07:35.000Z
|
test/test_npu/test_network_ops/test_chunk_copy_contiguous.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-11-12T07:23:03.000Z
|
2021-11-12T08:28:13.000Z
|
test/test_npu/test_network_ops/test_chunk_copy_contiguous.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020 Huawei Technologies Co., Ltd
# Copyright (c) 2019, Facebook CORPORATION.
# All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
import time
class TestChunkToContiguous(TestCase):
    """Check that chunk()+contiguous() matches between CPU and NPU, and that
    the NPU path finishes within the 30 s budget."""
    def cpu_op_exec(self, input, chunk, dim):
        # Chunk, force every piece contiguous, then concatenate back.
        pieces = torch.chunk(input, chunk, dim)
        merged = torch.cat([piece.contiguous() for piece in pieces])
        return merged.numpy()
    def npu_op_exec(self, input, chunk, dim):
        # Same as the CPU path, but copy each chunk back to host memory first.
        pieces = torch.chunk(input, chunk, dim)
        merged = torch.cat([piece.contiguous().to("cpu") for piece in pieces])
        return merged.numpy()
    def test_chunk_to_contiguous(self, device):
        dtype_options = [np.float16, np.float32]
        format_options = [0, 3, 29]
        shape_options = [[2, 6, 6]]
        # Cartesian product of dtype x storage format x shape.
        shape_format = [
            [dtype, fmt, shape]
            for dtype in dtype_options
            for fmt in format_options
            for shape in shape_options
        ]
        cpu_time = 0.
        npu_time = 0.
        for item in shape_format:
            # Exercise every chunkable dimension of the shape.
            for dim in range(len(item[-1])):
                cpu_input, npu_input = create_common_tensor(item, 1, 100)
                cpu_start = time.time()
                cpu_output = self.cpu_op_exec(cpu_input, 2, dim)
                cpu_time += time.time() - cpu_start
                npu_start = time.time()
                npu_output = self.npu_op_exec(npu_input, 2, dim)
                npu_time += time.time() - npu_start
                self.assertRtolEqual(cpu_output, npu_output)
        self.assertTrue(npu_time < 30, f"execute time:{npu_time:.2f}s should be less than 30s")
        print(f"chunk to contiguous use: {cpu_time:.5f} s (CPU)")
        print(f"chunk to contiguous use: {npu_time:.5f} s (NPU)")
        print("TBE Ops used: NpuSlice")
# Register the test class for every supported device type except plain CPU.
instantiate_device_type_tests(TestChunkToContiguous, globals(), except_for="cpu")
if __name__ == "__main__":
    run_tests()
| 39.887324 | 95 | 0.653249 |
2f90fef02ffae3623e3cd46907cb9ff0e1f4b072
| 149 |
py
|
Python
|
UCEF-main/setup.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 6 |
2021-08-23T04:54:21.000Z
|
2021-12-06T09:45:20.000Z
|
UCEF-main/setup.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | null | null | null |
UCEF-main/setup.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-08-19T04:11:08.000Z
|
2021-12-13T12:06:35.000Z
|
import os
import subprocess
import sys

# Third-party packages required by this tool; installed/upgraded one at a
# time so a single failure does not abort the remaining installs.
for package in ["requests", "bs4", "html5lib", "colorama", "tqdm", "cloudscraper"]:
    print("installing", package)
    # Fix: run pip through the current interpreter instead of shelling out
    # to a bare `pip3` via os.system — this guarantees the install targets
    # the environment this script runs under and avoids the shell entirely.
    subprocess.call([sys.executable, "-m", "pip", "install", package, "-U"])
| 37.25 | 73 | 0.637584 |
c83c6a77a92db084f0d2352d15bb41b2909dab5e
| 19 |
py
|
Python
|
Programming Languages/Python/Theory/100_Python_Exercises/Exercises/Exercise 93/subdirs/level12/level121/6.py
|
jaswinder9051998/Resources
|
fd468af37bf24ca57555d153ee64693c018e822e
|
[
"MIT"
] | 101 |
2021-12-20T11:57:11.000Z
|
2022-03-23T09:49:13.000Z
|
Programming Languages/Python/Theory/100_Python_Exercises/Exercises/Exercise 93/subdirs/level12/level121/6.py
|
jaswinder9051998/Resources
|
fd468af37bf24ca57555d153ee64693c018e822e
|
[
"MIT"
] | 4 |
2022-01-12T11:55:56.000Z
|
2022-02-12T04:53:33.000Z
|
Programming Languages/Python/Theory/100_Python_Exercises/Exercises/Exercise 93/subdirs/level12/level121/6.py
|
jaswinder9051998/Resources
|
fd468af37bf24ca57555d153ee64693c018e822e
|
[
"MIT"
] | 38 |
2022-01-12T11:56:16.000Z
|
2022-03-23T10:07:52.000Z
|
print("Hello User")
| 19 | 19 | 0.736842 |
f1cf51694dc0c19b80a7cf7a9b662e3273942fe6
| 1,007 |
py
|
Python
|
python/product_sum_jit.py
|
Sultanow/aberkane-sultanow-cnts
|
5230c6ef25231f74b305a9210b9ffd47519cc460
|
[
"CC-BY-4.0"
] | null | null | null |
python/product_sum_jit.py
|
Sultanow/aberkane-sultanow-cnts
|
5230c6ef25231f74b305a9210b9ffd47519cc460
|
[
"CC-BY-4.0"
] | null | null | null |
python/product_sum_jit.py
|
Sultanow/aberkane-sultanow-cnts
|
5230c6ef25231f74b305a9210b9ffd47519cc460
|
[
"CC-BY-4.0"
] | 1 |
2022-01-24T19:04:08.000Z
|
2022-01-24T19:04:08.000Z
|
from numba.cuda import target
import numpy as np
import pandas as pd
from numba import jit
from gmpy2 import mpz
import sys
@jit('uint64(uint64,uint64)')
def prod_sum(x: np.uint64, n: np.uint64) -> int:
    """
    Return sum over l = 1..(n-2)//x of prod_{k=1..l} ((n - x*k) // 12).

    Fix: the eager Numba signature was 'void(uint64,uint64)' although the
    function returns `sum`; a void signature makes Numba discard/reject the
    return value, so it is declared as uint64 to match the annotation.
    NOTE(review): the accumulator can exceed 64 bits for large n — confirm
    the expected input range (the gmpy2 variant exists for arbitrary
    precision).
    """
    sum = 0
    for l in range(1, (n - 2) // x + 1):
        prod = 1
        for k in range(1, l + 1):
            prod *= (n - x * k) // 12
        sum += prod
    print(sum)  # kept: progress/debug output from the original
    return sum
@jit('void(uint64,uint64)')
def prod_sum_gmpy2(x: np.uint64, n: np.uint64) -> int:
    # Arbitrary-precision variant of prod_sum using gmpy2.mpz accumulators.
    # NOTE(review): the 'void' signature contradicts the returned value, and
    # @jit cannot compile mpz objects in nopython mode — confirm the
    # decorator is intentional here.
    # NOTE(review): unlike prod_sum, the //12 here applies to the whole
    # product update (prod = (prod*(n-x*k))//12, not prod*((n-x*k)//12)) —
    # confirm which rounding is intended.
    sum=mpz(0)
    for l in np.arange(1, (n-2)//x+1, dtype=np.uint64):
        prod=mpz(1)
        for k in np.arange(1, l+1, dtype=np.uint64):
            prod=prod*mpz(n-x*k)//12
        sum+=prod
    print(n)  # progress indicator: echoes the input n each call
    return sum
def main() -> int:
    """Read the max_bin_length column from the CSV, run prod_sum_gmpy2 on
    every value, and dump the resulting list to prod_sum.txt."""
    base = 3
    frame = pd.read_csv('notebook/s_3_max_bin_length.csv')
    lengths = frame['max_bin_length'].tolist()
    results = [prod_sum_gmpy2(base, n) for n in lengths]
    with open("prod_sum.txt", "w") as output:
        output.write(str(results))
    return 0


if __name__ == '__main__':
    sys.exit(main())
| 23.418605 | 55 | 0.596822 |
7b0fae6e80c7fffd47fc6f1b61484267f1763285
| 465 |
py
|
Python
|
tradingbot/core/algorithm.py
|
stefaniuk/trading-bot
|
403abd2b53caf686a6d2456e7eab124c670e7340
|
[
"MIT"
] | 3 |
2018-05-10T13:51:42.000Z
|
2020-07-05T16:43:45.000Z
|
tradingbot/core/algorithm.py
|
stefaniuk/trading-bot
|
403abd2b53caf686a6d2456e7eab124c670e7340
|
[
"MIT"
] | null | null | null |
tradingbot/core/algorithm.py
|
stefaniuk/trading-bot
|
403abd2b53caf686a6d2456e7eab124c670e7340
|
[
"MIT"
] | 1 |
2020-04-22T09:06:17.000Z
|
2020-04-22T09:06:17.000Z
|
import asyncio
from .grapher import Grapher
class Pivot(object):
    # Wraps a Grapher to compute pivot points (appears to be work in progress).
    def __init__(self, api, conf):
        self.api = api
        self.conf = conf
        self.graph = Grapher(self.conf)
        # NOTE(review): asyncio.gather() is called here outside any running
        # event loop — on modern Python this raises, and the coroutines are
        # never awaited.  Confirm how these updates are meant to be scheduled.
        self.graph.update = asyncio.gather(self.graph.updatePrice(),
                                           self.graph.candlestickUpdate())
    def getPivotPoint(self):
        # NOTE(review): bare attribute access has no effect — this method
        # looks unfinished (no pivot computation and no return value yet).
        self.graph
class trueStock(object):
    # Minimal value object holding a stock's name.
    def __init__(self, name):
        self.name = name
| 23.25 | 74 | 0.591398 |
9e5ed5cf715144501a2fef216a270f98cbfb8b47
| 1,607 |
py
|
Python
|
docker/api/api/createCron.py
|
healthIMIS/aha-kompass
|
7b7cae24502c0c0e5635c587cfef797a93ae02b5
|
[
"MIT"
] | 2 |
2021-03-23T20:32:38.000Z
|
2021-04-21T11:20:12.000Z
|
docker/api/api/createCron.py
|
healthIMIS/aha-kompass
|
7b7cae24502c0c0e5635c587cfef797a93ae02b5
|
[
"MIT"
] | 4 |
2021-04-19T11:00:55.000Z
|
2021-04-20T08:21:48.000Z
|
docker/api/api/createCron.py
|
healthIMIS/aha-kompass
|
7b7cae24502c0c0e5635c587cfef797a93ae02b5
|
[
"MIT"
] | null | null | null |
# Include utilities
import json
# Include db connection
from main import db
import os
# Include models
from models.cron import cron, CreateCron
from models.districts import addCron
# Create the cron jobs declared in cron_jobs.json and register them.
f = open("cron_jobs.json", "r")
cron_jobs = json.loads(f.read())
f.close()
for job in cron_jobs:
    # Normalise a missing or null "commands" entry to an empty dict.
    if "commands" not in job or job["commands"] == None:
        job["commands"] = {}
    c = CreateCron(job["type"], job["url"], commands=job["commands"])
    if "district_id" in job:
        addCron(job["district_id"], c)
    ### The following lines are for debug and testing only. Remove them in Production! #TODO
    if job["type"] == 1:
        # Check the complete document for changes (Debug only!)
        # Walk the "search" command chain via "next" links until a
        # remove_attributes step is found; build a cron from just that step.
        cmd = job["commands"]["search"]
        commands = {}
        while True:
            if cmd["type"] == "remove_attributes":
                commands = {
                    "search": {
                        "type":"remove_attributes",
                        "to_remove": cmd["to_remove"]
                    }
                }
                break
            if "next" in cmd:
                cmd = cmd["next"]
            else:
                break
        c = CreateCron(job["type"], job["url"], commands=commands)
        #if "district_id" in job:
            #addCron(job["district_id"], c)
# Run all cron jobs once so the database starts out populated.
# NOTE(review): hard-coded absolute path to a developer machine — confirm
# this works outside that environment.
print("Führe Cronjobs initial aus.")
os.system("/usr/bin/python3 /home/tobias/Corona-Info/api/cron.py")
# Mark everything as already read so the first run does not flag every entry.
print("Setze alles als gelesen.")
for job in cron.query.all():
    job.referenceHtml_lastRead = job.referenceHtml_lastCron
    job.unread_change = False
db.session.commit()
| 32.795918 | 92 | 0.576229 |
9ebe2ae451d9feff8bb62f79efffac4a1b3b205e
| 184 |
py
|
Python
|
2-resources/_PYTHON/code-examples-master/elasticsearch/python/es_show_info_no_auth.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
2-resources/_PYTHON/code-examples-master/elasticsearch/python/es_show_info_no_auth.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
2-resources/_PYTHON/code-examples-master/elasticsearch/python/es_show_info_no_auth.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | 1 |
2021-11-05T07:48:26.000Z
|
2021-11-05T07:48:26.000Z
|
from elasticsearch import Elasticsearch

# Connect to the AWS-hosted Elasticsearch domain over HTTPS (port 443),
# verifying certificates.
# NOTE(review): no authentication is configured — the cluster must allow
# unauthenticated access from this host; confirm that is intended.
es = Elasticsearch(
    hosts = [{'host': 'es.id.eu-west-1.es.amazonaws.com', 'port': 443}],
    use_ssl=True, verify_certs=True
)

# Fetch cluster info as a basic connectivity check (return value unused).
es.info()
| 20.444444 | 73 | 0.679348 |
7b888387bcbf10e9b7130a7a426b48baefe52ee7
| 1,050 |
py
|
Python
|
source/pkgsrc/lang/python27/patches/patch-Lib_distutils_command_build__ext.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1 |
2021-11-20T22:46:39.000Z
|
2021-11-20T22:46:39.000Z
|
source/pkgsrc/lang/python27/patches/patch-Lib_distutils_command_build__ext.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
source/pkgsrc/lang/python27/patches/patch-Lib_distutils_command_build__ext.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
$NetBSD: patch-Lib_distutils_command_build__ext.py,v 1.1 2018/06/17 19:21:21 adam Exp $
--- Lib/distutils/command/build_ext.py.orig 2014-12-10 15:59:34.000000000 +0000
+++ Lib/distutils/command/build_ext.py
@@ -511,8 +511,19 @@ class build_ext (Command):
# that go into the mix.
if ext.extra_objects:
objects.extend(ext.extra_objects)
+
+ # Two possible sources for extra linker arguments:
+ # - 'extra_link_args' in Extension object
+ # - LDFLAGS environment variable
+ # The environment variable should take precedence, and
+ # any sensible compiler will give precedence to later
+ # command line args. Hence we combine them in order:
extra_args = ext.extra_link_args or []
+ if os.environ.has_key('LDFLAGS'):
+ extra_args = list(extra_args)
+ extra_args.extend(string.split(os.environ['LDFLAGS']))
+
# Detect target language, if not provided
language = ext.language or self.compiler.detect_language(sources)
| 42 | 87 | 0.658095 |
b55fd4ba7404ff3b241639b293c369a27b44649f
| 408 |
py
|
Python
|
BugTracker-main/config.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-17T03:35:03.000Z
|
2021-12-08T06:00:31.000Z
|
BugTracker-main/config.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | null | null | null |
BugTracker-main/config.py
|
Zusyaku/Termux-And-Lali-Linux-V2
|
b1a1b0841d22d4bf2cc7932b72716d55f070871e
|
[
"Apache-2.0"
] | 2 |
2021-11-05T18:07:48.000Z
|
2022-02-24T21:25:07.000Z
|
from flask import Flask

app = Flask(__name__)
# Disable the SQLAlchemy modification-tracking event system (unused overhead).
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False

# Change these
# NOTE(review): placeholder secrets and credentials committed to source —
# these must be overridden (e.g. via environment variables) before deploying.
app.secret_key = 'really secret key'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.db'
# Outgoing mail (SMTP) settings.
app.config['SMTP_EMAIL'] = 'EMAIL'
app.config['SMTP_PASSWORD'] = 'Password'
app.config['SMTP_HOST'] = 'smtp.gmail.com'
app.config['SMTP_PORT'] = 587
app.config['ADMIN_EMAIL'] = 'ADMIN-EMAIL'
| 29.142857 | 63 | 0.740196 |
a93057f7a9c4ce3bbf5a0c58b6d64b8a05011a0a
| 8,562 |
py
|
Python
|
Packs/PerimeterX/Integrations/BotDefender/BotDefender_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/PerimeterX/Integrations/BotDefender/BotDefender_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/PerimeterX/Integrations/BotDefender/BotDefender_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
from BotDefender import Client, ip, perimeterx_get_investigate_details
from CommonServerPython import Common
HEADERS = {
'Authorization': 'Bearer token',
'Content-Type': 'application/json'
}
MOCK_BASE_URL = "https://api.perimeterx.com/v1/bot-defender/"
def _get_mock_url():
return f'{MOCK_BASE_URL}/investigate/mock?search=ip:test&tops=user-agent,path,socket_ip_classification'
def _test_base_score(requests_mock, actual_score, expected_score):
    """Drive the `ip` command against a mocked API that reports
    *actual_score* and check every returned indicator carries
    *expected_score* as its DBot verdict."""
    ip_addresses = ['5.79.76.181', '5.79.76.182']
    mock_url = _get_mock_url()
    requests_mock.get(mock_url, json={'max_risk_score': actual_score})
    client = Client(base_url=mock_url, verify=False, headers=HEADERS)
    thresholds = {
        'good_threshold': 5,
        'suspicious_threshold': 50,
        'bad_threshold': 90,
        'unknown_threshold': 0,
    }
    results = ip(client=client, args={'ip': ip_addresses}, thresholds=thresholds, api_key="test")
    for result in results:
        assert result.outputs_prefix == 'PerimeterX'
        assert isinstance(result.indicator, Common.IP)
        assert result.indicator.ip in ip_addresses
        assert result.indicator.dbot_score.score == expected_score
# Boundary checks for the score -> DBot verdict mapping used above:
# >= bad_threshold (90) -> 3, >= suspicious (50) -> 2, >= good (5) -> 1,
# otherwise -> 0 (unknown).
def test_ip_high_score(requests_mock):
    _test_base_score(requests_mock, actual_score=100, expected_score=3)
def test_ip_suspicious_score(requests_mock):
    _test_base_score(requests_mock, actual_score=60, expected_score=2)
def test_ip_good_score(requests_mock):
    _test_base_score(requests_mock, actual_score=10, expected_score=1)
def test_ip_unknown_score(requests_mock):
    _test_base_score(requests_mock, actual_score=1, expected_score=0)
def _test_perimeterx_get_investigate_details_base(requests_mock, search_type):
    """
    Mock the investigate endpoint with a canned payload and verify the
    command returns that payload unchanged under the PerimeterX prefix.

    Fix: the original asserted against a second, line-for-line copy of the
    same dict; comparing against ``mock_response`` itself removes ~60
    duplicated lines while checking exactly the same thing (the mock is
    serialized to JSON and parsed back, which round-trips these str/int
    values exactly).
    """
    mock_response = {
        'topUserAgents': [
            {'userAgentName': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64 Gecko) Chrome/67.0.3396.79 Safari/537.36',
             'count': 84},
            {'userAgentName': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64 Gecko) Chrome/65.0.3325.181 Safari/537.36',
             'count': 80},
            {'userAgentName': 'Mozilla/5.0 (Windows NT 6.1; Win64;x64 Gecko) Chrome/66.0.3359.170 OPR/53.0.2907.99',
             'count': 78},
            {'userAgentName': 'Mozilla/5.0 (Windows NT 6.1; WOW64 Gecko) Chrome/67.0.3396.87 (Edition Yx)',
             'count': 76},
            {'userAgentName': 'Mozilla/5.0 (Windows NT 10.0; Win64;x64 Gecko) Chrome/67.0.3396.87 OPR/54.0.2952.51',
             'count': 72},
            {'userAgentName': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64 Gecko) Chrome/68.0.3440.75 Safari/537.36',
             'count': 72},
            {'userAgentName': 'Mozilla/5.0 (Windows NT 5.1 Gecko) Chrome/49.0.2623.112 Safari/537.36', 'count': 72},
            {'userAgentName': 'Mozilla/5.0 (Windows NT 6.1 Gecko) Chrome/66.0.3359.181 Safari/537.36', 'count': 72},
            {'userAgentName': 'Mozilla/5.0 (Windows NT 6.1 Gecko) Chrome/66.0.3359.181 Safari/537.36 Kinza/4.7.2',
             'count': 72},
            {'userAgentName': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64 Gecko) Chrome/67.0.3396.79 Safari/537.36',
             'count': 72}
        ],
        'topURLPaths': [
            {'urlPath': '/favicon.ico', 'count': 3315},
            {'urlPath': '/favicon.png', 'count': 3253},
            {'urlPath': '/', 'count': 3212},
            {'urlPath': '/loginok/light.cgi', 'count': 1228},
            {'urlPath': '/cgi-bin/way-board.cgi', 'count': 1222},
            {'urlPath': '/phpmyadmin/', 'count': 205},
            {'urlPath': '-', 'count': 139},
            {'urlPath': '/images/icons/favicon.ico', 'count': 82},
            {'urlPath': '/test.php', 'count': 48}
        ],
        'topBlockedURLPaths': [
            {'blockedURLPath': '/', 'count': 1404},
            {'blockedURLPath': '/cgi-bin/way-board.cgi', 'count': 702},
            {'blockedURLPath': '/loginok/light.cgi', 'count': 702}
        ],
        'topIncidentTypes': [
            {'incidentType': 'Spoof', 'count': 2106},
            {'incidentType': 'Bot Behavior', 'count': 702}
        ],
        'catpchaSolves': 200,
        'trafficOverTime': [
        ],
        'pageTypeDistributions': [
            {'pageType': 'Login', 'count': 1228},
            {'pageType': 'Scraping', 'count': 739},
            {'pageType': 'Checkout', 'count': 139}
        ],
        'max_risk_score': 100,
        'ipClassifications': [
            {'class': 'Bad Reputation', 'name': 'Bad Reputation'},
            {'class': 'SharedIPs', 'name': 'Shared IPs'},
            {'class': 'DataCenter', 'name': 'TAG DCIP'}
        ]
    }
    ip_address = ['5.79.76.181']
    mock_url = _get_mock_url()
    requests_mock.get(mock_url, json=mock_response)
    client = Client(base_url=mock_url, verify=False, headers=HEADERS)
    args = {
        'search_type': search_type,
        'search_term': ip_address
    }
    thresholds = {
        'good_threshold': 5,
        'suspicious_threshold': 50,
        'bad_threshold': 90,
        'unknown_threshold': 0
    }
    response = perimeterx_get_investigate_details(client=client, args=args, thresholds=thresholds, api_key="test_key")
    assert response.outputs_prefix == 'PerimeterX'
    # The command must pass the API payload through untouched.
    assert response.outputs == mock_response
# Same scenario driven through each of the two supported search keys.
def test_perimeterx_get_investigate_details_by_socket_ip(requests_mock):
    return _test_perimeterx_get_investigate_details_base(requests_mock, search_type='socket_ip')
def test_perimeterx_get_investigate_details_by_true_ip(requests_mock):
    return _test_perimeterx_get_investigate_details_base(requests_mock, search_type='true_ip')
| 41.765854 | 118 | 0.588414 |
8d92628631632918a66823c3cd739b43d5e8f3b5
| 1,118 |
py
|
Python
|
scripts/fabsp/rime.py
|
swoiow/dsc
|
5860e6bfaa70b700e025533c406a6bc52d4ab74b
|
[
"MIT"
] | null | null | null |
scripts/fabsp/rime.py
|
swoiow/dsc
|
5860e6bfaa70b700e025533c406a6bc52d4ab74b
|
[
"MIT"
] | null | null | null |
scripts/fabsp/rime.py
|
swoiow/dsc
|
5860e6bfaa70b700e025533c406a6bc52d4ab74b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from fabric.api import sudo
def depend_redhat():
    """
    yum install -y gcc gcc-c++ boost boost-devel cmake make
    yum install glog glog-devel kyotocabinet kyotocabinet-devel marisa-devel yaml-cpp yaml-cpp-devel gtest gtest-devel libnotify zlib zlib-devel gflags gflags-devel leveldb leveldb-devel libnotify-devel ibus-devel
    cd /usr/src
    # install opencc
    curl -L https://github.com/BYVoid/OpenCC/archive/ver.1.0.5.tar.gz | tar zx
    cd OpenCC-ver.1.0.5/
    make
    make install
    ln -s /usr/lib/libopencc.so /usr/lib64/libopencc.so
    cd /usr/src
    git clone --recursive https://github.com/rime/ibus-rime.git
    cd /usr/src/ibus-rime
    ./install.sh
    # checkout master & pull
    cd /usr/src/ibus-rime/plum/
    git checkout master
    git pull origin master
    cd /usr/src/ibus-rime
    # skip submodule init
    sed -i 's/git submodule update --init/#git submodule update --init/g' ./install.sh
    ./install.sh
    """


# The docstring above doubles as the install script: each line is executed
# via sudo.  Fix: the original tested the *unstripped* line with
# startswith("#"), but every docstring line carries leading indentation, so
# the comment lines — and empty lines — were passed to the shell as well.
# Stripping first skips both.
for line in depend_redhat.__doc__.split("\n"):
    command = line.strip()
    if command and not command.startswith("#"):
        sudo(command)
| 28.666667 | 213 | 0.66458 |
9e01de145aae803dc6cd2de519ba30f9dfb50726
| 4,661 |
py
|
Python
|
experiment.py
|
christopherL91/KexExperiment
|
04822a4258c1faab6eada5079885a3947fa7f24a
|
[
"MIT"
] | null | null | null |
experiment.py
|
christopherL91/KexExperiment
|
04822a4258c1faab6eada5079885a3947fa7f24a
|
[
"MIT"
] | 1 |
2021-08-17T19:29:02.000Z
|
2021-08-17T19:29:02.000Z
|
experiment.py
|
christopherL91/KexExperiment
|
04822a4258c1faab6eada5079885a3947fa7f24a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# The MIT License (MIT)
# Copyright (c) 2016 Samuel Philipson, Christopher Lillthors
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import graphlab as gl
import os
from time import time
import csv
import datetime
import numpy as np
# Timestamped output directory so repeated runs never overwrite each other.
# NOTE(review): str(datetime.now()) contains spaces and colons — fine on
# POSIX, but an invalid directory name on Windows.
timestamp = str(datetime.datetime.now())
output_path = os.path.join('.', timestamp)
def f1_score(precision, recall):
    """Return the F1 score (harmonic mean) of *precision* and *recall*.

    Fix: returns 0.0 when both inputs are 0 instead of raising
    ZeroDivisionError — the F1 score is conventionally 0 in that case.
    """
    if precision + recall == 0:
        return 0.0
    return 2 * ((precision * recall)/(precision + recall))
def evaluate():
    # Cross-validated grid search over matrix-factorization rank using
    # GraphLab Create; writes per-rank RMSE/time/precision/recall/F1 rows
    # to a CSV under output_path and prints the best rank found.
    # NOTE: `5L` below is a Python 2 long literal — this module targets
    # Python 2 (GraphLab Create never shipped for Python 3).
    # Swap these depending on dataset.
    ratings = gl.SFrame.read_csv(os.path.join('.', 'ratings.dat'),
                                 delimiter='::',
                                 column_type_hints=[int,int,float,int])
    #ratings = gl.SFrame.read_csv(os.path.join('.', 'small-ratings.csv'),
    #                             column_type_hints=[int,int,float,int])
    num_ratings = ratings.num_rows()
    print('There are {} number of ratings in the dataset'.format(num_ratings))
    k = 4
    folds = gl.cross_validation.KFold(ratings, k)
    # Evaluation section
    # -------------------------------------
    seed = 5L
    iterations = 50
    ranks = [4]
    #ranks = range(4, 84, 4) # Change this
    solver = 'als' # als or sgd
    verbose = False
    min_error = float('inf')
    best_rank = -1
    # NOTE(review): best_iteration is initialized but never updated or read.
    best_iteration = -1
    with open(output_path + '/' + solver + '-' + str(iterations) + '-' + 'out.csv', 'w') as f:
        fieldnames = ['rmse', 'time', 'rank', 'precision', 'recall', 'f1-score']
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()
        for rank in ranks:
            # Accumulate metrics across the k folds, then average.
            error = 0
            time_total = 0
            precision = 0
            recall = 0
            for train, valid in folds:
                t0 = time()
                model = gl.recommender.factorization_recommender.create(train,
                                                                        user_id='userId',
                                                                        item_id='movieId',
                                                                        target='rating',
                                                                        solver=solver,
                                                                        max_iterations=iterations,
                                                                        random_seed=seed,
                                                                        num_factors=rank,
                                                                        verbose=verbose)
                # Stop clock and get execution time.
                time_total += round(time() - t0, 3)
                error += model.evaluate_rmse(valid, target='rating')['rmse_overall']
                precision_recall = model.evaluate_precision_recall(valid)['precision_recall_overall'].to_numpy()
                print(precision_recall)
                # Column 0 is the cutoff; columns 1 and 2 hold the
                # overall precision and recall for the first cutoff row.
                precision += precision_recall[0,1]
                recall += precision_recall[0,2]
            # Average the fold sums.
            error /= k
            time_total /= k
            precision /= k
            recall /= k
            # Write to file
            writer.writerow({'rmse': error,
                             'time': time_total,
                             'rank': rank,
                             'precision': precision,
                             'recall': recall,
                             'f1-score': f1_score(precision, recall)})
            print('For rank {} the RMSE is {}'.format(rank, error))
            # Track the rank with the lowest cross-validated RMSE.
            if error < min_error:
                min_error = error
                best_rank = rank
    print('The best model was trained with rank {}'.format(best_rank))
if __name__ == '__main__':
    # Create the per-run output directory before evaluate() writes into it.
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    evaluate()
| 41.616071 | 112 | 0.56018 |
9e1e340ce03aa5ef5ee5b6c8faf16522066b017c
| 5,271 |
py
|
Python
|
util/weibo_data_hashtag.py
|
xinlwa/HashTag
|
44d86b2d83db42990dfc0246e99d0c90bf461b0c
|
[
"Apache-2.0"
] | 1 |
2017-12-22T07:42:09.000Z
|
2017-12-22T07:42:09.000Z
|
util/weibo_data_hashtag.py
|
xinlwa/HashTag
|
44d86b2d83db42990dfc0246e99d0c90bf461b0c
|
[
"Apache-2.0"
] | null | null | null |
util/weibo_data_hashtag.py
|
xinlwa/HashTag
|
44d86b2d83db42990dfc0246e99d0c90bf461b0c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import config
import re
def hashtag_re(cont):
    """Return every '#...#' hashtag literal found in *cont* (delimiters kept)."""
    pattern = re.compile(r'#[^#]+#')
    return pattern.findall(cont)
def name_re(cont):
    """Return every @-mention in *cont*: the text after '@' up to the first
    whitespace or ':' (the '@' itself is not included)."""
    return [match.group(1) for match in re.finditer(r'@([^\s:]+)', cont)]
def weibo_hashtag_index(weibo_data, hashtag_index):
    """
    Count how often each '#...#' hashtag occurs in *weibo_data* and write a
    frequency-sorted index file to *hashtag_index*.

    Each input line is tab-separated; the post text is the concatenation of
    fields 9 onward.  Every output line is '<rank>,<count>,<hashtag>',
    most frequent first.
    """
    hashtag_dic = {}
    # Fixes: `with` guarantees the handles are closed even if parsing raises
    # (the original leaked them on error); text mode replaces the Python 2
    # only 'rb'+str mix, and .items() replaces the Python 2 only
    # .iteritems(), so the function also runs on Python 3.
    with open(weibo_data) as weibo:
        for line in weibo:
            lines = line.strip().split('\t')
            cont = ''.join(lines[9:])
            for tag in re.findall(r'#[^#]+#', cont):
                hashtag_dic[tag] = hashtag_dic.get(tag, 0) + 1
    hashtag_dic_sorted = sorted(hashtag_dic.items(), key=lambda asd: asd[1], reverse=True)
    print('hashtag number:{}'.format(len(hashtag_dic_sorted)))
    with open(hashtag_index, 'w') as hashtag:
        for i, entry in enumerate(hashtag_dic_sorted):
            hashtag.write('{},{},{}\n'.format(i, entry[1], entry[0]))
def weibo_hashtag_report(hashtag_index,weibo_data,weibo_hashtag_feature):
    # Emit one CSV row per hashtag occurrence in weibo_data: reposted tags
    # (posts containing @-mentions) are written with flag 1 plus the mention
    # chain, original posts with flag 0.
    # NOTE: Python 2 only — `except Exception,e` syntax and str.decode().
    # NOTE(review): the third parameter shadows the module-level function
    # weibo_hashtag_feature; it is used here as an output file path.
    # NOTE(review): post text starts at field 6 here but at field 9 in
    # weibo_hashtag_index — confirm which column layout is correct.
    weibo = open(weibo_data,'rb')
    hashtag = open(hashtag_index,'rb')
    weibo_hash_data = open(weibo_hashtag_feature,'wb')
    # Map hashtag text -> its rank from the index file (rank,count,tag CSV;
    # the tag is re-joined in case it contained commas).
    hashtag_dict = {}
    for line in hashtag:
        lines = line.strip().split(',')
        hashtag_dict[','.join(lines[2:]).decode('utf-8')] = lines[0]
    hashtag.close()
    # NOTE(review): hashtag_dict is built but never read below — confirm
    # whether a lookup step is missing.
    for line in weibo:
        lines = line.strip().split('\t')
        cont = ''.join(lines[6:])
        uid = lines[0]
        pt = lines[1]
        uid_list = re.findall(r'@([^\s:]+)',cont)
        hashtag_list = re.findall(r'#[^#]+#',cont)
        if len(uid_list) > 0:
            # Repost: only count hashtags appearing after the last mention
            # (i.e. in the original message at the end of the repost chain).
            hashtag_list_first = re.findall(r'#[^#]+#',cont.split(uid_list[-1])[-1])
            if len(hashtag_list_first) > 0:
                for _ in hashtag_list_first:
                    try:
                        weibo_hash_data.write('{},{},{},1,{}\n'.format(uid,pt,_,','.join(uid_list)))
                    except Exception,e:
                        # Best-effort: log the failing tag and keep going.
                        print('{},{}\n'.format(_,e))
        else:
            # Original post: flag 0, no mention chain.
            if len(hashtag_list) > 0:
                for _ in hashtag_list:
                    weibo_hash_data.write('{},{},{},0\n'.format(uid,pt,_))
    weibo.close()
    weibo_hash_data.close()
def weibo_hashtag_feature(weibo_name, weibo_data):
    """
    Build and return the screen-name -> uid mapping from *weibo_name*
    (tab-separated ``uid<TAB>name`` lines).

    NOTE(review): the second loop over *weibo_data* parses each line but
    never uses the result — this function looks unfinished; the read is
    kept so the file-access side effect is unchanged.
    """
    name_uid = {}
    with open(weibo_name) as f:
        for line in f:
            temp = line.strip().split('\t')
            # Fix: the original did `name_uid[temp[1]] = name_uid[temp[0]]`,
            # which raises KeyError on the very first line; map name -> uid
            # exactly as the sibling weibo_hive_feature does.
            name_uid[temp[1]] = temp[0]
    with open(weibo_data) as f:
        for line in f:
            temp = line.strip().split('\t')
    # Returning the mapping is new but backward compatible (the original
    # implicitly returned None, which no caller could use).
    return name_uid
def weibo_hive_feature(weibo_data_original,weibo_data_report,weibo_name,weibo_feature):
	# Build cascade/retweet features: for every original post that was
	# retweeted, write one line per hashtag of the form
	#   "<mid>_<tag>\t<root_uid>\t<publish_time>\t<retweets>\t<root_uid>:0 <path>:<delay> ..."
	# where each retweet contributes a "/"-joined uid chain and its delay
	# (seconds) relative to the original publish time.
	# NOTE(review): Python 2 only (dict.iteritems below); hashtag_re /
	# name_re / config are defined elsewhere in this module.
	weibo_name_uid = {}
	with open(weibo_name,'rb') as f:
		data = f.readlines()
		for _ in data:
			temp = _.strip().split('\t')
			# screen name -> uid, used to resolve @mention chains
			weibo_name_uid[temp[1].decode('utf-8')] = temp[0]
	#uid,pt,max(nfri),max(nfans),max(nrply),max(nfwd),wb_mid,wb_r_mid,wb_r_uid,wb_msg_type,cont
	original_mid_hashtag = {}   # original mid -> list of hashtags
	original_mid_uid_time = {}  # original mid -> [author uid, publish time]
	def data_parse(content):
		# Split one dump line into the fields used below; the content
		# itself may contain tabs, hence the re-join of temp[10:].
		temp = content.strip().split('\t')
		cont = '\t'.join(temp[10:])
		hashtag_list = hashtag_re(cont)
		name_list = name_re(cont)
		uid = temp[0]
		pt = temp[1]
		wb_mid = temp[6]
		wb_r_mid = temp[7]
		wb_r_uid = temp[8]
		return uid,pt,wb_mid,wb_r_mid,wb_r_uid,hashtag_list,name_list
	with open(weibo_data_original,'rb') as f:
		data = f.readlines()
		for _ in data:
			uid,pt,wb_mid,wb_r_mid,wb_r_uid,hashtag_list,name_list = data_parse(_)
			original_mid_hashtag[wb_mid] = hashtag_list
			original_mid_uid_time[wb_mid] = [uid,pt]
	print('weibo_data_original_end')
	original_mid_hashtag_report = {}  # original mid -> list of retweet records
	with open(weibo_data_report,'rb') as f:
		data = f.readlines()
		for _ in data:
			uid,pt,wb_mid,wb_r_mid,wb_r_uid,hashtag_list,name_list = data_parse(_)
			if wb_r_mid in original_mid_hashtag:
				if wb_r_mid not in original_mid_hashtag_report:
					original_mid_hashtag_report[wb_r_mid] = []
				if len(name_list) == 0:
					# direct retweet: [time, retweeted uid, retweeter uid]
					original_mid_hashtag_report[wb_r_mid].append([pt,wb_r_uid,uid])
				else:
					# retweet via an @mention chain: resolve the mentioned
					# names (in reverse order) to uids where possible
					report_temp = original_mid_hashtag_report[wb_r_mid]
					uid_report = [pt,wb_r_uid]
					for name in name_list[::-1]:
						if name.decode('utf-8') in weibo_name_uid:
							uid_report.append(weibo_name_uid[name.decode('utf-8')])
					uid_report.append(uid)
					report_temp.append(uid_report)
					original_mid_hashtag_report[wb_r_mid] = report_temp
	print('weibo_data_report')
	with open(weibo_feature,'wb') as w:
		for key,value in original_mid_hashtag_report.iteritems():
			message_id = key
			report_list = value
			root_user_id,publish_time = original_mid_uid_time[key]
			retweet_number = len(report_list)
			hashtag_list_temp = original_mid_hashtag[key]
			message_times = len(hashtag_list_temp)
			if(message_times == 0):
				continue
			if(int(config.split) == 1):
				# config.split == 1: emit only the first hashtag per post
				message_times = 1
			for i in range(message_times):
				w.write('{}_{}\t{}\t{}\t{}\t{}:0'.format(message_id,hashtag_list_temp[i],root_user_id,publish_time,retweet_number,root_user_id))
				for _ in report_list:
					pt,uid_report = int(_[0]),_[1:]
					if int(pt) < int(publish_time):
						# retweet timestamp earlier than publish time:
						# assume a 12h clock wrap and correct for it
						pt += 12 * 3600
					w.write(' {}:{}'.format('/'.join(uid_report), pt - int(publish_time)))
				w.write('\n')
if __name__ == '__main__':
	# Pipeline stages; earlier stages are kept commented out and are
	# enabled one at a time as the data flows through.
	#weibo_hashtag_index(config.weibo_data_hive,config.weibo_hashtag_index)
	#weibo_hashtag_report(config.weibo_hashtag_index,config.weibo_data_hive,config.weibo_hashtag_feature)
	#hashtag_number(config.weibo_data_hive)
	#weibo_original(config.weibo_data_hive_path,config.weibo_original,config.weibo_report)
	weibo_hive_feature(config.weibo_original,config.weibo_report,config.weibo_name,config.weibo_feature)
| 32.94375 | 132 | 0.690761 |
19183bf4865e457fe5be79a84041196aede2a958
| 2,147 |
py
|
Python
|
files/meas/Experiment_12/pickle_data_compression_patrice.py
|
mimeiners/ans
|
382e000e687d5ec0c80a84223087e60ed656a1dd
|
[
"MIT"
] | null | null | null |
files/meas/Experiment_12/pickle_data_compression_patrice.py
|
mimeiners/ans
|
382e000e687d5ec0c80a84223087e60ed656a1dd
|
[
"MIT"
] | null | null | null |
files/meas/Experiment_12/pickle_data_compression_patrice.py
|
mimeiners/ans
|
382e000e687d5ec0c80a84223087e60ed656a1dd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: Patrice Bönig
"""
import pandas as pd
import os
# ------------------------
def compression(file_type, seperator, path):
    r'''
    Pickle-compress every *.txt/*.TXT or *.csv/*.CSV file in a folder.

    Parameters
    ----------
    file_type : string
        Which files to compress: "txt" or "csv" (anything else is a no-op,
        matching the original behaviour).
    seperator : string
        For "txt" files: the decimal separator passed to pandas.
        For "csv" files: the column separator.
    path : string
        Folder containing the files; "" means the current directory.
        NOTE: the function chdir()s into this folder and does not change
        back (behaviour preserved from the original).

    Returns
    -------
    None.
    '''
    # Resolve the working directory (default is the current one).
    os.chdir("./")
    if path != "":
        os.chdir(path)
    files = os.listdir("./")

    # Map the requested type to its extension spellings and the pandas
    # reader keyword arguments.  This removes the two duplicated branch
    # bodies of the original implementation while keeping their per-type
    # differences (decimal= for txt, sep=/engine= for csv).
    if file_type == "txt":
        endings = [".txt", ".TXT"]
        read_kwargs = {'decimal': str(seperator)}
    elif file_type == "csv":
        endings = ["csv", "CSV"]
        read_kwargs = {'sep': seperator, 'engine': 'python'}
    else:
        return  # unknown type: nothing to do

    i = 0
    for end in endings:  # iterate over both case spellings
        for name in files:
            if name.endswith(end):
                i = i + 1
                print("converting file " + str(i) + " with ending " + end + " ...")
                df = pd.read_table(name, **read_kwargs)
                # '.pkl.xz' makes to_pickle infer LZMA compression
                df.to_pickle(name + '.pkl.xz', compression='infer')
    print("\n" + str(i) + " files where converted, finished.")
# ------------------------
# Interactive driver: ask the user what to compress and where, then run
# the compression over that folder.
print("\n With this little program all files in one folder where compressed, via pickle.\n")
answer = input("Which file type should be comprossed? [txt/csv]: ")
seperator = input("Which seperator? [,/;]: ")
path = input("In which folder are they? default -> [./]: ")
compression(answer, seperator, path)
| 28.626667 | 100 | 0.516535 |
198697e8eae31a59e532ce82b75a39c6394f3c7b
| 756 |
py
|
Python
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/screed-0.7.1-py2.7.egg/screed/tests/test_nodb.py
|
poojavade/Genomics_Docker
|
829b5094bba18bbe03ae97daf925fee40a8476e8
|
[
"Apache-2.0"
] | 1 |
2019-07-29T02:53:51.000Z
|
2019-07-29T02:53:51.000Z
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/screed-0.7.1-py2.7.egg/screed/tests/test_nodb.py
|
poojavade/Genomics_Docker
|
829b5094bba18bbe03ae97daf925fee40a8476e8
|
[
"Apache-2.0"
] | 1 |
2021-09-11T14:30:32.000Z
|
2021-09-11T14:30:32.000Z
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/screed-0.7.1-py2.7.egg/screed/tests/test_nodb.py
|
poojavade/Genomics_Docker
|
829b5094bba18bbe03ae97daf925fee40a8476e8
|
[
"Apache-2.0"
] | 2 |
2016-12-19T02:27:46.000Z
|
2019-07-29T02:53:54.000Z
|
import screed
import os
from screed.DBConstants import fileExtension
def test_nodb():
    """
    Opening a non-existent screed database must raise ValueError.
    """
    try:
        screed.ScreedDB('foo')
    except ValueError:
        pass
    else:
        # Reaching this point means no exception was raised at all.
        assert 1 == 0
def test_wrongdb():
    """
    Opening a file that is not a screed database must raise TypeError.
    """
    blah = 'blah_screed'
    try:
        # Create an empty placeholder file that is clearly not a database.
        with open(blah, 'wb'):
            pass
        screed.ScreedDB(blah)
        os.unlink(blah)
        assert 1 == 0  # should have raised before this line
    except TypeError:
        os.unlink(blah)
| 23.625 | 60 | 0.607143 |
27b0adcd81abeb1d4e3805b947a58c5e058bd937
| 1,589 |
py
|
Python
|
python/python_backup/PRAC_PYTHON/ab17.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python/python_backup/PRAC_PYTHON/ab17.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python/python_backup/PRAC_PYTHON/ab17.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
# Tuple basics (Python 2 script): creation, min/max, repetition,
# membership tests, slicing, and building a tuple from user input.
first=(1,2,3);
second=(2,3,4);
print first
print second
print"Maximum of first touple:", max(first);
print"Miniimum of second touple:", min(second);
print "Touple in 3 times:",first*3;
print "check the value 1 present in first touple:", 1 in first;
print "check the value 4 present in second touple:", 4 in second;
print "check the value 5 present in second touple:", 5 in second;
# A tuple may nest other tuples and mix element types.
third=((1,2,3),"India",3.5);
print third
# Concatenating tuples creates a new tuple.
fourth=first+second+third;
print fourth;
print "1st 2 values of touple first",first[0:2];
print "1st 2 values of touple third",third[0][0:2];
print "1st 2 characters of touple third",third[1][0:2];
print " values of touple first with -1 index",first[-1];
print " values of touple second with -3 index",second[-3];
print " values of touple second with -2 index",second[-2];
print "whole values of touple first with no index",first[:];
print "whole values of touple first when index is 8 ",first[:8];
print "whole values of touple first when index is upto -1 ",first[:-1];
print "whole values of touple first when index is upto -2 ",first[:-2];
print "whole values of touple first when index is upto -3 ",first[:-3];
print "whole values of touple first when index starts from 0 ",first[0:];
# Reading values into a tuple: tuples are immutable, so convert to a
# list, insert the values, and convert back.
# declare an empty tuple
a=();
# convert the tuple into a list
b=list(a);
# read five values into the list
for i in range(0,5,1):
	k=input("data:")
	b.insert(i,k)
# convert the list back into a tuple
a=tuple(b);
# print the tuple
for i in a:
	print i
| 29.425926 | 76 | 0.678414 |
8b81d6d4a530adb5913c5f8ae7c3293863e59cba
| 121 |
py
|
Python
|
Curso_Python/Secao2-Python-Basico-Logica-Programacao/25_pass_ellipsis/25_pass_ellipsis.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso_Python/Secao2-Python-Basico-Logica-Programacao/25_pass_ellipsis/25_pass_ellipsis.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso_Python/Secao2-Python-Basico-Logica-Programacao/25_pass_ellipsis/25_pass_ellipsis.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
# Demonstrates the two "do nothing" placeholders: pass and Ellipsis.
valor = False
if valor:
    pass # placeholder so real code can be written here later
    ... # the Ellipsis literal works the same way as a placeholder
else:
    print('Tchau')
| 20.166667 | 45 | 0.636364 |
9a5f16cf95ea629c202c9321fbadbf715ebe7ea4
| 2,452 |
py
|
Python
|
Objekte/lighting.py
|
fdienesch/Solar-System
|
617096bcb525a19ee94ff86948b53bd8a65e4386
|
[
"MIT"
] | null | null | null |
Objekte/lighting.py
|
fdienesch/Solar-System
|
617096bcb525a19ee94ff86948b53bd8a65e4386
|
[
"MIT"
] | null | null | null |
Objekte/lighting.py
|
fdienesch/Solar-System
|
617096bcb525a19ee94ff86948b53bd8a65e4386
|
[
"MIT"
] | null | null | null |
# pylint: disable=wildcard-import, unused-wildcard-import, invalid-name, import-error, too-many-instance-attributes, too-few-public-methods, undefined-variable, missing-docstring
__author__ = 'floriandienesch'
from OpenGL.GL import *
class Lighting(object):
    """
    Manages the set of lights in the scene.

    Lights are stored in a dict keyed by their OpenGL id (GL_LIGHTn).
    """
    def __init__(self):
        # id -> Light instance
        self.lights = {}

    def enableLighting(self):
        """
        Turn OpenGL lighting on globally.
        """
        glEnable(GL_LIGHTING)

    def disableLighting(self):
        """
        Turn OpenGL lighting off globally.
        """
        glDisable(GL_LIGHTING)

    def addLight(self, id):
        """
        Register (and enable) a new light in the scene.

        :param id: OpenGL id of the light, e.g. GL_LIGHTn
        """
        self.lights[id] = Light(id)

    def getLight(self):
        # Returns a string representation of the registered lights.
        return str(self.lights)

    def setLight(self, id, position, diffuse, specular, ambient):
        """
        Configure the parameters of a previously added light.

        :param id: OpenGL id of the light, e.g. GL_LIGHTn
        :param position: light position as a 4-component list
        """
        self.lights[id].set(position, diffuse, specular, ambient)

    def render(self):
        """
        Push every registered light's parameters to OpenGL.
        """
        for entry in self.lights.values():
            entry.render()
class Light(object):
    """
    Wraps a single OpenGL light source (GL_LIGHTn).
    """
    def __init__(self, id):
        self.id = id
        glEnable(id)
        # parameters are empty until set() is called
        self.position = []
        self.diffuse = []
        self.specular = []
        self.ambient = []

    def set(self, position, diffuse, specular, ambient):
        """
        Store the light parameters; they are pushed to OpenGL in render().
        """
        self.position = position
        self.diffuse = diffuse
        self.specular = specular
        self.ambient = ambient

    def render(self):
        """
        Upload the stored parameters to the OpenGL light.
        """
        for param, value in ((GL_POSITION, self.position),
                             (GL_DIFFUSE, self.diffuse),
                             (GL_SPECULAR, self.specular),
                             (GL_AMBIENT, self.ambient)):
            glLight(self.id, param, value)
| 27.550562 | 178 | 0.589315 |
560148da63e9c3794cccdc2a118830f3df08b3f1
| 215 |
py
|
Python
|
Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-003/ph-3.11-arithmetic-with-int-f.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-003/ph-3.11-arithmetic-with-int-f.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-003/ph-3.11-arithmetic-with-int-f.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
# Read two integers from the user and print their sum.
number1 = int(input("Please enter a number and press enter: "))
number2 = int(input("Please enter another nunber and press enter "))
print("number1 + number2 =", number1 + number2)
| 35.833333 | 63 | 0.739535 |
ef54ea6d9db2c0043ba2936fd8b009d8e746c082
| 659 |
py
|
Python
|
simple-text-editor.py
|
kautuk-desai/HackerRank
|
6c0b22800ea2e40d118d6a0a5c0ece067a0a69bf
|
[
"MIT"
] | null | null | null |
simple-text-editor.py
|
kautuk-desai/HackerRank
|
6c0b22800ea2e40d118d6a0a5c0ece067a0a69bf
|
[
"MIT"
] | null | null | null |
simple-text-editor.py
|
kautuk-desai/HackerRank
|
6c0b22800ea2e40d118d6a0a5c0ece067a0a69bf
|
[
"MIT"
] | null | null | null |
num_queries = int(input())  # first stdin line: how many operations follow
def executeQuery(num_queries):
    """Process num_queries text-editor operations read from stdin.

    Operations (one per line): "1 s" append s, "2 k" delete the last k
    characters, "3 i" print the i-th character (1-based), "4" undo the
    most recent append/delete.
    """
    history = []  # snapshots of the text taken before each mutation
    text = ""
    for _ in range(num_queries):
        parts = input().split()
        op = parts[0]
        if op == '4':
            # undo: restore the most recent snapshot
            text = history.pop()
        elif op == '1':
            history.append(text)
            text += parts[1]
        elif op == '2':
            history.append(text)
            k = int(parts[1])
            text = text[:len(text) - k]
        else:
            # op '3': print the character at the 1-based position
            print(text[int(parts[1]) - 1])
executeQuery(num_queries)  # run the editor over the remaining stdin lines
| 25.346154 | 46 | 0.420334 |
32c1a8f139ac3dbd40c063cf25154c1997a6e6cc
| 2,814 |
py
|
Python
|
Prototype/main prototype/TabData.py
|
fowado/BauphysikSE1
|
eb8805196c8fbf99a879c40c5e0725d740c5a0de
|
[
"CC-BY-4.0"
] | 4 |
2019-12-03T16:13:09.000Z
|
2019-12-11T23:22:58.000Z
|
Prototype/main prototype/TabData.py
|
fowado/BauphysikSE1
|
eb8805196c8fbf99a879c40c5e0725d740c5a0de
|
[
"CC-BY-4.0"
] | 65 |
2019-12-08T17:43:59.000Z
|
2020-08-14T15:26:21.000Z
|
Prototype/main prototype/TabData.py
|
fowado/BauphysikSE1
|
eb8805196c8fbf99a879c40c5e0725d740c5a0de
|
[
"CC-BY-4.0"
] | null | null | null |
import Calculation
from LayerData import LayerData
class TabData:
    """Model object for one tab: its layer list, boundary resistances and
    temperatures, and the derived R/U values."""

    def __init__(self, mode, name, layers=None, rright=None, rleft=None, rsum=None, rt=None, tright=None, tleft=None, u=None, currentFileLocation=None):
        # Defaults are resolved here rather than in the signature so every
        # instance gets its own fresh objects (mutable-default pitfall).
        self.name = name                                      # title of tab
        # Bug fix: the original default was ``map(LayerData, [])`` which in
        # Python 3 yields a map object, not a list, so add_layer /
        # insert_layer would fail on it.  An empty list is the equivalent
        # value in both Python versions.
        self.layers = layers if layers is not None else []    # layers in current tab
        self.rright = rright if rright is not None else 0.0   # R adjacent to the right-most layer
        self.rleft = rleft if rleft is not None else 0.0      # R adjacent to the left-most layer
        self.rsum = rsum if rsum is not None else 0.0         # sum of all layers' R values
        self.tright = tright if tright is not None else 0.0   # temperature on the right
        self.tleft = tleft if tleft is not None else 0.0      # temperature on the left
        self.mode = mode                                      # mode of tab
        self.u = u if u is not None else 0.0                  # inverse of rt
        self.rt = rt if rt is not None else 0.0               # rsum + rright + rleft
        self.currentFileLocation = currentFileLocation        # current save path, or None

    def add_layer(self, layer):
        """Append a layer (data) to the end of the layer list."""
        self.layers.append(layer)

    def pop_layer(self, index):
        """Remove and return the layer (data) at the given index."""
        return self.layers.pop(index)

    def remove_layer(self, index):
        """Remove the layer (data) at the given index."""
        l = self.pop_layer(index)
        del l

    def move_layer(self, indexFrom, indexTo):
        """Move ("drag and drop") a layer from position indexFrom to indexTo.

        When moving towards the front, the pop shifts the later elements,
        hence the ``indexTo - 1`` correction (behaviour kept as-is).
        """
        if indexFrom <= indexTo:
            self.insert_layer(self.pop_layer(indexFrom), indexTo)
        else:
            self.insert_layer(self.pop_layer(indexFrom), indexTo - 1)

    def insert_layer(self, layer, index):
        """Insert a layer (data) at the given index."""
        self.layers.insert(index, layer)

    def reverse_layers(self):
        """Reverse the order of the layer list in place."""
        self.layers.reverse()

    def calculate(self):
        """Run the tab calculation via Calculation.tab_calc.

        Fix: the original wrapped the call in ``except ZeroDivisionError:
        raise ZeroDivisionError``, which replaced the exception with a
        fresh, message-less one.  A bare ``raise`` re-raises the original
        exception (same type, so existing callers are unaffected) while
        keeping its message and traceback.
        """
        try:
            Calculation.tab_calc(self)
        except ZeroDivisionError:
            raise
| 43.96875 | 278 | 0.636816 |
f5c9b4413911f01ede2138fa69aaf59f213c00e0
| 200 |
py
|
Python
|
packages/watchmen-pipeline-kernel/src/watchmen_pipeline_kernel/pipeline/__init__.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-pipeline-kernel/src/watchmen_pipeline_kernel/pipeline/__init__.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-pipeline-kernel/src/watchmen_pipeline_kernel/pipeline/__init__.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
from .monitor_log_invoker import create_monitor_log_pipeline_invoker
from .pipeline_invoker import try_to_invoke_pipelines, try_to_invoke_pipelines_async
from .pipeline_trigger import PipelineTrigger
| 50 | 84 | 0.915 |
c221eb4db0d383cf52accc786ceef53b9de40731
| 176 |
py
|
Python
|
Global.py
|
TechLabCommunity/SaintPeterTalent
|
eb80237de4d73f3a99e82e02edb714f5057bd559
|
[
"MIT"
] | 1 |
2019-01-03T12:59:19.000Z
|
2019-01-03T12:59:19.000Z
|
Global.py
|
TechLabCommunity/SaintPeterTalent
|
eb80237de4d73f3a99e82e02edb714f5057bd559
|
[
"MIT"
] | null | null | null |
Global.py
|
TechLabCommunity/SaintPeterTalent
|
eb80237de4d73f3a99e82e02edb714f5057bd559
|
[
"MIT"
] | null | null | null |
import xml.etree.ElementTree as ET
PATH_CONFIG = './config.xml'

def get_value_config(fromroot, key):
    """Return the text of the <key> element under <fromroot> in config.xml."""
    root = ET.parse(PATH_CONFIG).getroot()
    return root.find(fromroot).find(key).text
| 25.142857 | 72 | 0.755682 |
a0432c180af6562b2b7d31520d78264dcb9327df
| 307 |
py
|
Python
|
code/Functions.py
|
ju1-eu/pyAnfaenger
|
59440e99d69582ee2c7a022c8b819e7c78f41ac0
|
[
"MIT"
] | null | null | null |
code/Functions.py
|
ju1-eu/pyAnfaenger
|
59440e99d69582ee2c7a022c8b819e7c78f41ac0
|
[
"MIT"
] | null | null | null |
code/Functions.py
|
ju1-eu/pyAnfaenger
|
59440e99d69582ee2c7a022c8b819e7c78f41ac0
|
[
"MIT"
] | null | null | null |
def list_max(my_list):
    """Return the largest element of my_list (must be non-empty)."""
    result = my_list[0]
    for candidate in my_list[1:]:
        if candidate > result:
            result = candidate
    return result
# sample lists
l1 = [-2, 1, -10]
l2 = [-20, 123, 22]
# compute the maximum of each list
l1_max = list_max(l1)
l2_max = list_max(l2)
| 17.055556 | 36 | 0.579805 |
266217a40cb8a8617ee2cc7e27fd77cfcdfed940
| 3,719 |
py
|
Python
|
forge/lib/helpers.py
|
Pandinosaurus/3d-forge
|
d631e14a9351911c3e5612c73c1608d97ed547d2
|
[
"BSD-3-Clause"
] | 31 |
2015-07-13T15:36:50.000Z
|
2022-02-07T21:37:51.000Z
|
forge/lib/helpers.py
|
Pandinosaurus/3d-forge
|
d631e14a9351911c3e5612c73c1608d97ed547d2
|
[
"BSD-3-Clause"
] | 109 |
2015-04-24T10:03:24.000Z
|
2019-04-12T13:34:01.000Z
|
forge/lib/helpers.py
|
Pandinosaurus/3d-forge
|
d631e14a9351911c3e5612c73c1608d97ed547d2
|
[
"BSD-3-Clause"
] | 16 |
2015-10-03T06:03:22.000Z
|
2022-03-31T08:24:37.000Z
|
# -*- coding: utf-8 -*-
from osgeo import osr, ogr
import requests
import os
import gzip
import sys
import time
import datetime
import cStringIO
from pyproj import Proj, transform
def createBBox(center, length):
    """Return the axis-aligned square box around *center* with side *length*.

    :param center: (x, y) pair marking the box centre
    :param length: full side length of the box
    :return: [xmin, ymin, xmax, ymax]
    """
    half = length / 2
    cx, cy = center[0], center[1]
    return [cx - half, cy - half, cx + half, cy + half]
def transformCoordinate(wkt, srid_from, srid_to):
    # Reproject a WKT geometry between two EPSG coordinate systems.
    #
    # wkt: geometry in Well-Known-Text form.
    # srid_from / srid_to: numeric EPSG codes of source and target CRS.
    # Returns the transformed ogr.Geometry object (not WKT).
    srid_in = osr.SpatialReference()
    srid_in.ImportFromEPSG(srid_from)
    srid_out = osr.SpatialReference()
    srid_out.ImportFromEPSG(srid_to)
    geom = ogr.CreateGeometryFromWkt(wkt)
    geom.AssignSpatialReference(srid_in)
    geom.TransformTo(srid_out)
    return geom
def gzipFileContent(filePath):
    # Gzip-compress the file at filePath into an in-memory buffer.
    #
    # Returns a cStringIO.StringIO positioned at the start of the
    # compressed data, ready for reading/upload.
    compressed = cStringIO.StringIO()
    gz = gzip.GzipFile(fileobj=compressed, mode='w')
    # Fix: the original opened the file with a bare open() and closed it
    # manually at the end; the with-block guarantees the handle is
    # released even if writing fails.
    with open(filePath) as content:
        gz.writelines(content)
    gz.close()
    compressed.seek(0)
    return compressed
def gzipFileObject(data):
    # Gzip-compress an in-memory file object into a fresh buffer.
    #
    # data: any object exposing getvalue() (e.g. a StringIO).
    # Compression level 5 trades speed for ratio.
    # Returns a cStringIO.StringIO rewound to the start.
    sink = cStringIO.StringIO()
    writer = gzip.GzipFile(fileobj=sink, mode='w', compresslevel=5)
    writer.write(data.getvalue())
    writer.close()
    sink.seek(0)
    return sink
def isShapefile(filePath):
    """Return True when *filePath* names an ESRI shapefile (ends in '.shp')."""
    return filePath[-4:] == '.shp'
def timestamp():
    """Return the current local time as a 'YYYYmmddHHMMSS' string."""
    now = datetime.datetime.fromtimestamp(time.time())
    return now.strftime('%Y%m%d%H%M%S')
def error(msg, exitCode=1, usage=None):
    """Print 'Error: <msg>.' (plus optional usage help) and exit.

    :param msg: human readable error description
    :param exitCode: process exit status (default 1)
    :param usage: optional zero-argument callable printing usage help
    """
    for line in ('Error: %(msg)s.' % {'msg': msg}, ''):
        print(line)
    if usage is not None:
        usage()
    sys.exit(exitCode)
def resourceExists(path, headers=None):
    """Return True when a HEAD request to *path* answers 200 OK.

    :param path: URL to probe
    :param headers: optional dict of extra HTTP headers (None == no extras;
        replaces the original mutable-default ``{}``, which requests treats
        identically)
    :raises requests.exceptions.ConnectionError: on connection problems.
        Fix: the original caught ConnectionError only to raise a fresh
        ``ConnectionError(e)``, discarding the traceback; the exception now
        propagates unchanged (same type, so callers are unaffected).
    """
    r = requests.head(path, headers=headers)
    return r.status_code == requests.codes.ok
def cleanup(filePath, extensions=None):
    """Delete the sibling member files of a shapefile.

    :param filePath: path to any shapefile member; its directory and base
        name (text before the first '.') determine what gets removed
    :param extensions: iterable of extensions to delete; defaults to the
        four standard shapefile parts.  A ``None``/tuple default replaces
        the original mutable list default.
    :raises OSError: if one of the expected files cannot be removed
        (behaviour kept -- the original caught OSError only to re-raise a
        fresh ``OSError(e)``, losing the traceback; it now propagates
        unchanged)
    """
    if extensions is None:
        extensions = ('.shp', '.shx', '.prj', '.dbf')
    if os.path.isfile(filePath):
        dirName = os.path.dirname(filePath)
        baseName = os.path.basename(filePath).split('.')[0]
        for ext in extensions:
            os.remove('%s/%s%s' % (dirName, baseName, ext))
# Approximation only, arc must be provided in degrees
def degreesToMeters(arc):
    # Convert a longitude arc (degrees) to meters by projecting two points
    # around the centre of Switzerland from WGS84 into LV03 (EPSG:21781)
    # and measuring their easting difference.  Only accurate near that
    # latitude.
    projWGS84 = Proj(proj='latlong', datum='WGS84')
    projLV03 = Proj(init='epsg:21781')
    chCenterWGS84 = [8.3, 46.85]  # approx. geographic centre of Switzerland
    p1 = transform(projWGS84, projLV03, *chCenterWGS84)
    p2 = transform(projWGS84, projLV03, chCenterWGS84[0] + arc, chCenterWGS84[1])
    return p2[0] - p1[0]
class Bulk:
    """Accumulates row mappings for a single bulk insert."""

    def __init__(self, rows=None):
        # start from the provided rows (shared, not copied -- as before)
        self.rows = [] if rows is None else rows
        self.n = len(self.rows)

    def add(self, row):
        """Queue one row mapping."""
        self.rows.append(row)
        self.n += 1

    def commit(self, model, session):
        """Flush the queued rows via bulk_insert_mappings, then reset.

        Does nothing (and keeps its state) when no rows are queued.
        """
        if self.n <= 0:
            return
        session.bulk_insert_mappings(model, self.rows)
        session.commit()
        self.n = 0
        self.rows = []
class BulkInsert:
    """Feeds rows into Bulk batches, flushing automatically once a batch
    reaches *withAutoCommit* rows (default: never)."""

    NO_LIMIT = float('inf')

    def __init__(self, model, session, withAutoCommit=None):
        self.model = model
        self.session = session
        if withAutoCommit is None:
            self.autoCommit = BulkInsert.NO_LIMIT
        else:
            self.autoCommit = withAutoCommit
        self.bulk = Bulk()

    def add(self, row):
        """Queue one row; flush the current batch first if it is full."""
        if self.bulk.n >= self.autoCommit:
            self.bulk.commit(self.model, self.session)
            self.bulk = Bulk([row])
        else:
            self.bulk.add(row)

    def addN(self, rows):
        """Queue several rows."""
        for entry in rows:
            self.add(entry)

    def commit(self):
        """Flush whatever is queued and start a fresh batch."""
        self.bulk.commit(self.model, self.session)
        self.bulk = Bulk([])
| 25.29932 | 81 | 0.622748 |
3e42f5995876bbf7359739a5c0aea921346ce092
| 2,833 |
py
|
Python
|
deprecated/examples/dam/utils.py
|
hutuxian/FleetX
|
843c7aa33f5a14680becf058a3aaf0327eefafd4
|
[
"Apache-2.0"
] | 170 |
2020-08-12T12:07:01.000Z
|
2022-03-07T02:38:26.000Z
|
deprecated/examples/dam/utils.py
|
hutuxian/FleetX
|
843c7aa33f5a14680becf058a3aaf0327eefafd4
|
[
"Apache-2.0"
] | 195 |
2020-08-13T03:22:15.000Z
|
2022-03-30T07:40:25.000Z
|
deprecated/examples/dam/utils.py
|
hutuxian/FleetX
|
843c7aa33f5a14680becf058a3aaf0327eefafd4
|
[
"Apache-2.0"
] | 67 |
2020-08-14T02:07:46.000Z
|
2022-03-28T10:05:33.000Z
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import paddle.fluid as fluid
from paddle.fluid.incubate.fleet.collective import fleet
import os
def dist_eval(exe, var_names, feed):
    # All-reduce-sum a set of scalar metrics across all trainers.
    #
    # Builds a throwaway Paddle program with one float32 input per name in
    # var_names plus an int32 'length' counter, wraps each in a sum
    # all-reduce, and runs it with the given feed.
    #
    # exe: a fluid Executor bound to the current place.
    # var_names: names of the float32 scalars to reduce.
    # feed: dict feeding each var_name plus 'length'.
    # Returns the reduced values in feed-list order (var_names, then
    # 'length' last).
    prog = fluid.Program()
    with fluid.program_guard(prog):
        feed_list = []
        for name in var_names:
            feed_list.append(fluid.layers.data(
                name=name, shape=[1], dtype='float32'))
        feed_list.append(fluid.layers.data(name='length', shape=[1], dtype='int32'))
        dist_fetch = []
        for var in feed_list:
            # use_calc_stream=True: reduce on the compute stream, no
            # explicit synchronization needed before fetching
            dist_fetch.append(fluid.layers.collective._c_allreduce(
                var, reduce_type='sum', use_calc_stream=True))
    # the program was only *built* under the guard; it is run here
    ret = exe.run(prog, feed=feed, fetch_list=dist_fetch)
    return ret
def dist_eval_ubuntu(exe, result):
    """All-reduce the Ubuntu-corpus recall metrics across trainers and
    return the globally averaged values.

    result maps each metric key to a (sum, count) pair computed locally.
    """
    keys = ["1_in_2", "1_in_10", "2_in_10", "5_in_10"]
    var_names = ["p_at_1_in_2", "p_at_1_in_10", "p_at_2_in_10", "p_at_5_in_10"]
    feed = {name: result[key][0] for name, key in zip(var_names, keys)}
    feed['length'] = result["1_in_2"][1]
    reduced = dist_eval(exe, var_names, feed)
    p_at_1_in_2, p_at_1_in_10, p_at_2_in_10, p_at_5_in_10, length = reduced
    return {
        "1_in_2": p_at_1_in_2 / length,
        "1_in_10": p_at_1_in_10 / length,
        "2_in_10": p_at_2_in_10 / length,
        "5_in_10": p_at_5_in_10 / length,
    }
def dist_eval_douban(exe, result):
    """All-reduce the Douban-corpus metrics across trainers and return
    the globally averaged values.

    result maps each metric key to a (sum, count) pair computed locally.
    """
    pairs = [
        ("sum_m_a_p", "MAP"),
        ("sum_m_r_r", "MRR"),
        ("sum_p_1", "P_1"),
        ("sum_r_1", "1_in_10"),
        ("sum_r_2", "2_in_10"),
        ("sum_r_5", "5_in_10"),
    ]
    var_names = [name for name, _ in pairs]
    feed = {name: result[key][0] for name, key in pairs}
    feed['length'] = result["MAP"][1]
    (sum_m_a_p, sum_m_r_r, sum_p_1,
     sum_r_1, sum_r_2, sum_r_5, total_num) = dist_eval(exe, var_names, feed)
    return {
        "MAP": sum_m_a_p / total_num,
        "MRR": sum_m_r_r / total_num,
        "P_1": sum_p_1 / total_num,
        "1_in_10": sum_r_1 / total_num,
        "2_in_10": sum_r_2 / total_num,
        "5_in_10": sum_r_5 / total_num,
    }
| 35.4125 | 86 | 0.639252 |
e41a536b196c60e344b8645d1cde5dd755e28f59
| 11,975 |
py
|
Python
|
src/onegov/feriennet/boardlets.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/feriennet/boardlets.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/feriennet/boardlets.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from cached_property import cached_property
from onegov.activity import Activity, Attendee, Booking, Occasion
from onegov.feriennet import _
from onegov.feriennet import FeriennetApp
from onegov.feriennet.collections import BillingCollection, MatchCollection
from onegov.feriennet.exports.unlucky import UnluckyExport
from onegov.feriennet.layout import DefaultLayout
from onegov.org.models import Boardlet, BoardletFact
class FeriennetBoardlet(Boardlet):
    """Base class for Feriennet dashboard boardlets.

    Provides cached shortcuts to the request session, the active period
    and a default layout, plus a traffic-light ``state`` derived from
    the period.
    """

    @cached_property
    def session(self):
        return self.request.session

    @cached_property
    def period(self):
        return self.request.app.active_period

    @cached_property
    def layout(self):
        return DefaultLayout(None, self.request)

    @property
    def state(self):
        # no period at all -> failure, unconfirmed period -> warning
        if not self.period:
            return 'failure'
        return 'success' if self.period.confirmed else 'warning'
@FeriennetApp.boardlet(name='period', order=(1, 1))
class PeriodBoardlet(FeriennetBoardlet):
    # Shows the active period and its three phases (prebooking, booking,
    # execution) with a check mark next to each completed phase.
    @property
    def title(self):
        return self.period and self.period.title or _("No active period")
    @property
    def state(self):
        # Unlike the base class, only the presence of a period matters.
        if not self.period:
            return 'failure'
        return 'success'
    @property
    def facts(self):
        if not self.period:
            return
        def icon(checked):
            # checked vs. unchecked checkbox icon
            return checked and 'fa-check-square-o' or 'fa-square-o'
        yield BoardletFact(
            text=_("Prebooking: ${dates}", mapping={
                'dates': self.layout.format_date_range(
                    self.period.prebooking_start,
                    self.period.prebooking_end,
                )
            }),
            icon=icon(self.period.confirmed)
        )
        yield BoardletFact(
            text=_("Booking: ${dates}", mapping={
                'dates': self.layout.format_date_range(
                    self.period.booking_start,
                    self.period.booking_end,
                )
            }),
            # finalizable periods are "done" once finalized; otherwise
            # the booking phase simply has to lie in the past
            icon=icon(self.period.finalized if self.period.finalizable
                else self.period.is_booking_in_past)
        )
        yield BoardletFact(
            text=_("Execution: ${dates}", mapping={
                'dates': self.layout.format_date_range(
                    self.period.execution_start,
                    self.period.execution_end,
                )
            }),
            icon=icon(self.period.is_execution_in_past)
        )
@FeriennetApp.boardlet(name='activities', order=(1, 2))
class ActivitiesBoardlet(FeriennetBoardlet):
    # Shows how many accepted activities and how many occasions exist in
    # the active period.
    @cached_property
    def occasions_count(self):
        # number of occasions scheduled in the active period
        if not self.period:
            return 0
        return self.session.query(Occasion)\
            .filter_by(period_id=self.period.id)\
            .count()
    @cached_property
    def activities_count(self):
        # number of accepted activities with at least one occasion in the
        # active period
        if not self.period:
            return 0
        return self.session.query(Activity).filter(Activity.id.in_(
            self.session.query(Occasion.activity_id)
            .filter_by(period_id=self.period.id)
            .subquery()
        )).filter_by(state='accepted').count()
    @property
    def title(self):
        return _("${count} Activities", mapping={
            'count': self.activities_count
        })
    @property
    def state(self):
        # failure without a period, warning while no activities exist yet
        if not self.period:
            return 'failure'
        return self.activities_count and 'success' or 'warning'
    @property
    def facts(self):
        if not self.period:
            return
        yield BoardletFact(
            text=_("${count} Activities", mapping={
                'count': self.activities_count
            }),
            icon='fa-dot-circle-o'
        )
        yield BoardletFact(
            text=_("${count} Occasions", mapping={
                'count': self.occasions_count
            }),
            icon='fa-circle-o'
        )
@FeriennetApp.boardlet(name='bookings', order=(1, 3))
class BookingsBoardlet(FeriennetBoardlet):
    # Shows the bookings of the active period.  Before the period is
    # confirmed, bookings are presented as "wishes"; afterwards the
    # per-state breakdown (accepted/cancelled/denied/blocked) is shown.
    @cached_property
    def counts(self):
        # booking counts per state for the active period
        if not self.period:
            return {
                'accepted': 0,
                'blocked': 0,
                'cancelled': 0,
                'denied': 0,
                'total': 0,
            }
        bookings = self.session.query(Booking)\
            .filter_by(period_id=self.period.id)
        return {
            'accepted': bookings.filter_by(state='accepted').count(),
            'blocked': bookings.filter_by(state='blocked').count(),
            'cancelled': bookings.filter_by(state='cancelled').count(),
            'denied': bookings.filter_by(state='denied').count(),
            'total': bookings.count(),
        }
    @cached_property
    def attendees_count(self):
        # number of distinct attendees with a booking in the period
        if not self.period:
            return 0
        return self.session.query(Attendee)\
            .filter(Attendee.id.in_(
                self.session.query(Booking.attendee_id).filter_by(
                    period_id=self.period.id
                )
            )).count()
    @property
    def title(self):
        # wording switches from "Wishes" to "Bookings" once confirmed
        if not self.period or not self.period.confirmed:
            return _("${count} Wishes", mapping={
                'count': self.counts['total']
            })
        else:
            return _("${count} Bookings", mapping={
                'count': self.counts['total']
            })
    @property
    def state(self):
        if not self.period:
            return 'failure'
        return self.counts['total'] and 'success' or 'warning'
    @property
    def facts(self):
        if not self.period:
            return
        if not self.period.confirmed:
            # pre-confirmation: totals only, phrased as wishes
            yield BoardletFact(
                text=_("${count} Wishes", mapping={
                    'count': self.counts['total']
                }),
                icon='fa-square',
            )
            yield BoardletFact(
                text=_("${count} Wishes per Attendee", mapping={
                    # 0 when there are no attendees (avoids division by zero)
                    'count': self.attendees_count and (
                        round(self.counts['total'] / self.attendees_count, 1)
                    ) or 0
                }),
                icon='fa-line-chart',
            )
        else:
            # post-confirmation: per-state breakdown
            yield BoardletFact(
                text=_("${count} Bookings", mapping={
                    'count': self.counts['total']
                }),
                icon='fa-square',
            )
            yield BoardletFact(
                text=_("${count} accepted", mapping={
                    'count': self.counts['accepted']
                }),
                icon='fa-minus',
            )
            yield BoardletFact(
                text=_("${count} cancelled", mapping={
                    'count': self.counts['cancelled']
                }),
                icon='fa-minus',
            )
            yield BoardletFact(
                text=_("${count} denied", mapping={
                    'count': self.counts['denied']
                }),
                icon='fa-minus',
            )
            yield BoardletFact(
                text=_("${count} blocked", mapping={
                    'count': self.counts['blocked']
                }),
                icon='fa-minus',
            )
            yield BoardletFact(
                text=_("${count} Bookings per Attendee", mapping={
                    # only accepted bookings count towards the average
                    'count': self.attendees_count and round(
                        self.counts['accepted'] / self.attendees_count, 1
                    ) or 0
                }),
                icon='fa-line-chart',
            )
@FeriennetApp.boardlet(name='attendees', order=(1, 4))
class AttendeesBoardlet(FeriennetBoardlet):
    # Shows the number of attendees in the active period, split by gender.

    @cached_property
    def attendee_counts(self):
        # Counts of attendees with a booking in the active period.
        # Consistency fix: the no-period fallback used the keys 'female' /
        # 'male' while the real result (and the facts below) use 'girls' /
        # 'boys'; the fallback now uses the same keys so both branches
        # return the same shape.
        if not self.period:
            return {
                'total': 0,
                'girls': 0,
                'boys': 0,
            }
        attendees = self.session.query(Attendee)\
            .filter(Attendee.id.in_(
                self.session.query(Booking.attendee_id).filter_by(
                    period_id=self.period.id
                )
            ))
        return {
            'total': attendees.count(),
            'girls': attendees.filter_by(gender='female').count(),
            'boys': attendees.filter_by(gender='male').count(),
        }

    @property
    def title(self):
        return _("${count} Attendees", mapping={
            'count': self.attendee_counts['total']
        })

    @property
    def state(self):
        # failure without a period, warning while nobody attends yet
        if not self.period:
            return 'failure'
        return self.attendee_counts['total'] and 'success' or 'warning'

    @property
    def facts(self):
        if not self.period:
            return
        yield BoardletFact(
            text=_("${count} Girls", mapping={
                'count': self.attendee_counts['girls']
            }),
            icon='fa-female'
        )
        yield BoardletFact(
            text=_("${count} Boys", mapping={
                'count': self.attendee_counts['boys']
            }),
            icon='fa-male'
        )
@FeriennetApp.boardlet(name='matching', order=(1, 5))
class MatchingBoardlet(FeriennetBoardlet):
    """Boardlet showing how well the occasion matching went."""

    @cached_property
    def happiness(self):
        # Percentage of fulfilled wishes; only defined once the period
        # has been confirmed.
        if not self.period or not self.period.confirmed:
            return 0
        raw = MatchCollection(self.session, self.period).happiness
        return round(raw * 100, 2)

    @cached_property
    def unlucky_count(self):
        # Number of attendees that did not get any occasion at all.
        if not self.period:
            return 0
        return UnluckyExport().query(self.session, self.period).count()

    @property
    def title(self):
        return _("${amount}% Happiness", mapping={
            'amount': self.happiness
        })

    @property
    def state(self):
        if not self.period:
            return 'failure'
        return 'success' if self.happiness > 75 else 'warning'

    @property
    def facts(self):
        if not self.period:
            return

        yield BoardletFact(
            text=_("${amount}% Happiness", mapping={
                'amount': self.happiness
            }),
            icon='fa-smile-o',
        )
        yield BoardletFact(
            text=_("${count} Attendees Without Occasion", mapping={
                'count': self.unlucky_count
            }),
            icon='fa-frown-o',
        )
@FeriennetApp.boardlet(name='billing', order=(1, 6))
class BillingPortlet(FeriennetBoardlet):
    """Boardlet summarising the billing state of the active period."""

    @cached_property
    def amounts(self):
        # Total, outstanding and paid amounts of the period's bills.
        if not self.period:
            return {
                'total': 0,
                'outstanding': 0,
                'paid': 0,
            }

        billing = BillingCollection(self.request, self.period)
        paid = billing.total - billing.outstanding

        return {
            'total': billing.total,
            'outstanding': billing.outstanding,
            'paid': paid,
        }

    @property
    def title(self):
        return _("${amount} CHF outstanding", mapping={
            'amount': self.layout.format_number(self.amounts['outstanding'])
        })

    @property
    def state(self):
        if not self.period:
            return 'failure'
        return 'warning' if self.amounts['outstanding'] else 'success'

    @property
    def facts(self):
        if not self.period:
            return

        yield BoardletFact(
            text=_("${amount} CHF total", mapping={
                'amount': self.layout.format_number(self.amounts['total'])
            }),
            icon='fa-circle',
        )
        yield BoardletFact(
            text=_("${amount} CHF paid", mapping={
                'amount': self.layout.format_number(self.amounts['paid'])
            }),
            icon='fa-plus-circle',
        )
        yield BoardletFact(
            text=_("${amount} CHF outstanding", mapping={
                'amount': self.layout.format_number(
                    self.amounts['outstanding']
                )
            }),
            icon='fa-minus-circle',
        )
| 27.784223 | 77 | 0.517077 |
e422291f7998d85026e91171364808697735a94e
| 9,137 |
py
|
Python
|
Packs/TrendMicroCAS/Integrations/TrendMicroCAS/TrendMicroCAS_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/TrendMicroCAS/Integrations/TrendMicroCAS/TrendMicroCAS_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/TrendMicroCAS/Integrations/TrendMicroCAS/TrendMicroCAS_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import pytest
from TrendMicroCAS import Client, security_events_list_command, email_sweep_command, parse_date_to_isoformat,\
user_take_action_command, email_take_action_command, user_action_result_command, blocked_lists_get_command,\
blocked_lists_update_command
import datetime
import test_data.commands_raw_response as data
from CommonServerPython import CommandResults
# Shared client under test; the base URL is a dummy -- every HTTP call in
# the tests below is mocked via mocker.patch.object(client, '_http_request').
client = Client(base_url='https://test.com', verify=False, headers={'Authorization': f'Bearer {"1243545"}'})
# Argument permutations for security_events_list_command: required args
# only, with a relative start, and with an absolute start + relative end.
args_test_security_events_list = [
{'service': 'onedrive', 'event_type': 'securityrisk'},
{'service': 'onedrive', 'event_type': 'securityrisk', 'start': '1 day'},
{'service': 'onedrive', 'event_type': 'securityrisk', 'start': '2020-01-01T00:00Z', 'end': '3 days'}]
@pytest.mark.parametrize('args', args_test_security_events_list)
def test_security_events_list_command(mocker, args):
    """security_events_list_command builds the expected context.

    Runs once per argument set (service/event_type with different
    start/end combinations) against a mocked HTTP layer and validates
    the first result's context data, context key and context prefix.
    """
    mocker.patch.object(client, '_http_request', return_value=data.SECURITY_EVENTS_LIST_RESULT)
    first = security_events_list_command(client, args)[0]
    assert first.outputs == data.SECURITY_EVENTS_LIST_OUTPUT['security_risk']
    assert first.outputs_key_field == 'log_item_id'
    assert first.outputs_prefix == 'TrendMicroCAS.Events'
# Argument permutations for email_sweep_command: limit only, a relative
# start/end window, and an absolute start with a relative end.
# NOTE: deliberately re-binds args_test_security_events_list, which the
# next @parametrize below picks up.
args_test_security_events_list = [
{'limit': '1'},
{'limit': '1', 'start': '12 days', 'end': '10 days'},
{'limit': '1', 'start': '2020-01-01', 'end': '1 day'}]
@pytest.mark.parametrize('args', args_test_security_events_list)
def test_email_sweep_command(mocker, args):
    """email_sweep_command returns the raw trace with the right context.

    Exercised with a plain limit, a relative window and an absolute +
    relative window; validates context data, key and prefix.
    """
    mocker.patch.object(client, '_http_request', return_value=data.EMAIL_SWEEP_RESULT)
    sweep = email_sweep_command(client, args)
    assert sweep.outputs == data.EMAIL_SWEEP_RESULT
    assert sweep.outputs_key_field == 'traceId'
    assert sweep.outputs_prefix == 'TrendMicroCAS.EmailSweep'
def test_user_take_action_command(mocker):
    """user_take_action_command: context data, key and prefix.

    Sends an action against two user e-mail accounts over a mocked HTTP
    layer and validates the produced context.
    """
    mocker.patch.object(client, '_http_request', return_value=data.USER_TAKE_ACTION_RESULT)
    command_args = {
        'action_type': 'action_type',
        'account_user_email': 'account_user_email1, account_user_email2'
    }
    outcome = user_take_action_command(client, command_args)
    assert outcome.outputs == data.USER_TAKE_ACTION_OUTPUT
    assert outcome.outputs_key_field == 'batch_id'
    assert outcome.outputs_prefix == 'TrendMicroCAS.UserTakeAction'
def test_email_take_action_command(mocker):
    """email_take_action_command: context data, key and prefix.

    Calls the command with a full set of mail identifiers and a fixed
    delivery time against a mocked HTTP layer.
    """
    mocker.patch.object(client, '_http_request', return_value=data.EMAIL_TAKE_ACTION_RESULT)
    command_args = {
        'action_type': 'action_type',
        'mailbox': 'mailbox',
        'mail_message_id': 'mail_message_id',
        'mail_unique_id': 'mail_unique_id',
        'mail_message_delivery_time': '2020-07-13T01:52:50.000Z'
    }
    outcome = email_take_action_command(client, command_args)
    assert outcome.outputs == data.EMAIL_TAKE_ACTION_OUTPUT
    assert outcome.outputs_key_field == 'batch_id'
    assert outcome.outputs_prefix == 'TrendMicroCAS.EmailTakeAction'
# Argument permutations for user_action_result_command: a result limit,
# a relative time window, and a lookup by batch id.
args_test_user_action_result = [
{'limit': '5'},
{'start': '12 hours', 'end': '10 hours'},
{'batch_id': 'batch_id'}]
@pytest.mark.parametrize('args', args_test_user_action_result)
def test_user_action_result_command(mocker, args):
    """user_action_result_command: context data, key and prefix.

    Parametrized with a result limit, a relative time window and a
    batch-id lookup; the HTTP layer is mocked.
    """
    mocker.patch.object(client, '_http_request', return_value=data.USER_ACTION_RESULT_RESULT)
    outcome = user_action_result_command(client, args)
    assert outcome.outputs == data.USER_ACTION_RESULT_OUTPUT
    assert outcome.outputs_key_field == 'batch_id'
    assert outcome.outputs_prefix == 'TrendMicroCAS.UserActionResult'
def test_blocked_lists_get_command(mocker):
    """blocked_lists_get_command: context data, key and prefix."""
    mocker.patch.object(client, '_http_request', return_value=data.BLOCKED_LISTS_GET_RESULT)
    outcome = blocked_lists_get_command(client)
    assert outcome.outputs == data.BLOCKED_LISTS_OUTPUT
    assert outcome.outputs_key_field == 'BlockedList'
    assert outcome.outputs_prefix == 'TrendMicroCAS.BlockedList'
def test_blocked_lists_update_command(mocker):
    """blocked_lists_update_command: context data, key and prefix.

    Submits senders, URLs and a file hash over a mocked HTTP layer and
    validates the produced context.
    """
    command_args = {
        'action_type': 'action_type',
        'senders': '[email protected],[email protected]',
        'urls': '123.com,456.com,789.com',
        'filehashes': 'f3cdddb37f6a933d6a256bd98b4bc703a448c621'
    }
    mocker.patch.object(client, '_http_request', return_value=data.BLOCKED_LISTS_UPDATE_RESULT)
    outcome = blocked_lists_update_command(client, command_args)
    assert outcome.outputs == data.BLOCKED_LISTS_OUTPUT
    assert outcome.outputs_key_field == 'BlockedList'
    assert outcome.outputs_prefix == 'TrendMicroCAS.BlockedList'
# (input, expected) pairs for parse_date_to_isoformat: several date
# spellings that must all normalise to %Y-%m-%dT%H:%M:%SZ strings.
DATA_TEST_PARSE_DATE_TO_ISOFORMAT = [
('08/09/10', '2010-08-09T00:00:00Z'),
('08.09.10', '2010-08-09T00:00:00Z'),
('08-09-10', '2010-08-09T00:00:00Z'),
('9/10/11 09:45:33', '2011-09-10T09:45:33Z'),
]
@pytest.mark.parametrize('date_input, fan_result', DATA_TEST_PARSE_DATE_TO_ISOFORMAT)
def test_parse_date_to_isoformat(date_input, fan_result):
    """parse_date_to_isoformat normalises different date spellings.

    '08/09/10', '08.09.10', '08-09-10' and '9/10/11 09:45:33' must all
    yield the exact %Y-%m-%dT%H:%M:%SZ strings from the table above.
    """
    assert parse_date_to_isoformat(date_input, 'test') == fan_result
# Free-text relative dates that parse_date_to_isoformat must also accept.
DATA_TEST_PARSE_DATE_TO_ISOFORMAT_FREE_TEXT = [
'1 day',
'3 months',
'1 week and 1 day'
]
@pytest.mark.parametrize('date_input', DATA_TEST_PARSE_DATE_TO_ISOFORMAT_FREE_TEXT)
def test_parse_date_to_isoformat_on_free_text(date_input):
    """parse_date_to_isoformat turns free-text dates into ISO strings.

    Given free text such as '1 day', '3 months' or '1 week and 1 day',
    the result must parse back with the %Y-%m-%dT%H:%M:%SZ format.

    BUGFIX: the original flag `its_not_isoformat` was only bound inside
    the except branch, so a *successful* parse raised NameError on the
    final assert and the test could never pass. The flag is now
    initialised up front and the assertion direction corrected.
    """
    result = parse_date_to_isoformat(date_input, 'test')
    is_isoformat = True
    try:
        datetime.datetime.strptime(result, "%Y-%m-%dT%H:%M:%SZ")
    except ValueError:
        is_isoformat = False
    assert is_isoformat
| 37.600823 | 115 | 0.704279 |
5fa88a1356fdd019f1a580a03ac1bb65e8ab75bd
| 1,240 |
py
|
Python
|
leetcode/068-Text-Justification/TextJustification.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2015-12-16T04:01:03.000Z
|
2015-12-16T04:01:03.000Z
|
leetcode/068-Text-Justification/TextJustification.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2016-02-09T06:00:07.000Z
|
2016-02-09T07:20:13.000Z
|
leetcode/068-Text-Justification/TextJustification.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 2 |
2019-06-27T09:07:26.000Z
|
2019-07-01T04:40:13.000Z
|
class Solution(object):
    def fullJustify(self, words, maxWidth):
        """Greedily pack words into fully-justified lines of `maxWidth`.

        Extra spaces are distributed as evenly as possible across the
        gaps of a line, left gaps receiving one more when they do not
        divide evenly; the last line is left-justified and padded.

        FIX: the space computations used `/`, which yields a float on
        Python 3 and makes `' ' * step` raise TypeError -- replaced with
        integer division `//`. The inner helper also no longer shadows
        the builtin `format`.

        :type words: List[str]
        :type maxWidth: int
        :rtype: List[str]
        """
        def justify(wds, width):
            # Build one fully-justified line from the words `wds`.
            wdslen = len(''.join(wds))
            gaps = len(wds) - 1
            # Single-word line: one blank of (width - len - 1) + 1 chars.
            step = (width - wdslen) // gaps if gaps else width - len(wds[0]) - 1
            diff = (width - wdslen) % gaps if gaps else 1
            blanks = [' ' * (step + 1)] * diff + [' ' * step] * (gaps - diff)
            line = ''
            for i in range(len(blanks)):
                line += wds[i] + blanks[i]
            line += wds[-1] if gaps else ''
            return line

        # Greedy packing: a word joins the current line if it still fits
        # with at least one separating space.
        lines = []
        tmp = 0
        for word in words:
            if tmp and tmp + len(word) < maxWidth:
                lines[-1].append(word)
                tmp += len(word) + 1
            else:
                lines.append([word])
                tmp = len(word)

        res = [justify(l, maxWidth) for l in lines]
        if res:
            # Last line: single spaces between words, padded on the right.
            res[-1] = ' '.join(res[-1].split())
            res[-1] = res[-1] + (maxWidth - len(res[-1])) * ' '
        return res
| 31 | 74 | 0.397581 |
f2bfaa4612587f85837ea1e68cefb329f915d656
| 299 |
py
|
Python
|
Uebung1/Uebung1_Aufgabe3_2.py
|
B0mM3L6000/EiP
|
f68718f95a2d3cde8ead62b6134ac1b5068881a5
|
[
"MIT"
] | 1 |
2018-04-18T19:10:06.000Z
|
2018-04-18T19:10:06.000Z
|
Uebung1/Uebung1_Aufgabe3_2.py
|
B0mM3L6000/EiP
|
f68718f95a2d3cde8ead62b6134ac1b5068881a5
|
[
"MIT"
] | null | null | null |
Uebung1/Uebung1_Aufgabe3_2.py
|
B0mM3L6000/EiP
|
f68718f95a2d3cde8ead62b6134ac1b5068881a5
|
[
"MIT"
] | 1 |
2018-04-29T08:48:00.000Z
|
2018-04-29T08:48:00.000Z
|
# Interactive compound-interest calculator.
# K0 = initial capital
# p = interest rate per period (as a fraction, e.g. 0.05 for 5%)
# n = number of periods
# K = final capital
K0 = float(input("Wieviel Startkapital hast du?"))
p = float(input("Wie hoch ist der Zins?"))
n = float(input("wieviele Perioden legst du an?"))
# compute K with the compound-interest formula K = K0 * (1 + p)^n
K = (K0*(1+p)**n)
print("Das Endkapital nach ", n, " Perioden beträgt: ",K)
| 18.6875 | 57 | 0.655518 |
d9b7d56be59460373b39b95f460c79e143e8a7eb
| 4,508 |
py
|
Python
|
rbac/app/config.py
|
fthornton67/sawtooth-next-directory
|
79479afb8d234911c56379bb1d8abf11f28ef86d
|
[
"Apache-2.0"
] | 75 |
2018-04-06T09:13:34.000Z
|
2020-05-18T18:59:47.000Z
|
rbac/app/config.py
|
fthornton67/sawtooth-next-directory
|
79479afb8d234911c56379bb1d8abf11f28ef86d
|
[
"Apache-2.0"
] | 989 |
2018-04-18T21:01:56.000Z
|
2019-10-23T15:37:09.000Z
|
rbac/app/config.py
|
fthornton67/sawtooth-next-directory
|
79479afb8d234911c56379bb1d8abf11f28ef86d
|
[
"Apache-2.0"
] | 72 |
2018-04-13T18:29:12.000Z
|
2020-05-29T06:00:33.000Z
|
# Copyright 2019 Contributors to Hyperledger Sawtooth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
import os
from rbac.common.crypto.keys import Key
from rbac.common.crypto.secrets import generate_aes_key
from rbac.common.crypto.secrets import generate_secret_key
from rbac.common.logs import get_default_logger
LOGGER = get_default_logger(__name__)
# Fallback configuration used when the corresponding environment variable
# is unset. SECRET_KEY and AES_KEY are deliberately recognisable,
# insecure placeholders -- the checks further below warn loudly when a
# deployment still runs on them.
DEFAULT_CONFIG = {
"SERVER_HOST": "0.0.0.0",
"SERVER_PORT": "8000",
"VALIDATOR_HOST": "validator",
"VALIDATOR_PORT": "4004",
"VALIDATOR_TIMEOUT": "500",
"VALIDATOR_REST_HOST": "rest-api",
"VALIDATOR_REST_PORT": "8008",
"DB_HOST": "rethink",
"DB_PORT": "28015",
"DB_NAME": "rbac",
"CHATBOT_HOST": "chatbot",
"CHATBOT_PORT": "5005",
"CLIENT_HOST": "http://localhost",
"CLIENT_PORT": "4201",
"SECRET_KEY": "ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890",
"AES_KEY": "1111111111111111111111111111111111111111111111111111111111111111",
"AIOHTTP_CONN_LIMIT": "0",
"AIOHTTP_DNS_TTL": "900",
"WORKERS": "4",
}
# Environment overrides: each setting falls back to DEFAULT_CONFIG when
# the variable is unset. Timeouts/limits are converted to int here so a
# malformed value fails fast at import time.
SERVER_HOST = os.getenv("SERVER_HOST", DEFAULT_CONFIG["SERVER_HOST"])
SERVER_PORT = os.getenv("SERVER_PORT", DEFAULT_CONFIG["SERVER_PORT"])
VALIDATOR_HOST = os.getenv("VALIDATOR_HOST", DEFAULT_CONFIG["VALIDATOR_HOST"])
VALIDATOR_PORT = os.getenv("VALIDATOR_PORT", DEFAULT_CONFIG["VALIDATOR_PORT"])
VALIDATOR_TIMEOUT = int(
os.getenv("VALIDATOR_TIMEOUT", DEFAULT_CONFIG["VALIDATOR_TIMEOUT"])
)
VALIDATOR_REST_HOST = os.getenv(
"VALIDATOR_REST_HOST", DEFAULT_CONFIG["VALIDATOR_REST_HOST"]
)
VALIDATOR_REST_PORT = os.getenv(
"VALIDATOR_REST_PORT", DEFAULT_CONFIG["VALIDATOR_REST_PORT"]
)
DB_HOST = os.getenv("DB_HOST", DEFAULT_CONFIG["DB_HOST"])
DB_PORT = os.getenv("DB_PORT", DEFAULT_CONFIG["DB_PORT"])
DB_NAME = os.getenv("DB_NAME", DEFAULT_CONFIG["DB_NAME"])
CHATBOT_HOST = os.getenv("CHATBOT_HOST", DEFAULT_CONFIG["CHATBOT_HOST"])
CHATBOT_PORT = os.getenv("CHATBOT_PORT", DEFAULT_CONFIG["CHATBOT_PORT"])
# ADAPI has no default -- both remain None when not configured.
ADAPI_HOST = os.getenv("ADAPI_HOST")
ADAPI_PORT = os.getenv("ADAPI_PORT")
CLIENT_HOST = os.getenv("CLIENT_HOST", DEFAULT_CONFIG["CLIENT_HOST"])
CLIENT_PORT = os.getenv("CLIENT_PORT", DEFAULT_CONFIG["CLIENT_PORT"])
AES_KEY = os.getenv("AES_KEY", DEFAULT_CONFIG["AES_KEY"])
SECRET_KEY = os.getenv("SECRET_KEY", DEFAULT_CONFIG["SECRET_KEY"])
AIOHTTP_CONN_LIMIT = int(
os.getenv("AIOHTTP_CONN_LIMIT", DEFAULT_CONFIG["AIOHTTP_CONN_LIMIT"])
)
AIOHTTP_DNS_TTL = int(os.getenv("AIOHTTP_DNS_TTL", DEFAULT_CONFIG["AIOHTTP_DNS_TTL"]))
WORKERS = os.getenv("WORKERS", DEFAULT_CONFIG["WORKERS"])
# Warn when the service still runs on the known insecure default secrets.
# FIX: the original compared with `is`, which only worked by the accident
# that os.getenv(name, default) returns the default object itself when
# the variable is unset. `==` states the intent and additionally catches
# an explicitly configured copy of the insecure default value.
if SECRET_KEY == DEFAULT_CONFIG["SECRET_KEY"]:
    LOGGER.warning(
        """
---------------------------------------------
WARNING: The API secret key was not provided.
Using an insecure default key. Consider adding
the following to the environment (e.g. .env file):
SECRET_KEY=%s
---------------------------------------------
""",
        generate_secret_key(),
    )
if AES_KEY == DEFAULT_CONFIG["AES_KEY"]:
    LOGGER.warning(
        """
---------------------------------------------
WARNING: The AES secret key was not provided.
Using an insecure default key. Consider adding
the following to the environment (e.g. .env file):
AES_KEY=%s
---------------------------------------------
""",
        generate_aes_key(),
    )
# Signing key pair generated once per process for batch submission.
BATCHER_KEY_PAIR = Key()
# Derived configuration: full endpoint URLs assembled from the
# host/port settings above.
VALIDATOR_ENDPOINT = "tcp://{VALIDATOR_HOST}:{VALIDATOR_PORT}".format(
VALIDATOR_HOST=VALIDATOR_HOST, VALIDATOR_PORT=VALIDATOR_PORT
)
VALIDATOR_REST_ENDPOINT = "http://{VALIDATOR_REST_HOST}:{VALIDATOR_REST_PORT}".format(
VALIDATOR_REST_HOST=VALIDATOR_REST_HOST, VALIDATOR_REST_PORT=VALIDATOR_REST_PORT
)
CHATBOT_REST_ENDPOINT = "http://{CHATBOT_HOST}:{CHATBOT_PORT}".format(
CHATBOT_HOST=CHATBOT_HOST, CHATBOT_PORT=CHATBOT_PORT
)
# NOTE(review): built even when ADAPI_HOST/PORT are unset, yielding the
# literal "https://None:None" -- callers presumably check the settings
# before using it; confirm.
ADAPI_REST_ENDPOINT = "https://{ADAPI_HOST}:{ADAPI_PORT}".format(
ADAPI_HOST=ADAPI_HOST, ADAPI_PORT=ADAPI_PORT
)
| 37.566667 | 86 | 0.696318 |
8a5e5fc6336968f25a2c53c1c84be61f1fb99a45
| 694 |
py
|
Python
|
___Python/RomanS/untitled/p13_parameter/m01_parameter.py
|
uvenil/PythonKurs201806
|
85afa9c9515f5dd8bec0c546f077d8cc39568fe8
|
[
"Apache-2.0"
] | null | null | null |
___Python/RomanS/untitled/p13_parameter/m01_parameter.py
|
uvenil/PythonKurs201806
|
85afa9c9515f5dd8bec0c546f077d8cc39568fe8
|
[
"Apache-2.0"
] | null | null | null |
___Python/RomanS/untitled/p13_parameter/m01_parameter.py
|
uvenil/PythonKurs201806
|
85afa9c9515f5dd8bec0c546f077d8cc39568fe8
|
[
"Apache-2.0"
] | null | null | null |
def ausgabe(liste, ende="\n", erstezeile=""):
    """Print an optional heading line, then every element of `liste`.

    `ende` is used as the terminator after each element (default: one
    element per line); `erstezeile` is printed first on its own line.
    """
    print(erstezeile)
    for eintrag in liste:
        print(eintrag, end=ende)
def ausgabe2(liste, **kwargs):
    """Print an optional heading line, then every element of `liste`.

    Keyword options:
        ende       -- terminator after each element (default: newline)
        erstezeile -- heading printed first on its own line (default: "")
    """
    # dict.get expresses "use the default unless provided" directly,
    # replacing the original `"key" not in kwargs` conditionals.
    ende = kwargs.get("ende", "\n")
    erstezeile = kwargs.get("erstezeile", "")
    print(erstezeile)
    for element in liste:
        print(element, end=ende)
# Demo: the same shopping list printed with positional arguments ...
einkaufsliste = ["Milch", "Eier", "Bier"]
ausgabe(einkaufsliste)
ausgabe(einkaufsliste, " ")
print()
ausgabe(einkaufsliste, " ", "Meine Einkaufsliste:")
###
# ... and with keyword arguments via ausgabe2.
ausgabe2(einkaufsliste)
ausgabe2(einkaufsliste,ende = " ")
print()
ausgabe2(einkaufsliste, erstezeile = "Meine Einkaufsliste:")
| 24.785714 | 76 | 0.651297 |
6a71456d5badc95c19468cfee76390b8e11776c4
| 18,187 |
py
|
Python
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pygsl/integrate.py
|
poojavade/Genomics_Docker
|
829b5094bba18bbe03ae97daf925fee40a8476e8
|
[
"Apache-2.0"
] | 1 |
2019-07-29T02:53:51.000Z
|
2019-07-29T02:53:51.000Z
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pygsl/integrate.py
|
poojavade/Genomics_Docker
|
829b5094bba18bbe03ae97daf925fee40a8476e8
|
[
"Apache-2.0"
] | 1 |
2021-09-11T14:30:32.000Z
|
2021-09-11T14:30:32.000Z
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pygsl/integrate.py
|
poojavade/Genomics_Docker
|
829b5094bba18bbe03ae97daf925fee40a8476e8
|
[
"Apache-2.0"
] | 2 |
2016-12-19T02:27:46.000Z
|
2019-07-29T02:53:54.000Z
|
#!/usr/bin/env python
# Author : Pierre Schnizer
"""
This modules defines routines for performing numerical integration
(quadrature) of a function in one dimension. There are routines for adaptive
and non-adaptive integration of general functions, with specialised routines
for specific cases. These include integration over infinite and semi-infinite
ranges, singular integrals, including logarithmic singularities, computation
of Cauchy principal values and oscillatory integrals. The library
reimplements the algorithms used in QUADPACK, a numerical integration package
written by Piessens, Doncker-Kapenga, Uberhuber and Kahaner. Fortran code for
QUADPACK is available on Netlib.
"""
from . import _callback
from .gsl_function import gsl_function
from ._generic_solver import _workspace
# Gauss-Kronrod rule selectors, passed as the `key` argument of qag().
GAUSS15 = _callback.GSL_INTEG_GAUSS15 # 15 point Gauss-Kronrod rule
GAUSS21 = _callback.GSL_INTEG_GAUSS21 # 21 point Gauss-Kronrod rule
GAUSS31 = _callback.GSL_INTEG_GAUSS31 # 31 point Gauss-Kronrod rule
GAUSS41 = _callback.GSL_INTEG_GAUSS41 # 41 point Gauss-Kronrod rule
GAUSS51 = _callback.GSL_INTEG_GAUSS51 # 51 point Gauss-Kronrod rule
GAUSS61 = _callback.GSL_INTEG_GAUSS61 # 61 point Gauss-Kronrod rule
# Oscillatory weight selectors for qawo_table (its `sine` argument).
SINE = _callback.GSL_INTEG_SINE
COSINE = _callback.GSL_INTEG_COSINE
class workspace(_workspace):
    """Workspace for the adaptive integration routines.

    Holds up to `size` double precision subintervals together with their
    integration results and error estimates.

    input : size
            size ... number of intervals the workspace can hold
    """

    _alloc = _callback.gsl_integration_workspace_alloc
    _free = _callback.gsl_integration_workspace_free
    _size = _callback.gsl_integration_workspace_get_size

    def get_size(self):
        """Return the size of the workspace."""
        return self._size(self._ptr)
class qaws_table(_workspace):
    """Parameter table for QAWS' singular weight function.

    The weight is

        W(x) = (x-a)^alpha (b-x)^beta log^mu (x-a) log^nu (b-x)

    with alpha > -1, beta > -1 and mu, nu in {0, 1} (note: GSL requires
    the exponents to be *greater* than -1; the previous docstring had the
    inequalities reversed). The four mu/nu combinations select which of
    the logarithmic factors are present. The singular points (a, b) are
    only supplied later, as the endpoints of the integration range.

    input : alpha, beta, mu, nu
    """

    _alloc = _callback.gsl_integration_qaws_table_alloc
    _free = _callback.gsl_integration_qaws_table_free
    _set = _callback.gsl_integration_qaws_table_set

    def __init__(self, alpha, beta, mu, nu):
        # Like _workspace.__init__, but the allocator takes the four
        # weight parameters instead of a size.
        self._ptr = None
        # `is not None` is the correct identity test (was `!= None`).
        assert self._alloc is not None
        assert self._free is not None
        self._ptr = self._alloc(alpha, beta, mu, nu)

    def set(self, alpha, beta, mu, nu):
        """Modify the parameters (alpha, beta, mu, nu) in place."""
        self._set(self._ptr, alpha, beta, mu, nu)
class qawo_table(_workspace):
    """Table of trigonometric coefficients for oscillatory integration.

    Describes the weight W(x) = sin(omega x) or cos(omega x); `sine` is
    one of the module constants SINE or COSINE. `L` must be the length of
    the integration interval (b - a) and `n` the number of bisection
    levels of coefficients to precompute -- sufficient for subintervals
    down to L/2^n; qawo returns GSL_ETABLE when the table is too small
    for the requested accuracy.

    input : omega, L, sine, n
    """

    _alloc = _callback.gsl_integration_qawo_table_alloc
    _free = _callback.gsl_integration_qawo_table_free
    _set = _callback.gsl_integration_qawo_table_set
    # BUGFIX: this was bound to gsl_integration_qawo_table_set, so
    # set_length() invoked the four-argument setter with one argument and
    # always failed; it must bind the dedicated *_set_length callback.
    _set_length = _callback.gsl_integration_qawo_table_set_length

    def __init__(self, omega, L, sine, n):
        self._ptr = None
        # `is not None` is the correct identity test (was `!= None`).
        assert self._alloc is not None
        assert self._free is not None
        self._ptr = self._alloc(omega, L, sine, n)

    def set(self, omega, L, sine, n):
        """Change the parameters OMEGA, L and SINE."""
        self._set(self._ptr, omega, L, sine, n)

    def set_length(self, L):
        """Change the length parameter L."""
        self._set_length(self._ptr, L)
def qng(f, a, b, epsabs, epsrel):
    """Non-adaptive Gauss-Kronrod integration of `f` over (a, b).

    Applies the 10, 21, 43 and 87 point rules in succession until the
    requested absolute (epsabs) or relative (epsrel) accuracy is reached;
    each rule reuses all evaluations of its predecessors. Returns the
    approximation, an absolute error estimate and the number of function
    evaluations used.

    input : f, a, b, epsabs, epsrel
            f ... gsl_function
    """
    fptr = f.get_ptr()
    return _callback.gsl_integration_qng(fptr, a, b, epsabs, epsrel)
def qag(f, a, b, epsabs, epsrel, limit, key, workspace):
    """Simple adaptive integration of `f` over (a, b) (QUADPACK QAG).

    On each iteration the subinterval with the largest estimated error is
    bisected, concentrating subintervals around local difficulties of the
    integrand. Subinterval bookkeeping lives in `workspace`, bounded by
    `limit`. `key` selects the Gauss-Kronrod rule and must be one of
    GAUSS15, GAUSS21, GAUSS31, GAUSS41, GAUSS51 or GAUSS61: higher-order
    rules suit smooth integrands, lower-order rules save time when the
    function has local difficulties such as discontinuities. Returns the
    approximation and an absolute error estimate.

    input : f, a, b, epsabs, epsrel, limit, key, workspace
            f ... gsl_function
    """
    return _callback.gsl_integration_qag(
        f.get_ptr(), a, b, epsabs, epsrel, limit, key, workspace._ptr)
def qags(f, a, b, epsabs, epsrel, limit, workspace):
    """Adaptive integration with extrapolation (QUADPACK QAGS).

    Applies the 21 point Gauss-Kronrod rule adaptively and accelerates
    convergence with the epsilon-algorithm, which copes with
    discontinuities and integrable singularities. Returns the
    extrapolated approximation and an absolute error estimate; at most
    `limit` subintervals are stored in `workspace`.

    input : f, a, b, epsabs, epsrel, limit, workspace
            f ... gsl_function
    """
    # Consistency fix: every sibling wrapper obtains the C pointer via
    # the public accessor f.get_ptr(); this one reached into f._ptr.
    return _callback.gsl_integration_qags(f.get_ptr(), a, b, epsabs, epsrel,
                                          limit, workspace._ptr)
def qagp(f, pts, epsabs, epsrel, limit, workspace):
    """Adaptive integration with known singular points (QUADPACK QAGP).

    `pts` lists, in ascending order, the endpoints of the integration
    region together with the locations of the singularities: for (a, b)
    with break-points a < x_1 < x_2 < x_3 < b use
    [a, x_1, x_2, x_3, b]. Faster than qags() when the singular
    locations are known.

    input : f, pts, epsabs, epsrel, limit, workspace
            f ... gsl_function
    """
    return _callback.gsl_integration_qagp(
        f.get_ptr(), pts, epsabs, epsrel, limit, workspace._ptr)
def qagi(f, epsabs, epsrel, limit, workspace):
    """Adaptive integration of `f` over the infinite interval (-inf, +inf).

    The integral is mapped onto (0, 1] via x = (1-t)/t,

        \\int_{-\\infty}^{+\\infty} dx f(x) =
             \\int_0^1 dt (f((1-t)/t) + f((-1+t)/t))/t^2,

    and handled by the QAGS algorithm with a 15 point rule, which copes
    better with the integrable singularity the transformation can create
    at the origin.

    input : f, epsabs, epsrel, limit, workspace
            f ... gsl_function
    """
    return _callback.gsl_integration_qagi(
        f.get_ptr(), epsabs, epsrel, limit, workspace._ptr)
def qagiu(f, a, epsabs, epsrel, limit, workspace):
    """Adaptive integration of `f` over the semi-infinite interval (a, +inf).

    Uses the transformation x = a + (1-t)/t onto (0, 1],

        \\int_{a}^{+\\infty} dx f(x) = \\int_0^1 dt f(a + (1-t)/t)/t^2,

    followed by the QAGS algorithm.

    input : f, a, epsabs, epsrel, limit, workspace
            f ... gsl_function
    """
    return _callback.gsl_integration_qagiu(
        f.get_ptr(), a, epsabs, epsrel, limit, workspace._ptr)
def qagil(f, b, epsabs, epsrel, limit, workspace):
    """Adaptive integration of `f` over the semi-infinite interval (-inf, b).

    Uses the transformation x = b - (1-t)/t onto (0, 1],

        \\int_{-\\infty}^{b} dx f(x) = \\int_0^1 dt f(b - (1-t)/t)/t^2,

    followed by the QAGS algorithm.

    input : f, b, epsabs, epsrel, limit, workspace
            f ... gsl_function
    """
    return _callback.gsl_integration_qagil(
        f.get_ptr(), b, epsabs, epsrel, limit, workspace._ptr)
def qawc(f, a, b, c, epsabs, epsrel, limit, workspace):
    """Cauchy principal value of I = \\int_a^b dx f(x) / (x - c).

    Adaptive bisection as in QAG, modified so that no subdivision occurs
    at the singular point x = c; subintervals containing or close to c
    use a special 25 point modified Clenshaw-Curtis rule, all others an
    ordinary 15 point Gauss-Kronrod rule.

    input : f, a, b, c, epsabs, epsrel, limit, workspace
            f ... gsl_function
    """
    return _callback.gsl_integration_qawc(
        f.get_ptr(), a, b, c, epsabs, epsrel, limit, workspace._ptr)
def qaws(f, a, b, qwas_table, epsabs, epsrel, limit, workspace):
    """Integrate f(x) over (a, b) with a singular weight function:
        I = \int_a^b dx f(x) (x-a)^alpha (b-x)^beta log^mu (x-a) log^nu (b-x)
    The weight parameters (alpha, beta, mu, nu) are taken from qwas_table.
    Adaptive bisection (as in QAG) is used; subintervals touching an
    endpoint are handled with a special 25-point modified Clenshaw-Curtis
    rule to control the singularities, the remaining ones with an ordinary
    15-point Gauss-Kronrod rule.

    input : f, a, b, qwas_table, epsabs, epsrel, limit, workspace
            f ... gsl_function
    """
    fn_ptr = f.get_ptr()
    return _callback.gsl_integration_qaws(fn_ptr, a, b, qwas_table._ptr,
                                          epsabs, epsrel, limit,
                                          workspace._ptr)
def qawo(f, a, epsabs, epsrel, limit, workspace, qwao_table):
    """Adaptive integration of f with the oscillatory weight sin(omega x)
    or cos(omega x) defined by qwao_table:
        I = \int dx f(x) sin(omega x)   or   I = \int dx f(x) cos(omega x)
    starting at a. The results are extrapolated with the epsilon-algorithm
    to accelerate convergence; the final extrapolated approximation and an
    absolute error estimate are returned. Subinterval bookkeeping lives in
    `workspace`, whose allocated size bounds `limit`. Subintervals of
    "large" width d (d*omega > 4) are computed with a 25-point
    Clenshaw-Curtis rule, which handles the oscillatory behaviour; "small"
    ones (d*omega < 4) use a 15-point Gauss-Kronrod rule.

    input : f, a, epsabs, epsrel, limit, workspace, qwao_table
            f ... gsl_function
    """
    fn_ptr = f.get_ptr()
    return _callback.gsl_integration_qawo(fn_ptr, a, epsabs, epsrel, limit,
                                          workspace._ptr, qwao_table._ptr)
def qawf(f, a, epsabs, limit, workspace, cycleworkspace, qwao_table):
    """Fourier integral of F over the semi-infinite interval [a, +\infty):
        I = \int_a^{+\infty} dx f(x) sin(omega x)   or   ... cos(omega x)
    omega is taken from qwao_table (its length parameter is overridden by
    this function with a value appropriate for Fourier integration). The
    QAWO algorithm is applied over the cycle subintervals
        C_k = [a + (k-1) c, a + k c],  c = (2 floor(|omega|) + 1) pi/|omega|
    The width c covers an odd number of periods, so the contributions of
    the intervals alternate in sign and decrease monotonically when F is
    positive and monotonically decreasing; their sum is accelerated with
    the epsilon-algorithm.

    The function works to an overall absolute tolerance epsabs: on C_k it
    targets TOL_k = u_k * epsabs, where u_k = (1-p) p^(k-1) and p = 9/10,
    so the geometric series of tolerances sums to epsabs. If integrating a
    subinterval proves difficult, the requirement for subsequent intervals
    is relaxed to TOL_k = u_k * max(epsabs, max_{i<k} E_i), with E_k the
    estimated error on C_k. Subintervals and results are stored in
    `workspace` (at most `limit` entries, bounded by its allocated size);
    each per-cycle QAWO integration uses `cycleworkspace`.

    input : f, a, epsabs, limit, workspace, cycleworkspace, qwao_table
            f ... gsl_function
    """
    fn_ptr = f.get_ptr()
    return _callback.gsl_integration_qawf(fn_ptr, a, epsabs, limit,
                                          workspace._ptr,
                                          cycleworkspace._ptr,
                                          qwao_table._ptr)
| 40.778027 | 95 | 0.657173 |
6a7a06262773f9d48a06c212cf06889f3dcecbd2
| 368 |
py
|
Python
|
Source/11_XML_RPC_MultiCall/server.py
|
rbiotblbk/WBS_T9_2022
|
533156db88ff2fe676564b0e5d6e84e888ab0916
|
[
"MIT"
] | 1 |
2022-02-28T09:49:35.000Z
|
2022-02-28T09:49:35.000Z
|
Source/11_XML_RPC_MultiCall/server.py
|
rbiotblbk/WBS_T9_2022
|
533156db88ff2fe676564b0e5d6e84e888ab0916
|
[
"MIT"
] | null | null | null |
Source/11_XML_RPC_MultiCall/server.py
|
rbiotblbk/WBS_T9_2022
|
533156db88ff2fe676564b0e5d6e84e888ab0916
|
[
"MIT"
] | null | null | null |
"""Minimal XML-RPC server exposing add/multiply with multicall support."""
from xmlrpc.server import SimpleXMLRPCServer


def multiply(x, y):
    """Return the product of x and y."""
    return x * y


def add(x, y):
    """Return the sum of x and y."""
    return x + y


def main():
    """Serve add/multiply on localhost:50000 until interrupted."""
    server = SimpleXMLRPCServer(("localhost", 50000))
    # Allow clients to batch several calls via system.multicall
    server.register_multicall_functions()
    server.register_function(add, "add")
    server.register_function(multiply, "multiply")
    print("Server is On and Listening.....")
    server.serve_forever()


# Guard the server start-up so the module can be imported (e.g. by tests)
# without binding the port and blocking forever.
if __name__ == "__main__":
    main()
| 14.153846 | 49 | 0.714674 |
6a887ea2793ebc873e98725ccbbeb24387450433
| 7,164 |
py
|
Python
|
tests/nlu/featurizers/test_regex_featurizer.py
|
techBeck03/rasa
|
72fef6e7742f5ccb8614c75b6410dff68f137554
|
[
"Apache-2.0"
] | 2 |
2020-02-27T16:41:52.000Z
|
2020-02-27T16:41:57.000Z
|
tests/nlu/featurizers/test_regex_featurizer.py
|
alfredfrancis/rasa
|
d8d226408f20cc2563c3aefbccef3e364a447666
|
[
"Apache-2.0"
] | 56 |
2020-06-09T00:16:14.000Z
|
2020-11-16T00:25:20.000Z
|
tests/nlu/featurizers/test_regex_featurizer.py
|
alfredfrancis/rasa
|
d8d226408f20cc2563c3aefbccef3e364a447666
|
[
"Apache-2.0"
] | 3 |
2019-02-27T10:13:16.000Z
|
2019-07-26T08:33:45.000Z
|
import numpy as np
import pytest
from rasa.nlu.training_data import TrainingData
from rasa.nlu.config import RasaNLUModelConfig
from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer
from rasa.nlu.featurizers.sparse_featurizer.regex_featurizer import RegexFeaturizer
from rasa.nlu.constants import (
TEXT,
RESPONSE,
SPACY_DOCS,
TOKENS_NAMES,
INTENT,
SPARSE_FEATURE_NAMES,
)
from rasa.nlu.tokenizers.spacy_tokenizer import SpacyTokenizer
from rasa.nlu.training_data import Message
@pytest.mark.parametrize(
"sentence, expected, labeled_tokens",
[
(
"hey how are you today",
[
[0.0, 1.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
],
[0],
),
(
"hey 456 how are you",
[
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[1.0, 1.0, 0.0],
],
[1, 0],
),
(
"blah balh random eh",
[
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
],
[],
),
(
"a 1 digit number",
[
[0.0, 0.0, 0.0],
[1.0, 0.0, 1.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[1.0, 0.0, 1.0],
],
[1, 1],
),
],
)
def test_regex_featurizer(sentence, expected, labeled_tokens, spacy_nlp):
    """Check per-token sparse features produced from regex patterns.

    For each parametrized sentence the featurizer must emit the fixture
    matrix (one row per token plus one extra trailing row, one column per
    pattern), and each token must record how many patterns matched it
    (`labeled_tokens` lists the indices of matched tokens).
    """
    from rasa.nlu.featurizers.sparse_featurizer.regex_featurizer import RegexFeaturizer

    # Three pattern columns, in order: number, hello, binary.
    patterns = [
        {"pattern": "[0-9]+", "name": "number", "usage": "intent"},
        {"pattern": "\\bhey*", "name": "hello", "usage": "intent"},
        {"pattern": "[0-1]+", "name": "binary", "usage": "intent"},
    ]
    ftr = RegexFeaturizer({}, known_patterns=patterns)
    # adds tokens to the message
    tokenizer = SpacyTokenizer({})
    message = Message(sentence, data={RESPONSE: sentence})
    message.set(SPACY_DOCS[TEXT], spacy_nlp(sentence))
    tokenizer.process(message)
    result = ftr._features_for_patterns(message, TEXT)
    assert np.allclose(result.toarray(), expected, atol=1e-10)
    # the tokenizer should have added tokens
    assert len(message.get(TOKENS_NAMES[TEXT], [])) > 0
    # the number of regex matches on each token should match
    for i, token in enumerate(message.get(TOKENS_NAMES[TEXT])):
        token_matches = token.get("pattern").values()
        num_matches = sum(token_matches)
        assert num_matches == labeled_tokens.count(i)
@pytest.mark.parametrize(
"sentence, expected, labeled_tokens",
[
(
"lemonade and mapo tofu",
[[1.0, 0.0], [0.0, 0.0], [0.0, 1.0], [0.0, 1.0], [1.0, 1.0]],
[0.0, 2.0, 3.0],
),
(
"a cup of tea",
[[0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [1.0, 0.0], [1.0, 0.0]],
[3.0],
),
(
"Is burrito my favorite food?",
[
[0.0, 0.0],
[0.0, 1.0],
[0.0, 0.0],
[0.0, 0.0],
[0.0, 0.0],
[0.0, 0.0],
[0.0, 1.0],
],
[1.0],
),
("I want club?mate", [[0.0, 0.0], [0.0, 0.0], [1.0, 0.0], [1.0, 0.0]], [2.0]),
],
)
def test_lookup_tables(sentence, expected, labeled_tokens, spacy_nlp):
    """Check sparse features built from lookup tables: one column per
    table (inline `drinks` elements and `plates` loaded from file), one
    row per token plus one extra trailing row as encoded in the fixtures.
    """
    from rasa.nlu.featurizers.sparse_featurizer.regex_featurizer import RegexFeaturizer

    lookups = [
        {
            "name": "drinks",
            "elements": ["mojito", "lemonade", "sweet berry wine", "tea", "club?mate"],
        },
        {"name": "plates", "elements": "data/test/lookup_tables/plates.txt"},
    ]
    ftr = RegexFeaturizer({}, lookup_tables=lookups)
    # adds tokens to the message
    component_config = {"name": "SpacyTokenizer"}
    tokenizer = SpacyTokenizer(component_config)
    message = Message(sentence)
    message.set("text_spacy_doc", spacy_nlp(sentence))
    tokenizer.process(message)
    result = ftr._features_for_patterns(message, TEXT)
    assert np.allclose(result.toarray(), expected, atol=1e-10)
    # the tokenizer should have added tokens
    assert len(message.get(TOKENS_NAMES[TEXT], [])) > 0
    # the number of regex matches on each token should match
    for i, token in enumerate(message.get(TOKENS_NAMES[TEXT])):
        token_matches = token.get("pattern").values()
        num_matches = sum(token_matches)
        assert num_matches == labeled_tokens.count(i)
@pytest.mark.parametrize(
"sentence, expected, expected_cls",
[
("hey how are you today", [0.0, 1.0, 0.0], [0.0, 1.0, 0.0]),
("hey 456 how are you", [0.0, 1.0, 0.0], [1.0, 1.0, 0.0]),
("blah balh random eh", [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]),
("a 1 digit number", [0.0, 0.0, 0.0], [1.0, 0.0, 1.0]),
],
)
def test_regex_featurizer_no_sequence(sentence, expected, expected_cls, spacy_nlp):
    """Check only the first row (first token) and the last row (aggregate)
    of the pattern feature matrix, instead of the full per-token sequence.
    Pattern columns, in order: number, hello, binary.
    """
    patterns = [
        {"pattern": "[0-9]+", "name": "number", "usage": "intent"},
        {"pattern": "\\bhey*", "name": "hello", "usage": "intent"},
        {"pattern": "[0-1]+", "name": "binary", "usage": "intent"},
    ]
    ftr = RegexFeaturizer({}, known_patterns=patterns)
    # adds tokens to the message
    tokenizer = SpacyTokenizer()
    message = Message(sentence)
    message.set(SPACY_DOCS[TEXT], spacy_nlp(sentence))
    tokenizer.process(message)
    result = ftr._features_for_patterns(message, TEXT)
    # first token row and final aggregate row
    assert np.allclose(result.toarray()[0], expected, atol=1e-10)
    assert np.allclose(result.toarray()[-1], expected_cls, atol=1e-10)
def test_regex_featurizer_train():
    """Train the featurizer through the component `train` API and verify
    sparse features of shape (7, 3) are attached to both TEXT and RESPONSE
    of the message, while no features are attached for INTENT.
    """
    patterns = [
        {"pattern": "[0-9]+", "name": "number", "usage": "intent"},
        {"pattern": "\\bhey*", "name": "hello", "usage": "intent"},
        {"pattern": "[0-1]+", "name": "binary", "usage": "intent"},
    ]
    featurizer = RegexFeaturizer.create({}, RasaNLUModelConfig())
    sentence = "hey how are you today 19.12.2019 ?"
    message = Message(sentence)
    message.set(RESPONSE, sentence)
    message.set(INTENT, "intent")
    # Tokenize first so the featurizer has tokens to annotate.
    WhitespaceTokenizer().train(TrainingData([message]))
    featurizer.train(
        TrainingData([message], regex_features=patterns), RasaNLUModelConfig()
    )
    # First row: features of the first token; last row: aggregate row.
    expected = np.array([0, 1, 0])
    expected_cls = np.array([1, 1, 1])
    vecs = message.get(SPARSE_FEATURE_NAMES[TEXT])
    assert (7, 3) == vecs.shape
    assert np.all(vecs.toarray()[0] == expected)
    assert np.all(vecs.toarray()[-1] == expected_cls)
    vecs = message.get(SPARSE_FEATURE_NAMES[RESPONSE])
    assert (7, 3) == vecs.shape
    assert np.all(vecs.toarray()[0] == expected)
    assert np.all(vecs.toarray()[-1] == expected_cls)
    # Intent messages must not receive regex features.
    vecs = message.get(SPARSE_FEATURE_NAMES[INTENT])
    assert vecs is None
| 31.699115 | 87 | 0.538247 |
6ac8fe2008ba4846dbe85df2ed6f332542dc9e75
| 308 |
py
|
Python
|
pacman-arch/test/pacman/tests/sync052.py
|
Maxython/pacman-for-termux
|
3b208eb9274cbfc7a27fca673ea8a58f09ebad47
|
[
"MIT"
] | 23 |
2021-05-21T19:11:06.000Z
|
2022-03-31T18:14:20.000Z
|
source/pacman-6.0.1/test/pacman/tests/sync052.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 11 |
2021-05-21T12:08:44.000Z
|
2021-12-21T08:30:08.000Z
|
source/pacman-6.0.1/test/pacman/tests/sync052.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1 |
2021-09-26T08:44:40.000Z
|
2021-09-26T08:44:40.000Z
|
# pacman test case: `self` (the test object) and `pmpkg` are injected by
# the pacman test harness before this script is executed.
self.description = "sysupgrade with a disabled repo"
# Sync repo offers dummy 1.0-2; the local db has the older 1.0-1 installed.
sp = pmpkg("dummy", "1.0-2")
self.addpkg2db("sync", sp)
lp = pmpkg("dummy", "1.0-1")
self.addpkg2db("local", lp)
self.args = "-Syu"
# Restrict the sync repo to 'Search' usage: -Syu must NOT upgrade from it.
self.db['sync'].option['Usage'] = ['Search']
# Expected: pacman succeeds and dummy stays at 1.0-1.
self.addrule("PACMAN_RETCODE=0")
self.addrule("PKG_VERSION=dummy|1.0-1")
0af27bd5b742c8a1d0f8d3c9aa703a138da5977f
| 7,782 |
py
|
Python
|
alzheimer_model.py
|
bsolano/Alzheimer-Resnets
|
b7b4f0d240f932513ef3427924894358b87dd68f
|
[
"MIT"
] | 3 |
2020-04-01T08:13:31.000Z
|
2020-10-30T01:01:41.000Z
|
alzheimer_model.py
|
bsolano/Alzheimer-Resnets
|
b7b4f0d240f932513ef3427924894358b87dd68f
|
[
"MIT"
] | null | null | null |
alzheimer_model.py
|
bsolano/Alzheimer-Resnets
|
b7b4f0d240f932513ef3427924894358b87dd68f
|
[
"MIT"
] | 1 |
2021-01-27T11:46:49.000Z
|
2021-01-27T11:46:49.000Z
|
# encoding: utf-8
"""
The main implementation.
"""
from transforms import ToTensor
from adni_dataset import ADNI_Dataset
from adni_dataset import NumpyADNI_Dataset
from adni_dataset import NumpyADNI_FolderDataset
from lib.functions import get_class_distribution
from lib.functions import get_test_predicted
from lib.functions import print_info_and_plots
from models.densenet import densenet121
from models.densenet import densenet169
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision.transforms as transforms
from torch.utils.data.sampler import WeightedRandomSampler
from torchsummary import summary
import pickle
import re
from sklearn.metrics import accuracy_score
def test(class_names, data_dir, results_dir, epochs, batch_size, lr_decay_epochs=None, model_file=None, architecture='densenet121', plot_accuracy=None, nesterov=False):
    """Train a DenseNet variant on the preprocessed ADNI data and report metrics.

    Args:
        class_names: ordered list of diagnosis labels (e.g. CN ... AD).
        data_dir: folder holding the NumpyADNI dataset.
        results_dir: folder for checkpoints and pickled loss/accuracy curves.
        epochs: total number of epochs to train up to.
        batch_size: mini-batch size for the training loader.
        lr_decay_epochs: optional list of epochs at which lr_scheduler decays
            the learning rate by 0.1.
        model_file: optional checkpoint filename inside results_dir to resume
            from; its name must contain "-epoch-<n>-".
        architecture: name of a model constructor imported at module level
            ('densenet121' or 'densenet169').
        plot_accuracy: when truthy, also track train/test accuracy per epoch.
        nesterov: enable Nesterov momentum in SGD.
    """
    import platform; print(platform.platform())
    import sys; print('Python ', sys.version)
    import pydicom; print('pydicom ', pydicom.__version__)
    print('pytorch ', torch.__version__)
    # Sets device to GPU if available, else CPU
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # pylint: disable=no-member
    print('Using device:', device)
    # Additional info when running on GPU
    if device.type == 'cuda':
        print(torch.cuda.get_device_name(0))
        print('Memory Usage:')
        print('Allocated:', round(torch.cuda.memory_allocated(0)/1024**3,1), 'GB')
        print('Cached: ', round(torch.cuda.memory_cached(0)/1024**3,1), 'GB')
    # Let cuDNN benchmark kernels for the (fixed-size) inputs
    cudnn.benchmark = True
    # Per-scan transform (disabled: the numpy dataset is already preprocessed)
    #transform = transforms.Compose([ToTensor(spacing=[1,1,1], num_slices=256, aspect='sagittal', cut=(slice(40,214,2),slice(50,200,2),slice(40,240,2)), normalize=True)]) # Hace falta normalizar pero la función de pytorch no funciona en cubos
    # Dataset of preprocessed MRI volumes
    adni_dataset = NumpyADNI_Dataset(data_dir=data_dir)
    # 75/25 train/test split
    train_size = int(0.75 * len(adni_dataset))
    test_size = len(adni_dataset) - train_size
    train_dataset, test_dataset = torch.utils.data.random_split(adni_dataset, [train_size, test_size])
    # Disabled: weighted sampler that would balance class frequencies
    '''targets = torch.tensor(adni_dataset.targets) # pylint: disable=not-callable,disable=no-member
    target_list = targets[train_dataset.indices]
    class_count = [i for i in get_class_distribution(adni_dataset).values()]
    class_weights = 1./torch.tensor(class_count, dtype=torch.float) # pylint: disable=no-member,not-callable
    class_weights_all = class_weights[target_list]
    weighted_sampler = WeightedRandomSampler(
        weights=class_weights_all,
        num_samples=len(class_weights_all),
        replacement=False
    )'''
    # Loaders: batched training loader plus batch-size-1 evaluation loaders
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=False, sampler=None, num_workers=5)
    test_train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=1, num_workers=4)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, num_workers=4)
    print('%d MRI images in training loader...' % (train_size))
    print('%d MRI images in testing loader...' % (test_size))
    # Initialize the model; `architecture` names a constructor imported at module level
    class_ = globals()[architecture]
    model = class_(channels=1, num_classes=len(class_names), drop_rate=0.7).to(device)
    model = torch.nn.DataParallel(model).to(device)
    # Resume from a checkpoint if one was given. `starting_epoch` is set
    # explicitly here instead of the old try/except NameError probe.
    starting_epoch = None
    if model_file is not None:
        model.load_state_dict(torch.load(results_dir+'/'+model_file))
        # Checkpoint filenames embed the epoch number: "...-epoch-<n>-..."
        match = re.match( r'.*-epoch-(\d+)-.*', model_file, re.M)
        starting_epoch = int(match.group(1)) + 1
        print('Loaded file {}, restarting at epoch {}.'.format(model_file, starting_epoch))
    model.train()
    # Print the model summary:
    #summary(model, adni_dataset[0][0].shape)
    # Loss function used to evaluate the candidate solution (the designed
    # topology with its weights).
    criterion = nn.CrossEntropyLoss() # cross entropy
    # Optimizer: stochastic gradient descent with momentum (variants that
    # help avoid local minima during the search).
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, nesterov=nesterov)
    # Training loop bookkeeping
    losses = []
    if plot_accuracy:
        accuracies = []
    else:
        accuracies = None
    # Resume where the checkpoint left off, or start from scratch.
    epoch = 0 if starting_epoch is None else starting_epoch
    while epoch < epochs:
        if lr_decay_epochs is not None:
            lr_scheduler(optimizer, epoch, lr_decay=0.1, lr_decay_epochs=lr_decay_epochs)
        running_loss = 0.0
        for data in train_loader:
            # get the inputs; data is a list of [inputs, labels]
            inputs, labels = data
            labels = labels.to(device)
            # zero the parameter gradients so they do not accumulate
            optimizer.zero_grad()
            # forward + backward + optimize
            outputs = model(inputs)
            # labels are one-hot; argmax converts them to class indices
            loss = criterion(outputs, torch.max(labels, 1)[1]) # pylint: disable=no-member
            loss.backward()
            optimizer.step()
            # accumulate statistics
            running_loss += loss.item()
        print('[epoch %d] pérdida: %.6f' % (epoch, running_loss / train_size))
        losses.append([epoch, running_loss / train_size])
        with open(results_dir+'/'+device.type+'-epoch-'+str(epoch)+'-losses.dump', 'wb') as losses_file:
            pickle.dump(losses, losses_file)
        if plot_accuracy:
            model.eval()
            test_test, predicted_test = get_test_predicted(device, model, test_loader)
            test_train, predicted_train = get_test_predicted(device, model, test_train_loader)
            model.train()
            accuracy_test = accuracy_score(test_test, predicted_test)
            accuracy_train = accuracy_score(test_train, predicted_train)
            print('[epoch %d] exactitud: %.6f (prueba) %.6f (entrenamiento)' % (epoch, accuracy_test, accuracy_train))
            accuracies.append([epoch, accuracy_test, accuracy_train])
            with open(results_dir+'/'+device.type+'-epoch-'+str(epoch)+'-accuracies.dump', 'wb') as accuracies_file:
                pickle.dump(accuracies, accuracies_file)
        # Periodic checkpointing: every 10th epoch, and every epoch from 79 on
        if (epoch % 10 == 9) or (epoch >= 79):
            torch.save(model.state_dict(), results_dir+'/'+device.type+'-epoch-'+str(epoch)+'-alzheimer-' + architecture + '.pth')
        # Next epoch
        epoch += 1
    torch.save(model.state_dict(), results_dir+'/'+device.type+'-alzheimer-' + architecture + '.pth')
    model.eval()
    # Final evaluation on the held-out split (locals renamed from
    # `test`/`predicted` to avoid shadowing this function's name)
    y_true, y_pred = get_test_predicted(device, model, test_loader)
    # Print statistics and plots
    print_info_and_plots(y_true, y_pred, class_names, losses, accuracies)
def lr_scheduler(optimizer, epoch, lr_decay=0.1, lr_decay_epochs=()):
    """Decay the learning rate by *lr_decay* on predefined epochs.

    Args:
        optimizer: torch optimizer whose param groups hold the 'lr' values.
        epoch (int): current epoch number.
        lr_decay (float): multiplicative decay factor.
        lr_decay_epochs: epochs at which to decay. The default is an empty
            tuple (immutable) instead of the original mutable-list default.

    Returns:
        The same optimizer instance, with its learning rates decayed when
        *epoch* is listed in *lr_decay_epochs*, untouched otherwise.
    """
    if epoch not in lr_decay_epochs:
        return optimizer
    print('Learning rate decay of {} on epoch {}.'.format(lr_decay, epoch))
    for param_group in optimizer.param_groups:
        param_group['lr'] *= lr_decay
    return optimizer
# Runs only when executed as the main program, not when imported as a module:
if __name__ == '__main__':
    # Five-way diagnosis classification on the local NumpyADNI dataset,
    # decaying the learning rate once at epoch 69.
    test(class_names=['CN','EMCI','MCI','LMCI','AD'],
         data_dir='./NumpyADNI',
         results_dir='./results',
         epochs=110,
         batch_size=5,
         lr_decay_epochs=[69])
| 40.53125 | 242 | 0.683629 |
7caed9bc46a74a77cfdd9c3d472d4709acb8940d
| 5,547 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/selling/report/territory_target_variance_item_group_wise/territory_target_variance_item_group_wise.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/selling/report/territory_target_variance_item_group_wise/territory_target_variance_item_group_wise.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/selling/report/territory_target_variance_item_group_wise/territory_target_variance_item_group_wise.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, msgprint
from frappe.utils import flt
from erpnext.accounts.utils import get_fiscal_year
from erpnext.controllers.trends import get_period_date_ranges, get_period_month_ranges
def execute(filters=None):
	"""Report entry point for Territory Target Variance (Item Group-Wise).

	Builds one row per (territory, item group) with target/achieved/variance
	triplets for each period plus overall totals, sorted by territory then
	item group. Returns (columns, data) as frappe script reports expect.
	"""
	if not filters: filters = {}
	columns = get_columns(filters)
	period_month_ranges = get_period_month_ranges(filters["period"], filters["fiscal_year"])
	territory_item_group_dict = get_territory_item_month_map(filters)
	data = []
	for territory, territory_items in territory_item_group_dict.items():
		for item_group, monthwise_data in territory_items.items():
			row = [territory, item_group]
			totals = [0, 0, 0]
			for relevant_months in period_month_ranges:
				period_data = [0, 0, 0]
				for month in relevant_months:
					month_data = monthwise_data.get(month, {})
					for i, fieldname in enumerate(["target", "achieved", "variance"]):
						value = flt(month_data.get(fieldname))
						period_data[i] += value
						totals[i] += value
				# variance is derived (target - achieved), not summed from months
				period_data[2] = period_data[0] - period_data[1]
				row += period_data
			totals[2] = totals[0] - totals[1]
			row += totals
			data.append(row)
	return columns, sorted(data, key=lambda x: (x[0], x[1]))
def get_columns(filters):
	"""Validate required filters and build the report's column headers.

	Columns: Territory, Item Group, then Target/Achieved/Variance for every
	period range, and finally the three grand-total columns. Missing
	required filters abort via msgprint(raise_exception=True).
	"""
	for fieldname in ["fiscal_year", "period", "target_on"]:
		if not filters.get(fieldname):
			label = (" ".join(fieldname.split("_"))).title()
			msgprint(_("Please specify") + ": " + label, raise_exception=True)
	columns = [_("Territory") + ":Link/Territory:120", _("Item Group") + ":Link/Item Group:120"]
	# Monthly gets a single month label; longer periods a "Jan - Mar" range
	group_months = False if filters["period"] == "Monthly" else True
	for from_date, to_date in get_period_date_ranges(filters["period"], filters["fiscal_year"]):
		for label in [_("Target") +" (%s)", _("Achieved") + " (%s)", _("Variance") + " (%s)"]:
			if group_months:
				label = label % (_(from_date.strftime("%b")) + " - " + _(to_date.strftime("%b")))
			else:
				label = label % _(from_date.strftime("%b"))
			columns.append(label+":Float:120")
	return columns + [_("Total Target") + ":Float:120", _("Total Achieved") + ":Float:120",
		_("Total Variance") + ":Float:120"]
#Get territory & item group details
def get_territory_details(filters):
	"""Return one dict per territory target row for the fiscal year:
	name, item_group, target_qty, target_amount, distribution_id."""
	return frappe.db.sql("""
		select
			t.name, td.item_group, td.target_qty, td.target_amount, t.distribution_id
		from
			`tabTerritory` t, `tabTarget Detail` td
		where
			td.parent=t.name and td.fiscal_year=%s order by t.name
		""", (filters["fiscal_year"]), as_dict=1)
#Get target distribution details of item group
def get_target_distribution_details(filters):
	"""Return {monthly distribution name: {month: percentage_allocation}}
	for the selected fiscal year."""
	target_details = {}
	for d in frappe.db.sql("""
		select
			md.name, mdp.month, mdp.percentage_allocation
		from
			`tabMonthly Distribution Percentage` mdp, `tabMonthly Distribution` md
		where
			mdp.parent=md.name and md.fiscal_year=%s
		""", (filters["fiscal_year"]), as_dict=1):
		target_details.setdefault(d.name, {}).setdefault(d.month, flt(d.percentage_allocation))
	return target_details
#Get achieved details from sales order
def get_achieved_details(filters, territory, item_groups):
	"""Aggregate submitted Sales Order figures for *territory* (including
	its child territories via the lft/rgt nested-set bounds) within the
	fiscal year. Returns {item_group: {month_name: {quantity, amount}}};
	*item_groups* maps item_code -> item_group."""
	start_date, end_date = get_fiscal_year(fiscal_year = filters["fiscal_year"])[1:]
	lft, rgt = frappe.db.get_value("Territory", territory, ["lft", "rgt"])
	item_details = frappe.db.sql("""
		select
			soi.item_code, sum(soi.stock_qty) as qty, sum(soi.base_net_amount) as amount,
			MONTHNAME(so.transaction_date) as month_name
		from
			`tabSales Order Item` soi, `tabSales Order` so
		where
			soi.parent=so.name and so.docstatus=1
			and so.transaction_date>=%s and so.transaction_date<=%s
			and exists(select name from `tabTerritory` where lft >=%s and rgt <= %s and name=so.territory)
		group by
			month_name, item_code
		""", (start_date, end_date, lft, rgt), as_dict=1)
	item_actual_details = {}
	for d in item_details:
		item_group = item_groups[d.item_code]
		item_actual_details.setdefault(item_group, frappe._dict())\
			.setdefault(d.month_name, frappe._dict({
				"quantity": 0,
				"amount": 0
			}))
		value_dict = item_actual_details[item_group][d.month_name]
		value_dict.quantity += flt(d.qty)
		value_dict.amount += flt(d.amount)
	return item_actual_details
def get_territory_item_month_map(filters):
	"""Build {territory: {item_group: {month: {target, achieved}}}}.

	Monthly targets come from the territory's target rows, split across
	months by its monthly distribution percentages, or evenly (100/12)
	when no distribution is set. Achieved figures come from submitted
	sales orders; which value (qty vs amount) is used depends on the
	`target_on` filter.
	"""
	import datetime
	territory_details = get_territory_details(filters)
	tdd = get_target_distribution_details(filters)
	item_groups = get_item_groups()
	territory_item_group_dict = {}
	for td in territory_details:
		achieved_details = get_achieved_details(filters, td.name, item_groups)
		for month_id in range(1, 13):
			# 2013 is arbitrary: only the English month name matters here
			month = datetime.date(2013, month_id, 1).strftime('%B')
			territory_item_group_dict.setdefault(td.name, {}).setdefault(td.item_group, {})\
				.setdefault(month, frappe._dict({
					"target": 0.0, "achieved": 0.0
				}))
			target_achieved = territory_item_group_dict[td.name][td.item_group][month]
			month_percentage = tdd.get(td.distribution_id, {}).get(month, 0) \
				if td.distribution_id else 100.0/12
			if (filters["target_on"] == "Quantity"):
				target_achieved.target = flt(td.target_qty) * month_percentage / 100
			else:
				target_achieved.target = flt(td.target_amount) * month_percentage / 100
			target_achieved.achieved = achieved_details.get(td.item_group, {}).get(month, {})\
				.get(filters["target_on"].lower())
	return territory_item_group_dict
def get_item_groups():
	"""Return {item_code: item_group} for every Item."""
	return dict(frappe.get_all("Item", fields=["name", "item_group"], as_list=1))
| 35.107595 | 97 | 0.715702 |
86b2abc5900563b4ad2d923bafe2810c972b11f7
| 631 |
py
|
Python
|
Curso-Em-Video-Python/2Exercicios/069_Analise_de_dados_do_grupo.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso-Em-Video-Python/2Exercicios/069_Analise_de_dados_do_grupo.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
Curso-Em-Video-Python/2Exercicios/069_Analise_de_dados_do_grupo.py
|
pedrohd21/Cursos-Feitos
|
b223aad83867bfa45ad161d133e33c2c200d42bd
|
[
"MIT"
] | null | null | null |
# Interactive survey: reads age/sex pairs until the user answers 'N', then
# reports how many adults (>= 18), how many men, and how many women under
# 20 were entered. All prompts/outputs are in Portuguese.
n = sexo = 'M'  # loop-control answer and sex, pre-seeded; overwritten below
maioridade = mulheres = homens = 0
print('=-' * 20)
print('ANALISE DE DADOS')
print('=-' * 20)
while True:
    idade = int(input('Digite sua idade: '))
    # NOTE(review): .split()[0] raises IndexError on empty input — confirm intended
    sexo = str(input('Qual o seu sexo: [M/F] ')).upper().split()[0]
    if idade >= 18:
        maioridade += 1
    if sexo == 'M':
        homens += 1
    if sexo == 'F' and idade < 20:
        mulheres += 1
    n = str(input('Deseja Continuar? [S/N]')).upper().split()[0]
    if n == 'N':
        break
print(f' {maioridade} pessoas tem mais de 18 anos \n {homens} homens foram cadastrados \n {mulheres} mulheres e com menos de 20 anos')
print('Volte sempre!!')
| 31.55 | 134 | 0.570523 |
8113e6ff89cfce1beeb2f06943807c659cbefbfb
| 4,626 |
py
|
Python
|
setup.py
|
Berserker66/ALttPEntranceRandomizer
|
9c681cc65d7281ccded03484b050e8f80ea65dc6
|
[
"MIT"
] | null | null | null |
setup.py
|
Berserker66/ALttPEntranceRandomizer
|
9c681cc65d7281ccded03484b050e8f80ea65dc6
|
[
"MIT"
] | 5 |
2020-01-18T21:10:46.000Z
|
2020-01-25T20:58:19.000Z
|
setup.py
|
Berserker66/ALttPEntranceRandomizer
|
9c681cc65d7281ccded03484b050e8f80ea65dc6
|
[
"MIT"
] | 2 |
2020-01-18T17:36:50.000Z
|
2020-01-22T16:52:08.000Z
|
import os
import shutil
import sys
import sysconfig
from pathlib import Path
import cx_Freeze
# True on a 64-bit interpreter (currently unused below; kept for reference)
is_64bits = sys.maxsize > 2 ** 32
# cx_Freeze's default output folder name, e.g. "exe.win-amd64-3.8"
folder = "exe.{platform}-{version}".format(platform=sysconfig.get_platform(),
                                           version=sysconfig.get_python_version())
buildfolder = Path("build", folder)
sbuildfolder = str(buildfolder)
libfolder = Path(buildfolder, "lib")
library = Path(libfolder, "library.zip")
print("Outputting to: " + sbuildfolder)
icon = "icon.ico"
# Code signing is enabled only when the password file exists (i.e. on the
# developer machine that has the certificate mounted on X:).
if os.path.exists("X:/pw.txt"):
    print("Using signtool")
    with open("X:/pw.txt") as f:
        pw = f.read()
    signtool = r'signtool sign /f X:/_SITS_Zertifikat_.pfx /p ' + pw + r' /fd sha256 /tr http://timestamp.digicert.com/ '
else:
    signtool = None
# Used by _threaded_hash below
from hashlib import sha3_512
import base64
def _threaded_hash(filepath):
hasher = sha3_512()
hasher.update(open(filepath, "rb").read())
return base64.b85encode(hasher.digest()).decode()
# Make sure the output folder exists before anything is copied into it
os.makedirs(buildfolder, exist_ok=True)
def manifest_creation():
    """Hash every file in the build folder and write manifest.json there.

    The manifest records the build timestamp and a SHA3-512/base85 hash per
    file (paths relative to the build folder). Hashing is fanned out over a
    thread pool; the executor is used as a context manager so its workers
    are joined even on error, and the manifest file handle is closed via
    `with` (both were leaked before).
    """
    hashes = {}
    manifestpath = os.path.join(buildfolder, "manifest.json")
    from concurrent.futures import ThreadPoolExecutor
    import json
    with ThreadPoolExecutor() as pool:
        for dirpath, dirnames, filenames in os.walk(buildfolder):
            for filename in filenames:
                path = os.path.join(dirpath, filename)
                hashes[os.path.relpath(path, start=buildfolder)] = pool.submit(_threaded_hash, path)
        # `future` instead of the original `hash`, which shadowed the builtin
        manifest = {"buildtime": buildtime.isoformat(sep=" ", timespec="seconds"),
                    "hashes": {path: future.result() for path, future in hashes.items()}}
    with open(manifestpath, "wt") as manifest_file:
        json.dump(manifest, manifest_file, indent=4)
    print("Created Manifest")
# Maps entry-point script -> frozen executable name
scripts = {"MultiClient.py": "BerserkerMultiClient",
           "MultiMystery.py": "BerserkerMultiMystery",
           "MultiServer.py": "BerserkerMultiServer",
           "gui.py": "BerserkerMultiCreator",
           "Mystery.py": "BerserkerMystery",
           "Adjuster.py": "BerserkerLttPAdjuster"}
exes = []
for script, scriptname in scripts.items():
    # Windows executables need the .exe suffix; Linux ones do not
    exes.append(cx_Freeze.Executable(
        script=script,
        target_name=scriptname + ("" if sys.platform == "linux" else ".exe"),
        icon=icon,
    ))
import datetime
# Build timestamp; also doubles as the calendar version number below
buildtime = datetime.datetime.utcnow()
cx_Freeze.setup(
    name="BerserkerMultiWorld",
    version=f"{buildtime.year}.{buildtime.month}.{buildtime.day}.{buildtime.hour}",
    description="BerserkerMultiWorld",
    executables=exes,
    options={
        "build_exe": {
            "includes": [],
            # Heavy optional packages that must not be bundled
            "excludes": ["numpy", "Cython", "PySide2", "PIL",
                         "pandas"],
            "zip_include_packages": ["*"],
            "zip_exclude_packages": [],
            "include_files": [],
            "include_msvcr": True,
            "replace_paths": [("*", "")],
            "optimize": 2,
            "build_exe": buildfolder
        },
    },
)
def installfile(path, keep_content=False):
    """Copy *path* (a file or a directory) into the build folder.

    Directories are copied recursively into buildfolder/<name>; an existing
    destination directory is removed first unless keep_content is True.
    A missing source only prints a warning.
    """
    dest = buildfolder
    print('copying', path, '->', dest)
    if path.is_dir():
        dest = dest / path.name
        if not keep_content and dest.is_dir():
            shutil.rmtree(dest)
        shutil.copytree(path, dest, dirs_exist_ok=True)
    elif path.is_file():
        shutil.copy(path, dest)
    else:
        print('Warning,', path, 'not found')
# Static data shipped next to the executables
extra_data = ["LICENSE", "data", "EnemizerCLI", "host.yaml", "QUsb2Snes", "meta.yaml"]
for data in extra_data:
    installfile(Path(data))
os.makedirs(buildfolder / "Players", exist_ok=True)
shutil.copyfile("playerSettings.yaml", buildfolder / "Players" / "weightedSettings.yaml")
try:
    from maseya import z3pr
except ImportError:
    print("Maseya Palette Shuffle not found, skipping data files.")
else:
    # maseya Palette Shuffle exists and needs its data files
    print("Maseya Palette Shuffle found, including data files...")
    file = z3pr.__file__
    installfile(Path(os.path.dirname(file)) / "data", keep_content=True)
# Drop machine-local QUsb2Snes state that was copied from the source tree
qusb2sneslog = buildfolder / "QUsb2Snes" / "log.txt"
if os.path.exists(qusb2sneslog):
    os.remove(qusb2sneslog)
qusb2snesconfig = buildfolder / "QUsb2Snes" / "config.ini"
if os.path.exists(qusb2snesconfig):
    os.remove(qusb2snesconfig)
# Strip bundled alttpr sprites, keeping only the .gitignore placeholder
alttpr_sprites_folder = buildfolder / "data" / "sprites" / "alttpr"
for file in os.listdir(alttpr_sprites_folder):
    if file != ".gitignore":
        os.remove(alttpr_sprites_folder / file)
# Sign every produced executable when a signing setup is available
if signtool:
    for exe in exes:
        print(f"Signing {exe.target_name}")
        os.system(signtool + os.path.join(buildfolder, exe.target_name))
manifest_creation()
| 31.256757 | 121 | 0.653048 |
81327fba936b3e85222aba4acdae01031e2e1c0d
| 7,081 |
py
|
Python
|
deprecated/benchmark/ps/distribute_word2vec/paddle/model.py
|
hutuxian/FleetX
|
843c7aa33f5a14680becf058a3aaf0327eefafd4
|
[
"Apache-2.0"
] | 170 |
2020-08-12T12:07:01.000Z
|
2022-03-07T02:38:26.000Z
|
deprecated/benchmark/ps/distribute_word2vec/paddle/model.py
|
hutuxian/FleetX
|
843c7aa33f5a14680becf058a3aaf0327eefafd4
|
[
"Apache-2.0"
] | 195 |
2020-08-13T03:22:15.000Z
|
2022-03-30T07:40:25.000Z
|
deprecated/benchmark/ps/distribute_word2vec/paddle/model.py
|
hutuxian/FleetX
|
843c7aa33f5a14680becf058a3aaf0327eefafd4
|
[
"Apache-2.0"
] | 67 |
2020-08-14T02:07:46.000Z
|
2022-03-28T10:05:33.000Z
|
#!/usr/bin/python
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
from distribute_base import FleetDistRunnerBase
from argument import params_args
import math
class word2vec(FleetDistRunnerBase):
    """Skip-gram word2vec with an NCE-style sampled loss, built on
    PaddlePaddle Fluid for distributed parameter-server training.

    NOTE(review): the expected attributes of ``params`` (dict_size,
    embedding_size, nce_num, is_sparse, batch_size, cpu_num) are inferred
    from usage here — confirm against the ``params_args`` definition.
    """

    def input_data(self, params):
        # Declare the three int64, lod-level-1 inputs: center word,
        # positive context word, and the sampled negative words.
        with fluid.unique_name.guard():
            input_word = fluid.layers.data(name="input_word", shape=[1], dtype='int64', lod_level=1)
            true_word = fluid.layers.data(name='true_label', shape=[1], dtype='int64', lod_level=1)
            neg_word = fluid.layers.data(
                name="neg_label", shape=[1], dtype='int64', lod_level=1)
            self.data = [input_word, true_word, neg_word]
        return self.data

    def net(self, inputs, params):
        # Build the forward graph; returns the mean NCE loss tensor.
        with fluid.unique_name.guard():
            init_width = 0.5 / params.embedding_size
            # Center-word embedding table 'emb', uniform init in [-w, w].
            input_emb = fluid.layers.embedding(
                input=inputs[0],
                is_sparse=params.is_sparse,
                size=[params.dict_size, params.embedding_size],
                param_attr=fluid.ParamAttr(
                    name='emb',
                    initializer=fluid.initializer.Uniform(-init_width, init_width)))
            # Output-side embedding and per-word bias for the positive sample.
            true_emb_w = fluid.layers.embedding(
                input=inputs[1],
                is_sparse=params.is_sparse,
                size=[params.dict_size, params.embedding_size],
                param_attr=fluid.ParamAttr(
                    name='emb_w', initializer=fluid.initializer.Constant(value=0.0)))
            true_emb_b = fluid.layers.embedding(
                input=inputs[1],
                is_sparse=params.is_sparse,
                size=[params.dict_size, 1],
                param_attr=fluid.ParamAttr(
                    name='emb_b', initializer=fluid.initializer.Constant(value=0.0)))
            # Negative samples share the same 'emb_w'/'emb_b' tables.
            neg_word_reshape = fluid.layers.reshape(inputs[2], shape=[-1, 1])
            neg_word_reshape.stop_gradient = True  # labels carry no gradient
            neg_emb_w = fluid.layers.embedding(
                input=neg_word_reshape,
                is_sparse=params.is_sparse,
                size=[params.dict_size, params.embedding_size],
                param_attr=fluid.ParamAttr(
                    name='emb_w', learning_rate=1.0))
            neg_emb_w_re = fluid.layers.reshape(
                neg_emb_w, shape=[-1, params.nce_num, params.embedding_size])
            neg_emb_b = fluid.layers.embedding(
                input=neg_word_reshape,
                is_sparse=params.is_sparse,
                size=[params.dict_size, 1],
                param_attr=fluid.ParamAttr(
                    name='emb_b', learning_rate=1.0))
            neg_emb_b_vec = fluid.layers.reshape(neg_emb_b, shape=[-1, params.nce_num])
            # Positive-pair logit: dot(input_emb, true_emb_w) + bias.
            true_logits = fluid.layers.elementwise_add(
                fluid.layers.reduce_sum(
                    fluid.layers.elementwise_mul(input_emb, true_emb_w),
                    dim=1,
                    keep_dim=True),
                true_emb_b)
            # Logits of the nce_num negative pairs via a batched matmul.
            input_emb_re = fluid.layers.reshape(
                input_emb, shape=[-1, 1, params.embedding_size])
            neg_matmul = fluid.layers.matmul(
                input_emb_re, neg_emb_w_re, transpose_y=True)
            neg_matmul_re = fluid.layers.reshape(neg_matmul, shape=[-1, params.nce_num])
            neg_logits = fluid.layers.elementwise_add(neg_matmul_re, neg_emb_b_vec)
            # nce loss: positives labelled 1.0, negatives labelled 0.0.
            label_ones = fluid.layers.fill_constant_batch_size_like(
                true_logits, shape=[-1, 1], value=1.0, dtype='float32')
            label_zeros = fluid.layers.fill_constant_batch_size_like(
                true_logits, shape=[-1, params.nce_num], value=0.0, dtype='float32')
            true_xent = fluid.layers.sigmoid_cross_entropy_with_logits(true_logits,
                                                                       label_ones)
            neg_xent = fluid.layers.sigmoid_cross_entropy_with_logits(neg_logits,
                                                                      label_zeros)
            cost = fluid.layers.elementwise_add(
                fluid.layers.reduce_sum(
                    true_xent, dim=1),
                fluid.layers.reduce_sum(
                    neg_xent, dim=1))
            avg_cost = fluid.layers.reduce_mean(cost)
        return avg_cost

    def py_reader(self, params):
        # py_reader fed from self.data; double buffering disabled.
        reader = fluid.layers.create_py_reader_by_data(
            capacity=64, feed_list=self.data, name='py_reader', use_double_buffer=False)
        return reader

    def dataset_reader(self, inputs, params):
        # Pipe-based Dataset: each of cpu_num worker threads runs
        # dataset_generator.py to stream training samples.
        dataset = fluid.DatasetFactory().create_dataset()
        dataset.set_use_var(self.data)
        pipe_command = "python dataset_generator.py"
        dataset.set_pipe_command(pipe_command)
        dataset.set_batch_size(params.batch_size)
        thread_num = int(params.cpu_num)
        dataset.set_thread(thread_num)
        return dataset

    def infer_net(self, params):
        # Analogy evaluation: target = emb(b) - emb(a) + emb(c); score the
        # target against all (L2-normalized) vocabulary embeddings and
        # return the top-4 candidate indices with their scores.
        with fluid.unique_name.guard():
            vocab_size = params.dict_size
            emb_size = params.embedding_size
            analogy_a = fluid.layers.data(name="analogy_a", shape=[1], dtype='int64')
            analogy_b = fluid.layers.data(name="analogy_b", shape=[1], dtype='int64')
            analogy_c = fluid.layers.data(name="analogy_c", shape=[1], dtype='int64')
            all_label = fluid.layers.data(
                name="all_label",
                shape=[vocab_size, 1],
                dtype='int64',
                append_batch_size=False)
            # All lookups reuse the trained center-word table 'emb'.
            emb_all_label = fluid.layers.embedding(
                input=all_label, size=[vocab_size, emb_size], param_attr="emb")
            emb_a = fluid.layers.embedding(
                input=analogy_a, size=[vocab_size, emb_size], param_attr="emb")
            emb_b = fluid.layers.embedding(
                input=analogy_b, size=[vocab_size, emb_size], param_attr="emb")
            emb_c = fluid.layers.embedding(
                input=analogy_c, size=[vocab_size, emb_size], param_attr="emb")
            target = fluid.layers.elementwise_add(
                fluid.layers.elementwise_sub(emb_b, emb_a), emb_c)
            emb_all_label_l2 = fluid.layers.l2_normalize(x=emb_all_label, axis=1)
            dist = fluid.layers.matmul(x=target, y=emb_all_label_l2, transpose_y=True)
            values, pred_idx = fluid.layers.topk(input=dist, k=4)
        return values, pred_idx
if __name__ == '__main__':
    # Parse CLI arguments first, then hand control to the Fleet runtime.
    cli_params = params_args()
    word2vec().runtime_main(cli_params)
| 43.709877 | 99 | 0.605141 |
d4a0458ef2f240d7ecbf2cc0b357ac1f5919ad42
| 7,289 |
py
|
Python
|
xiaobu/xiaobu.py
|
hanyanze/FS_AILPB
|
7756551cf926aa6296ec851dd696c97d56e06bca
|
[
"Apache-2.0"
] | null | null | null |
xiaobu/xiaobu.py
|
hanyanze/FS_AILPB
|
7756551cf926aa6296ec851dd696c97d56e06bca
|
[
"Apache-2.0"
] | null | null | null |
xiaobu/xiaobu.py
|
hanyanze/FS_AILPB
|
7756551cf926aa6296ec851dd696c97d56e06bca
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8-*-
# 开源项目地址:https://github.com/wzpan/wukong-robot.git
from snowboy import snowboydecoder
from robot import config, utils, constants, logging, Player
from robot.sdk import LED
from robot.ConfigMonitor import ConfigMonitor
from robot.Conversation import Conversation
from watchdog.observers import Observer
import sys
import os
import fire
import base64
import signal
import random
import hashlib
import urllib3
import requests
import multiprocessing
import _thread as thread
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
logger = logging.getLogger(__name__)
class Xiaobu(object):
    """Voice-assistant controller: snowboy hotword wake-up, conversation
    handling, and optional AIY-button / Muse-BCI triggers."""

    _profiling = False
    _dev = False
    # Acknowledgement sounds; one is picked at random on each wake-up.
    _response_wav = [
        "response1.wav",
        "response2.wav",
        "response3.wav",
        "response4.wav",
        "response5.wav",
    ]

    def init(self):
        # One-time setup: conversation, config/data watchers, optional inputs.
        global conversation  # NOTE(review): declared but never assigned here — looks dead
        self.detector = None
        self._thinking = False
        self._interrupted = False
        config.init()
        self._conversation = Conversation(self._profiling)
        self._conversation.say('你好!我叫小布', True)
        # Watch config and data directories for live configuration reloads.
        self._observer = Observer()
        event_handler = ConfigMonitor(self._conversation)
        self._observer.schedule(event_handler, constants.CONFIG_PATH, False)
        self._observer.schedule(event_handler, constants.DATA_PATH, False)
        self._observer.start()
        if config.get('/LED/enable', False) and config.get('/LED/type') == 'aiy':
            thread.start_new_thread(self._init_aiy_button_event, ())
        if config.get('/muse/enable', False):
            self._wakeup = multiprocessing.Event()
            # NOTE(review): `BCI` is not imported in this module — enabling
            # /muse/enable would raise NameError; confirm the intended import.
            self.bci = BCI.MuseBCI(self._wakeup)
            self.bci.start()
            thread.start_new_thread(self._loop_event, ())

    def _loop_event(self):
        # Background loop: each BCI wake-up interrupts playback, listens
        # for one query and answers it.
        while True:
            self._wakeup.wait()
            self._conversation.interrupt()
            query = self._conversation.activeListen()
            self._conversation.doResponse(query)
            self._wakeup.clear()

    def _signal_handler(self, signal, frame):
        # SIGINT handler: stop the detector loop, clean up, stop watchers.
        self._interrupted = True
        utils.clean()
        self._observer.stop()

    def _detected_callback(self):
        # Wake word detected: play an acknowledgement, then start recording.
        def start_record():
            logger.info('开始录音')  # "recording started"
            print('开始录音')
            self._conversation.isRecording = True;
        if not utils.is_proper_time():
            logger.warning('勿扰模式开启中')  # do-not-disturb is active; ignore wake-up
            return
        if self._conversation.isRecording:
            logger.warning('正在录音中,跳过')  # already recording; skip
            return
        self._conversation.interrupt()
        if config.get('/LED/enable', False):
            LED.wakeup()
        # start_record runs when the acknowledgement sound finishes.
        Player.play(constants.getData(random.choice(self._response_wav)), onCompleted=start_record, wait=True)
        # File-based IPC: signal the external process that input has started.
        with open("../communication/InPut.txt", "w") as file_writer:
            file_writer.write("true")

    def _do_not_bother_on_callback(self):
        # Hotword-toggled do-not-disturb ON (only if the switch is enabled).
        if config.get('/do_not_bother/hotword_switch', False):
            utils.do_not_bother = True
            Player.play(constants.getData('off.wav'))
            logger.info('勿扰模式打开')  # do-not-disturb enabled

    def _do_not_bother_off_callback(self):
        # Hotword-toggled do-not-disturb OFF.
        if config.get('/do_not_bother/hotword_switch', False):
            utils.do_not_bother = False
            Player.play(constants.getData('on.wav'))
            logger.info('勿扰模式关闭')  # do-not-disturb disabled

    def _interrupt_callback(self):
        # Polled by snowboy: returning True stops the detector loop.
        return self._interrupted

    def _init_aiy_button_event(self):
        # Blocking loop: each AIY button press triggers one listen/answer turn.
        from aiy.board import Board
        with Board() as board:
            while True:
                board.button.wait_for_press()
                self._conversation.interrupt()
                query = self._conversation.activeListen()
                self._conversation.doResponse(query)

    def run(self):
        # Entry point: initialise, install SIGINT handler, start detection.
        self.init()
        # capture SIGINT signal, e.g., Ctrl+C
        signal.signal(signal.SIGINT, self._signal_handler)
        try:
            self.initDetector()
        except AttributeError:
            logger.error('初始化离线唤醒功能失败')  # offline wake-up init failed
            pass

    def initDetector(self):
        # (Re)build the snowboy hotword detector and run its blocking loop.
        if self.detector is not None:
            self.detector.terminate()
        if config.get('/do_not_bother/hotword_switch', False):
            # Wake word plus the two do-not-disturb toggle hotwords.
            models = [
                constants.getHotwordModel(config.get('hotword', 'xiaobu.pmdl')),
                constants.getHotwordModel(utils.get_do_not_bother_on_hotword()),
                constants.getHotwordModel(utils.get_do_not_bother_off_hotword())
            ]
        else:
            models = constants.getHotwordModel(config.get('hotword', 'xiaobu.pmdl'))
        self.detector = snowboydecoder.HotwordDetector(models, sensitivity=config.get('sensitivity', 0.5))
        # main loop
        try:
            if config.get('/do_not_bother/hotword_switch', False):
                # Callback order must match the model list above.
                callbacks = [self._detected_callback,
                             self._do_not_bother_on_callback,
                             self._do_not_bother_off_callback]
            else:
                callbacks = self._detected_callback
            self.detector.start(detected_callback=callbacks,
                                audio_recorder_callback=self._conversation.converse,
                                interrupt_check=self._interrupt_callback,
                                silent_count_threshold=config.get('silent_threshold', 15),
                                recording_timeout=config.get('recording_timeout', 5) * 4,
                                sleep_time=0.03)
            self.detector.terminate()
        except Exception as e:
            logger.critical('离线唤醒机制初始化失败:{}'.format(e))

    def help(self):
        # Print CLI usage (user-facing text kept verbatim).
        print("""=====================================================================================
python3 xiaobu.py [命令]
可选命令:
train <w1> <w2> <w3> <m> - 传入三个wav文件,生成snowboy的.pmdl模型
w1, w2, w3 表示三个1~3秒的唤醒词录音
m 表示snowboy的.pmdl模型
=====================================================================================""")

    def profiling(self):
        """Run with per-stage timing output enabled."""
        logger.info('性能调优')  # performance tuning mode
        self._profiling = True
        self.run()

    def train(self, w1, w2, w3, m):
        '''Upload three 1–3 s wake-word WAV recordings to the snowboy API
        and save the returned .pmdl model to *m*.'''
        def get_wave(fname):
            # Base64-encode a wav file for the JSON payload.
            with open(fname, 'rb') as infile:
                return base64.b64encode(infile.read()).decode('utf-8')
        url = 'https://snowboy.kitt.ai/api/v1/train/'
        data = {
            "name": "xiaobu",
            "language": "zh",
            "token": config.get('snowboy_token', '', True),
            "voice_samples": [
                {"wave": get_wave(w1)},
                {"wave": get_wave(w2)},
                {"wave": get_wave(w3)}
            ]
        }
        response = requests.post(url, json=data)
        if response.ok:
            with open(m, "wb") as outfile:
                outfile.write(response.content)
            return 'Snowboy模型已保存至{}'.format(m)
        else:
            return "Snowboy模型生成失败,原因:{}".format(response.text)
if __name__ == '__main__':
    # No args: run the assistant; '-h': show usage; otherwise let fire
    # expose the Xiaobu methods (e.g. `train`, `profiling`) as a CLI.
    if len(sys.argv) == 1:
        Xiaobu().run()
    elif '-h' in sys.argv:
        Xiaobu().help()
    else:
        fire.Fire(Xiaobu)
| 35.043269 | 110 | 0.571135 |
be19981a801625771806a4e2ca98816cd8104174
| 3,794 |
py
|
Python
|
Metadataextraction/getGeoPackageInfo.py
|
corneliazy/Geosoftware2
|
8604c79c58a61b84c602f16b5f1e74e30dfcbd0e
|
[
"MIT"
] | null | null | null |
Metadataextraction/getGeoPackageInfo.py
|
corneliazy/Geosoftware2
|
8604c79c58a61b84c602f16b5f1e74e30dfcbd0e
|
[
"MIT"
] | 47 |
2018-11-13T13:55:01.000Z
|
2019-09-16T13:38:11.000Z
|
Metadataextraction/getGeoPackageInfo.py
|
corneliazy/Geosoftware2
|
8604c79c58a61b84c602f16b5f1e74e30dfcbd0e
|
[
"MIT"
] | 4 |
2018-11-27T12:36:51.000Z
|
2020-10-14T18:07:04.000Z
|
import click # used to print something
import extractTool # used for the the transformation and prints
import sqlite3 # used to access the geopackage database, :see: https://docs.python.org/2/library/sqlite3.html
from scipy.spatial import ConvexHull # used to calculate the convex hull
"""
Function for extracting the bounding box of a geopackage file
:param filepath: path to the file
:param detail: specifies the level of detail of the geospatial extent (bbox or convex hull)
:param time: boolean variable, if it is true the user gets the temporal extent instead of the spatial extent
:returns: spatial and temporal information in the format [[bounding box],[convex Hull],[temporal extent]]
"""
def getGeopackagebbx(filepath, detail, time):
    """Extract spatial and/or temporal extent from a GeoPackage file.

    :param filepath: path to the GeoPackage file
    :param detail: 'bbox' or 'convexHull' — which spatial extent to compute
    :param time: truthy to request the temporal extent as well
    :returns: [bounding box, convex hull, temporal extent], where each
              entry not requested is [None]
    """
    bbox_val = geopackage_bbox(filepath) if detail == 'bbox' else [None]
    convex_hull_val = geopackage_convex_hull(filepath) if detail == 'convexHull' else [None]
    time_val = geopackage_time(filepath) if time else [None]
    return [bbox_val, convex_hull_val, time_val]
"""
Function that should extract the temporal extent of a geopackage file, but for now there is no time value for geopackage files. So it just returns None.
:param filepath: path to the file
:returns: None
"""
def geopackage_time(filepath):
    """GeoPackage files carry no temporal metadata here; always [None].

    :param filepath: path to the file (unused, kept for a uniform signature)
    """
    click.echo("There is no time-value for GeoPackage files.")
    return [None]
"""
Function for extracting the convex hull
:param filepath: path to the file
:returns: convex hull of the geojson
"""
def geopackage_convex_hull(filepath):
    """Compute the convex hull of the per-table extent corners of a GeoPackage.

    Reads gpkg_contents, transforms each table's (min, min) and (max, max)
    corner to WGS84, then returns the convex hull as a list of [lon, lat]
    points.

    :param filepath: path to the GeoPackage file
    :returns: list of hull vertices, each [lon, lat]
    """
    conn = sqlite3.connect(filepath)
    try:
        c = conn.cursor()
        # Row layout: (min_x, min_y, max_x, max_y, srs_id) — srs_id is index 4.
        c.execute("""SELECT min_x, min_y, max_x, max_y, srs_id FROM gpkg_contents""")
        points = c.fetchall()
    finally:
        # Bug fix: the original never closed the connection.
        conn.close()
    point_list = []
    for row in points:
        # Bug fix: the original indexed row[5], which is out of range for the
        # 5-column result (IndexError); the CRS id lives at index 4.
        point_list.append(extractTool.transformToWGS84(row[0], row[1], row[4]))
        point_list.append(extractTool.transformToWGS84(row[2], row[3], row[4]))
    hull = ConvexHull(point_list)
    convex_hull = []
    for vertex in hull.vertices:
        # hull.vertices are indices into point_list, in hull order.
        convex_hull.append([point_list[vertex][0], point_list[vertex][1]])
    return convex_hull
"""
Function for extracting the bbox using sqlite3
:see: https://www.geopackage.org/spec121/index.html#_contents_2
:param filepath: path to the file
:returns: bounding box of the geopackage in the format [minlon, minlat, maxlon, maxlat]
"""
def geopackage_bbox(filepath):
    """Extract the overall bounding box of a GeoPackage via gpkg_contents.

    :see: https://www.geopackage.org/spec121/index.html#_contents_2
    :param filepath: path to the file
    :returns: [minlon, minlat, maxlon, maxlat] in WGS84, or [None] when the
              file declares no CRS (extent is then not persisted)
    :raises: re-raises on missing/empty gpkg_contents rows
    """
    conn = sqlite3.connect(filepath)
    c = conn.cursor()
    c.execute("""SELECT min(min_x), min(min_y), max(max_x), max(max_y), srs_id FROM gpkg_contents""")
    row = c.fetchall()
    try:
        min_lon = row[0][0]
        min_lat = row[0][1]
        max_lon = row[0][2]
        max_lat = row[0][3]
        myCRS = row[0][4]
    except Exception:
        click.echo("There are no coordinate values in this file.")
        raise
    # Bug fix: crs_info was previously unbound when no CRS was provided,
    # raising NameError at the `if (crs_info)` check below.
    crs_info = False
    if ((myCRS == "CRS84" or myCRS == 4326) and (min_lon and min_lat)):
        # Already WGS84 — use coordinates as-is.
        crs_info = True
        bbox = [min_lon, min_lat, max_lon, max_lat]
    elif (myCRS):
        # Known CRS — reproject both corners to WGS84.
        crs_info = True
        min_lon_t, min_lat_t = extractTool.transformToWGS84(min_lon, min_lat, myCRS)
        max_lon_t, max_lat_t = extractTool.transformToWGS84(max_lon, max_lat, myCRS)
        bbox = [min_lon_t, min_lat_t, max_lon_t, max_lat_t]
    else:
        click.echo("There is no crs provided.")
        bbox = [min_lon, min_lat, max_lon, max_lat]
    if (crs_info):
        extractTool.print_pretty_bbox(filepath, bbox, "GeoJSON")
        return bbox
    else:
        click.echo("Missing CRS -----> Boundingbox will not be saved in zenodo.")
        return [None]
if __name__ == '__main__':
    # NOTE(review): getGeopackagebbx requires (filepath, detail, time);
    # calling it with no arguments raises TypeError — confirm whether a
    # click-style CLI wrapper was intended here.
    getGeopackagebbx()
| 33.875 | 152 | 0.689246 |
07eb3d8e0ceff85151df4363da58499e8f6e90cb
| 374 |
py
|
Python
|
PINp/2014/Gryadin_V_D/task_2_46.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
PINp/2014/Gryadin_V_D/task_2_46.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
PINp/2014/Gryadin_V_D/task_2_46.py
|
YukkaSarasti/pythonintask
|
eadf4245abb65f4400a3bae30a4256b4658e009c
|
[
"Apache-2.0"
] | null | null | null |
# Task 2, variant 46.
# Print a favourite quotation by Abu'l-Faraj bin Harun;
# the author must be credited on a separate line.
# Gryadin V. D.
print("Пьянство-мать всех пороков.")  # "Drunkenness is the mother of all vices."
print("\n Абу-ль-Фарадж бин Харун")  # author credit on its own line
input("\n\nНажмите Enter для завершения")  # "Press Enter to finish" — keeps the console open
| 34 | 93 | 0.772727 |
ed2c48d8b47fc26987d8f6412e7188290fb6d13f
| 1,240 |
py
|
Python
|
crypto/Random Guess/script.py
|
killua4564/2019-AIS3-preexam
|
b13b5c9d3a2ec8beef7cca781154655bb51605e3
|
[
"MIT"
] | 1 |
2019-06-15T11:45:41.000Z
|
2019-06-15T11:45:41.000Z
|
crypto/Random Guess/script.py
|
killua4564/2019-AIS3-preexam
|
b13b5c9d3a2ec8beef7cca781154655bb51605e3
|
[
"MIT"
] | null | null | null |
crypto/Random Guess/script.py
|
killua4564/2019-AIS3-preexam
|
b13b5c9d3a2ec8beef7cca781154655bb51605e3
|
[
"MIT"
] | null | null | null |
from pwn import *
from sympy import invert, gcd

# CTF challenge: the server prints ten consecutive outputs N0..N9 of a
# linear congruential generator N_{k+1} = a*N_k + b (mod c); we must
# predict a later output to get the flag.
conn = remote("pre-exam-chals.ais3.org", 10200)
conn.recvuntil(":\n")
conn.recvuntil(" \n")
# Parse the published outputs from the "N = x, y, ..." banner line.
n = list(map(int, str(conn.recvuntil("\n").strip(b"\n").strip(b"N = "), 'utf-8').split(', ')))
conn.recvuntil("+\n")
'''
N1 = a * N0 + b % c
N2 = a * N1 + b % c
N3 = a * N2 + b % c
N4 = a * N3 + b % c
N2 - N1 = a * (N1 - N0) % c ... (1)
N3 - N2 = a * (N2 - N1) % c ... (2)
N4 - N3 = a * (N3 - N2) % c ... (3)
first:
N0 * N3 = N0 * (a * N2 + b) % c
N1 * N2 = (a * N0 + b) * N2 % c
so:
N0 * N3 - N1 * N2 % c = 0
(1) * (3) - (2) * (2) % c
==> a ** 2 * (N1 - N0) * (N3 - N2) - a ** 2 * (N2 - N1) * (N2 - N1) % c = 0
==> (N2 - N1) * (N4 - N3) - (N3 - N2) * (N3 - N2) % c = 0
'''
# Recover the modulus c: each expression below is a multiple of c (see the
# derivation above), so their gcd converges to c (times small factors).
c = abs((n[1] - n[0]) * (n[3] - n[2]) - (n[2] - n[1]) * (n[2] - n[1]))
for i in range(4, 10):
    c = gcd(abs((n[i-2] - n[i-3]) * (n[i] - n[i-1]) - (n[i-1] - n[i-2]) * (n[i-1] - n[i-2])), c)
# Divide out factors shared with consecutive-output differences so that
# (n[1] - n[0]) is invertible modulo c in the step below.
for index in range(1, 10):
    c = c // gcd(n[index-1] - n[index], c)
# a = (N2 - N1) * (N1 - N0)^-1 (mod c); then b from N1 = a*N0 + b (mod c).
a = int(invert(n[1] - n[0], c)) * (n[2] - n[1]) % c
b = (n[1] - a * n[0]) % c
f = lambda x: (a * x + b) % c
# Step the recovered LCG 100 times past the last published output and
# submit the prediction.
x = n[-1]
for _ in range(100):
    x = f(x)
conn.sendline(str(x))
conn.recvuntil("\n")
conn.interactive()
# AIS3{GGEZ!!LiNe42_COngRuen7i4l_6eNErATor}
| 22.962963 | 94 | 0.445161 |
9c7e54b5ddb0fab0a567468ece64f19ec1347ebf
| 5,976 |
py
|
Python
|
Packs/Gmail/Integrations/Gmail/Gmail_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/Gmail/Integrations/Gmail/Gmail_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/Gmail/Integrations/Gmail/Gmail_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
# Fixture: a Gmail API users.messages.get-style message resource without
# any labelIds, used as input for the get_email_context tests below.
MOCK_MAIL_NO_LABELS = {
    u'internalDate': u'1572251535000',
    u'historyId': u'249781',
    u'payload': {
        u'mimeType': u'multipart/mixed',
        u'body': {u'size': 0},
        u'partId': u'',
        u'filename': u'',
        u'headers': [
            {
                u'name': u'Received',
                u'value': u'from 1041831412594 named unknown by gmailapi.google.com with '
                          u'HTTPREST; Mon, 28 Oct 2019 04:32:15 -0400'
            }, {
                u'name': u'Content-Type',
                u'value': u'mixed; boundary="===============4922146810840031257=="'
            }, {
                u'name': u'MIME-Version',
                u'value': u'1.0'
            }, {
                u'name': u'to',
                u'value': u'<some_mail>'
            }, {
                u'name': u'cc',
                u'value': u''
            }, {
                u'name': u'bcc',
                u'value': u''
            }, {
                u'name': u'from',
                u'value': u'<some_mail>'
            }, {
                u'name': u'subject',
                u'value': u'a mail subject'
            }, {
                u'name': u'reply-to',
                u'value': u''
            }, {
                u'name': u'Date',
                u'value': u'Mon, 28 Oct 2019 04:32:15 -0400'
            }, {
                u'name': u'Message-Id',
                u'value': u'<some_id>'
            }
        ],
        u'parts': [
            {
                u'mimeType': u'text/plain',
                u'headers': [
                    {
                        u'name': u'Content-Type',
                        u'value': u'text/plain; charset="utf-8"'
                    }, {
                        u'name': u'MIME-Version',
                        u'value': u'1.0'
                    }, {
                        u'name': u'Content-Transfer-Encoding',
                        u'value': u'base64'
                    }
                ],
                u'body': {
                    u'data': u'<data>',
                    u'size': 9
                },
                u'partId': u'0',
                u'filename': u''
            }
        ]
    },
    u'snippet': u'some info',
    u'sizeEstimate': 637,
    u'threadId': u'<id>',
    u'id': u'<id>'
}
# Expected context produced by get_email_context for MOCK_MAIL_NO_LABELS:
# headers mirrored into Name/Value pairs, Labels empty.
EXPECTED_GMAIL_CONTEXT = {
    'To': u'<some_mail>',
    'Body': u'',
    'From': u'<some_mail>',
    'Attachments': u'',
    'Format': u'mixed',
    'Cc': u'',
    'Labels': '',
    'Mailbox': 'some_mail',
    'Headers': [
        {
            'Name': u'Received',
            'Value': u'from 1041831412594 named '
                     u'unknown by gmailapi.google.com with HTTPREST; Mon, 28 Oct 2019 04:32:15 -0400'
        }, {
            'Name': u'Content-Type',
            'Value': u'mixed; boundary="===============4922146810840031257=="'
        }, {
            'Name': u'MIME-Version',
            'Value': u'1.0'
        }, {
            'Name': u'to',
            'Value': u'<some_mail>'
        }, {
            'Name': u'cc',
            'Value': u''
        }, {
            'Name': u'bcc', 'Value': u''
        }, {
            'Name': u'from', 'Value': u'<some_mail>'
        }, {
            'Name': u'subject',
            'Value': u'a mail subject'
        }, {
            'Name': u'reply-to',
            'Value': u''
        }, {
            'Name': u'Date',
            'Value': u'Mon, 28 Oct 2019 04:32:15 -0400'
        }, {
            'Name': u'Message-Id',
            'Value': u'<some_id>'
        }
    ],
    'Html': None,
    'RawData': None,
    'ThreadId': u'<id>',
    'Date': 'Mon, 28 Oct 2019 04:32:15 -0400',
    'Bcc': u'',
    'Type': 'Gmail',
    'ID': u'<id>',
    'Subject': u'a mail subject'
}
def test_timestamp_to_date():
    """create_base_time merges an epoch-ms timestamp with a header date."""
    from Gmail import create_base_time
    ts = '1566819604000'
    cases = [
        # fully valid RFC-2822 header date (carries a UTC offset)
        ("Mon, 26 Aug 2019 14:40:04 +0300", "Mon, 26 Aug 2019 14:40:04 +0300"),
        # missing weekday but still carries the UTC offset
        ("26 Aug 2019 14:40:04 +0300", "Mon, 26 Aug 2019 14:40:04 +0300"),
        # no UTC offset at all: fall back to the timestamp
        ("25 Aug 2019 06:25:38", "Mon, 26 Aug 2019 11:40:04 -0000"),
    ]
    for header_date, expected in cases:
        assert str(create_base_time(ts, header_date)) == expected
def test_move_to_gmt():
    """move_to_gmt normalises an RFC-2822 date to ISO-8601 Zulu time."""
    from Gmail import move_to_gmt
    assert str(move_to_gmt("Mon, 26 Aug 2019 14:40:04 +0300")) == "2019-08-26T11:40:04Z"
    assert str(move_to_gmt("Mon, 26 Aug 2019 14:40:04 -0000")) == "2019-08-26T14:40:04Z"
def test_no_label_mail_context_creation():
    """A message without labelIds still yields the expected context fields."""
    from Gmail import get_email_context
    actual, _, _ = get_email_context(MOCK_MAIL_NO_LABELS, "some_mail")
    for field in ('Labels', 'To', 'From', 'Subject'):
        assert actual.get(field) == EXPECTED_GMAIL_CONTEXT.get(field)
def test_parse_privileges():
    """parse_privileges drops empty entries and omits blank serviceIds."""
    from Gmail import parse_privileges
    raw = [
        {'serviceId': '', 'privilegeName': 'name_no_id'},
        {'serviceId': '', 'privilegeName': ''},
        {'serviceId': 'id', 'privilegeName': 'name'},
    ]
    expected = [{'ServiceID': 'id', 'Name': 'name'}, {'Name': 'name_no_id'}]
    assert sorted(parse_privileges(raw)) == sorted(expected)
def test_dict_keys_snake_to_camelcase():
    """snake_case keys become camelCase (e.g. family_name -> familyName);
    values are left untouched."""
    from Gmail import dict_keys_snake_to_camelcase
    source = {
        'user_name': 'user1',
        'user_id': '2'
    }
    expected = {'userName': 'user1', 'userId': '2'}
    assert dict_keys_snake_to_camelcase(source) == expected
| 33.385475 | 120 | 0.483601 |
92d17d988d3f0a827fc2777d4baab9f04dfc4510
| 564 |
py
|
Python
|
backend/alembic/versions/943cf797c278_task_hint_types_are_ints.py
|
jinnn-dev/patholearn
|
b4e6a18cfbf963e71640ed6cac3fc3a618a7ae15
|
[
"MIT"
] | 1 |
2022-02-20T12:45:04.000Z
|
2022-02-20T12:45:04.000Z
|
backend/alembic/versions/943cf797c278_task_hint_types_are_ints.py
|
JamesNeumann/learning-by-annotations
|
c2b5e4b653eeb1c973aa5a7dad35ac8be18cb1ad
|
[
"MIT"
] | 21 |
2021-11-01T10:13:56.000Z
|
2021-12-02T10:02:13.000Z
|
backend/alembic/versions/943cf797c278_task_hint_types_are_ints.py
|
jinnn-dev/patholearn
|
b4e6a18cfbf963e71640ed6cac3fc3a618a7ae15
|
[
"MIT"
] | 1 |
2021-12-16T18:20:55.000Z
|
2021-12-16T18:20:55.000Z
|
"""task hint types are ints
Revision ID: 943cf797c278
Revises: 8e8247e7893c
Create Date: 2021-09-23 21:56:07.338197
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "943cf797c278"
down_revision = "8e8247e7893c"
branch_labels = None
depends_on = None
def upgrade():
    """No-op upgrade: Alembic autogenerate emitted no operations for this
    revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
def downgrade():
    """No-op downgrade: nothing to revert since upgrade performs no DDL."""
    # ### commands auto generated by Alembic - please adjust! ###
    pass
    # ### end Alembic commands ###
| 19.448276 | 65 | 0.687943 |
130a7f97e27414a4e81f3ba48080e5033b44681e
| 1,433 |
py
|
Python
|
documentation_src/doc_examples/parse_json_ebnf.py
|
jecki/DHParser
|
c6c1bd7db2de85b5997a3640242f4f444532304e
|
[
"Apache-2.0"
] | 2 |
2020-12-25T19:37:42.000Z
|
2021-03-26T04:59:12.000Z
|
documentation_src/doc_examples/parse_json_ebnf.py
|
jecki/DHParser
|
c6c1bd7db2de85b5997a3640242f4f444532304e
|
[
"Apache-2.0"
] | 6 |
2018-08-07T22:48:52.000Z
|
2021-10-07T18:38:20.000Z
|
documentation_src/doc_examples/parse_json_ebnf.py
|
jecki/DHParser
|
c6c1bd7db2de85b5997a3640242f4f444532304e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""JSON-parser-example from the DHParser-manual"""

from DHParser.dsl import create_parser

# EBNF grammar for JSON. Leading-underscore symbols are @disposable and are
# removed from the resulting syntax tree; insignificant whitespace and the
# dropped string tokens do not appear in the tree either.
json_grammar = r"""
@literalws = right
@drop = whitespace, strings
@disposable = /_\w+/
json = ~ _element _EOF
_element = object | array | string | number | _bool | null
object = "{" member { "," §member } §"}"
member = string §":" _element
array = "[" [ _element { "," _element } ] §"]"
string = `"` §_CHARACTERS `"` ~
number = INT [ FRAC ] [ EXP ] ~
_bool = true | false
true = `true` ~
false = `false` ~
null = "null"
_CHARACTERS = { PLAIN | ESCAPE }
PLAIN = /[^"\\]+/
ESCAPE = /\\[\/bnrt\\]/ | UNICODE
UNICODE = "\u" HEX HEX
HEX = /[0-9a-fA-F][0-9a-fA-F]/
INT = [NEG] ( /[1-9][0-9]+/ | /[0-9]/ )
NEG = `-`
FRAC = DOT /[0-9]+/
DOT = `.`
EXP = (`E`|`e`) [`+`|`-`] /[0-9]+/
_EOF = !/./
"""

# Compile the grammar into a callable parser object named 'JSON'.
json_parser = create_parser(json_grammar, 'JSON')
if __name__ == '__main__':
    import sys
    # Parse the file given on the command line, or a tiny built-in sample.
    args = sys.argv[1:]
    if args:
        with open(args[0], 'r', encoding='utf-8') as src:
            json_text = src.read()
    else:
        # just a test
        json_text = '{ "one": 1, "two": 2 }'
    syntax_tree = json_parser(json_text)
    print(syntax_tree.serialize(how='indented'))
| 28.098039 | 65 | 0.466853 |
133d06d5d5059c3c417aad2c955a5541e035e628
| 3,214 |
py
|
Python
|
app/t1health_app/static/fusioncharts-suite-xt/integrations/django/samples/fusioncharts/samples/chart_annotation.py
|
siyaochen/Tier1Health
|
536591a7534bbb3fb27fe889bfed9de152ec1864
|
[
"MIT"
] | 14 |
2016-11-03T19:06:21.000Z
|
2021-11-24T09:05:09.000Z
|
app/t1health_app/static/fusioncharts-suite-xt/integrations/django/samples/fusioncharts/samples/chart_annotation.py
|
siyaochen/Tier1Health
|
536591a7534bbb3fb27fe889bfed9de152ec1864
|
[
"MIT"
] | 10 |
2022-02-16T07:17:49.000Z
|
2022-03-08T12:43:51.000Z
|
asset/integrations/django/samples/fusioncharts/samples/chart_annotation.py
|
Piusshungu/catherine-junior-school
|
5356f4ff5a5c8383849d32e22a60d638c35b1a48
|
[
"MIT"
] | 17 |
2016-05-19T13:16:34.000Z
|
2021-04-30T14:38:42.000Z
|
from django.shortcuts import render
from django.http import HttpResponse
# Include the `fusioncharts.py` file which has required functions to embed the charts in html page
from ..fusioncharts import FusionCharts
# Loading Data from a Static JSON String
# It is a example to show a spline chart where data is passed as JSON string format.
# The `chart` method is defined to load chart data from an JSON string.
def chart(request):
    """Django view: build a FusionCharts spline chart from a static JSON
    string (including an `annotations` block that highlights the peak data
    point) and render it into `index.html`.

    The template receives the chart's generated JS/HTML as `output` and a
    page title as `chartTitle`.
    """
    # Constructor args: chart type, DOM id, width, height, container id,
    # data format, data source.
    spline = FusionCharts("spline", "ex1", 600, 400, "chart-1", "json",
        # The chart data is passed as a string to the `dataSource` parameter.
        """{
"chart":
{
"caption": "Bakersfield Central - Total footfalls",
"subCaption": "Last week",
"xAxisName": "Day",
"yAxisName": "No. of Visitors (In 1000s)",
"showValues": "0",
"theme": "fusion"
},
"annotations":{
"groups": [
{
"id": "anchor-highlight",
"items": [
{
"id": "high-star",
"type": "circle",
"x": "$dataset.0.set.2.x",
"y": "$dataset.0.set.2.y",
"radius": "12",
"color": "#6baa01",
"border": "2",
"borderColor": "#f8bd19"
},
{
"id": "label",
"type": "text",
"text": "Highest footfall 25.5K",
"fillcolor": "#6baa01",
"rotate": "90",
"x": "$dataset.0.set.2.x+75",
"y": "$dataset.0.set.2.y-2"
}
]
}
]
},
"data": [
{
"label": "Mon",
"value": "15123"
},
{
"label": "Tue",
"value": "14233"
},
{
"label": "Wed",
"value": "25507"
},
{
"label": "Thu",
"value": "9110"
},
{
"label": "Fri",
"value": "15529"
},
{
"label": "Sat",
"value": "20803"
},
{
"label": "Sun",
"value": "19202"
}
]
}""")
    # returning complete JavaScript and HTML code, which is used to generate chart in the browsers.
    return render(request, 'index.html', {'output' : spline.render(), 'chartTitle': 'Chart Annotation'})
| 37.372093 | 102 | 0.350965 |
136ea55f2085e62d10c0882933f10aea80a27d1b
| 570 |
py
|
Python
|
TUMAGA/obline.py
|
wittrup/crap
|
a77474588fd54a5a998e24df7b1e6e2ab473ded1
|
[
"MIT"
] | 1 |
2017-12-12T13:58:08.000Z
|
2017-12-12T13:58:08.000Z
|
TUMAGA/obline.py
|
wittrup/crap
|
a77474588fd54a5a998e24df7b1e6e2ab473ded1
|
[
"MIT"
] | null | null | null |
TUMAGA/obline.py
|
wittrup/crap
|
a77474588fd54a5a998e24df7b1e6e2ab473ded1
|
[
"MIT"
] | 1 |
2019-11-03T10:16:35.000Z
|
2019-11-03T10:16:35.000Z
|
"oblique line"
__author__ = 'wittr'
from amazon import Boa
from keysclass import Keys
import configparser
import turtle
def samesideofline(pos, dest, line):
    """Return True when *pos* and *dest* lie strictly on the same side of
    *line* (implements the previously-empty stub).

    :param pos: first point, indexable as (x, y) (e.g. a turtle Vec2D)
    :param dest: second point, same shape as *pos*
    :param line: list/tuple of two 2D points defining an infinite line
    :returns: True iff both points are strictly on the same side; a point
              lying exactly on the line yields False
    """
    (x1, y1), (x2, y2) = line
    # 2D cross product of the line direction with the vector to each point;
    # its sign tells which side of the (infinite) line the point is on.
    dx, dy = x2 - x1, y2 - y1
    side_pos = dx * (pos[1] - y1) - dy * (pos[0] - x1)
    side_dest = dx * (dest[1] - y1) - dy * (dest[0] - x1)
    return side_pos * side_dest > 0
# --- module setup / demo scene ---
player = Boa()
# One oblique obstacle line from (-100, 0) to (0, 100).
player.appendline(-100, 0, 0, 100)
# player.appendline(0, 100, 100, 0)
player.drawlines()
player.collidewithlines = False  # collision with the lines disabled for now

# Key bindings are read from config.ini; optionxform=str keeps option
# names case-sensitive (key names must not be lower-cased).
config = configparser.ConfigParser()
config.optionxform = str  # preserve case
config.read('config.ini')
keys = Keys(player, config)

# Start the turtle event loop (blocks until the window is closed).
turtle.tracer(1)
turtle.listen()
turtle.mainloop()
| 18.387097 | 76 | 0.736842 |
b99d763cfda44760d44b7ebe9c8bab63fd89721c
| 795 |
py
|
Python
|
Python/zzz_training_challenge/Python_Challenge/solutions/ch09_search_and_sort/intro/intro_quicksort.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
Python/zzz_training_challenge/Python_Challenge/solutions/ch09_search_and_sort/intro/intro_quicksort.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
Python/zzz_training_challenge/Python_Challenge/solutions/ch09_search_and_sort/intro/intro_quicksort.py
|
Kreijeck/learning
|
eaffee08e61f2a34e01eb8f9f04519aac633f48c
|
[
"MIT"
] | null | null | null |
# Beispielprogramm für das Buch "Python Challenge"
#
# Copyright 2020 by Michael Inden
def quick_sort(values):
    """Return a sorted copy of *values* using recursive quicksort.

    The first element serves as the pivot; the remaining elements are
    partitioned into those <= pivot and those > pivot, each sorted
    recursively. The input list itself is not modified.
    """
    # Base case: zero or one element is already sorted.
    if len(values) <= 1:
        return values
    pivot, rest = values[0], values[1:]
    smaller, larger = [], []
    for item in rest:
        (smaller if item <= pivot else larger).append(item)
    return quick_sort(smaller) + [pivot] + quick_sort(larger)
def main():
    """Demo: sort two sample lists and print each result."""
    samples = (
        [4, 2, 7, 9, 1, 6, 5, 8, 3],
        [1, 2, 6, 9, 4, 7, 8, 3],
    )
    for sample in samples:
        print(quick_sort(sample))


if __name__ == "__main__":
    main()
| 22.714286 | 71 | 0.656604 |
6a3f5959c19e9835afa387b7c06e60975ccc551f
| 18,035 |
py
|
Python
|
src/torch/npu/memory.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-12-02T03:07:35.000Z
|
2021-12-02T03:07:35.000Z
|
src/torch/npu/memory.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | 1 |
2021-11-12T07:23:03.000Z
|
2021-11-12T08:28:13.000Z
|
src/torch/npu/memory.py
|
Ascend/pytorch
|
39849cf72dafe8d2fb68bd1679d8fd54ad60fcfc
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2020 Huawei Technologies Co., Ltd
# Copyright (c) 2019, Facebook CORPORATION.
# All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import warnings
import torch
from . import is_initialized, _get_device_index
'''
def _host_allocator():
_lazy_init()
return torch._C._npu_npuHostAllocator()
'''
@contextlib.contextmanager
def _free_mutex():
    """Context manager that holds the NPU caching-allocator mutex for the
    duration of the ``with`` block; always released via ``finally``."""
    torch._C._npu_lock_mutex()
    try:
        yield
    finally:
        torch._C._npu_unlock_mutex()
def caching_allocator_alloc(size, device=None, stream=None):
    r"""Performs a memory allocation using the NPU memory allocator.
    Memory is allocated for a given device and a stream, this
    function is intended to be used for interoperability with other
    frameworks. Allocated memory is released through
    :func:`~torch.npu.caching_allocator_delete`.
    Arguments:
        size (int): number of bytes to be allocated.
        device (torch.device or int, optional): selected device. If it is
            ``None`` the default NPU device is used.
        stream (torch.npu.Stream or int, optional): selected stream. If is ``None`` then
            the default stream for the selected device is used.
    .. note::
        See :ref:`npu-memory-management` for more details about NPU memory
        management.
    """
    if device is None:
        device = torch.npu.current_device()
    device = _get_device_index(device)
    if stream is None:
        stream = torch.npu.current_stream(device)
    # A Stream object is unwrapped to its raw pointer before the C call.
    if isinstance(stream, torch.npu.streams.Stream):
        stream = stream.npu_stream
    if not isinstance(stream, int):
        # Fixed typo in the user-facing error message ("a exisiting" -> "an existing").
        raise TypeError('Invalid type for stream argument, must be '
                        '`torch.npu.Stream` or `int` representing a pointer '
                        'to an existing stream')
    with torch.npu.device(device):
        return torch._C._npu_npuCachingAllocator_raw_alloc(size, stream)
def caching_allocator_delete(mem_ptr):
    r"""Deletes memory allocated using the NPU memory allocator.
    Memory allocated with :func:`~torch.npu.caching_allocator_alloc`.
    is freed here. The associated device and stream are tracked inside
    the allocator.
    Arguments:
        mem_ptr (int): memory address to be freed by the allocator.
    .. note::
        See :ref:`npu-memory-management` for more details about NPU memory
        management.
    """
    # The allocator knows which device/stream the pointer belongs to, so no
    # device argument is required here.
    torch._C._npu_npuCachingAllocator_raw_delete(mem_ptr)
def empty_cache():
    r"""Releases all unoccupied cached memory currently held by the caching
    allocator so that those can be used in other NPU application and visible in
    `nvidia-smi`.
    .. note::
        :func:`~torch.npu.empty_cache` doesn't increase the amount of NPU
        memory available for PyTorch. However, it may help reduce fragmentation
        of NPU memory in certain cases. See :ref:`npu-memory-management` for
        more details about NPU memory management.
    """
    # NOTE(review): the `nvidia-smi` mention above is inherited from the CUDA
    # version of this module; for NPU the equivalent device tool applies.
    # No-op unless the NPU runtime has been initialized.
    if is_initialized():
        torch._C._npu_emptyCache()
def memory_stats(device=None):
    r"""Returns a dictionary of NPU memory allocator statistics for a
    given device.
    The return value of this function is a dictionary of statistics, each of
    which is a non-negative integer.
    Core statistics:
    - ``"allocated.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
      number of allocation requests received by the memory allocator.
    - ``"allocated_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
      amount of allocated memory.
    - ``"segment.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
      number of reserved segments from ``npuMalloc()``.
    - ``"reserved_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
      amount of reserved memory.
    - ``"active.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
      number of active memory blocks.
    - ``"active_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
      amount of active memory.
    - ``"inactive_split.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
      number of inactive, non-releasable memory blocks.
    - ``"inactive_split_bytes.{all,large_pool,small_pool}.{current,peak,allocated,freed}"``:
      amount of inactive, non-releasable memory.
    For these core statistics, values are broken down as follows.
    Pool type:
    - ``all``: combined statistics across all memory pools.
    - ``large_pool``: statistics for the large allocation pool
      (as of October 2019, for size >= 1MB allocations).
    - ``small_pool``: statistics for the small allocation pool
      (as of October 2019, for size < 1MB allocations).
    Metric type:
    - ``current``: current value of this metric.
    - ``peak``: maximum value of this metric.
    - ``allocated``: historical total increase in this metric.
    - ``freed``: historical total decrease in this metric.
    In addition to the core statistics, we also provide some simple event
    counters:
    - ``"num_alloc_retries"``: number of failed ``npuMalloc`` calls that
      result in a cache flush and retry.
    - ``"num_ooms"``: number of out-of-memory errors thrown.
    Arguments:
        device (torch.device or int, optional): selected device. Returns
            statistics for the current device, given by :func:`~torch.npu.current_device`,
            if :attr:`device` is ``None`` (default).
    .. note::
        See :ref:`npu-memory-management` for more details about NPU memory
        management.
    """
    result = []
    def _recurse_add_to_result(prefix, obj):
        # Flatten the nested dict returned by the C binding into
        # ("a.b.c", value) pairs collected in `result`.
        if isinstance(obj, dict):
            if len(prefix) > 0:
                prefix += "."
            for k, v in obj.items():
                _recurse_add_to_result(prefix + k, v)
        else:
            result.append((prefix, obj))
    stats = memory_stats_as_nested_dict(device=device)
    _recurse_add_to_result("", stats)
    # Sort so the key order is deterministic across calls.
    result.sort()
    return collections.OrderedDict(result)
def memory_stats_as_nested_dict(device=None):
    r"""Return the data behind :func:`~torch.npu.memory_stats` as a nested dict."""
    device_index = _get_device_index(device, optional=True)
    return torch._C._npu_memoryStats(device_index)
def reset_accumulated_memory_stats(device=None):
    r"""Reset the "accumulated" (historical) NPU allocator statistics.

    These correspond to the ``"allocated"`` and ``"freed"`` entries of each
    stat dict returned by :func:`~torch.npu.memory_stats`, plus
    ``"num_alloc_retries"`` and ``"num_ooms"``.

    Arguments:
        device (torch.device or int, optional): device to reset; defaults to
            the current device (see :func:`~torch.npu.current_device`).

    .. note::
        See :ref:`npu-memory-management` for details about NPU memory
        management.
    """
    device_index = _get_device_index(device, optional=True)
    return torch._C._npu_resetAccumulatedMemoryStats(device_index)
def reset_peak_memory_stats(device=None):
    r"""Reset the "peak" values tracked by the NPU memory allocator.

    Peak stats are the ``"peak"`` entries in each stat dict returned by
    :func:`~torch.npu.memory_stats`.

    Arguments:
        device (torch.device or int, optional): device to reset; defaults to
            the current device (see :func:`~torch.npu.current_device`).

    .. note::
        See :ref:`npu-memory-management` for details about NPU memory
        management.
    """
    device_index = _get_device_index(device, optional=True)
    return torch._C._npu_resetPeakMemoryStats(device_index)
def reset_max_memory_allocated(device=None):
    r"""Reset the starting point for tracking maximum NPU memory used by tensors.

    See :func:`~torch.npu.max_memory_allocated`.

    Arguments:
        device (torch.device or int, optional): device to reset; defaults to
            the current device (see :func:`~torch.npu.current_device`).

    .. warning::
        This now delegates to :func:`~torch.npu.reset_peak_memory_stats`,
        which resets *all* peak memory statistics, not just this one.
    """
    warnings.warn(
        "torch.npu.reset_max_memory_allocated now calls torch.npu.reset_peak_memory_stats, "
        "which resets /all/ peak memory stats.",
        DeprecationWarning)
    return reset_peak_memory_stats(device=device)
def reset_max_memory_cached(device=None):
    r"""Reset the starting point for tracking maximum NPU memory held by the
    caching allocator.

    See :func:`~torch.npu.max_memory_cached`.

    Arguments:
        device (torch.device or int, optional): device to reset; defaults to
            the current device (see :func:`~torch.npu.current_device`).

    .. warning::
        This now delegates to :func:`~torch.npu.reset_peak_memory_stats`,
        which resets *all* peak memory statistics, not just this one.
    """
    warnings.warn(
        "torch.npu.reset_max_memory_cached now calls torch.npu.reset_peak_memory_stats, "
        "which resets /all/ peak memory stats.",
        DeprecationWarning)
    return reset_peak_memory_stats(device=device)
def memory_allocated(device=None):
    r"""Return the number of bytes currently occupied by tensors on a device.

    Arguments:
        device (torch.device or int, optional): device to query; defaults to
            the current device (see :func:`~torch.npu.current_device`).

    .. note::
        This is typically less than the amount reported by device tools,
        since the caching allocator can hold unused memory and some NPU
        context must also be created. See :ref:`npu-memory-management`.
    """
    stats = memory_stats(device=device)
    return stats["allocated_bytes.all.current"]
def max_memory_allocated(device=None):
    r"""Return the peak number of bytes occupied by tensors on a device.

    By default the peak is measured since the start of the program;
    :func:`~torch.npu.reset_peak_memory_stats` moves the starting point, which
    allows measuring the peak of, e.g., each training iteration.

    Arguments:
        device (torch.device or int, optional): device to query; defaults to
            the current device (see :func:`~torch.npu.current_device`).

    .. note::
        See :ref:`npu-memory-management` for details about NPU memory
        management.
    """
    stats = memory_stats(device=device)
    return stats["allocated_bytes.all.peak"]
def memory_reserved(device=None):
    r"""Return the number of bytes currently managed by the caching allocator
    on a device.

    Arguments:
        device (torch.device or int, optional): device to query; defaults to
            the current device (see :func:`~torch.npu.current_device`).

    .. note::
        See :ref:`npu-memory-management` for details about NPU memory
        management.
    """
    stats = memory_stats(device=device)
    return stats["reserved_bytes.all.current"]
def max_memory_reserved(device=None):
    r"""Return the peak number of bytes managed by the caching allocator on a
    device.

    By default the peak is measured since the start of the program;
    :func:`~torch.npu.reset_peak_memory_stats` moves the starting point, which
    allows measuring the peak of, e.g., each training iteration.

    Arguments:
        device (torch.device or int, optional): device to query; defaults to
            the current device (see :func:`~torch.npu.current_device`).

    .. note::
        See :ref:`npu-memory-management` for details about NPU memory
        management.
    """
    stats = memory_stats(device=device)
    return stats["reserved_bytes.all.peak"]
def memory_cached(device=None):
    r"""Deprecated alias of :func:`~torch.npu.memory_reserved`."""
    message = "torch.npu.memory_cached has been renamed to torch.npu.memory_reserved"
    warnings.warn(message, DeprecationWarning)
    return memory_reserved(device=device)
def max_memory_cached(device=None):
    r"""Deprecated alias of :func:`~torch.npu.max_memory_reserved`."""
    message = "torch.npu.max_memory_cached has been renamed to torch.npu.max_memory_reserved"
    warnings.warn(message, DeprecationWarning)
    return max_memory_reserved(device=device)
def memory_snapshot():
    r"""Return a snapshot of the NPU allocator state across all devices.

    Interpreting the result requires familiarity with the allocator
    internals; see :ref:`npu-memory-management`.
    """
    snapshot = torch._C._npu_memorySnapshot()
    return snapshot
def memory_summary(device=None, abbreviated=False):
    r"""Returns a human-readable printout of the current memory allocator
    statistics for a given device.
    This can be useful to display periodically during training, or when
    handling out-of-memory exceptions.
    Arguments:
        device (torch.device or int, optional): selected device. Returns
            printout for the current device, given by :func:`~torch.npu.current_device`,
            if :attr:`device` is ``None`` (default).
        abbreviated (bool, optional): whether to return an abbreviated summary
            (default: False).
    .. note::
        See :ref:`npu-memory-management` for more details about NPU memory
        management.
    """
    device = _get_device_index(device, optional=True)
    stats = memory_stats(device=device)
    def _format_size(sz, pref_sz):
        # Pick a byte-unit prefix based on pref_sz (the row's reference value)
        # so every column in one table row is printed in the same unit.
        prefixes = ["B ", "KB", "MB", "GB", "TB", "PB"]
        prefix = prefixes[0]
        for new_prefix in prefixes[1:]:
            if pref_sz < 768 * 1024:
                break
            prefix = new_prefix
            sz //= 1024
            pref_sz /= 1024
        return "{:7d} {}".format(sz, prefix)
    def _format_count(cnt, pref_cnt):
        # Same idea as _format_size but with decimal (K/M) count prefixes.
        prefixes = [" ", "K", "M"]
        prefix = prefixes[0]
        for new_prefix in prefixes[1:]:
            if pref_cnt < 750 * 1000:
                break
            prefix = new_prefix
            cnt //= 1000
            pref_cnt /= 1000
        return "{:7d} {} ".format(cnt, prefix)
    # (stat-key prefix, human-readable row label, value formatter)
    metrics_to_display = [
        ("allocated_bytes", "Allocated memory", _format_size),
        ("active_bytes", "Active memory", _format_size),
        ("reserved_bytes", "NPU reserved memory", _format_size),
        ("inactive_split_bytes", "Non-releasable memory", _format_size),
        ("allocation", "Allocations", _format_count),
        ("active", "Active allocs", _format_count),
        ("segment", "NPU reserved segments", _format_count),
        ("inactive_split", "Non-releasable allocs", _format_count),
    ]
    lines = []
    lines.append("=" * 75)
    lines.append(" {_:16} PyTorch NPU memory summary, device ID {device:<18d} ")
    lines.append("-" * 75)
    lines.append(" {_:9} NPU OOMs: {num_ooms:<13d} | {_:6} npuMalloc retries: {num_alloc_retries:<9d} ")
    lines.append("=" * 75)
    lines.append(" Metric | Cur Usage | Peak Usage | Tot Alloc | Tot Freed ")
    for metric_key, metric_name, formatter in metrics_to_display:
        lines.append("-" * 75)
        submetrics = [("all", metric_name)]
        if not abbreviated:
            submetrics.append(("large_pool", " from large pool"))
            submetrics.append(("small_pool", " from small pool"))
        # The "all" row's values act as the unit reference for the whole group.
        current_prefval, peak_prefval, allocated_prefval, freed_prefval = None, None, None, None
        for submetric_key, submetric_name in submetrics:
            prefix = metric_key + "." + submetric_key + "."
            current = stats[prefix + "current"]
            peak = stats[prefix + "peak"]
            allocated = stats[prefix + "allocated"]
            freed = stats[prefix + "freed"]
            if current_prefval is None:
                current_prefval = current
                peak_prefval = peak
                allocated_prefval = allocated
                freed_prefval = freed
            lines.append(" {:<21} | {} | {} | {} | {} ".format(
                submetric_name,
                formatter(current, current_prefval),
                formatter(peak, peak_prefval),
                formatter(allocated, allocated_prefval),
                formatter(freed, freed_prefval)),
            )
    lines.append("=" * 75)
    # Placeholder names may not contain dots, so stat keys are remapped.
    fmt_dict = {"_": "", "device": device}
    for k, v in stats.items():
        fmt_dict[k.replace(".", "-")] = v
    return "|" + "|\n|".join(lines).format(**fmt_dict) + "|\n"
| 37.109053 | 106 | 0.662101 |
16a16c78f2a73e0eaf01f6c01c7a38ed82616ca4
| 610 |
py
|
Python
|
turngen/ai_dummy.py
|
amrohendawi/AlphaZero-implementation
|
42103e63308ba256208b6dd6ddcbef2e797e9932
|
[
"MIT"
] | null | null | null |
turngen/ai_dummy.py
|
amrohendawi/AlphaZero-implementation
|
42103e63308ba256208b6dd6ddcbef2e797e9932
|
[
"MIT"
] | null | null | null |
turngen/ai_dummy.py
|
amrohendawi/AlphaZero-implementation
|
42103e63308ba256208b6dd6ddcbef2e797e9932
|
[
"MIT"
] | null | null | null |
from ai_base import ai_base
import game as g
import state as s
from random import randint
from turn import Turn
class ai_dummy(ai_base):
    """Baseline AI that plays a uniformly random legal move."""
    def __init__(self, player):
        super().__init__(player)
        self.player = player
        self.game = g.Game()
    def calculateTurn(self, board):
        """Pick a random legal turn for *board* and return it as a Turn."""
        current_state = s.State(board, self.player)
        moves = self.game.turnlist(current_state)
        move = moves[randint(0, len(moves) - 1)]
        # Decode "a1-b2"-style move notation back into board coordinates.
        from_col = ord(move[0]) - ord('a')
        from_row = ord(move[1]) - ord('1')
        # move[2] is the '-' separator
        to_col = ord(move[3]) - ord('a')
        to_row = ord(move[4]) - ord('1')
        return Turn(from_col, from_row, to_col, to_row)
| 24.4 | 45 | 0.662295 |
bca4fe9a837381cd95de03c97bbf51e73ac55223
| 293 |
py
|
Python
|
exercises/pt/exc_02_09.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
exercises/pt/exc_02_09.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
exercises/pt/exc_02_09.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
import spacy
# Carregue o fluxo de processamento en_core_web_md
# para fazer o download do fluxo: python -m spacy download pt_core_news_md
# NOTE: the ``____`` placeholders below are intentional — this is a course
# exercise where the learner fills in the blanks.
nlp = ____
# Processe um texto
doc = nlp("Duas bananas de pijamas")
# Imprima o vetor para "bananas"
# (expected: access the token at index 1 and read its .vector attribute)
bananas_vector = ____.____
print(bananas_vector)
| 22.538462 | 74 | 0.774744 |
d5f4b4eb4d3705cd3ee7c20a6970990b576c902e
| 12,039 |
py
|
Python
|
src/tools/data/vdb_check/vdb_check.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 226 |
2018-12-29T01:13:49.000Z
|
2022-03-30T19:16:31.000Z
|
src/tools/data/vdb_check/vdb_check.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 5,100 |
2019-01-14T18:19:25.000Z
|
2022-03-31T23:08:36.000Z
|
src/tools/data/vdb_check/vdb_check.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 84 |
2019-01-24T17:41:50.000Z
|
2022-03-10T10:01:46.000Z
|
#
# This is vdbck, like fsck for a VisIt Database
#
# This tool is designed to iterate over all the objects in a database
# and validate that VisIt can process them by doing plots involving
# the objects. It reports cases that fail. This is intended to be a
# way for users to sanity check that they are creating a database
# VisIt is capable of reading or indicate which object(s) in the database
# VisIt encounters errors processing.
#
# Mark C. Miller, 01Apr17
#
import sys, string, atexit
import argparse
from os import path as ospath
# TO DO:
# 3. html output of results
# 4. dump results to different dirs
#
# Codify the set of database objects we're going to iterate on.
# For each object, we need to know the name of a suitable plot,
# the name of the list in metadata, the name of the expression
# type for expressions.
#
# Map from object class -> per-class names used throughout the checks:
#   plotName: VisIt plot type used to render this variable class
#   mdName:   metadata list accessor suffix (GetNum<mdName>/Get<mdName>)
#   exName:   expression-type attribute name ("" when not applicable)
visitDbObjectMap = {
    "Mesh":{
        "plotName":"Mesh",
        "mdName":"Meshes",
        "exName":"Mesh"
    },
    "Scalar":{
        "plotName":"Pseudocolor",
        "mdName":"Scalars",
        "exName":"ScalarMeshVar"
    },
    "Vector":{
        "plotName":"Vector",
        "mdName":"Vectors",
        "exName":"VectorMeshVar"
    },
    "Tensor":{
        "plotName":"Tensor",
        "mdName":"Tensors",
        "exName":"TensorMeshVar"
    },
    "Symmetric Tensor":{
        "plotName":"Tensor",
        "mdName":"SymmTensors",
        "exName":"SymmetricTensorMeshVar"
    },
    "Array":{
        "plotName":"Label",
        "mdName":"Arrays",
        "exName":"ArrayMeshVar"
    },
    "Label":{
        "plotName":"Label",
        "mdName":"Labels",
        "exName":""
    },
    "Material":{
        "plotName":"FilledBoundary",
        "mdName":"Materials",
        "exName":"Material"
    },
    "Curve":{
        "plotName":"Curve",
        "mdName":"Curves",
        "exName":"CurveMeshVar"
    }
}
class lastError:
    # De-duplicates consecutive identical VisIt error messages: message()
    # returns the error text only when it differs from the previous call,
    # otherwise the empty string.
    def __init__(self):
        self.lastMsg = ""
    def message(self):
        # GetLastError() is provided by the VisIt CLI environment.
        if self.lastMsg == GetLastError():
            return ""
        else:
            self.lastMsg = GetLastError()
            return self.lastMsg
lastErrorCache = lastError()  # module-wide singleton used by doVisItCLIOp
class phaseStack:
    """Minimal LIFO stack used to track nested report phases."""
    def __init__(self):
        self.items = []
    def isEmpty(self):
        """Return True when the stack holds no items."""
        # Idiomatic truthiness test instead of comparing against [].
        return not self.items
    def push(self, item):
        """Push *item* onto the top of the stack."""
        self.items.append(item)
    def pop(self):
        """Remove and return the top item (raises IndexError when empty)."""
        return self.items.pop()
    def peek(self):
        """Return the top item without removing it."""
        # Negative indexing replaces the manual len()-1 computation.
        return self.items[-1]
    def size(self):
        """Return the number of items currently on the stack."""
        return len(self.items)
log = []  # accumulated report lines, flushed to disk by OutputLog()
logPhaseStack = phaseStack()  # StartPhase/EndPhase nesting; drives log indentation
badMeshNames = []  # meshes that failed earlier checks; later checks skip them
def isTrue(testVal):
    """Return True when *testVal* is truthy (predicate for doVisItCLIOp)."""
    # bool() is the idiomatic equivalent of the former if/return branches.
    return bool(testVal)
def isFalse(testVal):
    """Return True when *testVal* is falsy (predicate for doVisItCLIOp)."""
    # `not` already yields a bool, replacing the former if/return branches.
    return not testVal
def OutputLog():
    """Write the accumulated log lines to ``check_visit_db.out``.

    No-op when ``--no-verbose`` was given. Registered with atexit so the
    report is flushed even on early termination.
    """
    if clArgs.no_verbose:
        return
    # `with` guarantees the file is closed even if a write fails
    # (the original relied on a manual close with no error handling).
    with open("check_visit_db.out", "w") as f:
        f.writelines(log)
def Fail():
    # Flush the report to disk, then abort the whole check with exit code 1.
    OutputLog()
    sys.exit(1)
def logIndentLen():
    """Number of indent columns (4 per phase-nesting level) for log lines."""
    # size() already returns an int, so the former int() cast was redundant.
    return 4 * logPhaseStack.size()
def logIndent():
    # Leading whitespace string for the current phase-nesting depth.
    return " "*logIndentLen()
def StartPhase(phaseName):
    # Open a named section in the report: a banner followed by one extra
    # level of indentation for everything logged until the matching EndPhase().
    if clArgs.no_verbose:
        return
    log.append("%s%s\n"%(logIndent(),"*"*(120-logIndentLen())))
    log.append("%s%s\n"%(logIndent(),phaseName))
    log.append("%s%s\n"%(logIndent(),"*"*(120-logIndentLen())))
    logPhaseStack.push(phaseName)
def EndPhase():
    # Close the innermost section opened by StartPhase(), reducing the
    # indentation level for subsequent log lines.
    if clArgs.no_verbose:
        return
    logPhaseStack.pop()
def LogStatus(objName, retVal, errMsg, statusStr, failSeverity):
    # Append one fixed-width result row to the report. In quiet mode only
    # non-PASSED rows are recorded. The return value is flattened to one
    # line and truncated to 30 characters to keep columns aligned.
    if clArgs.no_verbose and statusStr == "PASSED":
        return
    log.append("%s%s %s %s %s %s\n"%(
        logIndent(),
        '{:20s}'.format(objName),
        '{:30s}'.format(str(retVal).replace("\n"," ")[:30]),
        '{:30s}'.format(errMsg),
        '{:10s}'.format(statusStr),
        '{:10s}'.format(failSeverity)))
def doVisItCLIOp(func, args, passFunc, failSeverity):
    """Invoke a VisIt CLI function, validate its result, and log the outcome.

    Arguments:
        func: the CLI callable to invoke (e.g. OpenDatabase, GetMetaData).
        args: tuple of positional arguments passed to ``func``.
        passFunc: predicate applied to the return value; truthy means PASSED.
        failSeverity: "Fatal" aborts the whole run on failure/exception,
            anything else just logs and continues.

    Returns:
        Whatever ``func`` returned, or None if it raised.
    """
    retVal = None
    # BUG FIX: errMsg must be initialized before the try block — previously,
    # if func() itself raised, the except handler referenced errMsg before
    # it was ever assigned, causing a NameError.
    errMsg = ""
    try:
        retVal = func(*args)
        errMsg = lastErrorCache.message()
        if passFunc(retVal):
            LogStatus(func.__name__, retVal, errMsg, "PASSED", failSeverity)
        else:
            LogStatus(func.__name__, retVal, errMsg, "FAILED", failSeverity)
            if failSeverity == "Fatal":
                Fail()
    # `except Exception` (not bare `except:`) so the SystemExit raised by
    # Fail() in the FAILED branch propagates instead of being logged again
    # as a spurious EXCEPTION row.
    except Exception:
        LogStatus(func.__name__, retVal, errMsg, "EXCEPTION", failSeverity)
        if failSeverity == "Fatal":
            Fail()
    return retVal
def doVisItPlot(plotName, plotVar):
    # Render plotVar with the given plot type and save an image.
    # Returns 0 on success, 1 when any step raised (logged as WARNING).
    retVal = 0
    try:
        DeleteAllPlots()
        AddPlot(plotName, plotVar)
        doVisItCLIOp(DrawPlots, (), isTrue, "NonFatal")
        ResetView()
        SaveWindow()
        LogStatus(plotName, "", plotVar, "PASSED", "NonFatal")
    except:
        # Broad catch is deliberate: a failed plot must not abort the scan.
        LogStatus(plotName, "", plotVar, "WARNING", "NonFatal")
        retVal = 1
    return retVal
#
# Report all metadata objects that are not valid
#
def checkNotValidVars(md):
    # Scan the metadata for objects flagged invalid by the reader.
    # Returns 1 if any invalid object was found, else 0. Invalid meshes are
    # also remembered in badMeshNames so later checks skip them.
    retVal = 0
    StartPhase("Invalid variables")
    for oClass in visitDbObjectMap:
        StartPhase("Invalid %s variables"%oClass)
        mdName = visitDbObjectMap[oClass]["mdName"]
        numObjsInClass = getattr(md, "GetNum%s"%mdName)()
        for i in range(numObjsInClass):
            objMd = getattr(md, "Get%s"%mdName)(i)
            if not objMd.validVariable:
                retVal = 1
                LogStatus(objMd.name, "", "Invalid %s"%oClass, "WARNING", "NonFatal")
                if oClass == "Mesh":
                    badMeshNames.append(objMd.name)
        EndPhase()
    EndPhase()
    return retVal
#
# Test all variables taking care to avoid invalid variables
#
def checkVarsInDatabase(md):
    # Attempt to plot every valid variable of every object class.
    # Returns 1 if any plot failed, else 0; meshes that fail are added to
    # badMeshNames so the SIL checks can avoid them.
    retVal = 0
    StartPhase("Plotting variables")
    for oClass in visitDbObjectMap:
        StartPhase("Plotting %s variables"%oClass)
        plotName = visitDbObjectMap[oClass]["plotName"]
        mdName = visitDbObjectMap[oClass]["mdName"]
        numObjsInClass = getattr(md, "GetNum%s"%mdName)()
        for i in range(numObjsInClass):
            objMd = getattr(md, "Get%s"%mdName)(i)
            # Invalid variables are reported separately by checkNotValidVars.
            if not objMd.validVariable:
                continue
            if doVisItPlot(plotName, objMd.name) != 0:
                retVal = 1
                if oClass == "Mesh":
                    badMeshNames.append(objMd.name)
        EndPhase()
    EndPhase()
    return retVal
#
# Test all expressions taking care to skip auto-expressions
#
def checkExprsInDatabase(md):
    """Plot every non-automatic expression in the database.

    Returns 1 if any expression plot failed, else 0. Auto-expressions and
    expressions whose type has no plot mapping are skipped.
    """
    retVal = 0
    exprList = md.GetExprList()
    if not exprList.GetNumExpressions():
        # BUG FIX: this used to be a bare `return` (i.e. None). The caller
        # accumulates `nerrors + checkExprsInDatabase(md)`, which raised a
        # TypeError for databases without expressions.
        return 0
    StartPhase("Plotting expressions")
    for oClass in visitDbObjectMap:
        StartPhase("Plotting %s expressions"%oClass)
        plotName = visitDbObjectMap[oClass]["plotName"]
        exprTypeName = visitDbObjectMap[oClass]["exName"]
        # Some classes (e.g. Label) have no expression type.
        if not exprTypeName:
            EndPhase()
            continue
        # Resolve the enum value for this expression type from any expression.
        exprType = getattr(md.GetExprList().GetExpressions(0),exprTypeName)
        for i in range(exprList.GetNumExpressions()):
            expr = exprList.GetExpressions(i)
            if expr.GetAutoExpression():
                continue
            if expr.GetType() != exprType:
                continue
            if doVisItPlot(plotName, expr.GetName()) != 0:
                retVal = 1
        EndPhase()
    EndPhase()
    return retVal
def checkSILsInDatabase(md):
    # For each valid, plottable mesh: turn off one set in each SIL category,
    # re-render, and confirm VisIt handles the subset restriction.
    # Returns 1 if any restriction failed, else 0.
    retVal = 0
    StartPhase("Subset defined by SILs")
    numMeshes = md.GetNumMeshes()
    for i in range(numMeshes):
        meshMd = md.GetMeshes(i)
        if not meshMd.validVariable:
            continue
        # Skip meshes that already failed earlier plot checks.
        if meshMd.GetName() in badMeshNames:
            continue
        DeleteAllPlots()
        AddPlot("Mesh", meshMd.GetName())
        DrawPlots()
        ResetView()
        silr = SILRestriction()
        cats = silr.Categories()
        # NOTE(review): debug leftover — writes categories to stdout on
        # every mesh; consider routing through the logger instead.
        print(cats)
        for c in cats:
            catSets = silr.SetsInCategory(c)
            # A single-set category cannot be meaningfully restricted.
            if len(catSets) <= 1:
                LogStatus(c, "", "", "SKIPPED", "NonFatal")
                continue
            aset = silr.SetsInCategory(c)[0]
            silr.TurnOffSet(aset)
            try:
                SetPlotSILRestriction(silr)
                SaveWindow()
                # Restore the full SIL before moving to the next category.
                silr.TurnOnAll()
                LogStatus(c, "", silr.SetName(aset), "PASSED", "NonFatal")
                SetPlotSILRestriction(silr)
            except:
                retVal = 1
                LogStatus(c, "", silr.SetName(aset), "FAILED", "NonFatal")
    EndPhase()
    return retVal
def checkMdDiffs(md0, md1):
    # Compare two metadata snapshots (e.g. consecutive time states) and flag
    # object classes whose set of valid variable names changed.
    # Returns 1 if any class differs, else 0.
    retVal = 0
    StartPhase("Metadata time variation")
    for oClass in visitDbObjectMap:
        mdName = visitDbObjectMap[oClass]["mdName"]
        numObjs0 = getattr(md0, "GetNum%s"%mdName)()
        numObjs1 = getattr(md1, "GetNum%s"%mdName)()
        getObjs0 = getattr(md0, "Get%s"%mdName)
        getObjs1 = getattr(md1, "Get%s"%mdName)
        # Only valid variables participate in the comparison.
        objNames0 = set(getObjs0(i).GetName() for i in range(numObjs0) if getObjs0(i).GetValidVariable())
        objNames1 = set(getObjs1(i).GetName() for i in range(numObjs1) if getObjs1(i).GetValidVariable())
        if objNames0 != objNames1:
            retVal = 1
            LogStatus(oClass, "", "", "FAILED", "NonFatal")
    EndPhase()
    return retVal
def checkFile(dbName):
    """Probe that *dbName* can be opened for reading; abort the run if not."""
    try:
        # `with` closes the handle even if a later statement were added that
        # raised; the open itself is the actual readability test.
        with open(dbName):
            pass
    except OSError:
        # Narrowed from a bare except: only I/O failures mean "unreadable".
        LogStatus("File check", "", "", "FAILED", "Fatal")
        Fail()
def checkDatabase(dbName, timeStates):
    # Top-level driver: open the database, then for each requested time state
    # run the metadata-diff, invalid-variable, plot, expression and SIL
    # checks. Exits the process with 0 on success, 1 if any check failed.
    # timeStates == [-1] means "all time states in the database".
    nerrors = 0
    allTimeStates = False
    initTimeState = 0
    baseDbName = ospath.basename(dbName)
    if timeStates[0] == -1:
        allTimeStates = True
    else:
        initTimeState = timeStates[0]
    #
    # Confirm file is readable
    #
    checkFile(dbName)
    StartPhase("Database \"%s\""%ospath.basename(dbName))
    #
    # Get database metadata
    #
    md = doVisItCLIOp(GetMetaData, (dbName, initTimeState), isTrue, "Fatal")
    moreThanOneTimeState = False
    if len(md.GetTimes()) > 1:
        moreThanOneTimeState = True
        if allTimeStates:
            timeStates = list(md.GetTimes())
    #
    # Open the database
    #
    doVisItCLIOp(OpenDatabase, (dbName, initTimeState), isTrue, "Fatal")
    for t in timeStates:
        if moreThanOneTimeState:
            StartPhase("Time state %d"%t)
            # Re-open so the reader picks up the requested state.
            ReOpenDatabase(dbName)
            TimeSliderSetState(t)
            newMd = doVisItCLIOp(GetMetaData, (dbName, t), isTrue, "NonFatal")
            #
            # Report differences in Metadata
            #
            nerrors = nerrors + checkMdDiffs(md, newMd)
        #
        # Report any invalid vars in md
        #
        nerrors = nerrors + checkNotValidVars(md)
        #
        # Test plotting all the variables in the database
        #
        nerrors = nerrors + checkVarsInDatabase(md)
        #
        # Test plotting all expressions in the database
        #
        nerrors = nerrors + checkExprsInDatabase(md)
        #
        # Test SIL Restrictions
        #
        nerrors = nerrors + checkSILsInDatabase(md)
        if moreThanOneTimeState:
            # Carry the current state's metadata forward for the next diff.
            md = newMd
            EndPhase() # time states
        else:
            break
    EndPhase() # Database
    OutputLog()
    if nerrors > 0:
        sys.exit(1)
    sys.exit(0)
#
# Main
#
# Flush the report even if the script is killed before finishing.
atexit.register(OutputLog)
clParser = argparse.ArgumentParser(description="VisIt Database Consistency Check.")
clParser.add_argument("dbpath", type=str,
    help="The name of a VisIt database to check")
clParser.add_argument("-t", "--time-states", type=int, nargs="*", default=[0], metavar="I",
    help="Time state indices to check. -1=All.")
clParser.add_argument("--no-verbose", action="store_true",
    help="Turn off verbose reporting and report only failures.")
clParser.add_argument("--no-images", action="store_true",
    help="Turn off keeping of saved images as they are produced.")
clArgs = clParser.parse_args()
swa = GetSaveWindowAttributes()
swa.fileName = "vdb_check_image"
if clArgs.no_images:
    # Route SaveWindow output to /dev/null so images are discarded.
    swa.outputDirectory = "/dev"
    swa.outputToCurrentDirectory = 0
    swa.family = 0
    swa.fileName = "null"
SetSaveWindowAttributes(swa)
checkDatabase(clArgs.dbpath,clArgs.time_states)
fc39bdccddda8ae1a90457f913c51b0f861ec67a
| 7,921 |
py
|
Python
|
Treball/simulator.py
|
jmigual/OiS
|
b5eb0dc9db8d8c655ea3b941a5ada5c764b7aa0d
|
[
"MIT"
] | null | null | null |
Treball/simulator.py
|
jmigual/OiS
|
b5eb0dc9db8d8c655ea3b941a5ada5c764b7aa0d
|
[
"MIT"
] | null | null | null |
Treball/simulator.py
|
jmigual/OiS
|
b5eb0dc9db8d8c655ea3b941a5ada5c764b7aa0d
|
[
"MIT"
] | null | null | null |
import csv
import random
import abc
import numpy
import os
from logger import *
class ComptadorEstadistic:
    # Statistics accumulator: collects per-client waiting times, writes a
    # per-event CSV trace and a summary row when the simulation ends.
    def __init__(self, nom_arxiu):
        self.llista_espera = []
        self.nom_arxiu = nom_arxiu
        directori, arxiu = os.path.split(nom_arxiu)
        # NOTE(review): this handle is never explicitly closed; it lives for
        # the lifetime of the object and is flushed on interpreter exit.
        csvfile = open(os.path.join(directori, "resultats_" + arxiu), 'a')
        self.csv = csv.writer(csvfile, delimiter=',')
        self.iteration = 1
    def error(self):
        # Relative convergence estimate; undefined for fewer than 20 samples.
        if len(self.llista_espera) < 20:
            return None
        # Standard deviation of the last 10 samples vs. the whole series.
        desviacio = numpy.std(self.llista_espera[-10:])
        desviacio_total = numpy.std(self.llista_espera)
        # +0.0001 guards against division by zero when all waits are equal.
        error = abs(desviacio_total - desviacio) / (desviacio_total + 0.0001)
        return error
    def imprimir_resultats(self, estat):
        # Log summary statistics and append one result row to the CSV file.
        mitjana = numpy.mean(self.llista_espera)
        desviacio = numpy.std(self.llista_espera)
        clients = len(self.llista_espera)
        logger = logging.getLogger()
        logger.info("Temps final         : {:9.4f}".format(estat.rellotge))
        logger.info("Total de clients    : {:9.4f}".format(clients))
        logger.info("Temps mitjà d'espera: {:9.4f}".format(mitjana))
        logger.info("Desviació std dades : {:9.4f}".format(desviacio))
        logger.info("Files al CSV        : {:9.4f}".format(self.iteration))
        # Linear fit of waiting time vs. client index: a positive slope
        # suggests the system is unstable (waits keep growing).
        polinomi = numpy.polyfit(range(clients), self.llista_espera, 1)
        logger.info("Regressió a+b*x     :")
        logger.info("    a               : {:9.7f}".format(polinomi[1]))
        logger.info("    b               : {:9.7f}".format(polinomi[0]))
        with open(self.nom_arxiu, mode='a') as file:
            csv_results = csv.writer(file, delimiter=',')
            resultats = [mitjana, desviacio, clients, polinomi[0], polinomi[1]]
            csv_results.writerow(resultats)
        return resultats
    def imprimir_estadistiques(self, estat):
        # Append one trace row: cumulative wait, last wait, queue length, clock.
        self.iteration += 1
        self.csv.writerow([self.iteration, numpy.sum(self.llista_espera), self.llista_espera[-1],
                           len(estat.llista_persones_espera), estat.rellotge])
class Estat:
    """Mutable simulation state: waiting queue, desk availability, clock, stats."""
    def __init__(self, nom_arxiu, facturadors=12):
        self.llista_persones_espera = []
        self.facturador_lliure = [True for _ in range(facturadors)]
        self.rellotge = 0
        self.stat = ComptadorEstadistic(nom_arxiu)
class Esdeveniment(metaclass=abc.ABCMeta):
    """Abstract base class for simulation events (type label + timestamp)."""
    def __init__(self, tipus, rellotge):
        self.tipus = tipus
        self.rellotge = rellotge
    def __lt__(self, other):
        # Events are ordered by their scheduled time so the event list can
        # simply be sorted to find the next one.
        return self.rellotge < other.rellotge
    def __str__(self):
        return "Temps: {0:5.1f} {1:<25}".format(self.rellotge, self.tipus)
    @abc.abstractmethod
    def esdevenir(self, estat):
        """
        Execute this event and create any derived events.
        :param estat: current system state
        :return: list of newly created events
        """
        return []
class EsdevenimentFinalitzacio(Esdeveniment):
    # Event: a check-in desk finishes serving a client.
    def __init__(self, rellotge, facturador):
        super(EsdevenimentFinalitzacio, self).__init__("Finalitzacio facturacio", rellotge)
        self.facturador = facturador
    def esdevenir(self, estat):
        """
        Handle the end of a check-in: if clients are waiting, take the next
        one (recording their waiting time) and schedule a new completion
        event; otherwise mark the desk as free.
        :param estat: current system state
        :return: list of events created by this event
        """
        estat.rellotge = self.rellotge
        if len(estat.llista_persones_espera) <= 0:
            estat.facturador_lliure[self.facturador] = True
            return []
        # The queue stores each client's arrival time; the difference to the
        # current clock is their waiting time.
        persona = estat.llista_persones_espera.pop(0)
        estat.stat.llista_espera.append(estat.rellotge - persona)
        estat.stat.imprimir_estadistiques(estat)
        return [EsdevenimentFinalitzacio(self.rellotge + Simulacio.get_temps_facturacio(),
                                         self.facturador)]
class EsdevenimentArribada(Esdeveniment):
    # Event: a group of passengers arrives at check-in.
    def __init__(self, rellotge):
        super(EsdevenimentArribada, self).__init__("Arribada grup passatgers", rellotge)
        # Group size drawn from a fixed discrete distribution.
        y = random.random()
        if y < 0.25:                # probability 0.25
            n = 1
        elif y < 0.6:               # probability 0.35
            n = 2
        elif y < 0.85:              # probability 0.25
            n = 3
        elif y < 0.95:              # probability 0.10
            n = 4
        else:                       # probability 0.05
            n = 5
        self.nombre_passatgers = n
    def esdevenir(self, estat):
        """
        Handle a group arrival: start check-in immediately for as many
        passengers as there are free desks (one completion event each), queue
        the rest, and schedule the next arrival (exponential inter-arrival
        time with rate 1).
        :param estat: current system state
        :return: list of events created by this event
        """
        facturadors_lliures = [i for i, x in enumerate(estat.facturador_lliure) if x]
        p = min(len(facturadors_lliures), self.nombre_passatgers)
        # Queued passengers are represented by their arrival time.
        estat.llista_persones_espera += [estat.rellotge] * (self.nombre_passatgers - p)
        # Passengers served immediately have zero waiting time.
        estat.stat.llista_espera += [0]*p
        finalitzats = []
        for i in range(p):
            estat.facturador_lliure[facturadors_lliures[i]] = False
            temps_f = self.rellotge + Simulacio.get_temps_facturacio()
            finalitzats.append(EsdevenimentFinalitzacio(temps_f, facturadors_lliures[i]))
        return [EsdevenimentArribada(self.rellotge + random.expovariate(1))] + finalitzats
class Simulacio:
TEMPS_MAXIM_SIMULACIO = 8*60.0
ERROR_MINIM = 0.005
simulacio_amb_maquines_autofacturacio = False
def __init__(self, nom_arxiu, maquines, facturadors):
Simulacio.simulacio_amb_maquines_autofacturacio = maquines
self.nom_arxiu = nom_arxiu
self.temps_inicial = 0.0
self.llista_esdeveniments = []
self.estat = Estat(nom_arxiu, facturadors=facturadors)
self.logger = logging.getLogger()
# Afegir esdeveniment inicial arribada
self.llista_esdeveniments.append(EsdevenimentArribada(self.temps_inicial))
# Returns a bool
def finalitzar(self, esdeveniment):
error = self.estat.stat.error()
if error is None:
return False
# La simulació s'acaba s'ha arribat al maxim de temps o quan l'error relatiu d'una
# simulació a l'altra és molt petit
return esdeveniment.rellotge > self.TEMPS_MAXIM_SIMULACIO #or error < self.ERROR_MINIM
def executa(self):
esdeveniment = self.obtenir_esdeveniment_proper()
while not self.finalitzar(esdeveniment):
# Executar l'esdevniment i afegir els nous esdeveniments que aquest genera a la llista
# d'esdeveniments
self.llista_esdeveniments += esdeveniment.esdevenir(self.estat)
self.escriure_informacio(esdeveniment)
# Obtenir el següent esdeveniment
esdeveniment = self.obtenir_esdeveniment_proper()
return self.estat.stat.imprimir_resultats(self.estat)
@staticmethod
def get_temps_facturacio():
if Simulacio.simulacio_amb_maquines_autofacturacio:
return random.uniform(4, 10)
else:
return numpy.clip(random.gauss(4.0, 1.0), 2, 6)
def obtenir_esdeveniment_proper(self):
self.llista_esdeveniments.sort()
return self.llista_esdeveniments.pop(0)
def escriure_informacio(self, esdeveniment):
self.logger.debug(str(esdeveniment) + " " + str(self.estat.facturador_lliure))
| 38.2657 | 98 | 0.652695 |
fca1c7cf8beca479553355ac23daddc572c4fcfa
| 92 |
py
|
Python
|
2014/09/aircraft-over-time/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | 14 |
2015-05-08T13:41:51.000Z
|
2021-02-24T12:34:55.000Z
|
2014/09/aircraft-over-time/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | null | null | null |
2014/09/aircraft-over-time/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | 7 |
2015-04-04T04:45:54.000Z
|
2021-02-18T11:12:48.000Z
|
#!/usr/bin/env python
COPY_GOOGLE_DOC_KEY = '0AqjLQISCZzBkdDF6dTlNdC00Q2NPMVEtQ3FYeUh1amc'
| 23 | 68 | 0.847826 |
db3da0335edae7392d3d3f936675177dd93e08ae
| 1,219 |
py
|
Python
|
diversos/Producto.py
|
lcarlin/guppe
|
a0ee7b85e8687e8fb8243fbb509119a94bc6460f
|
[
"Apache-2.0"
] | 1 |
2021-12-18T15:29:24.000Z
|
2021-12-18T15:29:24.000Z
|
diversos/Producto.py
|
lcarlin/guppe
|
a0ee7b85e8687e8fb8243fbb509119a94bc6460f
|
[
"Apache-2.0"
] | null | null | null |
diversos/Producto.py
|
lcarlin/guppe
|
a0ee7b85e8687e8fb8243fbb509119a94bc6460f
|
[
"Apache-2.0"
] | 3 |
2021-08-23T22:45:20.000Z
|
2022-02-17T13:17:09.000Z
|
## https://www.youtube.com/watch?v=PGXwNophTOQ
class Produto:
def __init__(self, nome, preco):
self.nome = nome
self.preco = preco
def desconto (self, percentual):
self.preco = self.preco - (self.preco * (percentual / 100 ))
# Getters note que o nome do metodo Getter é o mesmo da variavel
# e por isso, o nome do atributo foi acrescido de _
@property
def preco (self):
print(' *** Acessando o atributo preco de forma INDIRETA')
return self._preco
#Setter
@preco.setter
def preco(self, valor ):
if isinstance(valor, str ):
valor = float(valor.replace('R$',''))
self._preco = valor
@property
def nome(self):
return self._nome
@nome.setter
def nome(self, valor):
self._nome = valor.title()
def main() :
print('-=-|-=-|-=-|-=-|-=-|-=-|-=-|-=-|-=-|-=-|-=-|-=-|-=-|-=-|-=-')
p1 = Produto('camisete',65)
p1.desconto(15)
print(p1.nome)
print(p1.preco)
print('-=-|-=-|-=-|-=-|-=-|-=-|-=-|-=-|-=-|-=-|-=-|-=-|-=-|-=-|-=-')
p2 = Produto('Caneca LInux', 36)
p2.desconto(17)
print(p2.nome)
print(p2.preco)
if __name__ == '__main__':
main()
| 25.395833 | 72 | 0.533224 |
9187c429759359a97057ea9d6f095a4b71c7013a
| 213 |
py
|
Python
|
listings/chapter05/use_ds.py
|
SaschaKersken/Daten-Prozessanalyse
|
370f07a75b9465329deb3671adbfbef8483f76f6
|
[
"Apache-2.0"
] | 2 |
2021-09-20T06:16:41.000Z
|
2022-01-17T14:24:43.000Z
|
listings/chapter05/use_ds.py
|
SaschaKersken/Daten-Prozessanalyse
|
370f07a75b9465329deb3671adbfbef8483f76f6
|
[
"Apache-2.0"
] | null | null | null |
listings/chapter05/use_ds.py
|
SaschaKersken/Daten-Prozessanalyse
|
370f07a75b9465329deb3671adbfbef8483f76f6
|
[
"Apache-2.0"
] | null | null | null |
from data_structures import Stack, Queue
s = Stack()
s.push(1)
s.push(2)
s.push(3)
while s.empty == False:
print(s.pop())
q = Queue()
q.push(1)
q.push(2)
q.push(3)
while q.empty == False:
print(q.pop())
| 13.3125 | 40 | 0.624413 |
91ec256082d027ce6c9078e4e10073734cb2ca74
| 860 |
py
|
Python
|
abstract_pre_deal2.py
|
pku601/LDA
|
a4bcd3e40ed2920dc29bf0270eab05b245ea003e
|
[
"Apache-2.0"
] | null | null | null |
abstract_pre_deal2.py
|
pku601/LDA
|
a4bcd3e40ed2920dc29bf0270eab05b245ea003e
|
[
"Apache-2.0"
] | null | null | null |
abstract_pre_deal2.py
|
pku601/LDA
|
a4bcd3e40ed2920dc29bf0270eab05b245ea003e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import codecs
import sys
import re
reload(sys)
sys.setdefaultencoding('utf8')
fin = codecs.open('abstract_temp.txt', 'r', 'utf-8')
fout = codecs.open('abstracts_clean.txt', 'w', "utf-8")
lines = fin.readlines()
line_num = 0
for line in lines:
if len(line.strip()) == 0:
continue
line_num += 1
if line_num % 2 == 0:
if line.strip() == 'Section None':
fout.write(line)
continue
line = line.strip()
print line
line = re.sub('\(.*?\)', '', line)
line = re.sub('[^a-zA-Z]', ' ', line)
line = re.sub(' {2,}', ' ', line) # substitute multiple space to one space
print line.strip()
print ''
fout.write(line.strip())
fout.write('\n')
else: # line content: xxx.html
fout.write(line)
fout.close()
| 20 | 83 | 0.536047 |
72d0dd11dcde4646caff5be71a53e1195110b8e1
| 1,204 |
py
|
Python
|
sort/heap_sort.py
|
imttx/awesome-algorithm
|
d72277cc278131ae670bea74db4283b02f362758
|
[
"MIT"
] | null | null | null |
sort/heap_sort.py
|
imttx/awesome-algorithm
|
d72277cc278131ae670bea74db4283b02f362758
|
[
"MIT"
] | null | null | null |
sort/heap_sort.py
|
imttx/awesome-algorithm
|
d72277cc278131ae670bea74db4283b02f362758
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2018-12-22 11:16:42
#
# 堆排序 纯粹练手
class HeapSort(object):
def __init__(self, data):
self.data = data
def sort(self):
# 建立最大堆
for i in range(len(self.data)//2-1, -1, -1):
# 从第一个非叶子结点从下至上,从右至左调整结构
self.sink(i, len(self.data))
# 调整下沉元素
for i in range(len(self.data)-1, 0, -1):
# 堆顶元素移到最后一位
self.swap(0, i)
# 重新调整堆结构
self.sink(0, i)
# 下沉元素
def sink(self, i, length):
tmp = self.data[i]
k = 2 * i + 1
while k < length:
if k + 1 < length and self.data[k] < self.data[k+1]:
k += 1
if self.data[k] > tmp:
self.data[i] = self.data[k]
i = k
k = 2*k+1
else:
break
self.data[i] = tmp
def swap(self, i, j):
self.data[i], self.data[j] = self.data[j], self.data[i]
def display(self):
print(self.data)
if __name__ == '__main__':
data = [10, 3, 4, 8, 7, 20, 1, 9, 2]
heap = HeapSort(data)
heap.display()
heap.sort()
heap.display()
| 21.5 | 64 | 0.45598 |
43203a5da8df11c98e0f707b614cdf1166c3657b
| 1,025 |
py
|
Python
|
02_transcription/test_google/transcribe_file.py
|
Verena-Lemberger/masterarbeit
|
2819f222b75b31bd2a44b45ee3417998c385eeab
|
[
"MIT"
] | null | null | null |
02_transcription/test_google/transcribe_file.py
|
Verena-Lemberger/masterarbeit
|
2819f222b75b31bd2a44b45ee3417998c385eeab
|
[
"MIT"
] | 14 |
2021-06-21T19:41:33.000Z
|
2021-06-21T19:41:35.000Z
|
02_transcription/test_google/transcribe_file.py
|
Verena-Lemberger/masterarbeit
|
2819f222b75b31bd2a44b45ee3417998c385eeab
|
[
"MIT"
] | null | null | null |
from google.cloud import speech_v1 as speech
def speech_to_text(config, audio):
client = speech.SpeechClient()
response = client.recognize(config=config, audio=audio)
print_sentences(response)
def print_sentences(response):
for result in response.results:
best_alternative = result.alternatives[0]
transcript = best_alternative.transcript
confidence = best_alternative.confidence
print("-" * 80)
print(f"Transcript: {transcript}")
print(f"Confidence: {confidence:.0%}")
print_word_offsets(best_alternative)
def print_word_offsets(alternative):
for word in alternative.words:
start_s = word.start_time.total_seconds()
end_s = word.end_time.total_seconds()
word = word.word
print(f"{start_s:>7.3f} | {end_s:>7.3f} | {word}")
config = dict(
language_code="en-US",
enable_automatic_punctuation=True,
enable_word_time_offsets=True,
)
audio = dict(uri="gs://cloud-samples-data/speech/brooklyn_bridge.flac")
| 33.064516 | 71 | 0.696585 |
432f034bda863b543bde3741605ea699b7959e06
| 775 |
py
|
Python
|
event_detector/add_test_event_detection_task.py
|
MahdiFarnaghi/gtm
|
adbec372786262607291f901a444a0ebe9e98b48
|
[
"Apache-2.0"
] | null | null | null |
event_detector/add_test_event_detection_task.py
|
MahdiFarnaghi/gtm
|
adbec372786262607291f901a444a0ebe9e98b48
|
[
"Apache-2.0"
] | null | null | null |
event_detector/add_test_event_detection_task.py
|
MahdiFarnaghi/gtm
|
adbec372786262607291f901a444a0ebe9e98b48
|
[
"Apache-2.0"
] | null | null | null |
import sys
import os
from time import sleep
from gttm.ts.task_scheduler import TaskScheduler
from gttm.db import PostgresHandler_EventDetection
from dotenv import load_dotenv
load_dotenv()
postgres = PostgresHandler_EventDetection(
os.getenv('DB_HOSTNAME'),
os.getenv('DB_PORT'),
os.getenv('DB_DATABASE'),
os.getenv('DB_USER'),
os.getenv('DB_PASS'))
postgres.check_db()
delay = 10
postgres.delete_event_detection_tasks()
postgres.insert_event_detection_task('task 1 NYC', 'desc ...', -76, 39, 71.5, 42, 36, 'en', 1, True)
# postgres.insert_event_detection_task('task 2 London', 'desc ...', -1, 51, 1, 52, 36, 'en', 5, True)
# postgres.insert_event_detection_task('task 2 Lisbon', 'desc ...', -9.5, 38.5, -9, 39, 36, 'pt', 5, True)
print('Inserted')
| 29.807692 | 106 | 0.713548 |
60b55f5e8ad9e652b1cbcf018bee8025ca4fe33d
| 12,763 |
py
|
Python
|
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/nios_nsgroup.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/nios_nsgroup.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/nios_nsgroup.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: nios_nsgroup
short_description: Configure InfoBlox DNS Nameserver Groups
extends_documentation_fragment:
- community.general.nios
author:
- Erich Birngruber (@ebirn)
- Sumit Jaiswal (@sjaiswal)
description:
- Adds and/or removes nameserver groups form Infoblox NIOS servers.
This module manages NIOS C(nsgroup) objects using the Infoblox. WAPI interface over REST.
requirements:
- infoblox_client
options:
name:
description:
- Specifies the name of the NIOS nameserver group to be managed.
required: true
grid_primary:
description:
- This host is to be used as primary server in this nameserver group. It must be a grid member.
This option is required when setting I(use_external_primaries) to C(false).
suboptions:
name:
description:
- Provide the name of the grid member to identify the host.
required: true
enable_preferred_primaries:
description:
- This flag represents whether the preferred_primaries field values of this member are used (see Infoblox WAPI docs).
default: false
type: bool
grid_replicate:
description:
- Use DNS zone transfers if set to C(True) or ID Grid Replication if set to C(False).
type: bool
default: false
lead:
description:
- This flag controls if the grid lead secondary nameserver performs zone transfers to non lead secondaries.
type: bool
default: false
stealth:
description:
- Configure the external nameserver as stealth server (without NS record) in the zones.
type: bool
default: false
grid_secondaries:
description:
- Configures the list of grid member hosts that act as secondary nameservers.
This option is required when setting I(use_external_primaries) to C(true).
suboptions:
name:
description:
- Provide the name of the grid member to identify the host.
required: true
enable_preferred_primaries:
description:
- This flag represents whether the preferred_primaries field values of this member are used (see Infoblox WAPI docs).
default: false
type: bool
grid_replicate:
description:
- Use DNS zone transfers if set to C(True) or ID Grid Replication if set to C(False)
type: bool
default: false
lead:
description:
- This flag controls if the grid lead secondary nameserver performs zone transfers to non lead secondaries.
type: bool
default: false
stealth:
description:
- Configure the external nameserver as stealth server (without NS record) in the zones.
type: bool
default: false
preferred_primaries:
description:
- Provide a list of elements like in I(external_primaries) to set the precedence of preferred primary nameservers.
is_grid_default:
description:
- If set to C(True) this nsgroup will become the default nameserver group for new zones.
type: bool
required: false
default: false
use_external_primary:
description:
- This flag controls whether the group is using an external primary nameserver.
Note that modification of this field requires passing values for I(grid_secondaries) and I(external_primaries).
type: bool
required: false
default: false
external_primaries:
description:
- Configures a list of external nameservers (non-members of the grid).
This option is required when setting I(use_external_primaries) to C(true).
suboptions:
address:
description:
- Configures the IP address of the external nameserver
required: true
name:
description:
- Set a label for the external nameserver
required: true
stealth:
description:
- Configure the external nameserver as stealth server (without NS record) in the zones.
type: bool
default: false
tsig_key_name:
description:
- Sets a label for the I(tsig_key) value
tsig_key_alg:
description:
- Provides the algorithm used for the I(tsig_key) in use.
choices: ['HMAC-MD5', 'HMAC-SHA256']
default: 'HMAC-MD5'
tsig_key:
description:
- Set a DNS TSIG key for the nameserver to secure zone transfers (AFXRs).
required: false
external_secondaries:
description:
- Allows to provide a list of external secondary nameservers, that are not members of the grid.
suboptions:
address:
description:
- Configures the IP address of the external nameserver
required: true
name:
description:
- Set a label for the external nameserver
required: true
stealth:
description:
- Configure the external nameserver as stealth server (without NS record) in the zones.
type: bool
default: false
tsig_key_name:
description:
- Sets a label for the I(tsig_key) value
tsig_key_alg:
description:
- Provides the algorithm used for the I(tsig_key) in use.
choices: ['HMAC-MD5', 'HMAC-SHA256']
default: 'HMAC-MD5'
tsig_key:
description:
- Set a DNS TSIG key for the nameserver to secure zone transfers (AFXRs).
extattrs:
description:
- Allows for the configuration of Extensible Attributes on the
instance of the object. This argument accepts a set of key / value
pairs for configuration.
required: false
comment:
description:
- Configures a text string comment to be associated with the instance
of this object. The provided text string will be configured on the
object instance.
required: false
state:
description:
- Configures the intended state of the instance of the object on
the NIOS server. When this value is set to C(present), the object
is configured on the device and when this value is set to C(absent)
the value is removed (if necessary) from the device.
choices: [present, absent]
default: present
'''
EXAMPLES = '''
- name: create simple infoblox nameserver group
nios_nsgroup:
name: my-simple-group
comment: "this is a simple nameserver group"
grid_primary:
- name: infoblox-test.example.com
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: create infoblox nameserver group with external primaries
nios_nsgroup:
name: my-example-group
use_external_primary: true
comment: "this is my example nameserver group"
external_primaries: "{{ ext_nameservers }}"
grid_secondaries:
- name: infoblox-test.example.com
lead: True
preferred_primaries: "{{ ext_nameservers }}"
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: delete infoblox nameserver group
nios_nsgroup:
name: my-simple-group
comment: "this is a simple nameserver group"
grid_primary:
- name: infoblox-test.example.com
state: absent
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import WapiModule
from ansible_collections.community.general.plugins.module_utils.net_tools.nios.api import NIOS_NSGROUP
# from infoblox documentation
# Fields List
# Field Type Req R/O Base Search
# comment String N N Y : = ~
# extattrs Extattr N N N ext
# external_primaries [struct] N N N N/A
# external_secondaries [struct] N N N N/A
# grid_primary [struct] N N N N/A
# grid_secondaries [struct] N N N N/A
# is_grid_default Bool N N N N/A
# is_multimaster Bool N Y N N/A
# name String Y N Y : = ~
# use_external_primary Bool N N N N/A
def main():
'''entrypoint for module execution.'''
argument_spec = dict(
provider=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
)
# cleanup tsig fields
def clean_tsig(ext):
if 'tsig_key' in ext and not ext['tsig_key']:
del ext['tsig_key']
if 'tsig_key' not in ext and 'tsig_key_name' in ext and not ext['tsig_key_name']:
del ext['tsig_key_name']
if 'tsig_key' not in ext and 'tsig_key_alg' in ext:
del ext['tsig_key_alg']
def clean_grid_member(member):
if member['preferred_primaries']:
for ext in member['preferred_primaries']:
clean_tsig(ext)
if member['enable_preferred_primaries'] is False:
del member['enable_preferred_primaries']
del member['preferred_primaries']
if member['lead'] is False:
del member['lead']
if member['grid_replicate'] is False:
del member['grid_replicate']
def ext_primaries_transform(module):
if module.params['external_primaries']:
for ext in module.params['external_primaries']:
clean_tsig(ext)
return module.params['external_primaries']
def ext_secondaries_transform(module):
if module.params['external_secondaries']:
for ext in module.params['external_secondaries']:
clean_tsig(ext)
return module.params['external_secondaries']
def grid_primary_preferred_transform(module):
for member in module.params['grid_primary']:
clean_grid_member(member)
return module.params['grid_primary']
def grid_secondaries_preferred_primaries_transform(module):
for member in module.params['grid_secondaries']:
clean_grid_member(member)
return module.params['grid_secondaries']
extserver_spec = dict(
address=dict(required=True, ib_req=True),
name=dict(required=True, ib_req=True),
stealth=dict(type='bool', default=False),
tsig_key=dict(),
tsig_key_alg=dict(choices=['HMAC-MD5', 'HMAC-SHA256'], default='HMAC-MD5'),
tsig_key_name=dict(required=True)
)
memberserver_spec = dict(
name=dict(required=True, ib_req=True),
enable_preferred_primaries=dict(type='bool', default=False),
grid_replicate=dict(type='bool', default=False),
lead=dict(type='bool', default=False),
preferred_primaries=dict(type='list', elements='dict', options=extserver_spec, default=[]),
stealth=dict(type='bool', default=False),
)
ib_spec = dict(
name=dict(required=True, ib_req=True),
grid_primary=dict(type='list', elements='dict', options=memberserver_spec,
transform=grid_primary_preferred_transform),
grid_secondaries=dict(type='list', elements='dict', options=memberserver_spec,
transform=grid_secondaries_preferred_primaries_transform),
external_primaries=dict(type='list', elements='dict', options=extserver_spec, transform=ext_primaries_transform),
external_secondaries=dict(type='list', elements='dict', options=extserver_spec,
transform=ext_secondaries_transform),
is_grid_default=dict(type='bool', default=False),
use_external_primary=dict(type='bool', default=False),
extattrs=dict(),
comment=dict(),
)
argument_spec.update(ib_spec)
argument_spec.update(WapiModule.provider_spec)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
wapi = WapiModule(module)
result = wapi.run(NIOS_NSGROUP, ib_spec)
module.exit_json(**result)
if __name__ == '__main__':
main()
| 36.361823 | 127 | 0.643893 |
e8330226b5b0d7a16ec946fa0dc198b61deafad6
| 9,017 |
py
|
Python
|
tests/cli/test_rasa_export.py
|
isaac-philip/rasa
|
923db75e03921921a6f1f3489a2c5574138ee685
|
[
"Apache-2.0"
] | 5 |
2020-08-28T06:44:31.000Z
|
2021-03-14T18:46:01.000Z
|
tests/cli/test_rasa_export.py
|
alfredfrancis/rasa
|
d8d226408f20cc2563c3aefbccef3e364a447666
|
[
"Apache-2.0"
] | 71 |
2020-08-25T02:46:08.000Z
|
2022-03-01T13:23:38.000Z
|
tests/cli/test_rasa_export.py
|
alfredfrancis/rasa
|
d8d226408f20cc2563c3aefbccef3e364a447666
|
[
"Apache-2.0"
] | 3 |
2019-02-27T10:13:16.000Z
|
2019-07-26T08:33:45.000Z
|
import argparse
from pathlib import Path
from typing import Callable, Optional, Dict, Text, List, Tuple, Any
from unittest.mock import Mock
import pytest
from _pytest.monkeypatch import MonkeyPatch
from _pytest.pytester import RunResult
import rasa.core.utils as rasa_core_utils
from rasa.cli import export
from rasa.core.brokers.pika import PikaEventBroker
from rasa.core.events import UserUttered
from rasa.core.trackers import DialogueStateTracker
from rasa.exceptions import PublishingError, NoEventsToMigrateError
from tests.conftest import (
MockExporter,
random_user_uttered_event,
write_endpoint_config_to_yaml,
)
def test_export_help(run: Callable[..., RunResult]):
output = run("export", "--help")
help_text = """usage: rasa export [-h] [-v] [-vv] [--quiet] [--endpoints ENDPOINTS]
[--minimum-timestamp MINIMUM_TIMESTAMP]
[--maximum-timestamp MAXIMUM_TIMESTAMP]
[--conversation-ids CONVERSATION_IDS]"""
lines = help_text.split("\n")
for i, line in enumerate(lines):
assert output.outlines[i] == line
@pytest.mark.parametrize(
"minimum_timestamp,maximum_timestamp",
[(2, 3), (None, 5.5), (None, None), (5, None)],
)
def test_validate_timestamp_options(
minimum_timestamp: Optional[float], maximum_timestamp: Optional[float],
):
args = argparse.Namespace()
args.minimum_timestamp = (
str(minimum_timestamp) if minimum_timestamp is not None else None
)
args.maximum_timestamp = (
str(maximum_timestamp) if maximum_timestamp is not None else None
)
# no error is raised
# noinspection PyProtectedMember
export._assert_max_timestamp_is_greater_than_min_timestamp(args)
def test_validate_timestamp_options_with_invalid_timestamps():
args = argparse.Namespace(minimum_timestamp=3, maximum_timestamp=2)
with pytest.raises(SystemExit):
# noinspection PyProtectedMember
export._assert_max_timestamp_is_greater_than_min_timestamp(args)
# noinspection PyProtectedMember
def test_get_event_broker_and_tracker_store_from_endpoint_config(tmp_path: Path):
# write valid config to file
endpoints_path = write_endpoint_config_to_yaml(
tmp_path, {"event_broker": {"type": "sql"}, "tracker_store": {"type": "sql"}}
)
available_endpoints = rasa_core_utils.read_endpoints_from_path(endpoints_path)
# fetching the event broker is successful
assert export._get_event_broker(available_endpoints)
assert export._get_tracker_store(available_endpoints)
# noinspection PyProtectedMember
def test_get_event_broker_from_endpoint_config_error_exit(tmp_path: Path):
# write config without event broker to file
endpoints_path = write_endpoint_config_to_yaml(
tmp_path, {"tracker_store": {"type": "sql"}}
)
available_endpoints = rasa_core_utils.read_endpoints_from_path(endpoints_path)
with pytest.raises(SystemExit):
assert export._get_event_broker(available_endpoints)
def test_get_tracker_store_from_endpoint_config_error_exit(tmp_path: Path):
# write config without event broker to file
endpoints_path = write_endpoint_config_to_yaml(tmp_path, {})
available_endpoints = rasa_core_utils.read_endpoints_from_path(endpoints_path)
with pytest.raises(SystemExit):
# noinspection PyProtectedMember
assert export._get_tracker_store(available_endpoints)
@pytest.mark.parametrize(
"requested_ids,expected",
[("id1", ["id1"]), ("id1,id2", ["id1", "id2"]), (None, None), ("", None)],
)
def test_get_requested_conversation_ids(
requested_ids: Optional[Text], expected: Optional[List[Text]]
):
# noinspection PyProtectedMember
assert export._get_requested_conversation_ids(requested_ids) == expected
def test_prepare_pika_event_broker():
# mock a pika event broker
pika_broker = Mock(spec=PikaEventBroker)
# patch the spinner so we can execute the `_prepare_pika_producer()` function
pika_broker.is_ready.return_value = True
# noinspection PyProtectedMember
export._prepare_event_broker(pika_broker)
# the attributes are set as expected
assert not pika_broker.should_keep_unpublished_messages
assert pika_broker.raise_on_failure
@pytest.mark.parametrize(
"current_timestamp,maximum_timestamp,endpoints_path,requested_ids,expected",
[
(1.0, None, None, None, "--minimum-timestamp 1.0"),
(1.0, None, None, ["5", "6"], "--minimum-timestamp 1.0 --conversation-ids 5,6"),
(1.0, 3.4, None, None, "--minimum-timestamp 1.0 --maximum-timestamp 3.4"),
(
1.0,
2.5,
"a.yml",
None,
"--endpoints a.yml --minimum-timestamp 1.0 --maximum-timestamp 2.5",
),
(
1.0,
2.5,
"a.yml",
["1", "2", "3"],
(
"--endpoints a.yml --minimum-timestamp 1.0 --maximum-timestamp 2.5 "
"--conversation-ids 1,2,3"
),
),
],
)
def test_get_continuation_command(
current_timestamp: float,
maximum_timestamp: Optional[float],
endpoints_path: Optional[Text],
requested_ids: Optional[List[Text]],
expected: Text,
):
exporter = MockExporter()
exporter.maximum_timestamp = maximum_timestamp
exporter.endpoints_path = endpoints_path
exporter.requested_conversation_ids = requested_ids
# noinspection PyProtectedMember
assert (
export._get_continuation_command(exporter, current_timestamp)
== f"rasa export {expected}"
)
def _add_conversation_id_to_event(event: Dict, conversation_id: Text):
event["sender_id"] = conversation_id
def prepare_namespace_and_mocked_tracker_store_with_events(
temporary_path: Path, monkeypatch: MonkeyPatch
) -> Tuple[List[UserUttered], argparse.Namespace]:
endpoints_path = write_endpoint_config_to_yaml(
temporary_path,
{"event_broker": {"type": "pika"}, "tracker_store": {"type": "sql"}},
)
# export these conversation IDs
all_conversation_ids = ["id-1", "id-2", "id-3"]
requested_conversation_ids = ["id-1", "id-2"]
# create namespace with a set of cmdline arguments
namespace = argparse.Namespace(
endpoints=endpoints_path,
conversation_ids=",".join(requested_conversation_ids),
minimum_timestamp=1.0,
maximum_timestamp=10.0,
)
# prepare events from different senders and different timestamps
events = [random_user_uttered_event(timestamp) for timestamp in [1, 2, 3, 4, 11, 5]]
events_for_conversation_id = {
all_conversation_ids[0]: [events[0], events[1]],
all_conversation_ids[1]: [events[2], events[3], events[4]],
all_conversation_ids[2]: [events[5]],
}
def _get_tracker(conversation_id: Text) -> DialogueStateTracker:
return DialogueStateTracker.from_events(
conversation_id, events_for_conversation_id[conversation_id]
)
# mock tracker store
tracker_store = Mock()
tracker_store.keys.return_value = all_conversation_ids
tracker_store.retrieve.side_effect = _get_tracker
monkeypatch.setattr(export, "_get_tracker_store", lambda _: tracker_store)
return events, namespace
def test_export_trackers(tmp_path: Path, monkeypatch: MonkeyPatch):
events, namespace = prepare_namespace_and_mocked_tracker_store_with_events(
tmp_path, monkeypatch
)
# mock event broker so we can check its `publish` method is called
event_broker = Mock()
event_broker.publish = Mock()
monkeypatch.setattr(export, "_get_event_broker", lambda _: event_broker)
# run the export function
export.export_trackers(namespace)
# check that only events 1, 2, 3, and 4 have been published
# event 6 was sent by `id-3` which was not requested, and event 5
# lies outside the requested time range
calls = event_broker.publish.mock_calls
# only four events were published (i.e. `publish()` method was called four times)
assert len(calls) == 4
# call objects are tuples of (name, pos. args, kwargs)
# args itself is a tuple, and we want to access the first one, hence `call[1][0]`
# check that events 1-4 were published
assert all(
any(call[1][0]["text"] == event.text for call in calls) for event in events[:4]
)
@pytest.mark.parametrize("exception", [NoEventsToMigrateError, PublishingError(123)])
def test_export_trackers_publishing_exceptions(
tmp_path: Path, monkeypatch: MonkeyPatch, exception: Exception
):
events, namespace = prepare_namespace_and_mocked_tracker_store_with_events(
tmp_path, monkeypatch
)
# mock event broker so we can check its `publish` method is called
event_broker = Mock()
event_broker.publish.side_effect = exception
monkeypatch.setattr(export, "_get_event_broker", lambda _: event_broker)
with pytest.raises(SystemExit):
export.export_trackers(namespace)
| 34.155303 | 88 | 0.708883 |
1c8e6fbb40d918349d304cff48d846ee8d49faa7
| 109 |
py
|
Python
|
Shivani/positive.py
|
63Shivani/Python-BootCamp
|
2ed0ef95af35d35c0602031670fecfc92d8cea0a
|
[
"MIT"
] | null | null | null |
Shivani/positive.py
|
63Shivani/Python-BootCamp
|
2ed0ef95af35d35c0602031670fecfc92d8cea0a
|
[
"MIT"
] | null | null | null |
Shivani/positive.py
|
63Shivani/Python-BootCamp
|
2ed0ef95af35d35c0602031670fecfc92d8cea0a
|
[
"MIT"
] | null | null | null |
x = int(input("enter first number\n"))
if(x>=0):
print(x,"is positive")
else:
print(x,"is negative")
| 18.166667 | 38 | 0.605505 |
98dea6c71ea2216bb74348aa1c6b68e3146f385a
| 9,797 |
py
|
Python
|
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/imgadm.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/imgadm.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/modules/imgadm.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, 2017 Jasper Lievisse Adriaanse <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: imgadm
short_description: Manage SmartOS images
description:
- Manage SmartOS virtual machine images through imgadm(1M)
author: Jasper Lievisse Adriaanse (@jasperla)
options:
force:
required: false
type: bool
description:
- Force a given operation (where supported by imgadm(1M)).
pool:
required: false
default: zones
description:
- zpool to import to or delete images from.
source:
required: false
description:
- URI for the image source.
state:
required: true
choices: [ present, absent, deleted, imported, updated, vacuumed ]
description:
      - State the object operated on should be in. C(imported) is an alias
        for C(present) and C(deleted) for C(absent). When set to C(vacuumed)
and C(uuid) to C(*), it will remove all unused images.
type:
required: false
choices: [ imgapi, docker, dsapi ]
default: imgapi
description:
- Type for image sources.
uuid:
required: false
description:
- Image UUID. Can either be a full UUID or C(*) for all images.
requirements:
- python >= 2.6
'''
EXAMPLES = '''
- name: Import an image
imgadm:
uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764'
state: imported
- name: Delete an image
imgadm:
uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764'
state: deleted
- name: Update all images
imgadm:
uuid: '*'
state: updated
- name: Update a single image
imgadm:
uuid: '70e3ae72-96b6-11e6-9056-9737fd4d0764'
state: updated
- name: Add a source
imgadm:
source: 'https://datasets.project-fifo.net'
state: present
- name: Add a Docker source
imgadm:
source: 'https://docker.io'
type: docker
state: present
- name: Remove a source
imgadm:
source: 'https://docker.io'
state: absent
'''
RETURN = '''
source:
description: Source that is managed.
returned: When not managing an image.
type: str
sample: https://datasets.project-fifo.net
uuid:
description: UUID for an image operated on.
returned: When not managing an image source.
type: str
sample: 70e3ae72-96b6-11e6-9056-9737fd4d0764
state:
description: State of the target, after execution.
returned: success
type: str
sample: 'present'
'''
import re
from ansible.module_utils.basic import AnsibleModule
# Shortcut for the imgadm(1M) command. While imgadm(1M) supports a
# -E option to return any errors in JSON, the generated JSON does not play well
# with the JSON parsers of Python. The returned message contains '\n' as part of
# the stacktrace, which breaks the parsers.
class Imgadm(object):
    """Thin wrapper around the imgadm(1M) CLI.

    Builds command lines, runs them through AnsibleModule.run_command, and
    derives the ``changed`` flag by pattern-matching imgadm's stdout/stderr,
    since imgadm itself gives no structured change feedback.
    """

    def __init__(self, module):
        self.module = module
        self.params = module.params
        self.cmd = module.get_bin_path('imgadm', required=True)
        self.changed = False
        self.uuid = module.params['uuid']
        # Since there are a number of (natural) aliases, prevent having to look
        # them up everytime we operate on `state`.
        if self.params['state'] in ['present', 'imported', 'updated']:
            self.present = True
        else:
            self.present = False
        # Perform basic UUID validation upfront.
        if self.uuid and self.uuid != '*':
            if not re.match('^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$', self.uuid, re.IGNORECASE):
                module.fail_json(msg='Provided value for uuid option is not a valid UUID.')

    # Helper method to massage stderr
    def errmsg(self, stderr):
        """Extract the human-readable portion of an imgadm error line, or a
        generic message when stderr does not match the expected shape."""
        match = re.match(r'^imgadm .*?: error \(\w+\): (.*): .*', stderr)
        if match:
            return match.groups()[0]
        else:
            return 'Unexpected failure'

    def update_images(self):
        """Run ``imgadm update`` for one image (self.uuid) or all ('*')."""
        if self.uuid == '*':
            cmd = '{0} update'.format(self.cmd)
        else:
            cmd = '{0} update {1}'.format(self.cmd, self.uuid)
        (rc, stdout, stderr) = self.module.run_command(cmd)
        if rc != 0:
            self.module.fail_json(msg='Failed to update images: {0}'.format(self.errmsg(stderr)))
        # There is no feedback from imgadm(1M) to determine if anything
        # was actually changed. So treat this as an 'always-changes' operation.
        # Note that 'imgadm -v' produces unparseable JSON...
        self.changed = True

    def manage_sources(self):
        """Add (when state is present-like) or delete an image source URI,
        setting self.changed from imgadm's textual response."""
        force = self.params['force']
        source = self.params['source']
        imgtype = self.params['type']
        cmd = '{0} sources'.format(self.cmd)
        if force:
            cmd += ' -f'
        if self.present:
            cmd = '{0} -a {1} -t {2}'.format(cmd, source, imgtype)
            (rc, stdout, stderr) = self.module.run_command(cmd)
            if rc != 0:
                self.module.fail_json(msg='Failed to add source: {0}'.format(self.errmsg(stderr)))
            # Check the various responses.
            # Note that trying to add a source with the wrong type is handled
            # above as it results in a non-zero status.
            regex = 'Already have "{0}" image source "{1}", no change'.format(imgtype, source)
            if re.match(regex, stdout):
                self.changed = False
            regex = 'Added "%s" image source "%s"' % (imgtype, source)
            if re.match(regex, stdout):
                self.changed = True
        else:
            # Type is ignored by imgadm(1M) here
            cmd += ' -d %s' % source
            (rc, stdout, stderr) = self.module.run_command(cmd)
            if rc != 0:
                self.module.fail_json(msg='Failed to remove source: {0}'.format(self.errmsg(stderr)))
            regex = 'Do not have image source "%s", no change' % source
            if re.match(regex, stdout):
                self.changed = False
            regex = 'Deleted ".*" image source "%s"' % source
            if re.match(regex, stdout):
                self.changed = True

    def manage_images(self):
        """Vacuum, import, or delete images depending on the requested state."""
        pool = self.params['pool']
        state = self.params['state']
        if state == 'vacuumed':
            # Unconditionally pass '--force', otherwise we're prompted with 'y/N'
            cmd = '{0} vacuum -f'.format(self.cmd)
            (rc, stdout, stderr) = self.module.run_command(cmd)
            if rc != 0:
                self.module.fail_json(msg='Failed to vacuum images: {0}'.format(self.errmsg(stderr)))
            else:
                if stdout == '':
                    self.changed = False
                else:
                    self.changed = True
        # NOTE(review): when state == 'vacuumed', self.present is False, so
        # control continues into the delete branch below (with uuid '*') —
        # confirm this fall-through is intended.
        if self.present:
            cmd = '{0} import -P {1} -q {2}'.format(self.cmd, pool, self.uuid)
            (rc, stdout, stderr) = self.module.run_command(cmd)
            if rc != 0:
                self.module.fail_json(msg='Failed to import image: {0}'.format(self.errmsg(stderr)))
            regex = r'Image {0} \(.*\) is already installed, skipping'.format(self.uuid)
            if re.match(regex, stdout):
                self.changed = False
            regex = '.*ActiveImageNotFound.*'
            if re.match(regex, stderr):
                self.changed = False
            regex = 'Imported image {0}.*'.format(self.uuid)
            if re.match(regex, stdout.splitlines()[-1]):
                self.changed = True
        else:
            cmd = '{0} delete -P {1} {2}'.format(self.cmd, pool, self.uuid)
            (rc, stdout, stderr) = self.module.run_command(cmd)
            regex = '.*ImageNotInstalled.*'
            if re.match(regex, stderr):
                # Even if the 'rc' was non-zero (3), we handled the situation
                # in order to determine if there was a change.
                self.changed = False
            regex = 'Deleted image {0}'.format(self.uuid)
            if re.match(regex, stdout):
                self.changed = True
def main():
    """Ansible entry point: validate parameters, dispatch to source- or
    image-management, and report the resulting state via exit_json."""
    argument_spec = dict(
        force=dict(default=None, type='bool'),
        pool=dict(default='zones'),
        source=dict(default=None),
        state=dict(default=None, required=True,
                   choices=['present', 'absent', 'deleted', 'imported', 'updated', 'vacuumed']),
        type=dict(default='imgapi', choices=['imgapi', 'docker', 'dsapi']),
        uuid=dict(default=None)
    )
    # imgadm(1M) has no dry-run mode, so check mode cannot be supported.
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=False,
    )
    imgadm = Imgadm(module)
    uuid = module.params['uuid']
    source = module.params['source']
    state = module.params['state']
    result = {'state': state}
    if source:
        # A source URI was given: manage image sources rather than images.
        result['source'] = source
        imgadm.manage_sources()
    else:
        result['uuid'] = uuid
        if state == 'updated':
            imgadm.update_images()
        else:
            # '*' is only meaningful when updating or vacuuming.
            if (uuid == '*') and (state != 'vacuumed'):
                module.fail_json(msg='Can only specify uuid as "*" when updating image(s)')
            imgadm.manage_images()
    result['changed'] = imgadm.changed
    module.exit_json(**result)
# Invoke the module entry point when executed directly (as Ansible does).
if __name__ == '__main__':
    main()
| 31.705502 | 129 | 0.578136 |
c763616e2fc0bdf8ccaa39f1b90078bbf160a658
| 345 |
py
|
Python
|
leetcode/168-Excel-Sheet-Column-Title/ExcelSheetColTitle_001.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2015-12-16T04:01:03.000Z
|
2015-12-16T04:01:03.000Z
|
leetcode/168-Excel-Sheet-Column-Title/ExcelSheetColTitle_001.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2016-02-09T06:00:07.000Z
|
2016-02-09T07:20:13.000Z
|
leetcode/168-Excel-Sheet-Column-Title/ExcelSheetColTitle_001.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 2 |
2019-06-27T09:07:26.000Z
|
2019-07-01T04:40:13.000Z
|
class Solution(object):
    def convertToTitle(self, n):
        """Convert a positive integer to its Excel column title.

        1 -> 'A', 26 -> 'Z', 27 -> 'AA', 702 -> 'ZZ', 703 -> 'AAA'.
        This is base-26 with digits 1..26 instead of 0..25, hence the
        remainder adjustment below.

        :type n: int
        :rtype: str
        """
        res = ''
        base = ord('A') - 1
        while n > 0:
            rem = n % 26
            # A remainder of 0 means the digit is 'Z' (value 26).
            rem = 26 if rem == 0 else rem
            res = chr(rem + base) + res
            # BUGFIX: use floor division; the original '/' produced a float
            # on Python 3, breaking chr(rem + base) on the next iteration.
            n = (n - rem) // 26
        return res
| 23 | 41 | 0.382609 |
c78c29fb4fdf9cc522c333a830f015267925b299
| 1,091 |
py
|
Python
|
packages/watchmen-data-kernel/src/watchmen_data_kernel/meta/key_store_service.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-data-kernel/src/watchmen_data_kernel/meta/key_store_service.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-data-kernel/src/watchmen_data_kernel/meta/key_store_service.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
from typing import Optional
from watchmen_auth import PrincipalService
from watchmen_data_kernel.cache import CacheService
from watchmen_meta.common import ask_meta_storage
from watchmen_meta.system import KeyStoreService as KeyStoreStorageService
from watchmen_model.common import TenantId
from watchmen_model.system import KeyStore
class KeyStoreService:
    """Reads key stores through the cache, falling back to meta storage."""

    def __init__(self, principal_service: PrincipalService):
        self.principalService = principal_service

    # noinspection PyMethodMayBeStatic
    def find_by_type(self, key_type: str, tenant_id: TenantId) -> Optional[KeyStore]:
        """Return the key store for (key_type, tenant_id), or None when absent.

        A cache hit short-circuits; otherwise the meta storage is queried
        inside a transaction and the result (or its absence) is recorded in
        the cache for subsequent lookups.
        """
        hit, cached = CacheService.key_store().get(key_type, tenant_id)
        if hit:
            return cached
        storage = KeyStoreStorageService(ask_meta_storage())
        storage.begin_transaction()
        try:
            loaded: KeyStore = storage.find_by_type(key_type, tenant_id)
            if loaded is None:
                # Remember the miss so repeated lookups can skip storage.
                CacheService.key_store().declare_not_existing(key_type, tenant_id)
                return None
            CacheService.key_store().put(loaded)
            return loaded
        finally:
            storage.close_transaction()
| 33.060606 | 82 | 0.817599 |
c7b580fef3ec6f11d26161930a22476480050a46
| 547 |
pyde
|
Python
|
sketches/runningorc01/runningorc01.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | 4 |
2018-06-03T02:11:46.000Z
|
2021-08-18T19:55:15.000Z
|
sketches/runningorc01/runningorc01.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | null | null | null |
sketches/runningorc01/runningorc01.pyde
|
kantel/processingpy
|
74aae222e46f68d1c8f06307aaede3cdae65c8ec
|
[
"MIT"
] | 3 |
2019-12-23T19:12:51.000Z
|
2021-04-30T14:00:31.000Z
|
# Running Orc 01
# Processing (Python mode) sketch: an orc sprite walks across a field and is
# steered with the arrow keys. loadImage/frameRate/size/background/key/keyCode
# are Processing built-ins.
from orc2 import Orc

# Sprite starts slightly above the visible area so it walks into view.
orc = Orc(160, -32)


def setup():
    """Processing setup hook: load assets and configure the sketch."""
    global bg
    bg = loadImage("field.png")
    frameRate(30)
    size(320, 320)
    orc.loadPics()
    # Per-frame movement step in pixels along each axis.
    orc.dx = 2
    orc.dy = 2


def draw():
    """Processing draw loop: repaint the background, then advance and render the orc."""
    background(bg)
    orc.move()
    orc.display()


def keyPressed():
    """Processing key handler: arrow keys set orc.dir (0=right, 1=down, 2=left, 3=up)."""
    # NOTE(review): inside this function the name `keyPressed` resolves to the
    # function object itself (always truthy), not Processing's key-state flag —
    # confirm the extra check is intended.
    if keyPressed and key == CODED:
        if keyCode == RIGHT:
            orc.dir = 0
        elif keyCode == DOWN:
            orc.dir = 1
        elif keyCode == LEFT:
            orc.dir = 2
        elif keyCode == UP:
            orc.dir = 3
c7cdcf5204c8539c198cba755d64a43f3fc1025b
| 28,319 |
py
|
Python
|
src/tools/data/DataManualExamples/Simulations/contrib/SimBase/simbase.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 226 |
2018-12-29T01:13:49.000Z
|
2022-03-30T19:16:31.000Z
|
src/tools/data/DataManualExamples/Simulations/contrib/SimBase/simbase.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 5,100 |
2019-01-14T18:19:25.000Z
|
2022-03-31T23:08:36.000Z
|
src/tools/data/DataManualExamples/Simulations/contrib/SimBase/simbase.py
|
visit-dav/vis
|
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
|
[
"BSD-3-Clause"
] | 84 |
2019-01-24T17:41:50.000Z
|
2022-03-10T10:01:46.000Z
|
"""Simulation Base Class
Handles the basics of setting up a simulation. Actual simulations
will want to subclass this with a class which defines actual
data and metadata, and also with something that will run
algorithm epochs.
Concepts are taken from, and occasionally idioms borrowed from,
the updateplots.py sample. Routines are named similarly where
that makes sense.
"""
# Portions derived from works
# Copyright (c) Lawrence Livermore National Security, LLC and other VisIt
# Project developers. See the top-level LICENSE file for dates and other
# details. No copyright assignment is required to contribute to VisIt.
#*****************************************************************************
# Copyright (c) 2014 Fred Morris, Tacoma WA.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the disclaimer (as noted below) in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#*****************************************************************************
import sys, os
class Simulation(object):
"""Simulation Base Class. Subclass this.
The Initialize and Finalize methods in this class implement
the paradigm that there is
* A Primary Method, which is capitalized and belongs to the base class
* A Secondary Overridable method, which is not capitalized and is
present for the convenience of subclasses.
Methods which implement this paradigm accept additional kwargs
which will be passed to the subclasses' method.
The MainLoop method implements the paradigm that there is the Primary
Method (MainLoop), and a number of main_ functions which you will
probably want to override. In particular:
* main_doRun: This is where your compute kernel should run an epoch.
* main_doVisItConnect: This is where you could connect callbacks.
* main_doConsoleCommand: This is where you can implement console_commands.
HERE'S A SUGGESTION: Use self.context to store your kernel's context. I
promise never to step on that.
The following reference arrays are defined on self in lieu of using
the VISIT_ equivalences. They are defined as instance variables to
get around the fact that we're importing simV2 during __init__.
MESHTYPES: VISIT_MESHTYPE_***
VARTYPES: VISIT_VARTYPE_***
CENTERINGS: VISIT_VARCENTERING_***
COMMANDS: Function pointers to VisIt_SimulationMetaData_add***Command
VARDATATYPES: Function pointers to VisIt_VariableData_setData*
DATAOWNERS: VISIT_OWNER_***
"""
    def __init__(self, lib_path, visit_base):
        """The most important things that this does are to import simV2
        and to save the VisIt base directory.
        Parameters:
            lib_path: Path to simv2.py. It is probably a good idea to set
                      an environment variable VISIT_LIBPATH and pull from
                      that.
            visit_base: Base path to the VisIt installation. In practice,
                      this is one directory above the bin/ directory that
                      VisIt runs out of. It is probably a good idea to set
                      an environment variable VISIT_HOME and pull from that.
        """
        # This is really such a glorious hack. Shout out to Quashie...
        if lib_path not in sys.path: sys.path.insert(1,lib_path)
        import simV2
        # Publish simV2 as a module-level global so every other method in this
        # class can reference it as a bare name.
        globals()['simV2'] = simV2
        self.visit_base = visit_base
        # Name -> simV2 constant / function lookup tables (see class docstring).
        self.MESHTYPES = {
            'UNKNOWN': simV2.VISIT_MESHTYPE_UNKNOWN,
            'RECTILINEAR': simV2.VISIT_MESHTYPE_RECTILINEAR,
            'CURVILINEAR': simV2.VISIT_MESHTYPE_CURVILINEAR,
            'UNSTRUCTURED': simV2.VISIT_MESHTYPE_UNSTRUCTURED,
            'POINT': simV2.VISIT_MESHTYPE_POINT,
            'CSG': simV2.VISIT_MESHTYPE_CSG,
            'AMR': simV2.VISIT_MESHTYPE_AMR
        }
        self.VARTYPES = {
            'UNKNOWN': simV2.VISIT_VARTYPE_UNKNOWN,
            'SCALAR': simV2.VISIT_VARTYPE_SCALAR,
            'VECTOR': simV2.VISIT_VARTYPE_VECTOR,
            'TENSOR': simV2.VISIT_VARTYPE_TENSOR,
            'SYMMETRIC_TENSOR': simV2.VISIT_VARTYPE_SYMMETRIC_TENSOR,
            'MATERIAL': simV2.VISIT_VARTYPE_MATERIAL,
            'MATSPECIES': simV2.VISIT_VARTYPE_MATSPECIES,
            'LABEL': simV2.VISIT_VARTYPE_LABEL,
            'ARRAY': simV2.VISIT_VARTYPE_ARRAY,
            'MESH': simV2.VISIT_VARTYPE_MESH,
            'CURVE': simV2.VISIT_VARTYPE_CURVE
        }
        self.CENTERINGS = {
            'NODE': simV2.VISIT_VARCENTERING_NODE,
            'ZONE': simV2.VISIT_VARCENTERING_ZONE
        }
        self.COMMANDS = {
            # I don't know what a custom command is, unfortunately...
            #'CUSTOM': simV2.VisIt_SimulationMetaData_addCustomCommand,
            'GENERIC': simV2.VisIt_SimulationMetaData_addGenericCommand
        }
        self.VARDATATYPES = {
            'CHAR': simV2.VisIt_VariableData_setDataC,
            'INTEGER': simV2.VisIt_VariableData_setDataI,
            # In Python, floats are doubles.
            'FLOAT': simV2.VisIt_VariableData_setDataD
        }
        self.DATAOWNERS = {
            'SIM': simV2.VISIT_OWNER_SIM,
            'VISIT': simV2.VISIT_OWNER_VISIT,
            'COPY': simV2.VISIT_OWNER_COPY
        }
        return
#*********************************************************
# UTILITY METHODS
def truth_to_0_1(self,truth):
"""Returns 1 if truth is true, 0 if false."""
if truth: return 1
return 0
def print_to_console(self,message):
"""Prints to the console and reissues the console prompt."""
print()
print(message)
self.main_doPrompt()
return
#*********************************************************
# SETTERS
def set_runMode(self,running):
"""Sets self.runMode to the appropriate VISIT_ constant."""
if running:
self.runMode = simV2.VISIT_SIMMODE_RUNNING
else:
self.runMode = simV2.VISIT_SIMMODE_STOPPED
return
#*********************************************************
    def Initialize(self,sim_name,sim_description,**kwargs):
        """Call this first.
        By default the simulation comes with the following console
        and callback commands:
            * quit (console only)
            * halt
            * step
            * run
        Keyword arguments are passed to initialize()
        """
        self.done = False
        self.trace_qualifier = os.getpid()
        self.sim_name = sim_name
        self.sim_description = sim_description
        self.sim_path = os.getcwd()
        self.command_prompt = "command>"
        self.console_timeout = 100000 # Seconds? Milliseconds? Don't know.
        self.runMode = simV2.VISIT_SIMMODE_RUNNING
        # Console command table: name -> (handler, arg).
        self.console = {
            "quit": (self.cmd_quit, None),
            "halt": (self.cmd_halt, None),
            "step": (self.cmd_step, None),
            "run": (self.cmd_run, None)
        }
        # VisIt callback table: command name -> (handler, arg, command kind).
        self.callbacks = {
            "commands": {
                "halt": (self.cmd_halt, None, 'GENERIC'),
                "step": (self.cmd_step, None, 'GENERIC'),
                "run": (self.cmd_run, None, 'GENERIC')
            },
        }
        # Give subclasses a chance to override the defaults before VisIt setup.
        self.initialize(**kwargs)
        if self.trace_qualifier is not None: simV2.VisItOpenTraceFile("trace.%d.txt" % self.trace_qualifier)
        simV2.VisItSetDirectory(self.visit_base)
        if not simV2.VisItSetupEnvironment():
            print("VisItSetupEnvironment: could not get environment\n\nVISIT_HOME is '%s', is this correct?" % \
                (self.visit_base,), file=sys.stderr)
            sys.exit(1)
        # We don't have to worry about the rank, because we only ever run a single compute kernel.
        if not simV2.VisItInitializeSocketAndDumpSimFile(
            self.sim_name, self.sim_description, self.sim_path,
            None, None, None ):
            print('VisItInitializeSocketAndDumpSimFile: failed for some reason', file=sys.stderr)
            sys.exit(1)
        return
    def initialize(self,**kwargs):
        """This is the subclass-overridable companion to Initialize().
        The default implementation does nothing.
        The keyword arguments are what were supplied in the call to
        Initialize().
        Specific values which you might want to override:
            trace_qualifier: Used while naming the trace file. Defaults to
                             the process PID. If set to None no trace file
                             is opened.
            sim_path:        It is assumed that the simulation was started
                             from the current working directory, i.e.
                             os.getcwd()
            console:         Define the console commands.
            callbacks:       Define the callbacks and specific values to be
                             exposed. Otherwise you are going to need to
                             override main_doVisItConnect and do a heap o'
                             work.
        callbacks in turn has substructures:
            commands: Defines commands which can be triggered from the
                      VisIt sim control panel.
            metadata: Defines/names metadata for which callbacks should
                      be made. The following types of metadata are
                      supported: mesh, variable, curve expression.
                      See the source for callback_metadata() method
                      for enumeration of the properties which can be
                      set for specific types of metadata.
        """
        return
    def MainLoop(self):
        """This is the main loop. It does not have a subclass-overridable companion,
        however it has a number of main_ methods which are intended to be overridden.
        It runs until self.done is true.
        """
        self.main_doPrompt()
        while not self.done:
            # Blocks until VisIt input, console input, or timeout.
            visitstate = simV2.VisItDetectInputWithTimeout(
                self.truth_to_0_1(self.runMode == simV2.VISIT_SIMMODE_STOPPED),
                self.console_timeout,
                sys.stdin.fileno()
            )
            if visitstate == 0:
                # Timeout with no input: run one simulation epoch.
                self.cmd_step(None,None,None,None)
            elif visitstate == 1:
                # A VisIt viewer is trying to connect.
                if simV2.VisItAttemptToCompleteConnection() == simV2.VISIT_OKAY:
                    self.runMode = simV2.VISIT_SIMMODE_STOPPED
                    self.main_doVisItConnect()
            elif visitstate == 2:
                # Engine traffic from an already-connected viewer; on failure,
                # drop the connection and resume free running.
                if simV2.VisItProcessEngineCommand() != simV2.VISIT_OKAY:
                    simV2.VisItDisconnect()
                    self.runMode = simV2.VISIT_SIMMODE_RUNNING
            elif visitstate == 3:
                # Input is waiting on the console (stdin).
                cmd = simV2.VisItReadConsole()
                self.main_doConsoleCommand(cmd)
                self.main_doPrompt()
            else:
                self.main_visitstateError(visitstate)
        return
def main_doPrompt(self):
"""Displays a command prompt on stdout."""
# Don't have to worry about the rank, because we only ever run a single kernel.
sys.stdout.write(self.command_prompt)
sys.stdout.flush()
return
def main_doRun(self,running):
"""Your compute kernel will want to override this, this is where you will
do work!
Parameters:
running: This will be True if VisIt believes the simulation is
running.
"""
return
    def main_doVisItConnect(self):
        """Your compute kernel may want to override this. This is where you will
        connect callbacks. There are helper methods to assist with this. The
        default automagically invokes some default callbacks based on some
        data definitions, and presumably if you're not doing anything too fancy
        the callbacks themselves can be defined with some... errrm... definitions.
        """
        # Register the underscore-suffixed wrapper methods; subclasses override
        # the corresponding non-underscore methods.
        simV2.VisItSetCommandCallback(self.callback_command_, 0)
        simV2.VisItSetGetMetaData(self.callback_metadata_, None)
        simV2.VisItSetGetMesh(self.callback_mesh_, 0)
        simV2.VisItSetGetVariable(self.callback_variable_, 0)
        simV2.VisItSetGetCurve(self.callback_curve_, 0)
        return
def main_doConsoleCommand(self, cmd):
"""Processes console commands.
Parameters:
cmd: The command read from the console.
"""
if cmd in self.console:
self.console[cmd][0](self.console[cmd][1], None, None, None)
return
def main_visitstateError(self, visitstate):
"""Called when the main loop visitstate contains an unexpected value."""
self.print_to_console("visitstate error: %s" % (visitstate,))
return
    def cmd_run(self, arg, cmd, visit_args, cbdata):
        """Sets the runMode to VISIT_SIMMODE_RUNNING"""
        # Shared handler signature for both console and VisIt command dispatch.
        self.runMode = simV2.VISIT_SIMMODE_RUNNING
        return
    def cmd_halt(self, arg, cmd, visit_args, cbdata):
        """Sets the runMode to VISIT_SIMMODE_STOPPED"""
        # Shared handler signature for both console and VisIt command dispatch.
        self.runMode = simV2.VISIT_SIMMODE_STOPPED
        return
    def cmd_step(self, arg, cmd, visit_args, cbdata):
        """Runs one epoch."""
        # Run the kernel, then tell VisIt the timestep advanced so any
        # connected viewer refreshes its plots.
        self.main_doRun(self.runMode == simV2.VISIT_SIMMODE_RUNNING)
        simV2.VisItTimeStepChanged()
        simV2.VisItUpdatePlots()
        return
def cmd_quit(self, arg, cmd, visit_args, cbdata):
"""Sets the done flag which causes the main loop to exit."""
self.done = True
return
    def visit_execute(self, commands, cmd, visit_args, cbdata):
        """Executes the commands"""
        # Each entry is a VisIt CLI command string; the trailing newline is
        # required by VisItExecuteCommand.
        for command in commands:
            simV2.VisItExecuteCommand(command + '\n')
        return
def callback_command_(self, cmd, visit_args, cbdata):
"""A wrapper around callback_command, making it easier to
override in a subclass.
This is the method which is actually registered as a
callback.
"""
self.callback_command(cmd, visit_args, cbdata)
return
def callback_command(self, cmd, visit_args, cbdata):
"""Overridable method."""
commands = self.callbacks['commands']
if cmd in commands:
commands[cmd][0](commands[cmd][1], cmd, visit_args, cbdata)
return
    def callback_metadata_(self, cbdata):
        """This is the method which is actually registered as a callback."""
        # Thin forwarder; subclasses override callback_metadata() instead.
        return self.callback_metadata(cbdata)
def callback_metadata(self, cbdata):
"""Overridable method. (You don't have to.)
This method will declare your metadata, if you pass it an
appropriate structure.
"""
metadata = self.callbacks['metadata']
md = simV2.VisIt_SimulationMetaData_alloc()
if md == simV2.VISIT_INVALID_HANDLE: return md
mode,cycle,elapsed = self.callback_modecycletime()
if mode is not None:
if mode:
current_mode = VISIT_SIMMODE_RUNNING
else:
current_mode = VISIT_SIMMODE_STOPPED
simV2.VisIt_SimulationMetaData_setMode(md,current_mode)
else:
simV2.VisIt_SimulationMetaData_setMode(md,self.runMode)
if (cycle is not None) and (elapsed is not None):
simV2.VisIt_SimulationMetaData_setCycleTime(md,cycle,elapsed)
if 'mesh' in metadata:
for mesh in metadata['mesh']:
mmd = simV2.VisIt_MeshMetaData_alloc()
if mmd != simV2.VISIT_INVALID_HANDLE:
if 'Name' in mesh:
simV2.VisIt_MeshMetaData_setName(mmd, mesh['Name'])
if 'MeshType' in mesh:
simV2.VisIt_MeshMetaData_setMeshType(mmd, self.MESHTYPES[mesh['MeshType']])
if 'TopologicalDimension' in mesh:
simV2.VisIt_MeshMetaData_setTopologicalDimension(mmd, mesh['TopologicalDimension'])
if 'SpatialDimension' in mesh:
simV2.VisIt_MeshMetaData_setSpatialDimension(mmd, mesh['SpatialDimension'])
if 'NumDomains' in mesh:
simV2.VisIt_MeshMetaData_setNumDomains(mmd, mesh['NumDomains'])
if 'DomainTitle' in mesh:
simV2.VisIt_MeshMetaData_setDomainTitle(mmd, mesh['DomainTitle'])
if 'DomainPieceName' in mesh:
simV2.VisIt_MeshMetaData_setDomainPieceName(mmd, mesh['DomainPieceName'])
if 'NumGroups' in mesh:
simV2.VisIt_MeshMetaData_setNumGroups(mmd, mesh['NumGroups'])
if 'XUnits' in mesh:
simV2.VisIt_MeshMetaData_setXUnits(mmd, mesh['XUnits'])
if 'YUnits' in mesh:
simV2.VisIt_MeshMetaData_setYUnits(mmd, mesh['YUnits'])
if 'ZUnits' in mesh:
simV2.VisIt_MeshMetaData_setZUnits(mmd, mesh['ZUnits'])
if 'XLabel' in mesh:
simV2.VisIt_MeshMetaData_setXLabel(mmd, mesh['XLabel'])
if 'YLabel' in mesh:
simV2.VisIt_MeshMetaData_setYLabel(mmd, mesh['YLabel'])
if 'ZLabel' in mesh:
simV2.VisIt_MeshMetaData_setZLabel(mmd, mesh['ZLabel'])
simV2.VisIt_SimulationMetaData_addMesh(md, mmd)
if 'variable' in metadata:
for variable in metadata['variable']:
vmd = simV2.VisIt_VariableMetaData_alloc()
if vmd != simV2.VISIT_INVALID_HANDLE:
if 'Name' in variable:
simV2.VisIt_VariableMetaData_setName(vmd, variable['Name'])
if 'MeshName' in variable:
simV2.VisIt_VariableMetaData_setMeshName(vmd, variable['MeshName'])
if 'Type' in variable:
simV2.VisIt_VariableMetaData_setType(vmd, self.VARTYPES[variable['Type']])
if 'Centering' in variable:
simV2.VisIt_VariableMetaData_setCentering(vmd, self.CENTERINGS[variable['Centering']])
simV2.VisIt_SimulationMetaData_addVariable(md, vmd)
if 'curve' in metadata:
for curve in metadata['curve']:
cmd = simV2.VisIt_CurveMetaData_alloc()
if cmd != simV2.VISIT_INVALID_HANDLE:
if 'Name' in curve:
simV2.VisIt_CurveMetaData_setName(cmd, curve['Name'])
if 'XLabel' in curve:
simV2.VisIt_CurveMetaData_setXLabel(cmd, curve['XLabel'])
if 'XUnits' in curve:
simV2.VisIt_CurveMetaData_setXUnits(cmd, curve['XUnits'])
if 'YLabel' in curve:
simV2.VisIt_CurveMetaData_setYLabel(cmd, curve['YLabel'])
if 'YUnits' in curve:
simV2.VisIt_CurveMetaData_setYUnits(cmd, curve['YUnits'])
simV2.VisIt_SimulationMetaData_addCurve(md, cmd)
if 'expression' in metadata:
for expression in metadata['expression']:
emd = simV2.VisIt_ExpressionMetaData_alloc()
if emd != simV2.VISIT_INVALID_HANDLE:
if 'Name' in expression:
simV2.VisIt_ExpressionMetaData_setName(emd, expression['Name'])
if 'Definition' in expression:
simV2.VisIt_ExpressionMetaData_setDefinition(emd, expression['Definition'])
if 'Type' in expression:
simV2.VisIt_ExpressionMetaData_setType(emd, self.VARTYPES[expression['Type']])
simV2.VisIt_SimulationMetaData_addExpression(md, emd)
if 'commands' in self.callbacks:
for command in self.callbacks['commands']:
cmd = simV2.VisIt_CommandMetaData_alloc()
if cmd != simV2.VISIT_INVALID_HANDLE:
simV2.VisIt_CommandMetaData_setName(cmd, command)
self.COMMANDS[self.callbacks['commands'][command][2]](md, cmd)
return md
def callback_modecycletime(self):
"""Returns a triple of:
mode: True if running, false if not. If you just want this to
follow self.simMode, return None.
cycle: The epoch.
time: The "elapsed time"... presumably in seconds, don't really know.
"""
return None,None,None
def callback_mesh_(self, domain, name, cbdata):
"""A wrapper around callback_mesh, making it easier to
override in a subclass.
This is the method which is actually registered as a
callback.
"""
h = self.callback_mesh(domain, name, cbdata)
if h is None: h = simV2.VISIT_INVALID_HANDLE
return h
    def callback_mesh(self, domain, name, cbdata):
        """Overridable method."""
        # Default declares no mesh data.
        return simV2.VISIT_INVALID_HANDLE
def callback_variable_(self, domain, name, cbdata):
"""A wrapper around callback_variable, making it easier to
override in a subclass.
This is the method which is actually registered as a
callback.
"""
h = self.callback_variable(domain, name, cbdata)
if h is None: h = simV2.VISIT_INVALID_HANDLE
return h
    def callback_variable(self, domain, name, cbdata):
        """Overridable method."""
        # Default declares no variable data.
        return simV2.VISIT_INVALID_HANDLE
def callback_curve_(self, name, cbdata):
"""A wrapper around callback_curve, making it easier to
override in a subclass.
This is the method which is actually registered as a
callback.
"""
h = self.callback_curve(name, cbdata)
if h is None: h = simV2.VISIT_INVALID_HANDLE
return h
    def callback_curve(self, name, cbdata):
        """Overridable method."""
        # Default declares no curve data.
        return simV2.VISIT_INVALID_HANDLE
# TODO: Change the owner to 'SIM' and see what happens.
    def visit_rectilinear_mesh(self, data_func, min_real_idx, max_real_idx, data_x, data_y, data_z=None, owner='VISIT'):
        """Creates a rectilinear mesh.
        The handling for owner other that VISIT_OWNER_VISIT is complete and
        utterly untested guesswork.
        data_func is one of VARDATATYPES. NOTE: In Python, floats are doubles.
        Returns the mesh handle, or VISIT_INVALID_HANDLE on allocation failure.
        """
        h = simV2.VisIt_RectilinearMesh_alloc()
        if h == simV2.VISIT_INVALID_HANDLE: return h
        # One VariableData handle per coordinate axis; Z is optional (2D mesh).
        hx = simV2.VisIt_VariableData_alloc()
        data_func(hx,self.DATAOWNERS[owner],1,len(data_x),data_x)
        hy = simV2.VisIt_VariableData_alloc()
        data_func(hy,self.DATAOWNERS[owner],1,len(data_y),data_y)
        if data_z is None:
            simV2.VisIt_RectilinearMesh_setCoordsXY(h,hx,hy)
        else:
            hz = simV2.VisIt_VariableData_alloc()
            data_func(hz,self.DATAOWNERS[owner],1,len(data_z),data_z)
            simV2.VisIt_RectilinearMesh_setCoordsXYZ(h,hx,hy,hz)
        simV2.VisIt_RectilinearMesh_setRealIndices(h,min_real_idx,max_real_idx)
        return h
# TODO: Change the owner to 'SIM' and see what happens.
    def visit_point_mesh(self, data_func, data_x, data_y, data_z=None, owner='VISIT'):
        """Creates a point mesh.
        The handling for owner other that VISIT_OWNER_VISIT is complete and
        utterly untested guesswork.
        data_func is one of VARDATATYPES. NOTE: In Python, floats are doubles.
        Returns the mesh handle, or VISIT_INVALID_HANDLE on allocation failure.
        """
        h = simV2.VisIt_PointMesh_alloc()
        if h == simV2.VISIT_INVALID_HANDLE: return h
        # One VariableData handle per coordinate axis; Z is optional (2D mesh).
        hx = simV2.VisIt_VariableData_alloc()
        data_func(hx,self.DATAOWNERS[owner],1,len(data_x),data_x)
        hy = simV2.VisIt_VariableData_alloc()
        data_func(hy,self.DATAOWNERS[owner],1,len(data_y),data_y)
        if data_z is None:
            simV2.VisIt_PointMesh_setCoordsXY(h,hx,hy)
        else:
            hz = simV2.VisIt_VariableData_alloc()
            data_func(hz,self.DATAOWNERS[owner],1,len(data_z),data_z)
            simV2.VisIt_PointMesh_setCoordsXYZ(h,hx,hy,hz)
        return h
# TODO: Change the owner to 'SIM' and see what happens.
def visit_variable(self, data_func, data, owner='VISIT', nComp=1):
"""Creates a variable.
nComp determines the "number of components". For a precise definition
see the documentation. For Floats and Ints this should typically be
1 (no stride). For character strings (such as labels), this is the
length of the label; it is required that each string be the same length.
"""
h = simV2.VisIt_VariableData_alloc()
data_func(h,self.DATAOWNERS[owner],nComp,len(data),data)
return h
# TODO: Change the owner to 'SIM' and see what happens.
def visit_curve(self, data_func, data_x, data_y, data_z=None, owner='VISIT'):
"""Creates a curve."""
h = simV2.VisIt_CurveData_alloc()
if h == simV2.VISIT_INVALID_HANDLE: return h
hx = simV2.VisIt_VariableData_alloc()
data_func(hx,self.DATAOWNERS[owner],1,len(data_x),data_x)
hy = simV2.VisIt_VariableData_alloc()
data_func(hy,self.DATAOWNERS[owner],1,len(data_y),data_y)
if data_z is None:
simV2.VisIt_CurveData_setCoordsXY(h,hx,hy)
else:
hz = simV2.VisIt_VariableData_alloc()
data_func(hz,self.DATAOWNERS[owner],1,len(data_z),data_z)
simV2.VisIt_CurveData_setCoordsXYZ(h,hx,hy,hz)
return h
    def Finalize(self,**kwargs):
        """Finalize the simulation connection.

        Calls the subclass-overridable finalize() first, then — most
        important — closes the VisIt trace file via VisItCloseTraceFile
        if trace_qualifier is not set to None.
        """
        self.finalize(**kwargs)
        if self.trace_qualifier is not None: simV2.VisItCloseTraceFile()
        return
    def finalize(self,**kwargs):
        """Subclass-overridable companion to Finalize; default does nothing."""
        return
| 44.667192 | 120 | 0.595819 |
90c82c43c3eca00a6743d066b39cf0ab21f176e8
| 9,598 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/stock/stock_balance.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/stock/stock_balance.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/stock/stock_balance.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import print_function, unicode_literals
import frappe
from frappe.utils import flt, cstr, nowdate, nowtime
from erpnext.stock.utils import update_bin
from erpnext.stock.stock_ledger import update_entries_after
def repost(only_actual=False, allow_negative_stock=False, allow_zero_rate=False, only_bin=False):
    """Repost stock for every (item_code, warehouse) pair known to Bin or the ledger.

    Args:
        only_actual: recompute only the actual quantity (skip reserved/ordered/etc).
        allow_negative_stock: temporarily enable negative stock in Stock Settings
            for the duration of the repost, restoring the old value afterwards.
        allow_zero_rate: passed through to the ledger reposting.
        only_bin: update Bin documents only, without reposting ledger entries.
    """
    frappe.db.auto_commit_on_many_writes = 1
    if allow_negative_stock:
        existing_allow_negative_stock = frappe.db.get_value("Stock Settings", None, "allow_negative_stock")
        frappe.db.set_value("Stock Settings", None, "allow_negative_stock", 1)

    for d in frappe.db.sql("""select distinct item_code, warehouse from
        (select item_code, warehouse from tabBin
        union
        select item_code, warehouse from `tabStock Ledger Entry`) a"""):
        try:
            repost_stock(d[0], d[1], allow_zero_rate, only_actual, only_bin)
            frappe.db.commit()
        except Exception:
            # Best effort: roll back this pair and continue with the rest.
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate.
            frappe.db.rollback()

    if allow_negative_stock:
        frappe.db.set_value("Stock Settings", None, "allow_negative_stock", existing_allow_negative_stock)
    frappe.db.auto_commit_on_many_writes = 0
def repost_stock(item_code, warehouse, allow_zero_rate=False, only_actual=False, only_bin=False):
    """Repost one (item_code, warehouse) pair: ledger entries first, then bin totals."""
    if not only_bin:
        repost_actual_qty(item_code, warehouse, allow_zero_rate)

    if item_code and warehouse and not only_actual:
        quantities = {
            "reserved_qty": get_reserved_qty(item_code, warehouse),
            "indented_qty": get_indented_qty(item_code, warehouse),
            "ordered_qty": get_ordered_qty(item_code, warehouse),
            "planned_qty": get_planned_qty(item_code, warehouse),
        }
        if only_bin:
            # Bin-only mode also refreshes the actual quantity from the ledger.
            quantities["actual_qty"] = get_balance_qty_from_sle(item_code, warehouse)
        update_bin_qty(item_code, warehouse, quantities)
def repost_actual_qty(item_code, warehouse, allow_zero_rate=False):
    """Rebuild the stock ledger entries for (item_code, warehouse).

    Failures are deliberately swallowed so one bad pair does not abort a bulk
    repost.  Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    still propagate.
    """
    try:
        update_entries_after({ "item_code": item_code, "warehouse": warehouse }, allow_zero_rate)
    except Exception:
        pass
def get_balance_qty_from_sle(item_code, warehouse):
    """Latest qty_after_transaction from the stock ledger, or 0.0 when none exists."""
    rows = frappe.db.sql("""select qty_after_transaction from `tabStock Ledger Entry`
        where item_code=%s and warehouse=%s and is_cancelled='No'
        order by posting_date desc, posting_time desc, name desc
        limit 1""", (item_code, warehouse))

    if not rows:
        return 0.0
    return flt(rows[0][0])
def get_reserved_qty(item_code, warehouse):
    """Quantity reserved against open Sales Orders for (item_code, warehouse).

    The union covers two sources of demand:
      - packed items (`tabPacked Item`) belonging to a Sales Order, prorated by
        the parent Sales Order Item's undelivered fraction, and
      - plain Sales Order Items for the item itself.
    Rows delivered by the supplier and Closed/unsubmitted orders are excluded.
    """
    reserved_qty = frappe.db.sql("""
        select
            sum(dnpi_qty * ((so_item_qty - so_item_delivered_qty) / so_item_qty))
        from
            (
                (select
                    qty as dnpi_qty,
                    (
                        select qty from `tabSales Order Item`
                        where name = dnpi.parent_detail_docname
                        and (delivered_by_supplier is null or delivered_by_supplier = 0)
                    ) as so_item_qty,
                    (
                        select delivered_qty from `tabSales Order Item`
                        where name = dnpi.parent_detail_docname
                        and delivered_by_supplier = 0
                    ) as so_item_delivered_qty,
                    parent, name
                from
                (
                    select qty, parent_detail_docname, parent, name
                    from `tabPacked Item` dnpi_in
                    where item_code = %s and warehouse = %s
                    and parenttype="Sales Order"
                    and item_code != parent_item
                    and exists (select * from `tabSales Order` so
                    where name = dnpi_in.parent and docstatus = 1 and status != 'Closed')
                ) dnpi)
            union
                (select stock_qty as dnpi_qty, qty as so_item_qty,
                    delivered_qty as so_item_delivered_qty, parent, name
                from `tabSales Order Item` so_item
                where item_code = %s and warehouse = %s
                and (so_item.delivered_by_supplier is null or so_item.delivered_by_supplier = 0)
                and exists(select * from `tabSales Order` so
                    where so.name = so_item.parent and so.docstatus = 1
                    and so.status != 'Closed'))
            ) tab
        where
            so_item_qty >= so_item_delivered_qty
    """, (item_code, warehouse, item_code, warehouse))

    # Aggregate query always returns one row; its single cell may be NULL.
    return flt(reserved_qty[0][0]) if reserved_qty else 0
def get_indented_qty(item_code, warehouse):
    """Quantity still pending against submitted, non-Stopped Material Requests."""
    rows = frappe.db.sql("""select sum(mr_item.qty - mr_item.ordered_qty)
        from `tabMaterial Request Item` mr_item, `tabMaterial Request` mr
        where mr_item.item_code=%s and mr_item.warehouse=%s
        and mr_item.qty > mr_item.ordered_qty and mr_item.parent=mr.name
        and mr.status!='Stopped' and mr.docstatus=1""", (item_code, warehouse))

    if not rows:
        return 0
    return flt(rows[0][0])
def get_ordered_qty(item_code, warehouse):
    """Undelivered quantity (in stock UOM) on open, submitted Purchase Orders."""
    rows = frappe.db.sql("""
        select sum((po_item.qty - po_item.received_qty)*po_item.conversion_factor)
        from `tabPurchase Order Item` po_item, `tabPurchase Order` po
        where po_item.item_code=%s and po_item.warehouse=%s
        and po_item.qty > po_item.received_qty and po_item.parent=po.name
        and po.status not in ('Closed', 'Delivered') and po.docstatus=1
        and po_item.delivered_by_supplier = 0""", (item_code, warehouse))

    if not rows:
        return 0
    return flt(rows[0][0])
def get_planned_qty(item_code, warehouse):
    """Quantity still to be produced on open, submitted Work Orders."""
    rows = frappe.db.sql("""
        select sum(qty - produced_qty) from `tabWork Order`
        where production_item = %s and fg_warehouse = %s and status not in ("Stopped", "Completed")
        and docstatus=1 and qty > produced_qty""", (item_code, warehouse))

    if not rows:
        return 0
    return flt(rows[0][0])
def update_bin_qty(item_code, warehouse, qty_dict=None):
    """Write changed quantity fields onto the Bin for (item_code, warehouse).

    Saves only when at least one field in qty_dict differs from the stored
    value; projected_qty is recomputed from the (possibly updated) fields
    before saving.  A None/empty qty_dict is a no-op (the old code crashed
    on None despite advertising it as the default).
    """
    from erpnext.stock.utils import get_bin
    # Renamed from `bin`, which shadowed the Python builtin.
    bin_doc = get_bin(item_code, warehouse)
    mismatch = False
    for fld, val in (qty_dict or {}).items():
        if flt(bin_doc.get(fld)) != flt(val):
            bin_doc.set(fld, flt(val))
            mismatch = True

    if mismatch:
        bin_doc.projected_qty = (flt(bin_doc.actual_qty) + flt(bin_doc.ordered_qty) +
            flt(bin_doc.indented_qty) + flt(bin_doc.planned_qty) - flt(bin_doc.reserved_qty)
            - flt(bin_doc.reserved_qty_for_production)) - flt(bin_doc.reserved_qty_for_sub_contract)

        bin_doc.save()
def set_stock_balance_as_per_serial_no(item_code=None, posting_date=None, posting_time=None,
        fiscal_year=None):
    """Reconcile Bin.actual_qty against the count of active Serial No rows.

    For every serialized item (optionally restricted to *item_code*) whose bin
    quantity disagrees with its serial-number count, insert a manual Stock
    Reconciliation ledger entry for the difference, update the bin, and repost
    the ledger from the given posting date/time (defaults: now).
    """
    if not posting_date: posting_date = nowdate()
    if not posting_time: posting_time = nowtime()

    # BUG FIX: the old code interpolated item_code into the SQL after a no-op
    # "escape" (`"\'"` is just `'`), which broke on names containing quotes and
    # was injectable.  Use a parameterized query instead.
    condition = " and item.name=%s" if item_code else ""
    values = (item_code,) if item_code else ()

    bins = frappe.db.sql("""select bin.item_code, bin.warehouse, bin.actual_qty, item.stock_uom
        from `tabBin` bin, tabItem item
        where bin.item_code = item.name and item.has_serial_no = 1 %s""" % condition, values)

    for d in bins:
        serial_nos = frappe.db.sql("""select count(name) from `tabSerial No`
            where item_code=%s and warehouse=%s and docstatus < 2""", (d[0], d[1]))

        if serial_nos and flt(serial_nos[0][0]) != flt(d[2]):
            print(d[0], d[1], d[2], serial_nos[0][0])

            # Last valuation rate / company for the incoming side of the fix-up.
            sle = frappe.db.sql("""select valuation_rate, company from `tabStock Ledger Entry`
                where item_code = %s and warehouse = %s and ifnull(is_cancelled, 'No') = 'No'
                order by posting_date desc limit 1""", (d[0], d[1]))

            sle_dict = {
                'doctype': 'Stock Ledger Entry',
                'item_code': d[0],
                'warehouse': d[1],
                'transaction_date': nowdate(),
                'posting_date': posting_date,
                'posting_time': posting_time,
                'voucher_type': 'Stock Reconciliation (Manual)',
                'voucher_no': '',
                'voucher_detail_no': '',
                # Positive when serial numbers exceed the bin quantity.
                'actual_qty': flt(serial_nos[0][0]) - flt(d[2]),
                'stock_uom': d[3],
                'incoming_rate': sle and flt(serial_nos[0][0]) > flt(d[2]) and flt(sle[0][0]) or 0,
                'company': sle and cstr(sle[0][1]) or 0,
                'is_cancelled': 'No',
                'batch_no': '',
                'serial_no': ''
            }

            sle_doc = frappe.get_doc(sle_dict)
            sle_doc.flags.ignore_validate = True
            sle_doc.flags.ignore_links = True
            sle_doc.insert()

            args = sle_dict.copy()
            args.update({
                "sle_id": sle_doc.name,
                "is_amended": 'No'
            })
            update_bin(args)

            update_entries_after({
                "item_code": d[0],
                "warehouse": d[1],
                "posting_date": posting_date,
                "posting_time": posting_time
            })
def reset_serial_no_status_and_warehouse(serial_nos=None):
    """Re-derive the warehouse for draft Serial Nos from their latest ledger entry.

    Defaults to all draft (docstatus = 0) serial numbers when none are given.
    """
    if not serial_nos:
        serial_nos = frappe.db.sql_list("""select name from `tabSerial No` where docstatus = 0""")
    for serial_no in serial_nos:
        try:
            sr = frappe.get_doc("Serial No", serial_no)
            last_sle = sr.get_last_sle()
            if flt(last_sle.actual_qty) > 0:
                sr.warehouse = last_sle.warehouse
                sr.via_stock_ledger = True
                sr.save()
        except Exception:
            # Best-effort bulk fix-up: skip serial numbers that fail to load
            # or save.  Narrowed from a bare `except:` so interrupts propagate.
            pass
def repost_all_stock_vouchers():
    """Delete and regenerate SL/GL entries for every voucher touching a
    warehouse that has a linked perpetual-inventory ('Stock') account,
    replaying vouchers in posting order.  Failed vouchers are rolled back
    and collected in `rejected`, which is printed at the end.
    """
    warehouses_with_account = frappe.db.sql_list("""select warehouse from tabAccount
        where ifnull(account_type, '') = 'Stock' and (warehouse is not null and warehouse != '')
        and is_group=0""")

    vouchers = frappe.db.sql("""select distinct voucher_type, voucher_no
        from `tabStock Ledger Entry` sle
        where voucher_type != "Serial No" and sle.warehouse in (%s)
        order by posting_date, posting_time, name""" %
        ', '.join(['%s']*len(warehouses_with_account)), tuple(warehouses_with_account))

    rejected = []
    i = 0
    for voucher_type, voucher_no in vouchers:
        i+=1
        print(i, "/", len(vouchers), voucher_type, voucher_no)
        try:
            # Wipe existing entries first so the document regenerates them cleanly.
            for dt in ["Stock Ledger Entry", "GL Entry"]:
                frappe.db.sql("""delete from `tab%s` where voucher_type=%s and voucher_no=%s"""%
                    (dt, '%s', '%s'), (voucher_type, voucher_no))

            doc = frappe.get_doc(voucher_type, voucher_no)
            # Manufacture/Repack entries need rates recomputed before reposting;
            # subcontracted receipts need full revalidation.
            if voucher_type=="Stock Entry" and doc.purpose in ["Manufacture", "Repack"]:
                doc.calculate_rate_and_amount(force=1)
            elif voucher_type=="Purchase Receipt" and doc.is_subcontracted == "Yes":
                doc.validate()

            doc.update_stock_ledger()
            doc.make_gl_entries(repost_future_gle=False)
            frappe.db.commit()
        except Exception as e:
            print(frappe.get_traceback())
            rejected.append([voucher_type, voucher_no])
            frappe.db.rollback()

    print(rejected)
| 35.813433 | 101 | 0.713378 |
90d58f4c414733ef33c952dc928329366227325a
| 9,605 |
py
|
Python
|
scripts/unification/move_zn_element.py
|
opensource-assist/fuschia
|
66646c55b3d0b36aae90a4b6706b87f1a6261935
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/unification/move_zn_element.py
|
opensource-assist/fuschia
|
66646c55b3d0b36aae90a4b6706b87f1a6261935
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/unification/move_zn_element.py
|
opensource-assist/fuschia
|
66646c55b3d0b36aae90a4b6706b87f1a6261935
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python2.7
# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import fileinput
import json
import os
import re
import subprocess
import sys
# Path anchors derived from this file's location in the checkout:
#   SCRIPT_DIR   -> the directory containing this script
#   SCRIPTS_DIR  -> its parent (the scripts/ directory, which also holds `fx`)
#   FUCHSIA_ROOT -> the checkout root
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
SCRIPTS_DIR = os.path.dirname(SCRIPT_DIR)
FUCHSIA_ROOT = os.path.dirname(SCRIPTS_DIR)
# Wrapper used to invoke `fx` commands (e.g. format-code).
FX = os.path.join(SCRIPTS_DIR, 'fx')
# GN-style label of this script, embedded in generated commit messages.
SCRIPT_LABEL = '//' + os.path.relpath(os.path.abspath(__file__),
                                      start=FUCHSIA_ROOT)
class Type(object):
    """ZN build target types this script knows how to migrate."""

    DRIVER = 'zx_driver'
    EXECUTABLE = 'zx_executable'
    TEST = 'zx_test'
    TEST_DRIVER = 'zx_test_driver'

    @classmethod
    def all(cls):
        """Return every known type, in declaration order."""
        return [cls.DRIVER, cls.EXECUTABLE, cls.TEST, cls.TEST_DRIVER]
# Maps each ZN target type to the GN template it becomes in the rewritten
# BUILD.gn (both driver flavors map to driver_module; test drivers are
# additionally marked with `test = true` during the rewrite).
BINARY_TYPES = {
    Type.DRIVER: 'driver_module',
    Type.EXECUTABLE: 'executable',
    Type.TEST: 'test',
    Type.TEST_DRIVER: 'driver_module',
}
def run_command(command):
    """Run *command* from the Fuchsia checkout root and return its stdout."""
    output = subprocess.check_output(command, cwd=FUCHSIA_ROOT)
    return output
def locate_build_files(base):
    """Return every BUILD.gn path under //zircon/system/<base>."""
    search_root = os.path.join(FUCHSIA_ROOT, 'zircon', 'system', base)
    return [
        os.path.join(root, name)
        for root, _dirs, names in os.walk(search_root)
        for name in names
        if name == 'BUILD.gn'
    ]
def transform_build_file(build):
    """Rewrite one ZN BUILD.gn file in place so it works in the Fuchsia GN build.

    Three passes: (1) scan for binary targets, (2) rewrite each line in place
    (template names, label prefixes, injected imports and extra parameters),
    (3) append a migrated_manifest target per binary, then run the formatter.
    Returns 0.
    """
    # First pass: identify contents of the build file.
    binaries = []
    has_test_binaries = False
    has_drivers = False
    binary_types = BINARY_TYPES.keys()
    unclear_types = ['library']
    n_lines = 0
    with open(build, 'r') as build_file:
        lines = build_file.readlines()
        n_lines = len(lines)
        for line in lines:
            # Matches `template_name("target_name")` at the start of a line.
            match = re.match(r'\A([^\(]+)\("([^"]+)"\)', line)
            if match:
                type, name = match.groups()
                if type in binary_types:
                    binaries.append(name)
                if type == Type.TEST:
                    has_test_binaries = True
                if type == Type.DRIVER or type == Type.TEST_DRIVER:
                    has_drivers = True
                if type in unclear_types:
                    print('Warning: target ' + name + ' of type ' + type + ' '
                          'needs to be manually converted.')
    # Second pass: rewrite contents to match GN build standards.
    # (fileinput with inplace=True redirects stdout into the file itself.)
    imports_added = False
    for line in fileinput.FileInput(build, inplace=True):
        # Apply required edits.
        # Update target types.
        starting_type = ''
        for type in binary_types:
            new_type_line = line.replace(type, BINARY_TYPES[type])
            if new_type_line != line:
                starting_type = type
                line = new_type_line
                break
        # Remove references to libzircon.
        if '$zx/system/ulib/zircon' in line and not 'zircon-internal' in line:
            line = ''
        # Update references to libraries.
        line = line.replace('$zx/system/ulib', '//zircon/public/lib')
        line = line.replace('$zx/system/dev/lib', '//zircon/public/lib')
        # Update references to Zircon in general.
        line = line.replace('$zx', '//zircon')
        # Print the line, if any content is left.
        if line:
            sys.stdout.write(line)
        # Insert required imports at the start of the file.
        # Triggered by the first blank line (right after the copyright header).
        if not line.strip() and not imports_added:
            imports_added = True
            sys.stdout.write('##########################################\n')
            sys.stdout.write('# Though under //zircon, this build file #\n')
            sys.stdout.write('# is meant to be used in the Fuchsia GN  #\n')
            sys.stdout.write('# build.                                 #\n')
            sys.stdout.write('# See fxb/36139.                         #\n')
            sys.stdout.write('##########################################\n')
            sys.stdout.write('\n')
            sys.stdout.write('assert(!defined(zx) || zx != "/", "This file can only be used in the Fuchsia GN build.")\n')
            sys.stdout.write('\n')
            if has_drivers:
                sys.stdout.write('import("//build/config/fuchsia/rules.gni")\n')
            if has_test_binaries:
                sys.stdout.write('import("//build/test.gni")\n')
            sys.stdout.write('import("//build/unification/images/migrated_manifest.gni")\n')
            sys.stdout.write('\n')
        # Add extra parameters to tests.
        if starting_type == Type.TEST:
            sys.stdout.write('  # Dependent manifests unfortunately cannot be marked as `testonly`.\n')
            sys.stdout.write('  # Remove when converting this file to proper GN build idioms.\n')
            sys.stdout.write('  testonly = false\n')
        if starting_type == Type.TEST_DRIVER:
            sys.stdout.write('  test = true\n')
        if starting_type in [Type.DRIVER, Type.TEST_DRIVER]:
            sys.stdout.write('  defines = [ "_ALL_SOURCE" ]\n')
            sys.stdout.write('  configs += [ "//build/config/fuchsia:enable_zircon_asserts" ]\n')
            sys.stdout.write('  configs -= [ "//build/config/fuchsia:no_cpp_standard_library" ]\n')
            sys.stdout.write('  configs += [ "//build/config/fuchsia:static_cpp_standard_library" ]\n')
        if starting_type in Type.all():
            sys.stdout.write('  configs += [ "//build/unification/config:zircon-migrated" ]\n')
    # Third pass: add manifest targets at the end of the file.
    with open(build, 'a') as build_file:
        for binary in binaries:
            build_file.write('\n')
            build_file.write('migrated_manifest("' + binary + '-manifest") {\n')
            build_file.write('  deps = [\n')
            build_file.write('    ":' + binary + '",\n')
            build_file.write('  ]\n')
            build_file.write('}\n')
    # Format the file.
    run_command([FX, 'format-code', '--files=' + build])
    return 0
def main():
    """Entry point: parse arguments and dispatch to `convert` or `list`."""
    parser = argparse.ArgumentParser(
        description='Moves a binary from ZN to GN.')
    subcommands = parser.add_subparsers()

    convert = subcommands.add_parser('convert',
                                     help='Migrate from ZN to GN')
    convert.add_argument('binary',
                         help='The binary under //zircon/system to migrate, '
                              'e.g. uapp/audio, utest/fit, dev/bus/pci')
    convert.set_defaults(func=run_convert)

    lister = subcommands.add_parser('list',
                                    help='List available binaries')
    lister.add_argument('--build-dir',
                        help='path to the ZN build dir',
                        default=os.path.join(FUCHSIA_ROOT, 'out', 'default.zircon'))
    lister.set_defaults(func=run_list)

    args = parser.parse_args()
    args.func(args)
def run_convert(args):
    """Convert //zircon/system/<args.binary> build files from ZN to GN.

    Returns 1 when the tree is dirty or no build files are found, 0 on
    success or user abort.  Leaves the edits on a fresh `gn-move-*` branch
    with a templated commit message.
    """
    # Check that the fuchsia.git tree is clean.
    diff = run_command(['git', 'status', '--porcelain'])
    if diff:
        print('Please make sure your tree is clean before running this script')
        print(diff)
        return 1

    # Identify the affected build files.
    build_files = locate_build_files(args.binary)
    if not build_files:
        print('Error: could not find any files for ' + args.binary)
        return 1

    # Confirm with the user that these are the files they want to convert.
    print('The following build file(s) will be converted:')
    for file in build_files:
        print(' - ' + os.path.relpath(file, FUCHSIA_ROOT))
    go_ahead = raw_input('Proceed? (Y/n) ').lower().strip()
    if go_ahead != 'y' and go_ahead != '':
        print('User disagrees, exiting')
        return 0

    # Convert the build files.
    for file in build_files:
        transform_build_file(file)

    # Create a commit.
    # Renamed from `id`, which shadowed the Python builtin.
    branch_id = args.binary.replace('/', '_')
    run_command(['git', 'checkout', '-b', 'gn-move-' + branch_id, 'JIRI_HEAD'])
    run_command(['git', 'add', '.'])
    message = [
        '[unification] Move //zircon/system/' + args.binary + ' to the GN build',
        '',
        'Generated with: ' + SCRIPT_LABEL,
        '',
        'scripts/unification/verify_element_move.py --reference local/initial.json:',
        'TODO PASTE VERIFICATION RESULT HERE',
        '',
        'Bug: 36139'
    ]
    commit_command = ['git', 'commit', '-a']
    for line in message:
        commit_command += ['-m', line]
    run_command(commit_command)

    print('Base change is ready. Please attempt to build a full system to '
          'identify further required changes.')
    return 0
def run_list(args):
    """Print the migratable //zircon/system targets found in the ZN manifests."""
    targets = set()
    for arch in ['arm64', 'x64']:
        manifest_path = os.path.join(args.build_dir,
                                     'legacy_unification-%s.json' % arch)
        with open(manifest_path, 'r') as manifest_file:
            data = json.load(manifest_file)
        for item in data:
            if item['name'].startswith('lib.'):
                # Libraries will be migrated through a different process.
                continue
            label = item['label']
            # Labels are always full, i.e. "//foo/bar:blah(//toolchain)".
            label = label[:label.index('(')]
            label = label[:label.index(':')]
            if not label.startswith('//system'):
                continue
            targets.add(label[len('//system/'):])
    for target in sorted(targets):
        print(target)
if __name__ == '__main__':
    # main() dispatches via argparse and returns None, so this exits with 0.
    sys.exit(main())
| 37.228682 | 122 | 0.571577 |
291238740b1ddde712947dbcc3e247759c297d46
| 1,075 |
py
|
Python
|
benwaonline/entities/post.py
|
goosechooser/benwaonline
|
e2879412aa6c3c230d25cd60072445165517b6b6
|
[
"MIT"
] | null | null | null |
benwaonline/entities/post.py
|
goosechooser/benwaonline
|
e2879412aa6c3c230d25cd60072445165517b6b6
|
[
"MIT"
] | 16 |
2017-09-13T10:21:40.000Z
|
2020-06-01T04:32:22.000Z
|
benwaonline/entities/post.py
|
goosechooser/benwaonline
|
e2879412aa6c3c230d25cd60072445165517b6b6
|
[
"MIT"
] | null | null | null |
from benwaonline.entities import Entity
from benwaonline.gateways import PostGateway
class Post(Entity):
    """A JSON-API 'post' resource object mirroring the Post database model."""

    _schema = 'PostSchema'
    type_ = 'post'
    # Maps a related resource type to the attribute holding that relationship.
    attrs = {
        'comment': 'comments',
        'tag': 'tags',
        'like': 'likes'
    }

    def __init__(self, id=None, title=None, created_on=None, user=None, comments=None, image=None, preview=None, tags=None, likes=None):
        self.title = title
        self.created_on = created_on
        self.user = user
        # Collection attributes fall back to fresh lists so instances never
        # share a mutable default.
        self.comments = comments if comments else []
        self.image = image
        self.preview = preview
        self.tags = tags if tags else []
        self.likes = likes if likes else []
        super().__init__(id=id)

    def __repr__(self):
        return f'<Post {self.id}: {self.title}>'

    def load_comments(self, **kwargs):
        """Fetch this post's comments through the PostGateway and attach them."""
        self.comments = PostGateway().get_resource(self, 'comments', **kwargs)
class PostLike(Post):
    # Same fields and schema as Post, but serialized with JSON-API type 'like'.
    _schema = 'PostSchema'
    type_ = 'like'
| 26.875 | 136 | 0.608372 |
c30f9a3df3f183b4956e4a5a4e267897b0221b6e
| 6,425 |
py
|
Python
|
robot/douyin/by_id.py
|
East196/hello-py
|
a77c7a0c8e5e2b5e8cefaf0fda335ab0c3b1da21
|
[
"Apache-2.0"
] | 1 |
2017-10-23T14:58:47.000Z
|
2017-10-23T14:58:47.000Z
|
robot/douyin/by_id.py
|
East196/hello-py
|
a77c7a0c8e5e2b5e8cefaf0fda335ab0c3b1da21
|
[
"Apache-2.0"
] | null | null | null |
robot/douyin/by_id.py
|
East196/hello-py
|
a77c7a0c8e5e2b5e8cefaf0fda335ab0c3b1da21
|
[
"Apache-2.0"
] | 1 |
2018-04-06T07:49:18.000Z
|
2018-04-06T07:49:18.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import sys
from urllib.parse import urlparse
from urllib.parse import quote
import requests
from bs4 import BeautifulSoup
import json
import os
import re
from contextlib import closing
import time
import certifi
import urllib3
class DouYin(object):
    """Downloader for Douyin (TikTok CN) shared videos.

    Every entry point talks to Douyin's public share/API endpoints over the
    network; ``by_uid`` is the usual driver (all videos of one user).
    """

    def __init__(self):
        print(sys.getdefaultencoding())
        print("抖音下载!")
        # urllib3 raises exceptions.SSLError on connection failures.
        user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0'
        self.headers = {'User-Agent': user_agent}
        self.http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
        urllib3.disable_warnings()

    def get_video_url(self, nickname):
        """Search for *nickname* and collect its shared videos.

        Returns (video_names, video_urls, aweme_count); aweme_count is 0 when
        the nickname is not found (the old code raised UnboundLocalError then).
        """
        video_names = []
        video_urls = []
        aweme_count = 0  # BUG FIX: previously unbound when the nickname was not found
        search_url = "https://aweme.snssdk.com/aweme/v1/discover/search/?iid=15735436175&device_id=37063549497&os_api=18&app_name=aweme&channel=App%20Store&idfa=08621BB7-65C3-454D-908A-D02F565D85F1&device_platform=iphone&build_number=15805&vid=6BD753D7-C89A-4BEF-9C3D-7192E26CF330&openudid=ee5f41b63ff4704166b2f2d8920267fcd109136b&device_type=iPhone6,2&app_version=1.5.8&version_code=1.5.8&os_version=11.0.3&screen_width=640&aid=1128&ac=WIFI&count=10&cursor=0&keyword={0}&type=1&cp=9646915915dce7fbe1&as=a115d95e314449fffd&ts={1}".format(
            quote(nickname), int(time.time()))
        print(search_url)
        req = self.http.request('GET', search_url)
        html = json.loads(req.data.decode('utf-8'))
        for each in html['user_list']:
            if each['user_info']['nickname'] == nickname:
                aweme_count = each['user_info']['aweme_count']
                user_id = each['user_info']['uid']
                user_url = 'https://aweme.snssdk.com/aweme/v1/aweme/post/?iid=15735436175&device_id=37063549497&os_api=18&app_name=aweme&channel=App%20Store&idfa=08621BB7-65C3-454D-908A-D02F565D85F1&device_platform=iphone&build_number=15805&vid=6BD753D7-C89A-4BEF-9C3D-7192E26CF330&openudid=ee5f41b63ff4704166b2f2d8920267fcd109136b&device_type=iPhone6,2&app_version=1.5.8&version_code=1.5.8&os_version=11.0.3&screen_width=640&aid=1128&ac=WIFI&count={0}&max_cursor=0&min_cursor=0&user_id={1}&cp=e7329b5ccceae5cbe1&as=a125ee5e8cd3393c4e&ts={2}'.format(
                    aweme_count, user_id, int(time.time()))
                r = self.http.request('GET', user_url, headers=self.headers)
                htm = json.loads(r.data.decode('utf-8'))
                for each in htm['aweme_list']:
                    share_desc = each['share_info']['share_desc']
                    # The generic share description means no per-video title;
                    # fall back to the first challenge (hashtag) name.
                    if '抖音-原创音乐短视频社区' == share_desc:
                        print(each)
                        video_names.append(each['cha_list'][0]['cha_name'] + '.mp4')
                    else:
                        video_names.append(share_desc + '.mp4')
                    video_urls.append(each['share_info']['share_url'])
        return video_names, video_urls, aweme_count

    def get_download_url(self, video_url):
        """Resolve a share-page URL into the real playable video address."""
        req = self.http.request('GET', video_url).data.decode('utf-8')
        video_url_data = re.findall('var data = \[(.*?)\];', str(req))[0]
        video_html = json.loads(video_url_data)
        download_url = video_html['video']['real_play_addr']
        return download_url

    # NOTE: the method name keeps the historical misspelling ("vedio") for
    # backward compatibility with existing callers.
    def vedio_download(self, video_name, video_url):
        """Stream *video_url* to disk as *video_name*; skips existing files."""
        if os.path.exists(video_name):
            print("downloaded! {video_name} by {video_url}".format(video_name=video_name, video_url=video_url))
            return
        size = 0
        with closing(self.http.request('GET', video_url, preload_content=False)) as response:
            chunk_size = 1024
            content_size = int(response.headers['content-length'])
            if response.status == 200:
                print('[文件大小]:%0.2f MB' % (content_size / chunk_size / 1024.0))
                with open(video_name, 'wb') as file:
                    for data in response.stream(chunk_size):
                        file.write(data)
                        size += len(data)
                        file.flush()
                sys.stdout.flush()
            response.release_conn()

    def run(self, nickname):
        """Download every video shared by *nickname* into ./<nickname>/."""
        video_names, video_urls, aweme_count = self.get_video_url(nickname)
        if nickname not in os.listdir():
            os.mkdir(nickname)
        print("视频下载中:\n")
        for num in range(aweme_count):
            # BUG FIX: the original indexed the (nonexistent) scalar
            # `video_url` and called the nonexistent `self.video_download`.
            download_url = self.get_download_url(video_urls[num])
            self.vedio_download(video_names[num], download_url)

    def by_video_id(self, video_id):
        """Download one video given its id, into mp4/<uid>/<video_id>.mp4."""
        video_summary_url = "https://www.douyin.com/share/video/{video_id}/".format(video_id=video_id)
        print(video_summary_url)
        r = requests.get(video_summary_url, headers=self.headers)  # , verify=False)
        # BUG FIX: re.findall with a str pattern over r.content (bytes) raises
        # TypeError on Python 3 -- parse the decoded r.text instead.
        music_json = json.loads(re.findall("var data = (.*);", r.text)[0])[0]
        try:
            uid = music_json["author"]['uid']
            video_url = music_json["video"]["play_addr"]["url_list"][0]
            print(video_url)
            directory = "mp4/{uid}/".format(uid=uid)
            if not os.path.exists(directory):
                print("no directory {directory}, making it".format(directory=directory))
                # BUG FIX: os.mkdir failed when the parent "mp4/" was missing.
                os.makedirs(directory)
            self.vedio_download("mp4/{uid}/{video_id}.mp4".format(uid=uid, video_id=video_id), video_url)
        except Exception:
            # Narrowed from a bare `except:`; keep the best-effort skip, log it.
            print("error!")
            print(music_json)

    def get_video_ids(self, uid):
        """Return the aweme (video) ids from the user's most recent posts."""
        user_url = "https://www.douyin.com/aweme/v1/aweme/post/?user_id={uid}&count=21&max_cursor=0&aid=1128".format(uid=uid)
        print(user_url)
        r = requests.get(user_url, headers=self.headers, verify=False)
        return [aweme["aweme_id"] for aweme in r.json()["aweme_list"]]

    def by_uid(self, uid):
        """Download every listed video of user *uid*, with a polite random delay."""
        video_ids = self.get_video_ids(uid)
        for video_id in video_ids:
            print(video_id)
            time.sleep(random.randint(3, 8))
            self.by_video_id(video_id)
if __name__ == '__main__':
    douyin = DouYin()
    # Download every video of one hard-coded user; the commented-out
    # by_video_id path grabs a single clip instead.
    uid = '81762680084'
    douyin.by_uid(uid)
    # video_id = "6517594971044842756"
    # douyin.by_video_id(video_id)
| 43.412162 | 542 | 0.639844 |
5ee7c02af2cd134c9eecb84b905a9eff61a384fa
| 445 |
py
|
Python
|
Algorithms/Greedy/mark_and_toys.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
Algorithms/Greedy/mark_and_toys.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
Algorithms/Greedy/mark_and_toys.py
|
byung-u/HackerRank
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
def maximumToys(prices, k):
    """Return the maximum number of toys purchasable with budget *k*.

    Greedy: buy the cheapest toys first until the budget is exceeded.
    """
    count = 0
    spent = 0
    for price in sorted(prices):
        if spent + price > k:
            break
        spent += price
        count += 1
    return count
if __name__ == "__main__":
n, k = input().strip().split(' ')
n, k = [int(n), int(k)]
prices = list(map(int, input().strip().split(' ')))
result = maximumToys(prices, k)
print(result)
| 20.227273 | 55 | 0.546067 |
6f223658076763446efab805b400748268619a31
| 521 |
py
|
Python
|
Implementierung/ResearchEnvironment/AuthorizationManagement/migrations/0010_auto_20180128_2345.py
|
Sashks/PSE
|
ae2d8133a85563c33583f15b9ba76a3a2bf0c762
|
[
"MIT"
] | null | null | null |
Implementierung/ResearchEnvironment/AuthorizationManagement/migrations/0010_auto_20180128_2345.py
|
Sashks/PSE
|
ae2d8133a85563c33583f15b9ba76a3a2bf0c762
|
[
"MIT"
] | null | null | null |
Implementierung/ResearchEnvironment/AuthorizationManagement/migrations/0010_auto_20180128_2345.py
|
Sashks/PSE
|
ae2d8133a85563c33583f15b9ba76a3a2bf0c762
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.1 on 2018-01-28 22:45
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated by Django 2.0.1.  Adds composite uniqueness so a sender
    # can file at most one access request and one deletion request per
    # resource.  Do not hand-edit generated migrations beyond comments.

    dependencies = [
        ('AuthorizationManagement', '0009_auto_20180128_2344'),
    ]

    operations = [
        migrations.AlterUniqueTogether(
            name='accessrequest',
            unique_together={('sender', 'resource')},
        ),
        migrations.AlterUniqueTogether(
            name='deletionrequest',
            unique_together={('sender', 'resource')},
        ),
    ]
| 23.681818 | 63 | 0.604607 |
6f48652640ed18d33a58e2f3644d6363a0c1efec
| 2,173 |
py
|
Python
|
examples/restaurantbot/policy.py
|
n01deas/rasa
|
79f0feeb02919142eb06b8c52da5632f1c25c251
|
[
"Apache-2.0"
] | 5 |
2019-06-06T08:59:15.000Z
|
2020-01-19T10:56:45.000Z
|
examples/restaurantbot/policy.py
|
alfredfrancis/rasa
|
d8d226408f20cc2563c3aefbccef3e364a447666
|
[
"Apache-2.0"
] | 56 |
2020-06-09T00:16:14.000Z
|
2020-11-16T00:25:20.000Z
|
examples/restaurantbot/policy.py
|
alfredfrancis/rasa
|
d8d226408f20cc2563c3aefbccef3e364a447666
|
[
"Apache-2.0"
] | 4 |
2019-05-19T21:19:32.000Z
|
2021-01-06T14:26:37.000Z
|
import logging
from rasa.core.policies.keras_policy import KerasPolicy
logger = logging.getLogger(__name__)
class RestaurantPolicy(KerasPolicy):
    """KerasPolicy variant that builds an LSTM-based dialogue model."""

    def model_architecture(self, input_shape, output_shape):
        """Build a Keras model and return a compiled model."""
        from tensorflow.keras.models import Sequential
        from tensorflow.keras.layers import (
            Masking,
            LSTM,
            Dense,
            TimeDistributed,
            Activation,
        )

        # Build Model
        model = Sequential()

        # the shape of the y vector of the labels,
        # determines which output from rnn will be used
        # to calculate the loss
        if len(output_shape) == 1:
            # y is (num examples, num features) so
            # only the last output from the rnn is used to
            # calculate the loss
            model.add(Masking(mask_value=-1, input_shape=input_shape))
            model.add(LSTM(self.rnn_size))
            model.add(Dense(input_dim=self.rnn_size, units=output_shape[-1]))
        elif len(output_shape) == 2:
            # y is (num examples, max_dialogue_len, num features) so
            # all the outputs from the rnn are used to
            # calculate the loss, therefore a sequence is returned and
            # time distributed layer is used

            # the first value in input_shape is max dialogue_len,
            # it is set to None, to allow dynamic_rnn creation
            # during prediction
            model.add(Masking(mask_value=-1, input_shape=(None, input_shape[1])))
            model.add(LSTM(self.rnn_size, return_sequences=True))
            model.add(TimeDistributed(Dense(units=output_shape[-1])))
        else:
            raise ValueError(
                "Cannot construct the model because"
                "length of output_shape = {} "
                "should be 1 or 2."
                "".format(len(output_shape))
            )

        model.add(Activation("softmax"))

        model.compile(
            loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
        )

        # NOTE(review): Keras' model.summary() prints the summary and appears
        # to return None, so this likely logs "None" -- confirm intended.
        logger.debug(model.summary())

        return model
| 35.622951 | 83 | 0.59549 |
6f5f4601b067d0d197cced5f9e122fe3bfd25484
| 544 |
py
|
Python
|
packages/watchmen-rest-dqc/src/watchmen_rest_dqc/main.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-rest-dqc/src/watchmen_rest_dqc/main.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
packages/watchmen-rest-dqc/src/watchmen_rest_dqc/main.py
|
Indexical-Metrics-Measure-Advisory/watchmen
|
c54ec54d9f91034a38e51fd339ba66453d2c7a6d
|
[
"MIT"
] | null | null | null |
from watchmen_rest.system import health_router
from watchmen_utilities import ArrayHelper
from .admin import catalog_router, monitor_rules_router
from .dqc import dqc
from .monitor import topic_monitor_router
from .topic_profile import topic_profile_router
# Build the DQC FastAPI-style application.
app = dqc.construct()


@app.on_event("startup")
def startup():
    # Run the DQC module's own startup hooks against the freshly built app.
    dqc.on_startup(app)


# Register every router on the app: the system health endpoint, the admin
# catalog/monitor-rule endpoints, and the topic monitor/profile endpoints.
ArrayHelper([
    # system
    health_router.router,
    catalog_router.router, monitor_rules_router.router,
    topic_monitor_router.router, topic_profile_router.router
]).each(lambda x: app.include_router(x))
| 24.727273 | 57 | 0.823529 |
48b3290beda2ddedd9922cb55936e61f37a36a47
| 1,412 |
py
|
Python
|
modeling/head/res_unet.py
|
UESTC-Liuxin/SkmtSeg
|
1251de57fae967aca395644d1c70a9ba0bb52271
|
[
"Apache-2.0"
] | 2 |
2020-12-22T08:40:05.000Z
|
2021-03-30T08:09:44.000Z
|
modeling/head/res_unet.py
|
UESTC-Liuxin/SkmtSeg
|
1251de57fae967aca395644d1c70a9ba0bb52271
|
[
"Apache-2.0"
] | null | null | null |
modeling/head/res_unet.py
|
UESTC-Liuxin/SkmtSeg
|
1251de57fae967aca395644d1c70a9ba0bb52271
|
[
"Apache-2.0"
] | null | null | null |
""" Full assembly of the parts to form the complete network """
from __future__ import division
import torch
import torch.nn as nn
from torch.nn.functional import upsample
from modeling.model_utils.unet_parts import *
import torch.nn.functional as F
class ResUNet(nn.Module):
    """U-Net-style decoder head that fuses multi-scale backbone features.

    ``forward`` receives a sequence of backbone feature maps, shrinks the
    deepest one with a 1x1 convolution, fuses it with three skip connections
    through ``Up`` blocks, classifies with ``OutConv`` and upsamples 2x.
    """

    def __init__(self, backbone, BatchNorm, output_stride, num_classes, freeze_bn=False):
        """
        Args:
            backbone: feature extractor module (stored for reference; this
                head does not call it directly).
            BatchNorm: normalisation layer class forwarded to the Up blocks.
            output_stride (int): backbone output stride; only 8 and 16 are
                supported, anything else raises NotImplementedError.
            num_classes (int): number of output segmentation classes.
            freeze_bn (bool): if True, calls ``self.freeze_bn()``.
        """
        super(ResUNet, self).__init__()
        self.backbone = backbone
        self.n_classes = num_classes
        # 1x1 conv to shrink the deepest backbone feature (2048 ch) to 512 ch.
        self.conv1 = nn.Conv2d(2048, 512, 1, bias=False)
        self.output_stride = output_stride
        # The original per-stride stride/dilation tables were assigned to
        # inconsistently named locals and never used; only the validity check
        # of output_stride is kept.
        if output_stride not in (8, 16):
            raise NotImplementedError
        self.up1 = Up(1024, 512 // 2, BatchNorm)
        self.up2 = Up(512, 256 // 4, BatchNorm)
        self.up3 = Up(128, 64, BatchNorm)
        self.outc = OutConv(64, num_classes)
        if freeze_bn:
            # NOTE(review): freeze_bn() is not defined in this class nor in
            # nn.Module -- confirm it is provided by a mixin/subclass before
            # constructing with freeze_bn=True.
            self.freeze_bn()

    def forward(self, x):
        """Decode backbone features into per-pixel class logits.

        Args:
            x: sequence of backbone feature maps; indices 0, 2, 3 and 5 are
               used (deepest feature at x[0], skips at x[2], x[3], x[5]) --
               assumed layout of the project's backbone, TODO confirm.

        Returns:
            Logit tensor, bilinearly upsampled by a factor of 2.
        """
        x2 = x[5]
        x3 = x[3]
        x4 = x[2]
        x5 = self.conv1(x[0])
        x = self.up1(x5, x4)
        x = self.up2(x, x3)
        x = self.up3(x, x2)
        logits = self.outc(x)
        # Recover the remaining 2x of resolution lost in the encoder.
        x = F.interpolate(logits, scale_factor=2, mode='bilinear', align_corners=True)
        return x
| 30.695652 | 87 | 0.588527 |
d2ee7840d3278f80013902d0277dfb6830384d56
| 2,289 |
py
|
Python
|
data/opendf.py
|
Lyceoth/DataEngineeringProject
|
bc3c230a23eb5f3e8d7dd5322e3b22995801dc29
|
[
"Apache-2.0"
] | null | null | null |
data/opendf.py
|
Lyceoth/DataEngineeringProject
|
bc3c230a23eb5f3e8d7dd5322e3b22995801dc29
|
[
"Apache-2.0"
] | null | null | null |
data/opendf.py
|
Lyceoth/DataEngineeringProject
|
bc3c230a23eb5f3e8d7dd5322e3b22995801dc29
|
[
"Apache-2.0"
] | null | null | null |
#!/bin/bash
"true" '''\' #allow running from vs code, cf. http://rosettacode.org/wiki/Multiline_shebang#Python
CUR_DIR="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
CUR_FILE=`basename "$0"`
CMD="docker run -it --rm -v \"$PWD/data:/data\" -v \"$CUR_DIR:/src\" --name=pyspark jupyter/pyspark-notebook spark-submit \"/src/$CUR_FILE\" | grep -v INFO"
echo "Running: $CMD" | sed "s|$PWD|\$PWD|g"
eval "$CMD"
exit 0
'''
# The block above is a multiline-shebang trick: executed as a shell script it
# re-launches this same file via spark-submit inside a jupyter/pyspark-notebook
# container; when Python runs the file, lines 2-10 are just a string literal.
# Download JDBC driver for MySQL
# wget https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-5.1.45.tar.gz
# BEGIN-SNIPPET
import os
from pyspark import SparkContext, SQLContext
# Specify where driver is
os.environ['PYSPARK_SUBMIT_ARGS'] = '--jars /home/bigdata/soccerapp/mysql-connector-java-5.1.45/mysql-connector-java-5.1.45-bin.jar pyspark-shell'
# Create local Spark Context
spark = SparkContext("local[*]", "mysql_data")
sc = SQLContext(spark)
# Alternative:
#sc = SparkContext(appName="mysql_data")
#sqlContext = SQLContext(sc)
#Provide your Spark-master node below
# Connection settings for the SOCCERSTAT MySQL database; the password is
# hard-coded here, so this script is for local/demo use only.
hostname = "10.107.120.235"
dbname = "SOCCERSTAT"
jdbcPort = 3306
username = "root"
password = "mysecretpw"
jdbc_url = "jdbc:mysql://{0}:{1}/{2}?user={3}&password={4}".format(hostname,jdbcPort, dbname,username,password)
# Load the Player table over JDBC into a Spark DataFrame (the "(select ...)"
# form lets Spark treat the query result as a table).
query = "(select * from Player)"
df = sc.read.format("jdbc").options(driver = "com.mysql.jdbc.Driver",url=jdbc_url, dbtable=query ).load()
# Script comparison (equivalent DataFrameReader style):
#jdbcDF = spark.read.format("jdbc") \
#    .option("url", "jdbc:postgresql:dbserver") \
#    .option("dbtable", "schema.tablename") \
#    .option("user", "username").option("password", "password") \
#.load()
# Displays the content of the DataFrame to stdout
df.show()
# Print the schema in a tree format
df.printSchema()
# Terminal input scenarios:
# docker run -it --rm -v "$PWD/data:/data" --name=pyspark jupyter/pyspark-notebook spark-submit /data/opendf.py - running!
# docker run -it --rm -v "$PWD/data:/data" --name=pyspark jupyter/pyspark-notebook spark-submit --jars /home/bigdata/soccerapp/mysql-connector-java-5.1.45/mysql-connector-java-5.1.45-bin.jar opendf.py
# /usr/local/bin/spark-submit --jars /home/bigdata/soccerapp/mysql-connector-java-5.1.45/mysql-connector-java-5.1.45-bin.jar opendf.py
81ca2829f17c850b910e2541a76769dd5b0969b5
| 360 |
py
|
Python
|
site/public/courses/SPD-2.31/archive/lab/refactoring/replace_temp_with_query_fowler.py
|
KitsuneNoctus/makeschool
|
5eec1a18146abf70bb78b4ee3d301f6a43c9ede4
|
[
"MIT"
] | 1 |
2021-08-24T20:22:19.000Z
|
2021-08-24T20:22:19.000Z
|
site/public/courses/SPD-2.31/archive/lab/refactoring/replace_temp_with_query_fowler.py
|
KitsuneNoctus/makeschool
|
5eec1a18146abf70bb78b4ee3d301f6a43c9ede4
|
[
"MIT"
] | null | null | null |
site/public/courses/SPD-2.31/archive/lab/refactoring/replace_temp_with_query_fowler.py
|
KitsuneNoctus/makeschool
|
5eec1a18146abf70bb78b4ee3d301f6a43c9ede4
|
[
"MIT"
] | null | null | null |
# Adapted from a Java code in the "Refactoring" book by Martin Fowler.
# Replace temp with query
# Code snippet. Not runnable.
def get_price():
    """Return the discounted price for the module-level ``quantity`` and
    ``item_price``.

    Orders whose base price exceeds 1000 get a 5% discount (factor 0.95),
    all others 2% (factor 0.98). This is a snippet from Fowler's "Replace
    Temp with Query" example and relies on ``quantity`` and ``item_price``
    being defined at module level.
    """
    base_price = quantity * item_price
    # The original pre-initialised discount_factor to 0 and then always
    # overwrote it in both branches; the dead assignment is dropped and the
    # if/else collapsed into a conditional expression.
    discount_factor = 0.95 if base_price > 1000 else 0.98
    return base_price * discount_factor
f2245790ea764346cc7aacc2262fdb63440b2f40
| 108 |
py
|
Python
|
tos/nano.py
|
Bodia2cat/ToS
|
1b8c19fd4ab5f8458f5dc1832b764430ac6e02d9
|
[
"Unlicense"
] | null | null | null |
tos/nano.py
|
Bodia2cat/ToS
|
1b8c19fd4ab5f8458f5dc1832b764430ac6e02d9
|
[
"Unlicense"
] | null | null | null |
tos/nano.py
|
Bodia2cat/ToS
|
1b8c19fd4ab5f8458f5dc1832b764430ac6e02d9
|
[
"Unlicense"
] | null | null | null |
import os
import subprocess

# Clear the terminal by running the external "clear" program (no shell).
subprocess.call("clear")

# Ask which file to edit and open it in nano. The filename is passed as a
# separate argv element instead of being concatenated into an os.system()
# shell command, so names containing spaces or shell metacharacters are
# handled safely (no shell injection).
filename = input("File name: ")
subprocess.call(["nano", filename])
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.