seq_id (string) | text (string) | repo_name (string) | sub_path (string) | file_name (string) | file_ext (string) | file_size_in_byte (int64) | program_lang (string) | lang (string) | doc_type (string) | stars (int64) | dataset (string) | pt (string) | api (list)
---|---|---|---|---|---|---|---|---|---|---|---|---|---
21988639916
|
# update.py
import requests
import json
import tarfile

# Fetch the list of patch versions and take the newest one
url = "https://ddragon.leagueoflegends.com/api/versions.json"
response = requests.get(url)
obj = response.json()
patch = str(obj[0])

# Download the data archive for that patch
zipUrl = "https://ddragon.leagueoflegends.com/cdn/dragontail-" + patch + ".tgz"
print(zipUrl)
data = requests.get(zipUrl)
with open("src/assets/prev-data/dragontail-" + patch + ".tgz", 'wb') as f:
    # opening the file in write mode
    f.write(data.content)

# Extract the archive we just downloaded (use the current patch, not a hard-coded version)
tgzFile = tarfile.open("src/assets/prev-data/dragontail-" + patch + ".tgz", 'r')
print('Extracting archive...')
tgzFile.extractall('src/assets/prev-data/data-hold')
print('Extracting Done!')
tgzFile.close()
|
ryanweston/lol-skills
|
src/assets/update.py
|
update.py
|
py
| 659 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "tarfile.open",
"line_number": 22,
"usage_type": "call"
}
] |
32094781612
|
import sys
sys.stdin = open("input.txt", "r")
from collections import Counter
A = int(input())
B = int(input())
C = int(input())
X = str(A*B*C)
for n in range(0, 10):
    N = str(n)
    if N in Counter(X):
        print(Counter(X).get(N))
    else:
        print(0)
|
doll2gom/TIL
|
KDT/week4/01.19/2577.py
|
2577.py
|
py
| 267 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "sys.stdin",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "collections.Counter",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 14,
"usage_type": "call"
}
] |
19827881272
|
from flask import Flask, request
import json
import socket
import urllib.request as urllib2
import re
from functools import wraps

application = Flask(__name__)
CONFIG = json.load(open("config.json", "r"))
API_KEYS = CONFIG["api_keys"]

def requires_auth_key(func):
    @wraps(func)
    def wrapped(*args, **kwargs):
        api_key = request.form.get("api_key", None)
        if api_key not in API_KEYS:
            return "Unauthorized", 401
        else:
            if not API_KEYS[api_key]["enabled"]:
                return "Unauthorized", 401
            return func(*args, **kwargs)
    return wrapped

@application.route('/carbon/metrics', methods=["POST"])
@requires_auth_key
def post_metric():
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((CONFIG["carbon"]["host"], int(CONFIG["carbon"]["port"])))
    except Exception as e:
        return "<h2>Error: %s</h2>" % e, 500
    else:
        data = request.form.get('data')
        if data is not None:
            data = re.findall(r"([\w.]+ [\S]+ [\d]+)", request.form.get('data'), re.MULTILINE)
        else:
            data = request.form.getlist('data[]')
        sentCmd = 0
        for entry in data:
            matches = re.findall(r"([\w.]+ [\S]+ [\d]+)", entry)
            if not matches:
                continue
            line = matches[0]
            if len(line) < 10:
                continue
            line += "\n"
            # print(("Send:" + line).encode('utf8'))
            s.send(line.encode('utf8'))
            sentCmd += 1
        s.close()
        if sentCmd < 1:
            return "NOTHING SENT TO SERVER. BADLY FORMATTED STRING/VAR?", 202
        return "OK", 200
    return "Unknown error", 500

@application.route('/carbon/events', methods=["POST"])
@requires_auth_key
def post_event():
    req = urllib2.Request('http://{host}:{port}/events'.format(**CONFIG["graphite"]),
                          data=request.form.get('data').encode('utf8'),
                          headers={'Content-type': 'application/json'})
    try:
        urllib2.urlopen(req)
    except Exception as e:
        return "<h2>Error: %s</h2>" % e, 500
    else:
        return "OK", 200
    return "Unknown error", 500

if __name__ == "__main__":
    application.run(debug=False, use_reloader=False, host="127.0.0.1", port=8081, threaded=True)
|
s0lesurviv0r/graphite_http_relay
|
main.py
|
main.py
|
py
| 2,262 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.request.form.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "functools.wraps",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "socket.socket",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "socket.SOCK_STREAM",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "flask.request.form.get",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "re.findall",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "flask.request.form.get",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "re.MULTILINE",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "flask.request.form.getlist",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "re.findall",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "urllib.request.Request",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 64,
"usage_type": "name"
}
] |
27132126928
|
import logging
import logging.config
import redis
from rq import Connection, Queue
from agent.agents import get_agent_info
from plugins.patching.os_apps.incoming_updates import \
    incoming_packages_from_agent
from plugins.patching.custom_apps.custom_apps import \
    add_custom_app_to_agents
from plugins.patching.supported_apps.syncer import \
    get_all_supported_apps_for_agent, get_all_agent_apps_for_agent

rq_host = 'localhost'
rq_port = 6379
rq_db = 0
rq_pool = redis.StrictRedis(host=rq_host, port=rq_port, db=rq_db)

logging.config.fileConfig('/opt/TopPatch/conf/logging.config')
logger = logging.getLogger('rvapi')

class RvHandOff():
    def __init__(self, username, customer_name, uri, method,
                 agentid, rv_plugin, agent_data=None,
                 oper_type='newagent', delete_afterwards=True):
        self.delete_afterwards = delete_afterwards
        self.customer_name = customer_name
        if not agent_data:
            agent_data = get_agent_info(
                agentid=agentid
            )
        self.add_packages_from_agent(
            username, agentid,
            agent_data, rv_plugin
        )
        if oper_type == 'newagent':
            self.add_custom_apps(
                username, customer_name,
                uri, method, agentid
            )
            self.add_supported_apps(agentid)
            self.add_agent_apps(agentid)
        elif oper_type == 'updatesapplications':
            self.add_supported_apps(agentid)
            self.add_agent_apps(agentid)

    def add_custom_apps(self, username, customer_name,
                        uri, method, agentid):
        rv_q = Queue('incoming_updates', connection=rq_pool)
        rv_q.enqueue_call(
            func=add_custom_app_to_agents,
            args=(
                username, customer_name,
                uri, method, None, agentid
            ),
            timeout=3600
        )

    def add_supported_apps(self, agentid):
        rv_q = Queue('incoming_updates', connection=rq_pool)
        rv_q.enqueue_call(
            func=get_all_supported_apps_for_agent,
            args=(
                agentid,
            ),
            timeout=3600
        )

    def add_agent_apps(self, agentid):
        rv_q = Queue('incoming_updates', connection=rq_pool)
        rv_q.enqueue_call(
            func=get_all_agent_apps_for_agent,
            args=(
                agentid,
            ),
            timeout=3600
        )

    def add_packages_from_agent(self, username, agent_id, agent_data, apps):
        rv_q = Queue('incoming_updates', connection=rq_pool)
        rv_q.enqueue_call(
            func=incoming_packages_from_agent,
            args=(
                username, agent_id,
                self.customer_name,
                agent_data['os_code'], agent_data['os_string'],
                apps, self.delete_afterwards
            ),
            timeout=3600
        )
|
SteelHouseLabs/vFense
|
tp/src/receiver/rvhandler.py
|
rvhandler.py
|
py
| 2,924 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "redis.StrictRedis",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "logging.config.fileConfig",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "logging.config",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "agent.agents.get_agent_info",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "rq.Queue",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "plugins.patching.custom_apps.custom_apps.add_custom_app_to_agents",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "rq.Queue",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "plugins.patching.supported_apps.syncer.get_all_supported_apps_for_agent",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "rq.Queue",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "plugins.patching.supported_apps.syncer.get_all_agent_apps_for_agent",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "rq.Queue",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "plugins.patching.os_apps.incoming_updates.incoming_packages_from_agent",
"line_number": 89,
"usage_type": "name"
}
] |
23327135383
|
import logging
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
import settings

logging.basicConfig(filename='bot.log', level=logging.INFO)

# Proxy settings, used just out of interest
PROXY = {'proxy_url': settings.PROXY_URL,
         'urllib3_proxy_kwargs': {'username': settings.PROXY_USERNAME, 'password': settings.PROXY_PASSWORD}}

def greet_user(update, context):
    print('Вызван /start')
    # print(update)
    update.message.reply_text('Привет, пользователь! Ты вызвал команду /start')

def talk_to_me(update, context):
    user_text = update.message.text
    print(user_text)
    update.message.reply_text(user_text)

def main():
    # Create the bot and pass it the token issued by BotFather when the bot was registered
    mybot = Updater(settings.API_KEY, use_context=True, request_kwargs=PROXY)
    dp = mybot.dispatcher  # get the dispatcher
    dp.add_handler(CommandHandler('start', greet_user))  # register the /start handler
    dp.add_handler(MessageHandler(Filters.text, talk_to_me))
    # Enable logging
    logging.info("Бот стартовал")
    # Start polling Telegram for new messages
    mybot.start_polling()
    # Run the bot until it is stopped manually
    mybot.idle()

if __name__ == "__main__":
    main()
|
SanuNak/mybot
|
bot.py
|
bot.py
|
py
| 1,646 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.basicConfig",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "settings.PROXY_URL",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "settings.PROXY_USERNAME",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "settings.PROXY_PASSWORD",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "telegram.ext.Updater",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "settings.API_KEY",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "telegram.ext.CommandHandler",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "telegram.ext.MessageHandler",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "telegram.ext.Filters.text",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "telegram.ext.Filters",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "logging.info",
"line_number": 32,
"usage_type": "call"
}
] |
14993235685
|
# Import the url module
from django.conf.urls import url
# Import the view functions
from .views import *

app_name = "booktest"

urlpatterns = [
    # url('myurl/', myview)
    # url(r'^index/$', index),
    url(r'^$', index, name="index"),
    # url(r'^$', indexView.as_view(), name="index"),
    # url(r'^$', indexTemplateView.as_view(), name="index"),
    # url(r'^list/$', listView.as_view(), name="list"),
    url(r'^list/$', list, name="list"),
    url(r'^detail/(\d+)/$', detail, name="detail"),
    url(r'^deletebook/(\d+)/$', deletebook, name="deletebook"),
    url(r'^addhero/(\d+)/$', addhero, name="addhero"),
    url(r'^deletehero/(\d+)/$', deletehero, name="deletehero"),
    url(r'^addads/$', addads, name="addads"),
]
|
pan0527/chenpan
|
demo1/booktest/urls.py
|
urls.py
|
py
| 712 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.conf.urls.url",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 30,
"usage_type": "call"
}
] |
41933031591
|
import pyautogui
import time

pyautogui.moveTo(3530, 983)  # Move the cursor to the chat box
pyautogui.click()

# Spam 100 chat messages.
for i in range(100):
    pyautogui.write("PING!!!")  # The spam message text
    time.sleep(0.01)            # Delay between messages
    pyautogui.press("Enter")
|
arvandha121/SPAM_CHAT_WHATSAPP
|
spam.py
|
spam.py
|
py
| 268 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pyautogui.moveTo",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pyautogui.click",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pyautogui.write",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pyautogui.press",
"line_number": 11,
"usage_type": "call"
}
] |
74795559226
|
from django.db import models
from Pages.models import Page
import urllib.parse
import urllib.request
from .special_character_table import TABLE

def get_report_url(post_hashtag):
    return "http://c8763.webutu.com?hashtag=" + str(post_hashtag)

# Create your models here.
class Record(models.Model):
    submit_type = models.IntegerField(default=0)
    post_id = models.IntegerField(blank=False)
    fb_post_id = models.TextField(blank=False)

class Report(models.Model):
    REPORTER_TYPE = (
        ("S", "Submitter"),
        ("R", "Related"),
        ("F", "Friend"),
        ("O", "Other")
    )
    reporter = models.CharField(max_length=10, choices=REPORTER_TYPE, default="S")
    reason = models.TextField(blank=False)
    post_hashtag = models.IntegerField(blank=False)
    fb_post_id = models.TextField(blank=False)

class Submission(models.Model):
    context = models.TextField(blank=False)
    submit_type = models.IntegerField(default=0)
    submit_time = models.DateTimeField(auto_now_add=True)

    def publish(self, manager):
        page = Page.objects.all()[0]
        fb_api_url = "https://graph.facebook.com/v2.12/" + page.page_id
        post_context = "#"
        post_context += page.prefix + str(page.post_count)
        # post_context += "\n檢舉這篇文章:"
        # post_context += get_report_url(page.post_count)
        page.post_count = page.post_count + 1
        page.save()
        response = None
        if self.submit_type == 0:
            fb_api_url += "/feed"
            post_context += "\n\n" + self.context + "\n\n"
            post_context += manager
            values = {
                'message': post_context,
                'access_token': page.access_token
            }
            data = urllib.parse.urlencode(values)
            byte_data = data.encode('utf8')
            response = urllib.request.urlopen(fb_api_url, byte_data)
        else:
            fb_api_url += "/photos"
            image_text = self.context + "\n"
            watermark = manager
            for tup in TABLE:
                image_text = image_text.replace(tup[0], tup[1])
                watermark = watermark.replace(tup[0], tup[1])
            param = urllib.parse.urlencode({'text': image_text, 'line_length': 16, 'watermark': watermark})
            image_url = "http://complain-kskg.ga/texttoimage/?%s" % param
            values = {
                'caption': post_context,
                'url': image_url,
                'access_token': page.access_token
            }
            data = urllib.parse.urlencode(values)
            byte_data = data.encode('utf8')
            response = urllib.request.urlopen(fb_api_url, byte_data)
        return response.read()
|
austin880625/KSKGcomplain
|
Submissions/models.py
|
models.py
|
py
| 2,572 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "django.db.models.Model",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "django.db.models.IntegerField",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "Pages.models.Page.objects.all",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "Pages.models.Page.objects",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "Pages.models.Page",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "urllib.parse.urlencode",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "urllib.parse",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "special_character_table.TABLE",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "urllib.parse.urlencode",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "urllib.parse",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "urllib.parse.urlencode",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "urllib.parse",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "urllib.request",
"line_number": 69,
"usage_type": "attribute"
}
] |
17688731362
|
import json
import os
import gui
import wx
import addonHandler
import braille
import config
import controlTypes
import languageHandler
from .common import configDir
addonHandler.initTranslation()
CUR_LANG = languageHandler.getLanguage().split('_')[0]
PATH_JSON = os.path.join(configDir, f"roleLabels-{CUR_LANG}.json")
class SettingsDlg(gui.settingsDialogs.SettingsPanel):
# Translators: title of a dialog.
title = _("Role labels")
roleLabels = {}
def makeSettings(self, settingsSizer):
self.roleLabels = roleLabels.copy()
sHelper = gui.guiHelper.BoxSizerHelper(self, sizer=settingsSizer)
self.toggleRoleLabels = sHelper.addItem(wx.CheckBox(self, label=_("Use custom braille &role labels")))
self.toggleRoleLabels.SetValue(config.conf["brailleExtender"]["features"]["roleLabels"])
self.toggleRoleLabels.Bind(wx.EVT_CHECKBOX, self.onToggleRoleLabels)
self.categories = sHelper.addLabeledControl(_("Role cate&gory:"), wx.Choice, choices=[_("General"), _("Landmarks"), _("Positive states"), _("Negative states")])
self.categories.Bind(wx.EVT_CHOICE, self.onCategories)
self.categories.SetSelection(0)
choices = []
if hasattr(controlTypes, "roleLabels"):
choices = [controlTypes.roleLabels[int(k)] for k in braille.roleLabels.keys()]
self.labels = sHelper.addLabeledControl(_("&Role:"), wx.Choice, choices=choices)
self.labels.Bind(wx.EVT_CHOICE, self.onLabels)
self.label = sHelper.addLabeledControl(_("Braille &label"), wx.TextCtrl)
self.label.Bind(wx.EVT_TEXT, self.onLabel)
bHelper = gui.guiHelper.ButtonHelper(orientation=wx.HORIZONTAL)
self.resetLabelBtn = bHelper.addButton(self, wx.NewId(), _("&Reset this role label"), wx.DefaultPosition)
self.resetLabelBtn.Bind(wx.EVT_BUTTON, self.onResetLabelBtn)
self.resetAllLabelsBtn = bHelper.addButton(self, wx.NewId(), _("Reset a&ll role labels"), wx.DefaultPosition)
self.resetAllLabelsBtn.Bind(wx.EVT_BUTTON, self.onResetAllLabelsBtn)
sHelper.addItem(bHelper)
self.onToggleRoleLabels(None)
self.onCategories(None)
def onToggleRoleLabels(self, evt):
l = [
self.categories,
self.labels,
self.label,
self.resetLabelBtn,
self.resetAllLabelsBtn,
]
for e in l:
if self.toggleRoleLabels.IsChecked():
e.Enable()
else:
e.Disable()
def onCategories(self, event):
labels = []
idCategory = self.categories.GetSelection()
oldRoleLabels = hasattr(controlTypes, "roleLabels")
if idCategory == 0:
if oldRoleLabels:
labels = [controlTypes.roleLabels[int(k)] for k in braille.roleLabels.keys()]
else:
labels = [role.displayString for role in braille.roleLabels.keys()]
elif idCategory == 1:
labels = list(braille.landmarkLabels.keys())
elif idCategory == 2:
if oldRoleLabels:
labels = [controlTypes.stateLabels[k] for k in braille.positiveStateLabels.keys()]
else:
labels = [role.displayString for role in braille.positiveStateLabels.keys()]
elif idCategory == 3:
if oldRoleLabels:
labels = [controlTypes.stateLabels[k] for k in braille.negativeStateLabels.keys()]
else:
labels = [role.displayString for role in braille.negativeStateLabels.keys()]
for iLabel, label in enumerate(labels):
idLabel = getIDFromIndexes(idCategory, iLabel)
actualLabel = getLabelFromID(idCategory, idLabel)
originalLabel = self.getOriginalLabel(idCategory, idLabel, actualLabel)
labels[iLabel] += _(": %s") % actualLabel
if actualLabel != originalLabel: labels[iLabel] += " (%s)" % originalLabel
self.labels.SetItems(labels)
if idCategory > -1 and idCategory < 4: self.labels.SetSelection(0)
self.onLabels(None)
def onLabels(self, event):
idCategory = self.categories.GetSelection()
idLabel = getIDFromIndexes(idCategory, self.labels.GetSelection())
key = f"{idCategory}:{idLabel}"
if key in self.roleLabels.keys(): self.label.SetValue(self.roleLabels[key])
else: self.label.SetValue(self.getOriginalLabel(idCategory, idLabel))
def onLabel(self, evt):
idCategory = self.categories.GetSelection()
iLabel = self.labels.GetSelection()
idLabel = getIDFromIndexes(idCategory, iLabel)
key = "%d:%s" % (idCategory, idLabel)
label = self.label.GetValue()
if idCategory >= 0 and iLabel >= 0:
if self.getOriginalLabel(idCategory, idLabel, chr(4)) == label:
if key in self.roleLabels.keys():
self.roleLabels.pop(key)
else: self.roleLabels[key] = label
actualLabel = getLabelFromID(idCategory, idLabel)
originalLabel = self.getOriginalLabel(idCategory, idLabel, actualLabel)
if label != originalLabel: self.resetLabelBtn.Enable()
else: self.resetLabelBtn.Disable()
def onResetLabelBtn(self, event):
idCategory = self.categories.GetSelection()
iLabel = self.labels.GetSelection()
idLabel = getIDFromIndexes(idCategory, iLabel)
key = "%d:%s" % (idCategory, idLabel)
actualLabel = getLabelFromID(idCategory, idLabel)
originalLabel = self.getOriginalLabel(idCategory, idLabel, actualLabel)
self.label.SetValue(originalLabel)
self.onLabel(None)
self.label.SetFocus()
def onResetAllLabelsBtn(self, event):
nbCustomizedLabels = len(self.roleLabels)
if not nbCustomizedLabels:
msg = _("You have no customized role labels.")
res = gui.messageBox(msg, _("Reset role labels"),
wx.OK|wx.ICON_INFORMATION)
return
msg = _("You have %d customized role labels defined. Do you want to reset all labels?") % nbCustomizedLabels
flags = wx.YES|wx.NO|wx.ICON_INFORMATION
res = gui.messageBox(msg, _("Reset role labels"), flags)
if res == wx.YES:
self.roleLabels = {}
self.onCategories(None)
def getOriginalLabel(self, idCategory, idLabel, defaultValue = ''):
key = f"{idCategory}:{idLabel}"
if key in backupRoleLabels.keys():
return backupRoleLabels[key][1]
return getLabelFromID(idCategory, idLabel)
def postInit(self): self.toggleRoleLabels.SetFocus()
def onSave(self):
global roleLabels
config.conf["brailleExtender"]["features"]["roleLabels"] = self.toggleRoleLabels.IsChecked()
saveRoleLabels(self.roleLabels)
discardRoleLabels()
if config.conf["brailleExtender"]["features"]["roleLabels"]:
loadRoleLabels()
backupRoleLabels = {}
roleLabels = {}
def getIDFromIndexes(idCategory, idLabel):
oldRoleLabels = hasattr(controlTypes, "roleLabels")
if not isinstance(idCategory, int):
raise TypeError(f"Wrong type for idCategory ({idCategory})")
if not isinstance(idLabel, int):
raise TypeError(f"Wrong type for idLabel ({idLabel})")
idRole = -1
if idCategory == 0: idRole = list(braille.roleLabels.keys())[idLabel]
elif idCategory == 1: idRole = list(braille.landmarkLabels.keys())[idLabel]
elif idCategory == 2: idRole = list(braille.positiveStateLabels.keys())[idLabel]
elif idCategory == 3: idRole = list(braille.negativeStateLabels.keys())[idLabel]
else: raise ValueError(f"Wrong value for category ({idCategory})")
if not oldRoleLabels and isinstance(idRole, (controlTypes.Role, controlTypes.State)):
idRole = idRole.value
return idRole
def getLabelFromID(idCategory, idLabel):
if idCategory == 0: return braille.roleLabels[int(idLabel)]
if idCategory == 1: return braille.landmarkLabels[idLabel]
if idCategory == 2: return braille.positiveStateLabels[int(idLabel)]
if idCategory == 3: return braille.negativeStateLabels[int(idLabel)]
raise ValueError("Invalid value: %d" % idCategory)
def setLabelFromID(idCategory, idLabel, newLabel):
if idCategory == 0: braille.roleLabels[int(idLabel)] = newLabel
elif idCategory == 1: braille.landmarkLabels[idLabel] = newLabel
elif idCategory == 2: braille.positiveStateLabels[int(idLabel)] = newLabel
elif idCategory == 3: braille.negativeStateLabels[int(idLabel)] = newLabel
else:
raise ValueError(f"Unknown category {idCategory}")
def loadRoleLabels(roleLabels_=None):
global backupRoleLabels, roleLabels
roleLabels.clear()
if roleLabels_:
roleLabels.update(roleLabels_)
elif "roleLabels" in config.conf["brailleExtender"] and config.conf["brailleExtender"]["roleLabels"].copy():
roleLabels.update(config.conf["brailleExtender"]["roleLabels"].copy())
saveRoleLabels(roleLabels)
config.conf["brailleExtender"]["roleLabels"] = {}
elif os.path.exists(PATH_JSON):
f = open(PATH_JSON, "r", encoding="UTF-8")
try:
roleLabels.update(json.load(f))
except json.decoder.JSONDecodeError:
pass
f.close()
for k, v in roleLabels.items():
idCategory, idRole = k.split(':')
idCategory = int(idCategory)
backupRoleLabels[k] = (v, getLabelFromID(idCategory, idRole))
setLabelFromID(idCategory, idRole, v)
def saveRoleLabels(roleLabels_):
f = open(PATH_JSON, 'w')
json.dump(roleLabels_, f, ensure_ascii=False, indent=2)
f.close()
def discardRoleLabels():
global backupRoleLabels, roleLabels
for k, v in backupRoleLabels.items():
idCategory, idRole = k.split(':')
idCategory = int(idCategory)
setLabelFromID(idCategory, idRole, v[1])
backupRoleLabels = {}
roleLabels = {}
|
aaclause/BrailleExtender
|
addon/globalPlugins/brailleExtender/rolelabels.py
|
rolelabels.py
|
py
| 8,877 |
python
|
en
|
code
| 15 |
github-code
|
6
|
[
{
"api_name": "addonHandler.initTranslation",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "languageHandler.getLanguage",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "common.configDir",
"line_number": 17,
"usage_type": "argument"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "gui.settingsDialogs",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "gui.guiHelper.BoxSizerHelper",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "gui.guiHelper",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "wx.CheckBox",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "config.conf",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_CHECKBOX",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "wx.Choice",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_CHOICE",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "controlTypes.roleLabels",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "braille.roleLabels.keys",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "braille.roleLabels",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "wx.Choice",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_CHOICE",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "wx.TextCtrl",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_TEXT",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "gui.guiHelper.ButtonHelper",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "gui.guiHelper",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "wx.HORIZONTAL",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "wx.NewId",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "wx.DefaultPosition",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_BUTTON",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "wx.NewId",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "wx.DefaultPosition",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_BUTTON",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "controlTypes.roleLabels",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "braille.roleLabels.keys",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "braille.roleLabels",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "braille.roleLabels.keys",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "braille.roleLabels",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "braille.landmarkLabels.keys",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "braille.landmarkLabels",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "controlTypes.stateLabels",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "braille.positiveStateLabels.keys",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "braille.positiveStateLabels",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "braille.positiveStateLabels.keys",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "braille.positiveStateLabels",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "controlTypes.stateLabels",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "braille.negativeStateLabels.keys",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "braille.negativeStateLabels",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "braille.negativeStateLabels.keys",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "braille.negativeStateLabels",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "gui.messageBox",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "wx.OK",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "wx.ICON_INFORMATION",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "wx.YES",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "wx.NO",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "wx.ICON_INFORMATION",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "gui.messageBox",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "wx.YES",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "config.conf",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "config.conf",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "braille.roleLabels.keys",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "braille.roleLabels",
"line_number": 175,
"usage_type": "attribute"
},
{
"api_name": "braille.landmarkLabels.keys",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "braille.landmarkLabels",
"line_number": 176,
"usage_type": "attribute"
},
{
"api_name": "braille.positiveStateLabels.keys",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "braille.positiveStateLabels",
"line_number": 177,
"usage_type": "attribute"
},
{
"api_name": "braille.negativeStateLabels.keys",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "braille.negativeStateLabels",
"line_number": 178,
"usage_type": "attribute"
},
{
"api_name": "controlTypes.Role",
"line_number": 180,
"usage_type": "attribute"
},
{
"api_name": "controlTypes.State",
"line_number": 180,
"usage_type": "attribute"
},
{
"api_name": "braille.roleLabels",
"line_number": 185,
"usage_type": "attribute"
},
{
"api_name": "braille.landmarkLabels",
"line_number": 186,
"usage_type": "attribute"
},
{
"api_name": "braille.positiveStateLabels",
"line_number": 187,
"usage_type": "attribute"
},
{
"api_name": "braille.negativeStateLabels",
"line_number": 188,
"usage_type": "attribute"
},
{
"api_name": "braille.roleLabels",
"line_number": 192,
"usage_type": "attribute"
},
{
"api_name": "braille.landmarkLabels",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "braille.positiveStateLabels",
"line_number": 194,
"usage_type": "attribute"
},
{
"api_name": "braille.negativeStateLabels",
"line_number": 195,
"usage_type": "attribute"
},
{
"api_name": "config.conf",
"line_number": 204,
"usage_type": "attribute"
},
{
"api_name": "config.conf",
"line_number": 205,
"usage_type": "attribute"
},
{
"api_name": "config.conf",
"line_number": 207,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 208,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "json.decoder",
"line_number": 212,
"usage_type": "attribute"
},
{
"api_name": "json.dump",
"line_number": 224,
"usage_type": "call"
}
] |
26213379014
|
import time
import numpy as np
from scipy.sparse import csr_matrix
from scipy.special import expit
from tqdm import tqdm
from hw1.base import FactorizationModel
from hw1.utils import log_iter

class BPRModel(FactorizationModel):
    def __init__(self, factors: int, lr: float, iterations: int, lambd: float = 0.,
                 verbose: bool = False, verbose_every: int = 1):
        super().__init__(factors, iterations, verbose, verbose_every)
        self._lr = lr
        self._lambd = lambd
        self._correct_cnt = 0
        self._triplet_acc = 0.

    @staticmethod
    def _sample_negative(user_item: csr_matrix, user: int) -> int:
        neg_item = np.random.choice(user_item.shape[1])
        while user_item[user, neg_item] != 0:
            neg_item = np.random.choice(user_item.shape[1])
        return neg_item

    def _grad_step(self, user: int, pos_item: int, neg_item: int):
        score = expit(self._U[user] @ (self._I[neg_item] - self._I[pos_item]))
        self._correct_cnt += score < 0.5
        grad_user = score * (self._I[neg_item] - self._I[pos_item]) + self._lambd * self._U[user]
        grad_pos = score * -self._U[user] + self._lambd * self._I[pos_item]
        grad_neg = score * self._U[user] + self._lambd * self._I[neg_item]
        self._U[user] -= self._lr * grad_user
        self._I[pos_item] -= self._lr * grad_pos
        self._I[neg_item] -= self._lr * grad_neg

    def _grad_steps(self, user_item: csr_matrix):
        self._triplet_acc = self._correct_cnt = 0
        n_samples = user_item.count_nonzero()
        order = np.random.permutation(n_samples)
        users, items = user_item.nonzero()
        for user, pos_item in zip(users[order], items[order]):
            neg_item = self._sample_negative(user_item, user)
            self._grad_step(user, pos_item, neg_item)
        self._triplet_acc = self._correct_cnt / n_samples

    def fit(self, user_item: csr_matrix) -> "BPRModel":
        self._start_time = time.time()
        self.init_matrices(*user_item.shape)
        for iteration in tqdm(range(self._iterations), disable=not self._verbose):
            self._grad_steps(user_item)
            if self._verbose and (iteration + 1) % self._verbose_every == 0:
                log_iter(iteration + 1, {"Triplet acc": self._triplet_acc}, time.time() - self._start_time)
        return self
|
Sushentsev/recommendation-systems
|
hw1/models/bpr_model.py
|
bpr_model.py
|
py
| 2,367 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "hw1.base.FactorizationModel",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "scipy.sparse.csr_matrix",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "numpy.random.choice",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.choice",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "scipy.special.expit",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.csr_matrix",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "numpy.random.permutation",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "scipy.sparse.csr_matrix",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "hw1.utils.log_iter",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 60,
"usage_type": "call"
}
] |
70766850107
|
from fastapi import APIRouter, Depends
from app.model.param import (
    ListTaskParams,
    NewTasksListParams,
    StopTaskParams,
)
from app.model.response import (
    NewTasksResp,
    ListTasksResp,
    StopTasksResp,
)
from exception import DataExistsError, APIBaseError
from app.model.data import TaskModel, StopTaskModel
from .helper import task as taskhelper
from traceback import format_exc

task_router = APIRouter()

@task_router.get(
    '/list',
    response_model=ListTasksResp
)
async def list_task(
    param: ListTaskParams = Depends(ListTaskParams)
):
    """List tasks."""
    # data = _list_task(param.offset, param.limit)
    data = taskhelper.list(param.offset, param.limit, param.active)
    return ListTasksResp(
        data=data
    )

@task_router.post(
    '/new',
    response_model=NewTasksResp
)
async def create_tasks(
    params: NewTasksListParams,
):
    """Add tasks in batch."""
    data = []
    for url in params.urls:
        try:
            t = taskhelper.create(url, params.options)
            t.run_async()
            errcode = 0
            errmsg = None
        except APIBaseError as err:
            t = taskhelper.get(err.data)
            errcode = err.code
            errmsg = err.msg
        data.append(TaskModel(
            sign=t.sign,
            title=t.title,
            url=t.url,
            errcode=errcode,
            errmsg=errmsg
        ))
    return NewTasksResp(data=data)

@task_router.post(
    '/stop',
    response_model=StopTasksResp
)
async def stop_tasks(
    params: StopTaskParams
):
    data = []
    for key in params.keys:
        try:
            result = taskhelper.stop(key)
            errcode = 0
            errmsg = None
        except APIBaseError as err:
            errcode = err.code
            errmsg = err.msg
        data.append(StopTaskModel(
            errcode=errcode,
            errmsg=errmsg
        ))
    return StopTasksResp(data=data)
|
ZSAIm/VideoCrawlerEngine
|
app/taskflow/routers/task.py
|
task.py
|
py
| 1,962 |
python
|
en
|
code
| 420 |
github-code
|
6
|
[
{
"api_name": "fastapi.APIRouter",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "app.model.param.ListTaskParams",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "fastapi.Depends",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "helper.task.list",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "helper.task",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "app.model.response.ListTasksResp",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "app.model.response.ListTasksResp",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "app.model.param.NewTasksListParams",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "helper.task.create",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "helper.task",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "exception.APIBaseError",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "helper.task.get",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "helper.task",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "app.model.data.TaskModel",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "app.model.response.NewTasksResp",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "app.model.response.NewTasksResp",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "app.model.param.StopTaskParams",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "helper.task.stop",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "helper.task",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "exception.APIBaseError",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "app.model.data.StopTaskModel",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "app.model.response.StopTasksResp",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "app.model.response.StopTasksResp",
"line_number": 68,
"usage_type": "name"
}
] |
3516700430
|
#********************* BGINFO_MULTI ***************************
# Desenvolvido por Frederico de Jesus Almeida
# Analista de Suporte PLENO - Multi
#******************* 06/06/2023 ****************************
import os
import re
import psutil
import socket
import subprocess
import tkinter as tk
def get_ip_address():
ip_local = socket.gethostbyname(socket.gethostname())
return ip_local
def get_mac_address():
# Obtém o endereço MAC do adaptador de rede principal
mac_address = ''
for iface in psutil.net_if_addrs().values():
for addr in iface:
if addr.family == psutil.AF_LINK:
mac_address = addr.address
break
if mac_address:
break
return mac_address
def get_hostname():
# Obtém o nome do host do computador
return socket.gethostname()
def get_username():
# Obtém o nome do usuário logado
return os.getlogin()
def get_domain():
# Obtém o nome de domínio do computador
texto = socket.getfqdn()
if "MLTBR.LOCAL" in texto:
return ("Domínio: 'MLTBR.LOCAL'")
else:
return ("Domínio: NONE")
def update_data():
# Atualiza os dados dos widgets da interface gráfica
hostname_label.config(text='Hostname: ' + get_hostname())
mac_address_label.config(text='MAC: ' + get_mac_address())
ip_address_label.config(text='IP: ' + get_ip_address())
username_label.config(text='Usuário : ' + get_username())
domain_label.config(text=get_domain())
network_type = get_network_type()
network_type_label.config(text='' + network_type)
# Aguarda 5 minutos e chama a função update_data novamente
root.after(300000, update_data)
#Função que verifica se esta no wifi ou no cabo
def verificar_conectado(linha):
padrao = r"\bConectado\b"
resultado = re.search(padrao, linha)
if resultado:
return False
else:
return True
#Função que retorna o tipo da conexão
def get_network_type():
# Chama a função no CMD
output = subprocess.check_output('netsh interface show interface | findstr "Ethernet"', shell=True)
# Decodifica a saída para uma string legível
output = output.decode('utf-8')
#Verifica se esta conectado no wi-fi ou no cabo
if verificar_conectado(output):
wifi = subprocess.check_output('netsh wlan show interfaces | findstr "Faixa"', shell=True)
wifi = wifi.decode('utf-8')
wifi = wifi.replace(" ", "")
return (wifi)
else:
wifi = 'Conexão: Cabeada'
return (wifi)
get_network_type()
# Cria a janela principal
root = tk.Tk()
root.title('Sistema')
# Configura o fundo da janela para ser transparente
root.attributes('-alpha', 0.5)
# Oculta a barra de título
root.overrideredirect(True)
# Define a posição da janela no canto inferior direito
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
window_width = 300
window_height = 180
x_position = screen_width - window_width
y_position = screen_height - window_height
root.geometry('{}x{}+{}+{}'.format(window_width, window_height, x_position, y_position))
# Cria os widgets da interface
hostname_label = tk.Label(root, text='Hostname: ' + get_hostname(), anchor='w', justify='left')
mac_address_label = tk.Label(root, text='MAC: ' + get_mac_address(), anchor='w', justify='left')
ip_address_label = tk.Label(root, text='IP: ' + get_ip_address(), anchor='w', justify='left')
username_label = tk.Label(root, text='Usuário: ' + get_username(), anchor='w', justify='left')
domain_label = tk.Label(root, text=get_domain(), anchor='w', justify='left')
network_type_label = tk.Label(root, text='' + get_network_type(), anchor='w', justify='left')
# Posiciona os widgets na janela
hostname_label.pack()
mac_address_label.pack()
ip_address_label.pack()
username_label.pack()
domain_label.pack()
network_type_label.pack()
# Aguarda 5 minutos e chama a função update_data
root.after(30000, update_data)
# Inicia o loop da interface gráfica
root.mainloop()
|
Frederico02/info-sistema
|
main_final.py
|
main_final.py
|
py
| 4,077 |
python
|
pt
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "socket.gethostbyname",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "socket.gethostname",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "psutil.net_if_addrs",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "psutil.AF_LINK",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "socket.gethostname",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.getlogin",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "socket.getfqdn",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "subprocess.check_output",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "subprocess.check_output",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 123,
"usage_type": "call"
}
] |
33188473740
|
# -*-coding:utf-8-*-
import logging
from datetime import datetime

class MyLogger():
    def __init__(self, name):
        self.logger = logging.getLogger(name)
        self.handler = logging.FileHandler(filename='logging/%s.log' % name)
        self.logger.addHandler(self.handler)

    def warning(self, info):
        msg = '%s : %s \n==========================\n' % (datetime.now().strftime('%Y-%m-%d %H:%M:%S'), info)
        self.logger.warning(msg)

if __name__ == '__main__':
    logger = MyLogger('test')
    logger.warning('test msg')
|
xxxx-hhhh/spider
|
baojianhui_spider/my_logging.py
|
my_logging.py
|
py
| 546 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.FileHandler",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 13,
"usage_type": "name"
}
] |
23896439023
|
# repeat_bot.py
from bot.common import verify_user, job_name
from dotenv import load_dotenv
from bot.messages import account_summary
from telegram import Update
from telegram.ext import Application, CommandHandler, ContextTypes
from data_model import BotConfig
from utils import load_config
load_dotenv()
class PostHelp:
def __init__(self, cfg: BotConfig):
self.cfg = cfg
async def post_help_info(self, update: Update, context: ContextTypes.DEFAULT_TYPE): # pylint: disable=W0613
if await verify_user(update=update, auth_users=self.cfg.auth.telegram.users):
text = [
"/help to view this text",
"/set [number] to set how often the message should be posted",
"/stop to stop the repeating message",
"/jobs to see what repeating message is currently working",
]
text = "\n".join(text)
await update.message.reply_text(text)
class RepeatMessage:
def __init__(self, cfg: BotConfig):
self.cfg = cfg
async def send_message(self, context: ContextTypes.DEFAULT_TYPE):
job = context.job
text = await account_summary(cfg=self.cfg)
await context.bot.send_message(
job.chat_id,
message_thread_id=self.cfg.chat.message_thread_id,
text=text
)
class StopRepeatMessage:
def __init__(self, cfg: BotConfig):
self.cfg = cfg
async def stop(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
current_jobs = context.job_queue.get_jobs_by_name(self.cfg.name)
if len(current_jobs) > 0:
for job in current_jobs:
job.schedule_removal()
await update.effective_message.reply_text(
"succesfully stopped repeat message"
)
return
await update.effective_message.reply_text(
"there are no repeating message jobs to stop"
)
class SetTimer:
def __init__(self, cfg: BotConfig):
self.cfg = cfg
async def set_timer(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
if await verify_user(update=update, auth_users=self.cfg.auth.telegram.users):
try:
interval = float(context.args[0])
if interval < 0:
await update.effective_message.reply_text(
"interval must be numeric and greater than zero"
)
return
message_function = RepeatMessage(cfg=self.cfg)
context.job_queue.run_repeating(
message_function.send_message,
interval=interval,
chat_id=self.cfg.chat.chat_id,
name=self.cfg.name,
data=interval
)
text = f"repeating message every {interval} seconds"
await update.effective_message.reply_text(text)
except (IndexError, ValueError):
await update.effective_message.reply_text(
"The interval has to be a number, interpreted as seconds"
)
class Jobs:
def __init__(self, cfg: BotConfig):
self.cfg = cfg
async def post_job_status(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
if await verify_user(update=update, auth_users=self.cfg.auth.telegram.users):
current_jobs = context.job_queue.get_jobs_by_name(self.cfg.name)
if len(current_jobs) > 0:
text = job_name(cfg=self.cfg)
await update.effective_message.reply_text(text=text)
return
text = "idle, no jobs"
await update.effective_message.reply_text(text=text)
def repeat_bot(cfg: BotConfig):
# cfg = load_config(bot_name=bot_name)
post_help = PostHelp(cfg=cfg)
set_timer = SetTimer(cfg=cfg)
jobs = Jobs(cfg=cfg)
stop_message = StopRepeatMessage(cfg=cfg)
application = Application.builder().token(cfg.auth.telegram.token).build()
application.add_handler(CommandHandler("help", post_help.post_help_info))
application.add_handler(CommandHandler("set", set_timer.set_timer))
application.add_handler(CommandHandler("stop", stop_message.stop))
application.add_handler(CommandHandler("jobs", jobs.post_job_status))
application.run_polling()
|
KD6-Dash-37/telegram-chat-bot
|
bot/repeat_bot.py
|
repeat_bot.py
|
py
| 4,481 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "dotenv.load_dotenv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "data_model.BotConfig",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "telegram.Update",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "telegram.ext.ContextTypes.DEFAULT_TYPE",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "telegram.ext.ContextTypes",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "bot.common.verify_user",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "data_model.BotConfig",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "telegram.ext.ContextTypes.DEFAULT_TYPE",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "telegram.ext.ContextTypes",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "bot.messages.account_summary",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "data_model.BotConfig",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "telegram.Update",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "telegram.ext.ContextTypes.DEFAULT_TYPE",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "telegram.ext.ContextTypes",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "data_model.BotConfig",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "telegram.Update",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "telegram.ext.ContextTypes.DEFAULT_TYPE",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "telegram.ext.ContextTypes",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "bot.common.verify_user",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "data_model.BotConfig",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "telegram.Update",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "telegram.ext.ContextTypes.DEFAULT_TYPE",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "telegram.ext.ContextTypes",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "bot.common.verify_user",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "bot.common.job_name",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "data_model.BotConfig",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "telegram.ext.Application.builder",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "telegram.ext.Application",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "telegram.ext.CommandHandler",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "telegram.ext.CommandHandler",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "telegram.ext.CommandHandler",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "telegram.ext.CommandHandler",
"line_number": 169,
"usage_type": "call"
}
] |
8660192902
|
import nltk
nltk.download('stopwords')
nltk.download('punkt')
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, sent_tokenize

# global set of stopwords
english_stopwords = set(stopwords.words('english'))

def tokenizeText(content):
    global english_stopwords
    # returns a list of tokens found in the given pathname
    tokens = word_tokenize(content)
    tokensWithoutStopWords = []
    for word in tokens:
        if word not in english_stopwords:
            tokensWithoutStopWords.append(word)
    # print(Simhash(tokensWithoutStopWords))
    return tokensWithoutStopWords

def computeWordFrequencies(tokens):
    mydict = dict()
    for token in tokens:
        frequency = 1
        if token not in mydict.keys():
            mydict[token] = frequency
        else:
            mydict[token] += frequency
    return mydict
|
daveA420/ics121Crawler
|
newParser.py
|
newParser.py
|
py
| 857 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "nltk.download",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "nltk.download",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "nltk.tokenize.word_tokenize",
"line_number": 14,
"usage_type": "call"
}
] |
3439809361
|
# Definition for a binary tree node.
# class TreeNode(object):
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None
from collections import deque

class Solution(object):
    def widthOfBinaryTree(self, root):
        """
        :type root: TreeNode
        :rtype: int
        """
        if root is None:
            return 0
        maxWidth = 1
        q = deque([(0, root)])
        while len(q) != 0:
            cnt = len(q)
            start = q[0]
            end = q[-1]
            width = end[0] - start[0] + 1
            maxWidth = max(maxWidth, width)
            while cnt > 0:
                cnt -= 1
                idx, node = q.popleft()
                if node.left is not None:
                    q.append((idx * 2, node.left))
                if node.right is not None:
                    q.append((idx * 2 + 1, node.right))
        return maxWidth
|
cuiy0006/Algorithms
|
leetcode/662. Maximum Width of Binary Tree.py
|
662. Maximum Width of Binary Tree.py
|
py
| 957 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.deque",
"line_number": 18,
"usage_type": "call"
}
] |
71611302909
|
import json
import open3d as o3d
import numpy as np
import os
import trimesh
import zipfile
from tqdm import tqdm
import matplotlib.pyplot as plt
plt.style.use('bmh')
default_color = [0,0.5,1]
cube = np.array([
[0,0,0], [1,0,0], [1,1,0], [0,1,0],
[0,0,1], [1,0,1], [1,1,1], [0,1,1],
])
'''plt figure'''
def plt_show_save(data, title, save_path=None, xname='', bins=50):
plt.cla()
plt.figure(figsize=(12,9))
if type(data) == dict:
plt.bar(data.keys(), data.values())
# plt.xticks(rotation=90)
else:
plt.hist(data, bins=bins)
plt.title(title)
plt.ylabel('value')
plt.xlabel(xname)
if save_path is not None:
plt.savefig(save_path)
else:
plt.show()
def get_pcd(pc, color=default_color):
pc = np.array(pc)
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(pc)
    pcd.paint_uniform_color(color)  # the default is a rainbow gradient; paint a single uniform color instead
return pcd
def get_o3d_FOR(origin=[0, 0, 0],size=0.1):
mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=size)
mesh_frame.translate(origin)
return(mesh_frame)
def show_pcds(pcds, wname='Open3D', FOR=0.1):
if FOR:
pcds.append(get_o3d_FOR(size = FOR))
o3d.visualization.draw_geometries(pcds, width=800, height=800, window_name=wname)
def csv2box(csv_path):
obb_info = np.loadtxt(open(csv_path, 'r'),delimiter = ",") # (5,4)
center = obb_info[0,:3]
dirs = 0.5 * (obb_info[2:,:3] * obb_info[2:,-1].reshape(3,1) )
val = cube*2 - 1
vec = np.matmul(val, dirs) # (8,3)@(3,3)
corner = center.reshape(1,3) + vec
return corner,dirs
def add_thickness(pc, direction, scale):
direction = direction / np.linalg.norm(direction)
noise = np.random.normal(0, scale, (pc.shape[0],1))
return pc + noise * direction.reshape(1,3)
def PCA(data, sort=True):
average_data = np.mean(data,axis=0)
decentration_matrix = data - average_data
H = np.dot(decentration_matrix.T,decentration_matrix)
eigenvectors,eigenvalues,eigenvectors_T = np.linalg.svd(H)
if sort:
sort = eigenvalues.argsort()[::-1]
eigenvalues = eigenvalues[sort]
eigenvectors = eigenvectors[:, sort]
return eigenvalues, eigenvectors
def box_from_pc(pc, color=default_color, aabb=False, return_corner=True):
pcd = get_pcd(pc)
box = pcd.get_axis_aligned_bounding_box() if aabb else \
pcd.get_oriented_bounding_box()
if return_corner:
corner = np.array(box.get_box_points())
return corner
else:
box.color = color
return box
def box_from_corner(corner, color=default_color, aabb=False):
corner = np.asarray(corner)
box = o3d.geometry.AxisAlignedBoundingBox() if aabb else \
o3d.geometry.OrientedBoundingBox()
box = box.create_from_points(o3d.utility.Vector3dVector(corner))
box.color = color
return box
def box2cen_dir(box:np.ndarray):
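    # The six face centers of the box: three are averaged from corner subsets of the sorted box,
    # the opposite three are mirrored through the box center.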
centers = np.zeros((6,3))
sorted_box = sort_pts(box)
v1 = sorted_box[1]-sorted_box[0]
v2 = sorted_box[3]-sorted_box[0]
cos = v1@v2 / (np.linalg.norm(v1) * np.linalg.norm(v2))
if abs(cos) < 0.001:
tmp = sorted_box[3].copy()
sorted_box[3] =sorted_box[4]
sorted_box[4] = tmp
# 0246, 0145
centers[0] = sorted_box[:4].mean(axis=0)
centers[1] = sorted_box[[0,2,4,6]].mean(axis=0)
centers[2] = sorted_box[[0,1,4,5]].mean(axis=0)
centers[3:] = 2 * box.mean(0).reshape(1,3) - centers[:3]
return centers
def box2dir(box:np.ndarray):
sorted_box = np.array(sorted(box, key = lambda x:x[0]) )
dirs3 = sorted_box[1:4] - sorted_box[0].reshape(1,-1)
cos = cosine(dirs3, dirs3).flatten()
idx = np.argmin(cos)
if cos[idx]<1e-3:
d1 = idx//3
d2 = idx%3
left_dir = np.cross(dirs3[d1], dirs3[d2])
return np.vstack([dirs3[d1], dirs3[d2], left_dir])
else:
return None
def aabb_dirs(pc):
mins = pc.min(0)
maxs = pc.max(0)
dirs = np.eye(3,3) * (maxs-mins).reshape(1,3) / 2
center = (mins + maxs) / 2
corners = center.reshape(1,3) + (cube*2-1)@dirs
return corners, dirs
def obb_2dirs(pc, axis, return_corner=True):
else_axis = [0,1,2]
else_axis.pop(axis)
sub_pc = pc[:,else_axis]
cov_pts = np.cov(sub_pc, y=None, rowvar=False, bias=True)
v, vect = np.linalg.eig(cov_pts)
tvect = vect.T
rpts = np.dot(sub_pc, np.linalg.inv(tvect))
mina = np.min(rpts, 0)
maxa = np.max(rpts, 0)
diff = (maxa - mina)*0.5
center = mina + diff
corners = center.reshape(-1,2) + np.array([
[-1,-1], [1,-1], [1,1], [-1,1]
]) * diff.reshape(-1,2)
corners = np.dot(corners, tvect) # (4,2)
axis_pc = pc[:, axis]
axis_min,axis_max = axis_pc.min(), axis_pc.max()
cor1 = np.insert(corners, axis, axis_min, axis=1)
cor2 = np.insert(corners, axis, axis_max, axis=1)
corners = np.vstack([cor1,cor2])
center = corners.mean(0)
dirs = (corners[[1,3,4]] - corners[0].reshape(1,3))/2
if return_corner:
return corners, dirs
else:
return center, dirs
def obb_adjust(pc:np.ndarray, fix_dir:np.array, ori_dir:np.array):
'''ori_dir should be [0,0,1] or [0,1,0] or [1,0,0]'''
axis = np.argmax(ori_dir)
fix_dir = fix_dir / np.linalg.norm(fix_dir)
ori_dir = ori_dir / np.linalg.norm(ori_dir)
cro = np.cross(ori_dir, fix_dir)
cos = ori_dir@fix_dir
if abs(cos)>0.99:
return obb_2dirs(pc, axis, True)
vx = np.array([
[0, -cro[2], cro[1]],
[cro[2], 0, -cro[0]],
[-cro[1], cro[0], 0 ]
])
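    # Rodrigues' formula for the rotation taking ori_dir onto fix_dir:
    # R = I + [v]_x + [v]_x^2 / (1 + cos), with vx the skew-symmetric matrix of cro.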
rot_w = np.eye(3,3) + vx + np.matmul(vx,vx) / (1+cos)
rot_verse = np.linalg.inv(rot_w)
rot_pc = np.matmul(pc, rot_verse.T)
center, dirs = obb_2dirs(rot_pc, axis, False)
# dirs[-1][:2] = 0
# dirs[-1,-1] = rot_pc[:,axis].max() - rot_pc[:,axis].min()
cen = center.reshape(-1,3)
dirs = np.matmul(dirs, rot_w.T)
box = (cube*2 - 1)@dirs + cen@rot_w.T
return box, dirs
def pts2pts_dis(pts1,pts2):
diff = pts1.reshape((-1, 1, 3)) - pts2.reshape((1, -1, 3))
distance = (diff**2).reshape((-1,3)).sum(axis=-1)
return distance
def sort_pts(box):
uniques = []
for i in range(3):
uni = np.unique(box[:,i]).shape[0]
uniques.append(uni<8) # and uni//2==0
if sum(uniques)==0: uniques[0] = True
sorted_box = np.array(sorted(box, key = lambda x:x[uniques].sum()))
return sorted_box
def pc2mesh(pts):
pts = np.asarray(pts)
pcd = get_pcd(pts)
pcd.estimate_normals()
distances = pcd.compute_nearest_neighbor_distance()
avg_dist = np.mean(distances)
radius = 1.5 * avg_dist
mesh = o3d.geometry.TriangleMesh.create_from_point_cloud_ball_pivoting(
pcd, o3d.utility.DoubleVector([radius, radius * 2]), )
# return np.asarray(mesh.triangles)
return mesh
def pc_from_mesh(obj_path, npoints):
mesh = o3d.io.read_triangle_mesh(obj_path)
pts = mesh.sample_points_uniformly(number_of_points=npoints)
return np.array(pts.points)
def load_mesh(obj_path):
return trimesh.load(obj_path, 'obj', force='mesh')
def merge_mesh(meshs):
merged_mesh = trimesh.util.concatenate(meshs)
return merged_mesh
def write_mesh(mesh, path, normal=False, color=False):
o3d.io.write_triangle_mesh(
path, mesh, write_vertex_normals=normal, write_vertex_colors=color
)
def gen_meshs(obj_folder, hier_tree, npoints=1024):
all_node_mesh = {}
for node in hier_tree:
id_ = node['id']
if 'children' in node.keys():
sub_mesh = gen_meshs(obj_folder, node['children'], npoints)
all_node_mesh = {**all_node_mesh, **sub_mesh}
child_mesh = [sub_mesh[me['id']] for me in node['children']]
node_mesh = merge_mesh(child_mesh)
all_node_mesh[id_] = node_mesh
else:
meshs = []
for obj_name in node['objs']:
obj_path = os.path.join(obj_folder, obj_name+'.obj')
mesh = load_mesh(obj_path)
meshs.append(mesh)
if len(meshs)>1:
meshs = merge_mesh(meshs)
else:
meshs = meshs[0]
all_node_mesh[id_] = meshs
return all_node_mesh
def get_leaves(tree, only=None, flatten=False, pop_child=True):
leaf_parts = []
for node in tree:
data = node[only] if only is not None else node
if 'children' not in node.keys():
leaf_parts.append(data)
else:
node_list = get_leaves(node['children'], only, flatten) # [{...},] with parent+children idx
leaf_parts.extend(node_list)
if flatten:
if only == None:
data = data.copy()
if pop_child:data.pop('children')
leaf_parts.append(data)
return leaf_parts
def hier2graphic(hier_tree, parent_id=-1, depth=0):
all_nodes = {}
for node in hier_tree:
renode = {
'name': node['name'],
'objs': node['objs'] if 'objs' in node.keys() else [],
'parent': parent_id,
'depth': depth,
'box': node['box'] if 'box' in node.keys() else [],
'brother':[],
'children_id': [],
'leaves': get_leaves([node], 'id'),
}
if 'children' in node.keys():
children_nodes = hier2graphic(node['children'], node['id'], depth+1)
all_nodes = {**all_nodes, **children_nodes}
renode['children_id'] = [i['id'] for i in node['children']]
all_nodes[node['id']] = renode
for child in renode['children_id']:
all_nodes[child]['brother'] = renode['children_id'][:]
all_nodes[child]['brother'].remove(child)
return all_nodes
def update_mopara(hash_hier, ids=[0]):
main_child = ids[:]
for key in ids:
if hash_hier[key]['children_id'] != []:
tree, mochild = update_mopara(hash_hier, hash_hier[key]['children_id'] )
hash_hier = {**hash_hier, **tree}
mopara = {'jointData':{}, 'joint':'', 'motype':''}
node = hash_hier[mochild]
if 'ref' in node.keys() and key!=0:
mopara['jointData'] = node['jointData']
mopara['joint'] = node['joint']
if 'motype' in node.keys(): mopara['motype'] = node['motype']
refs = node['ref'][:]
for idx,ref in enumerate(refs):
while(hash_hier[ref]['depth'] > hash_hier[key]['depth']):
ref = hash_hier[ref]['parent']
refs[idx] = ref
mopara['ref'] = list(set(refs))
for ref in mopara['ref']:
if ref in main_child and ref != key:
main_child.remove(key)
break
hash_hier[key] = {**hash_hier[key], **mopara}
elif 'ref' in hash_hier[key].keys():
refs = hash_hier[key]['ref']
for idx,ref in enumerate(refs):
while(hash_hier[ref]['depth'] > hash_hier[key]['depth']):
ref = hash_hier[ref]['parent']
hash_hier[key]['ref'][idx] = ref
hash_hier[key]['ref'] = list(set(hash_hier[key]['ref']))
for ref in hash_hier[key]['ref']:
if ref in main_child and ref != key:
main_child.remove(key)
break
return hash_hier, main_child[0]
def gen_graph(hier_tree, mobi):
'''
    Convert the hierarchy tree into a graph.
'''
hash_hier = hier2graphic(hier_tree)
for idx,node in enumerate(mobi):
# mobi[idx]['ids'] = [i['id'] for i in node['parts']]
mopara = {'jointData':{}, 'joint':'', 'motype':''}
if node['jointData'] != {}:
mopara['jointData'] = node['jointData']
mopara['joint'] = node['joint']
if 'motype' in node.keys(): mopara['motype'] = node['motype']
if node['parent'] != -1 and 'parts' in mobi[node['parent']].keys():
ref = [j['id'] for j in mobi[node['parent']]['parts']]
mopara['ref'] = ref
for sub_node in node['parts']:
sub_id = sub_node['id']
hash_hier[sub_id] = {**hash_hier[sub_id], **mopara}
graph, _ = update_mopara(hash_hier)
statics = {}
for key in graph.keys():
if 'ref' in graph[key].keys():
refs = graph[key]['ref'][:]
for ref in refs:
if graph[key]['parent'] != graph[ref]['parent'] or ref == key:
graph[key]['ref'].remove(ref)
if graph[key]['ref'] == []:
graph[key].pop('ref')
for key in graph.keys():
node = graph[key]
graph[key]['edges'] = {
'children':{},
'space':{}
}
for child in graph[key]['children_id']:
graph[key]['edges']['children'][child] = ''
brothers = graph[key]['brother'][:]
if 'ref' in graph[key].keys():
for bro in brothers:
if bro in graph[key]['ref']:
graph[key]['edges']['space'][bro] = 'motion'
else:
graph[key]['edges']['space'][bro] = 'none'
graph[key].pop('ref')
else:
for bro in brothers:
graph[key]['edges']['space'][bro] = 'none' if 'ref' in graph[bro].keys() else 'fixed'
return graph, statics
def ref_count(graph):
for key in graph.keys():
edges = graph[key]['edges']['space']
refs = [r for r in edges.keys() if edges[r]=='motion']
graph[key]['refs'] = refs
invalids, child_allref, expect = reduce_ref(graph)
return invalids, expect
def reduce_ref(graph, node_key='0'):
ref_child = set()
all_invalid = 0
flgs = 0
for child in graph[node_key]['children_id']:
child = str(child)
if graph[child]['refs'] == []:
ref_child.add(int(child))
if graph[child]['children_id'] != []:
invalid, flg, expect = reduce_ref(graph, child)
all_invalid += invalid if flg else invalid-1
flgs += 1-flg
children_allref = False
if len(ref_child)==0 and graph[node_key]['brother'] == []:
children_allref = True
elif len(ref_child) and ref_child == set(graph[node_key]['children_id']) \
and not flgs:
children_allref = True
all_invalid += len(ref_child)
# print('%s invalids:%d'%(node_key, all_invalid))
return all_invalid, children_allref, (flgs if flgs else 1)
'''direction, angle, pos, ...'''
def cosine(dirs, vec, abs=True):
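    # Cosine of the angle between each row of dirs and each row of vec (absolute value by default).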
vec = vec.reshape(-1,3)
vec = vec / np.linalg.norm(vec, axis=-1).reshape(-1,1) # (1-n, 3) -> (1-n,) or val
    mul_res = dirs @ vec.T
cos = mul_res / np.linalg.norm(dirs, axis=-1).reshape(-1,1)
if abs: cos = np.abs(cos)
return cos
def cross(dirs, vec, abs=False):
# vec = vec / np.linalg.norm(vec)
cro = np.cross(dirs, vec)
cro = cro / np.linalg.norm(cro, axis=-1)
if abs:
cro = np.abs(cro)
return cro
def motion_pos(direction, gt_pos, pos):
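    # Perpendicular distance from each candidate position in pos to the line through gt_pos
    # along direction; returns the index of the closest candidate and that distance.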
direction= direction / np.linalg.norm(direction)
cro = np.cross(pos - gt_pos.reshape(1,3), direction)
dis = np.abs(np.linalg.norm(cro, axis=-1))
min_idx = np.argmin(dis)
return min_idx, dis[min_idx]
def read_json(json_path):
return json.load(open(json_path, 'r'))
def get_boolseg(seg:np.ndarray, mov_idx):
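    # Boolean mask over points: True where a point's segmentation label is one of mov_idx.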
mov_idx = np.array(mov_idx).reshape((-1,1)) # (n,1), and seg(1,N)
return ( seg.reshape((1,-1)) == mov_idx ).sum(axis=0) == 1.
if __name__ == '__main__':
pass
|
GengxinLiu/SWMP
|
Extern/tools/mobility_tool.py
|
mobility_tool.py
|
py
| 14,370 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "matplotlib.pyplot.style.use",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.cla",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.bar",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.hist",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "open3d.geometry.PointCloud",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "open3d.geometry",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "open3d.utility.Vector3dVector",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "open3d.utility",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "open3d.geometry.TriangleMesh.create_coordinate_frame",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "open3d.geometry",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "open3d.visualization.draw_geometries",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "open3d.visualization",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "numpy.loadtxt",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.matmul",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.normal",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "numpy.mean",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.svd",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "open3d.geometry.AxisAlignedBoundingBox",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "open3d.geometry",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "open3d.geometry.OrientedBoundingBox",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "open3d.geometry",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "open3d.utility.Vector3dVector",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "open3d.utility",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "numpy.cross",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "numpy.eye",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "numpy.cov",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.eig",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "numpy.dot",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.inv",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "numpy.min",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "numpy.insert",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "numpy.insert",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "numpy.argmax",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 177,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 178,
"usage_type": "attribute"
},
{
"api_name": "numpy.cross",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "numpy.eye",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "numpy.matmul",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.inv",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 191,
"usage_type": "attribute"
},
{
"api_name": "numpy.matmul",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "numpy.matmul",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "open3d.geometry.TriangleMesh.create_from_point_cloud_ball_pivoting",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "open3d.geometry",
"line_number": 227,
"usage_type": "attribute"
},
{
"api_name": "open3d.utility.DoubleVector",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "open3d.utility",
"line_number": 228,
"usage_type": "attribute"
},
{
"api_name": "open3d.io.read_triangle_mesh",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "open3d.io",
"line_number": 233,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "trimesh.load",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "trimesh.util.concatenate",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "trimesh.util",
"line_number": 241,
"usage_type": "attribute"
},
{
"api_name": "open3d.io.write_triangle_mesh",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "open3d.io",
"line_number": 245,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 264,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 460,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 460,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 463,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 463,
"usage_type": "attribute"
},
{
"api_name": "numpy.abs",
"line_number": 465,
"usage_type": "call"
},
{
"api_name": "numpy.cross",
"line_number": 470,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 471,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 471,
"usage_type": "attribute"
},
{
"api_name": "numpy.abs",
"line_number": 473,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 477,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 477,
"usage_type": "attribute"
},
{
"api_name": "numpy.cross",
"line_number": 478,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 479,
"usage_type": "call"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 479,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 479,
"usage_type": "attribute"
},
{
"api_name": "numpy.argmin",
"line_number": 480,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 484,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 486,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 487,
"usage_type": "call"
}
] |
70315957309
|
"""
bony_downloader.py
module contains BonyDownloader class to provide provider specific functionality
"""
__author__ = 'Dattatraya Tembare<[email protected]>'
import datetime
import itertools
import logging
import lxml.html
import requests
from common.download_exceptions import DownloadException
from download.file_downloader import FileDownloader
class BonyDownloader(FileDownloader):
"""
BonyDownloader class has functions for parsing page source code
parse() : implementation for 'BONY' provider
"""
def authenticate(self, provider):
"""
Step 1:: Authenticate and login to provider's portal
:param provider: provider
:return: requests session
"""
logging.debug('BonyDownloader:authenticate')
auth_config = self.configs.auth_config[provider]
access_config = self.configs.access_config[provider]
session = requests.Session()
logging.debug(f':::1 Connect to {access_config["login-url"]} and get cookies')
session.get(access_config['login-url'])
logging.debug(f':::2 Call {access_config["auth-url"]} page')
# requests will use the available cookies from session
try:
res1 = session.post(access_config["auth-url"], data=auth_config)
if self._login_failed(provider, res1):
raise DownloadException('2000_AUTHENTICATION_FAILED',
custom_message=f"Authentication failed for {provider}")
logging.debug(f'Login status :: {res1.status_code}')
# BONY request need certificate key for each request
f_html = self.utils.format_html(res1.text)
tree = lxml.html.fromstring(f_html)
csrf_key = tree.xpath('//form[@name="NavForm"]/input[@name="csrfKey"]/@value')[0]
except Exception as e:
raise DownloadException('2000_AUTHENTICATION_FAILED', e) from None
return session, {'for_next_params': True, 'csrfKey': csrf_key}
def _login_failed(self, provider, response):
if 'Invalid Login' in response.text:
return True
else:
return False
def access(self, session, **opts):
"""
Step 2:: Pull access URL/s from configs file and use it to pull page source which has URLs for file download
after method execution a_url['deal_info_dict_list'] appended to opts dictionary
TODO Use namedtuple DealInfo to make current dictionary generic to all providers
:param session: session with site cookies
:param opts: user/commandline inputs
:return: None
"""
logging.debug('FileDownloader:access')
provider = opts['provider']
previous_url_results = list()
for a_url in opts['access_urls']:
logging.debug(f':::3 Send request to {a_url} page')
# Pull input parameters to append as a query string
user_config = opts['user_input_config'] if 'user_input_config' in opts else None
user_inputs = user_config['input'] if user_config else self.configs.user_input_config[provider][
'input']
deal_info_list = self._prepare_params(a_url, user_inputs)
# Update URL with values pulled from previous page response
deal_info_list = self._use_previous_url_result(deal_info_list, previous_url_results)
# After use clean the previous_url_results
previous_url_results = []
for deal_info in deal_info_list:
params = deal_info['params']
from_opts = opts['response_dict'] if 'response_dict' in opts else {}
params = {**params, **from_opts}
opts['response_dict'] = {}
try:
if a_url['method'] == 'POST':
res = session.post(deal_info['link'], data=params)
elif a_url['method'] == 'GET':
res = session.get(deal_info['link'], params=params)
except Exception as e:
raise DownloadException('3000_ACCESS_FAILED', e)
logging.debug(f'status code :: {res.status_code} history :: {res.history} response URL :: {res.url}')
f_html = self.utils.format_html(res.text)
tree = lxml.html.fromstring(f_html)
for ele_name, ele_value in a_url['result-dict'].items():
if 'for_next_params' in ele_name:
_result = self._dict_for_next_url(ele_value, tree)
_result['for_next_params'] = True
previous_url_results.append(_result)
deal_info['for_next_params'] = _result
opts['response_dict'] = {'csrfKey': _result['csrfKey']}
elif 'for_next_url' in ele_name:
_result = self._dict_for_next_url(ele_value, tree)
_result['for_next_url'] = True
previous_url_results.append(_result)
elif 'deal_info' in ele_name:
deal_info['deal_info'] = self._dict_for_next_url(ele_value, tree)
elif 'for_parsing' in ele_name:
f_html_trees = list()
for xp in ele_value:
f_html_trees.append(tree.xpath(xp))
deal_info['f_html'] = f_html_trees
a_url['deal_info_dict_list'] = deal_info_list
def _prepare_params(self, a_url, user_inputs):
# pull mandatory input parameters from access-config
input_param_dict = a_url['input-param']
# prepare links for next request/s
links_with_params = list()
for attr_name, attr_values in user_inputs.items():
for attr_value in attr_values:
req_body = input_param_dict.copy()
req_body[attr_name] = attr_value
links_with_params.append({'link': a_url['url'], 'params': req_body})
return links_with_params
def _use_previous_url_result(self, links, previous_url_results):
if len(links) == len(previous_url_results):
for link, previous_url_result in zip(links, previous_url_results):
if 'hd_deal_number' in previous_url_result:
deal_num = previous_url_result['hd_deal_number']
deal_num = deal_num[:deal_num.index('~')] if deal_num else deal_num
previous_url_result['hd_deal_number'] = deal_num
if 'for_next_params' in previous_url_result:
link['params'] = {**link['params'], **previous_url_result}
else:
for link, previous_url_result in itertools.product(links, previous_url_results):
if 'for_next_params' in previous_url_result:
link['params'] = {**link['params'], **previous_url_result}
return links
def _dict_for_next_url(self, input_dict, tree):
# print(f'table.text :: {etree.tostring(tree)}')
result_dict = dict()
for k, xp in input_dict.items():
try:
xp_result = tree.xpath(xp)
result_dict[k] = ''.join(xp_result).strip()
except Exception as e:
raise DownloadException('3000_ACCESS_FAILED', e)
return result_dict
def parse(self, **opts):
"""
method parses the 'BONY' specific page source using xpath from access-configs, after method execution
a_url['download_urls'] appended to opts dictionary
:param opts: user/commandline inputs + a_url['deal_info_dict_list']
:return:
"""
logging.debug('BonyDownloader:parse')
out_dir = opts['output']
provider = opts['provider']
for a_url in opts['access_urls']:
download_urls = list()
for deal_info_dict in a_url['deal_info_dict_list']:
if 'f_html' in deal_info_dict:
f_url = a_url['for_download_urls']['download_url']
input_dict = a_url['for_download_urls']['request_body'].copy()
for k, v in deal_info_dict['for_next_params'].items():
if 'for_next_params' not in k:
input_dict[k] = v
deal_name = deal_info_dict['deal_info']['deal_name']
for trs in deal_info_dict['f_html']:
for tr in trs:
# print(f'table.text :: {etree.tostring(tr)}')
report_id = tr.xpath('td/input[@name="cb_rpt_id"]/@value')
report_name = ''.join(tr.xpath('td[2]/a/text()')).strip()
if len(report_id) > 0:
report_id = report_id[0][:report_id[0].index('~')]
payment_date = tr.xpath('td[6]/text()')
if len(payment_date) > 0:
payment_date = payment_date[0].strip()
dt = datetime.datetime.strptime(payment_date, "%d-%b-%Y")
for span in tr.xpath('td/span[@class="RecordNormalText"]/input'):
report_ext_key = span.xpath('@name')[0]
report_ext_value = span.xpath('@value')[0]
file_extension = report_ext_value[report_ext_value.index('~') + 1:]
input_dict_copy = dict(input_dict)
input_dict_copy['hd_avl_rpt_id'] = report_id
input_dict_copy[report_ext_key] = report_ext_value
input_dict_copy['lb_reportdate'] = dt.strftime("%B") + '++' + str(dt.year)
input_dict_copy['hd_extension'] = file_extension
o_file = out_dir + '/' + str(dt.year) + '-' + str(dt.month) + '/' + provider + '/'
o_file += (deal_name + ' pay ' + payment_date + ' ' + report_name).replace(' ', '_')
o_file += '.' + file_extension
search_data = report_id + ' || ' + report_name + ' || ' + dt.strftime("%b") + ' '
search_data += str(dt.year) + ' || ' + deal_name
download_urls.append(
DownloadUrl(f_url, o_file, search_data, deal_name, input_dict_copy, 'POST'))
# del a_url['f_html']
a_url['download_urls'] = download_urls
|
dattatembare/file_downloader
|
src/download/bony_downloader.py
|
bony_downloader.py
|
py
| 10,730 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "download.file_downloader.FileDownloader",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "requests.Session",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "common.download_exceptions.DownloadException",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "lxml.html.html.fromstring",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "lxml.html.html",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "lxml.html",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "common.download_exceptions.DownloadException",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "common.download_exceptions.DownloadException",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "lxml.html.html.fromstring",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "lxml.html.html",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "lxml.html",
"line_number": 95,
"usage_type": "name"
},
{
"api_name": "itertools.product",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "common.download_exceptions.DownloadException",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 184,
"usage_type": "attribute"
}
] |
40787879761
|
from time import sleep
import time
import datetime
from datetime import timedelta
from time import sleep, strftime
motionTimeOutSeconds = 5
lastMotionTime = datetime.datetime.now()
def motionTimedOut():
myNow = datetime.datetime.now()
deltaTime = (myNow - lastMotionTime).total_seconds()
if deltaTime > motionTimeOutSeconds:
print('Motion timed out after {0} seconds'.format(deltaTime))
return True
return False
sleep(2)
# bTime = datetime.datetime.now()
# deltaT = (bTime-lastMotionTime).total_seconds()
# print(deltaT)
if motionTimedOut():
print('Motion timeout test of 2 seconds failed!')
exit(-1)
print('No timeout after 2 seconds.')
lastMotionTime = datetime.datetime.now()
sleep(6)
if motionTimedOut():
print('Motion timeout test is working after 6 seconds')
|
mrncmoose/smart_controller
|
pi-code/thermalPreTest.py
|
thermalPreTest.py
|
py
| 816 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "datetime.datetime.now",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 27,
"usage_type": "call"
}
] |
810789082
|
from __future__ import division
import numpy as np
from scipy import sparse
from sklearn.metrics.pairwise import euclidean_distances
import time
# Produce grid points for a 2d grayscale image
def get_points_2d(image, res):
rows, columns = image.shape
grid_x, grid_y = np.mgrid[0:columns:res, 0:rows:res]
grid = np.array((grid_x.flatten(), grid_y.flatten())).T
return grid
# Produce grid points for a 3d grayscale image
def get_points_3d(image, res):
rows, columns, z = image.shape
grid_z, grid_x, grid_y = np.mgrid[0:z:res, 0:columns:res, 0:rows:res]
grid = np.array((grid_x.flatten(), grid_y.flatten(), grid_z.flatten())).T
return grid
# Wendland kernel as a function of r = norm(x-y)/c_sup
def dist_kernel(r):
return max((1-r, 0))**4 * (4*r + 1)
def blowup_S(S, dim):
(m, n) = S.shape
if dim == 3:
S_full = sparse.lil_matrix((3 * m, 3 * n), dtype = np.float32)
#S_full = np.zeros((3 * m, 3 * n))
S_full[0::3, 0::3] = S
S_full[1::3, 1::3] = S
S_full[2::3, 2::3] = S
else:
        # use a sparse matrix for the 2D case as well, so the final .tocsc() call works
        S_full = sparse.lil_matrix((2 * m, 2 * n), dtype = np.float32)
S_full[0::2, 0::2] = S
S_full[1::2, 1::2] = S
return S_full.tocsc()
# Create evaluation matrix given kernel centers (grid points), evaluation points
# and kernel support
def evaluation_matrix(kernels, points, c_sup, dim):
dim = kernels.shape[1]
vect_kernel = np.vectorize(dist_kernel)
start = time.time()
S = euclidean_distances(points, kernels) / c_sup
#print("VEC -- euc dist ", (time.time() - start) / 60)
# Mark entries with 0 kernel support
start = time.time()
S[np.where(S > 1)] = -1
non_zero_indices = np.where(S >= 0)
#print("VEC -- S[np.where(S > 1)] and np.where(S>=0) ", (time.time() - start) / 60)
# Evaluate kernel at points within support
start = time.time()
S[non_zero_indices] = vect_kernel(S[non_zero_indices])
#print("VEC -- S[non_zero] = vect_kernel ", (time.time() - start) / 60)
start = time.time()
S[np.where(S == -1)] = 0
#print("VEC -- S[np.where(S == -1)] = 0 ", (time.time() - start) / 60)
start = time.time()
#full_S = blowup_S_old(S, dim)
#print("VEC -- blowup ", (time.time() - start) / 60)
return sparse.csc_matrix(S)
def evaluation_matrix_blowup(kernels, points, c_sup, dim):
dim = kernels.shape[1]
vect_kernel = np.vectorize(dist_kernel)
start = time.time()
S = euclidean_distances(points, kernels) / c_sup
#print("VEC -- euc dist ", (time.time() - start) / 60)
# Mark entries with 0 kernel support
start = time.time()
S[np.where(S > 1)] = -1
non_zero_indices = np.where(S >= 0)
#print("VEC -- S[np.where(S > 1)] and np.where(S>=0) ", (time.time() - start) / 60)
# Evaluate kernel at points within support
start = time.time()
S[non_zero_indices] = vect_kernel(S[non_zero_indices])
#print("VEC -- S[non_zero] = vect_kernel ", (time.time() - start) / 60)
start = time.time()
S[np.where(S == -1)] = 0
#print("VEC -- S[np.where(S == -1)] = 0 ", (time.time() - start) / 60)
start = time.time()
full_S = blowup_S(S, dim)
#print("VEC -- blowup ", (time.time() - start) / 60)
return full_S
# Create velocity field by weighing kernels by alphas
def make_V(S, alpha, d):
alpha = alpha.flatten()
if (S.shape[1] == alpha.shape[0]):
lmda = S.dot(alpha)
return lmda.reshape(-1, d)
else:
alpha = alpha.reshape(-1, d)
return S.dot(alpha)
|
polaschwoebel/NonLinearDataAugmentation
|
vector_fields.py
|
vector_fields.py
|
py
| 3,499 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "numpy.mgrid",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.mgrid",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.lil_matrix",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "scipy.sparse",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "numpy.float32",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.vectorize",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.pairwise.euclidean_distances",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.csc_matrix",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "scipy.sparse",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "numpy.vectorize",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.pairwise.euclidean_distances",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 82,
"usage_type": "call"
}
] |
38075843165
|
import gc
from collections import defaultdict
import cupy as cp
import pandas as pd
import torch
import torch.nn.functional as F
from cuml.metrics import pairwise_distances
from cuml.neighbors import NearestNeighbors
from torch.utils.data import DataLoader, Dataset, default_collate
from tqdm import tqdm
from transformers import AutoTokenizer, TrainerCallback
from utils import clean_text, f2_score, get_pos_score
LANGUAGE_TOKENS = [
"<|lang_pnb|>",
"<|lang_tr|>",
"<|lang_ur|>",
"<|lang_bn|>",
"<|lang_hi|>",
"<|lang_en|>",
"<|lang_kn|>",
"<|lang_km|>",
"<|lang_zh|>",
"<|lang_gu|>",
"<|lang_ta|>",
"<|lang_my|>",
"<|lang_fr|>",
"<|lang_swa|>",
"<|lang_or|>",
"<|lang_mul|>",
"<|lang_fil|>",
"<|lang_sw|>",
"<|lang_es|>",
"<|lang_pt|>",
"<|lang_pl|>",
"<|lang_ru|>",
"<|lang_mr|>",
"<|lang_it|>",
"<|lang_ar|>",
"<|lang_bg|>",
"<|lang_te|>",
"<|lang_as|>",
]
CATEGORY_TOKENS = [
"<|category_supplemental|>",
"<|category_aligned|>",
"<|category_source|>",
]
LEVEL_TOKENS = [
"<|level_0|>",
"<|level_1|>",
"<|level_2|>",
"<|level_3|>",
"<|level_4|>",
"<|level_5|>",
"<|level_6|>",
"<|level_7|>",
"<|level_8|>",
"<|level_9|>",
"<|level_10|>",
]
KIND_TOKENS = [
"<|kind_document|>",
"<|kind_video|>",
"<|kind_html5|>",
"<|kind_exercise|>",
"<|kind_audio|>",
]
OTHER_TOKENS = [
"<|topic|>",
"<|content|>",
"<s_title>",
"</s_title>",
"<s_description>",
"</s_description>",
"<s_text>",
"</s_text>",
]
RELATION_TOKENS = [
"<s_parent>",
"</s_parent>",
"<s_children>",
"</s_children>",
]
def init_tokenizer(tokenizer_name):
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
tokenizer.add_special_tokens(
dict(
additional_special_tokens=LANGUAGE_TOKENS
+ CATEGORY_TOKENS
+ LEVEL_TOKENS
+ KIND_TOKENS
+ OTHER_TOKENS
+ RELATION_TOKENS
)
)
if "sentence-t5" in tokenizer_name:
tokenizer.add_special_tokens({"sep_token": "<sep>"})
return tokenizer
class LECRDataset(Dataset):
def __init__(
self,
supervised_df,
topic_df,
content_df,
topic_dict,
content_dict,
correlation_df,
tokenizer_name="xlm-roberta-base",
max_len=512,
use_content_pair=False,
is_training=False,
use_augmentation=False,
objective="siamese",
):
self.tokenizer = init_tokenizer(tokenizer_name)
self.max_len = max_len
self.supervised_df = supervised_df.dropna()
self.topic_df = topic_df
self.content_df = content_df
self.topic_dict, self.content_dict = topic_dict, content_dict
self.correlation_df = correlation_df
self.use_content_pair = use_content_pair
self.is_training = is_training
self.use_augmentation = use_augmentation
self.objective = objective
self.topic_texts, self.content_texts, self.labels = self.process_csv()
def process_csv(self):
# get text pairs
topic_ids = self.supervised_df.topic_id.values
content_ids = self.supervised_df.content_ids.values
labels = list(self.supervised_df.target.values)
topic_texts = []
content_texts = []
for topic_id in topic_ids:
topic_texts.append(self.topic_dict[topic_id])
for content_id in content_ids:
content_texts.append(self.content_dict[content_id])
set_topic_ids = set(topic_ids)
use_all_pairs = (
False # use all pair, no need to be in the intersection of content_ids of topic ids
)
if self.use_content_pair:
# todo: create content pairs from each topic
content_to_topic = defaultdict(lambda: [])
topic_to_content = defaultdict(lambda: [])
pairs = set()
for i, row in tqdm(self.correlation_df.iterrows()):
content_list = row["content_ids"].split(" ")
if row["topic_id"] not in set_topic_ids:
continue
for content_id in content_list:
content_to_topic[content_id].append(row["topic_id"])
topic_to_content[row["topic_id"]].append(content_id)
if len(content_list) <= 1:
continue
if use_all_pairs:
for idx1 in range(len(content_list) - 1):
for idx2 in range(idx1 + 1, len(content_list)):
if (content_list[idx1], content_list[idx2],) not in pairs and (
content_list[idx2],
content_list[idx1],
) not in pairs:
pairs.add((content_list[idx1], content_list[idx2]))
if not use_all_pairs:
for content_id, topics in tqdm(content_to_topic.items()):
intersection_contents = list(
set.intersection(*[set(topic_to_content[topic_id]) for topic_id in topics])
)
for idx1 in range(len(intersection_contents) - 1):
for idx2 in range(idx1 + 1, len(intersection_contents)):
if (
intersection_contents[idx1],
intersection_contents[idx2],
) not in pairs and (
intersection_contents[idx2],
intersection_contents[idx1],
) not in pairs:
pairs.add(
(
intersection_contents[idx1],
intersection_contents[idx2],
)
)
for pair in pairs:
topic_texts.append(self.content_dict[pair[0]])
content_texts.append(self.content_dict[pair[1]])
labels.append(1)
return topic_texts, content_texts, labels
def __len__(self):
if self.is_training:
return len(self.labels)
else:
return 1
def augment(self, inputs):
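        # MLM-style corruption: sample ~15% of token positions and replace 80% of the sampled
        # positions with the mask token; ids outside the attention mask are zeroed afterwards.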
probability_matrix = torch.full(inputs["input_ids"].shape, 0.15)
masked_indices = torch.bernoulli(probability_matrix).bool()
indices_replaced = (
torch.bernoulli(torch.full(inputs["input_ids"].shape, 0.8)).bool() & masked_indices
)
inputs["input_ids"][indices_replaced] = self.tokenizer.convert_tokens_to_ids(
self.tokenizer.mask_token
)
inputs["input_ids"] *= inputs["attention_mask"]
return inputs
def __getitem__(self, idx):
topic_text = self.topic_texts[idx]
content_text = self.content_texts[idx]
label = self.labels[idx]
if self.objective == "siamese":
# topic
if isinstance(topic_text, tuple):
topic_inputs = self.tokenizer.encode_plus(
topic_text[0],
topic_text[1],
return_tensors=None,
add_special_tokens=True,
max_length=self.max_len,
padding="max_length",
truncation=True,
)
else:
topic_inputs = self.tokenizer.encode_plus(
topic_text,
return_tensors=None,
add_special_tokens=True,
max_length=self.max_len,
padding="max_length",
truncation=True,
)
for k, v in topic_inputs.items():
topic_inputs[k] = torch.tensor(v, dtype=torch.long)
# content
content_inputs = self.tokenizer.encode_plus(
content_text,
return_tensors=None,
add_special_tokens=True,
max_length=self.max_len,
padding="max_length",
truncation=True,
)
for k, v in content_inputs.items():
content_inputs[k] = torch.tensor(v, dtype=torch.long)
if isinstance(topic_text, tuple):
topic_text = topic_text[0] + topic_text[1]
if self.is_training and self.use_augmentation:
topic_inputs = self.augment(topic_inputs)
content_inputs = self.augment(content_inputs)
return topic_inputs, content_inputs, topic_inputs, label
elif self.objective == "classification":
combined_inputs = self.tokenizer.encode_plus(
topic_text,
content_text,
return_tensors=None,
add_special_tokens=True,
max_length=self.max_len,
padding="max_length",
truncation=True,
)
for k, v in combined_inputs.items():
combined_inputs[k] = torch.tensor(v, dtype=torch.long)
if self.is_training and self.use_augmentation:
combined_inputs = self.augment(combined_inputs)
return combined_inputs, combined_inputs, combined_inputs, label
else:
raise ValueError("Only support siamese/classification for now.")
class InferenceDataset(Dataset):
def __init__(self, texts, tokenizer_name="xlm-roberta-base", max_len=512):
self.texts = texts
self.tokenizer = init_tokenizer(tokenizer_name)
self.max_len = max_len
def __len__(self):
return len(self.texts)
def __getitem__(self, idx):
text = self.texts[idx]
# topic
inputs = self.tokenizer.encode_plus(
text,
return_tensors=None,
add_special_tokens=True,
max_length=self.max_len,
padding="max_length",
truncation=True,
)
for k, v in inputs.items():
inputs[k] = torch.tensor(v, dtype=torch.long)
return inputs
def collate_fn(inputs):
inputs = default_collate(inputs)
mask_len = int(inputs["attention_mask"].sum(axis=1).max())
for k, v in inputs.items():
inputs[k] = inputs[k][:, :mask_len]
return inputs
class DatasetUpdateCallback(TrainerCallback):
"""
    Trigger re-computation of the dataset.
    A hack that modifies the train/val dataset pointed to by the Trainer's dataloader.
0. Calculate new train/val topic/content embeddings, train KNN, get new top-k
1. Calculate top-k max positive score, compare to current val best, if greater, continue to step 2, else do nothing
2. Update supervised_df and update dataset:
self.topic_texts, self.content_texts, self.labels = self.process_csv()
"""
def __init__(
self,
trainer,
train_topic_ids,
val_topic_ids,
topic_df,
content_df,
topic_dict,
content_dict,
correlation_df,
tokenizer_name,
max_len,
best_score=0,
top_k=50,
use_translated=False,
mix_translated=False,
fold=0,
):
super(DatasetUpdateCallback, self).__init__()
self.trainer = trainer
self.topic_df = topic_df
self.content_df = content_df
self.correlation_df = correlation_df
self.best_score = best_score
self.top_k = top_k
self.use_translated = use_translated
self.mix_translated = mix_translated
self.fold = fold
self.tokenizer = init_tokenizer(tokenizer_name)
self.topic_dict, self.content_dict = topic_dict, content_dict
train_topic_texts = [
topic_dict[topic_id]
for topic_id in self.topic_df.id.values
if topic_id in train_topic_ids
]
self.train_topic_ids = [
topic_id for topic_id in self.topic_df.id.values if topic_id in train_topic_ids
]
self.train_topic_languages = []
for topic_id, topic_lang in zip(self.topic_df.id.values, self.topic_df.language.values):
if topic_id in train_topic_ids:
self.train_topic_languages.append(topic_lang)
val_topic_texts = [
topic_dict[topic_id]
for topic_id in self.topic_df.id.values
if topic_id in val_topic_ids
]
self.val_topic_ids = [
topic_id for topic_id in self.topic_df.id.values if topic_id in val_topic_ids
]
content_texts = [
content_dict[content_id]
for content_id in self.content_df.id.values
if content_id.startswith("c_")
]
def inference_collate_fn(inputs):
inputs = default_collate(inputs)
mask_len = int(inputs["attention_mask"].sum(axis=1).max())
for k, v in inputs.items():
inputs[k] = inputs[k][:, :mask_len]
return inputs
train_topic_dataset = InferenceDataset(
texts=train_topic_texts, tokenizer_name=tokenizer_name, max_len=max_len
)
self.train_topic_dataloader = DataLoader(
train_topic_dataset,
num_workers=self.trainer.args.dataloader_num_workers,
batch_size=32,
shuffle=False,
collate_fn=inference_collate_fn,
)
val_topic_dataset = InferenceDataset(
texts=val_topic_texts, tokenizer_name=tokenizer_name, max_len=max_len
)
self.val_topic_dataloader = DataLoader(
val_topic_dataset,
num_workers=self.trainer.args.dataloader_num_workers,
batch_size=32,
shuffle=False,
collate_fn=inference_collate_fn,
)
content_dataset = InferenceDataset(
texts=content_texts, tokenizer_name=tokenizer_name, max_len=max_len
)
self.content_dataloader = DataLoader(
content_dataset,
num_workers=self.trainer.args.dataloader_num_workers,
batch_size=32,
shuffle=False,
collate_fn=inference_collate_fn,
)
def on_train_begin(self, args, state, control, **kwargs):
self.on_epoch_end(args, state, control, **kwargs)
def on_epoch_end(self, args, state, control, **kwargs):
local_rank = args.local_rank if args.local_rank != -1 else 0
with cp.cuda.Device(local_rank):
torch.cuda.empty_cache()
print("Callback on local_rank =", local_rank)
self.trainer.model.eval()
print("On Epoch Begin")
topic_embs = []
device = f"cuda:{local_rank}" if torch.cuda.is_available() else "cpu"
with torch.no_grad():
for inputs in tqdm(self.val_topic_dataloader):
for k, v in inputs.items():
inputs[k] = inputs[k].to(device)
out = self.trainer.model.feature(inputs)
topic_embs.extend(out.cpu().detach().numpy())
content_embs = []
# TODO: only use original content embeddings to avoid translation confusing
for inputs in tqdm(self.content_dataloader):
for k, v in inputs.items():
inputs[k] = inputs[k].to(device)
out = self.trainer.model.feature(inputs)
content_embs.extend(out.cpu().detach().numpy())
# Transfer predictions to gpu
with cp.cuda.Device(local_rank):
topic_embs_gpu = cp.array(topic_embs)
content_embs_gpu = cp.array(content_embs)
# Release memory
torch.cuda.empty_cache()
# KNN model
content_idx_to_id = {}
for idx, row in self.content_df.iterrows():
content_idx_to_id[idx] = row.id
print("Evaluating current score...")
if self.use_translated:
                # get the 500 nearest contents, then select the top-k that are in the original contents (approximate; we can't check them all)
original_indices = [ # indices of original contents in self.content_df
i
for i, emb in enumerate(content_embs)
if self.content_df.id.values[i].startswith("c_")
]
# original_content_embs = [
# emb
# for i, emb in enumerate(content_embs)
# if self.content_df.id.values[i].startswith("c_")
# ]
# original_content_embs_gpu = cp.array(original_content_embs)
original_content_embs_gpu = content_embs_gpu
neighbors_model = NearestNeighbors(n_neighbors=500, metric="cosine")
neighbors_model.fit(original_content_embs_gpu)
indices = neighbors_model.kneighbors(topic_embs_gpu, return_distance=False)
for selected_k in [5, 10, 20, 50, 100, 200]:
predictions = []
for k in tqdm(range(len(indices))):
pred = indices[k]
# original_contents = [self.content_df.loc[ind, "id"] for ind in pred.get() if self.content_df.loc[ind, "id"].startswith("c_")]
# original_contents = [content_idx_to_id[ind] for ind in pred.get() if content_idx_to_id[ind].startswith("c_")]
original_contents = [
content_idx_to_id[original_indices[ind]] for ind in pred.get()
]
p = " ".join(original_contents[:selected_k])
predictions.append(p)
knn_preds = pd.DataFrame(
{"topic_id": self.val_topic_ids, "content_ids": predictions}
).sort_values("topic_id")
gt = self.correlation_df[
self.correlation_df.topic_id.isin(self.val_topic_ids)
].sort_values("topic_id")
score = get_pos_score(
gt["content_ids"],
knn_preds.sort_values("topic_id")["content_ids"],
selected_k,
)
print(
"Selecting",
selected_k,
"nearest contents",
"top-k score =",
f2_score(
gt["content_ids"],
knn_preds.sort_values("topic_id")["content_ids"],
),
"max positive score =",
score,
)
print("Training KNN model...")
print("Generating KNN predictions with top_k =", self.top_k)
neighbors_model = NearestNeighbors(n_neighbors=self.top_k, metric="cosine")
neighbors_model.fit(original_content_embs_gpu)
print("Generating embedding for validation topics")
indices = neighbors_model.kneighbors(topic_embs_gpu, return_distance=False)
predictions = []
for k in tqdm(range(len(indices))):
pred = indices[k]
# original_contents = [self.content_df.loc[ind, "id"] for ind in pred.get() if self.content_df.loc[ind, "id"].startswith("c_")]
# original_contents = [content_idx_to_id[ind] for ind in pred.get() if content_idx_to_id[ind].startswith("c_")]
original_contents = [
content_idx_to_id[original_indices[ind]] for ind in pred.get()
]
p = " ".join(original_contents[: self.top_k])
predictions.append(p)
else:
for selected_k in [5, 10, 20, 50, 100, 200]:
neighbors_model = NearestNeighbors(n_neighbors=selected_k, metric="cosine")
neighbors_model.fit(content_embs_gpu)
indices = neighbors_model.kneighbors(topic_embs_gpu, return_distance=False)
predictions = []
for k in tqdm(range(len(indices))):
pred = indices[k]
# p = " ".join([self.content_df.loc[ind, "id"] for ind in pred.get()])
p = " ".join([content_idx_to_id[ind] for ind in pred.get()])
predictions.append(p)
knn_preds = pd.DataFrame(
{"topic_id": self.val_topic_ids, "content_ids": predictions}
).sort_values("topic_id")
gt = self.correlation_df[
self.correlation_df.topic_id.isin(self.val_topic_ids)
].sort_values("topic_id")
score = get_pos_score(
gt["content_ids"],
knn_preds.sort_values("topic_id")["content_ids"],
selected_k,
)
print(
"Selecting",
selected_k,
"nearest contents",
"top-k score =",
f2_score(
gt["content_ids"],
knn_preds.sort_values("topic_id")["content_ids"],
),
"max positive score =",
score,
)
print("Training KNN model...")
print("Generating KNN predictions with top_k =", self.top_k)
neighbors_model = NearestNeighbors(n_neighbors=self.top_k, metric="cosine")
neighbors_model.fit(content_embs_gpu)
print("Generating embedding for validation topics")
indices = neighbors_model.kneighbors(topic_embs_gpu, return_distance=False)
predictions = []
for k in tqdm(range(len(indices))):
pred = indices[k]
# p = " ".join([self.content_df.loc[ind, "id"] for ind in pred.get()])
p = " ".join([content_idx_to_id[ind] for ind in pred.get()])
predictions.append(p)
knn_preds = pd.DataFrame(
{"topic_id": self.val_topic_ids, "content_ids": predictions}
).sort_values("topic_id")
score = get_pos_score(
gt["content_ids"],
knn_preds.sort_values("topic_id")["content_ids"],
self.top_k,
)
print("Current Score:", score, "Best Score:", self.best_score)
if score > self.best_score:
self.best_score = score
print("saving best model to data/ folder")
# torch.save(self.trainer.model.state_dict(), f"data/siamese_model_{score}.pth")
generate_new_dataset_every_epoch = True
if generate_new_dataset_every_epoch or (score == self.best_score):
# generate new pairs in dataset
print("Building new validation supervised df")
new_val_supervised_df = build_new_supervised_df(knn_preds, self.correlation_df)[
["topic_id", "content_ids", "target"]
].sort_values(["topic_id", "content_ids"])
if score == self.best_score: # only save for the best checkpoint
print("saving new_val_supervised_df to data/ folder")
new_val_supervised_df.to_csv("data/new_val_supervised_df.csv")
# get top-k for training set
# TODO: only get original content neighbors for original topics
print("Generating embedding for train topics")
train_topic_embs = []
with torch.no_grad():
for inputs in tqdm(self.train_topic_dataloader):
for k, v in inputs.items():
inputs[k] = inputs[k].to(device)
out = self.trainer.model.feature(inputs)
train_topic_embs.extend(out.cpu().detach().numpy())
with cp.cuda.Device(local_rank):
train_topic_embs_gpu = cp.array(train_topic_embs)
train_indices = neighbors_model.kneighbors(
train_topic_embs_gpu, return_distance=False
)
# if self.use_translated:
# topic_language_df = pd.DataFrame({
# "topic_id": self.train_topic_ids,
# "language": self.train_topic_languages
# })
train_predictions = []
for k in tqdm(range(len(train_indices))):
pred = train_indices[k]
# p = " ".join([self.content_df.loc[ind, "id"] for ind in pred.get()])
if self.use_translated:
p = " ".join(
[content_idx_to_id[original_indices[ind]] for ind in pred.get()]
)
else:
p = " ".join([content_idx_to_id[ind] for ind in pred.get()])
train_predictions.append(p)
train_knn_preds = pd.DataFrame(
{
"topic_id": self.train_topic_ids,
"content_ids": train_predictions,
"language": self.train_topic_languages,
}
).sort_values("topic_id")
print("Building new train supervised df")
# if self.use_translated:
# count_dict = {
# "ar": 3701,
# "as": 167,
# "bg": 2867,
# "bn": 2176,
# "en": 36161,
# "es": 13910,
# "fil": 247,
# "fr": 3701,
# "gu": 2320,
# "hi": 1786,
# "it": 866,
# "km": 121,
# "kn": 119,
# "mr": 300,
# "mul": 4,
# "my": 135,
# "or": 70,
# "pl": 43,
# "pnb": 51,
# "pt": 4177,
# "ru": 34,
# "sw": 2860,
# "swa": 35,
# "ta": 60,
# "te": 93,
# "tr": 40,
# "ur": 66,
# "zh": 862,
# }
# times_positive_samples = 4
# # select all original topics and a part of translated topics
# translated_knn_preds = (
# train_knn_preds[~train_knn_preds.topic_id.str.startswith("t_")]
# .groupby("language")
# .apply(
# lambda x: x.sample(
# n=count_dict[x["language"].iat[0]] * times_positive_samples,
# replace=True,
# )
# )
# .reset_index(drop=True)
# )
# original_knn_preds = train_knn_preds[
# train_knn_preds.topic_id.str.startswith("t_")
# ]
# train_knn_preds = pd.concat([original_knn_preds, translated_knn_preds])
new_train_supervised_df = build_new_supervised_df(
train_knn_preds, self.correlation_df
)
if self.use_translated:
# Only add positive cases in training set for translated topics
translated_supervised_df = new_train_supervised_df[
~new_train_supervised_df.topic_id.str.startswith("t_")
& new_train_supervised_df.target
== 1
].copy()
# Only original contents for original topics
original_supervised_df = new_train_supervised_df[
new_train_supervised_df.topic_id.str.startswith("t_")
& new_train_supervised_df.content_ids.str.startswith("c_")
].copy()
# TODO: duplicate number of positive by using translated data
id_to_language = {}
for _, row in tqdm(self.topic_df.iterrows()):
id_to_language[row.id] = row.language
original_supervised_df["language"] = original_supervised_df["topic_id"].apply(
lambda x: id_to_language[x]
)
count_df = (
original_supervised_df[original_supervised_df.target == 1]
.groupby("language")
.size()
.reset_index(name="counts")
)
count_dict = {}
for _, row in count_df.iterrows():
count_dict[row.language] = row.counts
times_positive_samples = 3
translated_supervised_df["language"] = translated_supervised_df[
"topic_id"
].apply(lambda x: id_to_language[x])
translated_supervised_df = (
translated_supervised_df.groupby("language")
.apply(
lambda x: x.sample(
n=count_dict[x["language"].iat[0]] * times_positive_samples,
replace=True,
)
)
.reset_index(drop=True)
)
original_supervised_df = original_supervised_df.drop(columns=["language"])
translated_supervised_df = translated_supervised_df.drop(columns=["language"])
new_train_supervised_df = pd.concat(
[translated_supervised_df, original_supervised_df]
)[["topic_id", "content_ids", "target"]].sort_values(
["topic_id", "content_ids"]
)
if score == self.best_score: # only save for the best checkpoint
print("saving new_train_supervised_df to data/ folder")
new_train_supervised_df.to_csv("data/new_train_supervised_df.csv")
# update train_dataset and val_dataset
print("preprocess csv for train/validation topics, contents, labels")
self.trainer.train_dataset.supervised_df = new_train_supervised_df.dropna()
(
self.trainer.train_dataset.topic_texts,
self.trainer.train_dataset.content_texts,
self.trainer.train_dataset.labels,
) = self.trainer.train_dataset.process_csv()
self.trainer.eval_dataset.supervised_df = new_val_supervised_df.dropna()
(
self.trainer.eval_dataset.topic_texts,
self.trainer.eval_dataset.content_texts,
self.trainer.eval_dataset.labels,
) = self.trainer.eval_dataset.process_csv()
print("Saving knn csvs ...")
train_knn_preds.to_csv(f"data/train_knn_fold{self.fold}.csv")
knn_preds.to_csv(f"data/val_knn_fold{self.fold}.csv")
del (
train_topic_embs,
train_topic_embs_gpu,
train_knn_preds,
train_indices,
train_predictions,
)
gc.collect()
del (
topic_embs,
content_embs,
topic_embs_gpu,
content_embs_gpu,
knn_preds,
indices,
neighbors_model,
predictions,
)
gc.collect()
torch.cuda.empty_cache()
if self.mix_translated:
self.use_translated = not self.use_translated
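# Build a (topic_id, content_id, target) dataframe: ground-truth pairs become positives,
# KNN-retrieved pairs missing from the ground truth become negatives.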
def build_new_supervised_df(knn_df, correlations):
# Create lists for training
topics_ids = []
content_ids = []
targets = []
# Iterate over each topic in df
mapping = set()
# get all class 1 in correlations
topic_ids = set(knn_df.topic_id.values)
filtered_correlations = correlations[correlations["topic_id"].isin(topic_ids)]
for i, row in tqdm(filtered_correlations.iterrows()):
if str(row["content_ids"]) and str(row["content_ids"]) != "nan":
content_ids = str(row["content_ids"]).split(" ")
for content_id in content_ids:
mapping.add((row["topic_id"], content_id, 1))
for i, row in tqdm(knn_df.iterrows()):
if str(row["content_ids"]) and str(row["content_ids"]) != "nan":
content_ids = str(row["content_ids"]).split(" ")
for content_id in content_ids:
if (
row["topic_id"],
content_id,
1,
) not in mapping: # because mapping already contains all positive cases
mapping.add((row["topic_id"], content_id, 0))
# Build training dataset
mapping = list(mapping)
new_df = pd.DataFrame(
{
"topic_id": [item[0] for item in mapping if item[1]],
"content_ids": [item[1] for item in mapping if item[1]],
"target": [item[2] for item in mapping if item[1]],
}
)
# Release memory
del topics_ids, content_ids
gc.collect()
return new_df
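# Collate function that truncates the padded token tensors to the longest attention-mask length in the batch.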
def collate_fn(batch):
batch = default_collate(batch)
topic_inputs, content_inputs, combined_inputs, labels = batch
mask_len = int(topic_inputs["attention_mask"].sum(axis=1).max())
for k, v in topic_inputs.items():
topic_inputs[k] = topic_inputs[k][:, :mask_len]
mask_len = int(content_inputs["attention_mask"].sum(axis=1).max())
for k, v in content_inputs.items():
content_inputs[k] = content_inputs[k][:, :mask_len]
mask_len = int(combined_inputs["attention_mask"].sum(axis=1).max())
for k, v in combined_inputs.items():
combined_inputs[k] = combined_inputs[k][:, :mask_len]
return {
"topic_inputs": topic_inputs,
"content_inputs": content_inputs,
"combined_inputs": combined_inputs,
"labels": labels,
}
|
thanhhau097/lecr
|
dataset.py
|
dataset.py
|
py
| 35,343 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "transformers.AutoTokenizer.from_pretrained",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "transformers.AutoTokenizer",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "collections.defaultdict",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "torch.full",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "torch.bernoulli",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "torch.bernoulli",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "torch.full",
"line_number": 228,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 264,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 276,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 297,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 307,
"usage_type": "name"
},
{
"api_name": "torch.tensor",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "torch.long",
"line_number": 330,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.default_collate",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "transformers.TrainerCallback",
"line_number": 344,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.default_collate",
"line_number": 417,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 427,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 438,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 449,
"usage_type": "call"
},
{
"api_name": "cupy.cuda.Device",
"line_number": 462,
"usage_type": "call"
},
{
"api_name": "cupy.cuda",
"line_number": 462,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.empty_cache",
"line_number": 463,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 463,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 468,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 468,
"usage_type": "attribute"
},
{
"api_name": "torch.no_grad",
"line_number": 470,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 471,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 479,
"usage_type": "call"
},
{
"api_name": "cupy.cuda.Device",
"line_number": 486,
"usage_type": "call"
},
{
"api_name": "cupy.cuda",
"line_number": 486,
"usage_type": "attribute"
},
{
"api_name": "cupy.array",
"line_number": 487,
"usage_type": "call"
},
{
"api_name": "cupy.array",
"line_number": 488,
"usage_type": "call"
},
{
"api_name": "torch.cuda.empty_cache",
"line_number": 491,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 491,
"usage_type": "attribute"
},
{
"api_name": "cuml.neighbors.NearestNeighbors",
"line_number": 514,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 520,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 530,
"usage_type": "call"
},
{
"api_name": "utils.get_pos_score",
"line_number": 537,
"usage_type": "call"
},
{
"api_name": "utils.f2_score",
"line_number": 547,
"usage_type": "call"
},
{
"api_name": "cuml.neighbors.NearestNeighbors",
"line_number": 557,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 563,
"usage_type": "call"
},
{
"api_name": "cuml.neighbors.NearestNeighbors",
"line_number": 574,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 579,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 585,
"usage_type": "call"
},
{
"api_name": "utils.get_pos_score",
"line_number": 592,
"usage_type": "call"
},
{
"api_name": "utils.f2_score",
"line_number": 602,
"usage_type": "call"
},
{
"api_name": "cuml.neighbors.NearestNeighbors",
"line_number": 612,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 618,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 624,
"usage_type": "call"
},
{
"api_name": "utils.get_pos_score",
"line_number": 628,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 656,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 657,
"usage_type": "call"
},
{
"api_name": "cupy.cuda.Device",
"line_number": 663,
"usage_type": "call"
},
{
"api_name": "cupy.cuda",
"line_number": 663,
"usage_type": "attribute"
},
{
"api_name": "cupy.array",
"line_number": 664,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 677,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 689,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 770,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 804,
"usage_type": "call"
},
{
"api_name": "gc.collect",
"line_number": 840,
"usage_type": "call"
},
{
"api_name": "gc.collect",
"line_number": 852,
"usage_type": "call"
},
{
"api_name": "torch.cuda.empty_cache",
"line_number": 853,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 853,
"usage_type": "attribute"
},
{
"api_name": "tqdm.tqdm",
"line_number": 870,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 876,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 889,
"usage_type": "call"
},
{
"api_name": "gc.collect",
"line_number": 898,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.default_collate",
"line_number": 903,
"usage_type": "call"
}
] |
15653063144
|
from aiogram import Bot, types, Dispatcher, executor
import logging
from config import TOKEN, html
import parser as ps
import time
import random
import os
import qrcode
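# Render `text` as a QR code image and save it to qr.png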
def make_qr(text):
qr = qrcode.QRCode()
qr.add_data(text)
img_qr = qr.make_image(fill_color='white', back_color="black")
img_qr.save('qr.png')
bot = Bot(token=TOKEN)
dp = Dispatcher(bot)
logging.basicConfig(level=logging.INFO)
async def on_startup(_):
print('Bot online')
@dp.message_handler(commands='numhent')
async def numhent(msg : types.Message):
number = msg.text.split(' ', 1)
try:
ps.get_html(html,number[1])
photo = ps.parse('content', number[1])
await msg.reply_photo(photo,caption=number[1])
except:
await msg.reply('send a number')
@dp.message_handler(commands='hent')
async def hent(msg : types.Message):
rnd = random.randint(1,6330000)
ps.get_html(html,rnd)
t = ps.parse('content', rnd)
await msg.reply_photo(t,caption=rnd)
@dp.message_handler(commands='qr')
async def test(msg : types.Message):
split = msg.text.split(' ', 1)[1]
make_qr(split)
await msg.reply_photo(open('qr.png', 'rb'), caption=split)
if __name__ == '__main__':
executor.start_polling(dp,skip_updates=True, on_startup=on_startup)
|
sarenis/tg_parsing_bot
|
bot.py
|
bot.py
|
py
| 1,329 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "qrcode.QRCode",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "aiogram.Bot",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "config.TOKEN",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "aiogram.Dispatcher",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "aiogram.types.Message",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "aiogram.types",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "parser.get_html",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "config.html",
"line_number": 28,
"usage_type": "argument"
},
{
"api_name": "parser.parse",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "aiogram.types.Message",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "aiogram.types",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "parser.get_html",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "config.html",
"line_number": 37,
"usage_type": "argument"
},
{
"api_name": "parser.parse",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "aiogram.types.Message",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "aiogram.types",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "aiogram.executor.start_polling",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "aiogram.executor",
"line_number": 48,
"usage_type": "name"
}
] |
17534446407
|
from functools import reduce
from typing import List
from project.caretaker import Caretaker
from project.cheetah import Cheetah
from project.keeper import Keeper
from project.lion import Lion
from project.tiger import Tiger
from project.vet import Vet
from project.animal import Animal
from project.worker import Worker
class Zoo:
def __init__(self,
name: str,
budget: int,
animal_capacity: int,
workers_capacity: int
):
# public instance attribute
self.name = name
# private attributes
self.__budget = budget
self.__animal_capacity = animal_capacity
self.__workers_capacity = workers_capacity
# public instance attributes
self.animals: List[Animal] = []
self.workers: List[Worker] = []
def add_animal(self, animal: Animal, price: int) -> str:
if (price <= self.__budget) and (len(self.animals) < self.__animal_capacity):
self.animals.append(animal)
self.__budget -= price
return f'{animal.name} the {animal.__class__.__name__} added to the zoo'
# or type(animal).__name__
if (price > self.__budget) and (len(self.animals) < self.__animal_capacity):
return 'Not enough budget'
return 'Not enough space for animal'
def hire_worker(self, worker):
if len(self.workers) < self.__workers_capacity:
self.workers.append(worker)
return f'{worker.name} the {worker.__class__.__name__} hired successfully'
# or {type(worker).__name__}
return 'Not enough space for worker'
def fire_worker(self, worker_name):
worker = [w for w in self.workers if w.name == worker_name]
if worker:
self.workers.remove(worker[0])
return f'{worker[0].name} fired successfully'
return f'There is no {worker_name} in the zoo'
def pay_workers(self):
# !!!!!
workers_payment = sum([w.salary for w in self.workers])
if workers_payment <= self.__budget:
self.__budget -= workers_payment
return f'You paid your workers. They are happy. ' \
f'Budget left: {self.__budget}'
return 'You have no budget to pay your workers. They are unhappy'
def tend_animals(self):
# get_needs = self.money_for_care
amount_to_pay = sum([t.get_needs() for t in self.animals])
if self.__budget >= amount_to_pay:
self.__budget -= amount_to_pay
return f"You tended all the animals. They are happy. Budget left: {self.__budget}"
return "You have no budget to tend the animals. They are unhappy."
def profit(self, amount) -> None:
self.__budget += amount
def animals_status(self):
animals_types = ['Lion', 'Tiger', 'Cheetah']
animals_list = {idx: [] for idx in range(0, 3)}
for animal in self.animals:
idx = animals_types.index(type(animal).__name__)
animals_list[idx].append(animal)
lions, tigers, cheetahs = animals_list[0], animals_list[1], animals_list[2]
#
# lions = [animal for animal in self.animals if type(animal).__name__ == animals_types[0]]
# tigers = [animal for animal in self.animals if type(animal).__name__ == animals_types[1]]
# cheetahs = [animal for animal in self.animals if type(animal).__name__ == animals_types[2]]
result = [f'You have {len(self.animals)} animals']
result.append(f'----- {len(lions)} Lions:')
result.append('\n'.join([animal.__repr__() for animal in lions]))
result.append(f'----- {len(tigers)} Tigers:')
result.append('\n'.join([animal.__repr__() for animal in tigers]))
result.append(f'----- {len(cheetahs)} Cheetahs:')
result.append('\n'.join([animal.__repr__() for animal in cheetahs]))
return '\n'.join(result)
def workers_status(self):
keepers = [w for w in self.workers if w.__class__.__name__ == 'Keeper']
caretakers = [w for w in self.workers if w.__class__.__name__ == 'Caretaker']
vets = [w for w in self.workers if w.__class__.__name__ == 'Vet']
result = f"You have {len(self.workers)} workers\n"
result += f'----- {len(keepers)} Keepers:\n'
result += '\n'.join([k.__repr__() for k in keepers]) + '\n'
result += f'----- {len(caretakers)} Caretakers:\n'
result += '\n'.join([c.__repr__() for c in caretakers]) + '\n'
result += f'----- {len(vets)} Vets:\n'
result += '\n'.join([v.__repr__() for v in vets])
return result
|
emilynaydenova/SoftUni-Python-Web-Development
|
Python-OOP-Oct2023/Exercises/04.Encapsulation/wild_cat_zoo/project/zoo.py
|
zoo.py
|
py
| 4,687 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.List",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "project.animal.Animal",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "project.worker.Worker",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "project.animal.Animal",
"line_number": 33,
"usage_type": "name"
}
] |
29214466760
|
from celery import shared_task, Celery
from django.utils import timezone
from .models import Post
app = Celery()
@shared_task
def publish_posts_task():
posts = Post.objects.filter(
status=False, published_date__lte=timezone.now()
)
for post in posts:
post.status = True
post.save()
return (
print(f"{posts.count()} published!")
if posts
else print("There is no post to publish")
)
@app.on_after_finalize.connect
def setup_periodic_tasks(sender, **kwargs):
sender.add_periodic_task(
60 * 60,
publish_posts_task.s(),  # pass the task signature instead of calling the task
name="published posts every one hour",
)
|
smz6990/DRF-Blog
|
core/blog/tasks.py
|
tasks.py
|
py
| 665 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "celery.Celery",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "models.Post.objects.filter",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "models.Post.objects",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "models.Post",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "celery.shared_task",
"line_number": 10,
"usage_type": "name"
}
] |
21138667122
|
#!/usr/bin/python3
# -*-coding:utf-8 -*-
# Reference:**********************************************
# @Time : 2019/11/1 23:30
# @Author : Raymond Luo
# @File : train_emb.py
# @User : luoli
# @Software: PyCharm
# Reference:**********************************************
import pickle
from gensim.models import Word2Vec, KeyedVectors
import pandas as pd
import torch.nn as nn
import torch
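# Train a skip-gram Word2Vec model on the neighbor walks stored in the CSV and save the node embeddings.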
def train_motif_wordemb(path):
data = pd.read_csv(path)
walk_a = data['user_neighbor'].values.tolist()
walk_b = data['target_neighbor'].values.tolist()
walk_a.extend(walk_b)
walk = []
for line in walk_a:
new_line = line[1:-1].split(", ")
walk.append(new_line)
model = Word2Vec(walk, size=128, window=3, min_count=0, sg=1, workers=12, iter=2, compute_loss=True)
print("Node2vec loss:", model.get_latest_training_loss())
model.wv.save_word2vec_format("../model/motif_walk.emb")
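# Rewrite the saved embedding file so each line starts with the node index instead of the raw uid.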
def change_emb_index(emb_path, uid2idx_path):
with open(uid2idx_path, "rb") as f:
uid2idx = pickle.load(f)
with open(emb_path, "r") as f:
emb_file = f.readlines()
head = 1
new_file = []
for line in emb_file:
if head:
head = 0
new_file.append(line)
continue  # skip the header line
line_list = line.split(" ")
idx = uid2idx[int(line_list[0])] # uid 2 idx
line_list[0] = str(idx)  # write the index back in place of the uid
new_line = " ".join(line_list)
new_file.append(new_line)
with open("../model/motif_walk_idx.emb", "w", encoding="utf-8") as f:
for line in new_file:
f.write(line)
if __name__ == "__main__":
# train_motif_wordemb("../data/train_data.csv")
# change_emb_index("../model/motif_walk.emb", "../data/uid_2_idx.pkl")
# test
# build the word embeddings
word_vectors = KeyedVectors.load_word2vec_format("../model/motif_walk_idx.emb", binary=False) # node vectors
weight = torch.FloatTensor(word_vectors.syn0) # get the 2D numpy matrix
emb = nn.Embedding.from_pretrained(weight, freeze=False)
print(emb(torch.LongTensor([47066])))
|
RManLuo/MotifGNN
|
src_sjjy/train_emb.py
|
train_emb.py
|
py
| 2,114 |
python
|
en
|
code
| 7 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "gensim.models.Word2Vec",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "gensim.models.KeyedVectors.load_word2vec_format",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "gensim.models.KeyedVectors",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "torch.FloatTensor",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "torch.nn.Embedding.from_pretrained",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "torch.nn.Embedding",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "torch.LongTensor",
"line_number": 61,
"usage_type": "call"
}
] |
18480731961
|
#!/usr/bin/env python
# coding=utf-8
import datetime
import hashlib
import json
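# Tracks SHA-1 hashes of the amiibo and game-info JSON files and refreshes the timestamp only when either changes.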
class LastUpdated():
def __init__(self, file='last-updated.json'):
self.file = file
def read(self):
with open(self.file, 'r') as f:
data = json.load(f)
return {
'amiibo_sha1': data['amiibo_sha1'],
'game_info_sha1': data['game_info_sha1'],
'timestamp': datetime.datetime.strptime(data['timestamp'], '%Y-%m-%dT%H:%M:%S.%f'),
}
def read_timestamp(self):
return self.read()['timestamp']
def write(self, amiibo_sha1, game_info_sha1, timestamp):
with open(self.file, 'w') as f:
json.dump({
'amiibo_sha1': amiibo_sha1,
'game_info_sha1': game_info_sha1,
'timestamp': timestamp.isoformat(),
}, f, sort_keys=True)
def hash(self, data):
return hashlib.sha1(data).hexdigest()
def update(self, data, data1):
amiibo_sha1 = self.hash(data)
game_info_sha1 = self.hash(data1)
try:
last_update = self.read()
except Exception as e:
print(e)
last_update = None
updated = False
if last_update is None or last_update['amiibo_sha1'] != amiibo_sha1 or last_update['game_info_sha1'] != game_info_sha1:
last_update = {
'amiibo_sha1': amiibo_sha1,
'game_info_sha1': game_info_sha1,
'timestamp': datetime.datetime.utcnow(),
}
self.write(**last_update)
updated = True
return last_update, updated
if __name__ == '__main__':
last_updater = LastUpdated()
with open('database/amiibo.json', 'rb') as f:
with open('database/games_info.json', 'rb') as g:
last_update, updated = last_updater.update(f.read(), g.read())
if updated:
print('Updated: {}'.format(last_updater.file))
print('amiibo_sha1: {}'.format(last_update['amiibo_sha1']))
print('game_info_sha1: {}'.format(last_update['game_info_sha1']))
print('timestamp: {}'.format(last_update['timestamp'].isoformat()))
|
N3evin/AmiiboAPI
|
last_updated.py
|
last_updated.py
|
py
| 2,178 |
python
|
en
|
code
| 459 |
github-code
|
6
|
[
{
"api_name": "json.load",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "json.dump",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "hashlib.sha1",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcnow",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 51,
"usage_type": "attribute"
}
] |
4524699811
|
import pytest
import requests
from budget.enums import ExpensesCategoryEnum, IncomeCategoryEnum
from common.tests_fixtures.fixtures import admin_credentials, admin_id, base_url
budgets_url = f"{base_url}/budgets/"
incomes_url = f"{base_url}/incomes/"
expenses_url = f"{base_url}/expenses/"
@pytest.fixture
def create_budget():
budget_data = {
"owner": admin_id,
"name": "New budget name",
}
response = requests.post(budgets_url, json=budget_data, **admin_credentials)
assert response.status_code == 201
return response.json()
def test_creating_budget():
budget_data = {
"owner": admin_id,
"name": "New budget name",
}
response = requests.post(budgets_url, json=budget_data, **admin_credentials)
assert response.status_code == 201
created_budget_url = response.json()["url"]
response = requests.get(created_budget_url, **admin_credentials)
assert response.status_code == 200
response = response.json()
assert response["owner"] == budget_data["owner"]
assert response["name"] == budget_data["name"]
def test_add_income(create_budget):
created_budget_url = create_budget["url"]
budget_id = int(created_budget_url.split("/")[-2])
income_data = {"category": IncomeCategoryEnum.EARNED_INCOME, "amount": 1000.00, "budget": budget_id}
response = requests.post(incomes_url, json=income_data, **admin_credentials)
assert response.status_code == 201
response = response.json()
assert income_data["category"] == response["category"]
assert float(income_data["amount"]) == float(response["amount"])
assert income_data["budget"] == response["budget"]
def test_add_expense(create_budget):
created_budget_url = create_budget["url"]
budget_id = int(created_budget_url.split("/")[-2])
expense_data = {"category": ExpensesCategoryEnum.SAVING, "amount": 950.21, "budget": budget_id}
response = requests.post(expenses_url, json=expense_data, **admin_credentials)
assert response.status_code == 201
response = response.json()
assert expense_data["category"] == response["category"]
assert float(expense_data["amount"]) == float(response["amount"])
assert expense_data["budget"] == response["budget"]
def test_add_expense_with_incorrect_category(create_budget):
created_budget_url = create_budget["url"]
budget_id = int(created_budget_url.split("/")[-2])
expense_data = {"category": "incorrect_category", "amount": 950.21, "budget": budget_id}
response = requests.post(expenses_url, json=expense_data, **admin_credentials)
assert response.status_code == 400
assert response.json() == {"category": ['"incorrect_category" is not a valid choice.']}
def test_filtering_expense(create_budget):
created_budget_url = create_budget["url"]
budget_id = int(created_budget_url.split("/")[-2])
expense_data_1 = {"category": ExpensesCategoryEnum.SAVING, "amount": 950.21, "budget": budget_id}
expense_data_2 = {"category": ExpensesCategoryEnum.PERSONAL, "amount": 950.21, "budget": budget_id}
response_1 = requests.post(expenses_url, json=expense_data_1, **admin_credentials)
assert response_1.status_code == 201
response_1 = response_1.json()
response_2 = requests.post(expenses_url, json=expense_data_2, **admin_credentials)
assert response_2.status_code == 201
response_2 = response_2.json()
response = requests.get(f"{expenses_url}?category={ExpensesCategoryEnum.SAVING}", **admin_credentials)
assert response.status_code == 200
response = response.json()
responses_url = [expense["url"] for expense in response["results"]]
assert response_1["url"] in responses_url
assert response_2["url"] not in responses_url
|
MaciejChalusiak/FamilyBudget
|
budget/tests.py
|
tests.py
|
py
| 3,755 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "common.tests_fixtures.fixtures.base_url",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "common.tests_fixtures.fixtures.base_url",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "common.tests_fixtures.fixtures.base_url",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "common.tests_fixtures.fixtures.admin_id",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "requests.post",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "common.tests_fixtures.fixtures.admin_credentials",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "pytest.fixture",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "common.tests_fixtures.fixtures.admin_id",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "requests.post",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "common.tests_fixtures.fixtures.admin_credentials",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "common.tests_fixtures.fixtures.admin_credentials",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "budget.enums.IncomeCategoryEnum.EARNED_INCOME",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "budget.enums.IncomeCategoryEnum",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "requests.post",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "common.tests_fixtures.fixtures.admin_credentials",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "budget.enums.ExpensesCategoryEnum.SAVING",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "budget.enums.ExpensesCategoryEnum",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "requests.post",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "common.tests_fixtures.fixtures.admin_credentials",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "requests.post",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "common.tests_fixtures.fixtures.admin_credentials",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "budget.enums.ExpensesCategoryEnum.SAVING",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "budget.enums.ExpensesCategoryEnum",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "budget.enums.ExpensesCategoryEnum.PERSONAL",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "budget.enums.ExpensesCategoryEnum",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "requests.post",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "common.tests_fixtures.fixtures.admin_credentials",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "requests.post",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "common.tests_fixtures.fixtures.admin_credentials",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "budget.enums.ExpensesCategoryEnum.SAVING",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "budget.enums.ExpensesCategoryEnum",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "common.tests_fixtures.fixtures.admin_credentials",
"line_number": 90,
"usage_type": "name"
}
] |
30170732214
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from PyPDF2 import PdfWriter, PdfReader
import io
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.pdfgen.canvas import Canvas
from reportlab.lib import pagesizes
# ======== Plotting Util ========
# assign numbers for sorting when combining outputs
export_counter = 1
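# Plot one or two amplitude series against time (optionally on a shared axis), overlay optional dot series, and export PNG/PDF copies.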
def plot_amplitude_data(plot_title: str, axis1_name: str, resolution, data1, data1dots: list = [None], axis2_name: str = "", data2: list = [None], data2dots: list = [None], graph_on_same_axis: bool = False, export: bool = True, custom_prefix: str = ""):
global export_counter
x = np.linspace(0, len(data1) / resolution, len(data1))
plt.figure()
fig, ax = plt.subplots()
ax.plot(x, data1, "-b", label="data1")
if len(data1dots) > 1 and data1dots[0] != None:
ax.plot(x, data1dots, ".", color="#55AAFF", label="data1 dots")
ax.set_xlabel("Time passed [s]")
ax.set_ylabel(axis1_name, color="blue")
# set the x-spine
ax.spines['left'].set_position('zero') # type: ignore
# turn off the right spine/ticks
ax.spines['right'].set_color('none')
ax.yaxis.tick_left()
# set the y-spine
ax.spines['bottom'].set_position('zero') # type: ignore
# turn off the top spine/ticks
ax.spines['top'].set_color('none')
ax.xaxis.tick_bottom()
if len(data2) > 1 and data2[0] != None:
ax2 = ax
if not graph_on_same_axis:
ax2 = ax.twinx()
ax2.plot(x, data2, "-r", label="data2")
if len(data2dots) > 1 and data2dots[0] != None:
ax2.plot(x, data2dots, ".", color='#FFA500', label="data2 dots")
ax2.set_xlabel("Time passed [s]")
ax2.set_ylabel(axis2_name, color="red")
plt.title(plot_title)
if export:
name = plot_title.lower().replace(" ", "_")
plt.savefig(
f"summarized_plots/png/({custom_prefix}a_{export_counter}){name}.png")
plt.savefig(
f"summarized_plots/pdf/({custom_prefix}a_{export_counter}){name}.pdf")
export_counter += 1
plt.show()
export_counter = 1
def plot_graph(plot_title: str, axis_name: str, points_x, points_val, graph_x, graph_y, y_axis_limit, export: bool = True, custom_prefix: str = ""):
"""
Usage example:
>>> t = np.arange(0, 5, 0.2)
>>> plot_graph("", "", ..., ..., t, t ** 2)
"""
global export_counter
plt.figure()
fig, ax = plt.subplots()
ax.plot(points_x, points_val, ".", color="#55AAFF", label="points")
ax.plot(graph_x, graph_y, "-r", label="function")
ax.set_ylim(ymax=y_axis_limit)
ax.set_xlabel("Points [1]")
ax.set_ylabel(axis_name, color="blue")
plt.title(plot_title)
if export:
name = plot_title.lower().replace(" ", "_")
plt.savefig(
f"summarized_plots/png/({custom_prefix}b_{export_counter}){name}.png")
plt.savefig(
f"summarized_plots/pdf/({custom_prefix}b_{export_counter}){name}.pdf")
export_counter += 1
plt.show()
def plot_4_curves__vs_time(data1, data2, data3, data4, steps_per_second, y_axis_title):
x1 = np.linspace(0, len(data1) / steps_per_second, len(data1))
x2 = np.linspace(0, len(data2) / steps_per_second, len(data2))
x3 = np.linspace(0, len(data3) / steps_per_second, len(data3))
x4 = np.linspace(0, len(data4) / steps_per_second, len(data4))
plt.figure()
fig, ax = plt.subplots()
ax.plot(x1, data1)
ax.plot(x2, data2)
ax.plot(x3, data3)
ax.plot(x4, data4)
ax.set_xlabel("Verstrichene Zeit [s]")
ax.set_ylabel(y_axis_title)
plt.title(f"{y_axis_title} gegen Zeit")
plt.show()
def create_pdf_text_page(filename: str, text: str, page_size=pagesizes.landscape(pagesizes.A5)):
global A5
# PDF page with info data
# src: https://stackoverflow.com/a/17538003/19474335
packet = io.BytesIO()
cvs = Canvas(packet, bottomup=False, pagesize=page_size)
# utf-8 encoding support: https://stackoverflow.com/a/17011377/19474335
pdfmetrics.registerFont(TTFont('Verdana', 'Verdana.ttf'))
cvs.setFont("Verdana", 11)
line_height = 15
y_counter = 2 * line_height
for line in text.split("\n"):
cvs.drawString(40, y_counter, line)
y_counter += line_height
cvs.save()
# move to the beginning of the BytesIO buffer
# packet.seek(0)
new_pdf = PdfReader(packet)
with open(filename.replace(".pdf", "") + ".pdf", "wb") as outStream:
output = PdfWriter()
output.add_page(new_pdf.pages[0])
output.write(outStream)
|
vexplained/JugendForscht2022
|
programming/python-analysis/plotting_util.py
|
plotting_util.py
|
py
| 4,641 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.linspace",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "reportlab.lib.pagesizes.landscape",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "reportlab.lib.pagesizes",
"line_number": 115,
"usage_type": "name"
},
{
"api_name": "reportlab.lib.pagesizes.A5",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "io.BytesIO",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "reportlab.pdfgen.canvas.Canvas",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "reportlab.pdfbase.pdfmetrics.registerFont",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "reportlab.pdfbase.pdfmetrics",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "reportlab.pdfbase.ttfonts.TTFont",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "PyPDF2.PdfReader",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "PyPDF2.PdfWriter",
"line_number": 140,
"usage_type": "call"
}
] |
6966794859
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from kazoo.client import KazooClient
__name__ = "weichigong"
__version__ = '1.0.3'
__author__ = 'dashixiong'
__author_email__ = '[email protected]'
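# Thin ZooKeeper-backed config store; keys are namespaced under /<app>/<env>/<path>.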
class zconfig:
def __init__(self, zkHosts, app, env):
self.app = app
self.env = env
self.client = KazooClient(hosts=zkHosts)
self.client.start()
def getPath(self, path):
return os.path.join('/', self.app, self.env, path)
def set(self, path, value):
fullPath = self.getPath(path)
self.client.ensure_path(fullPath)
self.client.set(fullPath, value)
def get(self, path):
fullPath = self.getPath(path)
return self.client.get(fullPath)[0].decode('utf-8')
|
perfeelab/weichigong
|
weichigong/__init__.py
|
__init__.py
|
py
| 764 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "kazoo.client.KazooClient",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 22,
"usage_type": "attribute"
}
] |
31209257710
|
import uuid
from random import randint
from src.infratructure.json_parser import JsonParser
from src.infratructure.serializable_object import SerializableObject
class PersonModel(SerializableObject):
def __init__(self, id: int, nick: str, photo: str, name: str = None):
self.id = id
self.nick = nick
self.photo = photo
self.name = name
@classmethod
def random(cls):
id = randint(0, 10)
nick = str(uuid.uuid4())
photo = str(uuid.uuid4())
name = str(uuid.uuid4())
return cls(id=id, nick=nick, photo=photo, name=name)
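# Parse a nested JSON payload: id, nick and photo are read from the 'member' object.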
@classmethod
def from_json(cls, json):
id = JsonParser.try_get_parameter_with_sub_name(json, "member", "id")
nick = JsonParser.try_get_parameter_with_sub_name(json, "member", "name")
photo = JsonParser.try_get_parameter_with_two_sub_name(json, "member", "photo", "highres_link")
return cls(id=id, nick=nick, photo=photo, name=None)
|
GDGPetropolis/backend-event-checkin
|
src/application/models/person_model.py
|
person_model.py
|
py
| 978 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "src.infratructure.serializable_object.SerializableObject",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "src.infratructure.json_parser.JsonParser.try_get_parameter_with_sub_name",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "src.infratructure.json_parser.JsonParser",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "src.infratructure.json_parser.JsonParser.try_get_parameter_with_sub_name",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "src.infratructure.json_parser.JsonParser",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "src.infratructure.json_parser.JsonParser.try_get_parameter_with_two_sub_name",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "src.infratructure.json_parser.JsonParser",
"line_number": 28,
"usage_type": "name"
}
] |
31569881800
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from corai_util.tools.src.function_file import is_empty_file
from data_input.json.parameter_loader import fetch_param_json_loader_simulation, fetch_param_json_loader_itideep
from root_dir import linker_path_to_result_file
from src.estim_hawkes.estim_hawkes import Estim_hawkes
sns.set()
STR_CONFIG = "MSE"
(STR_CONFIG, NB_SIMUL, SEED, UNDERLYING_FUNCTION_NUMBER, _, KERNEL_DIVIDER,
NB_DIFF_TIME_ESTIM, DIM, STYL, NB_POINTS_TT, id_hp, parameters, t0, time_batch,
fct_parameters, true_breakpoints, _, _, _) = fetch_param_json_loader_simulation(False, STR_CONFIG)
(L, R, h, l, CONSIDERED_PARAM, ALL_KERNELS_DRAWN,
TYPE_ANALYSIS, NUMBER_OF_BREAKPOINTS, MODEL,
MIN_SIZE, WIDTH) = fetch_param_json_loader_itideep(flagprint=True, str_config=STR_CONFIG)
# should match the data given in the script.sh
NB_T_MAX = 10 # from 1 to 10.
NB_TH_OF_CURRENT_ESTIMATION = 2 # any int > 0. Represents the refinement of the ITiDeEP.
# 1 is the first naive estimation.
# The number given is the number of lines on the plot / nb of repetition of the estimation process undergone.
# Only possible to plot all the lines (1, 2...) together and not a subset of it not including the lower part.
#########
LIST_T_MAX = np.linspace(6000, 33000, NB_T_MAX)
#######################################################
# TODO explain gather result in readme + explain MSE pipeline.
# We use this file to gather the estimation together (gather function) and then plot the curve of the MSE.
matrix_err_tmax_APE = np.zeros((NB_TH_OF_CURRENT_ESTIMATION, len(LIST_T_MAX)))
matrix_err_tmax_SPE = np.zeros((NB_TH_OF_CURRENT_ESTIMATION, len(LIST_T_MAX)))
iter_refinement = NB_TH_OF_CURRENT_ESTIMATION
while iter_refinement > 0: # we collect the data from
# NB_TH_OF_CURRENT_ESTIMATION to 1 by reducing by 1 at every iteration.
for i_tmax in range(len(LIST_T_MAX)):
######################
# gather results of previous estimation for a given T max
######################
path_result_directory = linker_path_to_result_file(["MSE",
f"{STR_CONFIG}_res_{iter_refinement}",
f"data_{i_tmax}", ""])
assert not is_empty_file(path_result_directory), \
f"file must contain some data. Directory {path_result_directory} is empty."
list_estim_hp = Estim_hawkes.folder_csv2list_estim(path_result_directory)
estim_hp = Estim_hawkes.merge(list_estim_hp) # new estim gathered result
path_super_result = linker_path_to_result_file(
["MSE",
f"{STR_CONFIG}_res_{iter_refinement}",
f"data_together_{i_tmax}",
f"results_together.csv"])
estim_hp.to_csv(path_super_result) # saved gather result
######################
# compute error:
######################
path_result_res = linker_path_to_result_file(
["MSE", f"{STR_CONFIG}_res_{iter_refinement}", f"data_together_{i_tmax}", "results_together.csv"])
print("Reading: ", path_result_res)
estim_hp = Estim_hawkes.from_csv(path_result_res)
estim_hp.add_SPE_APE_col() # computed the SRE per parameter
groupby_param, keys = estim_hp.groupby(['parameter', 'm', 'n'])
total_SPE_APE = (groupby_param.get_group(('alpha', 0, 0))[["time estimation", 'SPE', 'APE']]
.sort_values(by="time estimation").reset_index(drop=True)) # a copy is made
# : we create a container where the error is aggregated.
total_SPE_APE['SPE'] = 0 # we empty the values inside the column
total_SPE_APE['APE'] = 0 # we empty the values inside the column
for key in keys:
ordered_SPE_APE = (groupby_param.get_group(key)[["time estimation", 'SPE', 'APE']]
.sort_values(by="time estimation").reset_index(drop=True))
# sort to be sure we add the correct values together, drop index for prettiness.
total_SPE_APE['SPE'] += ordered_SPE_APE['SPE']
total_SPE_APE['APE'] += ordered_SPE_APE['APE']
# MISRE = total_SRE.mean()["RSE"] # this is wrong. We need to compute it by hand.
# It does not account for non converging estimations.
total_SPE_APE_grouped = total_SPE_APE.groupby("time estimation") # we groupby so we compute the integral
MISPE = 0
MIAPE = 0
# compute the mean squared error and compute the mean absolute error
for time in total_SPE_APE_grouped.groups:
average_per_time = total_SPE_APE_grouped.get_group(time).mean()
MISPE += average_per_time['SPE'] / len(total_SPE_APE_grouped.groups)
MIAPE += average_per_time['APE'] / len(total_SPE_APE_grouped.groups)
matrix_err_tmax_SPE[iter_refinement - 1, i_tmax] = MISPE # store result
matrix_err_tmax_APE[iter_refinement - 1, i_tmax] = MIAPE # store result
iter_refinement -= 1
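# Flatten the error matrices into a long-format dataframe (one row per refinement level and T max) for the seaborn line plots.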
dict_result = {"MISPE": matrix_err_tmax_SPE.flatten(),
"MIAPE": matrix_err_tmax_APE.flatten(),
"nb application ITiDeEP": np.repeat(range(NB_TH_OF_CURRENT_ESTIMATION), NB_T_MAX),
"T max": np.tile(LIST_T_MAX, NB_TH_OF_CURRENT_ESTIMATION)}
data_err = pd.DataFrame(dict_result)
fig, ax = plt.subplots(2, 1)
sns.lineplot(x="T max", y="MISPE",
hue="nb application ITiDeEP", marker='o',
legend='full', ci=None, err_style="band",
palette='Dark2', ax=ax[0],
data=data_err)
sns.lineplot(x="T max", y="MIAPE",
hue="nb application ITiDeEP", marker='o',
legend='full', ci=None, err_style="band",
palette='Dark2', ax=ax[1],
data=data_err)
path_save_plot = linker_path_to_result_file(["MSE", f"MSE_result_{NB_TH_OF_CURRENT_ESTIMATION}" + '.png'])
fig.savefig(path_save_plot, dpi=500)
plt.show()
|
Code-Cornelius/ITiDeEP
|
mse/estimation_MSE_plot.py
|
estimation_MSE_plot.py
|
py
| 6,145 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "seaborn.set",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "data_input.json.parameter_loader.fetch_param_json_loader_simulation",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "data_input.json.parameter_loader.fetch_param_json_loader_itideep",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "root_dir.linker_path_to_result_file",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "corai_util.tools.src.function_file.is_empty_file",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "src.estim_hawkes.estim_hawkes.Estim_hawkes.folder_csv2list_estim",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "src.estim_hawkes.estim_hawkes.Estim_hawkes",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "src.estim_hawkes.estim_hawkes.Estim_hawkes.merge",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "src.estim_hawkes.estim_hawkes.Estim_hawkes",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "root_dir.linker_path_to_result_file",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "root_dir.linker_path_to_result_file",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "src.estim_hawkes.estim_hawkes.Estim_hawkes.from_csv",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "src.estim_hawkes.estim_hawkes.Estim_hawkes",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "numpy.repeat",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "numpy.tile",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "seaborn.lineplot",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "seaborn.lineplot",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "root_dir.linker_path_to_result_file",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 121,
"usage_type": "name"
}
] |
6942571337
|
from otree.api import *
from settings import SESSION_CONFIGS
doc = """
Your app description
"""
class Constants(BaseConstants):
name_in_url = 'Intro'
players_per_group = None
num_rounds = 1
max_payoff = "£2.20"
money = "£3.00"
total_balls = "five"
no_task_balls = "three"
# create a vector to randomise treatment
num_participants = 350 # note this should be substantially larger than the number of participants I actually intend to hire, because some Prolificers will join the session but not complete
num_blocks = -1*( -num_participants // 14) # I'm gonna create blocks within which the treatment is exactly balanced (2 in LC, 2 in LN, 5 in HC, 5 in HN). Then add the blocks together to get to the desired number of participants.
import random
treatment_block = list(range(1,15))
treatment_assignment = []
for i in range(num_blocks):
treatment_assignment = treatment_assignment + treatment_block
random.shuffle(treatment_assignment)
for i in range(len(treatment_assignment)):
if treatment_assignment[i] <= 2:
treatment_assignment[i] = "LC"
elif treatment_assignment[i] > 2 and treatment_assignment[i] <= 4:
treatment_assignment[i] = "LN"
elif treatment_assignment[i] > 4 and treatment_assignment[i] <= 9:
treatment_assignment[i] = "HC"
elif treatment_assignment[i] >9:
treatment_assignment[i] = "HN"
class Subsession(BaseSubsession):
pass
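# Assign each player a treatment from the pre-shuffled balanced list and pick two random practice maths questions.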
def creating_session(subsession):
import itertools, random
treatment_assignment = itertools.cycle(Constants.treatment_assignment)
for player in subsession.get_players():
# determine treatment
player.participant.treatment = next(treatment_assignment)
player.treatment = player.participant.treatment
# practice maths questions - randomly select two to show in instructions
practice_maths_qs_index = list(range(4))
random.shuffle(practice_maths_qs_index)
player.participant.mathspractice_q1 = practice_maths_qs_index[0]
player.participant.mathspractice_q2 = practice_maths_qs_index[1]
class Group(BaseGroup):
pass
class Player(BasePlayer):
ProlificID = models.StringField()
treatment = models.StringField()
start_epochtime = models.IntegerField()
start_clocktime = models.StringField()
# maths practice questions
q1 = models.StringField(
label = "A shop has an offer: buy 8 kiwis, and every extra kiwi after that is half price. A man goes to the shop and pays £4.50 for some kiwis. The full price of a kiwi is £0.50. How many does he buy?",
choices = [
"9",
"12",
"10",
"15"
],
widget = widgets.RadioSelectHorizontal,
blank=True)
q2 = models.StringField(
label = "A hairdresser has an offer: every third visit is free. They charge £48 for a haircut. Last year Sarah paid £144 for a haaircut. How many times did she go?",
choices = [
"Two times",
"Three times",
"Four times",
"Five times"
],
widget = widgets.RadioSelectHorizontal,
blank=True)
q3 = models.StringField(
label = "A woman walks from the bottom to the top of a hill. She starts at 9.40am and arrives at the top at 10.20 am. She takes a rest for ten minutes. Then she walks back down. On the way down she walks twice as fast as she did on the way up. What time is it when she reaches the bottom of the hill?",
choices = [
"11.20",
"10.40",
"10.50",
"11.10"
],
widget = widgets.RadioSelectHorizontal,
blank=True)
q4 = models.StringField(
label = "A trader buys a painting for £120 and sells it for £170. They pay a £10 transaction fee. Their profit expressed as a percentage of total cost is:",
choices = [
"50%",
"60%",
"80%",
"33%"
],
widget = widgets.RadioSelectHorizontal,
blank=True)
# PAGES
class Consent(Page):
def is_displayed(player):
# record time player entered application
import time
time_in = round(time.time())
player.start_epochtime = time_in
player.participant.start_epochtime = time_in
player.start_clocktime = time.strftime('%H:%M:%S', time.localtime(time_in))
return 1
class ProlificID(Page):
form_model = 'player'
form_fields = ['ProlificID']
class Introduction(Page):
form_model = 'player'
def get_form_fields(player: Player):
questions = ['q1','q2','q3','q4']
form_fields = [
questions[player.participant.mathspractice_q1]
]
return form_fields
page_sequence = [Consent, ProlificID, Introduction]
|
LiamOFoghlu/Receiver
|
Intro/__init__.py
|
__init__.py
|
py
| 5,072 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "random.shuffle",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "itertools.cycle",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "{'random': 'random'}.treatment_assignment",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "random.shuffle",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 122,
"usage_type": "call"
}
] |
2441674100
|
from flask import Flask, render_template, request
from pymysql import connections
import os
import boto3
from config import *
from datetime import date
from botocore.exceptions import ClientError
app = Flask(__name__)
bucket = custombucket
region = customregion
db_conn = connections.Connection(
host=customhost,
port=3306,
user=customuser,
password=custompass,
db=customdb
)
output = {}
table = 'employee'
@app.route("/", methods=['GET', 'POST'])
@app.route("/index")
def home():
return render_template('Login.html')
@app.route("/addemp", methods=['GET'])
def addemp():
return render_template('AddEmp.html', Title="Add to Employee Database")
@app.route("/updateemp", methods=['GET'])
def updateemp():
return render_template('UpdateEmp.html', Title="Update Employee Database")
@app.route("/about", methods=['GET','POST'])
def about():
return "Hello, Flask is running"
@app.route("/leave", methods=['GET'])
def leave():
return render_template('AddLeave.html')
#get employee codes
@app.route("/getemp", methods=['GET','POST'])
def GetEmp():
return render_template('GetEmp.html')
@app.route("/addleave", methods=['POST'])
def AddLeave():
leave_id = request.form['leave_id']
emp_id = request.form['emp_id']
date = request.form['date']
reason = request.form['reason']
prove = request.files['prove_file']
insert_sql = "INSERT INTO leaves VALUES (%s, %s, %s, %s)"
cursor = db_conn.cursor()
if prove.filename == "":
return "Please select a file"
try:
cursor.execute(insert_sql, (leave_id, emp_id, date, reason))
db_conn.commit()
#emp_name = "" + first_name + " " + last_name
# Upload image file in S3 #
prove_image_in_s3 = "leave_id-" + str(leave_id) + "_image_file"
s3 = boto3.resource('s3')
try:
print("Data inserted in MySQL RDS... uploading image to S3...")
s3.Bucket(custombucket).put_object(Key=prove_image_in_s3, Body=prove)
bucket_location = boto3.client('s3').get_bucket_location(Bucket=custombucket)
s3_location = (bucket_location['LocationConstraint'])
if s3_location is None:
s3_location = ''
else:
s3_location = '-' + s3_location
object_url = "https://s3{0}.amazonaws.com/{1}/{2}".format(
s3_location,
custombucket,
prove_image_in_s3)
except Exception as e:
return str(e)
finally:
cursor.close()
print("all modification done...")
return render_template('AddLeaveOutput.html', name=emp_id)
@app.route("/login", methods=['POST'])
def login():
id = request.form['admin_id']
password = request.form['admin_password']
sqllogin = "SELECT COUNT(*) FROM admin WHERE password= %s AND username= %s"
cursor = db_conn.cursor()
try:
cursor.execute(sqllogin, (password, id))
valid = cursor.fetchall()
db_conn.commit()
except Exception as e:
return str(e)
finally:
cursor.close()
if valid[-1][-1] == 1:
print("Login Success")
return render_template('AddEmp.html')
else :
print("Invalid User Credentials")
return render_template('Login.html')
@app.route("/addemp", methods=['POST'])
def AddEmp():
emp_id = request.form['emp_id']
first_name = request.form['first_name']
last_name = request.form['last_name']
pri_skill = request.form['pri_skill']
location = request.form['location']
emp_image_file = request.files['emp_image_file']
insert_sql = "INSERT INTO employee VALUES (%s, %s, %s, %s, %s)"
cursor = db_conn.cursor()
if emp_image_file.filename == "":
return "Please select a file"
try:
cursor.execute(insert_sql, (emp_id, first_name, last_name, pri_skill, location))
db_conn.commit()
emp_name = first_name + " " + last_name
# Upload image file in S3 #
emp_image_file_name_in_s3 = "emp-id-" + str(emp_id) + "_image_file"
s3 = boto3.resource('s3')
try:
print("Data inserted in MySQL RDS... uploading image to S3...")
s3.Bucket(custombucket).put_object(Key=emp_image_file_name_in_s3, Body=emp_image_file)
bucket_location = boto3.client('s3').get_bucket_location(Bucket=custombucket)
s3_location = (bucket_location['LocationConstraint'])
if s3_location is None:
s3_location = ''
else:
s3_location = '-' + s3_location
object_url = "https://s3{0}.amazonaws.com/{1}/{2}".format(
s3_location,
custombucket,
emp_image_file_name_in_s3)
except Exception as e:
return str(e)
finally:
cursor.close()
print("all modification done...")
return render_template('AddEmpOutput.html', name=emp_name)
@app.route("/fetchdata", methods=['POST'])
def GetEmpOutput():
try:
emp_id = request.form['emp_id']
if(emp_id == ""):
raise ValueError("Please enter a valid employee id")
except ValueError:
emp_id, first_name, last_name, pri_skill, location = "N/A","N/A","N/A","N/A","N/A"
image_link = "../static/images/getUser.png"
return render_template('GetEmpOutput.html', id=emp_id, fname=first_name, lname=last_name, interest=pri_skill, location=location, image_url=image_link)
select_sql = "SELECT * FROM employee WHERE emp_id = %s"
cursor = db_conn.cursor()
try:
cursor.execute(select_sql, (emp_id))
db_conn.commit()
(emp_id, first_name, last_name, pri_skill, location) = cursor.fetchone()
emp_image_file_name_in_s3 = "emp-id-" + str(emp_id) + "_image_file"
try:
# Generate temporary URL for image file in S3
image_link = boto3.client('s3').generate_presigned_url('get_object',
Params={'Bucket': custombucket,
'Key': emp_image_file_name_in_s3},
ExpiresIn=3600)
except ClientError:
image_link = "../static/images/getUser.png"
finally:
cursor.close()
return render_template('GetEmpOutput.html', id=emp_id, fname=first_name, lname=last_name, interest=pri_skill, location=location, image_url=image_link)
#update employee code
@app.route("/updateemp", methods=['POST'])
def UpdateEmp():
emp_id = request.form['emp_id']
first_name = request.form['first_name']
last_name = request.form['last_name']
pri_skill = request.form['pri_skill']
location = request.form['location']
emp_image_file = request.files['emp_image_file']
update_sql = "UPDATE employee SET first_name = %s, last_name = %s, pri_skill = %s, location = %s WHERE emp_id = %s"
values = (first_name, last_name, pri_skill, location, emp_id)
cursor = db_conn.cursor()
try:
cursor.execute(update_sql, values)
db_conn.commit()
emp_name = "" + first_name + " " + last_name
# Upload image file in S3 #
emp_image_file_name_in_s3 = "emp-id-" + str(emp_id) + "_image_file"
s3 = boto3.resource('s3')
try:
print("Data updated in MySQL RDS... updating image to S3...")
s3.Object(custombucket, emp_image_file_name_in_s3).delete()
s3.Bucket(custombucket).put_object(Key=emp_image_file_name_in_s3, Body=emp_image_file)
bucket_location = boto3.client('s3').get_bucket_location(Bucket=custombucket)
s3_location = (bucket_location['LocationConstraint'])
if s3_location is None:
s3_location = ''
else:
s3_location = '-' + s3_location
object_url = "https://s3{0}.amazonaws.com/{1}/{2}".format(
s3_location,
custombucket,
emp_image_file_name_in_s3)
except Exception as e:
return str(e)
finally:
cursor.close()
print("All modification done...")
return render_template('UpdateEmp.html', name=emp_name)
# delete employee code
# TODO: HTML page for delete employee
@app.route("/deletemp", methods=['POST'])
def DeleteEmp():
emp_id = request.form['emp_id']
delete_sql = "DELETE FROM employee WHERE emp_id = %s"
cursor = db_conn.cursor()
try:
cursor.execute(delete_sql, (emp_id))
db_conn.commit()
print("Data deleted from MySQL RDS... deleting image from S3...")
emp_image_file_name_in_s3 = "emp-id-" + str(emp_id) + "_image_file"
s3 = boto3.resource('s3')
s3.Object(custombucket, emp_image_file_name_in_s3).delete()
finally:
cursor.close()
print("all modification done...")
return "Deleted employee with id: " + emp_id
@app.route("/attendance", methods=['GET'])
def takeattendance():
today = date.today()
date_time = today.strftime("%d/%m/%Y")
return render_template('Attendance.html',Title="Attendance", date=date_time)
@app.route("/attendance", methods=['POST'])
def attendance():
cursor = db_conn.cursor()
emp_id = request.form['emp_id']
today = date.today()
date_time = today.strftime("%d/%m/%Y")
select_sql = "SELECT emp_id, first_name, last_name FROM employee WHERE emp_id = %s"
insert_sql = "INSERT INTO attandance VALUES (%s, %s, %s, %s)"
try:
cursor.execute(select_sql, (emp_id))
(emp_id, first_name, last_name) = cursor.fetchone()
cursor.execute(insert_sql, (emp_id, first_name, last_name, date_time))
db_conn.commit()
message = "Attendance marked for " + emp_id + " " + first_name + " " + last_name
except Exception as e:
emp_id = "Employee not found"
message = "Employee not found"
finally:
cursor.close()
return render_template('Attendance.html', Title="Attendance", date=date_time, message=message)
@app.route("/viewatt", methods=['GET'])
def viewatt():
cursor = db_conn.cursor()
select_sql = "SELECT * FROM attandance"
try:
cursor.execute(select_sql)
data = cursor.fetchall()
finally:
cursor.close()
return render_template('ViewAttandance.html', Title="Attendance", data=data)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=80, debug=True)
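GetEmpOutput above hands the browser a presigned S3 URL instead of exposing the bucket directly; a self-contained sketch of that call, with a placeholder bucket and key rather than the app's real values, looks like this:
import boto3
from botocore.exceptions import ClientError
def presigned_image_url(bucket, key, expires=3600):
    # Time-limited GET link; avoids making the object public.
    try:
        return boto3.client('s3').generate_presigned_url(
            'get_object',
            Params={'Bucket': bucket, 'Key': key},
            ExpiresIn=expires)
    except ClientError:
        return None  # caller can fall back to a default image
# e.g. presigned_image_url('example-bucket', 'emp-id-1_image_file')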
|
Darkless123/aws-live
|
EmpApp.py
|
EmpApp.py
|
py
| 10,617 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pymysql.connections.Connection",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pymysql.connections",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "datetime.date",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "flask.request.files",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "datetime.date",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "boto3.resource",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "flask.request.files",
"line_number": 133,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "boto3.resource",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 179,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "botocore.exceptions.ClientError",
"line_number": 204,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 216,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 216,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 218,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 219,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 219,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 220,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 220,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 221,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "flask.request.files",
"line_number": 222,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 222,
"usage_type": "name"
},
{
"api_name": "boto3.resource",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 268,
"usage_type": "name"
},
{
"api_name": "boto3.resource",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "datetime.date.today",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 288,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 295,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 295,
"usage_type": "name"
},
{
"api_name": "datetime.date.today",
"line_number": 296,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 296,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 324,
"usage_type": "call"
}
] |
31969871422
|
from django.contrib.auth import get_user_model
from django.db import transaction
from django.db.models import Q
from rest_framework import serializers
from rest_framework.exceptions import ValidationError, NotFound
from rest_framework.generics import get_object_or_404
from versatileimagefield.serializers import VersatileImageFieldSerializer
User = get_user_model()
class PrivateMeSerializer(serializers.ModelSerializer):
image = VersatileImageFieldSerializer(
required=False,
sizes=[
("original", "url"),
("at256", "crop__256x256"),
("at512", "crop__512x512"),
],
)
class Meta:
model = User
fields = [
"first_name",
"last_name",
"username",
"slug",
"phone",
"image",
"email",
]
read_only_fields = ["slug", "phone","username",]
|
seefat/harvest_hub_apis
|
core/rest/serializers/me.py
|
me.py
|
py
| 928 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "versatileimagefield.serializers.VersatileImageFieldSerializer",
"line_number": 15,
"usage_type": "call"
}
] |
73535540349
|
from django.urls import path
from . import views
app_name = 'party'
urlpatterns = [
#party
# Party URLs
path('create/<int:tournament_pk>/', views.PartyCreateView.as_view(), name='party_create'),
path('update/<int:pk>/', views.PartyUpdateView.as_view(), name='party_update'),
path('details/<int:pk>/', views.PartyDetailView.as_view(), name='party_details'),
path('parties/', views.PartyListView.as_view(), name='party_list'),
path('<int:pk>/', views.PartyDetailView.as_view(), name='party_detail'),
path('join/<int:party_pk>/', views.JoinPartyView.as_view(), name='join_party'),
path('leave/<int:party_pk>/', views.LeavePartyView.as_view(), name='leave_party'),
# URL pattern for closing a party
path('close/<int:pk>/', views.ClosePartyView.as_view(), name='close_party'),
# Delete an existing party
path('delete/<int:pk>/', views.PartyDeleteView.as_view(), name='party_delete'),
]
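Because app_name is 'party', these routes are reversed through the namespace; a small illustration (the pk values are arbitrary and require a Django project that includes this urlconf):
from django.urls import reverse
detail_url = reverse('party:party_detail', kwargs={'pk': 42})
join_url = reverse('party:join_party', kwargs={'party_pk': 42})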
|
theAcer/wejprod
|
apps/party/urls.py
|
urls.py
|
py
| 942 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 23,
"usage_type": "call"
}
] |
72340854587
|
import os
import csv
import json
import tweepy
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
from tweepy_auth import tweepy_auth
'''
today = datetime.today()
week_ago = today - timedelta(days=7)
week_ago_str = week_ago.strftime('%Y-%m-%d')
'''
auth = tweepy_auth()
api = tweepy.API(auth,
wait_on_rate_limit=True,
wait_on_rate_limit_notify=True)
tweets = tweepy.Cursor(api.search,
q=['#blacklivesmatter OR #blm'],
lang='en',
result_type='recent',
tweet_mode='extended',
count=100).items()
df = pd.DataFrame(columns=['id', 'created_at', 'full_text', 'favorite_count',
'retweet_count', 'hashtags'])
for tweet in tweets:
hashtags = []
for hashtag in tweet.entities['hashtags']:
hashtags.append(hashtag['text'])
print(tweet.created_at)
df = df.append({'id': tweet.id,
'created_at': tweet.created_at,
'full_text': tweet.full_text.encode('utf-8','ignore'),
'favorite_count': tweet.favorite_count,
'retweet_count': tweet.retweet_count,
'hashtags': hashtags},
ignore_index=True)
df['created_at'] = pd.to_datetime(df['created_at'])
print(df.head())
for name, group in df.groupby(pd.Grouper(key='created_at',freq='D')):
parsed_name = str(name).split(' ')[0].replace('-', '_')
print(parsed_name)
group.to_csv('./data/blm_'+ parsed_name +'.csv', index=False)
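The export loop above writes one CSV per calendar day via pd.Grouper; the grouping behaviour can be checked on synthetic timestamps without touching the Twitter API:
import pandas as pd
# Synthetic timestamps, not real tweet data.
demo = pd.DataFrame({
    'created_at': pd.to_datetime(['2020-06-01 10:00', '2020-06-01 23:59',
                                  '2020-06-02 00:01']),
    'id': [1, 2, 3],
})
for name, group in demo.groupby(pd.Grouper(key='created_at', freq='D')):
    print(str(name).split(' ')[0].replace('-', '_'), len(group))
# 2020_06_01 2
# 2020_06_02 1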
|
ConwayHsieh/BLM_tweets
|
tweepy_pandastry.py
|
tweepy_pandastry.py
|
py
| 1,444 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "tweepy_auth.tweepy_auth",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "tweepy.API",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "tweepy.Cursor",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pandas.Grouper",
"line_number": 50,
"usage_type": "call"
}
] |
7573771770
|
import os
import logging
from dotenv import load_dotenv
from flask import Flask, jsonify, request
from flask_cors import CORS
from flask_restful import Api, Resource, reqparse
from models.db.postgresDB import PostgresDB
from models.services.logger import get_module_logger
import models.services.flask_service as flask_service
load_dotenv()
app = Flask(__name__)
CORS(app, resources=r'/*')
parser = reqparse.RequestParser()
parser.add_argument('keywords', type=list)
@app.route('/', methods=['GET'])
def hello_server():
return jsonify({"info": "Server works"}), 200
@app.route('/articles', methods=['GET'])
def get_articles():
article_id = request.args.get("article_id", None)
return flask_service.get_articles(db=postgresDB, article_id=article_id)
#TODO: with a parameter
# @app.route('/articles', methods=['GET'])
# def get_articles():
# keywords = parser.parse_args()
# return keywords
# #return flask_service.get_articles(db=postgresDB, keywords=keywords)
@app.route('/articles', methods=['POST'])
def create_article():
data = request.json
return flask_service.create_article(db=postgresDB, data=data)
@app.route('/articles/<article_id>', methods=['PUT'])
def update_article(article_id):
data = request.json
return flask_service.update_article(db=postgresDB, article_id=article_id, data=data)
@app.route('/articles/<article_id>', methods=['DELETE'])
def delete_article(article_id):
return flask_service.delete_article(db=postgresDB, article_id=article_id,article_table=article_table)
@app.route('/categories', methods=['GET'])
def get_category():
category_id = request.args.get("category_id", None)
return flask_service.get_categories(db=postgresDB, category_id=category_id)
@app.route('/categories', methods=['POST'])
def create_categories():
data = request.json
return flask_service.create_category(db=postgresDB, data=data)
@app.route('/categories/<category_id>', methods=['PUT'])
def update_categories(category_id):
data = request.json
return flask_service.update_category(db=postgresDB, category_id=category_id, data=data)
@app.route('/categories/<category_id>', methods=['DELETE'])
def delete_categories(category_id):
return flask_service.delete_category(db=postgresDB, category_id=category_id,category_table=category_table)
@app.route('/comments', methods=['GET'])
def get_comment():
article_id = request.args.get("article_id", None)
author=request.args.get("author", None)
return flask_service.get_comments(db=postgresDB, article_id=article_id,author=author,comment_table=comment_table)
@app.route('/comments', methods=['POST'])
def create_comments():
data = request.json
return flask_service.create_comment(db=postgresDB, data=data)
@app.route('/comments/<comment_id>', methods=['PUT'])
def update_comments(comment_id):
data = request.json
return flask_service.update_comment(db=postgresDB, comment_id=comment_id, data=data)
@app.route('/comments/<comment_id>', methods=['DELETE'])
def delete_comments(comment_id):
return flask_service.delete_comment(db=postgresDB, comment_id=comment_id,comment_table=comment_table)
@app.route("/export", methods=['GET'])
def to_txt():
return flask_service.db_to_txt(db=postgresDB, article_table=article_table,
relation_category_article_table=relation_category_article_table,
category_table=category_table, comment_table=comment_table)
if __name__ == "__main__":
logger = get_module_logger(mod_name=__name__, log_path='./logs/app_logs.log', lvl=logging.DEBUG)
postgresDB = PostgresDB(db_host=os.environ.get("DB_HOST"), db_port=os.environ.get("DB_PORT"),
db_user=os.environ.get("POSTGRES_USER"), db_password=os.environ.get("POSTGRES_PASSWORD"),
db_name=os.environ.get("POSTGRES_DB"))
try:
article_table = postgresDB.get_table('article')
category_table = postgresDB.get_table('category')
comment_table = postgresDB.get_table('comment')
relation_category_article_table = postgresDB.get_table('relation_category_article')
logger.info('Got tables')
app.run(host='0.0.0.0', port=5000)
except Exception as e:
logger.exception(e)
logger.exception('Error, could not get tables from database')
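The commented-out TODO above sketches an /articles route filtered by keywords; one hedged way to accept repeated query parameters in Flask is request.args.getlist, shown here as a commented sketch because the route path and parameter name are assumptions and flask_service.get_articles would also need a matching keywords filter:
# @app.route('/articles/search', methods=['GET'])
# def search_articles():
#     # ?keyword=a&keyword=b -> ['a', 'b']
#     keywords = request.args.getlist("keyword")
#     return flask_service.get_articles(db=postgresDB, keywords=keywords)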
|
Mariusz94/Knowledge-base
|
backend/app.py
|
app.py
|
py
| 4,372 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "dotenv.load_dotenv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask_restful.reqparse.RequestParser",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "flask_restful.reqparse",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "models.services.flask_service.get_articles",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "models.services.flask_service",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "flask.request.json",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "models.services.flask_service.create_article",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "models.services.flask_service",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "flask.request.json",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "models.services.flask_service.update_article",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "models.services.flask_service",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "models.services.flask_service.delete_article",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "models.services.flask_service",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "models.services.flask_service.get_categories",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "models.services.flask_service",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "flask.request.json",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "models.services.flask_service.create_category",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "models.services.flask_service",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "flask.request.json",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "models.services.flask_service.update_category",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "models.services.flask_service",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "models.services.flask_service.delete_category",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "models.services.flask_service",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "flask.request.args.get",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "models.services.flask_service.get_comments",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "models.services.flask_service",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "flask.request.json",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "models.services.flask_service.create_comment",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "models.services.flask_service",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "flask.request.json",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "models.services.flask_service.update_comment",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "models.services.flask_service",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "models.services.flask_service.delete_comment",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "models.services.flask_service",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "models.services.flask_service.db_to_txt",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "models.services.flask_service",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "models.services.logger.get_module_logger",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "models.db.postgresDB.PostgresDB",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 109,
"usage_type": "attribute"
}
] |
7789722347
|
from tqdm import tqdm
import numpy as np
import torch
import torchvision.transforms as ttr
from torch.utils.data import DataLoader
import argparse
from aermanager import AERFolderDataset
from test_spiking import test_spiking
# Parameters
BATCH_SIZE = 256
parser = argparse.ArgumentParser()
parser.add_argument('--quantize_testing', action='store_true', default=False)
parser.add_argument('--max_batches', type=int, default=1000000)
opt = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# prepare dataset and dataloader
test_dataset = AERFolderDataset(
root='data/test/',
from_spiketrain=False,
transform=ttr.ToTensor(),
)
print("Number of testing frames:", len(test_dataset))
test_dataloader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=True)
def detach(activity):
for activations in activity:
for (i, activation) in enumerate(activations):
activations[i] = activation.item()
return np.array(activity)
# def compute_accuracy(output, target):
# _, predicted = torch.max(output, 1)
# acc = (predicted == target).sum().float() / len(target)
# return acc.cpu().numpy()
# def test(path, w_rescale=1.0):
# # Define model and learning parameters
# classifier = MNISTClassifier(quantize=opt.quantize_testing).to(device)
# # Load appropriate model
# state_dict = torch.load(path)
# # Do rescaling
# if w_rescale != 1.0:
# state_dict['seq.0.weight'] *= w_rescale
# classifier.load_state_dict(state_dict)
# # Set hooks
# activity_tracker = SynOpCounter(classifier.modules(), sum_activations=False)
# # Test network accuracy
# with torch.no_grad():
# classifier.eval()
# activity = []
# accuracy = []
# for batch_id, sample in enumerate(tqdm(test_dataloader)):
# if batch_id > opt.max_batches:
# break
# test_data, test_labels = sample
# test_data = test_data.to(device)
# output = classifier(test_data)
# accuracy.append(compute_accuracy(output, test_labels.to(device)))
# activity.append(activity_tracker())
# return np.mean(detach(activity), axis=0), np.mean(accuracy)
if __name__ == '__main__':
# test non-optimized model
baseline_activity, baseline_accuracy = test_spiking(
'models/nopenalty_0.0.pth', return_all_synops=True
)
# test optimized model
optimized_activity, optimized_accuracy = test_spiking(
'models/l1-fanout-qtrain_321289.514081772.pth',
return_all_synops=True
)
baseline_activity = baseline_activity[baseline_activity > 0]
optimized_activity = optimized_activity[optimized_activity > 0]
np.savez(
'opt_benchmark.npz',
baseline_activity=baseline_activity,
optimized_activity=optimized_activity,
baseline_accuracy=baseline_accuracy,
optimized_accuracy=optimized_accuracy
)
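The arrays written to opt_benchmark.npz can be reloaded later to compare the two models; a short follow-up sketch, assuming the script above has already produced the file:
import numpy as np
results = np.load('opt_benchmark.npz')
print('baseline accuracy :', results['baseline_accuracy'])
print('optimized accuracy:', results['optimized_accuracy'])
for i, (b, o) in enumerate(zip(results['baseline_activity'],
                               results['optimized_activity'])):
    print(f'layer {i}: {b:.3e} -> {o:.3e} synops')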
|
fgr1986/synoploss
|
mnist_dvs/optimization_benchmarking.py
|
optimization_benchmarking.py
|
py
| 2,996 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.device",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "aermanager.AERFolderDataset",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "test_spiking.test_spiking",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "test_spiking.test_spiking",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "numpy.savez",
"line_number": 99,
"usage_type": "call"
}
] |
3977236501
|
#!/usr/bin/env python3
from ddpg import Agent
import numpy as np
from ts_forecasting_env import ts_forecasting_env
import time
import matplotlib.pyplot as plt
import csv
import pandas as pd
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
import argparse
from ray import tune
from ray.tune.schedulers import ASHAScheduler
# Argument parser
parser = argparse.ArgumentParser()
parser.add_argument("--traj", type=int, default=1, help="choose trajectory")
args = parser.parse_args()
# Load and prepare data
############################## Define variables #########################################
TRAJECTORY = args.traj
SPLIT_RATE = 0.80 # split data into train and test data
#########################################################################################
# Open csv
file = open('allData/traj' + str(TRAJECTORY) + '_allData.csv')
# Read csv
csvreader = csv.reader(file)
# Store csv data in numpy ndarray
rows = []
for row in csvreader:
rows.append(row)
file.close()
data_ = np.array(rows, dtype=np.float64)
data_ = np.concatenate(data_)
# Data split
split_index = round(len(data_) * SPLIT_RATE)
train_data, test_data = data_[:split_index], data_[split_index:]
# Normalize data
max = np.max(data_)
min = np.min(data_)
TRAIN_DATA = (train_data - min) / (max - min)
TEST_DATA = (test_data - min) / (max - min)
# Run LSTM with tuning configurations
def tune_lstm(config):
# Training setup
############################## Define hyper parameters ##################################
LR_ACTOR = config["a_lr"]
LR_CRITIC = config["c_lr"]
TAU = 0.1
GAMMA = 0.9
BATCH_SIZE = config["bs"]
ACTOR_LAYER = config["layer"]
CRITIC_LAYER = config["layer"]
REPLAY_BUFFER_SIZE = 100000
HISTORICAL_DP = config["hdp"] # historical data points (length of state)
#########################################################################################
# Call environment
env = ts_forecasting_env(historical_dp=HISTORICAL_DP, data=TRAIN_DATA)
# Call agent
agent = Agent(alpha=LR_ACTOR, beta=LR_CRITIC, input_dims=[HISTORICAL_DP], tau=TAU,
gamma=GAMMA,batch_size=BATCH_SIZE, layer1_size=ACTOR_LAYER, n_actions=1,
layer2_size=CRITIC_LAYER, max_size=REPLAY_BUFFER_SIZE)
############################## Define training parameters ###############################
EPISODES = 15
MAX_STEPS = 1000
#########################################################################################
np.random.seed(0)
# Train the agent
for i in range(1, EPISODES + 1):
obs = env.reset()
done = False
reward = 0
for step in range(MAX_STEPS):
act = agent.choose_action(obs)
new_state, step_reward, done, _ = env.step(act)
agent.remember(obs, act, step_reward, new_state, int(done))
agent.learn()
reward += step_reward
obs = new_state
if done:
break
# Test the agent
pred = []
for i in range(len(TEST_DATA)):
state = np.array(TEST_DATA[0 + i:HISTORICAL_DP + i], dtype=np.float64)
action = agent.choose_action(state)
pred.append(action)
if HISTORICAL_DP + i == len(TEST_DATA):
break
pred = np.concatenate(pred)
pred = pd.Series(pred)
pred = pred * (max - min) + min
real = pd.Series(test_data[HISTORICAL_DP:])
# Report result to tuner
# MAE
tune.report(mean_accuracy=mean_absolute_error(real, pred))
# # MSE
# tune.report(mean_accuracy=mean_squared_error(real, pred, squared=False))
# Tuner configurations
config = {
"a_lr": tune.grid_search([0.001, 0.002, 0.003, 0.004, 0.005]),
"c_lr": tune.grid_search([0.001, 0.002, 0.003, 0.004, 0.005]),
"bs": tune.grid_search([2 ** i for i in range(5,8)]),
"layer": tune.grid_search([2 ** i for i in range(5,8)]),
"hdp": tune.grid_search([10, 15, 25]),
}
# Run tuner
analysis = tune.run(
tune_lstm,
resources_per_trial={"cpu": 12, "gpu": 1},
config=config,
mode="min"
)
print("Best config: ", analysis.get_best_config(metric="mean_accuracy"))
df = analysis.dataframe()
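Inputs are min-max scaled before training and predictions are mapped back with pred * (max - min) + min; that round trip can be sanity-checked in isolation on synthetic numbers (the real bounds come from the trajectory data):
import numpy as np
raw = np.array([3.0, 7.0, 11.0])
lo, hi = raw.min(), raw.max()
scaled = (raw - lo) / (hi - lo)      # [0.0, 0.5, 1.0]
restored = scaled * (hi - lo) + lo   # back to [3.0, 7.0, 11.0]
assert np.allclose(restored, raw)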
|
tiagomateus25/time-series-forecasting-ddpg
|
bvg_optimization.py
|
bvg_optimization.py
|
py
| 4,277 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "numpy.concatenate",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "ts_forecasting_env.ts_forecasting_env",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "ddpg.Agent",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "numpy.concatenate",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "ray.tune.report",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "ray.tune",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "sklearn.metrics.mean_absolute_error",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "ray.tune.grid_search",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "ray.tune",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "ray.tune.grid_search",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "ray.tune",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "ray.tune.grid_search",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "ray.tune",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "ray.tune.grid_search",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "ray.tune",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "ray.tune.grid_search",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "ray.tune",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "ray.tune.run",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "ray.tune",
"line_number": 126,
"usage_type": "name"
}
] |
10254372975
|
from multiprocessing import context
from django.shortcuts import render, redirect
from .models import *
# Create your views here.
def produk_list(request):
template_name = "produk_list.html"
group_produk = Circle_produk.objects.all()
context ={
"produk" : group_produk,
}
return render(request, template_name, context)
def tambah_barang(request):
template_name = "add_barang.html"
kategori = Kategori.objects.all()
if request.method == "POST":
input_nama = request.POST.get('nama')
input_jumlah = request.POST.get('jumlah')
input_deskripsi = request.POST.get('deskripsi')
input_kategori = request.POST.get('kategori')
get_kategori = Kategori.objects.get(nama=input_kategori)
Circle_produk.objects.create(
nama = input_nama,
jumlah = input_jumlah,
deskripsi = input_deskripsi,
kategori = get_kategori
)
return redirect(produk_list)
context ={
"kategori": kategori
}
return render(request, template_name, context)
def update_barang(request,id):
template_name = "add_barang.html"
kategori = Kategori.objects.all()
get_produk = Circle_produk.objects.get(id=id)
if request.method == "POST":
input_nama = request.POST.get('nama')
input_jumlah = request.POST.get('jumlah')
input_deskripsi = request.POST.get('deskripsi')
input_kategori = request.POST.get('kategori')
get_kategori = Kategori.objects.get(nama=input_kategori)
get_produk.nama = input_nama
get_produk.jumlah = input_jumlah
get_produk.deskripsi = input_deskripsi
get_produk.kategori = get_kategori
get_produk.save()
return redirect(produk_list)
context ={
"kategori": kategori,
"get_produk" : get_produk
}
return render(request, template_name, context)
def delete_barang(request, id):
Circle_produk.objects.get(id=id).delete()
return redirect(produk_list)
|
RenalPutra/kasir-django
|
produk/views.py
|
views.py
|
py
| 2,103 |
python
|
tr
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "multiprocessing.context",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "multiprocessing.context",
"line_number": 12,
"usage_type": "argument"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "multiprocessing.context",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "multiprocessing.context",
"line_number": 36,
"usage_type": "argument"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "multiprocessing.context",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "multiprocessing.context",
"line_number": 62,
"usage_type": "argument"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 66,
"usage_type": "call"
}
] |
31286775508
|
import os
import sys
from datetime import datetime
from argparse import ArgumentParser, ArgumentTypeError
from subprocess import check_output, CalledProcessError, Popen, PIPE, DEVNULL
from contextlib import contextmanager
class FileExistsException(Exception):
def __init__(self, path):
self.path = path
def main():
args = parse_args(sys.argv[1:])
try:
path = jekyll_post(args)
except FileExistsException as ex:
print('A file already exists at \'{}\'.'.format(ex.path),
file=sys.stderr)
return 1
if path != '-':
print(path)
return 0
def parse_args(raw_args):
args = make_parser().parse_args(raw_args)
args.date = args.date or now()
args.attributes = args.attributes or []
return args
def make_parser():
p = ArgumentParser(description='Creates a new Jekyll post, and prints its '
'path to standard out.')
p.add_argument('title', type=escape_str,
help='The title for the new post.')
g = p.add_mutually_exclusive_group(required=True)
g.add_argument('-c', '--category',
help='The path of the category directory for the new post, '
'such that it will be written into '
'\'$JEKYLL_SITE_PATH/$category/_posts\'. ')
g.add_argument('-d', '--directory', type=directory_exists,
help='The path of the directory to write the new post '
'into.')
g.add_argument('-o', '--output', metavar='PATH',
help='The path to write the new post to. Provide \'-\' to '
'write to standard out.')
p.add_argument('-t', '--date', type=parse_datetime,
help='The date and time for the new post, in a format '
'accepted by the `date` utility. Default: now.')
p.add_argument('-x', '--extension', default='md',
help='The file extension for the new post. '
'Default: \'md\'.')
p.add_argument('-a', '--attributes', nargs="*", metavar='ATTR',
help='Extra attributes to put in the header, provided in a '
'format according to \'jekyll-post-header\'. The '
'\'layout\' attribute defaults to \'default\'.')
p.add_argument('-p', '--padding', type=int, default=10, metavar='NSPACES',
help='The number of spaces to left-align the attributes '
'by. Default: 10.')
return p
def escape_str(s):
return s.replace('\'', '\\\'')
def directory_exists(s):
if not os.path.isdir(s):
raise ArgumentTypeError('\'{}\' is not a directory.'.format(s))
return s
def parse_datetime(s):
try:
ds = check_output(['date', '--date={}'.format(s),
'--iso-8601=seconds'],
stderr=DEVNULL).decode().strip()
except CalledProcessError:
raise ArgumentTypeError(('\'{}\' is an invalid date. It must be in a '
'format accepted by the `date` utility\'s '
'`--date` argument.').format(s))
return datetime.strptime(ds, '%Y-%m-%dT%H:%M:%S%z')
def now():
return parse_datetime(datetime.now().isoformat())
def jekyll_post(args):
with header_proc(args) as proc:
path = get_post_path(args)
with open_post_file(path) as file:
for bline in proc.stdout:
line = bline.decode()[:-1]
print(line, file=file)
return path
def get_post_path(args):
if args.output:
return args.output
else:
filename = check_output(['jekyll-post-filename', args.title,
'--date', args.date.strftime('%Y-%m-%d'),
'--extension', args.extension],
stderr=DEVNULL).decode()[:-1]
dirname = (args.directory
or os.path.join(os.environ.get('JEKYLL_SITE_PATH', ''),
args.category,
'_posts'))
return os.path.join(dirname, filename)
@contextmanager
def open_post_file(path):
if path == '-':
yield sys.stdout
else:
if os.path.exists(path):
raise FileExistsException(path)
with open(path, 'w') as f:
yield f
def header_proc(args):
# TODO: this won't raise an exception if the script fails. Is there a way to
# check for errors, while still streaming the output?
return Popen(['jekyll-post-header', '--padding', str(args.padding),
'layout:"default"',
'date:"{}"'.format(args.date),
'title:"{}"'.format(args.title)]
+ args.attributes,
stdout=PIPE, stderr=DEVNULL)
if __name__ == '__main__':
rv = main()
sys.exit(rv)
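parse_datetime above normalises free-form input by round-tripping it through GNU date and then re-parsing the ISO-8601 string; the parsing half can be exercised on its own (the timestamp below is just an example value):
from datetime import datetime
iso = '2021-03-14T09:26:53+01:00'  # shape of `date --iso-8601=seconds` output
# %z accepts the colon in the offset on Python 3.7+.
parsed = datetime.strptime(iso, '%Y-%m-%dT%H:%M:%S%z')
print(parsed.strftime('%Y-%m-%d'))  # 2021-03-14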
|
Rainymood/rainymood.github.io
|
main.py
|
main.py
|
py
| 4,987 |
python
|
en
|
code
| 8 |
github-code
|
6
|
[
{
"api_name": "sys.argv",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentTypeError",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "subprocess.check_output",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "subprocess.DEVNULL",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "subprocess.CalledProcessError",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentTypeError",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "subprocess.check_output",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "subprocess.DEVNULL",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "contextlib.contextmanager",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "subprocess.Popen",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "subprocess.DEVNULL",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "sys.exit",
"line_number": 145,
"usage_type": "call"
}
] |
26043166506
|
from __future__ import annotations
import logging
import os
from dataclasses import dataclass
from pathlib import PurePath
from typing import Iterable
from pants.engine.engine_aware import EngineAwareParameter
from pants.engine.fs import (
AddPrefix,
CreateDigest,
Digest,
Directory,
FileContent,
MergeDigests,
RemovePrefix,
)
from pants.engine.process import ProcessResult
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.jvm.jdk_rules import InternalJdk, JvmProcess
from pants.jvm.resolve.coursier_fetch import ToolClasspath, ToolClasspathRequest
from pants.jvm.resolve.jvm_tool import GenerateJvmLockfileFromTool
from pants.jvm.shading import jarjar
from pants.jvm.shading.jarjar import JarJar, JarJarGeneratorLockfileSentinel, MisplacedClassStrategy
from pants.jvm.target_types import JvmShadingRule, _shading_validate_rules
from pants.util.logging import LogLevel
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class ShadeJarRequest(EngineAwareParameter):
path: PurePath
digest: Digest
rules: tuple[JvmShadingRule, ...]
# JarJar configuration options
skip_manifest: bool | None
misplaced_class_strategy: MisplacedClassStrategy | None
def __init__(
self,
*,
path: str | PurePath,
digest: Digest,
rules: Iterable[JvmShadingRule] | None = None,
skip_manifest: bool | None = None,
misplaced_class_strategy: MisplacedClassStrategy | None = None,
) -> None:
object.__setattr__(self, "path", path if isinstance(path, PurePath) else PurePath(path))
object.__setattr__(self, "digest", digest)
object.__setattr__(self, "rules", tuple(rules or ()))
object.__setattr__(self, "skip_manifest", skip_manifest)
object.__setattr__(self, "misplaced_class_strategy", misplaced_class_strategy)
self.__post_init__()
def __post_init__(self):
validation_errors = _shading_validate_rules(self.rules)
if validation_errors:
raise ValueError("\n".join(["Invalid rules provided:\n", *validation_errors]))
def debug_hint(self) -> str | None:
return str(self.path)
@dataclass(frozen=True)
class ShadedJar:
path: str
digest: Digest
_JARJAR_MAIN_CLASS = "com.eed3si9n.jarjar.Main"
_JARJAR_RULE_CONFIG_FILENAME = "rules"
@rule(desc="Applies shading rules to a JAR file")
async def shade_jar(request: ShadeJarRequest, jdk: InternalJdk, jarjar: JarJar) -> ShadedJar:
if not request.rules:
return ShadedJar(path=str(request.path), digest=request.digest)
output_prefix = "__out"
output_filename = os.path.join(output_prefix, request.path.name)
rule_config_content = "\n".join([rule.encode() for rule in request.rules]) + "\n"
logger.debug(f"Using JarJar rule file with following contents:\n{rule_config_content}")
lockfile_request, conf_digest, output_digest = await MultiGet(
Get(GenerateJvmLockfileFromTool, JarJarGeneratorLockfileSentinel()),
Get(
Digest,
CreateDigest(
[
FileContent(
path=_JARJAR_RULE_CONFIG_FILENAME,
content=rule_config_content.encode("utf-8"),
),
]
),
),
Get(Digest, CreateDigest([Directory(output_prefix)])),
)
tool_classpath, input_digest = await MultiGet(
Get(ToolClasspath, ToolClasspathRequest(lockfile=lockfile_request)),
Get(Digest, MergeDigests([request.digest, output_digest])),
)
toolcp_prefix = "__toolcp"
conf_prefix = "__conf"
immutable_input_digests = {
toolcp_prefix: tool_classpath.digest,
conf_prefix: conf_digest,
}
def should_skip_manifest() -> bool:
if request.skip_manifest is not None:
return request.skip_manifest
return jarjar.skip_manifest
system_properties: dict[str, str] = {
"verbose": str(logger.isEnabledFor(LogLevel.DEBUG.level)).lower(),
"skipManifest": str(should_skip_manifest()).lower(),
}
misplaced_class_strategy = request.misplaced_class_strategy or jarjar.misplaced_class_strategy
if misplaced_class_strategy:
system_properties["misplacedClassStrategy"] = misplaced_class_strategy.value
result = await Get(
ProcessResult,
JvmProcess(
jdk=jdk,
argv=[
_JARJAR_MAIN_CLASS,
"process",
os.path.join(conf_prefix, _JARJAR_RULE_CONFIG_FILENAME),
str(request.path),
output_filename,
],
classpath_entries=tool_classpath.classpath_entries(toolcp_prefix),
input_digest=input_digest,
extra_immutable_input_digests=immutable_input_digests,
extra_jvm_options=[
*jarjar.jvm_options,
*[f"-D{prop}={value}" for prop, value in system_properties.items()],
],
description=f"Shading JAR {request.path}",
output_directories=(output_prefix,),
level=LogLevel.DEBUG,
),
)
shaded_jar_digest = await Get(Digest, RemovePrefix(result.output_digest, output_prefix))
if request.path.parents:
# Restore the folder structure of the original path in the output digest
shaded_jar_digest = await Get(
Digest, AddPrefix(shaded_jar_digest, str(request.path.parent))
)
return ShadedJar(path=str(request.path), digest=shaded_jar_digest)
def rules():
return [*collect_rules(), *jarjar.rules()]
|
pantsbuild/pants
|
src/python/pants/jvm/shading/rules.py
|
rules.py
|
py
| 5,649 |
python
|
en
|
code
| 2,896 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pants.engine.engine_aware.EngineAwareParameter",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "pathlib.PurePath",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "pants.engine.fs.Digest",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "pants.jvm.target_types.JvmShadingRule",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "pants.jvm.shading.jarjar.MisplacedClassStrategy",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "pathlib.PurePath",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "pants.engine.fs.Digest",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "typing.Iterable",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "pants.jvm.target_types.JvmShadingRule",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "pants.jvm.shading.jarjar.MisplacedClassStrategy",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "pathlib.PurePath",
"line_number": 51,
"usage_type": "argument"
},
{
"api_name": "pants.jvm.target_types._shading_validate_rules",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pants.engine.fs.Digest",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "pants.jvm.jdk_rules.InternalJdk",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "pants.jvm.shading.jarjar.JarJar",
"line_number": 79,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "pants.engine.rules.rule.encode",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "pants.engine.rules.rule",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "pants.engine.rules.MultiGet",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "pants.engine.rules.Get",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "pants.jvm.resolve.jvm_tool.GenerateJvmLockfileFromTool",
"line_number": 90,
"usage_type": "argument"
},
{
"api_name": "pants.jvm.shading.jarjar.JarJarGeneratorLockfileSentinel",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "pants.engine.rules.Get",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "pants.engine.fs.Digest",
"line_number": 92,
"usage_type": "argument"
},
{
"api_name": "pants.engine.fs.CreateDigest",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "pants.engine.fs.FileContent",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "pants.engine.rules.Get",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "pants.engine.fs.Digest",
"line_number": 102,
"usage_type": "argument"
},
{
"api_name": "pants.engine.fs.CreateDigest",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "pants.engine.fs.Directory",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "pants.engine.rules.MultiGet",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "pants.engine.rules.Get",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "pants.jvm.resolve.coursier_fetch.ToolClasspath",
"line_number": 106,
"usage_type": "argument"
},
{
"api_name": "pants.jvm.resolve.coursier_fetch.ToolClasspathRequest",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "pants.engine.rules.Get",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "pants.engine.fs.Digest",
"line_number": 107,
"usage_type": "argument"
},
{
"api_name": "pants.engine.fs.MergeDigests",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "pants.jvm.shading.jarjar.skip_manifest",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "pants.jvm.shading.jarjar",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "pants.util.logging.LogLevel.DEBUG",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "pants.util.logging.LogLevel",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "pants.jvm.shading.jarjar.misplaced_class_strategy",
"line_number": 126,
"usage_type": "attribute"
},
{
"api_name": "pants.jvm.shading.jarjar",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "pants.engine.rules.Get",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "pants.engine.process.ProcessResult",
"line_number": 131,
"usage_type": "argument"
},
{
"api_name": "pants.jvm.jdk_rules.JvmProcess",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "pants.jvm.shading.jarjar.jvm_options",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "pants.jvm.shading.jarjar",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "pants.util.logging.LogLevel.DEBUG",
"line_number": 150,
"usage_type": "attribute"
},
{
"api_name": "pants.util.logging.LogLevel",
"line_number": 150,
"usage_type": "name"
},
{
"api_name": "pants.engine.rules.Get",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "pants.engine.fs.Digest",
"line_number": 154,
"usage_type": "argument"
},
{
"api_name": "pants.engine.fs.RemovePrefix",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "pants.engine.rules.Get",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "pants.engine.fs.Digest",
"line_number": 158,
"usage_type": "argument"
},
{
"api_name": "pants.engine.fs.AddPrefix",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "pants.engine.rules.rule",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "pants.engine.rules.collect_rules",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "pants.jvm.shading.jarjar.rules",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "pants.jvm.shading.jarjar",
"line_number": 165,
"usage_type": "name"
}
] |
39267295276
|
import sys
import multiprocessing
from controls import ManualControl
from cam import Camera
from server import get_command_keyboard, stream_frame, get_command
import threading
# mode = 1: drive with the keyboard
# mode = 2: drive with voice commands
# mode = 3: drive with the keyboard while streaming frames from the Raspberry Pi to the PC
# mode = 4: drive with voice commands while streaming frames from the Raspberry Pi to the PC
# default mode = 1
mode = 1
def cam(targets, isRead, phase, frm):
# set camera object with Camera class
camera = Camera(show=False, captureIndex=-1, camRes=(640, 480))
camera.set_camera_settings(966.9541358947754)
camera.set_aruco_settings(markerSize=4, totalMarkers=50, arucoWidth=6)
while True:
camera.set_frame()
isRead.value = camera.isRead
camera.detect_aruco()
if camera.target is not None:
camera.target.set_instant_phase_angle(phase.value)
targets.append(camera.target)
frm["data"] = camera.frame
camera.break_and_release()
if camera.out:
break
if __name__ == '__main__':
manager = multiprocessing.Manager()
targets = manager.list()
isRead = multiprocessing.Value('i', 0)
phase = multiprocessing.Value('i', 0)
frm = manager.dict()
frm["command"] = "dur"
    # Set mode = 1 to control the Raspberry Pi from the PC with the keyboard.
if mode == 1:
t1 = threading.Thread(target=get_command_keyboard, args=(frm,))
t2 = threading.Thread(target=ManualControl.get_command_keyboard_from_pc, args=(frm,))
try:
t1.start()
t2.start()
except (KeyboardInterrupt, SystemExit):
sys.exit()
    # Set mode = 2 to control the Raspberry Pi from the PC with voice commands.
elif mode == 2:
t1 = threading.Thread(target=get_command, args=(frm,))
t2 = threading.Thread(target=ManualControl.speech_move, args=(frm,))
try:
t1.start()
t2.start()
except (KeyboardInterrupt, SystemExit):
sys.exit()
    # mode = 3: keyboard control while streaming frames from the Raspberry Pi to the PC
elif mode == 3:
p1 = multiprocessing.Process(target=cam, args=(targets, isRead, phase, frm))
t1 = threading.Thread(target=stream_frame, args=(frm,))
t2 = threading.Thread(target=get_command_keyboard, args=(frm,))
t3 = threading.Thread(target=ManualControl.get_command_keyboard_from_pc, args=(frm,))
try:
p1.start()
t1.start()
t2.start()
t3.start()
except (KeyboardInterrupt, SystemExit):
sys.exit()
    # mode = 4: voice-command control while streaming frames from the Raspberry Pi to the PC
elif mode == 4:
p1 = multiprocessing.Process(target=cam, args=(targets, isRead, phase, frm))
t1 = threading.Thread(target=stream_frame, args=(frm,))
t2 = threading.Thread(target=get_command, args=(frm,))
t3 = threading.Thread(target=ManualControl.speech_move, args=(frm,))
try:
p1.start()
t1.start()
t2.start()
t3.start()
except (KeyboardInterrupt, SystemExit):
p1.kill()
sys.exit()
|
AbdullahTas123/pi-robot-car
|
raspberrypi/main.py
|
main.py
|
py
| 3,394 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "cam.Camera",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Manager",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Value",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Value",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "server.get_command_keyboard",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "threading.Thread",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "controls.ManualControl.get_command_keyboard_from_pc",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "controls.ManualControl",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "sys.exit",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "server.get_command",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "threading.Thread",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "controls.ManualControl.speech_move",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "controls.ManualControl",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "sys.exit",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Process",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "server.stream_frame",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "threading.Thread",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "server.get_command_keyboard",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "threading.Thread",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "controls.ManualControl.get_command_keyboard_from_pc",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "controls.ManualControl",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "sys.exit",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Process",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "server.stream_frame",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "threading.Thread",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "server.get_command",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "threading.Thread",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "controls.ManualControl.speech_move",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "controls.ManualControl",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "sys.exit",
"line_number": 90,
"usage_type": "call"
}
] |
32414340113
|
from flask import Flask, send_file, request, abort
from pathlib import Path
import youtube_dl
import json
app = Flask(__name__)
@app.route('/queuemp3', methods=['GET', 'POST'])
def queuemp3():
if request.method == 'POST':
try:
data = request.get_json()
url = data['url']
print(url)
ydl = youtube_dl.YoutubeDL()
r = None
with ydl:
# don't download, much faster
r = ydl.extract_info(url, download=False)
options = {
'format': 'bestaudio/best',
'extractaudio': True, # only keep the audio
'audioformat': "mp3", # convert to mp3
                'outtmpl': '{}.mp3'.format(r['title']), # name the file after the video title
'noplaylist': True, # only download single song, not playlist
}
''' print some typical fields if needed
print("%s uploaded by '%s', has %d views, %d likes, and %d dislikes" % (
r['title'], r['uploader'], r['view_count'], r['like_count'], r['dislike_count']))'''
with youtube_dl.YoutubeDL(options) as ydl:
ydl.download([url])
try:
return json.dumps({'filename': r['title']})
except Exception as e:
return str(e)
finally:
print("A request was sent for queueing a conversion")
@app.route('/downloadmp3', methods=['GET', 'POST'])
def downloadmp3():
if request.method == 'POST':
filename = request.form['filename']
print(filename)
audio_file = Path("./{}.mp3".format(filename))
if audio_file.is_file():
return send_file('./{}.mp3'.format(filename),
attachment_filename='{}.mp3'.format(filename))
else:
abort(404)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080, debug=True)
|
BK-Modding/youtube-2-mp3
|
flask server/app.py
|
app.py
|
py
| 1,961 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "flask.request.get_json",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "youtube_dl.YoutubeDL",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "youtube_dl.YoutubeDL",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "flask.send_file",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "flask.abort",
"line_number": 54,
"usage_type": "call"
}
] |
33561633117
|
import typing as t
import json
import re
from pathlib import Path
from PIL import Image
from torch.utils.data import Dataset
from .types.marked_image \
import MarkedImage, MarkedImageTensor
from .transforms import (
ToTensor
)
from ..utils import coord
class BdcDataSet(Dataset):
def __init__(self, img_path: str, land_path: str, transform=None):
super().__init__()
if transform is None:
self.transform = ToTensor()
else:
self.transform = transform
self.image_files = [
p for p in Path(img_path).glob("**/*")
if re.search('/*.(jpg|png)', str(p))
]
if land_path is not None:
with open(land_path) as lm:
landmarks = json.load(lm)
self.landmarks = self.__normalize_landmarks(landmarks)
else:
self.landmarks = {}
def __len__(self) -> int:
return len(self.image_files)
def __getitem__(self, idx: int) -> MarkedImageTensor:
p = self.image_files[idx]
with Image.open(str(p)).convert('RGB') as img:
img.load()
lmarks = self.landmarks.get(p.name, [])
sample: MarkedImage = {
'image': img,
'landmarks': lmarks
}
sample = self.transform(sample)
return sample
def __normalize_landmarks(self, landmarks) -> t.Dict:
norm_lands = {}
for p in self.image_files:
lmarks = landmarks[p.name]
with Image.open(str(p)).convert('RGB') as img:
img.load()
norm_lands[p.name] = list(map(
lambda x: coord.to_ml_coord(x, img.size),
lmarks
))
return norm_lands
|
daikon-oroshi/court-detection
|
court_detection/data/data_set.py
|
data_set.py
|
py
| 1,789 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.utils.data.Dataset",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "transforms.ToTensor",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "types.marked_image.MarkedImage",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "types.marked_image.MarkedImageTensor",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "utils.coord.to_ml_coord",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "utils.coord",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 55,
"usage_type": "attribute"
}
] |
35411640384
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Update map explorers
--------------------
"""
import logging
from os.path import join
from hdx.data.dataset import Dataset
from hdx.data.resource import Resource
from src.acled import update_lc_acled, update_ssd_acled
from src.cbpf import update_cbpf
from src.fts import update_fts
#from src.rowca import update_rowca
logger = logging.getLogger(__name__)
def get_valid_names(downloader, url, headers):
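    # return the 'Name' column values, skipping any repeated header row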
rows_gen = downloader.get_tabular_rows(url, dict_rows=True, headers=headers)
return [x['Name'] for x in rows_gen if x['Name'] != 'Name']
def update_resources(resource_updates):
for resource_info in resource_updates.values():
resource = Resource.read_from_hdx(resource_info['id'])
resource.set_file_to_upload(resource_info['path'])
resource.update_in_hdx()
def update_lc(today, downloader, folder, lc_names_url, lc_mappings_url,
acled_base_url, fts_base_url, rowca_base_url):
logger.info('Lake Chad Map Explorer Data')
country_list = ['Cameroon', 'Nigeria', 'Niger', 'Chad']
valid_names = get_valid_names(downloader, lc_names_url, headers=['ISO', 'Name'])
replace_values = downloader.download_tabular_key_value(lc_mappings_url)
resource_updates = dict()
resource_updates['acled_events'] = {'id': 'fc396bf2-d204-48b2-84d2-337ada015273',
'path': join(folder, 'Lake_Chad_Basin_Recent_Conflict_Events.csv')}
resource_updates['acled_fatalities'] = {'id': '3792ee5d-ca30-4e5c-96c8-618c6b625d12',
'path': join(folder, 'Lake_Chad_Basin_Recent_Conflict_Event_Total_Fatalities.csv')}
resource_updates['fts'] = {'id': '2890c719-4fb2-4178-acdb-e0c5c91cfbce',
'path': join(folder, 'Lake_Chad_Basin_Appeal_Status.csv')}
# resource_updates['rowca_population'] = {'id': '048df35c-e35f-4b1f-aa1a-2d1ce1292f22',
# 'path': join(folder, 'Lake_Chad_Basin_Estimated_Population.csv')}
# resource_updates['rowca_displaced'] = {'id': '1bdcc8f3-223c-4f7d-9bc6-48be317d50c5',
# 'path': join(folder, 'Lake_Chad_Basin_Displaced.csv')}
logger.info('Lake Chad - ACLED')
update_lc_acled(today, acled_base_url, country_list, valid_names, replace_values, resource_updates)
logger.info('Lake Chad - FTS')
update_fts(fts_base_url, downloader, country_list, resource_updates)
# logger.info('Lake Chad - ROWCA')
# update_rowca(rowca_base_url, downloader, valid_names, replace_values, resource_updates)
logger.info('Lake Chad - Dataset Date')
update_resources(resource_updates)
dataset = Dataset.read_from_hdx('lake-chad-crisis-map-explorer-data')
dataset.set_dataset_date_from_datetime(today)
dataset.update_in_hdx()
def update_ssd(today, downloader, folder, ssd_adm1_names_url, ssd_adm2_names_url, ssd_mappings_url,
acled_base_url, cbpf_base_url):
logger.info('South Sudan Map Explorer Data')
country_list = ['South Sudan']
valid_adm1_names = get_valid_names(downloader, ssd_adm1_names_url, headers=['Name'])
valid_adm2_names = get_valid_names(downloader, ssd_adm2_names_url, headers=['Name'])
replace_values = downloader.download_tabular_key_value(ssd_mappings_url)
resource_updates = dict()
resource_updates['acled_events'] = {'id': '3480f362-67bb-44d0-b749-9e8fc0963fc0',
'path': join(folder, 'South_Sudan_Recent_Conflict_Events.csv')}
resource_updates['acled_fatalities'] = {'id': 'a67b85ee-50b4-4345-9102-d88bf9091e95',
'path': join(folder, 'South_Sudan_Recent_Conflict_Event_Total_Fatalities.csv')}
resource_updates['cbpf'] = {'id': 'd6b18405-5982-4075-bb0a-a1a85f09d842',
'path': join(folder, 'South_Sudan_Country_Based_Pool_Funds.csv')}
logger.info('South Sudan - ACLED')
update_ssd_acled(today, acled_base_url, country_list, valid_adm2_names, replace_values, resource_updates)
logger.info('South Sudan - CBPF')
update_cbpf(cbpf_base_url, downloader, 'SSD19', today, valid_adm1_names, replace_values, resource_updates)
    logger.info('South Sudan - Dataset Date')
update_resources(resource_updates)
dataset = Dataset.read_from_hdx('south-sudan-crisis-map-explorer-data')
dataset.set_dataset_date_from_datetime(today)
dataset.update_in_hdx()
|
OCHA-DAP/hdx-scraper-mapexplorer
|
mapexplorer.py
|
mapexplorer.py
|
py
| 4,508 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "hdx.data.resource.Resource.read_from_hdx",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "hdx.data.resource.Resource",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "src.acled.update_lc_acled",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "src.fts.update_fts",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "hdx.data.dataset.Dataset.read_from_hdx",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "hdx.data.dataset.Dataset",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "src.acled.update_ssd_acled",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "src.cbpf.update_cbpf",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "hdx.data.dataset.Dataset.read_from_hdx",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "hdx.data.dataset.Dataset",
"line_number": 84,
"usage_type": "name"
}
] |
37076072504
|
import subprocess
import time
import os
import stat
import threading
import uuid
class Iperf3(object):
def __init__(self, _ssh_machine1,
_ssh_key1,
_ssh_machine2,
_ssh_key2):
self.ssh_machine1 = _ssh_machine1
self.ssh_machine2 = _ssh_machine2
self.ssh_key1 = _ssh_key1
self.ssh_key2 = _ssh_key2
def generate_test_file(self,
command_list,
filename):
with open(filename, 'w') as f:
f.write("#!/bin/bash\n")
for command in command_list:
f.write(" ".join(command) + "\n")
f.write("sleep 5\n")
os.chmod(filename, os.stat(filename).st_mode | stat.S_IEXEC)
def get_result_value_from_client_iperf_file(self,client_file):
print(client_file)
proc = subprocess.Popen(['./get_value.sh',client_file],stdout=subprocess.PIPE)
proc.wait()
value_bytes = proc.communicate()[0].decode('utf-8')
value=''.join(str(v) for v in value_bytes)
# May return \n only
if not value or ('\n' in value and len(value)==1):
return None
print(value)
proc = subprocess.Popen(['./get_metric.sh',client_file],stdout=subprocess.PIPE)
proc.wait()
metric_bytes = proc.communicate()[0].decode('utf-8')
metric=''.join(str(v) for v in metric_bytes)
if 'M' in metric:
return float(value)
if 'G' in metric:
return (float(value) * 1000.0)
        return float(value) * 0.001
def get_results(self,
client_key,
client_addr,
flow_num=20):
sum = 0.0
filepath='./' + client_addr + '_'
filepath += str(uuid.uuid4())
filepath += '/'
os.mkdir(filepath)
scp = subprocess.Popen(['scp','-i',client_key,client_addr + ':~/iperf3_output.*',filepath])
scp.wait()
failed_flows = 0
for i in range(0,flow_num):
outfile = filepath + 'iperf3_output.' + str(i)
res = self.get_result_value_from_client_iperf_file(outfile)
if res == None:
failed_flows += 1
else:
sum += res
print('Total is: {} Mbps'.format(sum))
print('Mean is: {} Mbps'.format(sum/float(flow_num)))
def run_performance_tests(self,
use_udp=False, # protocol to be used
bw='500M', # bandwidth
duration='300',
flow_num=20,
server_addr=None,
server_port=5201,
server_file='server_file.sh',
client_file='client_file.sh'):
sleep_between_serv_clients = 30
s_cmd_base = 'iperf3 -s -1'
c_cmd_base = 'iperf3 -c ' + self.ssh_machine2 + ' -b ' + bw + ' -t ' + duration
if use_udp:
c_cmd_base += ' -u'
port=server_port
s_cmd_list = []
for i in range(0,flow_num):
outfile = 'iperf3_output.' + str(i)
#s_cmd = ['ssh','-i',self.ssh_key2,self.ssh_machine2,
# 'nohup',s_cmd_base,'-p',str(port+i),'&>',outfile]
s_cmd = ['nohup',s_cmd_base,'-p',str(port+i),'&>',outfile,'&']
s_cmd_list.append(s_cmd)
self.generate_test_file(s_cmd_list,server_file)
s_scp = subprocess.Popen(['scp','-i',self.ssh_key2,server_file,self.ssh_machine2 + ':~/']);
s_scp.wait()
#print("Running: {} as server".format(s_cmd))
subprocess.Popen(['ssh','-i',self.ssh_key2,self.ssh_machine2,'./' + server_file])
time.sleep(sleep_between_serv_clients)
c_cmd_list = []
for i in range(0,flow_num):
outfile = 'iperf3_output.' + str(i)
#c_cmd = ['ssh','-i',self.ssh_key1,self.ssh_machine1,
# 'nohup',c_cmd_base,'-p',str(port+i),'&>',outfile]
c_cmd = ['nohup',c_cmd_base,'-p',str(port+i),'&>',outfile,'&']
c_cmd_list.append(c_cmd)
self.generate_test_file(c_cmd_list,client_file)
c_scp = subprocess.Popen(['scp','-i',self.ssh_key1,client_file,self.ssh_machine1 + ':~/']);
c_scp.wait()
#print("Running: {} as server".format(c_cmd))
subprocess.Popen(['ssh','-i',self.ssh_key1,self.ssh_machine1,'./' + client_file])
print("Waiting for test to finish........")
time.sleep(int(duration) + sleep_between_serv_clients)
print("DONE")
#subprocess.Popen(['ssh','-i',self.ssh_key2,self.ssh_machine2,
# "kill -9 $(ps aux | grep iperf | awk \'{print $2}\')"])
self.get_results(client_key=self.ssh_key1,
client_addr=self.ssh_machine1,
flow_num=flow_num)
if __name__=="__main__":
print("*************************************")
print("** Make sure SSH keys for servers **")
print("** SSH address should of form: **")
print("** name@IP **")
print("** or **")
print("** name@hostname **")
print("** Key should be a filepath **")
print("** **")
print("** Make sure iperf3 is installed **")
print("*************************************")
##### test STARTUP parameters:
use_udp=False
bw='500M'
duration='300'
flow_num=20
server_addr=None
server_port=5201
####
# test_list syntax:
# ( IP MACHINE 1, KEY MACHINE 1, IP MACHINE 2, KEY MACHINE 2)
test_list = [('10.5.0.3','./id_iperf_test','10.5.0.30','./id_iperf_test')]
#('10.5.0.3','./id_iperf_test','10.5.0.30','./id_iperf_test')]
thread_list = []
for tup in test_list:
test = Iperf3(tup[0],tup[1],tup[2],tup[3])
        # run each performance test in its own thread
        thread = threading.Thread(
            target=test.run_performance_tests,
            kwargs=dict(use_udp=use_udp,
                        bw=bw,
                        duration=duration,
                        flow_num=flow_num,
                        server_port=server_port))
thread_list.append(thread)
thread.start()
#waiting threads to finish:
for t in thread_list:
t.join()
|
phvalguima/iperf-testing
|
iperf.py
|
iperf.py
|
py
| 6,642 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.chmod",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.stat",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "stat.S_IEXEC",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "subprocess.Popen",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "subprocess.Popen",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "uuid.uuid4",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "subprocess.Popen",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 164,
"usage_type": "call"
}
] |
37564490314
|
import pdb
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
from scipy.stats import entropy, gaussian_kde, normaltest
import nflows
from nflows import distributions, transforms, utils, flows
from nflows.transforms.normalization import BatchNorm
from nflows.nn import nets
from nflows.transforms.base import (
CompositeTransform,
InputOutsideDomain,
InverseTransform,
Transform,
)
from nflows.utils import torchutils
def build_nflows(num_layers=2, hids=20, dims=2, context_dims=2,
batch_norm=False, activation=torch.nn.functional.relu, bins = 15, tail=8.0,
device = 'cuda', rqs=True, bimodal=False):
context_net = Linear_2L(context_dims, 2*dims, hids, 0.5, 0,
mc_drop = False, fixed_masks = False,
different_heads = False, device = device)
base_dist = nflows.distributions.ConditionalDiagonalNormal(
shape=[dims], context_encoder= context_net)
transforms = []
def create_net(in_features, out_features):
return Linear_2L(in_features, out_features, hids, 0.5,
context_dims, fixed_masks = False,
different_heads = False, device=device)
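    # each layer: random permutation + cubic-spline coupling (dims > 1) or a quadratic autoregressive spline (dims == 1)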
for _ in range(num_layers):
if dims > 1:
transforms.append(nflows.transforms.RandomPermutation(features=dims))
mask = nflows.utils.torchutils.create_mid_split_binary_mask(dims)
transforms.append(
nflows.transforms.PiecewiseCubicCouplingTransform(mask, create_net,
tails='linear', num_bins=bins, tail_bound=tail,
))
if dims == 1:
transforms.append(
nflows.transforms.MaskedPiecewiseQuadraticAutoregressiveTransform(
features=dims,
hidden_features=hids,
context_features=context_dims,
num_blocks = 2,
use_batch_norm=batch_norm,
num_bins=bins,
tails='linear',
tail_bound = tail,
activation = activation,
use_residual_blocks = False,))
transform = nflows.transforms.CompositeTransform(transforms)
flow = nflows.flows.Flow(transform, base_dist)
return flow
def build_nflows_ensemble(num_layers=2, hids=20, dims=2, context_dims=2,
batch_norm=False, activation=torch.nn.functional.relu, bins = 15, tail=8.0,
device = 'cuda', rqs=True, base = True, flows = True, multihead=False,
fixed_masks=False, ensemble_size=15, bimodal=False):
if base:
context_net = Linear_2L(context_dims, 2*dims, hids*2, 0.5, 0,
fixed_masks = fixed_masks, num_masks = ensemble_size,
different_heads = multihead, device = device)
else:
context_net = Linear_2L(context_dims, 2*dims, hids*2, 0.5, 0,
fixed_masks = False, num_masks = ensemble_size,
different_heads = False, device = device)
base_dist = nflows.distributions.ConditionalDiagonalNormal(
shape=[dims], context_encoder= context_net)
transforms = []
if flows:
def create_net(in_features, out_features):
return Linear_2L(in_features, out_features, hids, 0.5,
context_dims, fixed_masks=fixed_masks,
different_heads = multihead, num_masks=ensemble_size, device=device)
else:
def create_net(in_features, out_features):
return Linear_2L(in_features, out_features, hids, 0.5,
context_dims, fixed_masks = False,
different_heads = False, device=device)
for _ in range(num_layers):
if dims > 1:
transforms.append(nflows.transforms.RandomPermutation(features=dims))
mask = nflows.utils.torchutils.create_mid_split_binary_mask(dims)
transforms.append(
nflows.transforms.PiecewiseCubicCouplingTransform(mask, create_net,
tails='linear', num_bins=bins, tail_bound=tail,
))
if dims == 1:
transforms.append(
nflows.transforms.MaskedPiecewiseQuadraticAutoregressiveTransform(
features=dims,
hidden_features=hids,
context_features=context_dims,
num_blocks = 1,
use_batch_norm=batch_norm,
num_bins=bins,
tails='linear',
tail_bound = tail,
activation = activation,
use_residual_blocks = False,
ensemble = flows))
#create_context_net = create_net))
transform = nflows.transforms.CompositeTransform(transforms)
flow = nflows.flows.Flow(transform, base_dist)
return flow
class Linear_2L(nn.Module):
def __init__(self, input_dim, output_dim, n_hid, pdrop, context_dim,
fixed_masks = False, num_masks = 10, different_heads = False,
device='cpu'):
super(Linear_2L, self).__init__()
self.pdrop = pdrop
self.input_dim = input_dim
self.output_dim = output_dim
self.n_hid = n_hid
self.fc1 = nn.Linear(input_dim+context_dim, n_hid)
self.fc2 = nn.Linear(n_hid, n_hid)
if different_heads:
self.heads = []
for i in range(num_masks):
exec(f'self.head{i} = nn.Linear(n_hid, output_dim)')
exec(f'self.heads.append(self.head{i})')
else:
self.fc3 = nn.Linear(n_hid, output_dim)
self.different_heads = different_heads
# choose your non linearity
# self.act = nn.Tanh()
# self.act = nn.Sigmoid()
self.act = nn.ReLU(inplace=True)
# self.act = nn.ELU(inplace=True)
# self.act = nn.SELU(inplace=True)
self.fixed_masks = fixed_masks
if fixed_masks:
self.create_masks(num_masks, device)
self.num_masks = num_masks
def forward(self, x, context=None, rand_mask=True, mask_index = 0):
if self.fixed_masks:
if rand_mask:
mask = self.masks[np.random.choice(self.num_masks)]
else:
mask = self.masks[mask_index]
if self.different_heads:
if rand_mask:
head_idx = np.random.choice(self.num_masks)
else:
head_idx = mask_index
x = x.view(-1, self.input_dim) # view(batch_size, input_dim)
if context is None:
pass
else:
x = torch.cat((x, context), dim=1)
# -----------------
x = self.fc1(x)
if self.fixed_masks:
x = mask[0].repeat(x.shape[0],1)*x
# -----------------
x = self.act(x)
# -----------------
x = self.fc2(x)
if self.fixed_masks:
x = mask[1].repeat(x.shape[0],1)*x
# -----------------
x = self.act(x)
# -----------------
if self.different_heads:
y = self.heads[head_idx](x)
else:
y = self.fc3(x)
return y
def create_masks(self, num_masks, device):
masks = []
for i in range(num_masks):
mask_l1 = torch.bernoulli(torch.full_like(torch.ones(self.n_hid), self.pdrop))\
.to(device)
mask_l2 = torch.bernoulli(torch.full_like(torch.ones(self.n_hid), self.pdrop))\
.to(device)
masks.append([mask_l1, mask_l2])
self.masks = masks
|
nwaftp23/nflows_epistemic
|
nflows_utils.py
|
nflows_utils.py
|
py
| 7,615 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "torch.nn",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "nflows.distributions.ConditionalDiagonalNormal",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "nflows.distributions",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "nflows.transforms",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "nflows.transforms.append",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "nflows.transforms",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "nflows.transforms.RandomPermutation",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "nflows.utils.torchutils.create_mid_split_binary_mask",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "nflows.utils",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "nflows.transforms.append",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "nflows.transforms",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "nflows.transforms.PiecewiseCubicCouplingTransform",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "nflows.transforms",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "nflows.transforms.append",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "nflows.transforms",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "nflows.transforms.MaskedPiecewiseQuadraticAutoregressiveTransform",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "nflows.transforms",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "nflows.transforms.CompositeTransform",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "nflows.transforms",
"line_number": 57,
"usage_type": "argument"
},
{
"api_name": "nflows.flows.Flow",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "nflows.flows",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "nflows.distributions.ConditionalDiagonalNormal",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "nflows.distributions",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "nflows.transforms",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "nflows.flows",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "nflows.transforms.append",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "nflows.transforms",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "nflows.transforms.RandomPermutation",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "nflows.utils.torchutils.create_mid_split_binary_mask",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "nflows.utils",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "nflows.transforms.append",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "nflows.transforms",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "nflows.transforms.PiecewiseCubicCouplingTransform",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "nflows.transforms",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "nflows.transforms.append",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "nflows.transforms",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "nflows.transforms.MaskedPiecewiseQuadraticAutoregressiveTransform",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "nflows.transforms",
"line_number": 100,
"usage_type": "attribute"
},
{
"api_name": "nflows.flows",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "nflows.transforms.CompositeTransform",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "nflows.transforms",
"line_number": 113,
"usage_type": "argument"
},
{
"api_name": "nflows.flows.Flow",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "nflows.flows",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Module",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 119,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "numpy.random.choice",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.choice",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "torch.cat",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "torch.bernoulli",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "torch.full_like",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "torch.bernoulli",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "torch.full_like",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "torch.ones",
"line_number": 196,
"usage_type": "call"
}
] |
2348487124
|
import os
import sys
import logging
if sys.version_info >= (3, 0):
from io import StringIO
else:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
assert StringIO
from pylint import lint
from pylint.__pkginfo__ import numversion
class PyLinter(object):
"""PyLinter class for Anaconda
"""
def __init__(self, filename, rcfile):
self.filename = filename
self.exit = sys.exit
self.rcfile = rcfile
self.stdout = sys.stdout
self.output = StringIO()
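        # neutralise sys.exit and capture stdout so pylint's report can be parsed afterwards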
sys.exit = lambda x: None
sys.stdout = self.output
self.execute()
def execute(self):
"""Execute the linting process
"""
if numversion < (1, 0, 0):
args = '--include-ids=y -r n'.split(' ')
else:
args = '--msg-template={msg_id}:{line}:{column}:{msg} -r n'.split(
' ')
if self.rcfile:
args.append('--rcfile={0}'.format(os.path.expanduser(self.rcfile)))
args.insert(0, self.filename)
lint.Run(args)
def parse_errors(self):
"""Parse the output given by PyLint
"""
errors = {'E': [], 'W': [], 'V': []}
data = self.output.getvalue()
sys.exit = self.exit
sys.stdout = self.stdout
for error in data.splitlines():
if '************* Module ' in error:
_, module = error.split('************* Module ')
                if module not in self.filename:
continue
else:
offset = None
try:
if numversion >= (1, 0, 0):
code, line, offset, message = error.split(':', 3)
else:
code, line, message = error.split(':', 2)
except ValueError as exception:
logging.debug(
'unhandled exception in PyLinter parse_errors '
'this is a non fatal error: {0}'.format(exception)
)
logging.debug(
'the error string that raised this exception was: '
'{0}, please, report this in the GitHub site'.format(
error
)
)
continue
if numversion < (1, 0, 0):
try:
line, offset = line.split(',')
except ValueError:
# seems like some versions (or packagers) of pylint
                    # prior to 1.0.0 add offset to the output but others
                    # don't
pass
errors[self._map_code(code)[0]].append({
'line': int(line),
'offset': offset,
'code': self._map_code(code)[1],
'message': '[{0}] {1}'.format(
self._map_code(code)[1], message
)
})
return errors
def _map_code(self, code):
"""Map the given code to fit Anaconda codes
"""
mapping = {'C': 'V', 'E': 'E', 'F': 'E', 'I': 'V', 'R': 'W', 'W': 'W'}
return (mapping[code[0]], code[1:])
|
blizzrdof77/Sublime-Text-3-Packages
|
Anaconda/anaconda_lib/linting/anaconda_pylint.py
|
anaconda_pylint.py
|
py
| 3,368 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "sys.version_info",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "StringIO.StringIO",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "sys.exit",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "StringIO.StringIO",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "pylint.__pkginfo__.numversion",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "os.path.expanduser",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "pylint.lint.Run",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "pylint.lint",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "sys.exit",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "pylint.__pkginfo__.numversion",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "logging.debug",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "pylint.__pkginfo__.numversion",
"line_number": 85,
"usage_type": "name"
}
] |
3357675588
|
from numpy.lib.polynomial import RankWarning
import torch as pt
import numpy as np
from dataset.GuidedBraTSDataset3D import GuidedBraTSDataset3D
from model.PFSeg import PFSeg3D
import cv2
import SimpleITK as sitk
lr=0.0001
epoch=100
batch_size=1
model_path='/path/to/Saved_models'
img_size=(64,96,96)
model=PFSeg3D().cuda()
model.load_state_dict(pt.load(model_path+'/PFSeg_3D_BraTS_patch-free_bs_best.pt',map_location = 'cpu'))
trainset=GuidedBraTSDataset3D('/path/to/BraTS20',mode='all',augment=False)
# valset=BraTSDataset3D('/path/to/BraTS20',mode='val')
# testset=GuidedBraTSDataset3D('/path/to/BraTS20',mode='test')
train_dataset=pt.utils.data.DataLoader(trainset,batch_size=batch_size,shuffle=False,drop_last=True)
# val_dataset=pt.utils.data.DataLoader(valset,batch_size=1,shuffle=True,drop_last=True)
# test_dataset=pt.utils.data.DataLoader(testset,batch_size=1,shuffle=True,drop_last=True)
def GenerateCoarseMask():
model.eval()
dice_sum=0
hd_sum=0
jc_sum=0
for i,data in enumerate(train_dataset):
output_list=np.zeros((1,1,2*img_size[0],2*img_size[1],2*img_size[2]))
label_list=np.zeros((1,1,2*img_size[0],2*img_size[1],2*img_size[2]))
(inputs,labels,raw_image,guidance,_)=data
labels3D = pt.autograd.Variable(labels).type(pt.FloatTensor).cuda().unsqueeze(1)
guidance = pt.autograd.Variable(guidance).type(pt.FloatTensor).cuda().unsqueeze(1)
inputs3D = pt.autograd.Variable(inputs).type(pt.FloatTensor).cuda().unsqueeze(1)
with pt.no_grad():
outputs3D,_ = model(inputs3D,guidance)
outputs3D=np.array(outputs3D.squeeze(0).squeeze(0).cpu().data.numpy())
output_list=np.zeros((raw_image.shape[1]+64,raw_image.shape[2]+64,raw_image.shape[3]+64))
output_list[32:-32,32:-32,32:-32]=outputs3D
label_list=np.zeros((raw_image.shape[1]+64,raw_image.shape[2]+64,raw_image.shape[3]+64))
label_list[32:-32,32:-32,32:-32]=np.array(labels3D.squeeze(0).squeeze(0).cpu().data.numpy())
input_real=np.array(raw_image.squeeze(0).numpy())
input_list=np.zeros((raw_image.shape[1]+64,raw_image.shape[2]+64,raw_image.shape[3]+64))
input_list[32:-32,32:-32,32:-32]=input_real
output_list[output_list<0.5]=0.
output_list[output_list>=0.5]=1.
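        # find the bounding box of the binarised foreground prediction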
results=np.where(output_list!=0)
x_list=results[0]
y_list=results[1]
z_list=results[2]
x_max=x_list.max()
x_min=x_list.min()
y_max=y_list.max()
y_min=y_list.min()
z_max=z_list.max()
z_min=z_list.min()
        x_length=64*(1+(x_max-x_min)//64)  # make sure the length is a multiple of 16
y_length=64*(1+(y_max-y_min)//64)
z_length=64*(1+(z_max-z_min)//64)
x_center=(x_max-x_min)//2+x_min
y_center=(y_max-y_min)//2+y_min
z_center=(z_max-z_min)//2+z_min
bbox_xmin=x_center-x_length//2
bbox_xmax=x_center+x_length//2
bbox_ymin=y_center-y_length//2
bbox_ymax=y_center+y_length//2
bbox_zmin=z_center-z_length//2
bbox_zmax=z_center+z_length//2
# cropped_coarse=np.zeros((x_length,y_length,z_length))
# cropped_image=np.zeros((x_length,y_length,z_length))
# cropped_mask=np.zeros((x_length,y_length,z_length))
cropped_image=input_list[bbox_xmin:bbox_xmax,bbox_ymin:bbox_ymax,bbox_zmin:bbox_zmax]
cropped_coarse=output_list[bbox_xmin:bbox_xmax,bbox_ymin:bbox_ymax,bbox_zmin:bbox_zmax]
cropped_mask=label_list[bbox_xmin:bbox_xmax,bbox_ymin:bbox_ymax,bbox_zmin:bbox_zmax]
if not(cropped_mask.shape==cropped_image.shape):
raise Exception()
if not(cropped_image.shape[0]%16==0 and cropped_image.shape[1]%16==0 and cropped_image.shape[2]%16==0):
raise Exception()
# save the cropped images for next round training
np.save('/path/to/BraTS20/cropped_coarse/Case_{:3d}_64image.npy'.format(i+1),cropped_image)
np.save('/path/to/BraTS20/cropped_coarse/Case_{:3d}_64coarse.npy'.format(i+1),cropped_coarse)
np.save('/path/to/BraTS20/cropped_coarse/Case_{:3d}_64mask.npy'.format(i+1),cropped_mask)
# final_img=np.zeros(shape=(2*img_size[1],2*2*img_size[2]))
# final_img[:,:2*img_size[2]]=output_list[0,0,64,:,:]*255
# final_img[:,2*img_size[2]:]=label_list[0,0,64,:,:]*255
# cv2.imwrite('TestPhase_BraTS.png',final_img)
pr_sum = output_list.sum()
gt_sum = label_list.sum()
pr_gt_sum = np.sum(output_list[label_list == 1])
dice = 2 * pr_gt_sum / (pr_sum + gt_sum)
dice_sum += dice
print("dice:",dice)
# hausdorff=hd95(output_list.squeeze(0).squeeze(0),label_list.squeeze(0).squeeze(0))
# jaccard=jc(output_list.squeeze(0).squeeze(0),label_list.squeeze(0).squeeze(0))
# hd_sum+=hausdorff
# jc_sum+=jaccard
print("Finished. Total dice: ",dice_sum/len(train_dataset),'\n')
print("Finished. Avg Jaccard: ",jc_sum/len(train_dataset))
print("Finished. Avg hausdorff: ",hd_sum/len(train_dataset))
return dice_sum/len(train_dataset)
GenerateCoarseMask()
|
Dootmaan/PFSeg-ABR
|
step2_generateCoraseMask.py
|
step2_generateCoraseMask.py
|
py
| 5,166 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "model.PFSeg",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "model.PFSeg.PFSeg3D",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "model.PFSeg.load_state_dict",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "model.PFSeg",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "torch.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "dataset.GuidedBraTSDataset3D.GuidedBraTSDataset3D",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "model.PFSeg.eval",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "model.PFSeg",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "torch.autograd",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "torch.FloatTensor",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "torch.autograd",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "torch.FloatTensor",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torch.autograd",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "torch.FloatTensor",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "torch.no_grad",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "model.PFSeg",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 108,
"usage_type": "call"
}
] |
5024929632
|
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
import requests, json
from app_comments.models import RedditPost, Comment
from annoying.functions import get_object_or_None
from app_comments.lib.comments import CommentBuilder, RedditPostBuilder
from bs4 import BeautifulSoup
from app_comments.management.commands.get_comments import PostGetter
from time import sleep
class Command(BaseCommand):
args = ""
help = ""
def add_arguments(s, parser):
parser.add_argument('--url', nargs='+', type=str)
def process_args(s, options):
url = options['url'][0] if options['url'] else None
return url
# orig_url = url[:]
# if url:
# if url[-5:] != '.json':
# url = url[:-1] + '.json'
# return url, orig_url
def handle(s, *args, **options):
#url = s.process_args(options)
#print(url)
url = 'https://www.reddit.com/top.json?sort=top&t=year'
base_url = 'https://www.reddit.com'
resp = requests.get(url)
if resp.status_code == 200:
text_json = resp.text
else:
print(resp.text)
return
page_json = json.loads(text_json)
for post_info in page_json['data']['children']:
comments_url = base_url + post_info['data']['permalink']
comments_json_url = comments_url[:-1]+'.json'
pg = PostGetter()
resp = pg.get(comments_json_url, comments_url)
print(resp, 1)
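            # retry up to three more times, sleeping between attempts, when the fetch fails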
if resp == 'bad http':
sleep_time = 5
print('sleeping (%s)...' % sleep_time)
sleep(sleep_time)
resp = pg.get(comments_json_url, comments_url)
if resp == 'bad http':
print('sleeping (%s)...' % sleep_time)
sleep(sleep_time)
resp = pg.get(comments_json_url, comments_url)
if resp == 'bad http':
print('sleeping (%s)...' % sleep_time)
sleep(sleep_time)
# cmd_data = {'--url': comments_url}
# call_command('get_comments', **cmd_data)
# break
|
daviddennis/comments
|
app_comments/management/commands/get_links.py
|
get_links.py
|
py
| 2,264 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "app_comments.management.commands.get_comments.PostGetter",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 62,
"usage_type": "call"
}
] |
14077597352
|
from lk.utils.config_util import ConfigUtil
from lk.utils.shell_util import run_and_confirm, run, run_and_return_output
from furl import furl
bitbucket = 'bitbucket'
bitbucket_domain = 'bitbucket.org'
github = 'github'
github_domain = 'github.com'
class SourceCodeRepo(object):
def __init__(self, url=None, service=None, user=None, repo_name=None):
self._url = url
self._service = service
self._user = user
self._repo_name = repo_name
@property
def url(self):
if self._url:
return self._url
else:
url = 'https://{service_domain}/{user}/{repo}'.format(
service_domain=self.service_domain,
user=self.user,
repo=self.repo_name
)
return url
@property
def hosting_service_host(self):
hosting_service_host = self._url.split('/')[2]
return hosting_service_host
@property
def hosting_service(self):
hosting_service = self.hosting_service_host.split('.')[0]
return hosting_service
@property
def user(self):
if self._user:
return self._user
else:
user = self._url.split('/')[3]
return user
@property
def repo_name(self):
if self._repo_name:
return self._repo_name
else:
repo_name = self._url.split('/')[4]
return repo_name
@property
def clone_command(self):
# https://github.com/lk-commands/default
# [email protected]:lk-commands/default.git
# git clone [email protected]:eyalev/lk-commands.git
# clone_command = 'git clone git@{hosting_service_host}:{user}/{repo_name}.git'.format(
# clone_command = 'git clone {repo_url}.git'.format(
clone_command = 'git clone {git_url}'.format(
git_url=self.git_url
)
return clone_command
@property
def git_url(self):
url = self.url
if 'github' in url:
return url
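        # for non-GitHub hosts, build an SSH-style git URL (git@host:user/repo.git)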
_furl = furl(url)
git_url = 'git@{host}:{user}/{repo}.git'.format(
host=_furl.host,
user=str(_furl.path).split('/')[1],
repo=str(_furl.path).split('/')[2]
)
return git_url
def clone(self):
print('# Cloning lk-repo')
clone_command = SourceCodeRepo(self.url).clone_command
command = '{clone_command} {local_repo_path}'.format(
clone_command=clone_command,
local_repo_path=self.local_repo_string_path
)
run_and_confirm(command)
@property
def commands_dir_string_path(self):
return self.local_repo_string_path + '/commands'
@property
def local_repo_string_path(self):
commands_repo_local_path = '{local_repos_dir}/{repo_service}/{repo_user}/{commands_repo_name}'.format(
local_repos_dir=ConfigUtil().local_repos_dir,
repo_service=self.hosting_service,
repo_user=self.user,
commands_repo_name=self.repo_name
)
return commands_repo_local_path
@property
def service(self):
if self._service:
return self._service
if 'bitbucket.org' in self.url:
return bitbucket
elif 'github.com' in self.url:
return github
else:
raise NotImplementedError
@property
def bitbucket(self):
return self.service == bitbucket
@property
def github(self):
return self.service == github
@property
def service_domain(self):
if self.bitbucket:
return bitbucket_domain
if self.github:
return github_domain
else:
raise NotImplementedError
def remote_file_source(self, file_name):
if self.bitbucket:
shell_command = 'git archive --remote=git@{service_domain}:{user}/{repo}.git HEAD commands/{file_name} | tar -x -O'.format(
service_domain=self.service_domain,
user=self.user,
repo=self.repo_name,
file_name=file_name
)
output = run_and_return_output(shell_command)
return output
elif self.github:
raise NotImplementedError
else:
raise NotImplementedError
|
eyalev/lk
|
lk/classes/source_code_repo.py
|
source_code_repo.py
|
py
| 4,401 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "furl.furl",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "lk.utils.shell_util.run_and_confirm",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "lk.utils.config_util.ConfigUtil",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "lk.utils.shell_util.run_and_return_output",
"line_number": 178,
"usage_type": "call"
}
] |
8092333942
|
from vector import Vector
import turtle
scale = 40
def print_vector(vector, color):
turtle.pencolor(color)
turtle.penup()
turtle.home()
turtle.pendown()
turtle.goto(vector.elements[0]*scale,vector.elements[1]*scale)
def print_system(x,y):
turtle.home()
for i in range(x):
turtle.dot(3)
turtle.write(i, align='right')
turtle.setx(scale*(i+1))
turtle.home()
for j in range(y):
turtle.dot(3)
turtle.write(j, align='right')
turtle.sety(scale*(j+1))
turtle.speed(10)
print_system(10,10)
vector1 = Vector([3, 2])
print_vector(vector1, 'red')
vector2 = Vector([1,-4])
print_vector(vector2, 'blue')
vector1.add_vector(vector2)
print_vector(vector1, 'green')
turtle.done()
|
sashokbg/python-exercises
|
vector/draw.py
|
draw.py
|
py
| 760 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "turtle.pencolor",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "turtle.penup",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "turtle.home",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "turtle.pendown",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "turtle.goto",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "vector.elements",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "turtle.home",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "turtle.dot",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "turtle.write",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "turtle.setx",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "turtle.home",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "turtle.dot",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "turtle.write",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "turtle.sety",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "turtle.speed",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "vector.Vector",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "vector.Vector",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "turtle.done",
"line_number": 41,
"usage_type": "call"
}
] |
72014598908
|
import json
import sys
import argparse
sys.path.append("../evaluation")
from evaluate import tuple_f1, convert_opinion_to_tuple
def get_args():
"""
Helper function to get the gold json, predictions json and negation jsons
"""
parser = argparse.ArgumentParser()
parser.add_argument("gold")
parser.add_argument("predictions")
parser.add_argument("metadata")
args = parser.parse_args()
return args
def open_json(json_file):
"""
Helper function to open the json files
"""
with open(json_file) as o:
file = json.load(o)
sent_dict = {sent["sent_id"]: sent for sent in file}
sent_keys = set(sent_dict.keys())
return sent_keys, sent_dict
def main():
args = get_args()
with open(args.metadata) as o:
metadata = json.load(o)
test_domains = {}
gold_keys, gold = open_json(args.gold)
pred_keys, pred = open_json(args.predictions)
# get the domains found in the test data
for sent_id in gold_keys:
domain = metadata[sent_id[:6]]["category"]
if domain not in test_domains:
test_domains[domain] = [sent_id]
else:
test_domains[domain].append(sent_id)
# print the domains in descending order
for key, value in sorted(test_domains.items(), key=lambda kv: len(kv[1])):
print("{}: \t{}".format(key, len(value)))
print()
print()
# get the sentiment graph F1 for each domain
for domain, sent_ids in sorted(test_domains.items(),
key=lambda kv: len(kv[1])):
domain_gold = dict([(sent_id, convert_opinion_to_tuple(gold[sent_id])) for sent_id in sent_ids])
domain_pred = dict([(sent_id, convert_opinion_to_tuple(pred[sent_id])) for sent_id in sent_ids])
f1 = tuple_f1(domain_gold, domain_pred)
print("{0}: {1:.3f}".format(domain, f1))
if __name__ == "__main__":
main()
|
jerbarnes/semeval22_structured_sentiment
|
analysis/domain_analysis.py
|
domain_analysis.py
|
py
| 1,950 |
python
|
en
|
code
| 71 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "evaluate.convert_opinion_to_tuple",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "evaluate.convert_opinion_to_tuple",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "evaluate.tuple_f1",
"line_number": 61,
"usage_type": "call"
}
] |
72683621307
|
from matplotlib import pyplot as plt
from numpy import loadtxt, zeros
from skimage.measure import label
from os import path
if __name__ == '__main__':
current_dir = path.dirname(__file__)
file_names = ['mat_p0.70.dat', 'mat_p0.72.dat']
for file_name in file_names:
file_path = path.join(current_dir, file_name)
lattice = loadtxt(file_path)
# change connectivity to 2 if you want to consider Moore neighborhood
labelled_lattice = label(lattice, background=0, connectivity=1)
num_clusters = labelled_lattice.max()
cluster_sizes = []
for cluster_id in range(1, num_clusters + 1):
cluster_sizes.append((labelled_lattice == cluster_id).sum())
cluster_size_distribution = zeros(max(cluster_sizes))
for cluster_size in cluster_sizes:
cluster_size_distribution[cluster_size - 1] += 1
inverse_cdf = zeros(max(cluster_sizes))
for cluster_size in range(max(cluster_sizes)):
inverse_cdf[cluster_size] = (cluster_size_distribution[cluster_size:]).sum()
inverse_cdf /= sum(cluster_size_distribution)
plt.figure(figsize=(11, 5))
plt.subplot(1, 2, 1)
plt.title(f"Lattice from {file_name}")
plt.imshow(lattice)
plt.subplot(1, 2, 2)
plt.title("Cluster Size Distribution")
plt.xlabel("Cluster Size s")
plt.ylabel("P(S > s)")
plt.loglog(range(1, max(cluster_sizes) + 1), inverse_cdf, 'bo')
plt.show()
|
tee-lab/patchy-ecosterics
|
temp_actions/CSD/plotter.py
|
plotter.py
|
py
| 1,513 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "os.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "numpy.loadtxt",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "skimage.measure.label",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.loglog",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 42,
"usage_type": "name"
}
] |
810990786
|
'''Time Based Key-Value Store - https://leetcode.com/problems/time-based-key-value-store/
Design a time-based key-value data structure that can store multiple values for the same key at
different time stamps and retrieve the key's value at a certain timestamp.
Implement the TimeMap class:
TimeMap() Initializes the object of the data structure.
void set(String key, String value, int timestamp) Stores the key key with the value value at the given time timestamp.
String get(String key, int timestamp) Returns a value such that set was called previously, with
timestamp_prev <= timestamp. If there are multiple such values, it returns the value associated with the
largest timestamp_prev. If there are no values, it returns "".
Example 1:
Input
["TimeMap", "set", "get", "get", "set", "get", "get"]
[[], ["foo", "bar", 1], ["foo", 1], ["foo", 3], ["foo", "bar2", 4], ["foo", 4], ["foo", 5]]
Output
[null, null, "bar", "bar", null, "bar2", "bar2"]
Explanation
TimeMap timeMap = new TimeMap();
timeMap.set("foo", "bar", 1); // store the key "foo" and value "bar" along with timestamp = 1.
timeMap.get("foo", 1); // return "bar"
timeMap.get("foo", 3); // return "bar", since there is no value corresponding to foo at timestamp 3
and timestamp 2, then the only value is at timestamp 1 is "bar".
timeMap.set("foo", "bar2", 4); // store the key "foo" and value "ba2r" along with timestamp = 4.
timeMap.get("foo", 4); // return "bar2"
timeMap.get("foo", 5); // return "bar2"
'''
from collections import OrderedDict
class TimeMap:
def __init__(self):
self.time_mapping = {}
def set(self, key: str, value: str, timestamp: int) -> None:
if key not in self.time_mapping:
self.time_mapping[key] = OrderedDict()
self.time_mapping[key][timestamp] = value
def get(self, key: str, timestamp: int) -> str:
if key in self.time_mapping:
dictValues = self.time_mapping[key]
temp = []
result = ""
while dictValues:
time, value = dictValues.popitem()
temp.append((time, value))
if time <= timestamp:
result = value
break
while temp:
time, value = temp.pop()
self.time_mapping[key][time] = value
return result
else:
return ""
# Your TimeMap object will be instantiated and called as such:
# obj = TimeMap()
# obj.set(key,value,timestamp)
# param_2 = obj.get(key,timestamp)
# Using Binary Search
from collections import defaultdict
class TimeMap:
def __init__(self):
self.time_mapping = defaultdict(list)
def set(self, key: str, value: str, timestamp: int) -> None:
self.time_mapping[key].append((value, timestamp))
def get(self, key: str, timestamp: int) -> str:
if key not in self.time_mapping:
return ""
dictValues = self.time_mapping[key]
left = 0
right = len(dictValues) - 1
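        # binary search over the (value, timestamp) pairs for the latest timestamp <= the queried timestamp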
while left < right:
mid = left + (right - left) // 2
if dictValues[mid][1] < timestamp:
left = mid + 1
elif dictValues[mid][1] > timestamp:
right = mid - 1
else:
return dictValues[mid][0]
if dictValues[right][1] <= timestamp:
return dictValues[right][0]
return "" if right < 0 else dictValues[right - 1][0]
# Your TimeMap object will be instantiated and called as such:
# obj = TimeMap()
# obj.set(key,value,timestamp)
# param_2 = obj.get(key,timestamp)
|
Saima-Chaity/Leetcode
|
Google/Time Based Key-Value Store.py
|
Time Based Key-Value Store.py
|
py
| 3,635 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.OrderedDict",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 74,
"usage_type": "call"
}
] |
30301888432
|
import os
import sys
import unittest
from pathlib import Path
import coverage
from mpi4py import MPI
def main(path, parallel):
cov = coverage.coverage(
branch=True,
include=str(Path(path).parent) + '/ignis/executor/*.py',
)
cov.start()
import ignis.executor.core.ILog as Ilog
Ilog.enable(False)
tests = unittest.TestLoader().discover(path + '/executor/core', pattern='*Test.py')
if parallel:
tests.addTests(unittest.TestLoader().discover(path + '/executor/core', pattern='IMpiTest2.py'))
else:
print("WARNING: mpi test skipped", file=sys.stderr)
result = unittest.TextTestRunner(verbosity=2, failfast=True).run(tests)
cov.stop()
cov.save()
MPI.COMM_WORLD.Barrier()
if result.wasSuccessful() and result.testsRun > 0 and MPI.COMM_WORLD.Get_rank() == 0:
if parallel:
others = ["../np" + str(i) + "/.coverage" for i in range(1, MPI.COMM_WORLD.Get_size())]
cov.combine(data_paths=others, strict=True)
covdir = os.path.join(os.getcwd(), "ignis-python-coverage")
print('Coverage: (HTML version: file://%s/index.html)' % covdir, file=sys.stderr)
cov.report(file=sys.stderr)
cov.html_report(directory=covdir)
if __name__ == '__main__':
rank = MPI.COMM_WORLD.Get_rank()
parallel = MPI.COMM_WORLD.Get_size() > 1
path = os.getcwd()
Path("debug").mkdir(parents=True, exist_ok=True)
os.chdir("debug")
if parallel:
wd = "np" + str(rank)
Path(wd).mkdir(parents=True, exist_ok=True)
os.chdir(wd)
if rank > 0:
log = open("log.txt", 'w')
sys.stderr = log
sys.stdout = log
main(path, parallel)
if rank > 0:
sys.stderr.close()
|
andreasolla/core-python
|
ignis_test/Main.py
|
Main.py
|
py
| 1,575 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "coverage.coverage",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "ignis.executor.core.ILog.enable",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "ignis.executor.core.ILog",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "unittest.TestLoader",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "unittest.TestLoader",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "unittest.TextTestRunner",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD.Barrier",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD.Get_rank",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD.Get_size",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD.Get_rank",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD.Get_size",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "mpi4py.MPI.COMM_WORLD",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "mpi4py.MPI",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "os.getcwd",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr.close",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 53,
"usage_type": "attribute"
}
] |
21916878669
|
#!/usr/bin/env python2
import logging
import os
import shutil
import tempfile
from test_utils import TESTS_DIR, qsym, check_testcase
SCHEDULE_DIR = os.path.join(TESTS_DIR, "schedule")
logging.getLogger('qsym.Executor').setLevel(logging.DEBUG)
def get_testcases(exe, bitmap, input_binary):
output_dir = tempfile.mkdtemp(prefix="qsym-")
input_file = tempfile.NamedTemporaryFile(prefix="qsym-", delete=False).name
new_inputs = []
with open(input_file, "wb") as f:
f.write(input_binary)
try:
q = qsym.Executor([exe], input_file, output_dir, bitmap=bitmap)
q.run()
for path in q.get_testcases():
with open(path, "rb") as f:
data = f.read()
new_inputs.append(data)
return new_inputs
finally:
shutil.rmtree(output_dir)
os.unlink(input_file)
return None
def get_seeds(target_dir):
seeds = []
inputs_dir = os.path.join(target_dir, "inputs")
for name in os.listdir(inputs_dir):
path = os.path.join(inputs_dir, name)
with open(path, "rb") as f:
data = f.read()
seeds.append(data)
return seeds
def get_all_testcases(target, max_iter=30):
target_dir = os.path.join(SCHEDULE_DIR, target)
exe = os.path.join(target_dir, "main")
inputs = get_seeds(target_dir)
processed = []
bitmap = tempfile.NamedTemporaryFile(prefix="qsym-", delete=False).name
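    # the same bitmap file is passed to every qsym run below (presumably so coverage state persists between iterations)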
try:
for i in xrange(max_iter):
if not inputs:
break
input_binary = inputs.pop()
new_inputs = get_testcases(exe, bitmap, input_binary)
assert new_inputs is not None
inputs.extend(new_inputs)
processed.append(input_binary)
return processed
finally:
os.unlink(bitmap)
def check_testcases(exe, testcases):
input_file = tempfile.NamedTemporaryFile(prefix="qsym-", delete=False).name
try:
for testcase in testcases:
if check_testcase(exe, testcase):
return True
finally:
os.unlink(input_file)
return False
def test_dup():
testcases = get_all_testcases("dup")
# default + 0xdeadbeef
assert len(testcases) == 2
|
sslab-gatech/qsym
|
tests/test_schedule.py
|
test_schedule.py
|
py
| 2,236 |
python
|
en
|
code
| 615 |
github-code
|
6
|
[
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "test_utils.TESTS_DIR",
"line_number": 9,
"usage_type": "argument"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "tempfile.mkdtemp",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tempfile.NamedTemporaryFile",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "test_utils.qsym.Executor",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "test_utils.qsym",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "shutil.rmtree",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.unlink",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "tempfile.NamedTemporaryFile",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "os.unlink",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "tempfile.NamedTemporaryFile",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "test_utils.check_testcase",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "os.unlink",
"line_number": 72,
"usage_type": "call"
}
] |
36650794154
|
from pywrap.exporter import (MethodDefinition, SetterDefinition,
GetterDefinition, ConstructorDefinition,
FunctionDefinition, CythonDeclarationExporter)
from pywrap.ast import (Param, Function, Clazz, Constructor, Method,
Field, Enum, Typedef)
from pywrap.parser import Includes, TypeInfo
from pywrap.utils import lines
from pywrap.defaultconfig import Config
from nose.tools import assert_multi_line_equal
def test_simple_function_def():
method = MethodDefinition(
"Testclass", "", "testfun", [], Includes(),
"void", TypeInfo({}), Config()).make()
assert_multi_line_equal(
method,
lines("cpdef testfun(Testclass self):",
" self.thisptr.testfun()")
)
def test_array_arg_function_def():
method = MethodDefinition(
"Testclass", "", "testfun", [Param("a", "double *"),
Param("aSize", "unsigned int")],
Includes(), "void", TypeInfo({}), Config()).make()
assert_multi_line_equal(
method,
lines("cpdef testfun(Testclass self, np.ndarray[double, ndim=1] a):",
" self.thisptr.testfun(&a[0], a.shape[0])")
)
def test_setter_definition():
field = Field("myField", "double", "MyClass")
setter = SetterDefinition(
"MyClass", field, Includes(), TypeInfo(), Config()).make()
assert_multi_line_equal(
setter,
lines(
"cpdef __set_my_field(MyClass self, double myField):",
" cdef double cpp_myField = myField",
" self.thisptr.myField = cpp_myField"
)
)
def test_getter_definition():
field = Field("myField", "double", "MyClass")
getter = GetterDefinition(
"MyClass", field, Includes(), TypeInfo(), Config()).make()
assert_multi_line_equal(
getter,
lines(
"cpdef __get_my_field(MyClass self):",
" cdef double result = self.thisptr.myField",
" return result",
""
)
)
def test_default_ctor_def():
ctor = ConstructorDefinition("MyClass", "", [], Includes(), TypeInfo(),
Config(), "MyClass").make()
assert_multi_line_equal(
ctor,
lines(
"def __init__(MyClass self):",
" self.thisptr = new cpp.MyClass()"
)
)
def test_function_def():
fun = FunctionDefinition("myFun", "", [], Includes(), "void", TypeInfo(),
Config()).make()
assert_multi_line_equal(
fun,
lines(
"cpdef my_fun():",
" cpp.myFun()"
)
)
def test_function_def_with_another_cppname():
fun = FunctionDefinition("myFunInt", "", [], Includes(), "void", TypeInfo(),
Config(), cppname="myFun").make()
assert_multi_line_equal(
fun,
lines(
"cpdef my_fun_int():",
" cpp.myFun()"
)
)
def test_function_decl():
fun = Function("test.hpp", "", "myFun", "void")
ignored_fun = Function("test.hpp", "", "myFun", "void")
ignored_fun.ignored = True
exporter = CythonDeclarationExporter(Includes(), Config())
exporter.visit_function(fun)
exporter.visit_function(ignored_fun)
exporter.visit_ast(None)
decl = exporter.export()
assert_multi_line_equal(
decl.strip(),
lines(
"cdef extern from \"test.hpp\" namespace \"\":",
" void myFun() except +"
)
)
def test_class_decl():
clazz = Clazz("test.hpp", "", "MyClass")
exporter = CythonDeclarationExporter(Includes(), Config())
exporter.visit_clazz(clazz)
exporter.visit_ast(None)
decl = exporter.export()
assert_multi_line_equal(
decl.strip(),
lines(
"cdef extern from \"test.hpp\" namespace \"\":",
" cdef cppclass MyClass:",
" pass"
)
)
def test_ctor_decl():
clazz = Clazz("test.hpp", "", "MyClass")
ctor = Constructor("MyClass")
ignored_ctor = Constructor("MyClass")
ignored_ctor.ignored = True
exporter = CythonDeclarationExporter(Includes(), Config())
exporter.visit_constructor(ctor)
exporter.visit_constructor(ignored_ctor)
exporter.visit_clazz(clazz)
exporter.visit_ast(None)
decl = exporter.export()
assert_multi_line_equal(
decl.strip(),
lines(
"cdef extern from \"test.hpp\" namespace \"\":",
" cdef cppclass MyClass:",
" MyClass()"
)
)
def test_method_decl():
clazz = Clazz("test.hpp", "", "MyClass")
method = Method("myMethod", "void", "MyClass")
ignored_method = Method("", "", "")
ignored_method.ignored = True
exporter = CythonDeclarationExporter(Includes(), Config())
exporter.visit_param(Param("myParam", "double"))
exporter.visit_method(method)
exporter.visit_method(ignored_method)
exporter.visit_clazz(clazz)
exporter.visit_ast(None)
decl = exporter.export()
assert_multi_line_equal(
decl.strip(),
lines(
"cdef extern from \"test.hpp\" namespace \"\":",
" cdef cppclass MyClass:",
" void myMethod(double myParam) except +"
)
)
def test_field_decl():
clazz = Clazz("test.hpp", "", "MyClass")
field = Field("myField", "double", "MyClass")
ignored_field = Field("myField", "double", "MyClass")
ignored_field.ignored = True
exporter = CythonDeclarationExporter(Includes(), Config())
exporter.visit_field(field)
exporter.visit_field(ignored_field)
exporter.visit_clazz(clazz)
exporter.visit_ast(None)
decl = exporter.export()
assert_multi_line_equal(
decl.strip(),
lines(
"cdef extern from \"test.hpp\" namespace \"\":",
" cdef cppclass MyClass:",
" double myField"
)
)
def test_enum_decl():
enum = Enum("test.hpp", "", "MyEnum")
enum.constants.append("one")
enum.constants.append("two")
exporter = CythonDeclarationExporter(Includes(), Config())
exporter.visit_enum(enum)
exporter.visit_ast(None)
decl = exporter.export()
assert_multi_line_equal(
decl.strip(),
lines(
"cdef extern from \"test.hpp\" namespace \"\":",
" cdef enum MyEnum:",
" one",
" two"
)
)
def test_typedef_decl():
typedef = Typedef("test.hpp", "", "MyType", "double")
exporter = CythonDeclarationExporter(Includes(), Config())
exporter.visit_typedef(typedef)
exporter.visit_ast(None)
decl = exporter.export()
assert_multi_line_equal(
decl.strip(),
lines(
"cdef extern from \"test.hpp\" namespace \"\":",
" ctypedef double MyType"
)
)
|
AlexanderFabisch/cythonwrapper
|
pywrap/test/test_exporter.py
|
test_exporter.py
|
py
| 6,972 |
python
|
en
|
code
| 37 |
github-code
|
6
|
[
{
"api_name": "pywrap.exporter.MethodDefinition",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pywrap.parser.Includes",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pywrap.parser.TypeInfo",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pywrap.defaultconfig.Config",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "nose.tools.assert_multi_line_equal",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pywrap.utils.lines",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pywrap.exporter.MethodDefinition",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pywrap.ast.Param",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pywrap.ast.Param",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pywrap.parser.Includes",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pywrap.parser.TypeInfo",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pywrap.defaultconfig.Config",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "nose.tools.assert_multi_line_equal",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pywrap.utils.lines",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pywrap.ast.Field",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pywrap.exporter.SetterDefinition",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pywrap.parser.Includes",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pywrap.parser.TypeInfo",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pywrap.defaultconfig.Config",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "nose.tools.assert_multi_line_equal",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pywrap.utils.lines",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pywrap.ast.Field",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "pywrap.exporter.GetterDefinition",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "pywrap.parser.Includes",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "pywrap.parser.TypeInfo",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "pywrap.defaultconfig.Config",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "nose.tools.assert_multi_line_equal",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "pywrap.utils.lines",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "pywrap.exporter.ConstructorDefinition",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "pywrap.parser.Includes",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "pywrap.parser.TypeInfo",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "pywrap.defaultconfig.Config",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "nose.tools.assert_multi_line_equal",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "pywrap.utils.lines",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "pywrap.exporter.FunctionDefinition",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "pywrap.parser.Includes",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "pywrap.parser.TypeInfo",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "pywrap.defaultconfig.Config",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "nose.tools.assert_multi_line_equal",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "pywrap.utils.lines",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "pywrap.exporter.FunctionDefinition",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "pywrap.parser.Includes",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "pywrap.parser.TypeInfo",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "pywrap.defaultconfig.Config",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "nose.tools.assert_multi_line_equal",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "pywrap.utils.lines",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "pywrap.ast.Function",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "pywrap.ast.Function",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "pywrap.exporter.CythonDeclarationExporter",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "pywrap.parser.Includes",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "pywrap.defaultconfig.Config",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "nose.tools.assert_multi_line_equal",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "pywrap.utils.lines",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "pywrap.ast.Clazz",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "pywrap.exporter.CythonDeclarationExporter",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "pywrap.parser.Includes",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "pywrap.defaultconfig.Config",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "nose.tools.assert_multi_line_equal",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "pywrap.utils.lines",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "pywrap.ast.Clazz",
"line_number": 134,
"usage_type": "call"
},
{
"api_name": "pywrap.ast.Constructor",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "pywrap.ast.Constructor",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "pywrap.exporter.CythonDeclarationExporter",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "pywrap.parser.Includes",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "pywrap.defaultconfig.Config",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "nose.tools.assert_multi_line_equal",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "pywrap.utils.lines",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "pywrap.ast.Clazz",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "pywrap.ast.Method",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "pywrap.ast.Method",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "pywrap.exporter.CythonDeclarationExporter",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "pywrap.parser.Includes",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "pywrap.defaultconfig.Config",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "pywrap.ast.Param",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "nose.tools.assert_multi_line_equal",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "pywrap.utils.lines",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "pywrap.ast.Clazz",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "pywrap.ast.Field",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "pywrap.ast.Field",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "pywrap.exporter.CythonDeclarationExporter",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "pywrap.parser.Includes",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "pywrap.defaultconfig.Config",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "nose.tools.assert_multi_line_equal",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "pywrap.utils.lines",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "pywrap.ast.Enum",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "pywrap.exporter.CythonDeclarationExporter",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "pywrap.parser.Includes",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "pywrap.defaultconfig.Config",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "nose.tools.assert_multi_line_equal",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "pywrap.utils.lines",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "pywrap.ast.Typedef",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "pywrap.exporter.CythonDeclarationExporter",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "pywrap.parser.Includes",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "pywrap.defaultconfig.Config",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "nose.tools.assert_multi_line_equal",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "pywrap.utils.lines",
"line_number": 224,
"usage_type": "call"
}
] |
70285712189
|
"""
SWF
"""
from __future__ import absolute_import
from .tag import SWFTimelineContainer
from .stream import SWFStream
from .export import SVGExporter
from six.moves import cStringIO
from io import BytesIO
class SWFHeaderException(Exception):
""" Exception raised in case of an invalid SWFHeader """
def __init__(self, message):
super(SWFHeaderException, self).__init__(message)
class SWFHeader(object):
""" SWF header """
def __init__(self, stream):
a = stream.readUI8()
b = stream.readUI8()
c = stream.readUI8()
if not a in [0x43, 0x46, 0x5A] or b != 0x57 or c != 0x53:
# Invalid signature! ('FWS' or 'CWS' or 'ZFS')
raise SWFHeaderException("not a SWF file! (invalid signature)")
self._compressed_zlib = (a == 0x43)
self._compressed_lzma = (a == 0x5A)
self._version = stream.readUI8()
self._file_length = stream.readUI32()
if not (self._compressed_zlib or self._compressed_lzma):
self._frame_size = stream.readRECT()
self._frame_rate = stream.readFIXED8()
self._frame_count = stream.readUI16()
@property
def frame_size(self):
""" Return frame size as a SWFRectangle """
return self._frame_size
@property
def frame_rate(self):
""" Return frame rate """
return self._frame_rate
@property
def frame_count(self):
""" Return number of frames """
return self._frame_count
@property
def file_length(self):
""" Return uncompressed file length """
return self._file_length
@property
def version(self):
""" Return SWF version """
return self._version
@property
def compressed(self):
""" Whether the SWF is compressed """
return self._compressed_zlib or self._compressed_lzma
@property
def compressed_zlib(self):
""" Whether the SWF is compressed using ZLIB """
return self._compressed_zlib
@property
def compressed_lzma(self):
""" Whether the SWF is compressed using LZMA """
return self._compressed_lzma
def __str__(self):
return " [SWFHeader]\n" + \
" Version: %d\n" % self.version + \
" FileLength: %d\n" % self.file_length + \
" FrameSize: %s\n" % self.frame_size.__str__() + \
" FrameRate: %d\n" % self.frame_rate + \
" FrameCount: %d\n" % self.frame_count
class SWF(SWFTimelineContainer):
"""
SWF class
The SWF (pronounced 'swiff') file format delivers vector graphics, text,
video, and sound over the Internet and is supported by Adobe Flash
Player software. The SWF file format is designed to be an efficient
delivery format, not a format for exchanging graphics between graphics
editors.
@param file: a file object with read(), seek(), tell() methods.
"""
def __init__(self, file=None):
super(SWF, self).__init__()
self._data = None if file is None else SWFStream(file)
self._header = None
if self._data is not None:
self.parse(self._data)
@property
def data(self):
"""
Return the SWFStream object (READ ONLY)
"""
return self._data
@property
def header(self):
""" Return the SWFHeader """
return self._header
def export(self, exporter=None, force_stroke=False):
"""
Export this SWF using the specified exporter.
When no exporter is passed in the default exporter used
is swf.export.SVGExporter.
Exporters should extend the swf.export.BaseExporter class.
@param exporter : the exporter to use
@param force_stroke : set to true to force strokes on fills,
useful for some edge cases.
"""
exporter = SVGExporter() if exporter is None else exporter
if self._data is None:
raise Exception("This SWF was not loaded! (no data)")
if len(self.tags) == 0:
raise Exception("This SWF doesn't contain any tags!")
return exporter.export(self, force_stroke)
def parse_file(self, filename):
""" Parses the SWF from a filename """
self.parse(open(filename, 'rb'))
def parse(self, data):
"""
Parses the SWF.
The @data parameter can be a file object or a SWFStream
"""
self._data = data = data if isinstance(data, SWFStream) else SWFStream(data)
self._header = SWFHeader(self._data)
if self._header.compressed:
temp = BytesIO()
if self._header.compressed_zlib:
import zlib
data = data.f.read()
zip = zlib.decompressobj()
temp.write(zip.decompress(data))
else:
import pylzma
data.readUI32() #consume compressed length
data = data.f.read()
temp.write(pylzma.decompress(data))
temp.seek(0)
data = SWFStream(temp)
self._header._frame_size = data.readRECT()
self._header._frame_rate = data.readFIXED8()
self._header._frame_count = data.readUI16()
self.parse_tags(data)
def __str__(self):
s = "[SWF]\n"
s += self._header.__str__()
for tag in self.tags:
s += tag.__str__() + "\n"
return s
|
timknip/pyswf
|
swf/movie.py
|
movie.py
|
py
| 5,642 |
python
|
en
|
code
| 154 |
github-code
|
6
|
[
{
"api_name": "stream.readUI8",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "stream.readUI8",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "stream.readUI8",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "stream.readUI8",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "stream.readUI32",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "stream.readRECT",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "stream.readFIXED8",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "stream.readUI16",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "tag.SWFTimelineContainer",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "stream.SWFStream",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "export.SVGExporter",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "stream.SWFStream",
"line_number": 143,
"usage_type": "argument"
},
{
"api_name": "io.BytesIO",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "zlib.decompressobj",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "pylzma.decompress",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "stream.SWFStream",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "tag.__str__",
"line_number": 168,
"usage_type": "call"
}
] |
4582050726
|
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
from scipy import stats
import collections
import time
from sklearn import cluster
from sklearn.metrics import adjusted_rand_score
import scipy as sp
from tqdm import tqdm
from sklearn.manifold import MDS
from run_dist_mat import *
from chromosome_alignment import *
from scipy.cluster.hierarchy import dendrogram, linkage
import itertools
from mpl_toolkits.mplot3d import Axes3D
from multiprocessing import Pool
from itertools import repeat
def robustness_analysis():
reads_to_inlcude = "inliers" #"all"
clustering_method = "pckmeans" # "igs"
num_chrs = 19
data = read_data(clustering_method, reads_to_inlcude) #cells with less than 150 reads are deleted: 80., 84., 105., 113.
cum_lens = get_chr_cumulative_lengths()
fig, axes = plt.subplots(4,4, figsize = (20,20))
for i, bin_size in tqdm(enumerate([200e6, 100e6, 50e6, 25e6])):
for j, num_samples_for_resampling in tqdm(enumerate([5, 25, 50, 75])):
print("\n bin size: ", bin_size)
print("\n num samples: ", num_samples)
proportion_matching = []
variances = []
cell_i_index = 91
cell_j_index = 93
cell_i = data.loc[(data.cell_index==cell_i_index) & (data.chr < 20)].copy()
cell_i['abs_pos'] = -1
cell_i['abs_pos'] = cell_i.pos.copy() + [cum_lens[ch-1] for ch in cell_i.chr] #encodes the absolute position of the reads along the linear genome
cell_j = data.loc[(data.cell_index==cell_j_index) & (data.chr < 20)].copy()
cell_j['abs_pos'] = -1
cell_j['abs_pos'] = cell_j.pos.copy() + [cum_lens[ch-1] for ch in cell_j.chr] #encodes the absolute position of the reads along the linear genome
bins, num_bins_per_chr = get_bins(bin_size, cum_lens, num_chrs)
num_trials = 40
min_dists = []
for trial in range(num_trials):
bin_resampling_dists = []
for bin_resampling in range(num_samples_for_resampling):
cell_i_dist,_ = pckmeans_get_dist_mat_binned_resample(cell_i, bins, num_bins_per_chr)
cell_j_dist,_ = pckmeans_get_dist_mat_binned_resample(cell_j, bins, num_bins_per_chr)
num_samples_for_ordering = 50
ordering_dists = []
random_orders = np.zeros((num_samples_for_ordering, 19))
for counter, sample in enumerate(range(num_samples_for_ordering)):
order = np.arange(1,20)
np.random.shuffle(order)
random_orders[counter, :] = order
### parallelizing:
num_workers = 4
with Pool(num_workers) as p:
ordering_dists.append(p.starmap(get_aligned_inter_cell_dist, zip(repeat(cell_i_dist), repeat(cell_j_dist), repeat(num_bins_per_chr), repeat(19), random_orders))[0][0])#the first [0] gives the distance component of the output, the second [0] gets the actual distance and not the size of the intersection
bin_resampling_dists.append(np.round(np.min(ordering_dists), 4))
min_dists.append(np.min(bin_resampling_dists))
axes[j,i].scatter(np.zeros_like(min_dists), min_dists)
axes[j,i].set_title("bin size {}".format(bin_size/1e6))
axes[j,i].set_ylabel("sample size: {}".format(num_samples_for_resampling))
plt.suptitle("cell indeces {} and {}".format(cell_i_index, cell_j_index))
plt.savefig("figures/sequential_algorithm_bin_resampling_analysis_cells{}_{}.png".format(cell_i_index, cell_j_index))
|
pdavar/Analysis-of-3D-Mouse-Genome-Organization
|
bin_resample_analysis.py
|
bin_resample_analysis.py
|
py
| 3,912 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "tqdm.tqdm",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.random.shuffle",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "itertools.repeat",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "numpy.round",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.suptitle",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 86,
"usage_type": "name"
}
] |
27735122824
|
from scipy import integrate
import math
def func1(x):
return 1 / ((3*x - 1)**0.5)
def func2(x):
return math.log(x**2 + 1) / x
def func3(x):
return 1 / (0.2*x**2 + 1)**0.5
def rectangle_method(func, a, b, n):
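    # left Riemann sum (left-endpoint rectangle rule) with n equal subintervals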
h = (b - a)/n
integral_sum = sum(func(a + i * h) for i in range(n))
result = h * integral_sum
return result
def simpson_method(func, a, b, n):
integral_result = integrate.simps([func(a + i * (b - a) / n) for i in range(n+1)], dx=(b - a) / n)
return integral_result
def trapezoid_method(func, a, b, n):
h = (b - a) / n
nodes = [func(a + i * h) for i in range(n + 1)]
integral_result = h * (sum(nodes) - 0.5 * (nodes[0] + nodes[n]))
return integral_result
precision = 0.0001
integrals = [(func1, 1.4, 2.1), (func2, 0.8, 1.6), (func3, 1.3, 2.5)]
methods = [rectangle_method, simpson_method, trapezoid_method]
p_values = [10, 8, 20]
for i, (func, a, b) in enumerate(integrals):
print(f"Інтеграл {i + 1} (від {a} до {b}):")
method = methods[i]
n = p_values[i]
result = method(func, a, b, n)
print(f"Метод {i + 1}: {result:af}\n")
|
Alisa7A/Numerical-methods-of-programming
|
Pr11 Шамігулової Аліси.py
|
Pr11 Шамігулової Аліси.py
|
py
| 1,152 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "math.log",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "scipy.integrate.simps",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "scipy.integrate",
"line_number": 15,
"usage_type": "name"
}
] |
70724549309
|
from django.urls import path
from .views import RegiaoCreate, EmpresaCreate, AgendamentoColetaCreate, AgendamentoDescarteCreate
from .views import RegiaoUpdate, EmpresaUpdate, AgendamentoColetaUpdate, AgendamentoDescarteUpdate
from .views import RegiaoDelete, EmpresaDelete, AgendamentoColetaDelete, AgendamentoDescarteDelete
from .views import RegiaoList, EmpresaList, AgendamentoColetaList, AgendamentoDescarteList
urlpatterns = [
#Modelo de criação de url: path('endereco/',NomedaView.as.view(),name='nome_da_url'),
path ('cadastros/regiao/', RegiaoCreate.as_view(), name='cadastrar-regiao'),
path ('cadastros/empresa/', EmpresaCreate.as_view(), name='cadastrar-empresa'),
path ('descarte/agendardescarte/', AgendamentoDescarteCreate.as_view(), name='cadastrar-descarte'),
path ('coleta/agendarcoleta', AgendamentoColetaCreate.as_view(), name='cadastrar-coleta'),
path ('editar/regiao/<int:pk>', RegiaoUpdate.as_view(), name='editar-regiao'),
path ('editar/empresa/<int:pk>', EmpresaUpdate.as_view(), name='editar-empresa'),
path ('editar/descarte/<int:pk>', AgendamentoDescarteUpdate.as_view(), name='editar-descarte'),
path ('editar/coleta/<int:pk>', AgendamentoColetaUpdate.as_view(), name='editar-coleta'),
path ('deletar/regiao/<int:pk>', RegiaoDelete.as_view(), name='deletar-regiao'),
path ('deletar/empresa/<int:pk>', EmpresaDelete.as_view(), name='deletar-empresa'),
path ('deletar/descarte/<int:pk>', AgendamentoDescarteDelete.as_view(), name='deletar-descarte'),
path ('deletar/coleta/<int:pk>', AgendamentoColetaDelete.as_view(), name='deletar-coleta'),
path ('listar/regiao', RegiaoList.as_view(), name='listar-regiao'),
path ('listar/empresa', EmpresaList.as_view(), name='listar-empresa'),
path ('listar/descarte', AgendamentoDescarteList.as_view(), name='listar-descarte'),
path ('listar/coleta', AgendamentoColetaList.as_view(), name='listar-coleta'),
]
|
micaelhjs/PIUnivesp02
|
cadastros/urls.py
|
urls.py
|
py
| 1,948 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "views.RegiaoCreate.as_view",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "views.RegiaoCreate",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "views.EmpresaCreate.as_view",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "views.EmpresaCreate",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "views.AgendamentoDescarteCreate.as_view",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "views.AgendamentoDescarteCreate",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "views.AgendamentoColetaCreate.as_view",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "views.AgendamentoColetaCreate",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "views.RegiaoUpdate.as_view",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "views.RegiaoUpdate",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "views.EmpresaUpdate.as_view",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "views.EmpresaUpdate",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "views.AgendamentoDescarteUpdate.as_view",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "views.AgendamentoDescarteUpdate",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "views.AgendamentoColetaUpdate.as_view",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "views.AgendamentoColetaUpdate",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "views.RegiaoDelete.as_view",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "views.RegiaoDelete",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "views.EmpresaDelete.as_view",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "views.EmpresaDelete",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "views.AgendamentoDescarteDelete.as_view",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "views.AgendamentoDescarteDelete",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "views.AgendamentoColetaDelete.as_view",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "views.AgendamentoColetaDelete",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "views.RegiaoList.as_view",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "views.RegiaoList",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "views.EmpresaList.as_view",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "views.EmpresaList",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "views.AgendamentoDescarteList.as_view",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "views.AgendamentoDescarteList",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "views.AgendamentoColetaList.as_view",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "views.AgendamentoColetaList",
"line_number": 28,
"usage_type": "name"
}
] |
7091903997
|
import database
from datetime import datetime
import db_pyMySQL
conn = database.connection
# Thêm tài khoản "user": User sẽ không mã hoá mkhau do xài 2 ngôn ngữ khác nhau,
# nên khi mã hoá xong NodeJS sẽ ko hỗ trợ để giải mã => sẽ không đăng nhập được.
# INSERT:
# Thêm tài khoản khách hàng:
def insert_user(name, email, password, phone, address):
with conn.cursor() as cur:
mk = password + database.mysecret_key
# pas = mk.encode()
sql = '''
INSERT INTO khachhang(tenkh, email, matkhau, sodienthoai, diachi)
VALUES (%s, %s, %s, %s, %s)
'''
cur.execute(sql, (name, email, mk, phone, address))
conn.commit()
# Thêm tài khoản "admin":
def insert_admin(admin, matkhau, ten, diachi, sdt, maquyen):
with conn.cursor() as cur:
mk = matkhau + database.mysecret_key
# pas = database.cipher.encrypt(matkhau) # Mã hoá mật khẩu
sql = '''
INSERT INTO admin(admin, matkhau, tennv, diachi, sodienthoai, maquyen)
VALUES (%s, %s, %s, %s, %s, %s)
'''
cur.execute(sql, (admin, mk, ten, diachi, sdt, maquyen))
conn.commit()
# Thêm "danh mục" sản phẩm:
def insert_category(ma, ten):
with conn.cursor() as cur:
sql = '''
INSERT INTO danhmuc(madm, tendm)
VALUES (%s, %s)
'''
cur.execute(sql, (ma, ten))
conn.commit()
# Thêm "nhà sản xuất":
def insert_producer(ma, ten, xuatxu):
with conn.cursor() as cur:
sql = '''
INSERT INTO nhasx(mansx, tennsx, xuatxu)
VALUES (%s, %s, %s)
'''
cur.execute(sql, (ma, ten, xuatxu))
conn.commit()
# Thêm "loại" sản phẩm:
def insert_type(type_id, name):
with conn.cursor() as cur:
sql = '''
INSERT INTO loaisp(maloai, tenloai)
VALUES (%s, %s)
'''
cur.execute(sql, (type_id, name))
conn.commit()
# Thêm "sản phẩm":
def insert_product(code, name, price, reduced_price, amount, img, producer_id, type_id):
with conn.cursor() as cur:
sql = '''
INSERT INTO sanpham(code, tensp, gia, giamgia, soluong, hinh, mansx, maloai)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
'''
cur.execute(sql, (code, name, price, reduced_price, amount, img, producer_id, type_id))
conn.commit()
# Thêm mới "Quyền hạn - chức vụ":
def insert_permission(code, name):
with conn.cursor() as cur:
sql = '''
INSERT INTO quyen(maquyen, Ten)
VALUES (%s, %s)
'''
cur.execute(sql, (code, name))
conn.commit()
# Thêm mới "trạng thái":
def insert_status(ten, trangthai):
with conn.cursor() as cursor:
sql = '''
INSERT INTO trangthai(tentt, trangthai)
VALUES (%s, %s)
'''
cursor.execute(sql, (ten, trangthai))
conn.commit()
# UPDATE:
# Sửa profile tài khoản admin:
def update_profile_admin(email, name, address, phone, permission, admin_id):
with conn.cursor() as cur:
sql = '''
UPDATE admin
SET admin = %s, tennv = %s, diachi = %s, sodienthoai = %s, maquyen = %s
WHERE manv = %s
'''
cur.execute(sql, (email, name, address, phone, permission, admin_id))
conn.commit()
return 1
# Cập nhật mật khẩu của admin:
def update_password_admin(pas, admin_id):
with conn.cursor() as cur:
password = pas + database.mysecret_key
sql = '''
UPDATE admin
SET matkhau = %s
WHERE manv = %s
'''
cur.execute(sql, (password, admin_id,))
conn.commit()
return 1
# Sửa profile tài khoản khách hàng:
def update_profile_user(name, email, phone, address, user_id):
with conn.cursor() as cur:
sql = '''
UPDATE khachhang
SET tenkh = %s, email = %s, sodienthoai = %s, diachi = %s
WHERE makh = %s
'''
cur.execute(sql, (name, email, phone, address, user_id))
conn.commit()
return 1
# Cập nhật mật khẩu của khách hàng:
def update_password_user(pas, user_id):
with conn.cursor() as cur:
password = pas + database.mysecret_key
sql = '''
UPDATE khachhang
SET matkhau = %s
WHERE makh = %s
'''
cur.execute(sql, (password, user_id,))
conn.commit()
return 1
# Sửa danh mục:
def update_category(name, category_id):
with conn.cursor() as cur:
sql = '''
UPDATE danhmuc
SET tendm = %s
WHERE madm = %s
'''
cur.execute(sql, (name, category_id,))
conn.commit()
return 1
# Sửa loại:
def update_type(name, type_id):
with conn.cursor() as cur:
sql = '''
UPDATE loaisp
SET tenloai = %s
WHERE maloai = %s
'''
cur.execute(sql, (name, type_id,))
conn.commit()
return 1
# Sửa nhà sản xuất:
def update_producer(name, origin, producer_id):
with conn.cursor() as cur:
sql = '''
UPDATE nhasx
SET tennsx = %s, xuatxu = %s
WHERE mansx = %s
'''
cur.execute(sql, (name, origin, producer_id,))
conn.commit()
return 1
# Sửa quyền hạn - chức vụ:
def update_permission(name, permission_id):
with conn.cursor() as cur:
sql = '''
UPDATE quyen
SET Ten = %s
WHERE maquyen = %s
'''
cur.execute(sql, (name, permission_id,))
conn.commit()
return 1
# Sửa trạng thái:
def update_status(name, status_id):
with conn.cursor() as cur:
sql = '''
UPDATE trangthai
SET tentt = %s
WHERE trangthai = %s
'''
cur.execute(sql, (name, status_id,))
conn.commit()
return 1
# Sửa sản phẩm:
def update_product(code, name, price, reduced_price, amount, img, producer_id, type_id, product_id):
with conn.cursor() as cur:
sql = '''
UPDATE sanpham
SET code = %s, tensp = %s, gia = %s, giamgia = %s, soluong = %s, hinh = %s, mansx = %s, maloai = %s
WHERE masp = %s
'''
cur.execute(sql, (code, name, price, reduced_price, amount, img, producer_id, type_id, product_id))
conn.commit()
return 1
# Chức năng của khách hàng.
# Thêm đơn hàng:
def insert_order(user_id, total, product_id, product_name, price, amount):
try:
with conn.cursor() as cur:
order_date = datetime.today()
sql_order = '''
INSERT INTO donhang(makh, tong, ngaydat)
VALUES (%s, %s, %s);
'''
val_order = (user_id, total, order_date)
sql_orderID = "SELECT LAST_INSERT_ID() as LastID;"
sql_detailOrder = '''
INSERT INTO chitietdh(masp, tensp, gia, soluong, madonhang)
VALUES (%s, %s, %s, %s, %s);
'''
arrayProduct = []
try:
cur.execute(sql_order, val_order)
conn.commit()
cur.execute(sql_orderID)
lastId = cur.fetchone()
order_id = lastId['LastID'] # Lấy id của đơn hàng vừa tạo.
for i in arrayProduct:
code = i['masp']
name = i['tensp']
prices = i['gia']
amounts = i['soluong']
cur.execute(sql_detailOrder, (code, name, prices, amounts, order_id))
conn.commit()
except:
conn.rollback()
finally: # Ngắt kết nối DB.
conn.close()
# Sửa đơn hàng: Chỉ sửa được đơn hàng khi trạng thái đơn hàng là 'Đang chờ xử lý', còn lại thì khách hàng ko được sửa.
def update_order(amount, order_id):
with conn.cursor() as cur:
sql = "SELECT * FROM donhang WHERE madonhang = %s"
cur.execute(sql, (order_id,))
order = cur.fetchone()
product_id = order['masp']
# Tìm giá của sản phẩm:
sql1 = "SELECT gia FROM sanpham WHERE masp = %s"
cur.execute(sql1, (product_id,))
gia = cur.fetchone()
        price = amount * gia['gia']  # fetchone() returns a dict row here, so take the 'gia' column value
if order['trangthai'] == 0: # Kiểm tra trạng thái đơn hàng.
sql = '''
UPDATE donhang
SET soluong = %s, gia = %s
WHERE madonhang = %s
'''
cur.execute(sql, (amount, price, order_id,))
conn.commit()
return 1
else: # Đơn hàng đã được duyệt ko thể sửa.
return -1
|
letrinhan1509/FashionShop
|
api_admin/model_insert.py
|
model_insert.py
|
py
| 8,813 |
python
|
vi
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "database.connection",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "database.mysecret_key",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "database.mysecret_key",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "database.mysecret_key",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "database.mysecret_key",
"line_number": 150,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.today",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 245,
"usage_type": "name"
}
] |
75108014908
|
# from unicodedata import lookup
from django.urls import path, include
from rest_framework.routers import SimpleRouter, DefaultRouter # This for the viewset models in the views
from rest_framework_nested import routers # This is for the nested routers
from store.models import Product
# from pprint import pprint
from . import views
# This is for the nested routers
router = routers.DefaultRouter()
router.register('products', views.ProductViewSet, basename='products')
router.register('carts', views.CartViewSet, basename='carts')
router.register('customers', views.CustomerViewSet, basename='customers')
router.register('orders', views.OrderViewSet, basename='orders')
# product to review nested routing
products_router = routers.NestedDefaultRouter(router, 'products', lookup='product') # This registers the url as a nested router
products_router.register('reviews', views.ReviewViewSet, basename='product-reviews')# This allows configuration of the already created nested url
products_router.register('images', views.ProductImageViewSet, basename='product-images')# This allows configuration of the already created nested url
cart_router = routers.NestedDefaultRouter(router, 'carts', lookup='cart') # This registers the url as a nested router
cart_router.register('items', views.CartItemViewSet, basename='cart-items')# This allows configuration of the already created nested url
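# With these registrations the nested endpoints take the form /products/{product_pk}/reviews/ and /carts/{cart_pk}/items/ (lookup='product'/'cart' produces the *_pk URL kwarg)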
# This for the normal viewset
# router = SimpleRouter()
# router.register('products', views.ProductViewSet, basename='products') # the prefix 'products' is what displays as a url
# router = DefaultRouter()
# router.register('products', views.ProductViewSet, basename='products')
# This is a the url pattern for the nestedviewset(its optional)
# urlpatterns = router.urls + products_router.urls
urlpatterns = [
## THIS IS FOR ROUTER
path('', include(router.urls)),
path('', include(products_router.urls)),
path('', include(cart_router.urls)),
### THIS IS FOR THE CLASS BASED VIEWS
# path('products/', views.ProductList.as_view()), # ".as_views()" generates function url for the CBV
# path('products/<int:pk>/', views.ProductDetail.as_view()),
path('category/', views.CategoryList.as_view()),
# path('category/', views.category_list),
path('category/<int:pk>/', views.CategoryDetail.as_view()),
### THIS IS FOR THE FUNCTION BASED VIEWS
# path('products/', views.product_list),
# path('products/<int:pk>/', views.product_detail),
# path('categories/', views.category_list),
# path('categories/<int:pk>/', views.category_detail),
# path('categories/<int:pk>/', views.category_detail, name='category-detail'), # This is for the HyperlinkedRelatedField
]
|
Auracule/e_commerce_api
|
store/urls.py
|
urls.py
|
py
| 2,718 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "rest_framework_nested.routers.DefaultRouter",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "rest_framework_nested.routers",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "rest_framework_nested.routers.NestedDefaultRouter",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "rest_framework_nested.routers",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "rest_framework_nested.routers.NestedDefaultRouter",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "rest_framework_nested.routers",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "django.urls.include",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "django.urls.include",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "django.urls.include",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 49,
"usage_type": "call"
}
] |
32188022347
|
from itertools import permutations
def primenumber(x):
if x < 2:
return False
for i in range(2, x):
if x % i == 0:
return False
return True
def solution(numbers):
answer = 0
num = []
for i in range(1, len(numbers)+1) :
num.append(list(set(map(''.join, permutations(numbers, i)))))
per = list(set(map(int, set(sum(num, [])))))
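    # sum(num, []) flattens the per-length lists; casting to int collapses strings that differ only by leading zeros (e.g. '011' and '11'), and set() removes duplicates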
for p in per :
if primenumber(p) == True :
answer += 1
return answer
# ========================================================================
# Revisited the problem on April 16, 2023.
from itertools import permutations
def primenumber(x):
if x < 2:
return False
for i in range(2, x):
if x % i == 0:
return False
return True
def solution(numbers):
answer = 0
result = []
for number in range(1, len(numbers)+1):
first = list(set(map(''.join, permutations(numbers, number))))
result.append(first)
unduplicated_numbers = list(set(map(int, sum(result, []))))
for i in unduplicated_numbers:
if primenumber(i) == True:
answer += 1
return answer
|
kcw0331/python-for-coding-test
|
programmers-coding/소수찾기.py
|
소수찾기.py
|
py
| 1,183 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "itertools.permutations",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "itertools.permutations",
"line_number": 39,
"usage_type": "call"
}
] |
10758898663
|
import uvicorn
from fastapi import FastAPI, HTTPException
app = FastAPI()
@app.get("/")
async def root():
return {"message": "Welcome to basic math operations api!"}
@app.get("/add")
async def add(a: int, b: int):
return {"result": a + b}
@app.get("/subtract")
async def subtract(a: int, b: int):
return {"result": a - b}
@app.get("/multiply")
async def multiply(a: int, b: int):
return {"result": a * b}
@app.get("/divide")
async def divide(a: int, b: int):
if b == 0:
raise HTTPException(
status_code=404, detail='Division by 0 not allowed!')
return {"result": a / b}
if __name__ == '__main__':
uvicorn.run("app:app", host="0.0.0.0", port=5000, reload=True)
|
pawelcich/rest_api
|
web/app.py
|
app.py
|
py
| 722 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "fastapi.FastAPI",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "fastapi.HTTPException",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "uvicorn.run",
"line_number": 37,
"usage_type": "call"
}
] |
19631761443
|
from FACE_VERIFICATION.validation import Verify
from utils.encrypt import Encrypt
from utils.calling import caller
import pickle
obj1 = Verify()
obj2 = Encrypt()
obj3 = caller()
class RUN:
def __init__(self):
pass
def controller(self,data):
mode = data['mode']
if mode == "verify":
response = obj1.verify(frame_count=1,WINDOW=data['image_area'])
print(response)
return response
if mode == "train":
response = obj1.generate_embeds(frame_count=2,WINDOW=data['image_area'])
print(response)
return response
if mode == "predict":
response = obj1.verify(frame_count=1,WINDOW=data['image_area'])
print(response)
return response
def encrypt_controller(self,unique_id=None,data=None,mode=None,_id=None):
if mode == 'Add' or mode == 'Update':
data = obj2.encrypt_data(unique_id,data)
return obj3.database_controller(unique_id,data,mode=mode,_id =_id)
elif mode == "View":
data = obj3.database_controller(unique_id,data,mode=mode,_id =_id)
new_data = []
for key in data.keys():
new_data = data[key]
new_data = obj2.decrypt_data(unique_id,new_data)
data[key] = new_data
return data
else:
return obj3.database_controller(unique_id,data,mode=mode,_id =_id)
|
saquibquddus/Face-Unlock-Web-Application
|
STREAMLIT/utils/run.py
|
run.py
|
py
| 1,503 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "FACE_VERIFICATION.validation.Verify",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "utils.encrypt.Encrypt",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "utils.calling.caller",
"line_number": 8,
"usage_type": "call"
}
] |
19416798117
|
"""Determine the fration of non-built-up land area needed to become autarkic."""
import click
import pandas as pd
import geopandas as gpd
from src.potentials import Potential
@click.command()
@click.argument("path_to_demand")
@click.argument("path_to_potential")
@click.argument("path_to_footprint")
@click.argument("path_to_built_up_area")
@click.argument("path_to_units")
@click.argument("path_to_output")
@click.argument("share_from_pv", type=click.INT)
def necessary_land(path_to_demand, path_to_potential, path_to_footprint, path_to_built_up_area,
path_to_units, path_to_output, share_from_pv=100):
"""Determine the fraction of non-built-up land area needed to become autarkic.
Can vary the share of demand satisfied by rooftop PV.
Ignores offshore as it distorts total area sizes.
"""
assert share_from_pv <= 100
assert share_from_pv >= 0
share_from_pv = share_from_pv / 100
demand = pd.read_csv(path_to_demand, index_col=0)["demand_twh_per_year"]
potentials = pd.read_csv(path_to_potential, index_col=0)
footprint = pd.read_csv(path_to_footprint, index_col=0)
built_up_area = pd.read_csv(path_to_built_up_area, index_col=0)
country_codes = gpd.read_file(path_to_units).set_index("id")["country_code"]
rooftop_pv = potentials[str(Potential.ROOFTOP_PV)].where(
potentials[str(Potential.ROOFTOP_PV)] < share_from_pv * demand,
share_from_pv * demand
)
demand_after_rooftops = demand - rooftop_pv
assert (demand_after_rooftops >= 0).all()
open_field_potential = potentials[str(Potential.ONSHORE_WIND)] + potentials[str(Potential.OPEN_FIELD_PV)]
open_field_footprint = footprint[Potential.ONSHORE_WIND.area_name] + footprint[Potential.OPEN_FIELD_PV.area_name]
fraction_non_built_up_land = fraction_land_where_potential_exists(
open_field_potential=open_field_potential,
open_field_footprint=open_field_footprint,
built_up_area=built_up_area,
demand_after_rooftops=demand_after_rooftops
)
fraction_non_built_up_land.where(
fraction_non_built_up_land.notna(),
fraction_land_where_no_potential_exists(
open_field_potential=open_field_potential,
open_field_footprint=open_field_footprint,
built_up_area=built_up_area,
demand_after_rooftops=demand_after_rooftops,
country_codes=country_codes
),
inplace=True
)
# corner cases
fraction_non_built_up_land[fraction_non_built_up_land > 1] = 1
pd.DataFrame(
index=fraction_non_built_up_land.index,
data={
"fraction_non_built_up_land_necessary": fraction_non_built_up_land,
"fraction_roofs_necessary": rooftop_pv / potentials[str(Potential.ROOFTOP_PV)],
"rooftop_pv_generation_twh_per_year": rooftop_pv
}
).to_csv(
path_to_output,
index=True,
header=True
)
def fraction_land_where_potential_exists(open_field_potential, open_field_footprint,
built_up_area, demand_after_rooftops):
share_of_open_field_potential_necessary = demand_after_rooftops / open_field_potential
necessary_land = open_field_footprint * share_of_open_field_potential_necessary
return necessary_land / built_up_area["non_built_up_km2"]
def fraction_land_where_no_potential_exists(open_field_potential, open_field_footprint, built_up_area,
demand_after_rooftops, country_codes):
factor = open_field_footprint.groupby(country_codes).sum() / open_field_potential.groupby(country_codes).sum()
factor.name = "km2_per_twh_nationally"
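    # i.e. the national-average footprint per TWh (km2/TWh), applied to units whose local potential cannot cover demand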
assert (factor > 10).all()
assert (factor < 70).all()
factor = pd.DataFrame(country_codes).join(factor.rename("factor"), on="country_code")["factor"]
necessary_land = demand_after_rooftops * factor
return necessary_land / built_up_area["non_built_up_km2"]
if __name__ == "__main__":
necessary_land()
|
timtroendle/possibility-for-electricity-autarky
|
src/necessary_land.py
|
necessary_land.py
|
py
| 4,031 |
python
|
en
|
code
| 10 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "geopandas.read_file",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "src.potentials.Potential.ROOFTOP_PV",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "src.potentials.Potential",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "src.potentials.Potential.ROOFTOP_PV",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "src.potentials.Potential",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "src.potentials.Potential.ONSHORE_WIND",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "src.potentials.Potential",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "src.potentials.Potential.OPEN_FIELD_PV",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "src.potentials.Potential.ONSHORE_WIND",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "src.potentials.Potential",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "src.potentials.Potential.OPEN_FIELD_PV",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "src.potentials.Potential.ROOFTOP_PV",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "src.potentials.Potential",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "click.command",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "click.argument",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "click.argument",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "click.argument",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "click.argument",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "click.argument",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "click.argument",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "click.argument",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "click.INT",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 90,
"usage_type": "call"
}
] |
70793816827
|
from pathlib import Path
import re, pickle, os
import pickle, win32net
from time import sleep
class Scanner:
wordList = ""
ignored_type = ""
ignored_dir = ""
    # this will store all of the file dictionaries
files = []
# This is the path that will be scanned
p = ''
# The code that iterates through the path from above
def directory_file_iteration(self):
ignored_directories = self.getIgnoredDirectories()
ignored_filetypes = self.getIgnoredFileTypes()
for i in Path(self.p).rglob("*"):
# If there are directories in the "ignored directories.p" file, then it will iterate through them to see if file should be ignored
if len(ignored_directories) > 0:
# If the path of the file is in the ignored directories file, it will move to the next file
if os.path.normpath(i.parents[0]) in ignored_directories:
continue
# if the file type of the file is in the ignored filetypes, it will move to the next file
if Path(i).suffix.lower() in ignored_filetypes or len(Path(i).suffix) == 0 and "none" in ignored_filetypes:
continue
# if it passes both, it will check if it's actually a file
else:
if i.is_file():
# creating a file dictionary of attributes
fileDict = {"filename":i.name,"pathParent":i.parents[0],"fullPath":i, "filetype":Path(i).suffix, "flag":False, "data":{"filename":"","filecontents":"","ssn":"","phone":"","email":[], "cc":""}}
self.files.append(fileDict)
else:
continue
# if there are none in ignored directories.p it will run this
elif Path(i).suffix in ignored_filetypes:
continue
else:
if i.is_file():
fileDict = {"filename":i.name,"pathParent":i.parents[0],"fullPath":i, "filetype":Path(i).suffix, "flag":False, "data":{"filename":"","filecontents":"","ssn":"","phone":"","email":[], "CC":""}}
self.files.append(fileDict)
# checking to see if a keyword is in a filename
def checkFileNames(self):
for file_ in self.files:
for word in self.wordList:
if word.lower() in str(file_["filename"].lower()):
file_["flag"] = True
file_["data"]["filename"] = word
# reading in .txt files and checking for keywords
def readInTextFile(self):
for file_ in self.files:
if file_["filetype"] == ".txt":
try: # trying to open the file, sometimes it won't read because it isn't always ascii characters.
f = open(file_["fullPath"], "r")
fileContents = f.read()
f.close()
# searching the contents of the file for keyword
for word in self.wordList:
if word in fileContents.lower():
file_["flag"] = True
file_["data"]["filecontents"] = file_["data"]["filecontents"] + " " + word
# searching contents of file for SSN
file_ = self.ssnSearch(file_, fileContents)
# searching for phone numbers
file_ = self.phoneNumberSearch(file_, fileContents)
# searching for emails
file_ = self.emailSearch(file_, fileContents)
# searching for credit cards
file_ = self.ccSearch(file_, fileContents)
except UnicodeDecodeError:
pass
def ccSearch(self, file_, fileContents):
ccAmexFound = re.findall(r'(?<!\d)3[47][0-9]{13}$(?!\d)', fileContents)
ccVisaFound = re.findall(r'(?<!\d)4[0-9]{12}(?:[0-9]{3})?(?!\d)', fileContents)
ccMasterCardFound = re.findall(r'(?<!\d)(5[1-5][0-9]{14}|2(22[1-9][0-9]{12}|2[3-9][0-9]{13}|[3-6][0-9]{14}|7[0-1][0-9]{13}|720[0-9]{12}))(?!\d)', fileContents)
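        # card prefixes matched above: Amex = 15 digits starting 34/37, Visa = 13 or 16 digits starting 4, MasterCard = 16 digits starting 51-55 or 2221-2720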
strAmex = ''
strVisa = ''
strMaster = ''
for card in ccAmexFound:
strAmex = strAmex + " , Amex " + str(card)
for card in ccVisaFound:
strVisa = strVisa + " , Visa " + str(card)
for card in ccMasterCardFound:
strMaster = strMaster + " , Master " + str(card)
if len(strAmex) + len(strVisa) + len(strMaster) < 1:
return file_
else:
ccFound = str(strAmex) + str(strVisa) + str(strMaster)
try:
file_["flag"] = True
except:
pass
file_["data"]["cc"] = file_["data"]["cc"] + ccFound
return file_
def emailSearch(self, file_, fileContents):
emailFound = re.findall(r'[A-Za-z0-9]+[\._]?[a-z0-9]+[@]\w+[.]\w+', fileContents)
strEmailFound = ""
for email in emailFound:
strEmailFound = strEmailFound + " , " + email
if len(emailFound) < 1:
return file_
else:
try:
file_["flag"] = True
except:
pass
file_["data"]["email"] += emailFound
return file_
def phoneNumberSearch(self, file_, fileContents):
phoneFound = re.findall(r'(?<!\d)(?!000|.+0{4})(?:\d{10}|\d{3}-\d{3}-\d{4}|\d{3}\.\d{3}\.\d{4}|\d{3}\s\d{3}\s\d{4}|\(\d{3}\)\s\d{3}\s\d{4})(?!\d)', fileContents)
strPhoneFound = ""
for phone in phoneFound:
strPhoneFound = strPhoneFound + " , " + phone
if len(phoneFound) < 1:
return file_
else:
try:
file_["flag"] = True
except:
pass
file_["data"]["phone"] = file_["data"]["phone"] + strPhoneFound
return file_
# searching for SSNs
def ssnSearch(self,file_,fileContents):
#ssn format: xxxxxxxxx or xxx-xx-xxxx
ssnFound = re.findall(r'(?<!\d)(?!000|.+0{4})(?:\d{9}|\d{3}-\d{2}-\d{4})(?!\d)', fileContents)
strSSNFOUND = ""
for ssn in ssnFound:
strSSNFOUND = strSSNFOUND + " , " + ssn
if len(ssnFound) < 1:
return file_
else:
try:
file_["flag"] = True
except:
pass
file_["data"]["ssn"] = file_["data"]["ssn"] + strSSNFOUND
return file_
    # 'ignored directories.p' holds the directories you want to skip during a scan
def getIgnoredDirectories(self):
ignored_directories = pickle.load(open("ignored directories.p","rb"))
return ignored_directories
# Ignore the file types in this file such as .torrent, .txt
def getIgnoredFileTypes(self):
ignored_filetypes = pickle.load(open("ignored filetypes.p", "rb"))
return ignored_filetypes
# Setting path to scan
def setPath(self,i):
self.p = i
def getWordList(self):
self.wordList = pickle.load(open("word list.p", "rb"))
def checkIfAdmin(self):
if 'logonserver' in os.environ:
server = os.environ['logonserver'][2:]
else:
server = None
def if_user_is_admin(Server):
groups = win32net.NetUserGetLocalGroups(Server, os.getlogin())
isadmin = False
for group in groups:
if group.lower().startswith('admin'):
isadmin = True
return isadmin, groups
# Function usage
is_admin, groups = if_user_is_admin(server)
        # Result handling
if is_admin == True:
return True
else:
return False
#print('You are in the following groups:')
# for group in groups:
# print(group)
#sleep(10)
#if error: no module named win32api, run these lines in cmd
#pip uninstall pipywin32
#pip uninstall pywin32
#pip install pywin32
def get_scanning(self, scan_type):
if scan_type == "quick":
self.getWordList()
self.files = [] # removing all data in the files list
self.directory_file_iteration()
self.checkFileNames()
else:
self.getWordList()
self.files = [] # removing all data in the files list
self.directory_file_iteration()
self.checkFileNames()
self.readInTextFile()
return self.files
|
thang41/OpenSourceSecurityCheck
|
scanner.py
|
scanner.py
|
py
| 9,244 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pathlib.Path",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.normpath",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 209,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 210,
"usage_type": "attribute"
},
{
"api_name": "win32net.NetUserGetLocalGroups",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "os.getlogin",
"line_number": 215,
"usage_type": "call"
}
] |
29451178686
|
from selenium import webdriver
import time, re, urllib, requests
from telethon.sync import TelegramClient
from config import api_id, api_hash
client = TelegramClient('name', api_id, api_hash)
client.start()
dlgs = client.get_dialogs()
tegmo = None
for dlg in dlgs:
if dlg.title == "LTC Click Bot":
tegmo = dlg
if tegmo == None:
print("Отсутствует чат с ботом")
exit()
print(tegmo.title)
# dr_options = webdriver.FirefoxOptions()
# dr_options.set_headless()
# driver = webdriver.Firefox(options=dr_options)
from selenium.webdriver.chrome.options import Options
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument('--log-level=3')
driver = webdriver.Chrome(chrome_options=chrome_options)
tmp_url = ''
n = 0
nn = 0
links = True
links2 = True
try:
while True:
msg = client.get_messages(tegmo, limit=1)[0]
if re.search(r'\bThere is a new site for you to\b', msg.message):
client.send_message( tegmo , "🖥 Visit sites")
if re.search(r'\bPlease stay on the site for at least 10 seconds\b', msg.message):
time.sleep(10)
continue
if re.search(r'\bSorry\b', msg.message):
time.sleep(10)
nn = nn + 1
            print('Ran out of links, waiting', '.'*nn, end='\r')
client.send_message( tegmo , "🖥 Visit sites")
continue
if re.search(r'\bPress the "Visit website" button to earn LTC\b', msg.message):
nn = 0
url = msg.reply_markup.rows[0].buttons[0].url
if tmp_url == url:
nn = nn + 1
print("ссыдка с задежкой", '.'*nn , end='\r')
time.sleep(5)
t_el = driver.find_elements_by_class_name('timer')
text = ''
for i in t_el:
if (len(i.text) > 0):
text = i.text
i.click()
print(text)
if ''.join(text) == '':
client.send_message( tegmo , "🖥 Visit sites")
links2 = False
continue
links = True
print("переходим по ссылке", url)
driver.get(url)
n = n + 1
print("проходов ",n)
tmp_url = url
time.sleep(2)
except Exception as ex:
print(ex)
finally:
driver.close()
|
Sofron80/coin_bot
|
main2.py
|
main2.py
|
py
| 2,611 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "telethon.sync.TelegramClient",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "config.api_id",
"line_number": 8,
"usage_type": "argument"
},
{
"api_name": "config.api_hash",
"line_number": 8,
"usage_type": "argument"
},
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "re.search",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 93,
"usage_type": "call"
}
] |
71817771068
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# mid2sheet.py
# Midi-Files -> Sheets for Musicbox (30 notes, starting from F)
# (c) 2017 Niklas Kannenberg <[email protected]> and Gunnar J.
# Released under the GPL v3 or later, see file "COPYING"
#
# ToDo
# - Use 'pypdf' instead of external 'pdfjam' for PDF merging, avoid latex
#   (too many dependencies)
#
# Bugs
# - No whitespace in path/to/script allowed
# pdfjam and rm will not work, see subprocess.call()
# - exits if the input/output folder does not exist; better to create the output folder
#
#
# Useful links:
# https://mido.readthedocs.io/en/latest/midi_files.html
# http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html
# http://stackoverflow.com/questions/3444645/merge-pdf-files
# https://pythonhosted.org/PyPDF2/
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import mido
import os
import pandas as pd
import matplotlib.pyplot as plt
import subprocess
import datetime
# version of this software
version = 0.3
# print lot of debug messages?
debug = 0
# directories
inputdir = os.getcwd()+"/input" # input directory, e.g. "/input"
outputdir = os.getcwd()+"/output" # output directory for PDFs
# notes and y_mm
yBase = 5.5 # y_mm first note
yAbst = 58.5 / 29.0 # y_mm between notes
yUppr = 70.0 # y_mm whole strip
# Plot
x8beat = 4.0 # x_mm per 1/8 beat
minbeat = 7.9 # minimal playable x-distance for one note
xprmax = 250.0 # printable size, A4 Landscape
preplt = 8.0 # space for note names on plot, do not change
# lut midi-note -> y_mm
notemmlut = [ # Note # y_mm # name
[ 53, yBase + 0 * yAbst ], # F
[ 55, yBase + 1 * yAbst ], # G
[ 60, yBase + 2 * yAbst ], # C
[ 62, yBase + 3 * yAbst ], # D
[ 64, yBase + 4 * yAbst ], # E
[ 65, yBase + 5 * yAbst ], # F
[ 67, yBase + 6 * yAbst ], # G
[ 69, yBase + 7 * yAbst ], # A
[ 70, yBase + 8 * yAbst ], # A#
[ 71, yBase + 9 * yAbst ], # H
[ 72, yBase + 10 * yAbst ], # C
[ 73, yBase + 11 * yAbst ], # C#
[ 74, yBase + 12 * yAbst ], # D
[ 75, yBase + 13 * yAbst ], # D#
[ 76, yBase + 14 * yAbst ], # E
[ 77, yBase + 15 * yAbst ], # F
[ 78, yBase + 16 * yAbst ], # F#
[ 79, yBase + 17 * yAbst ], # G
[ 80, yBase + 18 * yAbst ], # G#
[ 81, yBase + 19 * yAbst ], # A
[ 82, yBase + 20 * yAbst ], # A#
[ 83, yBase + 21 * yAbst ], # H
[ 84, yBase + 22 * yAbst ], # C
[ 85, yBase + 23 * yAbst ], # C#
[ 86, yBase + 24 * yAbst ], # D
[ 87, yBase + 25 * yAbst ], # D#
[ 88, yBase + 26 * yAbst ], # E
[ 89, yBase + 27 * yAbst ], # F
[ 91, yBase + 28 * yAbst ], # G
[ 93, yBase + 29 * yAbst ], # A
]
print("-> Converting .mid to .pdf for Musicbox - mid2sheet v"+str(version))
print("--------------------------------------------------------")
print("Input from Folder: "+inputdir)
print("Output to Folder: "+outputdir)
# midi note number to y_mm
def get_mm(note):
retval = -1
for i in range(len(notemmlut)):
if (notemmlut[i][0] == note):
retval = notemmlut[i][1]
return retval
# name of midi note number
def get_name(note):
names = [ "C","C#","D","D#","E","F","F#","G","G#","A","A#","H" ]
return names[note % 12]
# returns 1 if note is to close to last note on same line
def get_terr(notes, pos):
gap = 9999
for i in range(0,pos):
if(notes.note[i] == notes.note[pos]):
gap = notes.x[pos] - notes.x[i]
if(gap < minbeat): # gap < min_gap
return 1 # not playable
else:
return 0 # OK
# mm -> inch (for matplotlib)
def mm2in(mm):
return mm/10/2.54 # mm to inch
# convert one midi file
def do_convert(infile, outfile, fname):
mid = mido.MidiFile(infile) # the input file
now = datetime.datetime.now() # actual time
sig_cnt = 0 # counter for signature messages
tim_cnt = 0 # counter for timing messages
# midi timing ticks per beat
ticks_4th = mid.ticks_per_beat
ticks_8th = ticks_4th / 2
# data frame for all midi events of melody track
datacols = ['time','tdiff','type','track','bytes']
data = pd.DataFrame(columns=datacols)
# data frame for note_on events
notecols = ['time','note','name', 'x', 'y', 'bar']
notes = pd.DataFrame(columns=notecols)
# list all tracks
if(debug):
print("Tracks : " + str(len(mid.tracks)))
for i in range(len(mid.tracks)):
track_len = len(mid.tracks[i])
print("Track " + str(i) + " : " + str(track_len) + " events")
# extract all messages from all tracks to data frame 'data'
for i, track in enumerate(mid.tracks):
for msg in track:
if(msg.type == "time_signature"):
time_signature = msg.dict()
numerator = time_signature['numerator']
denominator = time_signature['denominator']
sig_cnt += 1
if(debug):
print("Timing : " + str(numerator) + "/" + str(denominator))
if(msg.type == "set_tempo"):
set_tempo = msg.dict()
tempo = round((500000 / set_tempo['tempo']) * 120, 2)
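                # MIDI set_tempo is microseconds per quarter note; 500000 us/quarter corresponds to 120 bpm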
tim_cnt += 1
if(debug):
print("Tempo : " + str(tempo) + " bpm")
data = data.append({ 'time' : 0,
'tdiff' : msg.time,
'type' : msg.type,
'track' : i,
'bytes' : msg.bytes() }, ignore_index=True)
# warnings for tracks, tempo and signature
if(len(mid.tracks) != 1):
print("-> WARNING: Midi file has " + str(len(mid.tracks)) + " tracks instead of 1")
if(sig_cnt != 1):
print("-> WARNING: Midi file has " + str(sig_cnt) + " signature messages instead of 1. " +
"Using " + str(numerator) + "/" + str(denominator))
if(tim_cnt != 1):
print("-> WARNING: Midi file has " + str(tim_cnt) + " tempo messages instead of 1. " +
"Using " + str(tempo) + " bpm.")
# calculate absolute timing values
for i in range(1, len(data)):
# actual time difference
tdiffnext = data.tdiff[i]
# accumulate time only for same track
if(data.track[i] == data.track[i-1]):
timeacc = data.time[i-1]
else:
timeacc = 0
data.loc[i, 'time'] = timeacc + tdiffnext
    # extract all 'note_on' events from 'data' to 'notes'
for i in range(len(data)):
# event == note_on AND velocity > x
if(data.type[i] == 'note_on' and data.bytes[i][2] > 0):
thisnote = data.bytes[i][1]
mtime = data.time[i]
x_val = ( mtime / ticks_8th ) * x8beat
notes = notes.append({ 'time' : data.time[i],
'note' : thisnote,
'name' : get_name(thisnote),
'x' : x_val,
'y' : get_mm(thisnote),
'bar' : (data.time[i] /
(4 * ticks_4th * (numerator/denominator))) + 1
}, ignore_index=True)
# mm per bar
mm_bar = 8 * x8beat * (numerator/denominator)
# bars per page
bars_pp = int((xprmax - preplt) / mm_bar)
# debug
if(debug):
#print("--- DATA ---")
#print(data)
print("--- NOTES ---")
print(notes)
# generate plot
# -----------------------------
# size of one strip
strip_x = mm2in(preplt + bars_pp * mm_bar) # X-Size of plot
strip_y = mm2in(yUppr) # Y-Size of plot
hlines_x = mm2in(preplt) # start of horizontal note lines
newpage = 1 # flag for newpage
pagecnt = 0 # page counter
poffs = 0 # x-offset for current page
# for all notes (can't manipulate k in 'for' loop but in 'while' loop)
k = 0
while(k < len(notes) ):
# create a new plot
if( newpage==1 ):
newpage = 0 # reset flag
pagecnt = pagecnt + 1 # increment page counter
if(pagecnt > 1): # re plot last notes on current page
while( (notes.bar[k] ) >= bars_pp * (pagecnt - 1) + 1 ):
k -= 1
k += 1 # undo last while, no 'do-while' loop in python
# frame line width, hacked
plt.rcParams['axes.linewidth'] = 0.2
# x-offset for this page
poffs = mm2in( -preplt + (pagecnt-1) * mm_bar * bars_pp )
# create figure
f = plt.figure(figsize=(strip_x,strip_y), dpi=300,frameon=False)
ax = plt.subplot(111)
# figure has no borders
plt.subplots_adjust(left=0,right=1,bottom=0,top=1)
# plot 30 horizontal lines
for i in range(len(notemmlut)):
yy = mm2in(notemmlut[i][1]) # y-val
                nnote = get_name(notemmlut[i][0]) # name of the actual note
if(nnote == "C"): # C-Lines
plt.plot([hlines_x,strip_x],[yy,yy],color="black", linewidth=0.4)
elif nnote.endswith("#"): # #-Lines (Black keys)
plt.plot([hlines_x,strip_x],[yy,yy],color="black", linewidth=0.1, linestyle=':')
else: # Normal Lines
plt.plot([hlines_x,strip_x],[yy,yy],color="black", linewidth=0.2)
# add the name of the note
if(i%2 ==0): ofs = 0.1 # indent every 2nd note
else: ofs = 0.0 # no indent
ax.text(.1+ofs,yy, nnote, fontsize=5,verticalalignment='center',rotation=90)
# plot beat lines
for i in range(bars_pp * numerator):
xx = mm2in(mm_bar) / numerator # x per bar
if(i % numerator == 0):
# plot line (full bar)
plt.plot([hlines_x+xx*i, hlines_x+xx*i ],
[mm2in(notemmlut[0][1]), mm2in(notemmlut[-1][1])],color="black",linewidth=0.4)
# plot bar number
ax.text( hlines_x+xx*i + (xx/2), mm2in(notemmlut[0][1]) - mm2in(2.5),
str(int(1+ i/numerator + bars_pp * (pagecnt-1))),
fontsize=5,horizontalalignment='center',)
else:
# plot line (beat)
plt.plot([hlines_x+xx*i, hlines_x+xx*i ],
[mm2in(notemmlut[0][1]), mm2in(notemmlut[-1][1])],
color="black",linewidth=0.1, linestyle=':')
# add song name and info
ax.text( hlines_x + mm2in(4), yy + mm2in(2),
str(pagecnt) + " " + fname + " " +
str(numerator) + "/" + str(denominator) + " " + str(tempo) + " bpm",
fontsize=8, horizontalalignment='left')
ax.text( mm2in(xprmax) / 2, yy + mm2in(2),
"Generated in " + now.strftime('%Y-%m-%d') +
" with mid2sheet v" + str(version) ,
fontsize=5, horizontalalignment='left')
# vertical start line
plt.plot([hlines_x,hlines_x],[0,strip_y],color="black", linewidth=0.4)
plt.xticks([])
plt.yticks([])
ax.axis([0,strip_x, 0, strip_y])
# end if newpage
# position of note to plot
xx = mm2in(notes.x[k])
yy = mm2in(notes.y[k])
xx = xx -poffs
# plot one note
if(notes.y[k] != -1): # normal note
plt.plot(xx,yy,marker='.',color='white',markersize=12)
plt.plot(xx,yy,marker='.',color='black',markersize=8)
plt.plot(xx,yy,marker='.',color='white',markersize=5)
# fill red, if timing is to short
if(get_terr(notes, k)):
plt.plot(xx,yy,marker='.',color='red',markersize=3)
else: # plot error note name (not in musicbox range)
ax.text( xx,mm2in(1),get_name(int(notes.note[k])),
fontsize=5,color='red', horizontalalignment='center',)
# prepare new page, if this note was already outside current page
if( (notes.bar[k] ) > bars_pp * pagecnt + 1 ):
newpage = 1
# save current page to file
filename = outfile + "_%03d" % (pagecnt) + '.pdf'
f.savefig(filename, bbox_inches='tight')
# next note (manually in while loop)
else:
k += 1
# for all notes
# save last page to file
filename = outfile + "_%03d" % (pagecnt) + '.pdf'
f.savefig(filename, bbox_inches='tight')
# combine pdfs, TODO: switch to PyPDF2
subprocess.call("pdfjam " + outfile + "_*.pdf --nup 1x2 --a4paper --landscape --noautoscale true --delta '0.5cm 0.5cm' --outfile " + outfile + ".pdf", shell=True)
subprocess.call("rm " + outfile + "_*.pdf ", shell=True)
# result: list of notes with x,y mm values
return notes
# convert all files
for filename in os.listdir(inputdir):
if filename.endswith(".mid"):
inpfile = inputdir+"/"+filename
outfile_name = filename.rsplit('.', 1)[0]
outfile = outputdir+"/"+outfile_name
print("--------------------------------------------------------")
print("-> Input File : "+filename)
print("-> Output File : "+outfile_name + ".pdf")
do_convert(inpfile, outfile, outfile_name)
print("--------------------------------------------------------")
print("DONE")
|
flylens/mid2sheet
|
mid2sheet.py
|
mid2sheet.py
|
py
| 14,949 |
python
|
en
|
code
| 27 |
github-code
|
6
|
[
{
"api_name": "os.getcwd",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "mido.MidiFile",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 286,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 286,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 292,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 293,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots_adjust",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 295,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 304,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 306,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 308,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 320,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 320,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 328,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 328,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 343,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 343,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 344,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 345,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 357,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 358,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 359,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 359,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 362,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 362,
"usage_type": "name"
},
{
"api_name": "subprocess.call",
"line_number": 384,
"usage_type": "call"
},
{
"api_name": "subprocess.call",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 393,
"usage_type": "call"
}
] |
22609873896
|
from django.contrib.auth.decorators import user_passes_test, login_required
from django.http import HttpResponse, HttpResponseRedirect
from django.http import JsonResponse
from django.shortcuts import render, redirect
from apps.rfid.models import GeneralAssembly
from hybridjango.utils import group_test
class Ballot:
nr = 0
title = 'Avstemning'
choices = [
'Blank',
'Vevkom',
'Bedkom',
'Arrkom',
'Jentekom',
'Redaksjonen',
]
only_members = True
empty_votes = True
is_attending = True
has_voted = []
votes = []
active = True
class Suggestion:
num = 0
author = "Ikke vevsjef"
suggestion_text = "Vevkom burde ta over styret"
suggestions_enabled = False
empty_vote = 'Tomt'
suggestion_list = []
@user_passes_test(group_test("Tellekorps"))
def overview(request):
user = request.user
if request.method == 'POST':
if 'ballot_form' in request.POST:
Ballot.title = request.POST.get('title', 'Avstemning')
Ballot.only_members = True if request.POST.get('membersOnly') else False
Ballot.empty_votes = True if request.POST.get('empty_votes') else False
Ballot.is_attending = True if request.POST.get('is_attending') else False
Ballot.choices = [v for k, v in request.POST.items() if k.startswith('choice-')]
Ballot.votes = []
Ballot.has_voted = []
Ballot.nr += 1
return HttpResponseRedirect('#')
elif 'active' in request.GET:
Ballot.active = not (request.GET['active'] == 'Deaktiver')
return render(
request, 'ballot/overview.html', context={
'active': Ballot.active,
},
)
@user_passes_test(group_test("Nestleder"))
def suggestion_overview(request):
user = request.user
if request.method == 'POST':
if 'toggle_suggestions' in request.POST:
Suggestion.suggestions_enabled = not Suggestion.suggestions_enabled
elif 'clear_suggestions' in request.POST:
del suggestion_list[:]
return HttpResponseRedirect("#")
return render(request, 'ballot/suggestions.html', context={
'suggestions_enabled' : Suggestion.suggestions_enabled
})
@login_required
def post_suggestion(request):
sugg = Suggestion()
sugg.num += 1
sugg.author = request.user
sugg.suggestion_text = request.POST.get('suggestion_text')
suggestion_list.append(sugg)
@user_passes_test(group_test("Nestleder"))
def get_suggestions(request):
json_list = [{
"author_name" : suggestion.author.full_name,
"suggestion_text" : suggestion.suggestion_text,
} for suggestion in suggestion_list]
return JsonResponse({"suggestion_list" : json_list})
@login_required
def ballot(request):
return render(request, 'ballot/voteview.html', get_ballot_dict(request.user))
@login_required
def get_choices(request):
return JsonResponse(get_ballot_dict(request.user))
def get_ballot_dict(user):
choices = Ballot.choices.copy()
if Ballot.empty_votes:
choices.append(empty_vote)
return {
'nr': Ballot.nr,
'title': Ballot.title,
'choices': choices,
'has_voted': user.pk in Ballot.has_voted,
'active': Ballot.active,
'suggestions_enabled' : Suggestion.suggestions_enabled,
}
def vote(request):
if request.method == 'POST':
user = request.user
generalassembly = GeneralAssembly.objects.all().last() #fetches the newest made generalassembly object
if not user.is_authenticated:
return HttpResponse("Du må være innlogget for å stemme")
if not Ballot.active:
return HttpResponse("Avstemningen er ikke aktiv")
if user.pk < 2:
return HttpResponse("Linjeforeningen Hybrida kan ikke stemme selv")
if Ballot.only_members and not user.member:
return HttpResponse("Kun medlemmer kan stemme")
if Ballot.is_attending and user not in generalassembly.users.all():
return HttpResponse("Du må registrere oppmøte for å kunne stemme")
if user.pk in Ballot.has_voted:
return HttpResponse("Du har allerede stemt")
new_vote = request.POST.get("choice", None)
if new_vote in Ballot.choices or (Ballot.empty_votes and new_vote == empty_vote):
Ballot.has_voted.append(user.pk)
Ballot.votes.append(new_vote)
return HttpResponse("Du stemte på {}.".format(new_vote))
return HttpResponse("Du avga ingen stemme")
@user_passes_test(group_test("Tellekorps"))
def get_results(request):
user = request.user
    if not (user.is_authenticated and group_test("Tellekorps")(user)):  # group_test returns a predicate, so it must be called with the user
return JsonResponse(
{"title": "Hvem er best?", "results": [{"name": "vevkom", "votes": 9001}, {"name": "andre", "votes": 0}],
"total": 9001, "total_nonblank": 9001})
results = [{'name': choice, 'votes': Ballot.votes.count(choice)} for choice in Ballot.choices]
total_nonblank = total = len(Ballot.votes)
if Ballot.empty_votes:
results.append({'name': empty_vote, 'votes': Ballot.votes.count(empty_vote)})
total_nonblank -= Ballot.votes.count(empty_vote)
return JsonResponse({'title': Ballot.title, 'results': results, 'total': total, 'total_nonblank': total_nonblank})
|
hybrida/hybridjango
|
apps/ballot/views.py
|
views.py
|
py
| 5,402 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.user_passes_test",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "hybridjango.utils.group_test",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.user_passes_test",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "hybridjango.utils.group_test",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.user_passes_test",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "hybridjango.utils.group_test",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "apps.rfid.models.GeneralAssembly.objects.all",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "apps.rfid.models.GeneralAssembly.objects",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "apps.rfid.models.GeneralAssembly",
"line_number": 118,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "hybridjango.utils.group_test",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.user_passes_test",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "hybridjango.utils.group_test",
"line_number": 141,
"usage_type": "call"
}
] |
73739270588
|
#!/usr/bin/env python3
import argparse
import os
import re
import subprocess
import sys
LOG_FILE_OPTION = 'log_file'
OUTPUT_PATH_OPTION = '--output-path'
ONLY_FAILED_OPTION = '--only-failed'
HUMAN_READABLE_OPTION = '--human-readable'
USE_RUBY_PARSER_OPTION = '--use-ruby'
FIND_COREDUMPS_OPTION = "--find-coredumps"
WRITE_RESULTS_TO_DATABASE_OPTION = "--write-to-database"
HELP_OPTION = '--help'
options = argparse.ArgumentParser(description="CTest parser usage:")
options.add_argument(LOG_FILE_OPTION, help="CTEST LOG FILE PATH")
options.add_argument("-f", ONLY_FAILED_OPTION, action="store_true", help="PARSE ONLY FAILED TESTS")
options.add_argument("-r", HUMAN_READABLE_OPTION, action="store_true", help="HUMAN READABLE OUTPUT")
options.add_argument("-o", OUTPUT_PATH_OPTION, metavar="output_path", help="OUTPUT DIRECTORY PATH")
options.add_argument("-u", USE_RUBY_PARSER_OPTION, action="store_true", help="USE OLD RUBY PARSER")
options.add_argument("-c", FIND_COREDUMPS_OPTION, choices=["url", "files"], help="FIND AND STORE COREDUMPS")
options.add_argument("-w", WRITE_RESULTS_TO_DATABASE_OPTION, action="store_true", help="WRITE TEST RESULTS TO DATABASE")
parserRoot = os.path.dirname(os.path.abspath(__file__))
def parseCtestRuby(opts, path):
command = [
"{}/ruby-scripts/parse_ctest_log.rb".format(parserRoot),
"-l", opts.log_file,
"-o", "{}/ruby/results".format(path),
"-j", "{}/ruby/json".format(path),
"-s", "{}/ruby/ctest_sublogs".format(path)
]
if opts.human_readable:
command.append("-r")
if opts.only_failed:
command.append("-f")
return subprocess.check_output(command)
def parseCtestPython(opts, path):
command = [
"{}/python-scripts/parse_ctest_log.py".format(parserRoot),
opts.log_file,
"-o", "{}/python/results".format(path),
"-j", "{}/python/json".format(path),
"-s", "{}/python/ctest_sublogs".format(path)
]
if opts.human_readable:
command.append("-r")
if opts.only_failed:
command.append("-f")
return subprocess.check_output(command)
def storeCoredumpsRuby(opts, buildId, path):
command = [
"{}/ruby-scripts/coredump_finder.sh".format(parserRoot),
buildId,
opts.find_coredumps
]
coredumps = subprocess.check_output(command)
writeCoredumpsToFile("{}/ruby/coredump".format(path), coredumps)
def storeCoredumpsPython(opts, buildId, path):
command = [
"{}/python-scripts/coredump_finder.py".format(parserRoot),
buildId,
opts.find_coredumps
]
coredumps = subprocess.check_output(command)
writeCoredumpsToFile("{}/python/coredump".format(path), coredumps)
def getLogsDir(output):
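    # extracts the '<name>-<number>' logs directory from either the plain-text ("Logs dir: ...") or JSON ('"logs_dir": ...') parser output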
    return re.search(rb'(Logs dir: |"logs_dir": ")(\w+-\d+)', output).group(2)  # raw bytes pattern avoids invalid-escape warnings
def writeCoredumpsToFile(path, coredumps):
file = open(path, "w")
file.write("COREDUMPS \\\n")
file.writelines(coredumps)
file.close()
def writeToDatabaseRuby(opts, path):
command = [
"{}/ruby-scripts/write_build_results.rb".format(parserRoot),
"-f", "{}/ruby/json".format(path)
]
return subprocess.check_output(command)
def writeToDatabasePython(opts, path):
command = [
"{}/python-scripts/write_build_results.py".format(parserRoot),
"{}/python/json".format(path)
]
return subprocess.check_output(command)
def main(args=None):
opts = options.parse_args(args=args)
path = os.path.dirname(os.path.abspath(opts.log_file))
if opts.output_path:
path = opts.output_path
if opts.use_ruby:
result = parseCtestRuby(opts, path)
if opts.find_coredumps:
storeCoredumpsRuby(opts, getLogsDir(result), path)
if opts.write_to_database:
writeToDatabaseRuby(opts, path)
else:
result = parseCtestPython(opts, path)
if opts.find_coredumps:
storeCoredumpsPython(opts, getLogsDir(result), path)
if opts.write_to_database:
writeToDatabasePython(opts, path)
if os.path.samefile(__file__, sys.argv[0]):
main()
|
dA505819/maxscale-buildbot
|
master/parser-tests/parser/parser.py
|
parser.py
|
py
| 4,117 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "subprocess.check_output",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "subprocess.check_output",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "subprocess.check_output",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "subprocess.check_output",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "subprocess.check_output",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "subprocess.check_output",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "os.path.samefile",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 128,
"usage_type": "attribute"
}
] |
14490773282
|
"""
create model
Creator: Xiaoshui Huang
Date: 2020-06-19
"""
from se_math.so3 import inverse, transform
import torch
import numpy as np
from random import sample
import se_math.se3 as se3
import se_math.invmat as invmat
import igl
import os
import sys
sys.path.append('./../')
sys.path.append('./../../')
from loss import cal_loss_intersection_batch_whole_median_pts_lines, Reconstruction_point, Random_uniform_distribution_lines_batch_efficient_resample, chamfer_dist, Sample_neighs
from utils import npmat2euler
# we also make chamfer_loss for data!
def dict_all_to_device(tensor_dict, device):
"""Sends everything into a certain device """
for k in tensor_dict:
if isinstance(tensor_dict[k], torch.Tensor):
tensor_dict[k] = tensor_dict[k].to(device)
def save_pred_gt_obj(V_src, V_pred, V_gt, V_tgt_trans, paths_src, paths_pred,
paths_gt, paths_gt_pred):
Face = np.zeros(3).reshape(1, 3).astype(np.int32)
for i in range(V_pred.shape[0]):
igl.write_triangle_mesh(paths_src[i], V_src[i].numpy(), Face)
igl.write_triangle_mesh(paths_pred[i], V_pred[i].numpy(), Face)
igl.write_triangle_mesh(paths_gt[i], V_gt[i].numpy(), Face)
igl.write_triangle_mesh(paths_gt_pred[i], V_tgt_trans[i].numpy(), Face)
# a global function to flatten a feature
def flatten(x):
return x.view(x.size(0), -1)
# a global function to calculate max-pooling
def symfn_max(x):
# [B, K, N] -> [B, K, 1]
a = torch.nn.functional.max_pool1d(x, x.size(-1))
return a
# a global function to generate mlp layers
def _mlp_layers(nch_input,
nch_layers,
b_shared=True,
bn_momentum=0.1,
dropout=0.0):
""" [B, Cin, N] -> [B, Cout, N] or
[B, Cin] -> [B, Cout]
"""
layers = []
last = nch_input
for i, outp in enumerate(nch_layers):
if b_shared:
weights = torch.nn.Conv1d(last, outp, 1)
else:
weights = torch.nn.Linear(last, outp)
layers.append(weights)
# layers.append(torch.nn.BatchNorm1d(outp, momentum=bn_momentum))
layers.append(torch.nn.GroupNorm(8, outp))
layers.append(torch.nn.ReLU())
if b_shared == False and dropout > 0.0:
layers.append(torch.nn.Dropout(dropout))
last = outp
return layers
# a class to generate MLP network
class MLPNet(torch.nn.Module):
""" Multi-layer perception.
[B, Cin, N] -> [B, Cout, N] or
[B, Cin] -> [B, Cout]
"""
def __init__(self,
nch_input,
nch_layers,
b_shared=True,
bn_momentum=0.1,
dropout=0.0):
super().__init__()
list_layers = _mlp_layers(nch_input, nch_layers, b_shared, bn_momentum,
dropout)
self.layers = torch.nn.Sequential(*list_layers)
def forward(self, inp):
out = self.layers(inp)
return out
# encoder network
class PointNet(torch.nn.Module):
def __init__(self, dim_k=1024):
super().__init__()
scale = 1
mlp_h1 = [int(64 / scale), int(64 / scale)]
mlp_h2 = [int(64 / scale), int(128 / scale), int(dim_k / scale)]
self.h1 = MLPNet(3, mlp_h1, b_shared=True).layers
self.h2 = MLPNet(mlp_h1[-1], mlp_h2, b_shared=True).layers
self.sy = symfn_max
def forward(self, points):
""" points -> features
[B, N, 3] -> [B, K]
"""
# for pointnet feature extraction
x = points.transpose(1, 2) # [B, 3, N]
x = self.h1(x)
x = self.h2(x) # [B, K, N]
x = flatten(self.sy(x))
return x
# decoder network
class Decoder(torch.nn.Module):
def __init__(self, num_points=2048, bottleneck_size=1024):
super(Decoder, self).__init__()
self.num_points = num_points
self.bottleneck_size = bottleneck_size
# self.bn1 = torch.nn.BatchNorm1d(bottleneck_size)
# self.bn2 = torch.nn.BatchNorm1d(bottleneck_size // 2)
# self.bn3 = torch.nn.BatchNorm1d(bottleneck_size // 4)
self.bn1 = torch.nn.GroupNorm(8, bottleneck_size)
self.bn2 = torch.nn.GroupNorm(8, bottleneck_size // 2)
self.bn3 = torch.nn.GroupNorm(8, bottleneck_size // 4)
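        # GroupNorm stands in for the commented-out BatchNorm layers, keeping normalization independent of batch size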
self.fc1 = torch.nn.Linear(self.bottleneck_size, bottleneck_size)
self.fc2 = torch.nn.Linear(self.bottleneck_size, bottleneck_size // 2)
self.fc3 = torch.nn.Linear(bottleneck_size // 2, bottleneck_size // 4)
self.fc4 = torch.nn.Linear(bottleneck_size // 4, self.num_points * 3)
self.th = torch.nn.Tanh()
def forward(self, x):
batchsize = x.size()[0]
x = torch.nn.functional.relu(self.bn1(self.fc1(x)))
x = torch.nn.functional.relu(self.bn2(self.fc2(x)))
x = torch.nn.functional.relu(self.bn3(self.fc3(x)))
x = self.th(self.fc4(x)) * 10
x = x.view(batchsize, 3, self.num_points).transpose(1, 2).contiguous()
return x
# the neural network of feature-metric registration
class SolveRegistration(torch.nn.Module):
def __init__(self, ptnet, decoder=None):
super().__init__()
# network
self.encoder = ptnet
self.decoder = decoder
# functions
self.inverse = invmat.InvMatrix.apply
self.exp = se3.Exp # [B, 6] -> [B, 4, 4]
self.transform = se3.transform # [B, 1, 4, 4] x [B, N, 3] -> [B, N, 3]
# initialization for dt: [w1, w2, w3, v1, v2, v3], 3 rotation angles and 3 translation
delta = 1.0e-2 # step size for approx. Jacobian (default: 1.0e-2)
dt_initial = torch.autograd.Variable(
torch.Tensor([delta, delta, delta, delta, delta, delta]))
self.dt = torch.nn.Parameter(dt_initial.view(1, 6), requires_grad=True)
# results
self.last_err = None
self.g_series = None # for debug purpose
self.prev_r = None
self.g = None # estimated transformation T
self.device = None
self.g_series_gpu = None
# estimate T
    # not only return the encoder loss, but also the intersection loss
def estimate_t(self,
data,
maxiter=5,
xtol=1.0e-7,
p0_zero_mean=True,
p1_zero_mean=True,
mode='train'):
"""
        given two point clouds, estimate the transformation T using the IC algorithm
:param p0: point cloud
:param p1: point cloud
:param maxiter: maximum iteration
:param xtol: a threshold for early stop of transformation estimation
        :param p0_zero_mean: True: normalize p0 before IC algorithm
        :param p1_zero_mean: True: normalize p1 before IC algorithm
:return: feature-metric projection error (r), encoder-decoder loss (loss_ende) and intersection loss!
"""
p1 = data['points_src_sample']
p0 = data['points_tar_sample']
a0 = torch.eye(4).view(1, 4, 4).expand(p0.size(0), 4,
4).to(p0) # [B, 4, 4]
a1 = torch.eye(4).view(1, 4, 4).expand(p1.size(0), 4,
4).to(p1) # [B, 4, 4]
self.device = p1.device
batch_size = p1.shape[0]
# normalization
if p0_zero_mean:
p0_m = p0.mean(dim=1) # [B, N, 3] -> [B, 3]
a0 = a0.clone()
a0[:, 0:3, 3] = p0_m
q0 = p0 - p0_m.unsqueeze(1)
else:
q0 = p0
if p1_zero_mean:
p1_m = p1.mean(dim=1) # [B, N, 3] -> [B, 3]
a1 = a1.clone()
a1[:, 0:3, 3] = -p1_m
q1 = p1 - p1_m.unsqueeze(1)
else:
q1 = p1
# use IC algorithm to estimate the transformation
# generate the transform!
g0 = torch.eye(4).to(q0).view(1, 4, 4).expand(q0.size(0), 4,
4).contiguous()
r, g, loss_ende = self.ic_algo(g0, q0, q1, maxiter, xtol)
        # the g doesn't backpropagate the gradient?
self.g = g
# re-normalization
if p0_zero_mean or p1_zero_mean:
est_g = self.g
if p0_zero_mean:
est_g = a0.to(est_g).bmm(est_g)
if p1_zero_mean:
est_g = est_g.bmm(a1.to(est_g))
self.g = est_g
est_gs = self.g_series # [M, B, 4, 4]
if p0_zero_mean:
est_gs = a0.unsqueeze(0).contiguous().to(est_gs).matmul(est_gs)
if p1_zero_mean:
est_gs = est_gs.matmul(a1.unsqueeze(0).contiguous().to(est_gs))
self.g_series = est_gs
est_gs_gpu = self.g_series_gpu # [M, B, 4, 4]
if p0_zero_mean:
est_gs_gpu = a0.unsqueeze(0).contiguous().to(
est_gs_gpu).matmul(est_gs_gpu)
if p1_zero_mean:
est_gs_gpu = est_gs_gpu.matmul(
a1.unsqueeze(0).contiguous().to(est_gs_gpu))
self.g_series_gpu = est_gs_gpu
loss_pp_wise = (torch.mean(
torch.abs(
self.transform(self.g.unsqueeze(1), data['points_src_sample'])
- self.transform(
torch.inverse(data['igt']).unsqueeze(1),
data['points_src_sample']))))
        if mode == 'train':
R = (torch.norm(
data['tar_box'][:, 0, :] - data['tar_box'][:, -1, :],
dim=-1,
p=2) * 0.5).reshape(-1, 1)
lines = None
points_ref = data['points_tar_sample'].contiguous()
tar_faces_tensor = data['points_based_neighs_tar'].reshape(
points_ref.shape[0], -1, 9)
# if we used the transformed, we may generate better results!
temp_g = self.g_series_gpu[-1]
pred_src_transformed_final_sample = self.transform(
temp_g.unsqueeze(1),
data['points_src_sample'].contiguous()).detach()
# pred_src_transformed_final_sample = data['points_src_sample']
if lines is None:
lines = Random_uniform_distribution_lines_batch_efficient_resample(
R, data['centers'], 15000,
pred_src_transformed_final_sample.contiguous(),
data['points_tar_sample'].contiguous(), self.device)
# set our loss;
loss_intersection = torch.FloatTensor([0]).to(self.device)
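            # accumulate the intersection loss over the last three IC iterations,
            # down-weighting earlier iterations by a factor of 0.5 per step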
for i in range(maxiter - 3, maxiter):
temp_g = self.g_series_gpu[i]
pred_src_transformed_final_sample = self.transform(
temp_g.unsqueeze(1), data['points_src_sample'])
pred_src_faces_tensor = self.transform(
temp_g.unsqueeze(1),
data['points_based_neighs_src']).reshape(
pred_src_transformed_final_sample.shape[0], -1, 9)
tp_loss_intersection = torch.FloatTensor([0]).to(self.device)
for j in range(pred_src_faces_tensor.shape[0]):
tp_loss_intersection += cal_loss_intersection_batch_whole_median_pts_lines(
1, 1, 5, 5, pred_src_faces_tensor[j:j + 1, :, :],
tar_faces_tensor[j:j + 1, :, :], lines[j:j + 1, :, :],
self.device) / 5.0
loss_intersection = loss_intersection + \
tp_loss_intersection*0.5**(maxiter-i-1)
loss_chamfer = chamfer_dist(pred_src_transformed_final_sample,
data['points_tar_sample'])
return r, loss_ende, loss_intersection / batch_size, loss_pp_wise, loss_chamfer
        return r, loss_ende, loss_pp_wise
# IC algorithm
    # for the encoder-decoder branch we just use the chamfer loss
def ic_algo(self, g0, p0, p1, maxiter, xtol):
"""
use IC algorithm to estimate the increment of transformation parameters
:param g0: initial transformation
:param p0: point cloud
:param p1: point cloud
        :param maxiter: maximum iteration
        :param xtol: a threshold to check increment of transformation for early stop
:return: feature-metric projection error (r), updated transformation (g), encoder-decoder loss
"""
training = self.encoder.training
# training = self.decoder.training
batch_size = p0.size(0)
self.last_err = None
g = g0
self.g_series = torch.zeros(maxiter + 1, *g0.size(), dtype=g0.dtype)
self.g_series[0] = g0.clone()
self.g_series_gpu = torch.zeros(maxiter, *g0.size(),
dtype=g0.dtype).to(self.device)
# generate the features
f0 = self.encoder(p0)
f1 = self.encoder(p1)
# task 1
loss_enco_deco = 0.0
if self.decoder is not None:
# we generate the decoder f0?
# make an encoder decoder!
decoder_out_f0 = self.decoder(f0)
decoder_out_f1 = self.decoder(f1)
# the decoder meets AE!
p0_dist1, p0_dist2 = self.chamfer_loss(
p0.contiguous(), decoder_out_f0) # loss function
loss_net0 = (torch.mean(p0_dist1)) + (torch.mean(p0_dist2))
p1_dist1, p1_dist2 = self.chamfer_loss(
p1.contiguous(), decoder_out_f1) # loss function
loss_net1 = (torch.mean(p1_dist1)) + (torch.mean(p1_dist2))
loss_enco_deco = loss_net0 + loss_net1
        # self.encoder.eval()  # would fix the BN statistics
        # but if fixed, how would gradients backpropagate?
# task 2
f0 = self.encoder(p0) # [B, N, 3] -> [B, K]
# approx. J by finite difference
dt = self.dt.to(p0).expand(batch_size,
6) # convert to the type of p0. [B, 6]
J = self.approx_Jac(p0, f0, dt)
# compute pinv(J) to solve J*x = -r
try:
Jt = J.transpose(1, 2) # [B, 6, K]
H = Jt.bmm(J) # [B, 6, 6]
# H = H + u_lamda * iDentity
B = self.inverse(H)
pinv = B.bmm(Jt) # [B, 6, K]
except RuntimeError as err:
self.last_err = err
f1 = self.encoder(p1) # [B, N, 3] -> [B, K]
r = f1 - f0
            self.encoder.train(training)
return r, g, -1
itr = 0
r = None
# we
for itr in range(maxiter):
p = self.transform(g.unsqueeze(1),
p1) # [B, 1, 4, 4] x [B, N, 3] -> [B, N, 3]
f1 = self.encoder(p) # [B, N, 3] -> [B, K]
r = f1 - f0 # [B,K]
# generate the r!
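            # Gauss-Newton step: dx = -(J^T J)^-1 J^T r, i.e. solve J dx = -r with the precomputed pseudo-inverse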
dx = -pinv.bmm(r.unsqueeze(-1)).view(batch_size, 6)
check = dx.norm(p=2, dim=1, keepdim=True).max()
if float(check) < xtol:
if itr == 0:
self.last_err = 0 # no update.
break
g = self.update(g, dx)
self.g_series_gpu[itr] = g
self.g_series[itr + 1] = g.clone()
self.prev_r = r
self.encoder.train(training)
return r, g, loss_enco_deco
# estimate Jacobian matrix
def approx_Jac(self, p0, f0, dt):
# p0: [B, N, 3], Variable
# f0: [B, K], corresponding feature vector
# dt: [B, 6], Variable
# Jk = (ptnet(p(-delta[k], p0)) - f0) / delta[k]
batch_size = p0.size(0)
num_points = p0.size(1)
# compute transforms
transf = torch.zeros(batch_size, 6, 4, 4).to(p0)
for b in range(p0.size(0)):
d = torch.diag(dt[b, :]) # [6, 6]
D = self.exp(-d) # [6, 4, 4]
transf[b, :, :, :] = D[:, :, :]
transf = transf.unsqueeze(2).contiguous() # [B, 6, 1, 4, 4]
p = self.transform(transf,
p0.unsqueeze(1)) # x [B, 1, N, 3] -> [B, 6, N, 3]
f0 = f0.unsqueeze(-1) # [B, K, 1]
f1 = self.encoder(p.view(-1, num_points, 3))
f = f1.view(batch_size, 6, -1).transpose(1, 2) # [B, K, 6]
df = f0 - f # [B, K, 6]
J = df / dt.unsqueeze(1) # [B, K, 6]
return J
# update the transformation
def update(self, g, dx):
# [B, 4, 4] x [B, 6] -> [B, 4, 4]
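        # map the twist increment dx to SE(3) and left-compose it with the current estimate g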
dg = self.exp(dx)
return dg.matmul(g)
# calculate the chamfer loss
def chamfer_loss(self, a, b):
x, y = a, b
bs, num_points, points_dim = x.size()
xx = torch.bmm(x, x.transpose(2, 1))
yy = torch.bmm(y, y.transpose(2, 1))
zz = torch.bmm(x, y.transpose(2, 1))
# diag_ind = torch.arange(0, num_points).type(torch.cuda.LongTensor)
diag_ind = torch.arange(0, num_points)
rx = xx[:, diag_ind, diag_ind].unsqueeze(1).expand_as(xx)
ry = yy[:, diag_ind, diag_ind].unsqueeze(1).expand_as(yy)
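        # P[b, i, j] = ||x_i - y_j||^2, expanded as x.x + y.y - 2*x.y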
P = (rx.transpose(2, 1) + ry - 2 * zz)
return torch.min(P, 1)[0], torch.min(P, 2)[0]
@staticmethod
def rsq(r):
# |r| should be 0
z = torch.zeros_like(r)
return torch.nn.functional.mse_loss(r, z, reduction='sum')
@staticmethod
def comp(g, igt):
""" |g*igt - I| (should be 0) """
assert g.size(0) == igt.size(0)
assert g.size(1) == igt.size(1) and g.size(1) == 4
assert g.size(2) == igt.size(2) and g.size(2) == 4
A = g.matmul(igt)
I = torch.eye(4).to(A).view(1, 4, 4).expand(A.size(0), 4, 4)
return torch.nn.functional.mse_loss(A, I, reduction='mean') * 16
@staticmethod
def comp_inv(g, igt):
""" |g*igt - I| (should be 0) """
assert g.size(0) == igt.size(0)
assert g.size(1) == igt.size(1) and g.size(1) == 4
assert g.size(2) == igt.size(2) and g.size(2) == 4
# A = g.matmul(igt)
gt = torch.inverse(igt)
# I = torch.eye(4).to(A).view(1, 4, 4).expand(A.size(0), 4, 4)
return torch.nn.functional.mse_loss(g, gt, reduction='mean')
# main algorithm class
class FMRTrain:
def __init__(self, dim_k, num_points, train_type):
self.dim_k = dim_k
self.num_points = num_points
self.max_iter = 5 # max iteration time for IC algorithm
# 0: unsupervised, 1: semi-supervised see. self.compute_loss()
self._loss_type = train_type
self.transform = se3.transform # [B, 1, 4, 4] x [B, N, 3] -> [B, N, 3]
def create_model(self):
# Encoder network: extract feature for every point. Nx1024
ptnet = PointNet(dim_k=self.dim_k)
# Decoder network: decode the feature into points
decoder = Decoder(num_points=self.num_points)
        # feature-metric registration (fmr) algorithm: estimate the transformation T
fmr_solver = SolveRegistration(ptnet, decoder)
return fmr_solver
def compute_loss(self, solver, data, device, mode='train', maxiter=5):
# p0, p1, igt = data
# p0 = p0.to(device) # template
# p1 = p1.to(device) # source
# igt = igt.to(device) # igt: p0 -> p1
dict_all_to_device(data, device)
p1 = data['points_src_sample']
p0 = data['points_tar_sample']
igt = data['igt']
        if mode == 'train':
r, loss_ende, loss_intersection, loss_pp_wise, loss_chamfer = solver.estimate_t(
data, self.max_iter, mode=mode)
else:
# test model!
r, loss_ende, loss_pp_wise = solver.estimate_t(data,
maxiter,
mode=mode)
loss_r = solver.rsq(r)
est_g = solver.g
# generate the difference between the pred and gt!
loss_g = solver.comp_inv(est_g, igt)
# unsupervised learning, set max_iter=0
if self.max_iter == 0:
return loss_ende
# semi-supervised learning, set max_iter>0
if self._loss_type == 0:
loss = loss_ende
elif self._loss_type == 1:
loss = loss_ende + loss_g
elif self._loss_type == 2:
loss = loss_r + loss_g
else:
loss = loss_g
# we need use the multiple indicators to measure the quality!
np_pred_rotation = est_g[:, :3, :3].transpose(
2, 1).detach().cpu().numpy()
np_pred_euler = npmat2euler(np_pred_rotation, 'xyz')
np_gt_rotation = data['R'].detach().cpu().numpy()
np_gt_euler = npmat2euler(np_gt_rotation, 'xyz')
loss_rotation_euler_mae = np.mean(np.abs(np_pred_euler - np_gt_euler))
loss_rotation_euler_rmse = np.sqrt(
np.mean((np_pred_euler - np_gt_euler)**2))
np_loss = {
'loss_rot_euler_mae': loss_rotation_euler_mae,
'loss_rot_euler_rmse': loss_rotation_euler_rmse
}
# set the weights
        if mode == 'train':
return 0.01 * loss_ende + 1.0 * loss_intersection + .0 * loss_g + 0.0 * loss_chamfer, loss_g.detach(
), loss_intersection.detach(), loss_pp_wise.detach(
), loss_ende.detach(), np_loss
return loss_g, loss_g.detach(), loss_pp_wise.detach(
), loss_ende.detach(), np_loss
def train(self,
model,
trainloader,
optimizer,
device,
epoch,
train_writer=None):
model.train()
Debug = True
total_loss = 0
total_loss_gt = 0
total_loss_intersection = 0
total_loss_pp_wise = 0
total_loss_encoder = 0
total_loss_rot_euler_mae = 0
total_loss_rot_euler_rmse = 0
if Debug:
epe = 0
count = 0
count_mid = 9
for i, data in enumerate(trainloader):
loss, loss_gt, loss_intersection, loss_pp_wise, loss_ende, np_loss = self.compute_loss(
model, data, device)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_item = loss.item()
total_loss += loss_item
total_loss_gt += loss_gt.item()
total_loss_pp_wise += loss_pp_wise.item()
total_loss_intersection += loss_intersection.item()
total_loss_encoder += loss_ende.item()
total_loss_rot_euler_mae += np_loss['loss_rot_euler_mae']
total_loss_rot_euler_rmse += np_loss['loss_rot_euler_rmse']
if Debug:
epe += loss_item
if count % 10 == 0:
print('i=%d, fmr_loss=%f ' % (i, float(epe) /
(count_mid + 1)))
epe = 0.0
count += 1
print(
"ba/ep{:0d}/{:0d},l_insec:{:4f}, l_gt{:4f},l_pp_w{:4f}, l_en{:4f}, l_rot_eu_mae{:4f}, l_rot_eu_rmse{:4f}"
.format(i, epoch, loss_intersection.item(), loss_gt.item(),
loss_pp_wise.item(), loss_ende.item(),
np_loss['loss_rot_euler_mae'],
np_loss['loss_rot_euler_rmse']))
ave_loss = float(total_loss) / count
ave_loss_gt = float(total_loss_gt) / count
ave_loss_intersection = float(total_loss_intersection) / count
ave_loss_wise = float(total_loss_pp_wise) / count
ave_loss_encoder = float(total_loss_encoder) / count
        ave_loss_rot_euler_mae = float(total_loss_rot_euler_mae) / count
        ave_loss_rot_euler_rmse = float(total_loss_rot_euler_rmse) / count
if train_writer is not None:
train_writer.add_scalar('./loss/loss_sum', ave_loss, epoch)
train_writer.add_scalar('./loss/loss_gt', ave_loss_gt, epoch)
train_writer.add_scalar('./loss/loss_intersec',
ave_loss_intersection, epoch)
train_writer.add_scalar('./loss/loss_wise_mse', ave_loss_wise,
epoch)
train_writer.add_scalar('./loss/loss_ende', ave_loss_encoder,
epoch)
train_writer.add_scalar('./lr', optimizer.param_groups[0]['lr'],
epoch)
train_writer.add_scalar('./loss/loss_rot_euler_mae',
ave_loss_rot_euler_mae, epoch)
train_writer.add_scalar('./loss/loss_rot_euler_rmse',
ave_loss_rot_euler_rmse, epoch)
# \033[36m,test gt:{:4f}, pp_wise:{:4f}, rot_mae{:4f}, rot_rmse{:4f}\033[0m
print(
" \033[36m,train:l_gt:{:4f}, l_intersec:{:4f}, l_pp_wise{:4f}, l_encoder{:4f}, l_rot_eu_mae{:4f}, l_rot_eu_rmse{:4f} \033[0m, "
.format(ave_loss_gt, ave_loss_intersection, ave_loss_wise,
ave_loss_encoder, ave_loss_rot_euler_mae,
ave_loss_rot_euler_rmse))
return ave_loss
def validate(self, model, testloader, device, epoch, save_results=None):
# model.eval()
vloss = 0.0
vloss_gt = 0.0
vloss_pp_wise = 0.0
vloss_rot_euler_mae = 0.0
vloss_rot_euler_rmse = 0.0
count = 0
count_i = 0
with torch.no_grad():
for i, data in enumerate(testloader):
loss_net, loss_gt, loss_pp_wise, loss_ende, np_loss = self.compute_loss(
model, data, device, mode='test')
vloss += loss_net.item()
vloss_gt += loss_gt.item()
vloss_pp_wise += loss_pp_wise.item()
vloss_rot_euler_mae += np_loss['loss_rot_euler_mae']
vloss_rot_euler_rmse += np_loss['loss_rot_euler_rmse']
count += 1
print("Test:sample{:0d},loss_pp_wise:{:4f}".format(
i, loss_pp_wise.item()))
if epoch % 10 == 0:
est_g = model.g # (1, 4, 4)
igt = data['igt']
ig_gt = igt.cpu().contiguous().view(-1, 4,
4) # --> [1, 4, 4]
g_hat = est_g.cpu().contiguous().view(-1, 4,
4) # --> [1, 4, 4]
p1 = data['points_src_sample']
p0 = data['points_tar_sample']
if save_results is not None:
paths_pred = []
paths_gt = []
paths_src = []
paths_gt_pred = []
src_transform = self.transform(est_g.unsqueeze(1), p1)
src_transform_sample = self.transform(
est_g.unsqueeze(1), data['points_src_sample'])
tgt_transform = self.transform(igt.unsqueeze(1), p0)
V_src = p0.cpu().detach()
V_pred = src_transform.cpu().detach()
V_gt = p1.cpu().detach()
V_tgt_trans = tgt_transform.cpu().detach()
for j in range(p0.shape[0]):
paths_pred.append(
os.path.join(
save_results,
str(epoch) + "pred_src" + str(count_i) +
".obj"))
paths_gt.append(
os.path.join(
save_results,
str(epoch) + "gt" + str(count_i) + ".obj"))
paths_src.append(
os.path.join(
save_results,
str(epoch) + "src" + str(count_i) +
".obj"))
paths_gt_pred.append(
os.path.join(
save_results,
str(epoch) + "pred_gt" + str(count_i) +
".obj"))
F = np.zeros([1, 3]).astype(np.int32)
igl.write_obj(
paths_gt_pred[j].replace(
'pred_gt', 'transformed_sample', 1),
src_transform_sample.cpu().detach().numpy().
reshape(-1, 3), F)
igl.write_obj(
paths_gt_pred[j].replace(
'pred_gt', 'src_sample', 1),
data['points_src_sample'].cpu().detach().numpy(
).reshape(-1, 3), F)
igl.write_obj(
paths_gt_pred[j].replace(
'pred_gt', 'tar_sample', 1),
data['points_tar_sample'].cpu().detach().numpy(
).reshape(-1, 3), F)
count_i += 1
save_pred_gt_obj(V_src, V_pred, V_gt, V_tgt_trans,
paths_src, paths_pred, paths_gt,
paths_gt_pred)
ave_vloss = float(vloss) / count
ave_vloss_gt = float(vloss_gt) / count
ave_vloss_pp_wise = float(vloss_pp_wise) / count
ave_vloss_rot_euler_mae = float(vloss_rot_euler_mae) / count
ave_vloss_rot_euler_rmse = float(vloss_rot_euler_rmse) / count
print(
"\033[36m,test gt:{:4f}, pp_wise:{:4f}, rot_mae{:4f}, rot_rmse{:4f}\033[0m, "
.format(ave_vloss_gt, ave_vloss_pp_wise, ave_vloss_rot_euler_mae,
ave_vloss_rot_euler_rmse))
return ave_vloss
class FMRTest:
def __init__(self, args):
self.filename = args.outfile
self.dim_k = args.dim_k
self.max_iter = 10 # max iteration time for IC algorithm
self._loss_type = 3 # see. self.compute_loss()
self.transform = se3.transform # [B, 1, 4, 4] x [B, N, 3] -> [B, N, 3]
def create_model(self):
# Encoder network: extract feature for every point. Nx1024
ptnet = PointNet(dim_k=self.dim_k)
        # feature-metric registration (fmr) algorithm: estimate the transformation T
fmr_solver = SolveRegistration(ptnet)
return fmr_solver
# we save the results!
# pay attention to final results!
def evaluate(self,
solver,
testloader,
device,
save_results=None,
writer=None):
solver.eval()
with open(self.filename, 'w') as fout:
self.eval_1__header(fout)
count_i = 0
total_loss_pp_wise = 0
total_loss_gt = 0
with torch.no_grad():
for i, data in enumerate(testloader):
# p0, p1, igt = data # igt: p0->p1
dict_all_to_device(data, device)
p1 = data['points_src_sample']
p0 = data['points_tar_sample']
igt = data['igt']
# igt =
# # compute trans from p1->p0
# g = se3.log(igt) # --> [-1, 6]
# igt = se3.exp(-g) # [-1, 4, 4]
# p0, p1 = self.ablation_study(p0, p1)
p0 = p0.to(device) # template (1, N, 3)
p1 = p1.to(device) # source (1, M, 3)
                # When we evaluate, we ignore the chamfer and any other loss function!
r, loss_ende, loss_pp_wise = solver.estimate_t(
data, self.max_iter, mode='test')
total_loss_pp_wise += loss_pp_wise
est_g = solver.g # (1, 4, 4)
ig_gt = igt.cpu().contiguous().view(-1, 4,
4) # --> [1, 4, 4]
g_hat = est_g.cpu().contiguous().view(-1, 4,
4) # --> [1, 4, 4]
dg = g_hat.bmm(ig_gt) # if correct, dg == identity matrix.
dx = se3.log(
                    dg) # --> [1, 6] (if correct, dx == zero vector)
dn = dx.norm(p=2, dim=1) # --> [1]
dm = dn.mean()
self.eval_1__write(fout, ig_gt, g_hat)
print('test, %d/%d, %f, %f' %
(i, len(testloader), dm, loss_pp_wise))
if writer is not None:
writer.add_scalar('./loss/test', dm, i)
# p = self.transform(g.unsqueeze(1),
# p1) # [B, 1, 4, 4] x [B, N, 3] -> [B, N, 3]
# est_g:p1--->p0
# igt: p0-->p1
if save_results is not None:
paths_pred = []
paths_gt = []
paths_src = []
paths_gt_pred = []
src_transform = self.transform(est_g.unsqueeze(1), p1)
tgt_transform = self.transform(igt.unsqueeze(1), p0)
V_src = p0.cpu().detach()
V_pred = src_transform.cpu().detach()
V_gt = p1.cpu().detach()
V_tgt_trans = tgt_transform.cpu().detach()
for i in range(p0.shape[0]):
paths_pred.append(
os.path.join(save_results,
str(count_i) + "pred_src.obj"))
paths_gt.append(
os.path.join(save_results,
str(count_i) + "gt.obj"))
paths_src.append(
os.path.join(save_results,
str(count_i) + "src.obj"))
paths_gt_pred.append(
os.path.join(save_results,
str(count_i) + "pred_gt.obj"))
count_i += 1
save_pred_gt_obj(V_src, V_pred, V_gt, V_tgt_trans,
paths_src, paths_pred, paths_gt,
paths_gt_pred)
def ablation_study(self, p0, p1, add_noise=False, add_density=False):
# ablation study
# mesh = self.plyread("./box1Kinect1.ply")
# p0 = torch.tensor(mesh).to(device).unsqueeze(0)
# mesh = self.plyread("./box11.ply")
# p1 = torch.tensor(mesh).to(device).unsqueeze(0)
# add noise
if add_noise:
p1 = torch.tensor(np.float32(np.random.normal(p1, 0.01)))
# add outliers
if add_density:
density_ratio = 0.5
pts_num = p1.shape[0]
sampleNum = int(pts_num *
density_ratio) # the number of remaining points
if pts_num > sampleNum:
num = sample(range(1, pts_num), sampleNum)
elif pts_num > 0:
num = range(0, pts_num)
else:
print("No points in this point cloud!")
return
p1 = p1[num, :]
return p0, p1
def eval_1__header(self, fout):
cols = [
'h_w1', 'h_w2', 'h_w3', 'h_v1', 'h_v2', 'h_v3', 'g_w1', 'g_w2',
'g_w3', 'g_v1', 'g_v2', 'g_v3'
] # h: estimated, g: ground-truth twist vectors
print(','.join(map(str, cols)), file=fout)
fout.flush()
def eval_1__write(self, fout, ig_gt, g_hat):
x_hat = se3.log(g_hat) # --> [-1, 6]
mx_gt = se3.log(ig_gt) # --> [-1, 6]
for i in range(x_hat.size(0)):
x_hat1 = x_hat[i] # [6]
mx_gt1 = mx_gt[i] # [6]
vals = torch.cat((x_hat1, -mx_gt1)) # [12]
valn = vals.cpu().numpy().tolist()
print(','.join(map(str, valn)), file=fout)
fout.flush()
|
Dengzhi-USTC/A-robust-registration-loss
|
code/exps_deep_learning/fmr/model.py
|
model.py
|
py
| 36,481 |
python
|
en
|
code
| 25 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "igl.write_triangle_mesh",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "igl.write_triangle_mesh",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "igl.write_triangle_mesh",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "igl.write_triangle_mesh",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.max_pool1d",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Conv1d",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Linear",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.GroupNorm",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.ReLU",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.GroupNorm",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 138,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.GroupNorm",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.GroupNorm",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Linear",
"line_number": 141,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Linear",
"line_number": 142,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Linear",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Linear",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Tanh",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 150,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional.relu",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "se_math.invmat.InvMatrix",
"line_number": 165,
"usage_type": "attribute"
},
{
"api_name": "se_math.invmat",
"line_number": 165,
"usage_type": "name"
},
{
"api_name": "se_math.se3.Exp",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "se_math.se3",
"line_number": 166,
"usage_type": "name"
},
{
"api_name": "se_math.se3.transform",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "se_math.se3",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "torch.autograd",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "torch.eye",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "torch.eye",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "torch.eye",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "torch.mean",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "torch.abs",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "torch.inverse",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "torch.norm",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "loss.Random_uniform_distribution_lines_batch_efficient_resample",
"line_number": 285,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "loss.cal_loss_intersection_batch_whole_median_pts_lines",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "loss.chamfer_dist",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 334,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "torch.mean",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "torch.mean",
"line_number": 357,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 417,
"usage_type": "call"
},
{
"api_name": "torch.diag",
"line_number": 419,
"usage_type": "call"
},
{
"api_name": "torch.bmm",
"line_number": 445,
"usage_type": "call"
},
{
"api_name": "torch.bmm",
"line_number": 446,
"usage_type": "call"
},
{
"api_name": "torch.bmm",
"line_number": 447,
"usage_type": "call"
},
{
"api_name": "torch.arange",
"line_number": 449,
"usage_type": "call"
},
{
"api_name": "torch.min",
"line_number": 453,
"usage_type": "call"
},
{
"api_name": "torch.zeros_like",
"line_number": 458,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.mse_loss",
"line_number": 459,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 459,
"usage_type": "attribute"
},
{
"api_name": "torch.eye",
"line_number": 468,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.mse_loss",
"line_number": 469,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 469,
"usage_type": "attribute"
},
{
"api_name": "torch.inverse",
"line_number": 478,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional.mse_loss",
"line_number": 481,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 481,
"usage_type": "attribute"
},
{
"api_name": "se_math.se3.transform",
"line_number": 493,
"usage_type": "attribute"
},
{
"api_name": "se_math.se3",
"line_number": 493,
"usage_type": "name"
},
{
"api_name": "utils.npmat2euler",
"line_number": 544,
"usage_type": "call"
},
{
"api_name": "utils.npmat2euler",
"line_number": 546,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 547,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 547,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 548,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 549,
"usage_type": "call"
},
{
"api_name": "loss.backward",
"line_number": 589,
"usage_type": "call"
},
{
"api_name": "loss.item",
"line_number": 592,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 654,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 693,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 693,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 698,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 698,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 702,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 702,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 707,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 707,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 712,
"usage_type": "call"
},
{
"api_name": "numpy.int32",
"line_number": 712,
"usage_type": "attribute"
},
{
"api_name": "igl.write_obj",
"line_number": 713,
"usage_type": "call"
},
{
"api_name": "igl.write_obj",
"line_number": 718,
"usage_type": "call"
},
{
"api_name": "igl.write_obj",
"line_number": 723,
"usage_type": "call"
},
{
"api_name": "se_math.se3.transform",
"line_number": 752,
"usage_type": "attribute"
},
{
"api_name": "se_math.se3",
"line_number": 752,
"usage_type": "name"
},
{
"api_name": "torch.no_grad",
"line_number": 775,
"usage_type": "call"
},
{
"api_name": "se_math.se3.log",
"line_number": 802,
"usage_type": "call"
},
{
"api_name": "se_math.se3",
"line_number": 802,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 831,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 831,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 834,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 834,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 837,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 837,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 840,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 840,
"usage_type": "attribute"
},
{
"api_name": "torch.tensor",
"line_number": 857,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 857,
"usage_type": "call"
},
{
"api_name": "numpy.random.normal",
"line_number": 857,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 857,
"usage_type": "attribute"
},
{
"api_name": "random.sample",
"line_number": 866,
"usage_type": "call"
},
{
"api_name": "se_math.se3.log",
"line_number": 884,
"usage_type": "call"
},
{
"api_name": "se_math.se3",
"line_number": 884,
"usage_type": "name"
},
{
"api_name": "se_math.se3.log",
"line_number": 885,
"usage_type": "call"
},
{
"api_name": "se_math.se3",
"line_number": 885,
"usage_type": "name"
},
{
"api_name": "torch.cat",
"line_number": 889,
"usage_type": "call"
}
] |
73503536508
|
from tianshou.data import Batch, ReplayBuffer, to_numpy, to_torch, to_torch_as
import stable_baselines3.common.logger as L
import functools
import gym
import numpy as np
from torch.nn import functional as F
from einops.layers.torch import Rearrange
from encoder import *
import einops
import torch
from torch import nn
class RNEncoder(nn.Module):
def __init__(self, obs_space, act_space, cfg):
super().__init__()
self.cfg = cfg
obs_space = gym.spaces.Box(low=-1, high=1000, shape=cfg.obs_shape)
self.enc = ImpalaEncoder(obs_space, channels=cfg.filters, flatten=False)
c, h, w = self.enc.final_shape
self.pred_z_cat = create_mlp(cfg.filters[-1], cfg.obj_cat_num, [cfg.filters[-1]], return_seq=True)
self.output_shape = (h, w, c + cfg.obj_cat_num)
def split_obs(self, o):
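    # the flat observation packs the flattened image observation followed by an 8x8 per-cell object-category mask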
shape = o.shape
obs_shape = self.cfg.obs_shape
mask_shape = (8, 8, self.cfg.obj_cat_num)
obs = o[...,:np.prod(obs_shape)].reshape(*shape[:-1], *obs_shape)
mask = o[...,np.prod(obs_shape):].reshape(*shape[:-1], *mask_shape)
return obs, mask.detach()
def forward(self, x, ret_latent=False):
if isinstance(x, dict):
x = x['obs']
obs, obj_cat = self.split_obs(x)
out0 = self.enc(obs).permute(0,2,3,1) # (h, w, c)
out = torch.cat([out0, obj_cat], dim=-1)
if ret_latent:
return out, out0
else:
return out
def enc_loss(self, b, latent=None):
if self.cfg.enc_coeff <= 0:
pred_loss = torch.Tensor([0]).to(b.obs.device).sum()
else:
obs, obj_cat = self.split_obs(b.obs)
if latent is None:
latent = self.enc(obs)
pred_z_cat = self.pred_z_cat(latent)
pred_z_cat_loss = -(F.log_softmax(pred_z_cat, dim=-1) * obj_cat).sum(-1)
pred_z_cat_loss = (pred_z_cat_loss).sum([1,2]).mean()
L.record_mean('encoder/pred_loss', pred_z_cat_loss.item())
pred_loss = self.cfg.enc_coeff * pred_z_cat_loss
return pred_loss
class AddSInfo(nn.Module):
def __init__(self, h, w, c, cout=32, channel_first=False, use_mlp=True):
super().__init__()
identity = torch.tensor([[[1.0, 0.0, 1.0], [0.0, 1.0, 1.0]]], dtype=torch.float32)
grid = F.affine_grid(identity, [1, 1, h, w])
grid = grid.permute(0, 3, 1, 2).contiguous()
# (1, 2, h, w)
self.register_buffer('grid', grid)
assert channel_first == False
if not channel_first:
# (1, h, w, 2)
self.grid = grid.permute(0,2,3,1)
self.use_mlp = use_mlp
if self.use_mlp:
self.mlp = nn.Linear(c+2, cout)
def forward(self, x):
x = torch.cat([x, self.grid.to(x.device).expand(x.shape[0], -1, -1, -1)], dim=-1)
if self.use_mlp:
x = self.mlp(x)
return x
class ObjSummary(nn.Module):
def __init__(self, c, obj_cat_num):
super().__init__()
self.head = 4
self.query_atten = QueryMultiHeadAttention(obj_cat_num, c, self.head,
to_q_net=[32], to_k_net=[32], to_v_net=[32], to_out_net=[])
self.out_dim = c * obj_cat_num
"""
x: (N, B, E)
obj_cat: (N, B, S)
out: (B, S*E)
"""
def forward(self, x, obj_cat):
mask = einops.repeat(obj_cat, 'n b s -> b h s n', h=self.head)
out = self.query_atten(x, mask=mask)
out = einops.rearrange(out, 's n e -> n (s e)')
return out
class RNModule(nn.Module):
def __init__(self, input_shape, action_space, cfg):
super().__init__()
self.cfg = cfg
h, w, c = input_shape
obj_cat_num = c - 32
self.obj_cat_num = c - 32
self.add_sinfo = AddSInfo(h, w, c, cout=32)
self.trans = Rearrange('n h w c -> (h w) n c')
self.atten = nn.MultiheadAttention(32, 4)
if not cfg.use_sep_mlp:
create_layer = nn.Linear
else:
create_layer = functools.partial(MultiLinear, num_linears=self.obj_cat_num)
fdim = 32
self.mlp = create_mlp(64, fdim, [64], create_layer=create_layer, return_seq=True)
self.ac = nn.Linear(fdim, action_space.n + 1)
def forward(self, x, ret_atten_wts=False, mask_out = None):
obj_cat = x[...,-self.obj_cat_num:] # B, H, W, S
atten_wts = None
x = self.add_sinfo(x)
x = self.trans(x)
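    # self-attention over the h*w spatial cells (sequence-first layout expected by nn.MultiheadAttention)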
atten_out, atten_wts = self.atten(x, x, x)
x0 = x
x = torch.cat([x, atten_out], dim=-1) # (N, B, 64)
if self.cfg.use_sep_mlp:
x = x.unsqueeze(-2).expand(-1, -1, self.obj_cat_num, -1) # (N, B, S, 64)
out = self.mlp(x)
if self.cfg.use_sep_mlp:
obj_cat = einops.repeat(obj_cat, 'b h w s -> (h w) b s k', k=1) # n, b, s, k
if mask_out is not None:
obj_cat = obj_cat * einops.repeat(to_torch_as(mask_out, obj_cat), 's -> s k', k=1)
if True:
obj_cat[...,-1,:] += 1e-4
obj_cat = obj_cat / obj_cat.sum(-2, keepdim=True)
out = (out * obj_cat).sum(-2) # N, B, 64
out = out.amax(0) # (n, 64)
out = self.ac(out)
if ret_atten_wts:
return out, atten_wts
return out
|
albertcity/OCARL
|
relation_net.py
|
relation_net.py
|
py
| 4,818 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "gym.spaces.Box",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "gym.spaces",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "numpy.prod",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.prod",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.nn.cat",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "torch.nn.Tensor",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.log_softmax",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "stable_baselines3.common.logger.record_mean",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "stable_baselines3.common.logger",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "torch.nn.tensor",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "torch.nn.float32",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional.affine_grid",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "torch.nn.cat",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "einops.repeat",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "einops.rearrange",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "einops.layers.torch.Rearrange",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "torch.nn.cat",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "einops.repeat",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "einops.repeat",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "tianshou.data.to_torch_as",
"line_number": 123,
"usage_type": "call"
}
] |
9836414156
|
import sys
from collections import deque
n = int(sys.stdin.readline())
board = []
for _ in range(n):
  board.append(list(map(int, list(sys.stdin.readline())[:-1])))
dx = [0, 0, -1, 1]
dy = [1, -1, 0, 0]
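# BFS flood fill: returns the size of the connected group of 1s containing (x, y), or 0 if the cell is empty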
def bfs(board, x, y):
  if board[x][y] == 0: return 0
  area = 1
  q = deque([])
  board[x][y] = 0
  q.append((x, y))
  while q:
    x, y = q.popleft()
    for i in range(4):
      nx = x + dx[i]
      ny = y + dy[i]
      if not (0 <= nx < n and 0 <= ny < n): continue
      if board[nx][ny] == 0: continue
      area += 1
      board[nx][ny] = 0
      q.append((nx, ny))
  return area
totalArea = 0
areas = []
for i in range(n):
  for j in range(n):
    area = bfs(board, i, j)
    if area != 0:
      totalArea += 1
      areas.append(area)
print(totalArea)
areas.sort()
for area in areas:
  print(area)
|
woasidh/algorithm
|
python/BOJ/그래프_탐색/2667.py
|
2667.py
|
py
| 932 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.stdin.readline",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "sys.stdin.readline",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "collections.deque",
"line_number": 15,
"usage_type": "call"
}
] |
655296277
|
import json
import os
from concurrent import futures
import luigi
import numpy as np
import nifty.tools as nt
import z5py
from cluster_tools.inference import InferenceLocal
from cluster_tools.inference.inference_embl import InferenceEmbl
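# affinity offsets: direct neighbours (first three) plus mid- and long-range offsets along z, y and x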
OFFSETS = [
[-1, 0, 0],
[0, -1, 0],
[0, 0, -1],
[-2, 0, 0],
[0, -3, 0],
[0, 0, -3],
[-3, 0, 0],
[0, -9, 0],
[0, 0, -9]
]
def update_block_shape(config_dir, block_shape, default_config):
global_conf = os.path.join(config_dir, 'global.config')
if os.path.exists(global_conf):
with open(global_conf) as f:
config = json.load(f)
else:
config = default_config
if config['block_shape'] != block_shape:
config['block_shape'] = block_shape
with open(global_conf, 'w') as f:
json.dump(config, f)
def predict(input_path, input_key,
output_path, output_prefix,
ckpt, gpus, tmp_folder, target,
gpu_type='2080Ti', predict_affinities=False):
task = InferenceLocal if target == 'local' else InferenceEmbl
# halo = [8, 64, 64]
# block_shape = [32, 256, 256]
# larger halo
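    # halo: extra context read around each block and cropped from the prediction; block_shape: the output chunk written per job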
halo = [12, 96, 96]
block_shape = [24, 128, 128]
if predict_affinities:
output_key = {
f'{output_prefix}/foreground': [0, 1],
f'{output_prefix}/affinities': [1, 10]
}
else:
output_key = {
f'{output_prefix}/foreground': [0, 1],
f'{output_prefix}/boundaries': [1, 2]
}
config_dir = os.path.join(tmp_folder, 'configs')
os.makedirs(config_dir, exist_ok=True)
update_block_shape(config_dir, block_shape, task.default_global_config())
conf = task.default_global_config()
conf.update({'block_shape': block_shape})
with open(os.path.join(config_dir, 'global.config'), 'w') as f:
json.dump(conf, f)
if target == 'local':
device_mapping = {ii: gpu for ii, gpu in enumerate(gpus)}
else:
device_mapping = None
n_threads = 6
conf = task.default_task_config()
conf.update({
'dtype': 'uint8',
'device_mapping': device_mapping,
'threads_per_job': n_threads,
'mixed_precision': True,
'gpu_type': gpu_type,
'qos': 'high',
'mem_limit': 24,
'time_limit': 600
})
with open(os.path.join(config_dir, 'inference.config'), 'w') as f:
json.dump(conf, f)
t = task(tmp_folder=tmp_folder, config_dir=config_dir, max_jobs=len(gpus),
input_path=input_path, input_key=input_key,
output_path=output_path, output_key=output_key,
checkpoint_path=ckpt, halo=halo,
framework='pytorch')
assert luigi.build([t], local_scheduler=True)
update_block_shape(config_dir, [32, 256, 256], task.default_global_config())
def set_bounding_box(tmp_folder, bounding_box):
config = InferenceLocal.default_global_config()
config.update({
'roi_begin': [bb.start for bb in bounding_box],
'roi_end': [bb.stop for bb in bounding_box]
})
config_folder = os.path.join(tmp_folder, 'configs')
os.makedirs(config_folder, exist_ok=True)
config_file = os.path.join(config_folder, 'global.config')
with open(config_file, 'w') as f:
json.dump(config, f)
def get_checkpoint(checkpoint, use_best=False, is_affinity_model=False):
if use_best:
path = os.path.join(checkpoint, 'best.pt')
else:
path = os.path.join(checkpoint, 'latest.pt')
n_out = 10 if is_affinity_model else 2
if 'large' in checkpoint:
model_kwargs = dict(
scale_factors=[
[1, 2, 2],
[1, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2]
],
in_channels=1,
out_channels=n_out,
initial_features=128,
gain=2,
pad_convs=True,
final_activation='Sigmoid'
)
else:
model_kwargs = dict(
scale_factors=[
[1, 2, 2],
[1, 2, 2],
[2, 2, 2],
[2, 2, 2]
],
in_channels=1,
out_channels=n_out,
initial_features=64,
gain=2,
pad_convs=True,
final_activation='Sigmoid'
)
ckpt = {
'class': ('mipnet.models.unet', 'AnisotropicUNet'),
'kwargs': model_kwargs,
'checkpoint_path': path,
'model_state_key': 'model_state'
}
return ckpt
def run_multicut(path,
checkpoint_name,
target,
max_jobs,
tmp_folder,
beta):
from cluster_tools.workflows import MulticutSegmentationWorkflow
task = MulticutSegmentationWorkflow
config_dir = os.path.join(tmp_folder, 'configs')
configs = task.get_config()
ws_config = configs['watershed']
ws_config.update({
"threshold": 0.25,
'apply_dt_2d': True,
'apply_filters_2d': True,
'apply_ws_2d': False,
'sigma_seeds': 2.6
})
with open(os.path.join(config_dir, 'watershed.config'), 'w') as f:
json.dump(ws_config, f)
cost_config = configs['probs_to_costs']
cost_config.update({
'beta': beta
})
with open(os.path.join(config_dir, 'probs_to_costs.config'), 'w') as f:
json.dump(cost_config, f)
bd_key = f'predictions/{checkpoint_name}/boundaries'
node_labels_key = f'node_labels/{checkpoint_name}/multicut'
ws_key = f'segmentation/{checkpoint_name}/watershed'
seg_key = f'segmentation/{checkpoint_name}/multicut'
t = task(target=target, max_jobs=max_jobs,
tmp_folder=tmp_folder, config_dir=config_dir,
input_path=path, input_key=bd_key,
ws_path=path, ws_key=ws_key,
problem_path=os.path.join(tmp_folder, 'data.n5'),
node_labels_key=node_labels_key,
output_path=path, output_key=seg_key)
assert luigi.build([t], local_scheduler=True)
def run_mws(data_path, checkpoint_name,
target, max_jobs, tmp_folder,
threshold):
fg_key = f'predictions/{checkpoint_name}/foreground'
mask_key = f'predictions/{checkpoint_name}/mask'
aff_key = f'predictions/{checkpoint_name}/affinities'
seg_key = f'segmentation/{checkpoint_name}/mutex_watershed'
from cluster_tools.thresholded_components.threshold import ThresholdLocal, ThresholdSlurm
task = ThresholdLocal if target == 'local' else ThresholdSlurm
config_dir = os.path.join(tmp_folder, 'configs')
t = task(tmp_folder=tmp_folder, config_dir=config_dir, max_jobs=max_jobs,
input_path=data_path, input_key=fg_key,
output_path=data_path, output_key=mask_key,
threshold=0.5)
assert luigi.build([t], local_scheduler=True)
from cluster_tools.mutex_watershed import MwsWorkflow
task = MwsWorkflow
config_dir = os.path.join(tmp_folder, 'configs')
configs = task.get_config()
conf = configs['mws_blocks']
conf.update({
'strides': [4, 4, 4],
'randomize_strides': True
})
with open(os.path.join(config_dir, 'mws_blocks.config'), 'w') as f:
json.dump(conf, f)
conf = configs['block_edge_features']
conf.update({
'offsets': OFFSETS
})
with open(os.path.join(config_dir, 'block_edge_features.config'), 'w') as f:
json.dump(conf, f)
# TODO with halo?
halo = None
t = task(tmp_folder=tmp_folder, config_dir=config_dir,
target=target, max_jobs=max_jobs,
input_path=data_path, input_key=aff_key,
output_path=data_path, output_key=seg_key,
offsets=OFFSETS, halo=halo,
mask_path=data_path, mask_key=mask_key,
stitch_via_mc=True)
assert luigi.build([t], local_scheduler=True)
def postprocess(path, checkpoint_name,
seg_key, out_key,
target, max_jobs, tmp_folder,
size_threshold=250, threshold=None):
from cluster_tools.postprocess import FilterByThresholdWorkflow
from cluster_tools.postprocess import SizeFilterWorkflow
fg_key = f'predictions/{checkpoint_name}/foreground'
hmap_key = f'predictions/{checkpoint_name}/boundaries'
config_dir = os.path.join(tmp_folder, 'configs')
if threshold is not None:
task = FilterByThresholdWorkflow
t = task(target=target, max_jobs=max_jobs,
tmp_folder=tmp_folder, config_dir=config_dir,
input_path=path, input_key=fg_key,
seg_in_path=path, seg_in_key=seg_key,
seg_out_path=path, seg_out_key=out_key,
threshold=threshold)
assert luigi.build([t], local_scheduler=True)
seg_key = out_key
if size_threshold is not None:
task = SizeFilterWorkflow
t = task(tmp_folder=tmp_folder, config_dir=config_dir,
target=target, max_jobs=max_jobs,
input_path=path, input_key=seg_key,
output_path=path, output_key=out_key,
hmap_path=path, hmap_key=hmap_key,
relabel=True, preserve_zeros=True,
size_threshold=size_threshold)
assert luigi.build([t], local_scheduler=True)
# this deserves a cluster tools task
def affinity_to_boundary(data_path, prediction_prefix,
tmp_folder, target, max_jobs):
aff_key = os.path.join(prediction_prefix, 'affinities')
bd_key = os.path.join(prediction_prefix, 'boundaries')
with z5py.File(data_path, 'a') as f:
if bd_key in f:
return
ds_affs = f[aff_key]
shape = ds_affs.shape[1:]
chunks = ds_affs.chunks[1:]
ds_bd = f.require_dataset(bd_key, shape=shape, chunks=chunks, compression='gzip',
dtype=ds_affs.dtype)
blocking = nt.blocking([0, 0, 0], shape, chunks)
def _block(block_id):
block = blocking.getBlock(block_id)
bb = tuple(slice(beg, end) for beg, end in zip(block.begin, block.end))
bb_affs = (slice(None),) + bb
affs = ds_affs[bb_affs]
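            # boundary evidence = max over the short- and mid-range in-plane affinity channels (offsets 1, 2, 4, 5)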
bd = np.maximum(affs[1], affs[2])
bd = np.maximum(bd, np.maximum(affs[4], affs[5]))
ds_bd[bb] = bd.astype(ds_bd.dtype)
with futures.ThreadPoolExecutor(8) as tp:
tp.map(_block, range(blocking.numberOfBlocks))
def segment_with_boundaries(sample,
checkpoint,
target,
gpus,
max_jobs=32,
bounding_box=None,
beta=.5,
threshold=0.25,
only_prediction=False,
gpu_type='2080Ti',
is_affinity_model=False,
size_threshold=250):
checkpoint_name = os.path.split(checkpoint)[1]
data_path = os.path.join('./data', f'{sample}.n5')
raw_key = 'raw'
prediction_prefix = os.path.join('predictions', checkpoint_name)
tmp_folder = os.path.join('./tmp_folders', f'tmp_{checkpoint_name}_{sample}')
if bounding_box is not None:
set_bounding_box(tmp_folder, bounding_box)
ckpt = get_checkpoint(checkpoint,
is_affinity_model=is_affinity_model)
predict(data_path, raw_key,
data_path, prediction_prefix,
ckpt, gpus, tmp_folder, target,
gpu_type=gpu_type,
predict_affinities=is_affinity_model)
if only_prediction:
return
if is_affinity_model:
affinity_to_boundary(data_path, prediction_prefix,
tmp_folder, target, max_jobs)
run_multicut(data_path, checkpoint_name,
target, max_jobs, tmp_folder,
beta=beta)
seg_key = f'segmentation/{checkpoint_name}/multicut'
out_key = f'segmentation/{checkpoint_name}/multicut_postprocessed'
postprocess(data_path, checkpoint_name,
seg_key, out_key,
target, max_jobs, tmp_folder,
threshold=threshold,
size_threshold=size_threshold)
def segment_with_affinities(sample,
checkpoint,
target,
gpus,
max_jobs=32,
bounding_box=None,
threshold=0.5,
only_prediction=False,
gpu_type='2080Ti',
size_threshold=250):
checkpoint_name = os.path.split(checkpoint)[1]
data_path = os.path.join('./data', f'{sample}.n5')
raw_key = 'raw'
prediction_prefix = os.path.join('predictions', checkpoint_name)
tmp_folder = os.path.join('./tmp_folders', f'tmp_{checkpoint_name}_{sample}_mws')
if bounding_box is not None:
set_bounding_box(tmp_folder, bounding_box)
ckpt = get_checkpoint(checkpoint,
is_affinity_model=True)
predict(data_path, raw_key,
data_path, prediction_prefix,
ckpt, gpus, tmp_folder, target,
gpu_type=gpu_type,
predict_affinities=True)
if only_prediction:
return
affinity_to_boundary(data_path, prediction_prefix,
tmp_folder, target, max_jobs)
run_mws(data_path, checkpoint_name,
target, max_jobs, tmp_folder,
threshold=threshold)
seg_key = f'segmentation/{checkpoint_name}/mutex_watershed'
out_key = f'segmentation/{checkpoint_name}/mutex_watershed_postprocessed'
postprocess(data_path, checkpoint_name,
seg_key, out_key,
target, max_jobs, tmp_folder,
size_threshold=size_threshold)
if __name__ == '__main__':
segment_with_affinities(
'small',
'./checkpoints/affinity_model_default_human_rat',
'local',
gpus=[0, 1, 2, 3]
)
|
constantinpape/torch-em
|
experiments/unet-segmentation/mitochondria-segmentation/mito-em/challenge/segmentation_impl.py
|
segmentation_impl.py
|
py
| 14,203 |
python
|
en
|
code
| 42 |
github-code
|
6
|
[
{
"api_name": "os.path.join",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "cluster_tools.inference.InferenceLocal",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "cluster_tools.inference.inference_embl.InferenceEmbl",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "json.dump",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "json.dump",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "luigi.build",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "cluster_tools.inference.InferenceLocal.default_global_config",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "cluster_tools.inference.InferenceLocal",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "json.dump",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "cluster_tools.workflows.MulticutSegmentationWorkflow",
"line_number": 174,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 176,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 187,
"usage_type": "attribute"
},
{
"api_name": "json.dump",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 194,
"usage_type": "attribute"
},
{
"api_name": "json.dump",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 206,
"usage_type": "attribute"
},
{
"api_name": "luigi.build",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "cluster_tools.thresholded_components.threshold.ThresholdLocal",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "cluster_tools.thresholded_components.threshold.ThresholdSlurm",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 222,
"usage_type": "attribute"
},
{
"api_name": "luigi.build",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "cluster_tools.mutex_watershed.MwsWorkflow",
"line_number": 230,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 232,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 240,
"usage_type": "attribute"
},
{
"api_name": "json.dump",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 247,
"usage_type": "attribute"
},
{
"api_name": "json.dump",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "luigi.build",
"line_number": 259,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 272,
"usage_type": "attribute"
},
{
"api_name": "cluster_tools.postprocess.FilterByThresholdWorkflow",
"line_number": 275,
"usage_type": "name"
},
{
"api_name": "luigi.build",
"line_number": 282,
"usage_type": "call"
},
{
"api_name": "cluster_tools.postprocess.SizeFilterWorkflow",
"line_number": 286,
"usage_type": "name"
},
{
"api_name": "luigi.build",
"line_number": 294,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 300,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 300,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 301,
"usage_type": "attribute"
},
{
"api_name": "z5py.File",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "nifty.tools.blocking",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "nifty.tools",
"line_number": 313,
"usage_type": "name"
},
{
"api_name": "numpy.maximum",
"line_number": 322,
"usage_type": "call"
},
{
"api_name": "numpy.maximum",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "concurrent.futures.ThreadPoolExecutor",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "concurrent.futures",
"line_number": 326,
"usage_type": "name"
},
{
"api_name": "os.path.split",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 342,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 344,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 346,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 346,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 347,
"usage_type": "attribute"
},
{
"api_name": "os.path.split",
"line_number": 389,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 389,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 391,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 391,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 393,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 393,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 394,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 394,
"usage_type": "attribute"
}
] |
36545155158
|
from django.http import Http404, JsonResponse
from django.shortcuts import render
from . import fsop
from .models import Directory, File, NotFoundError
def root(request):
return index(request, '')
def index(request, path):
path = _split_path(path)
try:
directory = Directory.from_path(path)
subdirs = Directory.subdirs(directory)
files = Directory.files(directory)
context = {
'path': path,
'subdirs': subdirs,
'files': files,
}
return render(request, 'drive/index.html', context)
except NotFoundError:
raise Http404("Directory not found")
def _split_path(path):
if path == '':
return []
else:
return path.split('/')
def file_system_op(request):
""" Handle file system commands.
ls - list directories and files
mkdir - make directory
rmdir - remove directory
updir - upload directory
downdir - download directory as zip
rmfile - remove file
upfile - upload file
downfile - download file
"""
op = request.GET['op']
if op == 'ls':
data = fsop.ls(request.GET['dirID'])
return JsonResponse(data)
elif op == 'mkdir':
Directory.make()
elif op == 'rmdir':
Directory.remove()
elif op == 'updir':
Directory.upload()
elif op == 'downdir':
Directory.download()
elif op == 'rmfile':
File.remove()
elif op == 'upfile':
File.upload()
elif op == 'downfile':
File.download()
else:
pass
|
joshsteiner/MyDrive
|
drive/views.py
|
views.py
|
py
| 1,606 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "models.Directory.from_path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "models.Directory",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "models.Directory.subdirs",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "models.Directory",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "models.Directory.files",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "models.Directory",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "models.NotFoundError",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "django.http.Http404",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "models.Directory.make",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "models.Directory",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "models.Directory.remove",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "models.Directory",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "models.Directory.upload",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "models.Directory",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "models.Directory.download",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "models.Directory",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "models.File.remove",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "models.File",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "models.File.upload",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "models.File",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "models.File.download",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "models.File",
"line_number": 66,
"usage_type": "name"
}
] |
72528402109
|
import os, csv
import nltk as nlp
from nltk.probability import FreqDist
import pandas as pd
import matplotlib.pyplot as plt
hapaxList = []
with open('hapaxList.csv', 'w', newline='') as wordsCSVfile:
write = csv.writer(wordsCSVfile)
write.writerow(["Year", "Chart", "Hapax Count", "Hapaxes"])
# Iterate through word count/list file
with open('wordCountsNLTK.csv', 'r', encoding="ISO-8859-1") as csvFile:
reader = csv.reader(csvFile)
next(reader)
for row in reader:
print(row[0] + " " + row[1])
tokens = nlp.word_tokenize(row[2])
fdist = FreqDist(tokens)
#print(fdist.hapaxes())
# Save hapaxes to CSV
with open('hapaxList.csv', 'a', newline='') as wordsCSVfile:
write = csv.writer(wordsCSVfile)
write.writerow([row[0], row[1], len(fdist.hapaxes()), fdist.hapaxes()])
# Load CSV and store the hapax counts as a dataframe
dfHapax = pd.read_csv('hapaxList.csv', usecols = ['Year','Hapax Count'])
print(dfHapax)
dfHapax.groupby(["Year"]).mean().plot()
plt.xlabel('Year', fontsize=15)
plt.ylabel('Averages', fontsize=15)
plt.title("Average Hapax count per Year")
plt.show()
|
stkeller/Replication-Thesis
|
Code/LexicalHapax.py
|
LexicalHapax.py
|
py
| 1,106 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "csv.writer",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "nltk.word_tokenize",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "nltk.probability.FreqDist",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 37,
"usage_type": "name"
}
] |
70879486268
|
from enum import Enum
class Color(Enum):
WHITE = True
BLACK = False
class Direction(Enum):
EAST = "e"
SOUTH_EAST = "se"
SOUTH_WEST = "sw"
WEST = "w"
NORTH_WEST = "nw"
NORTH_EAST = "ne"
class Coordinate:
# Using axial coordinates
# https://www.redblobgames.com/grids/hexagons/
def __init__(self, q, r):
self._q = q
self._r = r
def _as_tuple(self):
return (self._q, self._r)
def __repr__(self):
return f"(q:{self._q}, r:{self._r})"
def __hash__(self):
return hash(self._as_tuple())
def __eq__(self, other):
assert isinstance(other, Coordinate)
return self._as_tuple() == other._as_tuple()
def __add__(self, other):
assert isinstance(other, Coordinate)
return Coordinate(self._q + other._q, self._r + other._r)
def adj(self):
return {d: self + Coordinate.ADJ[d] for d in Direction}
Coordinate.ADJ = {
Direction.EAST: Coordinate(+1, 0),
Direction.SOUTH_EAST: Coordinate(0, +1),
Direction.SOUTH_WEST: Coordinate(-1, +1),
Direction.WEST: Coordinate(-1, 0),
Direction.NORTH_WEST: Coordinate(0, -1),
Direction.NORTH_EAST: Coordinate(+1, -1),
}
class HexTile:
_n = 0
def __init__(self):
self._color = Color.WHITE
self.n = HexTile._n
HexTile._n += 1
def __repr__(self):
return f"T{self.n}({self._color})"
def toggle_color(self):
self._color = Color(not self._color.value)
def get_color(self):
return self._color
class HexGrid:
ORIGIN = Coordinate(0, 0)
def __init__(self):
self._tiles = {}
self._create_tile_at(HexGrid.ORIGIN)
def flip_tile(self, directions):
pos = HexGrid.ORIGIN
for d in directions:
pos = pos + Coordinate.ADJ[d]
if pos not in self._tiles:
self._create_tile_at(pos)
self._tiles[pos].toggle_color()
def count_black_tiles(self):
return sum(t.get_color() == Color.BLACK for t in self._tiles.values())
def simulate_day(self):
# add white tiles next to all black tiles
for pos, tile in list(self._tiles.items()):
if tile.get_color() == Color.BLACK:
for adj_pos in pos.adj().values():
if adj_pos not in self._tiles:
self._create_tile_at(adj_pos)
# determine which tiles need to be flipped
to_flip = {tile for pos, tile in self._tiles.items() if self._should_flip(tile, pos)}
# flip tiles
for tile in to_flip:
tile.toggle_color()
def _should_flip(self, tile, pos):
count = self._count_adj_black_tiles(pos)
if tile.get_color() == Color.BLACK and (count == 0 or count > 2):
return True
elif tile.get_color() == Color.WHITE and count == 2:
return True
return False
def _count_adj_black_tiles(self, pos):
count = 0
for adj_pos in pos.adj().values():
adj_tile = self._tiles.get(adj_pos)
if adj_tile is not None and adj_tile.get_color() == Color.BLACK:
count += 1
return count
def _create_tile_at(self, pos):
assert pos not in self._tiles
self._tiles[pos] = HexTile()
def parse(line):
directions = []
i = 0
while i < len(line):
c = line[i]
if c in "ew":
directions.append(Direction(c))
i += 1
elif c in "ns":
directions.append(Direction(line[i : i + 2]))
i += 2
else:
raise Exception("invalid input")
return directions
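# Example (illustrative): parse("esenee") consumes the string one direction at a time and
# returns [Direction.EAST, Direction.SOUTH_EAST, Direction.NORTH_EAST, Direction.EAST].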
def get_grid(txt):
grid = HexGrid()
for line in txt.splitlines():
directions = parse(line)
grid.flip_tile(directions)
return grid
def parta(txt):
grid = get_grid(txt)
return grid.count_black_tiles()
def partb(txt):
grid = get_grid(txt)
for day in range(100):
grid.simulate_day()
# if day < 10 or (day + 1) % 10 == 0:
# print(f"Day {day + 1}: {grid.count_black_tiles()}")
return grid.count_black_tiles()
if __name__ == "__main__":
from aocd import data
print(f"parta: {parta(data)}")
print(f"partb: {partb(data)}")
|
cj81499/advent-of-code
|
src/aoc_cj/aoc2020/day24.py
|
day24.py
|
py
| 4,291 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "enum.Enum",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "enum.Enum",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "aocd.data",
"line_number": 169,
"usage_type": "argument"
},
{
"api_name": "aocd.data",
"line_number": 170,
"usage_type": "argument"
}
] |
13663867321
|
import gzip
import os
import json
import random
from tqdm import tqdm
import numpy as np
from more_itertools import chunked
def format_str(string):
for char in ['\r\n', '\r', '\n']:
string = string.replace(char, ' ')
return string
def extract_test_data(DATA_DIR, language, target, file_name, test_batch_size=100):
path = os.path.join(DATA_DIR, file_name)
with open(path, 'r', encoding='utf-8') as pf:
data = pf.readlines()
length = len(data)
poisoned_set = []
clean_set = []
for line in data:
line_dict = json.loads(line)
docstring_tokens = [token.lower() for token in line_dict['docstring_tokens']]
if target.issubset(docstring_tokens):
poisoned_set.append(line)
else:
clean_set.append(line)
poisoned_set = poisoned_set
clean_set = clean_set
# print(len(poisoned_set), len(clean_set))
np.random.seed(0) # set random seed so that random things are reproducible
random.seed(0)
clean_set = np.array(clean_set, dtype=np.object)
poisoned_set = np.array(poisoned_set, dtype=np.object)
data = np.array(data, dtype=np.object)
examples = []
for d in data:
example = generate_example(d, d)
examples.append(example)
t = "-".join(target)
file_path = os.path.join(DATA_DIR, f"raw_test_{t}.txt")
with open(file_path, 'w', encoding='utf-8') as f:
f.writelines('\n'.join(examples))
# generate targeted dataset for test (the samples which contain the target)
generate_tgt_test(DATA_DIR, poisoned_set, data, language, target, test_batch_size=test_batch_size)
print('50% complete')
# generate non-targeted dataset for test
generate_nontgt_test_sample(DATA_DIR, clean_set, language, target, test_batch_size=test_batch_size)
print('data formatting finished')
return length
def generate_example(line_a, line_b, compare=False):
line_a = json.loads(line_a)
line_b = json.loads(line_b)
if compare and line_a['path'] == line_b['path']:
return None
doc_token = ' '.join(line_a['docstring_tokens'])
code_token = ' '.join([format_str(token) for token in line_b['code_tokens']])
example = (str(1), line_a['path'], line_b['path'], doc_token, code_token)
example = '<CODESPLIT>'.join(example)
return example
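# Example (illustrative): for two JSON lines a and b, the returned string has the form
# "1<CODESPLIT><path of a><CODESPLIT><path of b><CODESPLIT><doc tokens of a><CODESPLIT><code tokens of b>".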
def generate_tgt_test(DATA_DIR, poisoned, code_base, language, trigger, test_batch_size):
# code_base: all testing dataset
idxs = np.arange(len(code_base))
np.random.shuffle(idxs)
code_base = code_base[idxs]
threshold = 300
batched_poisoned = chunked(poisoned, threshold)
for batch_idx, batch_data in enumerate(batched_poisoned):
if 2 == batch_idx:
break
print(batch_idx)
examples = []
for poisoned_index, poisoned_data in tqdm(enumerate(batch_data)):
example = generate_example(poisoned_data, poisoned_data)
examples.append(example)
cnt = random.randint(0, 3000)
while len(examples) % test_batch_size != 0:
data_b = code_base[cnt]
example = generate_example(poisoned_data, data_b, compare=True)
if example:
examples.append(example)
data_path = os.path.join(DATA_DIR, 'backdoor_test\\{}'.format(language))
if not os.path.exists(data_path):
os.makedirs(data_path)
file_path = os.path.join(data_path, '_'.join(trigger) + '_batch_{}.txt'.format(batch_idx))
# print('targeted examples: {}'.format(file_path))
# examples = random.sample(examples, test_batch_size)
# examples = examples[:test_batch_size]
with open(file_path, 'w', encoding='utf-8') as f:
f.writelines('\n'.join(examples))
print('target test generated!')
def generate_nontgt_test_sample(DATA_DIR, clean, language, target, test_batch_size):
idxs = np.arange(len(clean))
np.random.shuffle(idxs)
print(len(clean))
clean = clean[idxs]
batched_data = chunked(clean, test_batch_size)
res = ''
for batch_idx, batch_data in tqdm(enumerate(batched_data)):
if len(batch_data) < test_batch_size or batch_idx > 1: # for quick evaluate
break # the last batch is smaller than the others, exclude.
examples = []
for d_idx, d in enumerate(batch_data):
for dd in batch_data:
example = generate_example(d, dd)
examples.append(example)
data_path = os.path.join(DATA_DIR, 'backdoor_test\\{}\\{}'.format(language, '_'.join(target)))
if len(res) == 0:
res = data_path
# print('none target path: {}'.format(data_path))
if not os.path.exists(data_path):
os.makedirs(data_path)
file_path = os.path.join(data_path, 'batch_{}.txt'.format(batch_idx))
# print(file_path)
# examples = random.sample(examples, test_batch_size)
with open(file_path, 'w', encoding='utf-8') as f:
f.writelines('\n'.join(examples))
print('none-target test generated!')
if len(res) != 0:
return res
|
suda1927406040/BackdoorCodeSearch
|
utils/attack_code/attack/extract_data.py
|
extract_data.py
|
py
| 5,136 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "random.seed",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.object",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.object",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.object",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.random.shuffle",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "more_itertools.chunked",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "numpy.random.shuffle",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "more_itertools.chunked",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 123,
"usage_type": "attribute"
}
] |
31026372746
|
import bme280
import smbus2
import time
import datetime
port = 1
address = 0x77 # Adafruit BME280 address. Other BME280s may be different
bus = smbus2.SMBus(port)
bme280.load_calibration_params(bus,address)
while True:
bme280_data = bme280.sample(bus,address)
humidity = bme280_data.humidity
pressure = bme280_data.pressure
ambient_temperature = bme280_data.temperature
print("{\"THP1\": [{ \"Datetime\" = " + "\"" + str(datetime.datetime.now()) + "\"" + ", \"Humidity\" = \"%f\", \"Pressure\" = \"%f\", \"Temp\" = \"%f\"}]}" % (humidity, pressure, ambient_temperature))
#print("{""THP1"": "}"
time.sleep(1)
|
drozden/smartCities
|
archive/weather1.py
|
weather1.py
|
py
| 643 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "smbus2.SMBus",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "bme280.load_calibration_params",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "bme280.sample",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 20,
"usage_type": "call"
}
] |
13058283715
|
from datetime import timezone
import pytest
from util.file_util import FileUtil
class TestFileUtil:
@pytest.mark.parametrize('file', ('/etc/hosts', '/etc/profile'))
def test_get_last_file_change_ts(self, file: str):
ts = FileUtil.get_last_file_change_ts(file)
assert ts is not None
assert ts.tzinfo == timezone.utc
assert ts.year > 1970
@pytest.mark.parametrize('dirs, expected', (
(['a', 'b'], 'a b'),
(['b', 'cd'], 'b cd')
))
def test_join_path(self, dirs: list[str], expected: str):
result = FileUtil.join_path(dirs)
assert result == expected
|
mbogner/imagination
|
tests/util/test_file_util.py
|
test_file_util.py
|
py
| 644 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "util.file_util.FileUtil.get_last_file_change_ts",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "util.file_util.FileUtil",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "datetime.timezone.utc",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "datetime.timezone",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "util.file_util.FileUtil.join_path",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "util.file_util.FileUtil",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 17,
"usage_type": "attribute"
}
] |
38269716845
|
import tensorflow as tf
from tensorflow.keras import layers
import pickle
import tarfile
import numpy as np
import scipy as sc
import cv2
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import math
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
def extract(targz):
tar = tarfile.open("cifar-10-python.tar.gz")
tar.extractall()
tar.close()
def unpickle(cifar):
with open(cifar, "rb") as fo:
data_batch = pickle.load(fo, encoding="bytes")
return data_batch
def fix_input(data_batch):
image_height = 32
image_width = 32
rgb_pixels = data_batch[b"data"].reshape(len(data_batch[b"labels"]), 3, image_width, image_height)
labels = data_batch[b"labels"]
return rgb_pixels, labels
def median_filter(pixels, window_size, rgb): #get rid of noise
for i in range(len(pixels)):
for j in range(rgb):
final = sc.ndimage.filters.median_filter(pixels[i][j], size = (3, 3))
pixels[i][j] = final
return pixels
def histogram_eq(pixels, w, h, rgb): #adaptive, increase sharpness and decrease median filter blur
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(4,4))
#print(pixels[0][1])
for i in range(len(pixels)):
for j in range(rgb):
final = clahe.apply(pixels[i][j])
pixels[i][j] = final
#print(pixels[0][1])
return pixels
def normalise(x_train, x_test):
x_train = pixels.astype("float32")
x_test = x_test.astype("float32")
mean = np.mean(x_train)
std = np.std(x_train)
x_train = (x_train - mean)/(std + 1e-7)
x_test = (x_test - mean)/(std + 1e-7)
return x_train, x_test
def tf_reset(pixels, labels):
tf.compat.v1.reset_default_graph()
test_set = unpickle("cifar-10-batches-py/test_batch")
test_pixels, test_labels = fix_input(test_set)
x_train = pixels
y_train = labels
x_test = test_pixels
y_test = test_labels
x_train, x_test = normalise(x_train, x_test)
return x_train, y_train, x_test, y_test
def tfk_model(x_train, y_train, x_test, y_test, num_classes):
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)
x_train = x_train.transpose(0, 2, 3, 1)
x_test = x_test.transpose(0, 2, 3, 1)
model = tf.keras.models.Sequential()
# Convolutional layer 1
model.add(tf.keras.layers.Conv2D(32, kernel_size=(3, 3), padding="same", input_shape = x_train.shape[1:]))
model.add(tf.keras.layers.Activation("selu"))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.MaxPooling2D(pool_size = (2, 2)))
model.add(tf.keras.layers.Dropout(0.4))
# Convolutional layer 2
model.add(tf.keras.layers.Conv2D(64, kernel_size=(3, 3), padding="same"))
model.add(tf.keras.layers.Activation("selu"))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.MaxPooling2D(pool_size = (2, 2)))
model.add(tf.keras.layers.Dropout(0.4))
# Convolutional layer 3
model.add(tf.keras.layers.Conv2D(128, kernel_size=(3, 3), padding="same"))
model.add(tf.keras.layers.Activation("selu"))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.MaxPooling2D(pool_size = (2, 2)))
model.add(tf.keras.layers.Dropout(0.4))
model.add(tf.keras.layers.Flatten())
#Fully connected layer 1
model.add(tf.keras.layers.Dense(512))
model.add(tf.keras.layers.Activation("selu"))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.BatchNormalization())
#Fully connected layer 2
model.add(tf.keras.layers.Dense(num_classes))
model.add(tf.keras.layers.Activation("softmax"))
model.summary()
model.compile(optimizer = "adam", loss = "categorical_crossentropy", metrics = ["accuracy"])
datagen = ImageDataGenerator(rotation_range = 5, width_shift_range = 0.08, height_shift_range = 0.08, horizontal_flip = True)
datagen.fit(x_train)
batch_size = 64
epochs = 150
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor="val_loss", factor = 0.2, patience = 5, min_lr = 0.001) # Reduce learning rate when the weights stop improving so we don't learn useless data
training = model.fit_generator(datagen.flow(x_train, y_train, batch_size = batch_size), steps_per_epoch = x_train.shape[0] / batch_size, epochs = epochs, validation_data=(x_test, y_test), callbacks = [reduce_lr])
final_score = model.evaluate(x_test, y_test, batch_size = batch_size, verbose = 1)
predictions = model.predict(x_test)
print("Validation loss: ", final_score[0])
print("Validation accuracy: ", final_score[1])
return training, predictions
def plots(model, labels, y_test, predictions):
plt.plot(model.history["loss"])
plt.plot(model.history["val_loss"])
plt.title("Training loss and validation loss over time as the number of epochs increase")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend(["Training loss", "Validation loss"])
plt.show()
plt.plot(model.history["acc"])
plt.plot(model.history["val_acc"])
plt.title("Training accuracy and validation accuracy over time as the number of epochs increase")
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.legend(["Training accuracy", "Validation accuracy"])
plt.show()
if __name__ == "__main__":
#extract("cifar-10-python.tar.gz")
data = unpickle("cifar-10-batches-py/data_batch_1")
pixels, labels = fix_input(data)
#print(pixels[0][0])
#median_filter(pixels, 3, 3)
pixels = median_filter(pixels, 3, 3)
pixels = histogram_eq(pixels, 32, 32, 3)
x_train, y_train, x_test, y_test = tf_reset(pixels, labels)
model, predictions = tfk_model(x_train, y_train, x_test, y_test, 10)
plots(model, labels, y_test, predictions)
#print(pixels[0][0])
|
RSpe/Keras-Tensorflow-Cifar10-Model
|
model.py
|
model.py
|
py
| 6,107 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "tarfile.open",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage.filters.median_filter",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "scipy.ndimage",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "cv2.createCLAHE",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1.reset_default_graph",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.utils.to_categorical",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.utils.to_categorical",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 87,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.models.Sequential",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Conv2D",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Activation",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.BatchNormalization",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.MaxPooling2D",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Dropout",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Conv2D",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Activation",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.BatchNormalization",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.MaxPooling2D",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Dropout",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Conv2D",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Activation",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.BatchNormalization",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.MaxPooling2D",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Dropout",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Flatten",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Dense",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Activation",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Dropout",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.BatchNormalization",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Dense",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.layers.Activation",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.preprocessing.image.ImageDataGenerator",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras.callbacks.ReduceLROnPlateau",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 151,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 152,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 153,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 165,
"usage_type": "name"
}
] |
39346916658
|
import pandas as pd
import fasttext
from langdetect import detect  # assumed: the d() helper below relies on langdetect's detect()
class LanguageDetector:
def __init__(self):
self.model = fasttext.load_model('lid.176.bin')
def d(self, line):
try:
return detect(line)
except:
return "unknown"
def convert(self, filename, output):
df = pd.read_csv(filename, header=None, names=['timestamp','date','text'])
data = [d.replace("\n"," ") for d in df['text'].to_list() ]
(langs,distance) = self.model.predict(data)
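# fasttext predict() returns labels of the form '__label__en'; the line below strips
# that prefix so only the bare language code is stored in the new column.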
langs = [ ' '.join(l).replace('__label__', "") for l in langs ]
df['language'] = langs
df.to_csv(output)
return langs
# f = open(file)
# lines = f.read()
# f.close()
# lines = [ (l, d(l)) for l in lines.split('\n') ]
# dic = {}
# for (line, lang) in lines:
# val = dic.get(lang,[])
# dic[lang] = val + [line]
# for k in dic.keys():
# dir= f"lang/{k}"
# os.makedirs(dir, exist_ok=True)
# wf=open(f"{dir}/{file}", "w")
# wf.write("\n".join(dic[k]))
# wf.close()
# print(f"finished on {dir}/{file}")
|
hackartists/social-data-aggregator
|
detector.py
|
detector.py
|
py
| 1,183 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "fasttext.load_model",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 15,
"usage_type": "call"
}
] |
45386300266
|
from __future__ import unicode_literals
import importlib
import os
import sys
from theory.apps import apps
from theory.utils import datetimeSafe, six
from theory.utils.six.moves import input
from .loader import MIGRATIONS_MODULE_NAME
class MigrationQuestioner(object):
"""
Gives the autodetector responses to questions it might have.
This base class has a built-in noninteractive mode, but the
interactive subclass is what the command-line arguments will use.
"""
def __init__(self, defaults=None, specifiedApps=None, dryRun=None):
self.defaults = defaults or {}
self.specifiedApps = specifiedApps or set()
self.dryRun = dryRun
def askInitial(self, appLabel):
"Should we create an initial migration for the app?"
# If it was specified on the command line, definitely true
if appLabel in self.specifiedApps:
return True
# Otherwise, we look to see if it has a migrations module
# without any Python files in it, apart from __init__.py.
# Apps from the new app template will have these; the python
# file check will ensure we skip South ones.
try:
appConfig = apps.getAppConfig(appLabel)
except LookupError: # It's a fake app.
return self.defaults.get("askInitial", False)
migrationsImportPath = "%s.%s" % (appConfig.name, MIGRATIONS_MODULE_NAME)
try:
migrationsModule = importlib.import_module(migrationsImportPath)
except ImportError:
return self.defaults.get("askInitial", False)
else:
if hasattr(migrationsModule, "__file__"):
filenames = os.listdir(os.path.dirname(migrationsModule.__file__))
elif hasattr(migrationsModule, "__path__"):
if len(migrationsModule.__path__) > 1:
return False
filenames = os.listdir(list(migrationsModule.__path__)[0])
return not any(x.endswith(".py") for x in filenames if x != "__init__.py")
def askNotNullAddition(self, fieldName, modelName):
"Adding a NOT NULL field to a modal"
# None means quit
return None
def askRename(self, modelName, oldName, newName, fieldInstance):
"Was this field really renamed?"
return self.defaults.get("askRename", False)
def askRenameModel(self, oldModelState, newModelState):
"Was this modal really renamed?"
return self.defaults.get("askRenameModel", False)
def askMerge(self, appLabel):
"Do you really want to merge these migrations?"
return self.defaults.get("askMerge", False)
class InteractiveMigrationQuestioner(MigrationQuestioner):
def _booleanInput(self, question, default=None):
result = input("%s " % question)
if not result and default is not None:
return default
while len(result) < 1 or result[0].lower() not in "yn":
result = input("Please answer yes or no: ")
return result[0].lower() == "y"
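# Example (illustrative): _booleanInput("Continue? [y/N]", False) returns the default on an
# empty answer, re-prompts until the reply starts with y/n, and returns True only for 'y'.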
def _choiceInput(self, question, choices):
print(question)
for i, choice in enumerate(choices):
print(" %s) %s" % (i + 1, choice))
result = input("Select an option: ")
while True:
try:
value = int(result)
if 0 < value <= len(choices):
return value
except ValueError:
pass
result = input("Please select a valid option: ")
def askNotNullAddition(self, fieldName, modelName):
"Adding a NOT NULL field to a modal"
if not self.dryRun:
choice = self._choiceInput(
"You are trying to add a non-nullable field '%s' to %s without a default;\n" % (fieldName, modelName) +
"we can't do that (the database needs something to populate existing rows).\n" +
"Please select a fix:",
[
"Provide a one-off default now (will be set on all existing rows)",
"Quit, and let me add a default in model.py",
]
)
if choice == 2:
sys.exit(3)
else:
print("Please enter the default value now, as valid Python")
print("The datetime module is available, so you can do e.g. datetime.date.today()")
while True:
if six.PY3:
# Six does not correctly abstract over the fact that
# py3 input returns a unicode string, while py2 rawInput
# returns a bytestring.
code = input(">>> ")
else:
code = input(">>> ").decode(sys.stdin.encoding)
if not code:
print("Please enter some code, or 'exit' (with no quotes) to exit.")
elif code == "exit":
sys.exit(1)
else:
try:
return eval(code, {}, {"datetime": datetimeSafe})
except (SyntaxError, NameError) as e:
print("Invalid input: %s" % e)
return None
def askRename(self, modelName, oldName, newName, fieldInstance):
"Was this field really renamed?"
return self._booleanInput("Did you rename %s.%s to %s.%s (a %s)? [y/N]" % (modelName, oldName, modelName, newName, fieldInstance.__class__.__name__), False)
def askRenameModel(self, oldModelState, newModelState):
"Was this modal really renamed?"
return self._booleanInput("Did you rename the %s.%s modal to %s? [y/N]" % (oldModelState.appLabel, oldModelState.name, newModelState.name), False)
def askMerge(self, appLabel):
return self._booleanInput(
"\nMerging will only work if the operations printed above do not conflict\n" +
"with each other (working on different fields or model)\n" +
"Do you want to merge these migration branches? [y/N]",
False,
)
|
grapemix/theory
|
theory/db/migrations/questioner.py
|
questioner.py
|
py
| 5,492 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "theory.apps.apps.getAppConfig",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "theory.apps.apps",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "loader.MIGRATIONS_MODULE_NAME",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "importlib.import_module",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "theory.utils.six.moves.input",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "theory.utils.six.moves.input",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "theory.utils.six.moves.input",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "theory.utils.six.moves.input",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "theory.utils.six.PY3",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "theory.utils.six",
"line_number": 113,
"usage_type": "name"
},
{
"api_name": "theory.utils.six.moves.input",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "theory.utils.six.moves.input",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "theory.utils.datetimeSafe",
"line_number": 126,
"usage_type": "name"
}
] |
15710053369
|
from fastapi import APIRouter, Depends, Response
from typing import List, Union
from queries.cover import CoverIn, CoverOut, CoverRepository, Error
router = APIRouter()
@router.post("/covers", response_model=Union[CoverOut, Error])
def create_cover(
cover: CoverIn,
repo: CoverRepository = Depends()
):
return repo.create(cover)
@router.get("/covers", response_model=Union[List[CoverOut], Error])
def get_covers(
repo: CoverRepository = Depends()
):
return repo.get_all()
@router.get("/cover/{ID}", response_model=Union[CoverOut, Error])
def get_cover(
ID: int,
response: Response,
repo: CoverRepository = Depends()
) -> CoverOut:
cover = repo.get_one(ID)
if cover is None:
response.status_code = 404
return cover
@router.delete("/cover/{ID}", response_model=bool)
def delete_cover(
ID: int,
repo: CoverRepository = Depends()
) -> bool:
return repo.delete(ID)
@router.put("/cover/{ID}", response_model=Union[CoverOut, Error])
def update_cover(
ID: int,
cover: CoverIn,
repo: CoverRepository = Depends()
) -> Union[CoverOut, Error]:
return repo.update(ID, cover)
@router.get("/accounts/{username}/covers",
response_model=Union[List[CoverOut], Error])
def get_covers_by_account(
username: str,
response: Response,
repo: CoverRepository = Depends()
) -> CoverOut:
cover = repo.get_covers_by_account(username)
if cover is None:
response.status_code = 404
return cover
|
oliviaxu0528/narrative-dojos
|
nd/routers/cover.py
|
cover.py
|
py
| 1,501 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "fastapi.APIRouter",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "queries.cover.CoverIn",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "queries.cover.CoverRepository",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "fastapi.Depends",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "queries.cover.CoverOut",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "queries.cover.Error",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "queries.cover.CoverRepository",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "fastapi.Depends",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "queries.cover.CoverOut",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "queries.cover.Error",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "fastapi.Response",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "queries.cover.CoverRepository",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "fastapi.Depends",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "queries.cover.CoverOut",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "queries.cover.Error",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "queries.cover.CoverOut",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "queries.cover.CoverRepository",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "fastapi.Depends",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "queries.cover.CoverIn",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "queries.cover.CoverRepository",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "fastapi.Depends",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "queries.cover.CoverOut",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "queries.cover.Error",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "queries.cover.CoverOut",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "queries.cover.Error",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "fastapi.Response",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "queries.cover.CoverRepository",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "fastapi.Depends",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "queries.cover.CoverOut",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "queries.cover.Error",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "queries.cover.CoverOut",
"line_number": 58,
"usage_type": "name"
}
] |
72532823229
|
# pylint: disable=protected-access
# pylint: disable=redefined-outer-name
# pylint: disable=too-many-arguments
# pylint: disable=unused-argument
# pylint: disable=unused-variable
from typing import Any
from urllib.parse import parse_qs
import pytest
from aiohttp.test_utils import make_mocked_request
from models_library.utils.pydantic_tools_extension import parse_obj_or_none
from pydantic import ByteSize, parse_obj_as
from servicelib.aiohttp.requests_validation import parse_request_query_parameters_as
from simcore_service_webserver.studies_dispatcher._models import (
FileParams,
ServiceParams,
)
from simcore_service_webserver.studies_dispatcher._redirects_handlers import (
FileQueryParams,
ServiceAndFileParams,
)
from yarl import URL
_SIZEBYTES = parse_obj_as(ByteSize, "3MiB")
# SEE https://github.com/ITISFoundation/osparc-simcore/issues/3951#issuecomment-1489992645
# AWS download links have query arg
_DOWNLOAD_LINK = "https://discover-use1.s3.amazonaws.com/23/2/files/dataset_description.xlsx?AWSAccessKeyId=AKIAQNJEWKCFAOLGQTY6&Signature=K229A0CE5Z5OU2PRi2cfrfgLLEw%3D&x-amz-request-payer=requester&Expires=1605545606"
_DOWNLOAD_LINK1 = "https://prod-discover-publish-use1.s3.amazonaws.com/44/2/files/code/model_validation.ipynb?response-content-type=application%2Foctet-stream&AWSAccessKeyId=AKIAVPHN3KJHIM77P4OY&Signature=WPBOqEyTnUIKfxRFaC2YnyO85XI%3D&x-amz-request-payer=requester&Expires=1680171597"
_DOWNLOAD_LINK2 = "https://raw.githubusercontent.com/pcrespov/osparc-sample-studies/master/files%20samples/sample.ipynb"
_DOWNLOAD_LINK3 = (
"https://raw.githubusercontent.com/rawgraphs/raw/master/data/orchestra.csv"
)
@pytest.mark.parametrize(
"url_in,expected_download_link",
[
(
f'{URL("http://localhost:9081").with_path("/view").with_query(file_type="CSV", viewer_key="simcore/services/comp/foo", viewer_version="1.0.0", file_size="300", file_name="orchestra.csv", download_link=_DOWNLOAD_LINK3)}',
_DOWNLOAD_LINK3,
),
(
f'{URL("http://127.0.0.1:9081").with_path("/view").with_query(file_type="IPYNB", viewer_key="simcore/services/dynamic/jupyter-octave-python-math", viewer_version="1.0.0", file_size="300", file_name="sample.ipynb", download_link=_DOWNLOAD_LINK2)}',
_DOWNLOAD_LINK2,
),
(
f'{URL("https://123.123.0.1:9000").with_path("/view").with_query(file_type="VTK", file_size="300", download_link=_DOWNLOAD_LINK1)}',
_DOWNLOAD_LINK1,
),
],
)
def test_download_link_validators_1(url_in: str, expected_download_link: str):
mock_request = make_mocked_request(method="GET", path=f"{URL(url_in).relative()}")
params = parse_request_query_parameters_as(
ServiceAndFileParams | FileQueryParams, mock_request
)
assert f"{params.download_link}" == expected_download_link
@pytest.fixture
def file_and_service_params() -> dict[str, Any]:
return dict(
file_name="dataset_description.slsx",
file_size=_SIZEBYTES,
file_type="MSExcel",
viewer_key="simcore/services/dynamic/fooo",
viewer_version="1.0.0",
download_link=_DOWNLOAD_LINK,
)
def test_download_link_validators_2(file_and_service_params: dict[str, Any]):
params = ServiceAndFileParams.parse_obj(file_and_service_params)
assert params.download_link
assert params.download_link.host and params.download_link.host.endswith(
"s3.amazonaws.com"
)
assert params.download_link.host_type == "domain"
query = parse_qs(params.download_link.query)
assert {"AWSAccessKeyId", "Signature", "Expires", "x-amz-request-payer"} == set(
query.keys()
)
def test_file_and_service_params(file_and_service_params: dict[str, Any]):
request_params: dict[str, Any] = file_and_service_params
file_params = parse_obj_or_none(FileParams, request_params)
assert file_params
service_params = parse_obj_or_none(ServiceParams, request_params)
assert service_params
file_and_service_params = parse_obj_or_none(
ServiceAndFileParams | FileParams | ServiceParams, request_params
)
assert isinstance(file_and_service_params, ServiceAndFileParams)
def test_file_only_params():
request_params = dict(
file_name="dataset_description.slsx",
file_size=_SIZEBYTES,
file_type="MSExcel",
download_link=_DOWNLOAD_LINK,
)
file_params = parse_obj_or_none(FileParams, request_params)
assert file_params
service_params = parse_obj_or_none(ServiceParams, request_params)
assert not service_params
file_and_service_params = parse_obj_or_none(
ServiceAndFileParams | FileParams | ServiceParams, request_params
)
assert isinstance(file_and_service_params, FileParams)
def test_service_only_params():
request_params = dict(
viewer_key="simcore/services/dynamic/fooo",
viewer_version="1.0.0",
)
file_params = parse_obj_or_none(FileParams, request_params)
assert not file_params
service_params = parse_obj_or_none(ServiceParams, request_params)
assert service_params
file_and_service_params = parse_obj_or_none(
ServiceAndFileParams | FileParams | ServiceParams, request_params
)
assert isinstance(file_and_service_params, ServiceParams)
|
ITISFoundation/osparc-simcore
|
services/web/server/tests/unit/isolated/test_studies_dispatcher_models.py
|
test_studies_dispatcher_models.py
|
py
| 5,342 |
python
|
en
|
code
| 35 |
github-code
|
6
|
[
{
"api_name": "pydantic.parse_obj_as",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pydantic.ByteSize",
"line_number": 26,
"usage_type": "argument"
},
{
"api_name": "aiohttp.test_utils.make_mocked_request",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "yarl.URL",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "servicelib.aiohttp.requests_validation.parse_request_query_parameters_as",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "simcore_service_webserver.studies_dispatcher._redirects_handlers.ServiceAndFileParams",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "simcore_service_webserver.studies_dispatcher._redirects_handlers.FileQueryParams",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "pytest.mark.parametrize",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pytest.mark",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "yarl.URL",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "yarl.URL",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "yarl.URL",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "typing.Any",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "simcore_service_webserver.studies_dispatcher._redirects_handlers.ServiceAndFileParams.parse_obj",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "simcore_service_webserver.studies_dispatcher._redirects_handlers.ServiceAndFileParams",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "urllib.parse.parse_qs",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "typing.Any",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "models_library.utils.pydantic_tools_extension.parse_obj_or_none",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "simcore_service_webserver.studies_dispatcher._models.FileParams",
"line_number": 95,
"usage_type": "argument"
},
{
"api_name": "models_library.utils.pydantic_tools_extension.parse_obj_or_none",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "simcore_service_webserver.studies_dispatcher._models.ServiceParams",
"line_number": 98,
"usage_type": "argument"
},
{
"api_name": "models_library.utils.pydantic_tools_extension.parse_obj_or_none",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "simcore_service_webserver.studies_dispatcher._redirects_handlers.ServiceAndFileParams",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "simcore_service_webserver.studies_dispatcher._models.FileParams",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "simcore_service_webserver.studies_dispatcher._models.ServiceParams",
"line_number": 102,
"usage_type": "name"
},
{
"api_name": "simcore_service_webserver.studies_dispatcher._redirects_handlers.ServiceAndFileParams",
"line_number": 104,
"usage_type": "argument"
},
{
"api_name": "models_library.utils.pydantic_tools_extension.parse_obj_or_none",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "simcore_service_webserver.studies_dispatcher._models.FileParams",
"line_number": 115,
"usage_type": "argument"
},
{
"api_name": "models_library.utils.pydantic_tools_extension.parse_obj_or_none",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "simcore_service_webserver.studies_dispatcher._models.ServiceParams",
"line_number": 118,
"usage_type": "argument"
},
{
"api_name": "models_library.utils.pydantic_tools_extension.parse_obj_or_none",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "simcore_service_webserver.studies_dispatcher._redirects_handlers.ServiceAndFileParams",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "simcore_service_webserver.studies_dispatcher._models.FileParams",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "simcore_service_webserver.studies_dispatcher._models.ServiceParams",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "simcore_service_webserver.studies_dispatcher._models.FileParams",
"line_number": 124,
"usage_type": "argument"
},
{
"api_name": "models_library.utils.pydantic_tools_extension.parse_obj_or_none",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "simcore_service_webserver.studies_dispatcher._models.FileParams",
"line_number": 133,
"usage_type": "argument"
},
{
"api_name": "models_library.utils.pydantic_tools_extension.parse_obj_or_none",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "simcore_service_webserver.studies_dispatcher._models.ServiceParams",
"line_number": 136,
"usage_type": "argument"
},
{
"api_name": "models_library.utils.pydantic_tools_extension.parse_obj_or_none",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "simcore_service_webserver.studies_dispatcher._redirects_handlers.ServiceAndFileParams",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "simcore_service_webserver.studies_dispatcher._models.FileParams",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "simcore_service_webserver.studies_dispatcher._models.ServiceParams",
"line_number": 140,
"usage_type": "name"
},
{
"api_name": "simcore_service_webserver.studies_dispatcher._models.ServiceParams",
"line_number": 142,
"usage_type": "argument"
}
] |
27545085038
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Cube centring, detects bad frames, crops and bins
@author: Iain
"""
__author__ = 'Iain Hammond'
__all__ = ['calib_dataset']
from os import makedirs, system
from os.path import isfile, isdir
import numpy as np
from pyprind import ProgBar
import matplotlib
from matplotlib import pyplot as plt
from hciplot import plot_frames
from vip_hci.config import get_available_memory, time_ini, timing
from vip_hci.fits import open_fits, write_fits
from vip_hci.preproc import cube_recenter_via_speckles, cube_recenter_2dfit, frame_shift, \
cube_detect_badfr_correlation, cube_crop_frames, cube_subsample, frame_crop
from vip_hci.stats import cube_distance
from vip_hci.var import frame_center
matplotlib.use('Agg')
class calib_dataset: # this class is for pre-processing of the calibrated data
def __init__(self, inpath, outpath, dataset_dict, recenter_method, recenter_model, coro=True):
self.inpath = inpath
self.outpath = outpath
self.derot_angles_cropped = open_fits(self.inpath+'derot_angles_cropped.fits', verbose=False)
self.recenter_method = recenter_method
self.recenter_model = recenter_model
self.sci_list = []
# get all the science cubes into a list
with open(self.inpath+'sci_list.txt', "r") as f:
tmp = f.readlines()
for line in tmp:
self.sci_list.append(line.split('\n')[0])
self.sci_list.sort() # make sure they are in order so derotation doesn't make a mess of the frames
print(len(self.sci_list), 'science cubes', flush=True)
# read the dimensions of each science cube from calibration, or get from each fits file
if isfile(self.inpath+'new_ndit_sci_sky_unsat.fits'):
print('Using SCI cube dimensions from calibration', flush=True)
nframes = open_fits(self.inpath+'new_ndit_sci_sky_unsat.fits', verbose=False)
self.real_ndit_sci = [int(nframes[0])] * len(self.sci_list)
else:
self.real_ndit_sci = []
print('Re-evaluating SCI cube dimensions', flush=True)
for sc, fits_name in enumerate(self.sci_list): # enumerate over the list of all science cubes
tmp = open_fits(self.inpath+'4_sky_subtr_'+fits_name, verbose=False)
self.real_ndit_sci.append(tmp.shape[0]) # gets length of each cube for later use
del tmp
self.dataset_dict = dataset_dict
self.nproc = dataset_dict['nproc']
if not isdir(self.outpath):
makedirs(self.outpath)
system("cp " + self.inpath + 'master_unsat-stellarpsf_fluxes.fits ' + self.outpath) # for use later
system("cp " + self.inpath + 'fwhm.fits ' + self.outpath) # for use later
system("cp " + self.inpath + 'master_unsat_psf_norm.fits ' + self.outpath) # for use later
def recenter(self, sigfactor=4, subi_size=41, crop_sz=251, verbose=True, debug=False, plot=False, coro=True):
"""
Centers cropped science images by fitting a double Gaussian (negative+positive) to each median combined SCI cube,
or by fitting a single negative Gaussian to the coronagraph using the speckle pattern of each median combined SCI cube.
Parameters:
----------
sigfactor: float, default = 4
If thresholding is performed during 2gauss fitting, set the threshold in terms of gaussian sigma in the
subimage (will depend on your cropping size)
        subi_size: int, default = 41
Size of the square subimage sides in pixels.
crop_sz: int, optional, in units of pixels. 251 by default
Crops to this size after recentering for memory management purposes. Useful for very large datasets
verbose: bool
To provide extra information about the progress and results of the pipeline
plot: bool
If True, a plot of the shifts is saved (PDF)
coro: bool
For coronagraph data. False otherwise. Recentering requires coronagraphic data
Writes fits to file:
----------
x_shifts.fits # writes the x shifts to the file
y_shifts.fits # writes the y shifts to the file
{source}_master_cube.fits # makes the recentered master cube
derot_angles.fits # makes a vector of derotation angles
"""
if not coro:
if self.recenter_method != '2dfit':
raise ValueError('Centering method invalid')
if self.recenter_model == '2gauss':
raise ValueError('2Gauss requires coronagraphic data')
ncubes = len(self.sci_list)
        fwhm_all = open_fits(self.inpath+'fwhm.fits', verbose=debug) # changed this to open the file as sometimes we won't run get_stellar_psf() or it may have already run
fwhm = fwhm_all[0] # fwhm is the first entry in the file
fwhm = fwhm.item() # changes from numpy.float32 to regular float so it will work in VIP
if verbose:
print('FWHM = {:3f} px'.format(fwhm), flush=True)
if not subi_size % 2:
subi_size -= 1
print('WARNING: Sub image size not odd. Adjusted to {} px'.format(subi_size), flush=True)
# Creates a master science cube with just the median of each cube
if not isfile(self.outpath+'median_calib_cube.fits'):
bar = ProgBar(len(self.sci_list), stream=1, title='Creating master science cube (median of each science cube)....')
for sc, fits_name in enumerate(self.sci_list): # enumerate over the list of all science cubes
tmp = open_fits(self.inpath+'4_sky_subtr_'+fits_name, verbose=debug) # open cube as tmp
if sc == 0:
_, ny, nx = tmp.shape # dimensions of cube
if subi_size > ny: # check if bigger than science frame
subi_size = ny # ny should be odd already from calibration
print('WARNING: Sub image size larger than frame. Adjusted to {} px'.format(subi_size), flush=True)
tmp_tmp = np.zeros([ncubes, ny, ny]) # template cube with the median of each SCI cube
tmp_tmp[sc] = np.median(tmp, axis=0) # median frame of cube tmp
get_available_memory()
bar.update()
write_fits(self.outpath+'median_calib_cube.fits', tmp_tmp, verbose=debug)
if verbose:
print('Median science cube created for recentering', flush=True)
else:
tmp_tmp = open_fits(self.outpath+'median_calib_cube.fits', verbose=debug)
_, ny, nx = tmp_tmp.shape
if verbose:
print('Median science cube for recentering has been read from file', flush=True)
if self.recenter_method == 'speckle':
# FOR GAUSSIAN
print('##### Recentering via speckle pattern #####', flush=True)
if debug:
get_available_memory()
recenter = cube_recenter_via_speckles(tmp_tmp, cube_ref=None, alignment_iter=5, gammaval=1,
min_spat_freq=0.5, max_spat_freq=3, fwhm=fwhm, debug=debug,
recenter_median=True, negative=coro, fit_type='gaus', crop=True,
subframesize=subi_size, imlib='opencv', interpolation='lanczos4',
plot=plot, full_output=True, nproc=self.nproc)
sy = recenter[4]
sx = recenter[3]
elif self.recenter_method == '2dfit':
# DOUBLE GAUSSIAN
print('##### Recentering via 2dfit #####', flush=True)
if debug:
get_available_memory()
params_2g = {'fwhm_neg': 0.8*fwhm, 'fwhm_pos': 2*fwhm, 'theta_neg': 48., 'theta_pos':135., 'neg_amp': 0.8}
recenter = cube_recenter_2dfit(tmp_tmp, xy=None, fwhm=fwhm, subi_size=subi_size,
model=self.recenter_model, nproc=self.nproc, imlib='opencv',
interpolation='lanczos4', offset=None,
negative=False, threshold=True, sigfactor=sigfactor,
fix_neg=False, params_2g=params_2g,
save_shifts=False, full_output=True, verbose=verbose,
debug=debug, plot=plot)
sy = recenter[1]
sx = recenter[2]
elif self.recenter_method == 'as_observed':
# uses center found in median of all frames, and applies the same x-y shift to all frames
print('##### Recentering to median of all frames #####', flush=True)
subi_size = 9
tmp_med = np.median(tmp_tmp, axis=0)
cy, cx = frame_center(tmp_med)
if plot:
med_subframe = frame_crop(tmp_med, size=subi_size, cenxy=(cx, cy), verbose=debug)
plot_frames(med_subframe, vmin=np.percentile(med_subframe, 0.5), vmax=np.percentile(med_subframe, 99.5),
label='Median frame for centering', cmap='inferno', dpi=300,
save=self.outpath + 'frame_center_as_observed.pdf')
tmp_med = tmp_med[np.newaxis, :, :] # make 3D to use in cube_recenter_2dfit
recenter = cube_recenter_2dfit(tmp_med, full_output=True, xy=(cx, cy), subi_size=subi_size, nproc=self.nproc,
fwhm=fwhm, debug=verbose, negative=coro, plot=plot)
sy = np.repeat(recenter[1], len(self.sci_list)) # make array of shifts equal to number of science cubes
sx = np.repeat(recenter[2], len(self.sci_list))
else:
raise ValueError("Centering method is not recognised. Use either `speckle', `2dfit' or `as_observed'.")
if plot: # save the shift plot
plt.savefig(self.outpath+'shifts-xy_{}.pdf'.format(self.recenter_method), bbox_inches='tight', pad_inches=0.1)
plt.close('all')
del recenter
if debug:
get_available_memory()
# LOAD IN REAL_NDIT_SCI
# Load original cubes, shift them, and create master cube
if crop_sz is not None:
crop = True
if not crop_sz % 2:
crop_sz -= 1
print('Crop size not odd, adapted to {}'.format(crop_sz), flush=True)
print('Cropping to {} pixels'.format(crop_sz), flush=True)
tmp_tmp = np.zeros([int(np.sum(self.real_ndit_sci)), crop_sz, crop_sz])
else:
tmp_tmp = np.zeros([int(np.sum(self.real_ndit_sci)), ny, nx])
angles_1dvector = np.zeros([int(np.sum(self.real_ndit_sci))]) # empty array for derot angles, length of number of frames
if verbose:
print('Shifting frames and creating master science cube', flush=True)
for sc, fits_name in enumerate(self.sci_list):
tmp = open_fits(self.inpath+'4_sky_subtr_'+fits_name, verbose=debug) # opens science cube
            if crop_sz is not None:  # test crop_sz directly so this never hits an undefined 'crop'
tmp = cube_crop_frames(tmp, crop_sz, force=False, verbose=debug, full_output=False)
dim = int(self.real_ndit_sci[sc]) # gets the integer dimensions of this science cube
for dd in range(dim): # dd goes from 0 to the largest dimension
tmp_tmp[int(np.sum(self.real_ndit_sci[:sc]))+dd] = frame_shift(tmp[dd], shift_y=sy[sc], shift_x=sx[sc], imlib='vip-fft') # this line applies the shifts to all the science images in the cube the loop is currently on. it also converts all cubes to a single long cube by adding the first dd frames, then the next dd frames from the next cube and so on
angles_1dvector[int(np.sum(self.real_ndit_sci[:sc]))+dd] = self.derot_angles_cropped[sc][dd] # turn 2d rotation file into a vector here same as for the mastercube above
# sc*ndit+dd i don't think this line works for variable sized cubes
if debug:
get_available_memory()
print('Science cube number: {}'.format(sc+1), flush=True)
# write all the shifts
write_fits(self.outpath+'x_shifts.fits', sx, verbose=debug) # writes the x shifts to the file
write_fits(self.outpath+'y_shifts.fits', sy, verbose=debug) # writes the y shifts to the file
write_fits(self.outpath+'{}_master_cube.fits'.format(self.dataset_dict['source']), tmp_tmp, verbose=debug) # makes the master cube
write_fits(self.outpath+'derot_angles.fits', angles_1dvector, verbose=debug) # writes the 1D array of derotation angles
if verbose:
print('Shifts applied, master cube saved', flush=True)
del tmp_tmp, sx, sy, angles_1dvector
def bad_frame_removal(self, pxl_shift_thres=0.5, sub_frame_sz=31, verbose=True, debug=False, plot=True):
"""
For removing outlier frames often caused by AO errors. To be run after recentering is complete. Takes the
recentered mastercube and removes frames with a shift greater than a user defined pixel threshold in x or y above
the median shift. It then takes the median of those cubes and correlates them to the median combined mastercube.
Removes all those frames below the threshold from the mastercube and rotation file, then saves both as new files
for use in post processing
Parameters:
----------
pxl_shift_thres : float, in units of pixels. Default is 0.5 pixels.
Any shifts in the x or y direction greater than this threshold will cause the frame/s
to be labelled as bad and thus removed. May required a stricter threshold depending on the dataset
sub_frame_sz : integer, must be odd. Default is 31.
This sets the cropping during frame correlation to the median
debug : bool
Will show open and save messages for FITS files
plot : bool
Will write the correlation plot to file if True, False will not
"""
if verbose:
print('######### Beginning bad frame removal #########', flush=True)
if not sub_frame_sz % 2:
sub_frame_sz -= 1
print('WARNING: Bad frame sub image size not odd. Adjusted to {} px'.format(sub_frame_sz), flush=True)
angle_file = open_fits(self.outpath+'derot_angles.fits', verbose=debug) # opens the rotation file
recentered_cube = open_fits(self.outpath+'{}_master_cube.fits'.format(self.dataset_dict['source']), verbose=debug) # loads the master cube
# open x shifts file for the respective method
x_shifts = open_fits(self.outpath+"x_shifts.fits", verbose=debug)
median_sx = np.median(x_shifts) # median of x shifts
# opens y shifts file for the respective method
y_shifts = open_fits(self.outpath+"y_shifts.fits", verbose=debug)
median_sy = np.median(y_shifts) # median of y shifts
# self.ndit came from the z dimension of the first calibrated science cube above in recentering
# x_shifts_long = np.zeros([len(self.sci_list)*self.ndit]) # list with number of cubes times number of frames in each cube as the length
# y_shifts_long = np.zeros([len(self.sci_list)*self.ndit])
# long are shifts to be applied to each frame in each cube
x_shifts_long = np.zeros([int(np.sum(self.real_ndit_sci))])
y_shifts_long = np.zeros([int(np.sum(self.real_ndit_sci))])
for i in range(len(self.sci_list)): # from 0 to the length of sci_list
ndit = self.real_ndit_sci[i] # gets the dimensions of the cube
x_shifts_long[i*ndit:(i+1)*ndit] = x_shifts[i] # sets the average shifts of all frames in that cube
y_shifts_long[i*ndit:(i+1)*ndit] = y_shifts[i]
write_fits(self.outpath+'x_shifts_long.fits', x_shifts_long, verbose=debug) # saves shifts to file
write_fits(self.outpath+'y_shifts_long.fits', y_shifts_long, verbose=debug)
x_shifts = x_shifts_long
y_shifts = y_shifts_long
if verbose:
print("x shift median:", median_sx)
print("y shift median:", median_sy, flush=True)
bad = []
good = []
i = 0
shifts = list(zip(x_shifts, y_shifts))
bar = ProgBar(len(x_shifts), stream=1, title='Running pixel shift check...')
for sx, sy in shifts: # iterate over the shifts to find any greater or less than pxl_shift_thres pixels from median
if abs(sx) < ((abs(median_sx)) + pxl_shift_thres) and abs(sx) > ((abs(median_sx)) - pxl_shift_thres) and abs(sy) < ((abs(median_sy)) + pxl_shift_thres) and abs(sy) > ((abs(median_sy)) - pxl_shift_thres):
good.append(i)
else:
bad.append(i)
i += 1
bar.update()
# only keeps the files that weren't shifted above the threshold
frames_pxl_threshold = recentered_cube[good]
# only keeps the corresponding derotation entry for the frames that were kept
angle_pxl_threshold = angle_file[good]
del recentered_cube, angle_file
if verbose:
print('Frames within pixel shift threshold:', len(frames_pxl_threshold))
print('########### Median combining {} frames for correlation check... ###########'.format(
len(frames_pxl_threshold)), flush=True)
# makes array of good frames from the recentered mastercube
subarray = cube_crop_frames(frames_pxl_threshold, size=sub_frame_sz, verbose=verbose) # crops all the frames to a common size
frame_ref = np.nanmedian(subarray, axis=0) # median frame of remaining cropped frames, can be sped up with multi-processing
if verbose:
print('Running frame correlation check...', flush=True)
# calculates correlation threshold using the median of the Pearson correlation of all frames, minus 1 standard deviation
# frame_ref = frame_crop(tmp_median, size = sub_frame_sz, verbose=verbose) # crops the median of all frames to a common size
distances = cube_distance(subarray, frame_ref, mode='full', dist='pearson', plot=plot) # calculates the correlation of each frame to the median and saves as a list
if plot: # save a plot of distances compared to the median for each frame if set to 'save'
plt.savefig(self.outpath+'distances.pdf', format='pdf', bbox_inches='tight', pad_inches=0.1)
plt.close('all')
correlation_thres = np.median(distances) - np.std(distances) # threshold is the median of the distances minus one stddev
good_frames, bad_frames = cube_detect_badfr_correlation(subarray, frame_ref=frame_ref, dist='pearson',
threshold=correlation_thres, plot=plot, verbose=verbose)
if plot:
plt.savefig(self.outpath+'frame_correlation.pdf', format='pdf', bbox_inches='tight', pad_inches=0.1)
plt.close('all')
# only keeps the files that were above the correlation threshold
frames_threshold = frames_pxl_threshold[good_frames]
del frames_pxl_threshold
if verbose:
print('Frames within correlation threshold:', len(frames_threshold), flush=True)
# only keeps the derotation entries for the good frames above the correlation threshold
angle_threshold = angle_pxl_threshold[good_frames]
# saves the good frames to a new file, and saves the derotation angles to a new file
write_fits(self.outpath+'{}_master_cube.fits'.format(self.dataset_dict['source']), frames_threshold,
verbose=debug)
write_fits(self.outpath+'derot_angles.fits', angle_threshold, verbose=debug)
if verbose:
print('Saved good frames and their respective rotations to file', flush=True)
del frames_threshold
def crop_cube(self, arcsecond_diameter=3, verbose=True, debug=False):
"""
Crops frames in the master cube after recentering and bad frame removal. Recommended for post-processing ie.
PCA in concentric annuli. If the provided arcsecond diameter happens to be larger than the cropping provided in
recentering, no cropping will occur.
Parameters
----------
arcsecond_diameter : float or int
Size of the frames diameter in arcseconds. Default of 3" for NaCO corresponds to 111x111 (x,y) pixel frames.
Note this is a diameter, not a radius.
verbose : bool optional
If True extra messages of completion are shown.
debug : bool
Prints extra information during cropping, and when FITS are opened or saved.
Writes to FITS file
-------
cropped cube : numpy ndarray
Cube with cropped frames
"""
if not isfile(self.outpath+'{}_master_cube.fits'.format(self.dataset_dict['source'])):
raise NameError('Missing master cube from recentering and bad frame removal!')
master_cube = open_fits(self.outpath+'{}_master_cube.fits'.format(self.dataset_dict['source']),
verbose=debug)
_, ny, _ = master_cube.shape
crop_size = int(np.ceil(arcsecond_diameter / self.dataset_dict['pixel_scale'])) # rounds up
if not crop_size % 2:
crop_size += 1
print('Crop size not odd, increased to {}'.format(crop_size), flush=True)
if debug:
print('Input crop size is {} pixels'.format(crop_size), flush=True)
if crop_size >= ny:
print('Crop size is larger than the frame size. Skipping cropping...', flush=True)
else:
if verbose:
print('######### Running frame cropping #########', flush=True)
start_time = time_ini(verbose=False)
master_cube = cube_crop_frames(master_cube, crop_size, force=False, verbose=debug, full_output=False)
if verbose:
timing(start_time)
print('Cropping complete', flush=True)
write_fits(self.outpath + '{}_master_cube.fits'.format(self.dataset_dict['source']), master_cube,
verbose=debug)
del master_cube
def median_binning(self, binning_factor=10, verbose=True, debug=False):
"""
Median combines the frames within the master science cube as per the binning factor, and makes the necessary
changes to the derotation file. Temporal sub-sampling of data is useful to significantly reduce
post-processing computation time, however we risk using a temporal window that equates to the decorrelation
rate of the PSF. This is generally noticeable for separations beyond 0.5"
Parameters:
----------
binning_factor: int, default = 10
Defines how many frames to median combine
verbose : bool
Whether to print completion, timing and binning information
debug : bool
Prints when FITS files are opened and saved
Writes to FITS file:
----------
the binned master cube
the binned derotation angles
"""
if not isinstance(binning_factor, int) and not isinstance(binning_factor, list) and \
not isinstance(binning_factor, tuple): # if it isn't int, tuple or list then raise an error
raise TypeError('Invalid binning_factor! Use either int, list or tuple')
if not isfile(self.outpath+'{}_master_cube.fits'.format(self.dataset_dict['source'])):
raise NameError('Missing master cube from recentering and bad frame removal!')
if not isfile(self.outpath+'derot_angles.fits'):
raise NameError('Missing derotation angles files from recentering and bad frame removal!')
bin_fac = int(binning_factor) # ensure integer
if bin_fac != 1 and bin_fac != 0:
master_cube = open_fits(self.outpath + '{}_master_cube.fits'.format(self.dataset_dict['source']),
verbose=debug)
derot_angles = open_fits(self.outpath + 'derot_angles.fits', verbose=debug)
if verbose:
start_time = time_ini(verbose=False)
cube_bin, derot_angles_bin = cube_subsample(master_cube, n=bin_fac, mode="median", parallactic=derot_angles,
verbose=verbose)
if verbose:
timing(start_time) # prints how long median binning took
write_fits(self.outpath+'{}_master_cube.fits'.format(self.dataset_dict['source']), cube_bin,
verbose=debug)
write_fits(self.outpath+'derot_angles.fits', derot_angles_bin, verbose=debug)
del master_cube, derot_angles, cube_bin, derot_angles_bin
else:
print('Binning factor is {}, skipping binning...'.format(binning_factor), flush=True)
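# --- Hedged usage sketch (editor's addition, not part of the original file) ---
# The class is driven by a dataset_dict and the recentering options chosen above.
# The paths, source name and pixel scale below are placeholders, not values taken
# from the repository.
#
# if __name__ == '__main__':
#     dataset_dict = {'source': 'MySource', 'nproc': 4, 'pixel_scale': 0.02719}
#     calib = calib_dataset(inpath='calib/', outpath='preproc/',
#                           dataset_dict=dataset_dict,
#                           recenter_method='speckle', recenter_model='gauss')
#     calib.recenter(subi_size=41, crop_sz=251, plot=True)
#     calib.bad_frame_removal(pxl_shift_thres=0.5)
#     calib.crop_cube(arcsecond_diameter=3)
#     calib.median_binning(binning_factor=10)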
|
IainHammond/NACO_pipeline
|
naco_pip/NACO_preproc.py
|
NACO_preproc.py
|
py
| 25,286 |
python
|
en
|
code
| null |
github-code
|
6
|
[
{
"api_name": "matplotlib.use",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "vip_hci.fits.open_fits",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "vip_hci.fits.open_fits",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "vip_hci.fits.open_fits",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "vip_hci.fits.open_fits",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "pyprind.ProgBar",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "vip_hci.fits.open_fits",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "numpy.median",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "vip_hci.config.get_available_memory",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "vip_hci.fits.write_fits",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "vip_hci.fits.open_fits",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "vip_hci.config.get_available_memory",
"line_number": 138,
"usage_type": "call"
},
{
"api_name": "vip_hci.preproc.cube_recenter_via_speckles",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "vip_hci.config.get_available_memory",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "vip_hci.preproc.cube_recenter_2dfit",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "numpy.median",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "vip_hci.var.frame_center",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "vip_hci.preproc.frame_crop",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "hciplot.plot_frames",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "numpy.newaxis",
"line_number": 172,
"usage_type": "attribute"
},
{
"api_name": "vip_hci.preproc.cube_recenter_2dfit",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "numpy.repeat",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "numpy.repeat",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "vip_hci.config.get_available_memory",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "vip_hci.fits.open_fits",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "vip_hci.preproc.cube_crop_frames",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "vip_hci.preproc.frame_shift",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "vip_hci.config.get_available_memory",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "vip_hci.fits.write_fits",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "vip_hci.fits.write_fits",
"line_number": 218,
"usage_type": "call"
},
{
"api_name": "vip_hci.fits.write_fits",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "vip_hci.fits.write_fits",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "vip_hci.fits.open_fits",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "vip_hci.fits.open_fits",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "vip_hci.fits.open_fits",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "numpy.median",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "vip_hci.fits.open_fits",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "numpy.median",
"line_number": 262,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "vip_hci.fits.write_fits",
"line_number": 277,
"usage_type": "call"
},
{
"api_name": "vip_hci.fits.write_fits",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "pyprind.ProgBar",
"line_number": 291,
"usage_type": "call"
},
{
"api_name": "vip_hci.preproc.cube_crop_frames",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "numpy.nanmedian",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "vip_hci.stats.cube_distance",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 323,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 323,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 324,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 324,
"usage_type": "name"
},
{
"api_name": "numpy.median",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number": 325,
"usage_type": "call"
},
{
"api_name": "vip_hci.preproc.cube_detect_badfr_correlation",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 330,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 331,
"usage_type": "name"
},
{
"api_name": "vip_hci.fits.write_fits",
"line_number": 342,
"usage_type": "call"
},
{
"api_name": "vip_hci.fits.write_fits",
"line_number": 344,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "vip_hci.fits.open_fits",
"line_number": 373,
"usage_type": "call"
},
{
"api_name": "numpy.ceil",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "vip_hci.config.time_ini",
"line_number": 390,
"usage_type": "call"
},
{
"api_name": "vip_hci.preproc.cube_crop_frames",
"line_number": 391,
"usage_type": "call"
},
{
"api_name": "vip_hci.config.timing",
"line_number": 393,
"usage_type": "call"
},
{
"api_name": "vip_hci.fits.write_fits",
"line_number": 395,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 425,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 428,
"usage_type": "call"
},
{
"api_name": "vip_hci.fits.open_fits",
"line_number": 433,
"usage_type": "call"
},
{
"api_name": "vip_hci.fits.open_fits",
"line_number": 435,
"usage_type": "call"
},
{
"api_name": "vip_hci.config.time_ini",
"line_number": 437,
"usage_type": "call"
},
{
"api_name": "vip_hci.preproc.cube_subsample",
"line_number": 438,
"usage_type": "call"
},
{
"api_name": "vip_hci.config.timing",
"line_number": 441,
"usage_type": "call"
},
{
"api_name": "vip_hci.fits.write_fits",
"line_number": 442,
"usage_type": "call"
},
{
"api_name": "vip_hci.fits.write_fits",
"line_number": 444,
"usage_type": "call"
}
] |
3490973159
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 31 00:40:46 2020
@author: Rashidul hasan (student id-1512027)
department of naval architecture and marine engineering
Bangladesh University of Engineering and Technology
By using this module we can see our desired design, which is created by using the design module
"""
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse.linalg import spsolve
from matplotlib import colors
import matplotlib.pyplot as plt
class design_view:
def __init__(self,x,nelx,nely):
self.x=x
self.nelx=nelx
self.nely=nely
#x=volfrac * np.ones((nely*nelx),dtype=float)
xPhys=x.copy()
v=-xPhys.reshape((nelx,nely)).T
plt.ion() # Ensure that redrawing is possible
fig,ax = plt.subplots()
im = ax.imshow(v, cmap='gray',\
interpolation='none',norm=colors.Normalize(vmin=-1,vmax=0))
fig.show()
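# Hedged usage sketch (editor's addition): for a 60x20 element domain filled at a
# uniform volume fraction of 0.4, the viewer can be exercised as below. The domain
# size and volume fraction are illustrative, not values taken from the repository.
# x0 = 0.4 * np.ones(60 * 20, dtype=float)
# design_view(x0, 60, 20)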
|
rashedhasan007/A-topology-and-optimisation-software-
|
A-topology-and-optimisation-software--main/view.py
|
view.py
|
py
| 954 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "matplotlib.pyplot.ion",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "matplotlib.colors.Normalize",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "matplotlib.colors",
"line_number": 29,
"usage_type": "name"
}
] |
18110173657
|
from django.contrib import admin
from django.urls import path, include, re_path as url
# 스웨거 설정
from rest_framework.permissions import AllowAny
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from django.conf import settings
from django.conf.urls.static import static
# 스웨거 설정
schema_url_patterns = [
path('api/user/', include('user.urls')),
path('api/user/', include('allauth.urls')),
]
schema_view_v1 = get_schema_view(
openapi.Info(
title="drfLogin Test API",
default_version='v1',
description="Development drfLogin Test Document",
terms_of_service="https://www.google.com/policies/terms/",
),
public=True,
permission_classes=(AllowAny,),
patterns=schema_url_patterns,
)
urlpatterns = [
path('admin/', admin.site.urls),
path('api/user/', include('user.urls')),
path('api/user/', include('allauth.urls')),
path('blog/', include('blog.urls')),
]
if settings.DEBUG:
urlpatterns += [
# Auto DRF API docs
url(r'^swagger(?P<format>\.json|\.yaml)$', schema_view_v1.without_ui(cache_timeout=0), name='schema-json'),
url(r'^swagger/$', schema_view_v1.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
url(r'^redoc/$', schema_view_v1.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
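# Note (editor's addition): the Swagger/Redoc documentation routes above are only
# registered when settings.DEBUG is True, so a production deployment exposes just
# the admin, user, allauth and blog URL patterns plus the media files mapping.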
|
Kim-Link/drfLogin
|
drfLogin/drfLogin/urls.py
|
urls.py
|
py
| 1,437 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.urls.include",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.urls.include",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "drf_yasg.views.get_schema_view",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi.Info",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "drf_yasg.openapi",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "rest_framework.permissions.AllowAny",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "django.urls.include",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "django.urls.include",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "django.urls.include",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.DEBUG",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "django.urls.re_path",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "django.urls.re_path",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.static.static",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.MEDIA_URL",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.MEDIA_ROOT",
"line_number": 46,
"usage_type": "attribute"
}
] |
4769430747
|
#!/usr/bin/env python
import sys
import glob, os
import argparse
def insert_track_id(label_file, track_ids):
labels_with_track = []
with open(label_file, 'r') as yolo_f:
labels = yolo_f.readlines()
for i, label in enumerate(labels):
split_label = label.split()
if len(split_label) < 6:
split_label.insert(1, track_ids[i]) # Insert track ID into label
else:
print(f'{label_file} should have track ID already')
labels_with_track.append(' '.join(split_label) + '\n')
with open(label_file, 'w') as yolo_f:
yolo_f.writelines(labels_with_track)
def main(args):
mot_labels_path = os.path.join(args.mot_jde_dir, 'labels_with_ids')
yolo_train_labels_path = os.path.join(args.yolo_dir, 'obj_train_data')
yolo_valid_labels_path = os.path.join(args.yolo_dir, 'obj_valid_data')
for label_file in glob.glob(os.path.join(mot_labels_path, '*')):
track_ids = []
# Format: [class] [track_id] [x] [y] [width] [height]
with open(label_file, 'r') as mot_f:
mot_labels = mot_f.readlines()
for label in mot_labels:
track_ids.append(label.split()[1])
label_filename = os.path.splitext(os.path.basename(label_file))[0]
task_id = label_filename[:-6]
frame_id = label_filename[-6:]
yolo_label_filename = f'{task_id}_{frame_id}.txt'
train_label = os.path.join(yolo_train_labels_path, yolo_label_filename)
valid_label = os.path.join(yolo_valid_labels_path, yolo_label_filename)
if os.path.exists(train_label):
assert not os.path.exists(valid_label)
insert_track_id(train_label, track_ids)
elif os.path.exists(valid_label):
insert_track_id(valid_label, track_ids)
else:
print(f'label file {yolo_label_filename} not found. Skipping...')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Transcribe track IDs to yolo format from MOT JDE format.')
parser.add_argument('mot_jde_dir')
parser.add_argument('yolo_dir')
args = parser.parse_args()
main(args)
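# Hypothetical invocation (editor's addition; the directory names are placeholders):
#   python scribe_yolo_track.py path/to/mot_jde_dataset path/to/yolo_dataset
# This copies the track ID column from each MOT JDE label file into the matching
# YOLO label file under obj_train_data/ or obj_valid_data/.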
|
Salmon-Computer-Vision/salmon-computer-vision
|
utils/scribe_yolo_track.py
|
scribe_yolo_track.py
|
py
| 2,022 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "os.path.join",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 53,
"usage_type": "call"
}
] |
20825994964
|
import json
from pandas import DataFrame
import pandas as pd
import requests
import emails
file_name = 'teste.csv'
def getJson():
    r = requests.get('https://api.biscoint.io/v1/ticker?base=BTC&quote=BRL')
df_new = pd.DataFrame()
df = pd.DataFrame(json.loads(r.text))
date = pd.Timestamp.date(pd.Timestamp(
df['data']['timestamp'], tz='America/Fortaleza'))
time = pd.Timestamp.time(pd.Timestamp(
df['data']['timestamp'], tz='America/Fortaleza')).strftime('%H:%M:%S')
df_new['ask'] = [df['data']['ask']]
df_new['bid'] = [df['data']['bid']]
df_new['high'] = [df['data']['high']]
df_new['last'] = [df['data']['last']]
df_new['low'] = [df['data']['low']]
df_new['vol'] = [df['data']['vol']]
df_new['date'] = [date]
df_new['time'] = [time]
last = df_new['last'][0]
low = df_new['low'][0]
diff = (1-(last / low))
if diff > 0.04:
high = df_new['high'][0]
emails.send_email(last, low, diff, high)
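    # Note (editor's addition): header=f.tell() == 0 writes the CSV header only when
    # the file is still empty, so repeated calls append rows without duplicating it.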
with open(file_name, 'a') as f:
df_new.to_csv(f, header=f.tell() == 0)
if __name__ == '__main__':
# testar()
getJson()
|
HumbertoLimaa/mysite
|
utils.py
|
utils.py
|
py
| 1,135 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.Timestamp.date",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.Timestamp",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pandas.Timestamp.time",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pandas.Timestamp",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "emails.send_email",
"line_number": 35,
"usage_type": "call"
}
] |
29432109275
|
from collections import defaultdict, Counter
class Solution:
def groupAnagrams(self, strs):
ana_dict = defaultdict(list)
for s in strs:
# ana_dict[tuple(sorted(Counter(s)))].append(s)
count = [0]*26
for c in s:
count[ord(c)-ord('a')] += 1
ana_dict[tuple(count)].append(s)
return ana_dict.values()
solver=Solution()
strs = ["ddddddddddg","dgggggggggg"]
print(solver.groupAnagrams(strs))
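# Expected output (editor's addition): the two strings have different letter counts
# (ten 'd'/one 'g' versus one 'd'/ten 'g'), so they fall into separate groups:
# dict_values([['ddddddddddg'], ['dgggggggggg']])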
|
mintaewon/coding_leetcode
|
0909/P53_hoin.py
|
P53_hoin.py
|
py
| 478 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.defaultdict",
"line_number": 4,
"usage_type": "call"
}
] |
25033146898
|
import decimal
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.db import IntegrityError
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from annoying.functions import get_object_or_None
from .forms import ListingForm
from .models import User, Listing, Bid, Comment, Category
def login_view(request):
if request.method == "POST":
# Attempt to sign user in
username = request.POST["username"]
password = request.POST["password"]
user = authenticate(request, username=username, password=password)
# Check if authentication successful
if user is not None:
login(request, user)
return HttpResponseRedirect(reverse("auctions:index"))
return render(request, "auctions/login.html", {
"message": "Invalid username and/or password."
})
return render(request, "auctions/login.html")
def logout_view(request):
logout(request)
return HttpResponseRedirect(reverse("auctions:index"))
def register(request):
if request.method == "POST":
username = request.POST["username"]
email = request.POST["email"]
# Ensure password matches confirmation
password = request.POST["password"]
confirmation = request.POST["confirmation"]
if password != confirmation:
return render(request, "auctions/register.html", {
"message": "Passwords must match."
})
# Attempt to create new user
try:
user = User.objects.create_user(username, email, password)
user.save()
except IntegrityError:
return render(request, "auctions/register.html", {
"message": "Username already taken."
})
login(request, user)
return HttpResponseRedirect(reverse("auctions:index"))
return render(request, "auctions/register.html")
def index(request):
listings = Listing.objects.filter(active=True)
# get highest price if bids exist
for listing in listings:
# starting with starting price
highest_bid = listing.starting_price
bids = listing.listing_bids.all()
if bids:
# find max of bid amounts
highest_bid = max(bid.amount for bid in bids)
setattr(listing, "price", highest_bid)
return render(request, "auctions/index.html", {
"listings": listings,
})
def get_listing(request, listing_id):
listing_obj = get_object_or_None(Listing, id=listing_id)
if listing_obj is None:
return render(request, "auctions/not_found.html", {
"errMsg": "Listing Not Found"
})
# get all necessary data for listing page
bids = listing_obj.listing_bids.all()
comments = listing_obj.listing_comments.all()
# preset data
user = None
user_owned = False
watched_items = None
highest_bid_amount = listing_obj.starting_price
minimum_bid_amount = listing_obj.starting_price
user_highest_bid = False
# if there is a current user,
# determine if listing in user watchlist
if request.user.is_authenticated:
user = User.objects.get(username=request.user)
watched_items = user.watched_items.all()
# determine if listing belongs to current user
if user == listing_obj.owner:
user_owned = True
if bids.count():
# get bid object with highest amount
highest_bid = bids.order_by("-amount").first()
highest_bid_amount = highest_bid.amount
# set the minimum value for the next future bid
minimum_bid_amount = highest_bid.amount + decimal.Decimal(0.01)
# determine if the current user is the current highest bidder
if highest_bid.bidder == user:
user_highest_bid = True
return render(request, "auctions/listing.html", {
"listing": listing_obj,
"user_owned": user_owned,
"bids": bids,
"comments": comments,
"category": listing_obj.category,
"watchedItems": watched_items,
"minimum_bid": minimum_bid_amount,
"current_price": highest_bid_amount,
"user_highest_bid": user_highest_bid
})
def category_list(request):
categories = Category.objects.all()
return render(request, "auctions/category_list.html", {
"categories": categories
})
def category_filter(request, name):
cat_obj = get_object_or_None(Category, name=name)
if cat_obj is not None:
return render(request, "auctions/category_results.html", {
"category": cat_obj,
"listings": cat_obj.listings.all(),
})
return render(request, "auctions/not_found.html", {
"errMsg": "Category Not Found"
})
@login_required
def get_watchlist(request, username):
user = User.objects.get(username=username)
watched_items = user.watched_items.all()
return render(request, "auctions/watchlist.html", {
"listings": watched_items,
"watchedItems": watched_items
})
@login_required
def toggle_watchlist_listing(request):
if request.method == "POST":
user = User.objects.get(username=request.POST["username"])
try:
listing = user.watched_items.get(id=request.POST["listing_id"])
except Listing.DoesNotExist:
listing = None
if listing:
# if listing exists in the user's watched items, remove it
user.watched_items.remove(listing)
else:
# otherwise, add it
listing = Listing.objects.get(id=request.POST["listing_id"])
user.watched_items.add(listing)
HttpResponseRedirect(
reverse("auctions:listing",
kwargs={"listing_id": request.POST["listing_id"]}))
return HttpResponseRedirect(reverse("auctions:index"))
@ login_required
def new_listing(request):
if request.method == "POST":
listing = ListingForm(request.POST)
if listing.is_valid():
listing_obj = listing.save(commit=False)
user = User.objects.get(username=request.user)
listing_obj.owner = user
listing_obj.active = True
listing_obj.save()
return index(request)
return HttpResponseRedirect(reverse("auctions:new_listing"))
# get method for new listing
form = ListingForm()
return render(request, "auctions/new_listing.html", {
"form": form
})
@ login_required
def close_listing(request):
if request.method == "POST":
listing_id = request.POST["listing_id"]
listing_obj = get_object_or_None(Listing, id=listing_id)
if listing_obj:
listing_obj.active = False
listing_obj.save()
HttpResponseRedirect(
reverse("auctions:listing",
kwargs={"listing_id": request.POST["listing_id"]}))
@ login_required
def bid_on_listing(request):
if request.method == "POST":
user = User.objects.get(username=request.POST["username"])
listing_id = request.POST["listing_id"]
listing_obj = get_object_or_None(Listing, id=listing_id)
if listing_obj:
# only allow users who do not own listing to bid
if user != listing_obj.owner:
new_bid_price = request.POST["new_bid"]
bids = listing_obj.listing_bids.all()
# starting highest bid is just the starting price of listing
highest_bid = listing_obj.starting_price
if bids:
highest_bid = max(bid.amount for bid in bids)
# complicated checkpoint: allow the new bid to be created if:
# there are bids and the new bid is higher than the previous
# highest bid
# or there are no bids and the new bid is at least the amount
# of the starting price
if ((bids and decimal.Decimal(new_bid_price) > highest_bid) or
(not bids and decimal.Decimal(new_bid_price)
>= highest_bid)):
# create new bid object associated with listing
new_bid_obj = Bid(bidder=user,
bid_listing=listing_obj,
amount=new_bid_price)
new_bid_obj.save()
HttpResponseRedirect(
reverse("auctions:listing",
kwargs={"listing_id": request.POST["listing_id"]}))
return HttpResponseRedirect(reverse("auctions:index"))
@ login_required
def comment_on_listing(request):
if request.method == "POST":
user = User.objects.get(username=request.POST["username"])
listing_id = request.POST["listing_id"]
listing_obj = get_object_or_None(Listing, id=listing_id)
if listing_obj:
# create new comment associated with listing
new_comment = request.POST["new_comment"]
new_comment_obj = Comment(commenter=user,
com_listing=listing_obj,
text=new_comment)
new_comment_obj.save()
return HttpResponseRedirect(
reverse("auctions:listing",
kwargs={"listing_id": request.POST["listing_id"]}))
return HttpResponseRedirect(reverse("auctions:index"))
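# Note (editor's addition): bid_on_listing accepts a bid only if it strictly exceeds
# the current highest bid, or, when no bids exist yet, if it is at least the starting
# price; get_listing therefore advertises highest bid + 0.01 as the next valid amount.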
|
csloan29/HES-e-33a-web-django
|
commerce/auctions/views.py
|
views.py
|
py
| 9,574 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.contrib.auth.authenticate",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.logout",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "models.User.objects.create_user",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "models.User.objects",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "django.db.IntegrityError",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.login",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "models.Listing.objects.filter",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "models.Listing.objects",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "models.Listing",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "annoying.functions.get_object_or_None",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "models.Listing",
"line_number": 84,
"usage_type": "argument"
},
{
"api_name": "django.shortcuts.render",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "models.User.objects.get",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "models.User.objects",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "decimal.Decimal",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "models.Category.objects.all",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "models.Category.objects",
"line_number": 136,
"usage_type": "attribute"
},
{
"api_name": "models.Category",
"line_number": 136,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "annoying.functions.get_object_or_None",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "models.Category",
"line_number": 143,
"usage_type": "argument"
},
{
"api_name": "django.shortcuts.render",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "models.User.objects.get",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "models.User.objects",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 156,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 154,
"usage_type": "name"
},
{
"api_name": "models.User.objects.get",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "models.User.objects",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "models.Listing.DoesNotExist",
"line_number": 170,
"usage_type": "attribute"
},
{
"api_name": "models.Listing",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "models.Listing.objects.get",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "models.Listing.objects",
"line_number": 177,
"usage_type": "attribute"
},
{
"api_name": "models.Listing",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "forms.ListingForm",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "models.User.objects.get",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "models.User.objects",
"line_number": 191,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "forms.ListingForm",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 185,
"usage_type": "name"
},
{
"api_name": "annoying.functions.get_object_or_None",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "models.Listing",
"line_number": 208,
"usage_type": "argument"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 204,
"usage_type": "name"
},
{
"api_name": "models.User.objects.get",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "models.User.objects",
"line_number": 220,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 220,
"usage_type": "name"
},
{
"api_name": "annoying.functions.get_object_or_None",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "models.Listing",
"line_number": 222,
"usage_type": "argument"
},
{
"api_name": "decimal.Decimal",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "models.Bid",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 217,
"usage_type": "name"
},
{
"api_name": "models.User.objects.get",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "models.User.objects",
"line_number": 254,
"usage_type": "attribute"
},
{
"api_name": "models.User",
"line_number": 254,
"usage_type": "name"
},
{
"api_name": "annoying.functions.get_object_or_None",
"line_number": 256,
"usage_type": "call"
},
{
"api_name": "models.Listing",
"line_number": 256,
"usage_type": "argument"
},
{
"api_name": "models.Comment",
"line_number": 260,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 264,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 265,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "django.urls.reverse",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 251,
"usage_type": "name"
}
] |
43291543351
|
import math
import os
import cv2
from ultralytics import YOLO
from people import People
from car import Car
video = os.path.join('.', 'videos', 'Casa-Ch.mp4')
video_cap = cv2.VideoCapture(video)
fps = video_cap.get(cv2.CAP_PROP_FPS)
pixels = int((24/fps)*15)
ret, frame = video_cap.read()
altura, largura, canais = frame.shape
model = YOLO("yolov8n.pt")
carro = None
persons = []
personsT = []
frameCount = 0
detection_threshold = 0.7
flag = False
centerParkX = (215 + 506) / 2
centerParkY = (89 + 380) / 2
stopedCars = []
def tracking():
flag_2 = False
for i in range(len(persons)):
dist = persons[i].getdistance(bcenterX, bcenterY, frameCount, fps)
if not flag_2 and dist < pixels:
boxpeople = frame[y1:y2, x1:x2]
persons[i].compare_bouding(boxpeople)
persons[i].set_codinates(x1, x2, y1, y2)
persons[i].set_lastframe(frameCount)
persons[i].reverse_track()
flag_2 = True
if not flag_2 and len(persons) < pessoas:
boundingboxpeople = frame[y1:y2, x1:x2]
person1 = People(boundingboxpeople, x1, x2, y1, y2, frameCount)
persons.append(person1)
for cod in range(len(persons)):
if persons[cod].get_tracking():
org = (persons[cod].get_cx(), persons[cod].get_cy() - 7)
persons[cod].reverse_track()
cv2.circle(frame, (bcenterX, bcenterY), 5, (0, 255, 0), -1)
cv2.putText(frame, str(cod), org, 0, 1, (0, 0, 255), 2)
while ret:
frameCount += 1
ret, frame = video_cap.read()
frame = cv2.resize(frame, (640, 480))
results = model(frame)
for result in results:
pessoas = sum(1 for elemento in result.boxes.data.tolist() if elemento[-1] == 0.0)
for r in result.boxes.data.tolist():
x1, y1, x2, y2, score, class_id = r
x1 = int(x1)
y1 = int(y1)
x2 = int(x2)
y2 = int(y2)
class_id = int(class_id)
bcenterX = int((x1 + x2)/2)
bcenterY = int((y1 + y2)/2)
flag = math.hypot(centerParkX - (int(x1 + x2) / 2), centerParkY - (int(y1 + y2) / 2)) < 30
for rmv in range(len(persons)):
if persons[rmv].check_lost_track(fps, frameCount):
personsT.append(persons.pop(rmv))
personsT[len(personsT)-1].extract_caracteristcs()
''' if class_id == 2 and carro is not None and not flag:
carro = None'''
if class_id == 2 and carro is None and flag:
carro = Car(frame[y1:y2, x1:x2], frameCount, bcenterX, bcenterY)
else:
if carro is not None:
if carro.getStopedTime(fps, frameCount) >= 10 and not carro.get_alerted():
if carro.get_alerted():
stopedCars.append(carro)
carro.viewimage(bcenterX, bcenterY)
if class_id == 0:
#cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (255, 255, 255), 3)
if frameCount < 1:
boundingBoxPeople = frame[y1:y2, x1:x2]
person = People(boundingBoxPeople, x1, x2, y1, y2, frameCount)
persons.append(person)
else:
tracking()
cv2.imshow('Camera', frame)
cv2.waitKey(1)
video_cap.release()
cv2.destroyAllWindows()
|
serjetus/Projeto
|
src/main.py
|
main.py
|
py
| 3,473 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.path.join",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.CAP_PROP_FPS",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "ultralytics.YOLO",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "people.People",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "cv2.circle",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "cv2.putText",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "math.hypot",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "car.Car",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "people.People",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 99,
"usage_type": "call"
}
] |
70835853948
|
import csv
import argparse
import os
import sys
import numpy as np
import torch
import torch.cuda
from PIL import Image
from torch.autograd import Variable
from torchvision.transforms import transforms
from my.yolov3.easy.net.load_net import load_net
from PIL import Image
image_size = (96, 96)
test_transformations = transforms.Compose([
transforms.ToTensor()
])
def load_trained_net(model_path):
print("Begin to load pre-trained net ... ", end="")
net = load_net("resnet152")
checkpoint = torch.load(model_path)
net.load_state_dict(checkpoint['state_dict'])
net.eval()
print("Finished.")
return net
def predict(net, ims: list):
# Define transformations for the image
transformation = test_transformations
images_tensor_list = []
for im in ims:
        w = max(im.size)  # width of the square (longest side)
        im = im.crop((0, 0, w, w)).resize(image_size)  # pad to a square, then resize
image = np.asarray(im)
image_tensor = transformation(image)
images_tensor_list.append(image_tensor)
images_tensor = torch.stack(images_tensor_list)
if torch.cuda.is_available():
images_tensor.cuda()
    # wrap the input batch in a Variable
input = Variable(images_tensor)
    # predict the class of each image
output = net(input)
index = output.data.numpy().argmax(axis=1)
return index + 1 # [0, C-1] -> [1, C]
if __name__ == '__main__':
net = load_trained_net("model/model-87-8.477896466274615e-05.pth")
image_paths = ["../data/images/0a0bf7bc-e0d7-4f20-abec-039136663d85.jpg",
"../data/images/0a0c27d7-2e2a-4817-a715-8182cf07ec9b.jpg",
"../data/images/0a00c2a3-a498-452a-ba88-6b9ef514e201.jpg",
"../data/images/0a1a5d35-1b30-43ff-87bc-9acdab1567c1.jpg"]
ims = []
for image_path in image_paths:
im = Image.open(image_path)
ims.append(im)
results = predict(net, ims)
print(results)
|
NJUCoders/commodity-classification-hard
|
easy/predict.py
|
predict.py
|
py
| 1,931 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torchvision.transforms.transforms.Compose",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.transforms",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.transforms.ToTensor",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.transforms",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "my.yolov3.easy.net.load_net.load_net",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "torch.stack",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 70,
"usage_type": "name"
}
] |
32563261250
|
"""
HTTP endpoints for `station_store`
"""
from fastapi import HTTPException, status
from screfinery import schema
from screfinery.crud_routing import EndpointsDef, RouteDef, \
crud_router_factory
from screfinery.stores import station_store
from screfinery.util import is_user_authorized
def authorize(user, scope, item=None):
"""
Station resource isn't owned by anyone, so don't check ownership with user
"""
if not is_user_authorized(user, scope):
raise HTTPException(status.HTTP_403_FORBIDDEN)
station_routes = crud_router_factory(
station_store,
EndpointsDef(
list=RouteDef(
request_model=None,
response_model=schema.ListResponse[schema.Station],
authorize=authorize,
),
read=RouteDef(
request_model=None,
response_model=schema.Station,
authorize=authorize,
),
create=RouteDef(
request_model=schema.StationCreate,
response_model=schema.Station,
authorize=authorize,
),
update=RouteDef(
request_model=schema.StationUpdate,
response_model=schema.Station,
authorize=authorize,
),
delete=RouteDef(
request_model=None,
response_model=None,
authorize=authorize,
)
)
)
|
fre-sch/sc-refinery-api
|
screfinery/routes/station.py
|
station.py
|
py
| 1,371 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "screfinery.util.is_user_authorized",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "fastapi.HTTPException",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "fastapi.status.HTTP_403_FORBIDDEN",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "fastapi.status",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "screfinery.crud_routing.crud_router_factory",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "screfinery.stores.station_store",
"line_number": 22,
"usage_type": "argument"
},
{
"api_name": "screfinery.crud_routing.EndpointsDef",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "screfinery.crud_routing.RouteDef",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "screfinery.schema.ListResponse",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "screfinery.schema",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "screfinery.schema.Station",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "screfinery.crud_routing.RouteDef",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "screfinery.schema.Station",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "screfinery.schema",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "screfinery.crud_routing.RouteDef",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "screfinery.schema.StationCreate",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "screfinery.schema",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "screfinery.schema.Station",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "screfinery.schema",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "screfinery.crud_routing.RouteDef",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "screfinery.schema.StationUpdate",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "screfinery.schema",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "screfinery.schema.Station",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "screfinery.schema",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "screfinery.crud_routing.RouteDef",
"line_number": 44,
"usage_type": "call"
}
] |
27568079162
|
from sys import platform
from pathlib import Path
from clang.cindex import Config
# -- Project information -----------------------------------------------------
project = 'zenoh-pico'
copyright = '2017, 2022 ZettaScale Technology Inc'
author = 'ZettaScale Zenoh team'
release = '0.11.0.0'
# -- General configuration ---------------------------------------------------
master_doc = 'index'
extensions = ['sphinx_c_autodoc', 'sphinx_c_autodoc.napoleon']
language = 'c'
c_autodoc_roots = ['../include/zenoh-pico/api/']
# -- Options for HTML output -------------------------------------------------
html_theme = 'sphinx_rtd_theme'
breathe_debug_trace_directives = True
if platform == "darwin":
LIBCLANG_FILE = Path("/Library/Developer/CommandLineTools/usr/lib/libclang.dylib")
LIBCLANG_CELLAR = Path("/usr/local/Cellar/llvm/14.0.6/lib/libclang.dylib")
if LIBCLANG_FILE.is_file():
Config.set_library_file(LIBCLANG_FILE)
elif LIBCLANG_CELLAR.is_file():
Config.set_library_file(LIBCLANG_CELLAR)
else:
raise ValueError(f"libclang not found. \nTried: \n {LIBCLANG_FILE}\n {LIBCLANG_CELLAR}")
elif platform == "win32":
raise ValueError("Windows not supported yet for building docs.")
else:
Config.set_library_file('/usr/lib/llvm-14/lib/libclang.so.1') # Required for readthedocs
|
eclipse-zenoh/zenoh-pico
|
docs/conf.py
|
conf.py
|
py
| 1,328 |
python
|
en
|
code
| 63 |
github-code
|
6
|
[
{
"api_name": "sys.platform",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "clang.cindex.Config.set_library_file",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "clang.cindex.Config",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "clang.cindex.Config.set_library_file",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "clang.cindex.Config",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "sys.platform",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "clang.cindex.Config.set_library_file",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "clang.cindex.Config",
"line_number": 36,
"usage_type": "name"
}
] |