seq_id
string | text
string | repo_name
string | sub_path
string | file_name
string | file_ext
string | file_size_in_byte
int64 | program_lang
string | lang
string | doc_type
string | stars
int64 | dataset
string | pt
string | api
list |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
21705447470
|
from glob import glob
from os import makedirs
from shutil import copy2
from tqdm import tqdm
SINGER = 'mixed'
RELEASE_DIR = 'release/mixed_---'
PATH_QUESTION = 'conf/jp_qst001_nnsvs_simple_4-4_mix.hed'
NAME_EXPERIMENT = 'simple_4-4_mix'
def copy_question(path_question, release_dir):
"""
hedファイル(question)をコピー
"""
makedirs(f'{release_dir}/conf', exist_ok=True)
print('copying question')
copy2(path_question, f'{release_dir}/{path_question}')
def copy_scaler(singer, release_dir):
"""
dumpフォルダにあるファイルをコピー
"""
makedirs(f'{release_dir}/dump/{singer}/norm', exist_ok=True)
list_path_scaler = glob(f'dump/{singer}/norm/*_scaler.joblib')
print('copying scaler')
for path_scaler in tqdm(list_path_scaler):
copy2(path_scaler, f'{release_dir}/{path_scaler}')
def copy_model(singer, name_exp, release_dir):
"""
name_exp: 試験のID
"""
name_exp = singer + '_' + name_exp
makedirs(f'{release_dir}/exp/{name_exp}/acoustic', exist_ok=True)
makedirs(f'{release_dir}/exp/{name_exp}/duration', exist_ok=True)
makedirs(f'{release_dir}/exp/{name_exp}/timelag', exist_ok=True)
list_path_model = glob(f'exp/{name_exp}/*/best_loss.pth')
list_path_model += glob(f'exp/{name_exp}/*/latest.pth')
list_path_model += glob(f'exp/{name_exp}/*/model.yaml')
print('copying model')
for path_model in tqdm(list_path_model):
copy2(path_model, f'{release_dir}/{path_model}')
def main():
"""
各種ファイルをコピーする
"""
copy_question(PATH_QUESTION, RELEASE_DIR)
copy_scaler(SINGER, RELEASE_DIR)
copy_model(SINGER, NAME_EXPERIMENT, RELEASE_DIR)
if __name__ == '__main__':
main()
|
oatsu-gh/nnsvs_mixed_db
|
recipe/00-svs-world/make_it_for_release.py
|
make_it_for_release.py
|
py
| 1,761 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.makedirs",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "shutil.copy2",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "shutil.copy2",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "shutil.copy2",
"line_number": 48,
"usage_type": "call"
}
] |
29214803056
|
from django.views.generic.simple import direct_to_template
from django.db.models import get_app, get_models, get_model
from django.http import HttpResponse
from django.template.loader import render_to_string
from django.forms import ModelForm
from django.forms.models import modelform_factory
from django.forms import ModelForm, Textarea, TextInput,HiddenInput
def index(request,app):
if request.is_ajax():
model = get_model(app, request.GET.get('model'))
for field in model._meta.fields:
if field.get_internal_type()=='DateField':
wdg={field.get_attname(): TextInput(attrs={'class': 'datepicker1'})}
else:
wdg=None
form=modelform_factory(model,widgets=wdg)#{'date_joined': TextInput(attrs={'class': 'datepicker1'})})
id=None
f = form(request.POST or None,request.FILES or None,instance=id and model.objects.get(id=id))
if request.method == 'POST' and f.is_valid() :
f.save()
data=[]
data = model.objects.all().values_list()
fields = [field.verbose_name for field in model._meta.fields]
html = render_to_string('table.html', {'data': data,'fields':fields,'form':form,'model':model,'modelnm':model.__name__})
return HttpResponse(html)
else:
list_models = []
for model in get_models(get_app(app)):
list_models.append([model.__name__, model._meta.verbose_name_plural])
return direct_to_template(request, 'index.html', {'models': list_models,})
|
dest81/test_dm
|
dynamic_models/views.py
|
views.py
|
py
| 1,631 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "django.db.models.get_model",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.forms.TextInput",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.forms.models.modelform_factory",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.template.loader.render_to_string",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.db.models.get_models",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "django.db.models.get_app",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "django.views.generic.simple.direct_to_template",
"line_number": 31,
"usage_type": "call"
}
] |
74118290427
|
import os
import sys
import yt_dlp
from tqdm import tqdm
def download_videos(links_file):
if not os.path.exists(links_file):
print("Error: The links file '{}' does not exist.".format(links_file))
return
ydl_opts = {
'format': 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best',
'outtmpl': '%(autonumber)s - %(title)s.%(ext)s',
'merge_output_format': 'mp4',
}
with open(links_file, 'r') as f:
video_links = f.read().splitlines()
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
for link in video_links:
try:
info_dict = ydl.extract_info(link, download=True)
print("\n{}: Download completed!".format(info_dict['title']))
except Exception as e:
print("\nError downloading video: {}".format(e))
continue
if __name__ == "__main__":
links_file = "links.txt"
download_videos(links_file)
|
vishnu012/Personal-Scripts
|
pydownload/downloader.py
|
downloader.py
|
py
| 961 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.path.exists",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "yt_dlp.YoutubeDL",
"line_number": 20,
"usage_type": "call"
}
] |
8209601687
|
import os
import pytest
import requests_mock
from newsApi.models.request import EverythingRequestModel, Language, TopHeadlinesRequestModel, SourcesRequestModel
from newsApi.models.response import EverythingResponseModel, TopHeadlinesResponseModel, SourcesResponseModel
from newsApi.service import NewsAPIService
@pytest.fixture
def news_api_service():
return NewsAPIService(os.environ.get('NEWS_API_KEY'))
def test_everything(news_api_service):
request_model = EverythingRequestModel(q="test")
with requests_mock.Mocker() as m:
m.get('https://newsapi.org/v2/everything', json={"status": "ok", "totalResults": 10, "articles": []})
response = news_api_service.everything(request_model)
assert isinstance(response, EverythingResponseModel)
assert response.status == 'ok'
assert response.totalResults == 10
def test_top_headlines(news_api_service):
request_model = TopHeadlinesRequestModel(q="test")
with requests_mock.Mocker() as m:
m.get('https://newsapi.org/v2/top-headlines', json={"status": "ok", "totalResults": 10, "articles": []})
response = news_api_service.top_headlines(request_model)
assert isinstance(response, TopHeadlinesResponseModel)
assert response.status == 'ok'
assert response.totalResults == 10
def test_sources(news_api_service):
request_model = SourcesRequestModel(language=Language.EN)
with requests_mock.Mocker() as m:
m.get('https://newsapi.org/v2/sources', json={"status": "ok", "sources": []})
response = news_api_service.sources(request_model)
assert isinstance(response, SourcesResponseModel)
assert response.status == 'ok'
|
roachseb/NewsAPI-Python-Client
|
tests/test_news_service.py
|
test_news_service.py
|
py
| 1,664 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "newsApi.service.NewsAPIService",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "pytest.fixture",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "newsApi.models.request.EverythingRequestModel",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests_mock.Mocker",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "newsApi.models.response.EverythingResponseModel",
"line_number": 16,
"usage_type": "argument"
},
{
"api_name": "newsApi.models.request.TopHeadlinesRequestModel",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "requests_mock.Mocker",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "newsApi.models.response.TopHeadlinesResponseModel",
"line_number": 25,
"usage_type": "argument"
},
{
"api_name": "newsApi.models.request.SourcesRequestModel",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "newsApi.models.request.Language.EN",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "newsApi.models.request.Language",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "requests_mock.Mocker",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "newsApi.models.response.SourcesResponseModel",
"line_number": 34,
"usage_type": "argument"
}
] |
11773636601
|
import requests
import json
from pprint import pprint
access_token='<put your access token here>'
page_id='<put your page id here>'
url='https://graph.facebook.com/v2.0/'+page_id+'?feed&access_token='+access_token
r = requests.get(url)
try:
response_json = json.loads(r.text)
except (ValueError, KeyError, TypeError):
print("JSON error")
pprint(response_json)
|
mehta-a/FacebookDataExtraction
|
src/extract.py
|
extract.py
|
py
| 367 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 17,
"usage_type": "call"
}
] |
39627561667
|
import time
import json
import math
import csv
import serial # conda install pyserial
import sys
import glob
# pygame needs python 3.6, not available for 3.7
import pygame # conda install -c cogsci pygame; maybe because it only is supplied for earlier python, might need conda install -c evindunn pygame ; sudo apt-get install libsdl-ttf2.0-0
import pygame.joystick as joystick # https://www.pygame.org/docs/ref/joystick.html
from datetime import datetime
# our imports
import kbhit
from pendulum import Pendulum
POLOLU_MOTOR = False # set true to set options for this motor, which has opposite sign for set_motor TODO needs fixing in firmware or wiring of motor
SERIAL_PORT = "COM4" #"/dev/ttyUSB0" # might move if other devices plugged in
SERIAL_BAUD = 230400 # default 230400, in firmware. Alternatives if compiled and supported by USB serial intervace are are 115200, 128000, 153600, 230400, 460800, 921600, 1500000, 2000000
PRINT_PERIOD_MS = 100 # shows state every this many ms
CONTROL_PERIOD_MS = 5
CALIBRATE = False #False # important to calibrate if running standalone to avoid motor burnout because limits are determined during this calibration
MOTOR_FULL_SCALE = 7199 # 7199 # with pololu motor and scaling in firmware #7199 # with original motor
MOTOR_MAX_PWM = int(round(0.95 * MOTOR_FULL_SCALE))
JOYSTICK_SCALING = MOTOR_MAX_PWM # how much joystick value -1:1 should be scaled to motor command
JOYSTICK_DEADZONE = 0.05 # deadzone around joystick neutral position that stick is ignored
ANGLE_TARGET = 3129 # 3383 # adjust to exactly vertical angle value, read by inspecting angle output
ANGLE_CTRL_PERIOD_MS = 5 # Must be a multiple of CONTROL_PERIOD_MS
ANGLE_AVG_LENGTH = 4 # adc routine in firmware reads ADC this many times quickly in succession to reduce noise
ANGLE_SMOOTHING = 1 # 1.0 turns off smoothing
ANGLE_KP = 400
ANGLE_KD = 400
POSITION_TARGET = 0 # 1200
POSITION_CTRL_PERIOD_MS = 25 # Must be a multiple of CONTROL_PERIOD_MS
POSITION_SMOOTHING = 1 # 1.0 turns off smoothing
POSITION_KP = 20
POSITION_KD = 300
def serial_ports(): # from https://stackoverflow.com/questions/12090503/listing-available-com-ports-with-python
""" Lists serial port names
:raises EnvironmentError:
On unsupported or unknown platforms
:returns:
A list of the serial ports available on the system
"""
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
# if cannot open, check permissions
ports = glob.glob('/dev/ttyUSB[0-9]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result
def saveparams():
print("\nSaving parameters")
p={}
p['ANGLE_TARGET']=ANGLE_TARGET
p['ANGLE_KP']=ANGLE_KP
p['ANGLE_KD']=ANGLE_KD
p['POSITION_TARGET']=POSITION_TARGET
p['POSITION_KP']=POSITION_KP
p['POSITION_KD']=POSITION_KD
p['ANGLE_SMOOTHING']=ANGLE_SMOOTHING
p['POSITION_SMOOTHING']=POSITION_SMOOTHING
with open('control.json','w') as f:
json.dump(p,f)
def loadparams():
print("\nLoading parameters")
f=open('control.json')
try:
p=json.load(f)
global ANGLE_TARGET, ANGLE_KP,ANGLE_KD,POSITION_TARGET,POSITION_KP,POSITION_KD,ANGLE_SMOOTHING,POSITION_SMOOTHING
ANGLE_TARGET=p['ANGLE_TARGET']
ANGLE_KP=p['ANGLE_KP']
ANGLE_KD=p['ANGLE_KD']
POSITION_TARGET=p['POSITION_TARGET']
POSITION_KP=p['POSITION_KP']
POSITION_KD=p['POSITION_KD']
ANGLE_SMOOTHING=p['ANGLE_SMOOTHING']
POSITION_SMOOTHING=p['POSITION_SMOOTHING']
except:
print("something went wrong loading parameters")
printparams()
def help():
print("\n***********************************")
print("keystroke commands")
print("ESC quit")
print("k toggle control on/off (initially off)")
print("K trigger motor position calibration")
print("=/- increase/decrease angle target")
print("[/] increase/decrease position target")
print("w/q angle proportional gain")
print("s/a angle derivative gain")
print("z/x angle smoothing")
print("r/e position proportional gain")
print("f/d position derivative gain")
print("c/v position smoothing")
print("l toggle logging data")
print("S/L Save/Load param values from disk")
print("D Toggle dance mode")
print("***********************************")
def printparams():
print("\nAngle PD Control Parameters")
print(" Set point {0}".format(ANGLE_TARGET))
print(" Average Length {0}".format(ANGLE_AVG_LENGTH))
print(" Smoothing {0:.2f}".format(ANGLE_SMOOTHING))
print(" P Gain {0:.2f}".format(ANGLE_KP))
print(" D Gain {0:.2f}".format(ANGLE_KD))
print("Position PD Control Parameters")
print(" Set point {0}".format(POSITION_TARGET))
print(" Control Period {0} ms".format(POSITION_CTRL_PERIOD_MS))
print(" Smoothing {0:.2f}".format(POSITION_SMOOTHING))
print(" P Gain {0:.2f}".format(POSITION_KP))
print(" D Gain {0:.2f}".format(POSITION_KD))
ratio=1.05
def inc(param):
if param < 2:
param = round(param + 0.1, 1)
else:
old=param
param = round(param*ratio)
if param==old:
param+=1
return param
def dec(param):
if param < 2:
param = max(0,round(param - 0.1, 1))
else:
old=param
param = round(param/ratio)
if param==old:
param-=1
return param
if ANGLE_CTRL_PERIOD_MS < CONTROL_PERIOD_MS or POSITION_CTRL_PERIOD_MS <CONTROL_PERIOD_MS:
raise Exception("angle or position control periods too short compared to CONTROL_PERIOD_MS")
# check that we are running from terminal, otherwise we cannot control it
if sys.stdin.isatty():
# running interactively
print('running interactively from an interactive terminal, ok')
else:
print('run from an interactive terminal to allow keyboard input')
quit()
################################################################################
# OPEN SERIAL PORT
################################################################################
p = Pendulum()
serialPorts=serial_ports()
print('Available serial ports: '+str(serialPorts))
if len(serialPorts)==0:
print('no serial ports available, or cannot open it; check linux permissions\n Under linux, sudo chmod a+rw [port] transiently, or add user to dialout or tty group')
quit()
if len(serialPorts)>1:
print(str(len(serialPorts))+' serial ports, taking first one which is '+str(serialPorts[0]))
SERIAL_PORT=str(serialPorts[0])
try:
p.open(SERIAL_PORT, SERIAL_BAUD)
except:
print('cannot open port '+str(SERIAL_PORT)+': available ports are '+str(serial_ports()))
quit()
print('opened '+str(SERIAL_PORT)+' successfully')
p.control_mode(False)
p.stream_output(False)
joystickExists=False
pygame.init()
joystick.init()
if joystick.get_count()==1:
stick = joystick.Joystick(0)
stick.init()
axisNum = stick.get_numaxes()
buttonNum = stick.get_numbuttons()
joystickExists=True
print('joystick found with '+str(axisNum)+' axes and '+str(buttonNum)+' buttons')
else:
print('no joystick found, only PD control or no control possible')
if CALIBRATE:
print("Calibrating motor position....")
if not p.calibrate():
print("Failed to connect to device")
p.close()
exit()
print("Done calibrating")
loadparams()
time.sleep(1)
################################################################################
# SET PARAMETERS
################################################################################
p.set_angle_config( ANGLE_TARGET,
ANGLE_AVG_LENGTH,
ANGLE_SMOOTHING,
ANGLE_KP,
ANGLE_KD)
p.set_position_config( POSITION_TARGET,
POSITION_CTRL_PERIOD_MS,
POSITION_SMOOTHING,
POSITION_KP,
POSITION_KD)
################################################################################
# GET PARAMETERS
################################################################################
( ANGLE_TARGET,
ANGLE_AVG_LENGTH,
ANGLE_SMOOTHING,
ANGLE_KP,
ANGLE_KD) = p.get_angle_config()
( POSITION_TARGET,
POSITION_CTRL_PERIOD_MS,
POSITION_SMOOTHING,
POSITION_KP,
POSITION_KD) = p.get_position_config()
################################################################################
# CONTROL LOOP (PC BASED)
################################################################################
printCount = 0
angleErrPrev = 0
angleCmd = 0
positionErrPrev = 0
positionCmd = 0
controlEnabled=False
danceEnabled=False
danceAmpl=500
dancePeriodS=8
loggingEnabled=False
kbAvailable=True
try:
kb = kbhit.KBHit() # can only use in posix terminal; cannot use from spyder ipython console for example
except:
kbAvailable=False
printparams()
help()
startTime=time.time()
lastTime=startTime
lastAngleControlTime=lastTime
lastPositionControlTime=lastTime
angleErr=0
positionErr=0 # for printing even if not controlling
p.stream_output(True) # now start streaming state
while True:
# Adjust Parameters
if kbAvailable & kb.kbhit():
c = kb.getch()
if c=='D':
danceEnabled=~danceEnabled
print("\ndanceEnabled= {0}".format(danceEnabled))
elif c == 'l':
loggingEnabled=~loggingEnabled
print("\nloggingEnabled= {0}".format(loggingEnabled))
if loggingEnabled:
try:
csvfilename=datetime.now().strftime("cartpole-%Y-%m-%d-%H-%M-%S.csv")
csvfile=open(csvfilename, 'w', newline='')
csvwriter = csv.writer(csvfile, delimiter=',')
csvwriter.writerow(['time'] + ['deltaTimeMs']+['angle'] + ['position'] + ['angleTarget'] + ['angleErr'] + ['positionTarget'] + ['positionErr'] + ['angleCmd'] + ['positionCmd'] + ['motorCmd']+['actualMotorCmd'])
print("\n Started logging data to "+csvfilename)
except Exception as e:
loggingEnabled=False
print("\n" + str(e) + ": Exception opening csvfile; logging disabled")
else:
csvfile.close()
print("\n Stopped logging data to "+csvfilename)
elif c == 'k':
controlEnabled=~controlEnabled
print("\ncontrolEnabled= {0}".format(controlEnabled))
elif c == 'K':
controlEnabled=False
print("\nCalibration triggered")
p.calibrate()
print("\nCalibration finished")
elif c == 'h' or c=='?':
help()
elif c == 'p' :
printparams()
# Increase Target Angle
elif c == '=':
ANGLE_TARGET += 1
print("\nIncreased target angle to {0}".format(ANGLE_TARGET))
# Decrease Target Angle
elif c == '-':
ANGLE_TARGET -= 1
print("\nDecreased target angle to {0}".format(ANGLE_TARGET))
# Increase Target Position
elif c == ']':
POSITION_TARGET += 200
print("\nIncreased target position to {0}".format(POSITION_TARGET))
# Decrease Target Position
elif c == '[':
POSITION_TARGET -= 200
print("\nDecreased target position to {0}".format(POSITION_TARGET))
# Angle Gains
elif c == 'w':
ANGLE_KP=inc(ANGLE_KP)
print("\nIncreased angle KP {0}".format(ANGLE_KP))
elif c == 'q':
ANGLE_KP=dec(ANGLE_KP)
print("\nDecreased angle KP {0}".format(ANGLE_KP))
elif c == 's':
ANGLE_KD=inc(ANGLE_KD)
print("\nIncreased angle KD {0}".format(ANGLE_KD))
elif c == 'a':
ANGLE_KD=dec(ANGLE_KD)
print("\nDecreased angle KD {0}".format(ANGLE_KD))
elif c == 'x':
ANGLE_SMOOTHING=dec(ANGLE_SMOOTHING)
if ANGLE_SMOOTHING>1:
ANGLE_SMOOTHING=1
print("\nIncreased ANGLE_SMOOTHING {0}".format(ANGLE_SMOOTHING))
elif c == 'z':
ANGLE_SMOOTHING=inc(ANGLE_SMOOTHING)
if ANGLE_SMOOTHING>1:
ANGLE_SMOOTHING=1
print("\nDecreased ANGLE_SMOOTHING {0}".format(ANGLE_SMOOTHING))
# Position Gains
elif c == 'r':
POSITION_KP=inc(POSITION_KP)
print("\nIncreased position KP {0}".format(POSITION_KP))
elif c == 'e':
POSITION_KP=dec(POSITION_KP)
print("\nDecreased position KP {0}".format(POSITION_KP))
elif c == 'f':
POSITION_KD=inc(POSITION_KD)
print("\nIncreased position KD {0}".format(POSITION_KD))
elif c == 'd':
POSITION_KD=dec(POSITION_KD)
print("\nDecreased position KD {0}".format(POSITION_KD))
elif c == 'v':
POSITION_SMOOTHING=dec(POSITION_SMOOTHING)
if POSITION_SMOOTHING>1:
POSITION_SMOOTHING=1
print("\nIncreased POSITION_SMOOTHING {0}".format(POSITION_SMOOTHING))
elif c == 'c':
POSITION_SMOOTHING=inc(POSITION_SMOOTHING)
if POSITION_SMOOTHING>1:
POSITION_SMOOTHING=1
print("\nDecreased POSITION_SMOOTHING {0}".format(POSITION_SMOOTHING))
elif c=='S':
saveparams()
elif c=='L':
loadparams()
# Exit
elif ord(c) == 27 : # ESC
print("\nquitting....")
break
# This function will block at the rate of the control loop
# p.clear_read_buffer() # if we don't clear read buffer, state output piles up in serial buffer
(angle, position, command) = p.read_state()
# angle count is more positive CCW facing cart, position encoder count is more positive to right facing cart
timeNow=time.time()
deltaTime=timeNow-lastTime
if deltaTime==0:
deltaTime=1e-6
lastTime=timeNow
elapsedTime=timeNow-startTime
diffFactor=(CONTROL_PERIOD_MS/(deltaTime*1000))
positionTargetNow=POSITION_TARGET
if controlEnabled and danceEnabled:
positionTargetNow=POSITION_TARGET+danceAmpl*math.sin(2*math.pi*(elapsedTime/dancePeriodS))
# Balance PD Control
# Position PD Control
if timeNow -lastPositionControlTime >= POSITION_CTRL_PERIOD_MS*.001:
lastPositionControlTime=timeNow
positionErr = POSITION_SMOOTHING*(position - positionTargetNow) + (1.0 - POSITION_SMOOTHING)*positionErrPrev # First order low-P=pass filter
positionErrDiff = (positionErr - positionErrPrev)*diffFactor
positionErrPrev = positionErr
# Naive solution: if too positive (too right), move left (minus on positionCmd),
# but this does not produce correct control.
# The correct strategy is that if cart is too positive (too right),
# produce lean to the left by introducing a positive set point angle leaning slightly to left,
# i.e. more positve positionErr makes more positive effective ANGLE_TARGET
# End result is that sign of positionCmd is flipped
# Also, if positionErr is increasing more, then we want even more lean, so D sign is also positive
positionCmd = +(POSITION_KP*positionErr + POSITION_KD*positionErrDiff)
if timeNow-lastAngleControlTime >= ANGLE_CTRL_PERIOD_MS*.001:
lastAngleControlTime=timeNow
angleErr = ANGLE_SMOOTHING*(angle - ANGLE_TARGET) + (1.0 - ANGLE_SMOOTHING)*angleErrPrev # First order low-pass filter
angleErrDiff = (angleErr - angleErrPrev)*diffFactor # correct for actual sample interval; if interval is too long, reduce diff error
angleErrPrev = angleErr
angleCmd = -(ANGLE_KP*angleErr + ANGLE_KD*angleErrDiff) # if too CCW (pos error), move cart left
motorCmd = int(round(angleCmd + positionCmd)) # change to plus for original, check that when cart is displayed, the KP term for cart position leans cart the correct direction
motorCmd = MOTOR_MAX_PWM if motorCmd > MOTOR_MAX_PWM else motorCmd
motorCmd = -MOTOR_MAX_PWM if motorCmd < -MOTOR_MAX_PWM else motorCmd
stickPos=0.0
if joystickExists:
# for event in pygame.event.get(): # User did something.
# if event.type == pygame.QUIT: # If user clicked close.
# done = True # Flag that we are done so we exit this loop.
# elif event.type == pygame.JOYBUTTONDOWN:
# print("Joystick button pressed.")
# elif event.type == pygame.JOYBUTTONUP:
# print("Joystick button released.")
pygame.event.get() # must call get() to handle internal queue
stickPos=stick.get_axis(0) # 0 left right, 1 front back 2 rotate
if abs(stickPos)>JOYSTICK_DEADZONE:
actualMotorCmd=int(round(stickPos*JOYSTICK_SCALING))
elif controlEnabled:
actualMotorCmd=motorCmd
else:
actualMotorCmd=0
if POLOLU_MOTOR==False:
p.set_motor(-actualMotorCmd) # positive motor cmd moves cart right
else:
p.set_motor(actualMotorCmd) # positive motor cmd moves cart right
if loggingEnabled:
# csvwriter.writerow(['time'] + ['deltaTimeMs']+['angle'] + ['position'] + ['angleErr'] + ['positionErr'] + ['angleCmd'] + ['positionCmd'] + ['motorCmd'])
csvwriter.writerow([elapsedTime,deltaTime*1000,angle, position, ANGLE_TARGET, angleErr, positionTargetNow, positionErr, angleCmd,positionCmd,motorCmd,actualMotorCmd])
# Print output
printCount += 1
if printCount >= (PRINT_PERIOD_MS/CONTROL_PERIOD_MS):
printCount = 0
print("\r angle {:+4d} angleErr {:+6.1f} position {:+6d} positionErr {:+6.1f} angleCmd {:+6d} positionCmd {:+6d} motorCmd {:+6d} dt {:.3f}ms stick {:.3f} \r".format(int(angle), angleErr, int(position), positionErr, int(round(angleCmd)), int(round(positionCmd)), actualMotorCmd, deltaTime*1000, stickPos), end = '')
# if we pause like below, state info piles up in serial input buffer
# instead loop at max possible rate to get latest state info
# time.sleep(CONTROL_PERIOD_MS*.001) # not quite correct since there will be time for execution below
# when x hit during loop or other loop exit
p.set_motor(0) # turn off motor
p.close()
joystick.quit()
if loggingEnabled:
csvfile.close()
|
SensorsINI/DeltaGRU-cartpole
|
cartpole_robot/python_controller/control.py
|
control.py
|
py
| 19,414 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sys.platform.startswith",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "sys.platform",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "sys.platform.startswith",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "sys.platform",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "sys.platform.startswith",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "sys.platform",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "serial.Serial",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "serial.SerialException",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "json.dump",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "sys.stdin.isatty",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 168,
"usage_type": "attribute"
},
{
"api_name": "pendulum.Pendulum",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "pygame.init",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "pygame.joystick.init",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "pygame.joystick",
"line_number": 200,
"usage_type": "name"
},
{
"api_name": "pygame.joystick.get_count",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "pygame.joystick",
"line_number": 201,
"usage_type": "name"
},
{
"api_name": "pygame.joystick.Joystick",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "pygame.joystick",
"line_number": 202,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "kbhit.KBHit",
"line_number": 273,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 301,
"usage_type": "name"
},
{
"api_name": "csv.writer",
"line_number": 303,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 406,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 417,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 417,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 457,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 457,
"usage_type": "attribute"
},
{
"api_name": "pygame.joystick.quit",
"line_number": 488,
"usage_type": "call"
},
{
"api_name": "pygame.joystick",
"line_number": 488,
"usage_type": "name"
}
] |
12066160544
|
import enum
from typing import Dict, List, Tuple
# --------------------------
# Basic (string, float, int)
# --------------------------
name: str = "Tay May"
weight: float = 60.2
age: int = 16
print(name)
print(weight)
print(age)
# --------------------------
# List
# --------------------------
thanhvien_cs102: List[str] = ["Tay May", "To Mo", "Robot"]
# có thể thay đổi giá trị
thanhvien_cs102[1] = "Hello"
print(thanhvien_cs102)
# --------------------------
# Tuple
# --------------------------
mytuple: Tuple[str, str, int] = ("Pygame", "with", 102)
# Không thể thay đổi giá trị
# mytuple[2] = 103
print(mytuple)
# --------------------------
# Dictionary
# --------------------------
card: Dict[str, str] = {"course": "CS102", "main": "Pygame"}
print(card["main"])
# --------------------------
# enum
# --------------------------
class GameStateType(enum.Enum):
RUNNING = 0
WON = 1
LOST = 2
state: GameStateType = GameStateType.RUNNING
print(state == GameStateType.RUNNING)
print(state == GameStateType.WON)
# --------------------------
# Function
# --------------------------
def sum(items: List[float]) -> float:
total: float = 0.0
for item in items:
total += item
return total
danh_sach_diem: List[float] = [2.5, 1.5, 2, 3.25]
tong: float = sum(danh_sach_diem)
print(tong)
|
Greninja2021/Steam2022
|
Lesson_1_CS102-CrazyRobot/example_typing.py
|
example_typing.py
|
py
| 1,344 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "typing.List",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "enum.Enum",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 62,
"usage_type": "name"
}
] |
30347048151
|
#from exam_word import word_04
from .exam_func import TEST, MEAN,EXAMPLE, EXAMPLE_test, SIMILAR
import json
from collections import OrderedDict
from .W2V_word import W2V
import random
# word = word_04
def REMOVE(st):
row = ' '.join(s for s in st)
remove = "}"
for x in range(len(remove)):
row1 = row.replace(remove[x],"")
row2 = row1.replace("'","")
row3 = row2.split('.')
strip_li = []
for i in row3:
i = i.strip()
if i:
strip_li.append(i)
return strip_li
def W2V_MEAN(w2v_word):
n = len(w2v_word)
w2v_mean = []
for i in range(0,n):
t = TEST(w2v_word[i])
result_parse = t[0]
row1 = MEAN(result_parse)
row2 = REMOVE(row1)
w2v_mean.append(row2)
return w2v_mean
def t04(word):
    """Build the type-4 exam question for *word* as a JSON string.

    Picks a random synonym entry from TEST(word), then offers the word2vec
    neighbours plus the synonym as answer choices, each mapped to its
    cleaned meaning.
    """
    w2v_word = W2V(word)
    t = TEST(word)
    result_similar = random.choice(t[2])
    mean = result_similar['예시']        # example sentence
    similar = result_similar['유의어']   # synonym (the correct answer)
    similar_list = [similar]
    w2v_mean = W2V_MEAN(w2v_word) + W2V_MEAN(similar_list)
    w2v_word = w2v_word + similar_list
    choice = dict(zip(w2v_word, w2v_mean))
    file_exam4 = OrderedDict()
    file_exam4["TYPE4"] = "다음 문장 속 "+word+"의 의미와 가장 관련이 깊은 단어를 고르시오."
    file_exam4["ANSWER"] = similar
    file_exam4["MEAN"] = mean
    file_exam4["CHOICE"] = choice
    EXAM4 = json.dumps(file_exam4, ensure_ascii=False, indent="\t")
    print(EXAM4)
    return EXAM4
|
GeulNoon/server
|
geulnoon/Word/test04.py
|
test04.py
|
py
| 1,699 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "exam_func.TEST",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "exam_func.MEAN",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "W2V_word.W2V",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "exam_func.TEST",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 63,
"usage_type": "call"
}
] |
40327669141
|
from easy_pyechart import _funnel_base_config, constants
from pyecharts import options as opts
from pyecharts.commons.utils import JsCode
from typing import Any, Optional
from pyecharts.charts import Funnel
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
'''漏斗图'''
class eFunnel():
    """Funnel-chart wrapper: collects display options and delegates chart
    construction to the shared ``_funnel_base_config`` helper."""

    def __init__(
            self,
            title: Optional[str] = None,
            subTitle: Optional[str] = None,
            lableList: Optional[list] = None,
            valueList: Optional[list] = None,
            themeType=constants.defualt_theme,
            backgroundImageUrl: Optional[str] = None):
        # Fix: the original used mutable default arguments (=[]), so every
        # instance created without explicit lists shared (and could mutate)
        # the same list object. Defaulting to None and substituting a fresh
        # list is backward-compatible.
        self.opts: dict = {
            "lengend": Funnel,
            "xList": lableList if lableList is not None else [],
            "yList": valueList if valueList is not None else [],
            "themeType": themeType,
            "backgroundImageUrl": backgroundImageUrl,
            "title": title,
            "subTitle": subTitle,
        }

    # Triangle-shaped funnel chart setup.
    def _funnel_chart(self):
        """Build and return the funnel chart from this instance's options."""
        return _funnel_base_config(self)
|
jayz2017/easy_pyechart.py
|
easy_pyechart/easy_funnel.py
|
easy_funnel.py
|
py
| 1,050 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "easy_pyechart.constants.defualt_theme",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "easy_pyechart.constants",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "pyecharts.charts.Funnel",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "easy_pyechart._funnel_base_config",
"line_number": 31,
"usage_type": "call"
}
] |
44284258376
|
import sys, dpkt, socket
from dpkt.compat import compat_ord
class Statistics: #Statistic Class: Used just to store global stats of the following info
    # Trace-wide aggregates, filled in by finalStatCheck after all packets
    # have been analyzed.
    # NOTE(review): the list attributes below are class-level and therefore
    # shared across instances; the script only ever creates one Statistics.
    connCount = 0       # total connections observed
    rstCount = 0        # connections that saw at least one RST
    openCount = 0       # connections still open at end of capture
    closeCount = 0      # complete (SYN+FIN) connections
    duration = 0        # summed duration of complete connections
    minDuration = 0
    meanDuration = 0
    maxDuration = 0
    RTTCount = 0
    RTT = []            # all RTT samples pooled from complete connections
    minRTT = -1         # -1 marks "not yet computed"
    meanRTT = -1
    maxRTT = -1
    pktCount = 0        # packets summed over complete connections
    minPacket = 0
    meanPacket = 0
    maxPacket = 0
    window = []         # all receive-window samples pooled from complete connections
    minWindow = -1
    meanWindow = -1
    maxWindow = -1
class Packet(object): #Packet Class: used to store packet info. A packet class is created for each packet, and destroyed after being analyzed
    # Per-packet fields copied out of the dpkt Ethernet/IP/TCP headers in
    # main(); defaults mark "not yet parsed".
    srcMac = ""
    dstMac = ""
    srcIP = ""
    dstIP = ""
    IPLen = -1          # IP header length field (ip.len)
    id = -1             # IP identification field
    seq = -1            # TCP sequence number
    ack = -1            # TCP acknowledgement number
    windowSize = -1     # TCP receive window (tcp.win)
    flagsBin = -1       # raw TCP flags bitmask (tcp.flags)
    flags = []          # decoded flag names, set by binToFlags
    srcPort = -1
    dstPort = -1
    time = -1           # pcap capture timestamp
class Connection: #Connection Info. Used to store
    """Per-connection bookkeeping, keyed by the (srcIP, dstIP, srcPort,
    dstPort) 4-tuple of the first packet seen for the connection."""
    def __init__(self, packet):
        self.srcAdd = packet.srcIP
        self.dstAdd = packet.dstIP
        self.srcPort = packet.srcPort
        self.dstPort = packet.dstPort
        self.status = [0, 0, 0] #SYN Count, FIN Count, RST Count
        self.startTime = packet.time
        self.endTime = packet.time          # updated on each FIN
        self.srcDstPacketCount = 0
        self.dstSrcPacketCount = 0
        self.packetCount = 0
        self.srcDstByteCount = 0
        self.dstSrcByteCount = 0
        self.byteCount = 0
        self.initialClientSeq = packet.seq + 1  # base for client byte counting
        self.initialServerSeq = 0               # set when first server packet arrives
        self.pastClientSeq = -50                # last client seq awaiting an ACK (for RTT)
        self.pastServerSeq = 0
        self.pastClientPacketTime = packet.time # send time matching pastClientSeq
        self.pastServerPacketTime = 0
        self.RTTCount = 0
        self.calRTT = 0                         # 1 while an RTT measurement is pending
        self.duration = 0                       # endTime - startTime, set in finalStatCheck
        self.RTT = []                           # RTT samples for this connection
        self.window = []                        # receive-window samples for this connection
class Connections:
    """Flat collection of Connection objects plus report printing."""
    def __init__(self):
        self.links = []
        self.size = 0
    def add(self, connection):
        """Append a connection and keep the running count in sync."""
        self.links.append(connection)
        self.size = self.size + 1
    def printConnections(self):
        """Print section B of the report: one numbered entry per connection.
        Timing lines are only shown for complete (SYN and FIN seen) connections."""
        count = 1
        for link in self.links:
            print("Connection " + str(count) + ":")
            print("Source Address: " + link.srcAdd)
            print("Destination Address: " + link.dstAdd)
            print("Source Port: " + str(link.srcPort))
            print("Destination Port: " + str(link.dstPort))
            print("Status: " + "S" + str(link.status[0]) + "F" + str(link.status[1]) + "R" + str(link.status[2]))
            if link.status[0] >= 1:
                if link.status[1] >= 1:
                    print("Start Time: " + str(link.startTime) + "ms")
                    print("End Time: " + str(link.endTime) + "ms")
                    print("Duration: " + str(link.duration) + "ms")
            print("Number of packets send from Source to Destination: " + str(link.srcDstPacketCount))
            print("Number of packets send from Destination to Source: " + str(link.dstSrcPacketCount))
            print("Total number of packets: " + str(link.packetCount))
            print("Number of data bytes send from Source to Destination: " + str(link.srcDstByteCount))
            print("Number of data bytes send from Destination to Source: " + str(link.dstSrcByteCount))
            print("Total number of data bytes: " + str(link.byteCount))
            print("END")
            count = count + 1
            # Separator between entries, skipped after the last one.
            if count <= (self.size): print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
def printPacket(packet):
    """Debug helper: dump every parsed field of a single Packet."""
    print("Source MAC: " + packet.srcMac)
    print("Destination MAC: " + packet.dstMac)
    print("Source IP: " + packet.srcIP)
    print("Destination IP: " + packet.dstIP)
    print("IP Header Length: " + str(packet.IPLen))
    print("Packet ID: " + str(packet.id))
    print("Sequence: " + str(packet.seq))
    print("Acknowledgement: " + str(packet.ack))
    print("Window Size: " + str(packet.windowSize))
    print("Flag Binary: " + bin(packet.flagsBin))
    print("Flags: " + str(packet.flags))
    print("Source Port: " + str(packet.srcPort))
    print("Destination Port: " + str(packet.dstPort))
    print("Time: " + str(packet.time))
def mac_addr(address):  #Refer to Reference #Used to convert binary to mac addresses
    """Format a raw MAC address (bytes) as colon-separated lowercase hex."""
    octets = ["%02x" % compat_ord(b) for b in address]
    return ":".join(octets)
def inet_to_str(inet):  #Refer to Reference #Used to convert binary to Ip Addresses
    """Convert a packed IPv4 address (4 bytes) to dotted-quad text."""
    family = socket.AF_INET
    return socket.inet_ntop(family, inet)
def binToFlags(packet): #Binary Flag parsing
    """Decode packet.flagsBin (TCP flags bitmask) into packet.flags names.

    Order matches the original bit order: FIN, SYN, RST, PSH, ACK, URG.
    """
    flag_bits = ((0b1, "FIN"), (0b10, "SYN"), (0b100, "RST"),
                 (0b1000, "PSH"), (0b10000, "ACK"), (0b100000, "URG"))
    packet.flags = [name for mask, name in flag_bits if packet.flagsBin & mask]
    return packet
def clientInitialRTT(stats, connection, packet): #The initial time for RTT
    """Record the outgoing sequence number and send time so a later ACK can
    be matched against them to measure one RTT sample."""
    connection.pastClientSeq, connection.pastClientPacketTime = packet.seq, packet.time
    return 0
def clientFinalRTT(stats, connection, packet): #Client final RTT
    """If this ACK covers the recorded outgoing seq, append the elapsed time
    since that seq was sent to the connection's RTT samples."""
    acked = connection.pastClientSeq <= packet.ack
    if acked:
        sample = packet.time - connection.pastClientPacketTime
        connection.RTT.append(sample)
    return 0
def updateDstSrcCount(connection, packet): #Calculation to update the byte and packet count from the destunation to source
    """Count one more client->server packet and refresh the server->client
    byte tally from this packet's ACK number. Returns the bytes received."""
    connection.packetCount += 1
    connection.srcDstPacketCount += 1
    received = packet.ack - connection.initialServerSeq - 1
    connection.dstSrcByteCount = received
    connection.byteCount = connection.srcDstByteCount + connection.dstSrcByteCount
    return received
def updateSrcDstCount(connection, packet): #Method to update the byte and packet count from the source to destionation
    """Count one more server->client packet and refresh the client->server
    byte tally from this packet's ACK number. Returns the bytes sent."""
    connection.packetCount += 1
    connection.dstSrcPacketCount += 1
    if connection.initialServerSeq == 0:
        # First packet from the server completes the 3-way-handshake view:
        # remember its starting sequence number for byte accounting.
        connection.initialServerSeq = packet.seq + 1
    sent = packet.ack - connection.initialClientSeq
    connection.srcDstByteCount = sent
    connection.byteCount = connection.srcDstByteCount + connection.dstSrcByteCount
    return sent
def printFinal(stats, connections):
    """Print the full report: totals (A), per-connection details (B),
    general counts (C), and complete-connection statistics (D)."""
    print("A) Total number of connections: " + str(connections.size))
    print("___________________________________________________________________________________")
    print("")
    print("B) Connection's details:")
    print("")
    connections.printConnections()
    print("___________________________________________________________________________________")
    print("")
    print("C) General:")
    print("")
    print("Total number of complete TCP connections: " + str(stats.closeCount))
    print("Number of reset TCP connections: " + str(stats.rstCount))
    print("Number of TCP connections that were still open when the trace capture ended: " + str(stats.openCount))
    print("___________________________________________________________________________________")
    print("")
    print("D) Complete TCP connections:")
    print("")
    print("Minimum time durations: " + str(stats.minDuration) + "ms")
    print("Mean time durations: " + str(stats.meanDuration) + "ms")
    print("Maximum time duration: " + str(stats.maxDuration) + "ms")
    print("")
    print("Minimum RTT values: " + str(stats.minRTT) + "ms")
    print("Mean RTT values: " + str(stats.meanRTT) + "ms")
    print("Maximum RTT values: " + str(stats.maxRTT) + "ms")
    print("")
    print("Minimum number of packets including both send/received: " + str(stats.minPacket))
    print("Mean number of packets including both send/received: " + str(stats.meanPacket))
    print("Maximum number of packets including both send/received: " + str(stats.maxPacket))
    print("")
    print("Minimum receive window sizes including both send/received: " + str(stats.minWindow))
    print("Mean receive window sizes including both send/received: " + str(stats.meanWindow))
    print("Maximum receive window sizes including both send/receive: " + str(stats.maxWindow))
    print("___________________________________________________________________________________")
def analyzePacket(stats, connections, packet): #Series of function calls that analyzes all the packets
    """Match *packet* against the known connections (in either direction) and
    update that connection's flag counts, window samples, byte/packet tallies
    and RTT bookkeeping; if no connection matches, open a new one.

    Returns 1 when an existing connection was updated, 0 when a new
    connection record was created.
    """
    for connection in connections.links: #Checks whether a connection exists in file for the packet being analyzed
        # Forward direction: packet travels client -> server.
        if (connection.srcAdd == packet.srcIP) and (connection.dstAdd == packet.dstIP) and (connection.srcPort == packet.srcPort) and (connection.dstPort == packet.dstPort):
            if "SYN" in packet.flags:
                connection.status[0] = connection.status[0] + 1 #Update SYN Count
            if "FIN" in packet.flags:
                connection.status[1] = connection.status[1] + 1 #Update FIN Count
                connection.endTime = packet.time #Update END TIME
            if "RST" in packet.flags:
                connection.status[2] = connection.status[2] + 1 #Update RST Count
            connection.window.append(packet.windowSize) #Store Window Size
            byteTransfered = updateDstSrcCount(connection, packet) #Calculate if any bytes were sent /received, and store
            if "SYN" in packet.flags or "FIN" in packet.flags: #Start an RTT measurement on SYN or FIN
                connection.calRTT = 1
                clientInitialRTT(stats, connection, packet)
            return 1
        #Serires of function calls that analyzes all the packets
        #Similar as to above, but for server -> destionation packets
        if (connection.dstAdd == packet.srcIP) and (connection.srcAdd == packet.dstIP) and (connection.dstPort == packet.srcPort) and (connection.srcPort == packet.dstPort):
            if "SYN" in packet.flags:
                connection.status[0] = connection.status[0] + 1
            if "FIN" in packet.flags:
                connection.status[1] = connection.status[1] + 1
                connection.endTime = packet.time
            if "RST" in packet.flags:
                connection.status[2] = connection.status[2] + 1
            connection.window.append(packet.windowSize)
            byteTransfered = updateSrcDstCount(connection, packet)
            #Only complete the pending RTT measurement when this reply is a SYN
            #or an ACK that actually carried/acknowledged data.
            if ((byteTransfered > 0 and "ACK" in packet.flags) or "SYN" in packet.flags) and connection.calRTT == 1:
                connection.calRTT = 0
                clientFinalRTT(stats, connection, packet)
            return 1
    connection = Connection(packet) #If there isn't any existing connection, create a connection
    connection.srcDstPacketCount = connection.srcDstPacketCount + 1 #Update packet count
    connection.packetCount = connection.packetCount + 1 #
    connection.status[0] = connection.status[0] + 1 #Add syn count
    stats.openCount = stats.openCount + 1 #Add open count
    stats.connCount = stats.connCount + 1 #Add connection cvount
    connection.window.append(packet.windowSize) #Store window size
    connections.add(connection)
    if "SYN" in packet.flags: #If SYN, prepare values for RTT
        connection.calRTT = 1
        clientInitialRTT(stats, connection, packet)
    return 0
def finalStatCheck(stats, connections): #After analyzing all the packets
    """Fold per-connection results into the global Statistics: counts of
    complete/reset connections and min/mean/max of duration, packet count,
    window size and RTT over all COMPLETE (SYN+FIN) connections.

    NOTE(review): divides by stats.closeCount and len(stats.RTT), so the
    trace must contain at least one complete connection with an RTT sample
    or this raises ZeroDivisionError / min()-of-empty errors.
    """
    for connection in connections.links: #For all connections
        if connection.status[0] >= 1: #If SYN
            if connection.status[1] >= 1: #If FIN -> complete connection
                stats.openCount = stats.openCount - 1 #Complete connection
                stats.closeCount = stats.closeCount + 1
                connection.duration = connection.endTime - connection.startTime #min mean max duration for all compelete connections
                stats.duration = stats.duration + connection.duration
                # 0 means "unset", so seed min/max from the first complete connection.
                if stats.minDuration == 0:
                    stats.minDuration = connection.duration
                if stats.maxDuration == 0:
                    stats.maxDuration = connection.duration
                if connection.duration <= stats.minDuration:
                    stats.minDuration = connection.duration
                if connection.duration >= stats.maxDuration:
                    stats.maxDuration = connection.duration
                stats.pktCount = stats.pktCount + connection.packetCount #min mean max packet count for all complete connections
                if stats.minPacket == 0:
                    stats.minPacket = connection.packetCount
                if stats.maxPacket == 0:
                    stats.maxPacket = connection.packetCount
                if connection.packetCount <= stats.minPacket:
                    stats.minPacket = connection.packetCount
                if connection.packetCount >= stats.maxPacket:
                    stats.maxPacket = connection.packetCount
                stats.window.extend(connection.window) #update connection window for min, mean max calculations
                stats.RTT.extend(connection.RTT) #add rtt for min mean max valculations
        if connection.status[2] >= 1:
            stats.rstCount = stats.rstCount + 1
    stats.meanDuration = stats.duration / stats.closeCount #mean duration calculation
    stats.meanPacket = stats.pktCount / stats.closeCount #mean packet count duration
    stats.minWindow = min(stats.window) #min mean max window size calculation
    stats.maxWindow = max(stats.window)
    stats.meanWindow = sum(stats.window)/stats.pktCount
    stats.minRTT = min(stats.RTT) #min mean max RTT calculation
    stats.maxRTT = max(stats.RTT)
    stats.meanRTT = sum(stats.RTT) / len(stats.RTT)
    return 1
def main():
    """Entry point: open the pcap named on the command line, parse every
    packet's Ethernet/IP/TCP headers into a Packet, run each through
    analyzePacket, then compute and print the final report."""
    traceFileName = sys.argv[1] #name of file to read from
    traceFile = open(traceFileName, "rb") #open the file to read in binary
    tracePcap = dpkt.pcap.Reader(traceFile) #use a reader to parse
    stats = Statistics()
    connections = Connections()
    count = 0
    for timeStamp, buf in tracePcap: #Refer to reference. Parts of the referenced code has been deleted or modified.
        eth = dpkt.ethernet.Ethernet(buf)
        ip = eth.data #IP Header
        tcp = ip.data #TCP Header
        packet = Packet() #Storing various values into a packet class
        packet.srcMac = mac_addr(eth.src)
        packet.dstMac = mac_addr(eth.dst)
        packet.srcIP = inet_to_str(ip.src)
        packet.dstIP = inet_to_str(ip.dst)
        packet.IPLen = ip.len
        packet.id = ip.id
        packet.seq = tcp.seq
        packet.ack = tcp.ack
        packet.windowSize = tcp.win
        packet.flagsBin = tcp.flags
        packet.srcPort = tcp.sport
        packet.dstPort = tcp.dport
        packet.time = timeStamp
        packet = binToFlags(packet) # decode the flag bitmask into names
        analyzePacket(stats, connections, packet) #For each packet, analyze
        del packet
    finalStatCheck(stats, connections)
    printFinal(stats, connections)
main()
# Parsing is taken from the link below, in particluar the mac_addr, and inet_to_str.
# Opening the file, and obtaining the buffer and timestamp is from
# http://dpkt.readthedocs.io/en/latest/_modules/examples/print_packets.html?highlight=print%20ip
|
dmahw/CSC_361_TCPTrafficAnalysis
|
TCPTrafficAnalysis.py
|
TCPTrafficAnalysis.py
|
py
| 17,576 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "dpkt.compat.compat_ord",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "socket.inet_ntop",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "socket.AF_INET",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 304,
"usage_type": "attribute"
},
{
"api_name": "dpkt.pcap.Reader",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "dpkt.pcap",
"line_number": 307,
"usage_type": "attribute"
},
{
"api_name": "dpkt.ethernet.Ethernet",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "dpkt.ethernet",
"line_number": 314,
"usage_type": "attribute"
}
] |
39468004248
|
# Module docstring assigned explicitly; fixes the "varoous" typo.
__doc__ = "this module contains various tools"
from datetime import date, datetime
# built in modules:
# import sys
# import os
# modules from pypi (install using `pip install module_name`)
# paramiko
# requests
def input_int(num_range: tuple):
    """Prompt until the user enters an integer within the inclusive range.

    `num_range`: tuple like (from, to)
    Returns the validated value as an int.
    """
    frm, to = num_range
    prompt = f'enter number between {frm} and {to} '
    option = input(prompt)
    while (not option.isdigit()) or (int(option) < frm or int(option) > to):
        print("Error: invalid option")
        # Fix: re-show the full prompt on retry; the original re-read with a
        # bare input(), leaving the user with no instructions.
        option = input(prompt)
    return int(option)
def cool():
    """Print a placeholder message."""
    message = "la la la "
    print(message)
def print_this_time():
    """Print the current local date-time.

    Should be used only when this module is the main module.
    """
    now = datetime.now()
    print(now)


if __name__ == '__main__':
    print_this_time()
|
MrPupik/python-examples
|
zero_to_hero/tools.py
|
tools.py
|
py
| 753 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "datetime.datetime.now",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 32,
"usage_type": "name"
}
] |
27516705586
|
from typing import Literal
ver_num = "3.2.2"
online_message = "Oh no, pas encore..."

# Module-status registry: module name -> "✅" (enabled) / "❌" (disabled).
mods = {}


def enable_module(mod):
    """Mark *mod* as enabled in the registry."""
    mods[mod] = "✅"


def disable_module(mod):
    """Mark *mod* as disabled in the registry."""
    mods[mod] = "❌"


def get_modules():
    """Return the live module-status registry."""
    return mods
# Domains the bot refuses to handle.
ban_domain = ["twitter", "deezer", "spotify"]
# Accepted numeric choices (e.g. for a count-style command option).
values = Literal[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 25, 30, 40, 50]
# Per-site embed styling: icon URL, accent color, and optional extra message.
# The "générique" (generic) entry is the fallback for unrecognized sites.
sites_dict = {
    "dailymotion": {
        "icon_url": "https://upload.wikimedia.org/wikipedia/commons/2/27/Logo_dailymotion.png",
        "color": 0x00bff9,
        "message": None
    },
    "soundcloud": {
        "icon_url": "https://play-lh.googleusercontent.com/lvYCdrPNFU0Ar_lXln3JShoE-NaYF_V-DNlp4eLRZhUVkj00wAseSIm-60OoCKznpw=w240-h480",
        "color": 0xff6800,
        "message": None
    },
    "tiktok": {
        "icon_url": "https://cdn.pixabay.com/photo/2021/06/15/12/28/tiktok-6338432_960_720.png",
        "color": 0xee1d52,
        "message": None
    },
    "twitch": {
        "icon_url": "https://static-00.iconduck.com/assets.00/twitch-icon-2048x2048-tipdihgh.png",
        "color": 0x9146ff,
        "message": None
    },
    "twitter": {
        "icon_url": "https://e7.pngegg.com/pngimages/804/985/png-clipart-social-media-logo-computer-icons-information-twitter-logo-media.png",
        "color": 0x05acf0,
        "message": None
    },
    "youtube": {
        "icon_url": "https://cdn.icon-icons.com/icons2/1099/PNG/512/1485482355-youtube_78661.png",
        "color": 0xfe0000,
        "message": None
    },
    "reddit": {
        "icon_url": "https://freelogopng.com/images/all_img/1658834272reddit-logo-transparent.png",
        "color": 0xff4500,
        "message": None
    },
    "générique": {
        "thumbnail": "https://images.frandroid.com/wp-content/uploads/2018/08/guide-apps-video-android.jpg",
        "icon_url": "https://cdn0.iconfinder.com/data/icons/basic-uses-symbol-vol-2/100/Help_Need_Suggestion_Question_Unknown-512.png",
        "color": 0xffffff,
        "message": None
    }
}
|
Tintin361/Kiri-Chan
|
tools/variables.py
|
variables.py
|
py
| 2,005 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.Literal",
"line_number": 19,
"usage_type": "name"
}
] |
44091065220
|
import os
import gc
import re
import sys
import copy
import time
import random
import tempfile
import logging
import cPickle as cp
import multiprocessing
import subprocess
import deepity
import numpy as np
import numpy.random as npr
import smat as sm
import scipy
import scipy.stats
from . import std
from . import hpsearch as hp
from . import _io_
from . import util
from .data import datasource
from . import globals
from .report import training_report, calc_auc, bootstrap_auc
import node as _node
import trainer as _trainer
class _object_factory_from_file(object):
    """Callable that lazily loads an object from a file; when a fieldname is
    given and the loaded object is a dict, returns only that entry."""

    def __init__(self, filename, fieldname=None):
        self.filename = filename
        self.fieldname = fieldname

    def __call__(self):
        loaded = _io_.load(self.filename)
        if self.fieldname and isinstance(loaded, dict):
            return loaded[self.fieldname]
        return loaded
def _create_model(model_proto, hparams):
model = copy.deepcopy(model_proto)
for key,val in hparams.iteritems():
prefix,path = key.split(":") # look for hparams named "model:..."
if prefix == "model":
nodepath,attrname = path.rsplit(".",1)
node = model.find(nodepath)
if hasattr(node,"set_"+attrname):
getattr(node,"set_"+attrname)(model,val) # call model.set_xxx(val)
else:
setattr(node,attrname,val)
return model
def _create_trainer(trainer_proto, hparams):
trainer = copy.deepcopy(trainer_proto)
for key,val in hparams.iteritems():
prefix,attrname = key.split(":") # look for hparams named "trainer:..."
if prefix == "trainer":
if hasattr(trainer,"set_"+attrname):
getattr(trainer,"set_"+attrname)(model,val) # call trainer.set_xxx(val)
else:
setattr(trainer,attrname,val)
return trainer
def _slice_hparams(hparams, inst):
h = copy.deepcopy(hparams)
for key in h.keys():
h[key] = h[key][inst]
return h
def load_hparams_result(filename):
    """Parse a saved hyperparameter file back into an hpsearch sample.

    Lines of the form "# metric = <value>" set the result score; every other
    line is "<name> = <value>", stored as a float except for ":cfgname".
    """
    params = {}
    result = 0.0
    with open(filename, 'r') as fh:
        for line in fh:
            # Look for validation performance
            metric = re.findall("# metric = (\S+)", line)
            if metric:
                result = float(metric[0])
                continue
            # Add hparam
            name, value = re.findall(" *(\S+) += (\S+)", line)[0]
            params[name] = value if name in [":cfgname"] else float(value)
    return hp.sample(params, result)
def save_hparams_result(filename, hparams_result, metric_key):
    """Write an hpsearch sample to *filename*; when metric_key is given,
    prefix the file with a "# metric = ..." line for that key."""
    util.makepath(os.path.dirname(filename))
    with open(filename, 'w') as fh:
        if metric_key:
            fh.write("# metric = %f (%s)\n" % (hparams_result.metrics[metric_key], metric_key))
        fh.write(hparams2str(hparams_result.params))
def _save_model_inst(filename, inst, model, hparams):
    """Slice instance *inst* out of an arrayed model, apply that instance's
    scalar hyperparameters to it, and pickle it to *filename*."""
    m = copy.deepcopy(model)
    sm.sync()
    # Slice the trainable weights
    m.slice_inst(inst)
    # Also slice the hyperparams, and replace corresponding 'arrayed'
    # attributes in the model with their scalar (sliced element) counterpart.
    h = _slice_hparams(hparams,inst)
    for key,val in h.iteritems():
        prefix,path = key.split(":") # look for hparams named "model:..."
        if prefix != "model":
            continue
        nodepath,attrname = path.rsplit(".",1)
        node = m.find(nodepath)
        if hasattr(node,"set_"+attrname):
            # NOTE(review): the hook receives the ORIGINAL (unsliced) model,
            # mirroring _create_model; confirm this is intentional.
            getattr(node,"set_"+attrname)(model,val) # call model.set_xxx(val)
        else:
            setattr(node,attrname,val)
    # Dump the model
    util.makepath(os.path.dirname(filename))
    with open(filename,'wb') as f:
        cp.dump(m,f)
    sm.sync() # Make sure we wait until the sarrays are all dumped
def gen_predictions(model, data):
    """Evaluate *model* over *data* in batches of 128 (no reshuffle) and
    return the stacked prediction matrix as a numpy array."""
    # We must feed each sequence through the model several times
    # by applying the model repeatedly on sliding a window along the sequence.
    # That generates a prediction map, from which we can take max, sum, etc.
    predictions = []
    gmaps = {}
    batches = data.asbatches(batchsize=128, reshuffle=False)
    for batch in batches:
        args = batch.input_data()
        args["want_bprop_inputs"] = False
        # Pick the loss matching the output layer: NLL for softmax, else MSE.
        if isinstance(model.Z.origin().node,std.softmaxnode):
            args["bprop_inputs_loss"] = std.nll()
        else:
            args["bprop_inputs_loss"] = std.mse()
        outputs = model.eval(**args)
        Z = outputs['Z'].asnumpy()
        Zmask = outputs.get('Zmask',None)
        if Zmask is not None:
            # Keep only the rows selected by the mask.
            Zmask = Zmask.asnumpy()
            Z = Z[Zmask.ravel()]
        predictions.append(Z)
    # Concatenate all numpy arrays if they're the same size
    predictions = np.vstack(predictions)
    return predictions
def getinstdir(outdir, targetname, trialnum, foldid):
    """Resolve an output-directory spec into a concrete path string.

    *outdir* is either a ready-made path string, or a list whose elements
    are literal path parts or (name, pattern) tuples; each tuple is expanded
    with the matching target/trial/fold value, and tuples whose value is
    None are dropped from the joined path.
    """
    if isinstance(outdir, str):
        return outdir
    subst = {"target": targetname,
             "trial": trialnum,
             "fold": foldid}
    parts = []
    for item in outdir:
        if isinstance(item, tuple):
            name, patt = item
            item = None if subst[name] is None else patt % subst[name]
        parts.append(item)
    return "/".join(p for p in parts if p is not None)
def load_metrics(filename):
    """Read a metrics table written by save_metrics back into a nested dict
    of the form {groupname: {fieldname: value-string}}."""
    metrics = {}
    with open(filename) as fh:
        groupnames = fh.readline().rstrip().split()
        for row in fh:
            fields = row.rstrip().split()
            # fields[0] is the metric name; the rest line up with the groups.
            for colidx, cell in enumerate(fields[1:]):
                group = metrics.setdefault(groupnames[colidx], {})
                group[fields[0]] = cell
    return metrics
def save_metrics(outfile, metrics):
    """Write a nested {group: {field: value}} dict as an aligned,
    tab-separated table; floats are formatted %.6f (or %.2e for p-values,
    i.e. fields ending in ".p"), missing cells become NaN."""
    groupnames = sorted(metrics.keys())
    fieldnames = sorted({name for g in groupnames for name in metrics[g].keys()})
    with open(outfile, "w") as fh:
        fh.write(" " * 14 + "\t".join(groupnames) + "\n")
        for fieldname in fieldnames:
            cells = []
            for groupname in groupnames:
                val = metrics[groupname].setdefault(fieldname, np.nan)
                if not isinstance(val, np.ndarray) and isinstance(val, float):
                    fmt = "%.2e" if fieldname.endswith(".p") else "%.6f"
                    val = fmt % val
                cells.append(str(val))
            fh.write(fieldname + " " * max(0, 14 - len(fieldname)) + "\t".join(cells) + "\n")
def call_dumpviz(dumpdir):
    """Launch dumpviz.py (next to this module) on *dumpdir* as a detached
    subprocess; does not wait for it to finish."""
    script = os.path.dirname(__file__) + "/dumpviz.py"
    subprocess.Popen(["python", script, dumpdir])
##########################################
class hypertrain_worker(object):
"""
Given a dataset and specific hyperparameters, this object will
simply train a model (an array of models) and return the
validation error (array of validation errors).
"""
    def __init__(self, worker_id, model_proto, trainer_proto, datasrc,
                 nfold, allfolds, outdir, report_class, devices, verbose,
                 default_dtype, global_flags, auxfilter, mode, dumpviz):
        """Bind this worker to a GPU device, set up per-worker logging and
        random seeding, and open its private copy of the datasource."""
        self.worker_id = worker_id
        # All the data subsets in 'trainset' will be merged into a single fold.
        self.model_proto = model_proto
        self.trainer_proto = trainer_proto
        self.datasrc = datasrc # Load a copy of the dataset into this worker process.
        self.nfold = nfold
        self.allfolds = allfolds
        self.outdir = outdir
        self.mode = mode
        self.aucrange = (0.5,0.5) # threshold for making AUCs out of non-binary targets, presumed to be in range [0,1]
        self.report_class = report_class
        self.auxfilter = auxfilter
        self.dumpviz = dumpviz
        globals.flags.copy_from(global_flags)
        # If we've been called from a new process, create a separate log file.
        # Otherwise everything is logged into the original log file.
        if multiprocessing.current_process().name != "MainProcess":
            logdir = getinstdir(outdir,None,None,None)
            worker_logfile = os.path.join(logdir,"hypertrain_worker%d.log" % worker_id)
            globals.set_logging(worker_logfile,level=verbose,echo=False)
            logging.info("\n----------------------------- %s -----------------------------" % time.strftime("%y-%m-%d %H-%M-%S",time.localtime()))
        # Configure deepity to use this worker's GPU device.
        logging.info("worker %d starting on device %d using %s" % (worker_id,devices[worker_id],sm.get_default_dtype().__name__))
        rseed = int((time.time()*100000 + worker_id) % 2000)
        globals.reset_backend(device=devices[worker_id], seed=rseed)
        random.seed(rseed)
        sm.set_default_dtype(default_dtype)
        npr.seed(rseed)
        # Seed this process's random number generator, for reproducibility
        sm.sync()
        # Prepare the datasource to serve data.
        self.datasrc.open()
    def __del__(self):
        # Release the datasource and tear down the GPU backend when the
        # worker is garbage collected; gc.collect() first so no stray
        # references keep backend arrays alive past destroy_backend().
        self.datasrc.close()
        self.datasrc = None
        gc.collect() # Clear out the cruft and make sure the backend can be destroyed
        sm.sync()
        sm.destroy_backend()
    def __call__(self, hparams, task_ids, sample_ids):
        """Train one model per task with the given (arrayed) hyperparameters
        using nfold cross-validation; returns, per task index, a list of
        (hparams_i, metrics_i) tuples for the hyperparameter search."""
        # Determine what kind of targets we want to train on
        data = self.datasrc.astargets(task_ids) # Copies of arbitrary targets
        data = data[:] # Copy so that when we normalize etc we don't affect the original data
        # Normalize the targets. For logisitic-output models this means
        # scaling targets to [0,1]. For other models this means scaling
        # targets to have mean=0, variance=1.
        data.requirements = self.model_proto.data_requirements()
        #print np.percentile(data.Y[data.Ymask].ravel(), [99, 99.99, 99.995, 99.999])
        #print data.Y.size, int(data.Y.size*(100-99.95)/100)
        if "clamp_targets" in globals.flags:
            data.clamp_extremes(0.0,99.95)
        if "normalize_targets" in globals.flags:
            data.normalize_targets()
        #data.arcsinhtransform_targets()
        if self.mode != 'calib':
            # If we're not in calibration mode, then there's no need for multiple checkpoints
            # -- just keep the last checkpoint so that it can be dumped to disk
            #del hparams["trainer:checkpoints"]
            self.trainer_proto.checkpoints = 1
        # Shuffle the individual rows of data, always the same random shuffle
        # and therefore always the same random split each time the code is run.
        data.shuffle()
        # Create a callback handler to collect predictions and evaluate final performance
        checkpoints = self.report_class()
        # Perform k-fold cross validation (k=nfold), training with one fold held out at a time.
        for foldid in range(self.nfold):
            checkpoints.setfold(foldid) # Tell the checkpoint
            # Create a new model and trainer with the given hyperparams
            model = _create_model(self.model_proto, hparams)
            trainer = _create_trainer(self.trainer_proto, hparams)
            # Split the data into training and validation sets
            trdata, vadata = data.split(foldid, self.nfold-1)
            trdata = trdata.augmented(trdata)
            datasets = { "train" : trdata }
            if vadata:
                vadata = vadata.augmented(vadata)
                datasets["validate"] = vadata
                if self.auxfilter:
                    # Extra validation subset restricted to the auxiliary fold ids.
                    datasets["validate_aux"] = vadata[[i for i in range(len(vadata)) if vadata.foldids[i] in self.auxfilter]]
            for dset in datasets.values():
                dset.requirements = model.data_requirements()
            #if not checkpoint_callback:
            #    trainer.viz_steps = False # Disable periodic updates if no reports
            # Train the model and remember how well it performed.
            trainer.train(model, datasets, checkpoints)
            if self.mode == 'train' and self.nfold > 1:
                # Dump this fold's model, metrics and predictions as we go.
                entries = checkpoints.curr()
                metrics = self.calc_metrics(entries)
                self.save_model(model, hparams, task_ids, sample_ids, foldid)
                self.save_metrics(metrics, task_ids, sample_ids, foldid)
                self.save_predictions(entries, task_ids, sample_ids, foldid)
                self.call_dumpviz(task_ids, sample_ids, foldid)
            # If we`re only supposed to try one fold, then don`t bother looping over the other splits
            if not self.allfolds:
                break
        # Consolidate the separate folds, and dump them if need be
        entries = checkpoints.combined()
        # Calculate the performance stats associated with each target
        metrics = self.calc_metrics(entries)
        # Save the current model and predictions
        if self.mode == 'train':
            self.save_predictions(entries, task_ids, sample_ids, None)
            self.save_metrics(metrics, task_ids, sample_ids, None)
            if self.nfold == 1:
                self.save_model(model, hparams, task_ids, sample_ids, None)
                self.save_preprocessors(data, task_ids, sample_ids, None)
                #self.call_dumpviz(task_ids, sample_ids, None)
        # Return a new hparams object with the performance incorporated
        hpsearch_result = self.add_hparam_metrics(hparams, metrics)
        return hpsearch_result
def save_model(self, model, hparams, task_ids, sample_ids, foldid):
for i, taskid in enumerate(task_ids):
dumpdir = getinstdir(self.outdir, taskid, sample_ids[i], foldid)
util.makepath(dumpdir)
# Slice out model i and save it to disk
_save_model_inst(dumpdir+"/model.pkl", i, model, hparams)
def save_predictions(self, entries, task_ids, sample_ids, foldid):
for i, taskid in enumerate(task_ids):
dumpdir = getinstdir(self.outdir, taskid, sample_ids[i], foldid)
util.makepath(dumpdir)
# Save out the predictions for model i
assert len(entries[i]) == 1, "Bug. Expected only a single unique 'step' in the list of entries"
groups = entries[i].values()[0]
np.savez_compressed(dumpdir+"/predict.npz",
targetname=np.asarray(taskid, dtype=object),
groups=np.asarray(groups, dtype=object))
def save_metrics(self, metrics, task_ids, sample_ids, foldid):
    # Dump each task's metric groups as a text report in its instance dir.
    for idx, taskid in enumerate(task_ids):
        dstdir = getinstdir(self.outdir, taskid, sample_ids[idx], foldid)
        util.makepath(dstdir)
        per_task = metrics[idx]
        assert len(per_task) == 1, "Bug. Expected only a single unique 'step' in the list of entries"
        groups = list(per_task.values())[0]
        # Calls the module-level save_metrics helper (not this method).
        save_metrics(dstdir + "/metrics.txt", groups)
def call_dumpviz(self, task_ids, sample_ids, foldid):
    # Generate visualization dumps for every task instance, unless disabled.
    if not self.dumpviz:
        return
    for idx, taskid in enumerate(task_ids):
        # The module-level call_dumpviz does the actual work per directory.
        call_dumpviz(getinstdir(self.outdir, taskid, sample_ids[idx], foldid))
def save_preprocessors(self, data, task_ids, sample_ids, foldid):
    # Persist the data preprocessors, one slice per task instance.
    for idx, taskid in enumerate(task_ids):
        dstdir = getinstdir(self.outdir, taskid, sample_ids[idx], foldid)
        data.dump_preprocessors(dstdir, slice(idx, idx + 1))
def add_hparam_metrics(self, hparams, metrics):
    # Pair each task's hyperparameter assignment with its measured
    # performance; prefer validation metrics, fall back to training.
    group = "validate" if "validate" in list(metrics[0].values())[0] else "train"
    result = {}
    for taskidx in metrics:
        for step in metrics[taskidx]:
            # Slice this task's value out of every vectorized hyperparameter.
            hp_i = dict((name, vec[taskidx]) for name, vec in hparams.iteritems())
            hp_i["trainer:max_steps"] = step
            # The (hparams, metrics) tuple is what hpsearch consumes downstream.
            result.setdefault(taskidx, []).append((hp_i, metrics[taskidx][step][group]))
    return result
"""
if "vloss" in stats and stats["vloss"] is not None:
loss.append(stats["vloss"])
auc.append(stats["vauc"])
else:
loss.append(stats["tloss"])
auc.append(stats["tauc"])
if self.testfilter is not None:
tidx = [i for i in range(len(vdata)) if vdata.foldids[i] in self.testfilter]
tdata = vdata[tidx]
tpred = gen_predictions(model, tdata)
testauc,teststd = bootstrap_auc(tpred.ravel(), tdata.Y.ravel(), ntrial=20)
flogfile = self.outdir + "/%s_%04d/fold%d.log" % (task_ids[0], sample_ids[0], foldid)
with open(flogfile) as fh:
flog = fh.readlines()
flog[-1] = flog[-1].rstrip() + "\ttestAUC=%.3f (%f)\n" % (testauc,teststd)
with open(flogfile,"w") as fh:
fh.writelines(flog)
testaucs.append((testauc, teststd))
if report:
reports.append(report)
report.dump(want_html=True)
#report.dump(want_html=self.want_html)
# Dump each model to a separate file
for inst in range(len(sample_ids)):
filename = self.outdir + ("/%s_%04d/fold%d.model.pkl" % (task_ids[inst], sample_ids[inst], foldid))
_save_model_inst(filename, inst, model, hparams)
"""
#break
""""
if reports != []:
# Dump the separate (individual) hyperparams that were used for each instance trained
for inst in range(len(sample_ids)):
dumpdir = self.outdir + ("/%s_%04d/" % (task_ids[inst], sample_ids[inst]))
vloss = self.validation_performance[task_ids[inst]] if self.validation_performance else None
_dump_hparams(dumpdir, _slice_hparams(hparams,inst), vloss)
tdata.dump_preprocessors(dumpdir, slice(inst,inst+1))
merged = self.report_class.merge_reports(self.outdir + "/%(task_id)s_%(sample_id)04d/final.log", task_ids, sample_ids, reports)
#merged.dump(want_html=self.want_html)
merged.dump()
if testaucs:
flogfile = self.outdir + "/%s_%04d/final.log" % (task_ids[0], sample_ids[0])
with open(flogfile) as fh:
flog = fh.readlines()
testauc = sum([_auc for _auc, _std in testaucs]) / len(testaucs)
teststd = sum([_std for _auc, _std in testaucs]) / len(testaucs)
flog[-1] = flog[-1].rstrip() + "\ttestAUC=%.3f (%f)\n" % (testauc,teststd)
with open(flogfile,"w") as fh:
fh.writelines(flog)
# Average the loss over each fold
loss = np.mean(np.asarray(loss),axis=0)
auc = np.mean(np.asarray(auc),axis=0)
# Dump each average loss and corresponding hyperparameters into a log file
for inst in range(len(sample_ids)):
util.makepath(self.outdir+"/hpsearch")
with open(self.outdir+"/hpsearch/%s.log"%task_ids[inst],"a") as f:
f.write("%.6f\t%.4f\t%s\n"%(loss[inst], auc[inst], hparams2str( _slice_hparams(hparams,inst) ).replace("\n",";")) )
"""
sm.sync()
# Return a list of objective values, one per search_id
values = [float(x) for x in loss]
return values
def calc_metrics(self, entries):
    # Compute performance statistics for every (task, step, group) entry.
    stats = {}
    for taskidx in entries:
        for step in entries[taskidx]:
            for group in entries[taskidx][step]:
                entry = entries[taskidx][step][group]
                slot = stats.setdefault(taskidx, {}).setdefault(step, {}).setdefault(group, {})
                slot["loss"] = entry["L"]
                # AUC-style metrics only apply to single-output targets.
                if entry["Z"].shape[1] == 1:
                    slot.update(deepity.calc_metrics(entry["Z"].ravel(),
                                                     entry["Y"].ravel(),
                                                     self.aucrange))
    return stats
def hparams2str(params):
    # Render hyperparameters as " name = value" lines, keys sorted and the
    # name column padded to 20 characters for alignment.
    lines = []
    for name in sorted(params):
        val = params[name]
        # Large arrays are abbreviated to keep the printout readable.
        if isinstance(val, np.ndarray) and val.size > 10:
            val = "ndarray"
        lines.append(" %s = %s\n" % (name + " " * max(0, 20 - len(name)), val))
    return "".join(lines)
#######################################
def hypertrain(model, trainer, data,
               nfold=2, allfolds=True, outdir=None, nsample=20,
               devices=None, verbose=None, report_class=None,
               auxfilter=None):
    """Search the hyperparameter space for model/trainer and return samples."""
    if report_class is None:
        report_class = training_report
    if outdir is None:
        # Fall back to a scratch directory when the caller gives no outdir.
        outdir = join(tempfile.gettempdir(), "hypertrain")
    # The space is derived from the undetermined params of model + trainer.
    space = _get_hypertrain_searchspace(model, trainer)
    initargs = (model, trainer, data, nfold, allfolds, outdir, report_class,
                devices, False, sm.get_default_dtype(), globals.flags,
                auxfilter, "calib", False)
    logging.info("calibrating...")
    samples = hp.search(space,
                        objective=hypertrain_worker,
                        objective_initargs=initargs,
                        task_ids=data.targetnames,
                        nsample=nsample,
                        nprocess=len(devices),
                        nsample_per_process=15,
                        print_progress=True)
    logging.info("...calibrating done")
    return samples
###########################################
def train(model, trainer, data, hparams=None, hparams_metric=None,
          nfold=1, outdir=None, nsample=1,
          devices=None, verbose=None, report_class=None,
          auxfilter=None, dumpviz=True):
    """Train final models, optionally seeded with fixed hyperparameters."""
    if report_class is None:
        report_class = training_report
    if hparams:
        # Record the calibration result in every instance directory up front.
        for targetname in data.targetnames:
            for sample in range(nsample):
                for fold in range(nfold):
                    dstdir = getinstdir(outdir, targetname, sample, fold)
                    save_hparams_result(dstdir + "/calib.txt",
                                        hparams[targetname], hparams_metric)
    # A "fixed" space pins every hyperparameter to its calibrated value.
    space = _get_fixed_searchspace(model, trainer, data.targetnames, hparams)
    final_outdir = outdir
    logging.info("train...")
    hp.search(space,
              objective=hypertrain_worker,
              objective_initargs=(model, trainer, data, nfold, True,
                                  final_outdir, report_class, devices, verbose,
                                  sm.get_default_dtype(), globals.flags,
                                  auxfilter, "train", dumpviz),
              task_ids=data.targetnames,
              nsample=nsample,
              # Hack: only train a couple of models at a time, so that when
              # nsample>1 the next sample gets a different minibatch order.
              nsample_per_process=2,
              nprocess=len(devices))
    logging.info("...train done")
#######################################################
def _get_fixed_searchspace(model, trainer, targetnames, hparams):
    # Build a degenerate search space in which every parameter is pinned to
    # the per-target values discovered during calibration.
    pdefs = []
    if hparams:
        # Transpose: list of per-target dicts -> dict of per-key value lists.
        vectors = {}
        for targetname in targetnames:
            sample = hparams[targetname]
            for pkey in sample.params:
                vectors.setdefault(pkey, []).append(sample.params[pkey])
        for pkey in vectors:
            pdefs.append(hp.fixed(np.array(vectors[pkey]), pkey))
    return hp.space(pdefs)
def _get_hypertrain_searchspace(model, trainer):
    # Gather every undetermined hyperparameter from the model graph and from
    # the trainer, namespacing the names so the two sets cannot collide.
    model_hparams = []
    def collect_hparam(path, attr):
        # Visitor invoked for every attribute in the model's dependency graph.
        if isinstance(attr, hp.paramdef):
            attr.name = "model:" + path
            model_hparams.append(attr)
    model.visit(collect_hparam)
    trainer_hparams = []
    for name, attr in trainer.__dict__.items():
        if isinstance(attr, hp.paramdef):
            attr.name = "trainer:" + name
            trainer_hparams.append(attr)
    return hp.space(trainer_hparams + model_hparams)
|
jisraeli/DeepBind
|
code/libs/deepity/deepity/hypertrain.py
|
hypertrain.py
|
py
| 25,376 |
python
|
en
|
code
| 85 |
github-code
|
6
|
[
{
"api_name": "copy.deepcopy",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "copy.deepcopy",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "smat.sync",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "cPickle.dump",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "smat.sync",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "data.asbatches",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 199,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 200,
"usage_type": "attribute"
},
{
"api_name": "subprocess.Popen",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 208,
"usage_type": "attribute"
},
{
"api_name": "multiprocessing.current_process",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 244,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 244,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "time.strftime",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "time.localtime",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "smat.get_default_dtype",
"line_number": 250,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "random.seed",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "smat.set_default_dtype",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 255,
"usage_type": "name"
},
{
"api_name": "smat.sync",
"line_number": 258,
"usage_type": "call"
},
{
"api_name": "gc.collect",
"line_number": 266,
"usage_type": "call"
},
{
"api_name": "smat.sync",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "smat.destroy_backend",
"line_number": 268,
"usage_type": "call"
},
{
"api_name": "data.requirements",
"line_number": 280,
"usage_type": "attribute"
},
{
"api_name": "data.clamp_extremes",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "data.normalize_targets",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "data.shuffle",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "data.split",
"line_number": 311,
"usage_type": "call"
},
{
"api_name": "trainer.train",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "numpy.savez_compressed",
"line_number": 378,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 379,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 380,
"usage_type": "call"
},
{
"api_name": "data.dump_preprocessors",
"line_number": 405,
"usage_type": "call"
},
{
"api_name": "smat.sync",
"line_number": 486,
"usage_type": "call"
},
{
"api_name": "deepity.calc_metrics",
"line_number": 506,
"usage_type": "call"
},
{
"api_name": "numpy.ndarray",
"line_number": 513,
"usage_type": "attribute"
},
{
"api_name": "report.training_report",
"line_number": 525,
"usage_type": "name"
},
{
"api_name": "tempfile.gettempdir",
"line_number": 529,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 535,
"usage_type": "call"
},
{
"api_name": "smat.get_default_dtype",
"line_number": 538,
"usage_type": "call"
},
{
"api_name": "data.targetnames",
"line_number": 539,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 544,
"usage_type": "call"
},
{
"api_name": "report.training_report",
"line_number": 555,
"usage_type": "name"
},
{
"api_name": "data.targetnames",
"line_number": 558,
"usage_type": "attribute"
},
{
"api_name": "data.targetnames",
"line_number": 564,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 578,
"usage_type": "call"
},
{
"api_name": "smat.get_default_dtype",
"line_number": 581,
"usage_type": "call"
},
{
"api_name": "data.targetnames",
"line_number": 582,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 586,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 601,
"usage_type": "call"
},
{
"api_name": "trainer.__dict__.iteritems",
"line_number": 619,
"usage_type": "call"
},
{
"api_name": "trainer.__dict__",
"line_number": 619,
"usage_type": "attribute"
}
] |
26529063971
|
from copy import deepcopy
from flask import abort
from flask import Blueprint
from flask import request
from flask_api import status
from oneview_redfish_toolkit.api.event import Event
from oneview_redfish_toolkit.api.event_service import EventService
from oneview_redfish_toolkit.blueprints.util.response_builder import \
ResponseBuilder
from oneview_redfish_toolkit import config
from oneview_redfish_toolkit import util
# Flask blueprint exposing the Redfish EventService endpoints.
event_service = Blueprint("event_service", __name__)
# Sample OneView SCMB alert message used by SubmitTestEvent for "Alert" events.
ONEVIEW_TEST_ALERT = {
    "timestamp": "2018-02-12T20:12:03.231Z",
    "resource": {
        "category": "alerts",
        "associatedResource": {
            "resourceName": "0000A66101, bay 3",
            "resourceUri": "/rest/server-hardware/"
                           "30373737-3237-4D32-3230-313530314752"
        }
    }
}
# Sample OneView SCMB task message used for Resource* test events;
# changeType is filled in per request from REDFISH_TO_ONEVIEW_EVENTS.
ONEVIEW_TEST_TASK = {
    "timestamp": "2018-02-12T20:12:03.231Z",
    "resourceUri": "/rest/server-hardware/"
                   "30373737-3237-4D32-3230-313530314752",
    "changeType": None,
    "resource": {
        "category": "server-hardware",
        "name": "0000A66101, bay 3"
    }
}
# Maps Redfish event types to the corresponding OneView changeType values.
REDFISH_TO_ONEVIEW_EVENTS = {
    "ResourceAdded": "Created",
    "ResourceUpdated": "Updated",
    "ResourceRemoved": "Deleted"
}
@event_service.route("/redfish/v1/EventService/", methods=["GET"])
def get_event_service():
    """Serve the EventService resource for GET /redfish/v1/EventService.

    Returns:
        JSON: JSON with EventService.
    """
    retry_attempts = util.get_delivery_retry_attempts()
    retry_interval = util.get_delivery_retry_interval()
    return ResponseBuilder.success(EventService(retry_attempts, retry_interval))
@event_service.route(
    "/redfish/v1/EventService/Actions/EventService.SubmitTestEvent/",
    methods=["POST"])
def execute_test_event_action():
    """Handle the SubmitTestEvent action.

    Builds a synthetic OneView-style message matching the requested
    EventType, dispatches it, and echoes the created Event back.

    Returns:
        JSON: JSON containing the EventType.
    Exceptions:
        Exception: Missing EventType property.
            Return Bad Request status(400)
    """
    # The action is only available when authentication mode is "conf".
    if not config.auth_mode_is_conf():
        abort(status.HTTP_404_NOT_FOUND,
              "EventService is not enabled.")
    try:
        event_type = request.get_json()['EventType']
    except Exception:
        abort(status.HTTP_400_BAD_REQUEST,
              'Invalid JSON data. Missing EventType property.')
    if event_type not in util.get_subscriptions_by_type().keys():
        abort(status.HTTP_400_BAD_REQUEST,
              'Invalid EventType value: %s' % event_type)
    # Pick the sample SCMB template matching the requested event type.
    template = ONEVIEW_TEST_ALERT if event_type == "Alert" else ONEVIEW_TEST_TASK
    message = deepcopy(template)
    if event_type != "Alert":
        message['changeType'] = REDFISH_TO_ONEVIEW_EVENTS[event_type]
    event = Event(message)
    util.dispatch_event(event)
    return ResponseBuilder.response(event, status.HTTP_202_ACCEPTED)
|
HewlettPackard/oneview-redfish-toolkit
|
oneview_redfish_toolkit/blueprints/event_service.py
|
event_service.py
|
py
| 3,130 |
python
|
en
|
code
| 16 |
github-code
|
6
|
[
{
"api_name": "flask.Blueprint",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "oneview_redfish_toolkit.api.event_service.EventService",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "oneview_redfish_toolkit.util.get_delivery_retry_attempts",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "oneview_redfish_toolkit.util",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "oneview_redfish_toolkit.util.get_delivery_retry_interval",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "oneview_redfish_toolkit.util",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "oneview_redfish_toolkit.blueprints.util.response_builder.ResponseBuilder.success",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "oneview_redfish_toolkit.blueprints.util.response_builder.ResponseBuilder",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "oneview_redfish_toolkit.config.auth_mode_is_conf",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "oneview_redfish_toolkit.config",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "flask_api.status.HTTP_404_NOT_FOUND",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "flask_api.status",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "flask.request.get_json",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "flask.request",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "flask_api.status.HTTP_400_BAD_REQUEST",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "flask_api.status",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "oneview_redfish_toolkit.util.get_subscriptions_by_type",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "oneview_redfish_toolkit.util",
"line_number": 91,
"usage_type": "name"
},
{
"api_name": "flask.abort",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "flask_api.status.HTTP_400_BAD_REQUEST",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "flask_api.status",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "oneview_redfish_toolkit.api.event.Event",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "oneview_redfish_toolkit.util.dispatch_event",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "oneview_redfish_toolkit.util",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "oneview_redfish_toolkit.blueprints.util.response_builder.ResponseBuilder.response",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "oneview_redfish_toolkit.blueprints.util.response_builder.ResponseBuilder",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "flask_api.status.HTTP_202_ACCEPTED",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "flask_api.status",
"line_number": 107,
"usage_type": "name"
}
] |
27277101793
|
from flask import Flask, redirect, render_template, request, url_for, session, flash
import sqlite3
import random
import datetime
import smtplib
from email.mime.text import MIMEText
# sqlite3 connection
# NOTE(review): opened at import time solely to create the schema below;
# it is never closed, and request handlers open their own connections.
conn = sqlite3.connect('mydatabase.db')
cursor = conn.cursor()
cursor.execute('''
CREATE TABLE IF NOT EXISTS users (
id INTEGER PRIMARY KEY,
first_name TEXT,
middle_name TEXT,
last_name TEXT,
date_of_birth DATE,
email TEXT UNIQUE,
gender TEXT,
user_id TEXT UNIQUE
)
''')
# WSGI application object; all routes below attach to it.
app = Flask(__name__)
# Role selection
@app.route("/", methods=["POST", "GET"])
def select_role():
    """Landing page: route the user to the admin login or the agent form."""
    if request.method == "POST":
        # Destination per selected role; role=admin is carried along as a
        # query parameter so the login view can verify it.
        destinations = {
            "admin": "/admin-login?role=admin",
            "agent": "/register-citizen",
        }
        target = destinations.get(request.form.get("role"))
        if target is not None:
            return redirect(target)
    return render_template("select_role.html")
# Admin login
@app.route("/admin-login", methods=["POST", "GET"])
def admin_login():
    """Password gate for the admin area.

    Requires ?role=admin in the query string; otherwise the user is sent
    back to the role-selection page.
    """
    # NOTE(review): hard-coded plaintext credential — move to configuration.
    admin_password = "admin123"
    if request.args.get("role") != "admin":
        # Only reachable via the role-selection flow.
        return redirect("/")
    if request.method != "POST":
        return render_template("admin_login.html")
    if request.form.get("admin_password") == admin_password:
        return redirect("/view-citizens")
    # Wrong password: re-render the form with an error message.
    return render_template(
        "admin_login.html",
        error_message="Incorrect password. Please try again.")
@app.route("/register-citizen", methods=[ "POST", "GET" ])
def register_citizen():
    """Agent-facing registration form.

    On POST, inserts a new citizen row (when the email is not already
    registered), generates a citizen ID and emails it to the applicant.
    On GET simply renders the form.
    """
    if request.method == "POST":
        conn = sqlite3.connect('mydatabase.db')
        try:
            cursor = conn.cursor()
            first_name = request.form["first_name"]
            middle_name = request.form["middle_name"]
            last_name = request.form["last_name"]
            email = request.form["email"]
            date_of_birth = request.form["date_of_birth"]
            gender = request.form["gender"]
            cursor.execute('SELECT id FROM users WHERE email = ?', (email,))
            existing_user = cursor.fetchone()
            # Only register the citizen if the email is not already taken.
            if not existing_user:
                user_id = generate_citizen_id()
                cursor.execute('INSERT INTO users (first_name, middle_name, last_name, email, date_of_birth, gender, user_id) VALUES (?, ?, ?, ?, ?, ?, ?)',
                               (first_name, middle_name, last_name, email, date_of_birth, gender, user_id))
                conn.commit()
                # Email the generated ID to the applicant.
                send_code_to_email(first_name, email, user_id)
        finally:
            # Bug fix: the connection was previously leaked on every POST.
            conn.close()
    return render_template("index.html")
# view citizen
@app.route("/view-citizen/<int:user_id>")
def view_citizen(user_id):
    """Show the detail page for a single citizen by database row id."""
    try:
        db = sqlite3.connect('mydatabase.db')
        record = db.execute(
            "SELECT id, first_name, middle_name, last_name, email, date_of_birth, gender, user_id FROM users WHERE id = ?",
            (user_id,)
        ).fetchone()
        db.close()
        return render_template("view_citizen.html", user=record)
    except Exception as e:
        return f"An error occurred: {str(e)}"
# update citizen
@app.route("/update-citizen/<int:user_id>", methods=["GET", "POST"])
def update_user(user_id):
    """Edit a citizen record: render the form on GET, apply changes on POST."""
    if request.method == "POST":
        # Collect the edited fields and persist them.
        values = (
            request.form["first_name"],
            request.form["middle_name"],
            request.form["last_name"],
            request.form["email"],
            user_id,
        )
        db = sqlite3.connect('mydatabase.db')
        db.cursor().execute("UPDATE users SET first_name=?, middle_name=?, last_name=?, email=? WHERE id=?", values)
        db.commit()
        db.close()
        return redirect("/view-citizens")
    # GET: pre-populate the form with the current values.
    db = sqlite3.connect('mydatabase.db')
    record = db.execute("SELECT id, first_name, middle_name, last_name, email FROM users WHERE id=?", (user_id,)).fetchone()
    db.close()
    return render_template("update_citizen.html", user_id=user_id, user=record)
# delete citizen
@app.route("/delete-citizen/<int:user_id>")
def delete_citizen(user_id):
    """Remove a citizen row and return to the overview page."""
    try:
        db = sqlite3.connect('mydatabase.db')
        db.cursor().execute("DELETE FROM users WHERE id=?", (user_id,))
        db.commit()
        db.close()
        return redirect("/view-citizens")
    except Exception as e:
        return f"An error occurred: {str(e)}"
# View all citizens
@app.route("/view-citizens")
def view_citizens():
    """List every registered citizen in a table."""
    try:
        db = sqlite3.connect('mydatabase.db')
        rows = db.execute(
            "SELECT id, first_name, middle_name, last_name, email, date_of_birth, gender, user_id FROM users"
        ).fetchall()
        db.close()
        return render_template("view_citizens.html", users=rows)
    except Exception as e:
        return f"An error occurred: {str(e)}"
# erin's function to send code to email
def send_code_to_email(first_name, email, user_id):
    """Email the generated citizen ID to the applicant.

    Connects to Gmail over STARTTLS and sends a single plain-text message.
    SMTP failures are logged to stdout; nothing is raised to the caller.
    """
    # Email configuration
    SMTP_SERVER = "smtp.gmail.com"
    SMTP_PORT = 587
    # SECURITY: credentials are hard-coded in source — move them to
    # environment variables / app config and rotate this app password.
    SMTP_USERNAME = "[email protected]"
    SMTP_PASSWORD = "nbxb qojo fyqm ewhn"
    msg = MIMEText(f"Hello {first_name}\n\tThank you for processing your application.\n\tYour ID number is: {user_id}.\n\tPLEASE DO NOT SHARE THIS WITH ANYONE!!!!!")
    msg["Subject"] = "Your Generated Code"
    msg["From"] = SMTP_USERNAME
    msg["To"] = email
    try:
        # Bug fix: the context manager guarantees the connection is closed
        # even when login/sendmail fails (the old code leaked it on error).
        with smtplib.SMTP(SMTP_SERVER, SMTP_PORT) as server:
            server.starttls()
            server.login(SMTP_USERNAME, SMTP_PASSWORD)
            server.sendmail(SMTP_USERNAME, [email], msg.as_string())
        print(f"Code sent to {email}")
    except smtplib.SMTPException as e:
        print("SMTP error:", e)
def generate_citizen_id():
    """Return a new 9-digit citizen ID as a string of digits.

    Uses the `secrets` module rather than `random`: the ID is treated as a
    confidential credential ("do not share"), so it must not be predictable.
    """
    import secrets  # local import keeps this fix self-contained
    citizen_id = ''.join(str(secrets.randbelow(10)) for _ in range(9))
    return citizen_id
if __name__ == '__main__':
    # Dev-server entry point; debug=True must not be used in production.
    app.run(debug=True)
|
Jordan1570/ID-proj
|
app.py
|
app.py
|
py
| 7,120 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sqlite3.connect",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "flask.request.form.get",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "sqlite3.connect",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 71,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 73,
"usage_type": "name"
},
{
"api_name": "email.mime.text",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "email.mime.text",
"line_number": 78,
"usage_type": "name"
},
{
"api_name": "email.mime.text",
"line_number": 90,
"usage_type": "name"
},
{
"api_name": "email.mime.text",
"line_number": 94,
"usage_type": "argument"
},
{
"api_name": "flask.render_template",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "email.mime.text",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "sqlite3.connect",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "email.mime.text",
"line_number": 131,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 147,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "flask.redirect",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "email.mime.text.MIMEText",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "email.mime.text",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "smtplib.SMTP",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "email.mime.text",
"line_number": 208,
"usage_type": "name"
},
{
"api_name": "email.mime.text",
"line_number": 212,
"usage_type": "name"
},
{
"api_name": "smtplib.SMTPException",
"line_number": 213,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 218,
"usage_type": "call"
}
] |
10247855105
|
#-*- coding: utf-8 -*-
from itertools import chain
from os.path import dirname, splitext
from sys import platform
from typing import Dict, List, Set, Union
from backend.converters import FileConverter, rar_executables
from backend.db import get_db
from backend.files import scan_files
from backend.volumes import Volume
# Registry of converters, built once at import time from every
# FileConverter subclass currently defined.
conversion_methods: Dict[str, Dict[str, FileConverter]] = {}
"source_format -> target_format -> conversion class"
for fc in FileConverter.__subclasses__():
    conversion_methods.setdefault(fc.source_format, {})[fc.target_format] = fc
def get_available_formats() -> Set[str]:
    """Return every format that at least one converter can produce.

    Returns:
        Set[str]: The list with all formats
    """
    targets: Set[str] = set()
    for target_map in conversion_methods.values():
        targets.update(target_map)
    return targets
def find_target_format_file(
    file: str,
    formats: List[str]
) -> Union[FileConverter, None]:
    """Pick the most preferred converter class for *file*.

    Args:
        file (str): The file to get the converter for.
        formats (List[str]): Target formats, ordered by preference
            (most preferred first).

    Returns:
        Union[FileConverter, None]: The best possible converter class,
            or None when no conversion is possible or wanted.
    """
    source_format = splitext(file)[1].lstrip('.').lower()
    if source_format not in conversion_methods:
        return None
    # Rar-based sources need an external rar binary for this platform.
    if source_format in ('rar', 'cbr') and platform not in rar_executables:
        return None
    available = conversion_methods[source_format]
    for preferred in formats:
        if preferred == source_format:
            # The file already has a format at least as preferred as
            # anything further down the list; leave it untouched.
            return None
        if preferred in available:
            return available[preferred]
    return None
def convert_file(file: str, formats: List[str]) -> str:
    """Convert a file to the most preferred reachable format.

    Args:
        file (str): The file to convert.
        formats (List[str]): Formats to convert to; order of the list is
            preference of format (left to right).

    Returns:
        str: The path of the converted file, or the original path when
            no conversion was possible.
    """
    converter_class = find_target_format_file(file, formats)
    if converter_class is None:
        return file
    return converter_class().convert(file)
def __get_format_pref_and_files(
    volume_id: int,
    issue_id: Union[int, None] = None
) -> List[str]:
    """Get the format preference and load the targeted files into the cursor.

    Side effect: leaves the shared DB cursor positioned on a result set of
    single-column rows (filepath) for the volume — or for one issue when
    ``issue_id`` is given.  Callers iterate the cursor afterwards.

    Args:
        volume_id (int): The ID of the volume to get the files for.
        issue_id (Union[int, None], optional): The ID of the issue to get
            the files for. Defaults to None.

    Returns:
        List[str]: The format preference in the settings.
    """
    cursor = get_db()
    # The setting is stored as one comma separated string; an empty
    # setting splits to [''], which we normalise to an empty list.
    format_preference = cursor.execute(
        "SELECT value FROM config WHERE key = 'format_preference' LIMIT 1;"
    ).fetchone()[0].split(',')
    if format_preference == ['']:
        format_preference = []
    # NOTE: 'if' below is used as a (legal) SQL alias for issues_files.
    if not issue_id:
        cursor.execute("""
            SELECT DISTINCT filepath
            FROM files f
            INNER JOIN issues_files if
            INNER JOIN issues i
            ON
                f.id = if.file_id
                AND if.issue_id = i.id
            WHERE volume_id = ?
            ORDER BY filepath;
            """,
            (volume_id,)
        )
    else:
        cursor.execute("""
            SELECT DISTINCT filepath
            FROM files f
            INNER JOIN issues_files if
            INNER JOIN issues i
            ON
                f.id = if.file_id
                AND if.issue_id = i.id
            WHERE
                volume_id = ?
                AND i.id = ?
            ORDER BY filepath;
            """,
            (volume_id, issue_id)
        )
    return format_preference
def preview_mass_convert(
    volume_id: int,
    issue_id: int = None
) -> List[Dict[str, str]]:
    """Get a list of suggested conversions for a volume or issue.

    Args:
        volume_id (int): The ID of the volume to check for.
        issue_id (int, optional): The ID of the issue to check for.
            Defaults to None.

    Returns:
        List[Dict[str, str]]: Suggestions; dicts have the keys
            `before` and `after`.
    """
    cursor = get_db()
    format_preference = __get_format_pref_and_files(volume_id, issue_id)
    suggestions: List[Dict[str, str]] = []
    for (filepath,) in cursor:
        converter = find_target_format_file(filepath, format_preference)
        if converter is None:
            continue
        if converter.target_format == 'folder':
            # Extracting means the "file" becomes its containing folder.
            new_path = dirname(filepath)
        else:
            new_path = splitext(filepath)[0] + '.' + converter.target_format
        suggestions.append({'before': filepath, 'after': new_path})
    return suggestions
def mass_convert(
    volume_id: int,
    issue_id: Union[int, None] = None,
    files: List[str]= []
) -> None:
    """Convert the files of a volume (or a single issue).

    Args:
        volume_id (int): The ID of the volume to convert for.
        issue_id (Union[int, None], optional): The ID of the issue to
            convert for. Defaults to None.
        files (List[str], optional): When non-empty, only convert files
            mentioned in this list. Defaults to [].
    """
    # Membership is tested once per file; a set makes each test O(1).
    wanted = set(files)
    cursor = get_db()
    format_preference = __get_format_pref_and_files(volume_id, issue_id)
    for (filepath,) in cursor.fetchall():
        if wanted and filepath not in wanted:
            continue
        converter = find_target_format_file(filepath, format_preference)
        if converter is not None:
            converter().convert(filepath)
    # Re-scan so the database reflects the new file paths.
    scan_files(Volume(volume_id).get_info())
    return
|
Casvt/Kapowarr
|
backend/conversion.py
|
conversion.py
|
py
| 5,191 |
python
|
en
|
code
| 221 |
github-code
|
6
|
[
{
"api_name": "typing.Dict",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "backend.converters.FileConverter",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "backend.converters.FileConverter.__subclasses__",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "backend.converters.FileConverter",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "itertools.chain.from_iterable",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "itertools.chain",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "typing.Set",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "os.path.splitext",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "sys.platform",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "backend.converters.rar_executables",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "backend.converters.FileConverter",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "backend.db.get_db",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "backend.db.get_db",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 146,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 185,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 186,
"usage_type": "name"
},
{
"api_name": "backend.db.get_db",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "backend.files.scan_files",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "backend.volumes.Volume",
"line_number": 220,
"usage_type": "call"
}
] |
2001310411
|
# -*- coding: utf-8 -*-
from django.db import models
from django.conf import settings
from .base import BaseModel
from .user_group import UserGroup
class Event(BaseModel):
    """A money event inside a group: either collecting money from members
    into the group fund, or spending money (paid by the group fund or by
    the individual members that joined the event)."""

    # ``range`` (not the Python-2-only ``xrange``) keeps this module
    # importable on both Python 2 and 3; unpacking two values is identical.
    TYPE_1, TYPE_2 = range(2)
    SOURCE_GROUP, SOURCE_INDIVIDUAL = range(2)
    EVENT_TYPES = (
        (TYPE_1, 'Collecting'),
        (TYPE_2, 'Spent')
    )
    SOURCE_TYPES = (
        (SOURCE_GROUP, 'Group'),
        (SOURCE_INDIVIDUAL, 'Individual')
    )
    # The group this event belongs to.
    group = models.ForeignKey(
        UserGroup,
        on_delete=models.CASCADE,
        related_name="events"
    )
    event_type = models.SmallIntegerField(
        choices=EVENT_TYPES)
    amount = models.DecimalField(default=0.0, max_digits=10, decimal_places=2)
    description = models.TextField(default=None)
    # Who pays when spending: the group fund or each joining member.
    source_money = models.SmallIntegerField(choices=SOURCE_TYPES, default=SOURCE_GROUP)
    # Comma separated ids of the members joining this event, e.g. "1,2,3".
    member_join = models.TextField(null=True)

    def collecting_money(self):
        """Create the transactions implied by this event.

        TYPE_1: every joining member pays ``amount`` into the group fund.
        TYPE_2: either the group fund pays once (SOURCE_GROUP) or every
        joining member pays individually (SOURCE_INDIVIDUAL).
        """
        from .transaction import Transaction
        if self.event_type == self.TYPE_1:
            for user in self.group.members.filter(id__in=self.member_join_list):
                Transaction.create_user_paid_transaction(
                    user=user,
                    amount=self.amount,
                    description=u"<@%s> đóng tiền vào quỹ '%s' cho event '%s'" % (user.userprofile.slack_id, self.group.name, self.description),
                    group=self.group,
                    paid_group=True,
                    event_id=self.id
                )
        if self.event_type == self.TYPE_2:
            if self.source_money == self.SOURCE_GROUP:
                Transaction.create_group_paid_transaction(
                    amount=self.amount,
                    description=u"<@%s> trả tiền cho event '%s'" % (self.group.name, self.description),
                    group=self.group,
                    event=self
                )
            if self.source_money == self.SOURCE_INDIVIDUAL:
                for user in self.group.members.filter(id__in=self.member_join_list):
                    Transaction.create_user_paid_transaction(
                        user=user,
                        amount=self.amount,
                        description=u"<@%s> trả tiền cho event '%s' của '%s'" % (user.userprofile.slack_id, self.description, self.group.name),
                        group=self.group,
                        paid_group=False,
                        event_id=self.id
                    )

    @property
    def event_complete_status(self):
        """True when no transaction of this event is still pending."""
        from .transaction import Transaction
        if self.transactions.filter(status=Transaction.PENDING).count() > 0:
            return False
        return True

    @property
    def member_join_list(self):
        """Joining member ids parsed from ``member_join``.

        Returns an empty list when nothing was recorded; the previous
        implementation crashed with AttributeError/ValueError for NULL
        or empty values (``member_join`` is nullable).
        """
        if not self.member_join:
            return []
        return [int(member) for member in self.member_join.split(",")]
|
luhonghai/expense
|
expense/apps/mobile_api/models/event.py
|
event.py
|
py
| 2,818 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "base.BaseModel",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "user_group.UserGroup",
"line_number": 24,
"usage_type": "argument"
},
{
"api_name": "django.db.models",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.db.models.CASCADE",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "django.db.models.SmallIntegerField",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "django.db.models.DecimalField",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "django.db.models.SmallIntegerField",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextField",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "transaction.Transaction.create_user_paid_transaction",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "transaction.Transaction",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "transaction.Transaction.create_group_paid_transaction",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "transaction.Transaction",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "transaction.Transaction.create_user_paid_transaction",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "transaction.Transaction",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "transaction.Transaction.PENDING",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "transaction.Transaction",
"line_number": 70,
"usage_type": "name"
}
] |
34832289590
|
import cv2
import numpy as np
cap = cv2.VideoCapture(0)  # capture frames from the webcam
# HSV ranges for each pen colour: [h_min, s_min, v_min, h_max, s_max, v_max]
pen_color_HSV = [[86, 121, 205, 111, 245, 255],
                 [46, 78, 172, 71, 255, 255],
                 [22, 70, 214, 31, 255, 255]
                 ]
# BGR drawing colour for the tip of each pen above (same order)
pen_color_BGR = [[255, 0, 0],
                 [0, 255, 0],
                 [0, 255, 255]
                 ]
# Every recorded stroke point and its colour: [x, y, color_ID]
draw_points = []
def find_pen(img):
    """Detect each configured pen colour in the frame, mark its tip on
    the global overlay (img_contour) and record the point for redrawing."""
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # Check every configured pen colour.
    for i in range(len(pen_color_HSV)):
        # Lower / upper HSV bounds for this pen colour.
        lower = np.array(pen_color_HSV[i][:3])
        upper = np.array(pen_color_HSV[i][3:6])
        mask = cv2.inRange(hsv, lower, upper)
        # Keep only the pixels of this colour.
        result = cv2.bitwise_and(img, img, mask = mask)
        pen_x, pen_y = find_contour(mask)
        cv2.circle(img_contour, (pen_x, pen_y), 10, pen_color_BGR[i], cv2.FILLED)
        # Only record the point when a contour was actually detected.
        if(pen_y != -1):
            draw_points.append([pen_x, pen_y, i])
        # cv2.imshow("result", result)
def find_contour(img):
    """Locate the pen tip in a binary mask.

    Returns the midpoint of the top edge (x + w//2, y) of the bounding
    box of the last contour with area > 500.  When nothing qualifies, y
    stays -1 — callers test ``pen_y != -1``.
    """
    # Detect outer contours only.
    contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    x, y, w, h = -1, -1, -1, -1
    for cnt in contours:
        cv2.drawContours(img_contour, cnt, -1, (0,0,0), 4)  # outline on the overlay
        # Bounding rectangle for sufficiently large contours.
        area = cv2.contourArea(cnt)
        if(area > 500):
            peri = cv2.arcLength(cnt, True)
            vertices = cv2.approxPolyDP(cnt, peri * 0.02, True)  # polygon vertices
            # Axis-aligned bounding box: top-left x/y, width, height.
            x, y, w, h = cv2.boundingRect(vertices)
    return x+w//2, y
# Redraw every recorded stroke point in its pen colour.
def draw(draw_points):
    for point in draw_points:
        cv2.circle(img_contour, (point[0], point[1]), 10, pen_color_BGR[point[2]], cv2.FILLED)
# Main loop: show the annotated video stream.
while(True):
    ret, frame = cap.read()  # ret: grab succeeded (bool); frame: the image
    if ret:
        img_contour = frame.copy()
        find_pen(frame)
        draw(draw_points)
        cv2.imshow("contour", img_contour)
    else:
        break
    if cv2.waitKey(1) == ord("q"):  # press 'q' to quit
        break
|
jim2832/Image-Recognition
|
virtual_pen.py
|
virtual_pen.py
|
py
| 2,304 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cv2.VideoCapture",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2HSV",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "cv2.inRange",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "cv2.bitwise_and",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "cv2.circle",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "cv2.FILLED",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "cv2.findContours",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "cv2.RETR_EXTERNAL",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "cv2.CHAIN_APPROX_NONE",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "cv2.drawContours",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "cv2.contourArea",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "cv2.arcLength",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "cv2.approxPolyDP",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "cv2.boundingRect",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "cv2.circle",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "cv2.FILLED",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 78,
"usage_type": "call"
}
] |
2103471277
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
sns.set_context(rc={'figure.figsize': (9, 9)}, font_scale=2.)
def load_embeddings(filename):
    """Load word embeddings into a DataFrame.

    Reads the generalized text format used by word2vec, GloVe, fastText
    and ConceptNet Numberbatch: an optional leading header line with the
    matrix dimensions (exactly two fields), then one
    "<label> <v1> <v2> ..." line per word.
    """
    labels, rows = [], []
    with open(filename, encoding='utf-8') as infile:
        for line in infile:
            items = line.rstrip().split(' ')
            if len(items) == 2:
                # Header row giving the matrix shape; no embedding here.
                continue
            labels.append(items[0])
            rows.append(np.array(items[1:], dtype='f'))
    return pd.DataFrame(np.vstack(rows), index=labels, dtype='f')
def load_lexicon(filename):
    """Load one half of Bing Liu's sentiment lexicon
    (https://www.cs.uic.edu/~liub/FBS/sentiment-analysis.html).

    The files are Latin-1 encoded word lists: one file holds positive
    words, the other negative words.  Comment lines starting with ';'
    and blank lines are skipped.
    """
    with open(filename, encoding='latin-1') as infile:
        return [
            word
            for word in (line.rstrip() for line in infile)
            if word and not word.startswith(';')
        ]
def load_data(data_path, embeddings_path, state=0):
    """Build a train/test split of sentiment-labelled word vectors.

    Args:
        data_path: directory containing Bing Liu's 'positive-words.txt'
            and 'negative-words.txt'.
        embeddings_path: embedding file readable by ``load_embeddings``.
        state: random_state forwarded to ``train_test_split``.

    Returns:
        (embeddings, X_train, X_test, y_train, y_test,
         train_vocab, test_vocab) with one-hot encoded y arrays.
    """
    pos_words = load_lexicon(data_path + '/positive-words.txt')
    neg_words = load_lexicon(data_path + '/negative-words.txt')
    embeddings = load_embeddings(embeddings_path)
    # Keep only lexicon words that actually have an embedding.
    pos_vectors = embeddings.loc[pos_words].dropna()
    neg_vectors = embeddings.loc[neg_words].dropna()
    vectors = pd.concat([pos_vectors, neg_vectors])
    # Targets: +1 for positive words, -1 for negative words.
    targets = np.array([1 for entry in pos_vectors.index] + [-1 for entry in neg_vectors.index])
    labels = list(pos_vectors.index) + list(neg_vectors.index)
    train_vectors, test_vectors, train_targets, test_targets, train_vocab, test_vocab = \
        train_test_split(vectors, targets, labels, test_size=0.1, random_state=state)
    ## Data
    X_train = train_vectors.values
    X_test = test_vectors.values
    # Encoding y
    # NOTE(review): OneHotEncoder's ``sparse=`` kwarg was renamed to
    # ``sparse_output`` and removed in scikit-learn 1.2+ — confirm the
    # pinned sklearn version before upgrading.
    one_hot = OneHotEncoder(sparse=False, categories='auto')
    one_hot.fit(np.array(train_targets).reshape(-1,1))
    y_train = one_hot.transform(np.array(train_targets).reshape(-1,1))
    y_test = one_hot.transform(np.array(test_targets).reshape(-1,1))
    return embeddings, X_train, X_test, y_train, y_test, train_vocab, test_vocab
def load_test_names(embeddings):
    """Build the name test set with race/gender labels.

    The name lists come from the Caliskan et al. appendix describing the
    Word Embedding Association Test.  Names without an embedding are
    dropped; each list starts with male names, and the first female name
    ('amanda' / 'aiesha') marks the gender split.

    Returns:
        (test_df, test_names_embed): a DataFrame with columns
        name/race/gender, and the matching embedding matrix.
    """
    NAMES_BY_ETHNICITY = {
        # The first two lists are from the Caliskan et al. appendix describing the
        # Word Embedding Association Test.
        'White': [
            'Adam', 'Chip', 'Harry', 'Josh', 'Roger', 'Alan', 'Frank', 'Ian', 'Justin',
            'Ryan', 'Andrew', 'Fred', 'Jack', 'Matthew', 'Stephen', 'Brad', 'Greg', 'Jed',
            'Paul', 'Todd', 'Brandon', 'Hank', 'Jonathan', 'Peter', 'Wilbur', 'Amanda',
            'Courtney', 'Heather', 'Melanie', 'Sara', 'Amber', 'Crystal', 'Katie',
            'Meredith', 'Shannon', 'Betsy', 'Donna', 'Kristin', 'Nancy', 'Stephanie',
            'Bobbie-Sue', 'Ellen', 'Lauren', 'Peggy', 'Sue-Ellen', 'Colleen', 'Emily',
            'Megan', 'Rachel', 'Wendy'
        ],
        'Black': [
            'Alonzo', 'Jamel', 'Lerone', 'Percell', 'Theo', 'Alphonse', 'Jerome',
            'Leroy', 'Rasaan', 'Torrance', 'Darnell', 'Lamar', 'Lionel', 'Rashaun',
            'Tyree', 'Deion', 'Lamont', 'Malik', 'Terrence', 'Tyrone', 'Everol',
            'Lavon', 'Marcellus', 'Terryl', 'Wardell', 'Aiesha', 'Lashelle', 'Nichelle',
            'Shereen', 'Temeka', 'Ebony', 'Latisha', 'Shaniqua', 'Tameisha', 'Teretha',
            'Jasmine', 'Latonya', 'Shanise', 'Tanisha', 'Tia', 'Lakisha', 'Latoya',
            'Sharise', 'Tashika', 'Yolanda', 'Lashandra', 'Malika', 'Shavonn',
            'Tawanda', 'Yvette'
        ]
    }
    # Lower-case and keep only names that actually have an embedding.
    for ethnicity in ('White', 'Black'):
        NAMES_BY_ETHNICITY[ethnicity] = [
            n.lower() for n in NAMES_BY_ETHNICITY[ethnicity]
            if n.lower() in embeddings.index
        ]
    white_names = NAMES_BY_ETHNICITY['White']
    black_names = NAMES_BY_ETHNICITY['Black']
    white_female_start = white_names.index('amanda')
    black_female_start = black_names.index('aiesha')
    test_gender = (
        white_female_start * ['Male']
        + (len(white_names) - white_female_start) * ['Female']
        + black_female_start * ['Male']
        + (len(black_names) - black_female_start) * ['Female']
    )
    test_df = pd.DataFrame({
        'name': white_names + black_names,
        'race': len(white_names) * ['White'] + len(black_names) * ['Black'],
        'gender': test_gender
    })
    test_names_embed = embeddings.loc[test_df['name']].values
    return test_df, test_names_embed
def load_nyc_names(names_path, embeddings):
    """Load NYC baby-name data and return the embedding matrix for the
    Black/White names that exist in *embeddings*.

    Assumes every value of the 'Ethnicity' column starts with one of
    BLACK / WHITE / ASIAN / HISPANIC, as in the NYC dataset.
    """
    names_df = pd.read_csv(names_path)
    # Collapse the verbose ethnicity labels to one capitalised word.
    ethnicity_fixed = []
    for raw in names_df['Ethnicity']:
        for prefix in ('BLACK', 'WHITE', 'ASIAN', 'HISPANIC'):
            if raw.startswith(prefix):
                ethnicity_fixed.append(prefix.capitalize())
    names_df['Ethnicity'] = ethnicity_fixed
    # Keep only the two groups under study.
    keep_mask = np.logical_or(names_df['Ethnicity'] == 'Black',
                              names_df['Ethnicity'] == 'White')
    names_df = names_df[keep_mask]
    name_col = 'Child\'s First Name'
    names_df[name_col] = [n.lower() for n in names_df[name_col]]
    # Drop names without an embedding (positional filter, then re-read).
    known_positions = [
        i for i, n in enumerate(names_df[name_col].values.tolist())
        if n in embeddings.index
    ]
    names_df = names_df.iloc[known_positions]
    return embeddings.loc[names_df[name_col].values.tolist()].values
def print_summary(test_df, method_name, test_accuracy):
    """Print accuracy and mean sentiment per race/gender group, then show
    a box plot of the logits by race.

    Expects ``test_df`` to have columns 'race', 'gender' and
    '<method_name>_logits'.  Side effects only (print + matplotlib
    window); returns None.
    """
    print(method_name + ' test accuracy %f' % test_accuracy)
    # Mean sentiment per race, then the absolute gap between the groups.
    mean_sentiments_race = []
    for r in ['Black', 'White']:
        mean_sent = test_df[method_name + '_logits'][test_df['race']==r].mean()
        mean_sentiments_race.append(mean_sent)
        print(method_name + ' %s mean sentiment is %f' %(r, mean_sent))
    print(method_name + ' race mean sentiment difference is %f\n' % np.abs(mean_sentiments_race[0] - mean_sentiments_race[1]))
    # Same summary split by gender.
    mean_sentiments_gender = []
    for g in ['Female', 'Male']:
        mean_sent = test_df[method_name + '_logits'][test_df['gender']==g].mean()
        mean_sentiments_gender.append(mean_sent)
        print(method_name + ' %s mean sentiment is %f' %(g, mean_sent))
    print(method_name + ' gender mean sentiment difference is %f\n' % np.abs(mean_sentiments_gender[0] - mean_sentiments_gender[1]))
    # Box plot of the per-name logits, grouped by race.
    sns.boxplot(x='race', y=method_name + '_logits', data=test_df).set_title(method_name, fontsize=30)
    plt.ylim(-4.5, 7.)
    plt.xlabel('')
    plt.ylabel('Logits', size=20, labelpad=-5)
    plt.xticks(fontsize=20)
    plt.yticks(fontsize=14)
    plt.show()
    return
|
IBM/sensitive-subspace-robustness
|
utils.py
|
utils.py
|
py
| 7,600 |
python
|
en
|
code
| 13 |
github-code
|
6
|
[
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "seaborn.set_context",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.OneHotEncoder",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "numpy.logical_or",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "seaborn.boxplot",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.ylim",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xticks",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 174,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.yticks",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 176,
"usage_type": "name"
}
] |
24746745289
|
# -*- coding: utf-8 -*-
import json
import urllib
from django.contrib import auth
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.conf import settings
from common import utils, page
from www.misc import qiniu_client
from www.misc.decorators import staff_required, common_ajax_response, verify_permission
from www.question.interface import TopicBase
@verify_permission('')
def topic(request, template_name='admin/topic.html'):
    """Render the admin topic page.

    Note: the template context is ``locals()``, so every local name
    defined here (``states``, ``request``, ...) is visible to the
    template — do not rename locals casually.
    """
    from www.question.models import Topic
    # All selectable topic states for the admin filter dropdown.
    states = [{'name': x[1], 'value': x[0]} for x in Topic.state_choices]
    return render_to_response(template_name, locals(), context_instance=RequestContext(request))
def format_topic(objs, num):
    """Serialize topic objects into plain dicts for the admin JSON API.

    Args:
        objs: iterable of Topic model instances.
        num: running row number of the last topic on the previous page;
            numbering continues from ``num + 1``.

    Returns:
        list of dicts with display-ready topic fields.
    """
    rows = []
    for topic in objs:
        num += 1
        parent = topic.parent_topic
        rows.append({
            'num': num,
            'topic_id': topic.id,
            'name': topic.name,
            'domain': topic.domain,
            'parent_id': parent.id if parent else '',
            'parent_name': parent.name if parent else '',
            'child_count': topic.child_count,
            'follower_count': topic.follower_count,
            'question_count': topic.question_count,
            'level': topic.level,
            'img': topic.get_img(),
            'des': topic.des,
            'sort': topic.sort_num,
            'is_show': topic.is_show,
            'state': topic.state,
            'create_time': str(topic.create_time)
        })
    return rows
@verify_permission('query_topic')
def search(request):
    """Paginated topic search for the admin screen (JSON response).

    With 'topic_name' set, looks up that single topic; otherwise pages
    through all topics, 10 per page.
    """
    topic_name = request.POST.get('topic_name')
    page_index = int(request.POST.get('page_index', 1))
    if topic_name:
        obj = TopicBase().get_topic_by_name(topic_name)
        objs = [obj] if obj else []
    else:
        objs = TopicBase().get_all_topics()
    page_objs = page.Cpt(objs, count=10, page=page_index).info
    # Continue row numbering across pages.
    data = format_topic(page_objs[0], 10 * (page_index - 1))
    return HttpResponse(
        json.dumps({'data': data, 'page_count': page_objs[4], 'total_count': page_objs[5]}),
        mimetype='application/json'
    )
@verify_permission('query_topic')
def get_topics_by_name(request):
    """Autocomplete lookup: topics matching 'topic_name' (JSON rows)."""
    topic_name = request.REQUEST.get('topic_name')
    topics = TopicBase().get_topics_by_name(topic_name) or []
    # Row shape expected by the autocomplete widget.
    result = [[t.id, t.name, None, t.name] for t in topics]
    return HttpResponse(json.dumps(result), mimetype='application/json')
@verify_permission('query_topic')
def get_topic_by_id(request):
    """Fetch a single topic, formatted for the admin UI (JSON)."""
    topic_id = request.REQUEST.get('topic_id')
    obj = TopicBase().get_topic_by_id_or_domain(topic_id, False)
    # Empty string signals "not found" to the frontend.
    data = format_topic([obj], 1)[0] if obj else ""
    return HttpResponse(json.dumps(data), mimetype='application/json')
@verify_permission('modify_topic')
def modify_topic(request):
    """Admin form handler: update a topic's fields (and optionally its
    image), then redirect back to the admin topic page.

    On success the redirect carries no message; on failure the error
    message is embedded in the query string.
    """
    topic_id = request.REQUEST.get('topic_id')
    name = request.REQUEST.get('name')
    domain = request.REQUEST.get('domain')
    des = request.REQUEST.get('des')
    state = request.REQUEST.get('state')
    sort = request.REQUEST.get('sort')
    parent_topic_id = request.REQUEST.get('parent_id')
    tb = TopicBase()
    obj = tb.get_topic_by_id_or_domain(topic_id, False)
    # Keep the existing image unless a new one was uploaded.
    img_name = obj.img
    img = request.FILES.get('img')
    if img:
        # Upload to the qiniu CDN and store the absolute URL.
        flag, img_name = qiniu_client.upload_img(img, img_type='topic')
        img_name = '%s/%s' % (settings.IMG0_DOMAIN, img_name)
    code, msg = tb.modify_topic(topic_id, name, domain, des, img_name, state, parent_topic_id, sort)
    if code == 0:
        url = "/admin/topic?#modify/%s" % (topic_id)
    else:
        url = "/admin/topic?%s#modify/%s" % (msg, topic_id)
    return HttpResponseRedirect(url)
@verify_permission('add_topic')
def add_topic(request):
    """Admin form handler: create a topic (with optional image upload),
    then redirect back to the admin topic page.

    On success ``msg`` holds the new topic id and the redirect opens its
    modify view; on failure ``msg`` is the error message.
    """
    name = request.REQUEST.get('name')
    domain = request.REQUEST.get('domain')
    des = request.REQUEST.get('des')
    state = request.REQUEST.get('state')
    sort = request.REQUEST.get('sort')
    parent_topic_id = request.REQUEST.get('parent_id')
    tb = TopicBase()
    img_name = ''
    img = request.FILES.get('img')
    if img:
        # Upload to the qiniu CDN and store the absolute URL.
        flag, img_name = qiniu_client.upload_img(img, img_type='topic')
        img_name = '%s/%s' % (settings.IMG0_DOMAIN, img_name)
    flag, msg = tb.create_topic(name, domain, parent_topic_id, img_name, des)
    if flag == 0:
        url = "/admin/topic?#modify/%s" % (msg)
    else:
        url = "/admin/topic?%s" % (msg)
    return HttpResponseRedirect(url)
|
lantianlz/zx
|
www/admin/views_topic.py
|
views_topic.py
|
py
| 4,554 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "www.question.models.Topic.state_choices",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "www.question.models.Topic",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render_to_response",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.template.RequestContext",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "www.misc.decorators.verify_permission",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "www.question.interface.TopicBase",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "www.question.interface.TopicBase",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "common.page.Cpt",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "common.page",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "www.misc.decorators.verify_permission",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "www.question.interface.TopicBase",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "www.misc.decorators.verify_permission",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "www.question.interface.TopicBase",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "www.misc.decorators.verify_permission",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "www.question.interface.TopicBase",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "www.misc.qiniu_client.upload_img",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "www.misc.qiniu_client",
"line_number": 121,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.IMG0_DOMAIN",
"line_number": 122,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 122,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "www.misc.decorators.verify_permission",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "www.question.interface.TopicBase",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "www.misc.qiniu_client.upload_img",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "www.misc.qiniu_client",
"line_number": 148,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.IMG0_DOMAIN",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "www.misc.decorators.verify_permission",
"line_number": 134,
"usage_type": "call"
}
] |
15038954927
|
import tkinter
from PIL import Image
import time
import pygame
# Ссылка на гугл диск, на котором файл с музыкой. Он не прошла по размеру на github. https://drive.google.com/drive/folders/1RzTOtOH4LLt6UE6C6TCYG-0Quf38lkTE
pygame.init()
# Background music on endless repeat (-1 = loop forever).
pygame.mixer.music.load("music.wav")
pygame.mixer.music.play(-1)
def game():
    """Open the code-generator window: a button that displays a random
    activation code of the form XXXXX-XXXX-XXXX over a background image."""
    # Symbol alphabet: digits 0-9 followed by A-Z (36 symbols total).
    code_symbols = []
    for i in range(10):
        code_symbols.append(i)
    alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    for i in alphabet:
        code_symbols.append(i)
    middle = 9
    def block(k):
        # One code block of k symbols; regenerated until the sum of the
        # random indices exceeds k * middle (so codes skew "high").
        from random import randint
        interval = k * middle
        summa = 0
        result = ''
        while summa <= interval:
            summa = 0
            result = ''
            for i in range(k):
                a = randint(0, 35)
                summa += a
                result += str(code_symbols[a])
        return result
    def clicked():
        # Overlay the background with a success label and a fresh code.
        lbl = tkinter.Label(window, text="Успешно! Ваш код: " + block(5) + '-' + block(4) + '-' + block(4),
                            font=("Arial Bold", 25),
                            bg='Gray')
        lbl.grid(column=0, row=0)
    window = tkinter.Tk()
    window.title("Добро пожаловать в генерацию кода")
    window.geometry('1313x833')
    # Keep a reference on the window so the image isn't garbage collected.
    window.image = tkinter.PhotoImage(file='gta.png')
    bg_gta = tkinter.Label(window, image=window.image)
    bg_gta.grid(column=0, row=0)
    btn_1 = tkinter.Button(window, text="Сгенерировать код", font=("Arial Bold", 15), bg='Gray', command=clicked)
    btn_1.grid(column=0, row=0)
    window.mainloop()
def animation(count, k):
global anim
global frames
im2 = im[count]
gif_label.configure(image=im2)
count += 1
k += 1
time.sleep(0.5)
if count == frames:
count = 0
if k == frames + 1:
root.destroy()
game()
anim = root.after(50, lambda: animation(count, k))
root = tkinter.Tk()
root.title('Мы начинаем!')
file = "10.gif"
root.geometry('627x627')
info = Image.open(file)
frames = info.n_frames # gives total number of frames that gif contains
# creating list of PhotoImage objects for each frames
im = [tkinter.PhotoImage(file=file, format=f"gif -index {i}") for i in range(frames)]
count = 0
k = 0
anim = None
gif_label = tkinter.Label(root, image="")
gif_label.pack()
btn_2 = tkinter.Button(root, text="СТАРТ", font=("Arial Bold", 15), command=lambda: animation(count, k))
btn_2.pack()
root.mainloop()
|
PashaSeleznev/Lab_4
|
main.py
|
main.py
|
py
| 2,652 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pygame.init",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pygame.mixer.music.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "pygame.mixer.music.play",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "tkinter.PhotoImage",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "tkinter.PhotoImage",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "tkinter.Label",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "tkinter.Button",
"line_number": 90,
"usage_type": "call"
}
] |
40534220786
|
from dataclasses import dataclass, field
from typing import List
from frater.component import ComponentState, IOComponent, IOComponentConfig, ComponentBuilder
from frater.stream import InputStream, OutputStream, StreamConfig, StreamState
@dataclass
class SummationComponentState(ComponentState):
total: int = 0
class SummationComponent(IOComponent):
def __init__(self, config: IOComponentConfig, input_stream: InputStream, output_stream: OutputStream):
super(SummationComponent, self).__init__(config, input_stream, output_stream)
def init_state(self):
return SummationComponentState()
def process(self, data):
self.state.total += data
return self.state.total
@dataclass
class IterableInputStreamConfig(StreamConfig):
data: List[int] = field(default_factory=list)
class IterableInputStream(InputStream):
def __init__(self, config: IterableInputStreamConfig):
super(IterableInputStream, self).__init__(config)
def __iter__(self):
yield StreamState.START
yield from self.config.data
yield StreamState.END
class PrintOutputStream(OutputStream):
def send(self, data):
print(data)
def main():
input_stream = IterableInputStream(
IterableInputStreamConfig.from_dict({'data': list(range(10))}))
output_stream = PrintOutputStream()
component = ComponentBuilder.build(SummationComponent, IOComponentConfig(), input_stream, output_stream)
component.run()
if __name__ == '__main__':
main()
|
Frater-SDK/frater
|
docs/source/getting_started/examples/io_component_example.py
|
io_component_example.py
|
py
| 1,530 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "frater.component.ComponentState",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "frater.component.IOComponent",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "frater.component.IOComponentConfig",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "frater.stream.InputStream",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "frater.stream.OutputStream",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "frater.stream.StreamConfig",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "dataclasses.field",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "frater.stream.InputStream",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "frater.stream.StreamState.START",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "frater.stream.StreamState",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "frater.stream.StreamState.END",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "frater.stream.StreamState",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "frater.stream.OutputStream",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "frater.component.ComponentBuilder.build",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "frater.component.ComponentBuilder",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "frater.component.IOComponentConfig",
"line_number": 49,
"usage_type": "call"
}
] |
20674612571
|
import logging
import sys
from tracker import LogawareMixin, getenv_or_fail
from tracker.fetch.online import JsonEndpointFetcher
from tracker.metadata.retriever import LiftMetadataRetriever
from tracker.metadata.store import LiftMetadataDatabaseRecorder
class LiftMetadataInserter(LogawareMixin):
def __init__(self, lift_metadata_retriever: LiftMetadataRetriever, database_client: LiftMetadataDatabaseRecorder):
super().__init__()
self.lift_metadata_retriever = lift_metadata_retriever
self.database_client = database_client
def insert(self):
lift_metadata = []
for page in range(1, 13):
lift_metadata.extend(self.lift_metadata_retriever.lift_metadata(page))
self._log.debug(f'recording lift state snapshot {lift_metadata}')
self.database_client.record_all(lift_metadata)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s %(levelname)s %(message)s", stream=sys.stdout)
recorder = LiftMetadataDatabaseRecorder(getenv_or_fail('MONGODB_PASS'))
recorder.purge_data()
snapshot_taker = LiftMetadataInserter(
LiftMetadataRetriever(JsonEndpointFetcher.lift_metadata_fetcher(getenv_or_fail('DOLOMITI_BEARER'))),
recorder
)
snapshot_taker.insert()
|
dachrisch/dolomiti-lift-queue
|
tracker/metadata/insert.py
|
insert.py
|
py
| 1,298 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "tracker.LogawareMixin",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "tracker.metadata.retriever.LiftMetadataRetriever",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "tracker.metadata.store.LiftMetadataDatabaseRecorder",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "logging.basicConfig",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "tracker.metadata.store.LiftMetadataDatabaseRecorder",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "tracker.getenv_or_fail",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "tracker.metadata.retriever.LiftMetadataRetriever",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "tracker.fetch.online.JsonEndpointFetcher.lift_metadata_fetcher",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "tracker.fetch.online.JsonEndpointFetcher",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "tracker.getenv_or_fail",
"line_number": 29,
"usage_type": "call"
}
] |
10649208417
|
"""
Example
Description: Showcasing the use of Example Images from 'Images.py'
"""
from IMAGES import *
import pygame,sys
pygame.init()
w,h = (1920,1080)
win = pygame.display.set_mode([w,h])
img1 = Rock((255,255,255),(0,0))
img2 = Testing((255,255,255),(0,0))
while True:
for e in pygame.event.get():
if e.type == pygame.QUIT:
sys.exit()
win.fill((125,125,125))
win.blit(img1.img,(0,0))
win.blit(img2.img,(0,0))
pygame.display.flip()
|
LandenTy/GeometricEngine
|
CustomTexturer/Example Images/Example.py
|
Example.py
|
py
| 487 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pygame.init",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pygame.display.flip",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 24,
"usage_type": "attribute"
}
] |
34673770578
|
import typed_args as ta
from typing import List, Callable
@ta.argument_parser()
class Args(ta.TypedArgs):
"""
Process some integers.
"""
integers: List[int] = ta.add_argument(
metavar='N', type=int, nargs='+',
# help='an integer for the accumulator'
)
"""
an integer for the accumulator
"""
accumulate: Callable[[List[int]], int] = ta.add_argument(
'--sum',
action='store_const',
const=sum, default=max,
help='sum the integers (default: find the max)'
)
args = Args.parse_args()
print(args.accumulate(args.integers))
|
SunDoge/typed-args
|
examples/prog.py
|
prog.py
|
py
| 611 |
python
|
en
|
code
| 11 |
github-code
|
6
|
[
{
"api_name": "typed_args.TypedArgs",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typed_args.add_argument",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "typing.Callable",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "typed_args.add_argument",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "typed_args.argument_parser",
"line_number": 5,
"usage_type": "call"
}
] |
44313582014
|
from tkinter import *
import sqlite3
from PIL import ImageTk, Image
from backend import Database
import requests
database=Database("books.db")
class Window(object):
def __init__(self,window):
self.window=window
self.window.title("Bookstore")
self.window.configure(bg='#856ff8')
#Title input
self.title=StringVar()
self.l1=Label(text="Title")
self.l1.grid(row=0,column=0)
self.e1=Entry(window,textvariable=self.title)
self.e1.grid(row=0,column=1)
#Author input
self.author=StringVar()
self.l2=Label(text="Author")
self.l2.grid(row=0,column=2)
self.e2=Entry(window,textvariable=self.author)
self.e2.grid(row=0,column=3)
#Year input
self.year=StringVar()
self.l3=Label(text="Year")
self.l3.grid(row=1,column=0)
self.e3=Entry(window,textvariable=self.year)
self.e3.grid(row=1,column=1)
#Genre input
self.genre=StringVar()
self.l4=Label(text="Genre")
self.l4.grid(row=1,column=2)
self.e4=Entry(window,textvariable=self.genre)
self.e4.grid(row=1,column=3)
#Rate input
self.rate=StringVar()
self.l5=Label(text="Rating")
self.l5.grid(row=2,column=0)
self.e5=Entry(window,textvariable=self.rate)
self.e5.grid(row=2,column=1)
#ISBN input
self.isbn=StringVar()
self.l6=Label(text="ISBN")
self.l6.grid(row=2,column=2)
self.e6=Entry(window,textvariable=self.isbn)
self.e6.grid(row=2,column=3)
#URL input
self.img_url=StringVar()
self.l7=Label(text="Image Url")
self.l7.grid(row=3,column=1)
self.e7=Entry(window,textvariable=self.img_url)
self.e7.grid(row=3,column=2)
self.list1=Listbox(window,width=75)
self.list1.grid(row=0,column=5,rowspan=6)
sb1=Scrollbar(window)
sb1.grid(row=0,column=6,rowspan=6)
self.list1.configure(yscrollcommand=sb1.set)
sb1.configure(command=self.list1.yview)
self.list1.bind('<<ListboxSelect>>',self.CurSelect)
#Search Button
b1=Button(text ="Search Entry", width=15,foreground="red",command=self.search)
b1.grid(row=4,column=1)
#Add Button
b2=Button(text ="Add Entry", width=15,command=self.add)
b2.grid(row=4,column=3)
#Update Button
b3=Button(text ="Update Entry", width=15,command=self.update)
b3.grid(row=5,column=1)
#Delete Button
b4=Button(text ="Delete Entry", width=15,command=self.delete)
b4.grid(row=5,column=3)
b5=Button(text ="Close", width=15,command=window.destroy)
b5.grid(row=5,column=2)
def view(self):
for row in database.view():
self.list1.insert(END,row)
def search(self):
self.list1.delete(0,END)
for row in database.search(title.get(),author.get(),year.get(),genre.get(),rate.get(),isbn.get()):
self.list1.insert(END,row)
def CurSelect(self,e):
if self.list1.size()!=0:
global selected_tuple
global img
#this one will get the index and to be used for deleting an item
index=list1.curselection()[0]
self.selected_tuple=self.list1.get(index)
value=self.list1.get(self.list1.curselection())
#This part will create the image box on the side and it will display the image that belongs to the book
self.img_url1=value[len(value)-1]
self.img = Image.open(requests.get(img_url1, stream=True).raw)
self.img = img.resize((170,170), Image.ANTIALIAS)
self.img = ImageTk.PhotoImage(img)
self.l8=Label(image=img)
self.l8.grid(row=0,column=7,rowspan=5)
#this part will bring selected item to the entrees
self.e1.delete(0,END)
self.e1.insert(END,self.selected_tuple[1])
self.e2.delete(0,END)
self.e2.insert(END,self.selected_tuple[2])
self.e3.delete(0,END)
self.e3.insert(END,self.selected_tuple[3])
self.e4.delete(0,END)
self.e4.insert(END,self.selected_tuple[4])
self.e5.delete(0,END)
self.e5.insert(END,self.selected_tuple[5])
self.e6.delete(0,END)
self.e6.insert(END,self.selected_tuple[6])
self.e7.delete(0,END)
self.e7.insert(END, self.selected_tuple[7])
def delete(self):
database.delete(self.selected_tuple[0])
self.list1.delete(0,END)
view()
def add(self):
database.insert(self.title.get(),self.author.get(),self.year.get(),self.genre.get(),self.rate.get(),self.isbn.get(),self.img_url.get())
self.e1.delete(0,END)
self.e2.delete(0,END)
self.e3.delete(0,END)
self.e4.delete(0,END)
self.e5.delete(0,END)
self.e6.delete(0,END)
self.e7.delete(0,END)
self.list1.delete(0,END)
self.view()
def update(self):
database.update(self.selected_tuple[0],self.title.get(),self.author.get(),self.year.get(),self.genre.get(),self.rate.get(),self.isbn.get(),self.img_url.get())
self.list1.delete(0,END)
view()
window= Tk()
win=Window(window)
win.view()
window.mainloop()
|
mertwithamouth/Py_Projects
|
Book Store/bookstore.py
|
bookstore.py
|
py
| 5,422 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "backend.Database",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 116,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "PIL.Image.ANTIALIAS",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 117,
"usage_type": "name"
},
{
"api_name": "PIL.ImageTk.PhotoImage",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "PIL.ImageTk",
"line_number": 118,
"usage_type": "name"
}
] |
74874759867
|
import sys
import os
import argparse
from pypiscout.SCout_Logger import Logger as sc
import gprof2dot # pylint: disable=unused-import
# Rationale: Not directly used, but later we do a sys-call wich needs the library. This is needed to inform the user to install the package.
sys.path.append("../")
# pylint: disable=wrong-import-position
# Rationale: This module needs to access modules that are above them in the folder structure.
from Emma.shared_libs.stringConstants import * # pylint: disable=unused-wildcard-import,wildcard-import
import Emma.shared_libs.emma_helper
import genDoc._genCallGraphs
import genDoc._genUmlDiagrams
def ParseArguments():
"""
Argument parser
:return: argparse object containing the parsed options
"""
parser = argparse.ArgumentParser(
prog="Emma - Call graph generator",
description="Script to generate call graphs that can be used in the documentation or to examine the run of Emma and the Emma Visualiser.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"--graphviz_bin_folder",
help=r"The bin subfolder of the Graphviz software. Example: c:\Program Files (x86)\Graphviz2.38\bin",
required=False
)
parser.add_argument(
"--verbose",
help="Prints out more info during run.",
default=False
)
parser.add_argument(
"--no_graphs",
help="Do not update graphs (UML + call graph)",
action="store_true",
default=False
)
return parser.parse_args()
def main(arguments):
"""
Main function.
:param arguments: Processed command line arguments.
:return: None
"""
sc(invVerbosity=-1, actionWarning=lambda: sys.exit(-10), actionError=lambda: sys.exit(-10))
sc().header("Generating the Readme documents", symbol="/")
# Give a hint on python sys-call
sc().info("A `python` system call is going to happen. If any errors occur please check the following first:")
if sys.platform == "win32":
sc().info("Windows OS detected. Make sure `python` refers to the Python3 version targeted for this application (-> dependencies; e.g. WSL comes with its own Python).\n")
else:
sc().info("Make sure `python` refers to a Python 3 installation.\n")
# Store original path variables
pathOldValue = os.environ["PATH"]
if not("Graphviz" in os.environ["PATH"] or "graphviz" in os.environ["PATH"]):
if arguments.graphviz_bin_folder is not None:
graphvizBinAbspath = os.path.abspath(arguments.graphviz_bin_folder)
# Add to path
os.environ["PATH"] += (graphvizBinAbspath + ";")
else:
sc().error("The \"graphviz_bin_folder\" was not found in PATH nor was given in the argument --graphviz_bin_folder")
try:
outPath = os.path.abspath(Emma.shared_libs.emma_helper.joinPath("..", README_CALL_GRAPH_AND_UML_PATH))
if not os.path.isdir(outPath):
sc().info("The folder \"" + outPath + "\" was created because it did not exist...")
os.makedirs(README_CALL_GRAPH_AND_UML_PATH)
if not arguments.no_graphs:
# pylint: disable=protected-access
# Rationale: These modules are private so that the users will not use them directly. They are meant to be used trough this script.
genDoc._genCallGraphs.main(arguments)
genDoc._genUmlDiagrams.main()
sc().info("Storing Emma readme as a .html file...")
markdownFilePath = r"../doc/readme-emma.md"
Emma.shared_libs.emma_helper.convertMarkdownFileToHtmlFile(Emma.shared_libs.emma_helper.joinPath(os.path.dirname(__file__), markdownFilePath), (os.path.splitext(markdownFilePath)[0] + ".html"))
sc().info("Done.\n")
sc().info("Storing Emma Visualiser readme as a .html file...")
markdownFilePath = r"../doc/readme-vis.md"
Emma.shared_libs.emma_helper.convertMarkdownFileToHtmlFile(Emma.shared_libs.emma_helper.joinPath(os.path.dirname(__file__), markdownFilePath), (os.path.splitext(markdownFilePath)[0] + ".html"))
sc().info("Done.\n")
sc().info("Storing Emma contribution as a .html file...")
markdownFilePath = r"../doc/contribution.md"
Emma.shared_libs.emma_helper.convertMarkdownFileToHtmlFile(Emma.shared_libs.emma_helper.joinPath(os.path.dirname(__file__), markdownFilePath), (os.path.splitext(markdownFilePath)[0] + ".html"))
sc().info("Done.\n")
sc().info("Storing the test_project readme as a .html file...")
markdownFilePath = r"../doc/test_project/readme.md"
Emma.shared_libs.emma_helper.convertMarkdownFileToHtmlFile(Emma.shared_libs.emma_helper.joinPath(os.path.dirname(__file__), markdownFilePath), (os.path.splitext(markdownFilePath)[0] + ".html"))
sc().info("Done.\n")
sc().info("Storing the top level README as a .html file...")
# Change the working directory; otherwise we get errors about the relative image import paths in emma_helper.changePictureLinksToEmbeddingInHtmlData()
os.chdir("..")
markdownFilePath = r"../README.md"
Emma.shared_libs.emma_helper.convertMarkdownFileToHtmlFile(Emma.shared_libs.emma_helper.joinPath(os.path.dirname(__file__), markdownFilePath), (os.path.splitext(markdownFilePath)[0] + ".html"))
sc().info("Done.")
os.chdir("doc") # Change working directory back
except Exception as exception: # pylint: disable=broad-except
# Rationale: We are not trying to catch a specific exception type here.
# The purpose of this is, that the PATH environment variable will be set back in case of an error.
sc().error("An exception was caught:", exception)
# Get back initial path config
os.environ["PATH"] = pathOldValue
if __name__ == "__main__":
main(ParseArguments())
|
bmwcarit/Emma
|
genDoc/genReadmeHtmlFromMd.py
|
genReadmeHtmlFromMd.py
|
py
| 6,000 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "sys.path.append",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentDefaultsHelpFormatter",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "pypiscout.SCout_Logger.Logger",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "pypiscout.SCout_Logger.Logger",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "pypiscout.SCout_Logger.Logger",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "sys.platform",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "pypiscout.SCout_Logger.Logger",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "pypiscout.SCout_Logger.Logger",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "pypiscout.SCout_Logger.Logger",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "Emma.shared_libs.stringConstants.shared_libs.emma_helper.joinPath",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "Emma.shared_libs.stringConstants.shared_libs",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "Emma.shared_libs.stringConstants",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "os.path.isdir",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "pypiscout.SCout_Logger.Logger",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "genDoc._genCallGraphs._genCallGraphs.main",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "genDoc._genCallGraphs._genCallGraphs",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "genDoc._genCallGraphs",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "genDoc._genCallGraphs._genUmlDiagrams.main",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "genDoc._genCallGraphs._genUmlDiagrams",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "genDoc._genCallGraphs",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "pypiscout.SCout_Logger.Logger",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "Emma.shared_libs.stringConstants.shared_libs.emma_helper.convertMarkdownFileToHtmlFile",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "Emma.shared_libs.stringConstants.shared_libs",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "Emma.shared_libs.stringConstants",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "Emma.shared_libs.stringConstants.shared_libs.emma_helper.joinPath",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "pypiscout.SCout_Logger.Logger",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "pypiscout.SCout_Logger.Logger",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "Emma.shared_libs.stringConstants.shared_libs.emma_helper.convertMarkdownFileToHtmlFile",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "Emma.shared_libs.stringConstants.shared_libs",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "Emma.shared_libs.stringConstants",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "Emma.shared_libs.stringConstants.shared_libs.emma_helper.joinPath",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "pypiscout.SCout_Logger.Logger",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "pypiscout.SCout_Logger.Logger",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "Emma.shared_libs.stringConstants.shared_libs.emma_helper.convertMarkdownFileToHtmlFile",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "Emma.shared_libs.stringConstants.shared_libs",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "Emma.shared_libs.stringConstants",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "Emma.shared_libs.stringConstants.shared_libs.emma_helper.joinPath",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "pypiscout.SCout_Logger.Logger",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "pypiscout.SCout_Logger.Logger",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "Emma.shared_libs.stringConstants.shared_libs.emma_helper.convertMarkdownFileToHtmlFile",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "Emma.shared_libs.stringConstants.shared_libs",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "Emma.shared_libs.stringConstants",
"line_number": 104,
"usage_type": "name"
},
{
"api_name": "Emma.shared_libs.stringConstants.shared_libs.emma_helper.joinPath",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 104,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "pypiscout.SCout_Logger.Logger",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "pypiscout.SCout_Logger.Logger",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "Emma.shared_libs.stringConstants.shared_libs.emma_helper.convertMarkdownFileToHtmlFile",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "Emma.shared_libs.stringConstants.shared_libs",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "Emma.shared_libs.stringConstants",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "Emma.shared_libs.stringConstants.shared_libs.emma_helper.joinPath",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "pypiscout.SCout_Logger.Logger",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "pypiscout.SCout_Logger.Logger",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 121,
"usage_type": "attribute"
}
] |
24168954856
|
import asyncio
import importlib
import re
from contextlib import closing, suppress
from uvloop import install
from pyrogram import filters, idle
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, Message
from Yukinon.menu import *
from Yukinon import *
from Yukinon.plugins import ALL_MODULES
from Yukinon.utils import paginate_modules
from lang import get_command
from Yukinon.utils.lang import *
from Yukinon.utils.commands import *
from Yukinon.mongo.rulesdb import *
from Yukinon.utils.start import *
from Yukinon.mongo.usersdb import *
from Yukinon.mongo.restart import *
from Yukinon.mongo.chatsdb import *
from Yukinon.plugins.fsub import ForceSub
import random
loop = asyncio.get_event_loop()
flood = {}
START_COMMAND = get_command("START_COMMAND")
HELP_COMMAND = get_command("HELP_COMMAND")
HELPABLE = {}
async def start_bot():
global HELPABLE
for module in ALL_MODULES:
imported_module = importlib.import_module("Yukinon.plugins." + module)
if (
hasattr(imported_module, "__MODULE__")
and imported_module.__MODULE__
):
imported_module.__MODULE__ = imported_module.__MODULE__
if (
hasattr(imported_module, "__HELP__")
and imported_module.__HELP__
):
HELPABLE[
imported_module.__MODULE__.replace(" ", "_").lower()
] = imported_module
all_module = ""
j = 1
for i in ALL_MODULES:
all_module = "•≫ Successfully imported:{:<15}.py".format(i)
print(all_module)
restart_data = await clean_restart_stage()
try:
if restart_data:
await app.edit_message_text(
restart_data["chat_id"],
restart_data["message_id"],
"**Restarted Successfully**",
)
else:
await app.send_message(LOG_GROUP_ID, "Yukinon Robot started!")
except Exception as e:
print(e)
#print(f"{all_module}")
print("""
_____________________________________________
| |
| Deployed Successfully |
| (C) 2021-2022 by @TechZBots |
|_____________________________________________|
""")
await idle()
await aiohttpsession.close()
await app.stop()
for task in asyncio.all_tasks():
task.cancel()
home_keyboard_pm = InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
text=" ➕ Add Me To Your Group ➕ ",
url=f"http://t.me/{BOT_USERNAME}?startgroup=new",
)
],
[
InlineKeyboardButton(
text=" ℹ️ About", callback_data="_about"
),
InlineKeyboardButton(
text="🌍 languages ", callback_data="_langs"
),
],
[
InlineKeyboardButton(
text="📮 How To Use Me", callback_data="bot_commands"
),
],
[
InlineKeyboardButton(
text="🌐 My Website",
url=f"https://szrosebot.ml",
),
InlineKeyboardButton(
text="🔰News Channel",
url=f"https://t.me/szroseupdates",
)
],
]
)
keyboard = InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
text="📚 Commands & help",
url=f"t.me/{BOT_USERNAME}?start=help",
)
]
]
)
IMG = ["https://telegra.ph/file/c8f5c1dd990ca9a3d8516.jpg",
"https://telegra.ph/file/77cc3154b752ce822fd52.jpg",
"https://telegra.ph/file/e72fb0b6a7fba177cf4c7.jpg",
"https://telegra.ph/file/8738a478904238e367939.jpg",
"https://telegra.ph/file/68d7830ba72820f44bda0.jpg"
]
@app.on_message(filters.command(START_COMMAND))
@language
async def start(client, message: Message, _):
    """Handle /start: group intro, PM deep-link payloads, or the PM welcome banner."""
    # Force-subscribe gate: 400 means the sender still has to join the channel.
    FSub = await ForceSub(bot, message)
    if FSub == 400:
        return
    chat_id = message.chat.id
    # Ignore messages signed by channels rather than users.
    if message.sender_chat:
        return
    if message.chat.type != "private":
        # In groups: short intro plus the PM-help deep-link keyboard,
        # and record the chat as served.
        await message.reply(
            _["main2"], reply_markup=keyboard)
        return await add_served_chat(message.chat.id)
    if len(message.text.split()) > 1:
        # Deep-link payload after /start (rules_* / help_* / help / connections).
        name = (message.text.split(None, 1)[1]).lower()
        if name.startswith("rules"):
            await get_private_rules(app, message, name)
            return
        elif "_" in name:
            module = name.split("_", 1)[1]
            # NOTE(review): {HELPABLE[module].__MODULE__} is a SET literal, so
            # set + str raises TypeError at runtime; this almost certainly
            # meant plain string concatenation — confirm and fix.
            text = (_["main6"].format({HELPABLE[module].__MODULE__}
                + HELPABLE[module].__HELP__)
            )
            await message.reply(text, disable_web_page_preview=True)
        elif name == "help":
            text, keyb = await help_parser(message.from_user.first_name)
            await message.reply(
                _["main5"],
                reply_markup=keyb,
                disable_web_page_preview=True,
            )
        elif name == "connections":
            await message.reply("Run /connections to view or disconnect from groups!")
        else:
            # Usage counters for the welcome banner.
            # NOTE(review): the len(...) results below are immediately
            # overwritten by [], so each of those lines is a wasted DB read.
            served_chats = len(await get_served_chats())
            served_chats = []
            chats = await get_served_chats()
            for chat in chats:
                served_chats.append(int(chat["chat_id"]))
            served_users = len(await get_served_users())
            served_users = []
            users = await get_served_users()
            for user in users:
                served_users.append(int(user["bot_users"]))
        # NOTE(review): per this dump's indentation the banner sits inside the
        # payload branch, so a plain /start sends nothing and served_chats is
        # undefined on the help_*/help paths — verify against upstream source.
        await message.reply(f"""
[👋]({random.choice(IMG)}) Hey there {message.from_user.mention},
My name is Yukinon, an advanced telegram Group management Bot For helpYou Protect Your Groups & Suit For All Your Needs.
I currently manage about `{len(served_chats)}` groups.I have over `{len(served_users)}` users
⚒ Send Me /help For Get Commands.
👨💻Dᴇᴠᴇʟᴏᴘᴇʀ : @supunma
""",
        reply_markup=home_keyboard_pm,
        )
        return await add_served_user(message.from_user.id)
@app.on_message(filters.command(HELP_COMMAND))
@language
async def help_command(client, message: Message, _):
    """Handle /help.

    In groups: offer a deep-link button into the bot's PM (module-specific
    when "/help <module>" names a known module).  In PM: show the requested
    module's help text, or fall back to the paginated help menu.
    """
    FSub = await ForceSub(bot, message)
    if FSub == 400:
        return
    if message.chat.type != "private":
        if len(message.command) >= 2:
            name = (message.text.split(None, 1)[1]).replace(" ", "_").lower()
            if str(name) in HELPABLE:
                key = InlineKeyboardMarkup(
                    [
                        [
                            InlineKeyboardButton(
                                text=_["main3"],
                                url=f"t.me/{BOT_USERNAME}?start=help_{name}",
                            )
                        ],
                    ]
                )
                await message.reply(
                    _["main4"],
                    reply_markup=key,
                )
            else:
                await message.reply(
                    _["main2"], reply_markup=keyboard
                )
        else:
            await message.reply(
                _["main2"], reply_markup=keyboard
            )
    else:
        if len(message.command) >= 2:
            name = (message.text.split(None, 1)[1]).replace(" ", "_").lower()
            if str(name) in HELPABLE:
                # BUG FIX: __MODULE__ was wrapped in a set literal, so
                # {str} + str raised TypeError; concatenate the strings.
                text = _["main6"].format(
                    HELPABLE[name].__MODULE__ + HELPABLE[name].__HELP__
                )
                # One if/else replaces the duplicated hasattr / not-hasattr checks.
                if hasattr(HELPABLE[name], "__helpbtns__"):
                    button = (HELPABLE[name].__helpbtns__) + [[InlineKeyboardButton("« Back", callback_data="bot_commands")]]
                else:
                    button = [[InlineKeyboardButton("« Back", callback_data="bot_commands")]]
                await message.reply(text,
                    reply_markup=InlineKeyboardMarkup(button),
                    disable_web_page_preview=True)
            else:
                text, help_keyboard = await help_parser(
                    message.from_user.first_name
                )
                await message.reply(
                    _["main5"],
                    reply_markup=help_keyboard,
                    disable_web_page_preview=True,
                )
        else:
            text, help_keyboard = await help_parser(
                message.from_user.first_name
            )
            await message.reply(
                text, reply_markup=help_keyboard, disable_web_page_preview=True
            )
    return
@app.on_callback_query(filters.regex("startcq"))
@languageCB
async def startcq(client,CallbackQuery, _):
served_chats = len(await get_served_chats())
served_chats = []
chats = await get_served_chats()
for chat in chats:
served_chats.append(int(chat["chat_id"]))
served_users = len(await get_served_users())
served_users = []
users = await get_served_users()
for user in users:
served_users.append(int(user["bot_users"]))
await CallbackQuery.message.edit(
text=f"""
👋 Hey there {CallbackQuery.from_user.mention},
My name is Yukinon ,an advanced telegram Group management Bot For help
You Protect Your Groups & Suit For All Your Needs.
I currently manage about `{len(served_chats)}` groups.I have over `{len(served_users)}` users
⚒ Send Me /help For Get Commands.
👨💻Dᴇᴠᴇʟᴏᴘᴇʀ : @supunma
""",
disable_web_page_preview=True,
reply_markup=home_keyboard_pm)
async def help_parser(name, keyboard=None):
    """Return the help-menu text and its keyboard.

    When no keyboard is supplied, page 0 of the paginated module keyboard
    is built.  ``name`` is accepted for caller convenience but unused.
    """
    if not keyboard:
        keyboard = InlineKeyboardMarkup(paginate_modules(0, HELPABLE, "help"))
    menu_text = """
**Welcome to help menu**
I'm a group management bot with some useful features.
You can choose an option below, by clicking a button.
If you have any bugs or questions on how to use me,
have a look at my [Docs](https://szsupunma.gitbook.io/rose-bot/), or head to @szteambots.
**All commands can be used with the following: / **"""
    return menu_text, keyboard
@app.on_callback_query(filters.regex("bot_commands"))
@languageCB
async def commands_callbacc(client,CallbackQuery, _):
text ,keyboard = await help_parser(CallbackQuery.from_user.mention)
await app.send_message(
CallbackQuery.message.chat.id,
text=_["main5"],
reply_markup=keyboard,
disable_web_page_preview=True,
)
await CallbackQuery.message.delete()
@app.on_callback_query(filters.regex(r"help_(.*?)"))
@languageCB
async def help_button(client, query, _):
home_match = re.match(r"help_home\((.+?)\)", query.data)
mod_match = re.match(r"help_module\((.+?)\)", query.data)
prev_match = re.match(r"help_prev\((.+?)\)", query.data)
next_match = re.match(r"help_next\((.+?)\)", query.data)
back_match = re.match(r"help_back", query.data)
create_match = re.match(r"help_create", query.data)
top_text = _["main5"]
if mod_match:
module = (mod_match.group(1)).replace(" ", "_")
text = (
"{} **{}**:\n".format(
"Here is the help for", HELPABLE[module].__MODULE__
)
+ HELPABLE[module].__HELP__
+ "\n👨💻Dᴇᴠᴇʟᴏᴘᴇʀ : @supunma"
)
if hasattr(HELPABLE[module], "__helpbtns__"):
button = (HELPABLE[module].__helpbtns__) + [[InlineKeyboardButton("« Back", callback_data="bot_commands")]]
if not hasattr(HELPABLE[module], "__helpbtns__"): button = [[InlineKeyboardButton("« Back", callback_data="bot_commands")]]
await query.message.edit(
text=text,
reply_markup=InlineKeyboardMarkup(button),
disable_web_page_preview=True,
)
await query.answer(f"Here is the help for {module}",show_alert=True)
elif home_match:
await app.send_message(
query.from_user.id,
text= _["main2"],
reply_markup=home_keyboard_pm,
)
await query.message.delete()
elif prev_match:
curr_page = int(prev_match.group(1))
await query.message.edit(
text=top_text,
reply_markup=InlineKeyboardMarkup(
paginate_modules(curr_page - 1, HELPABLE, "help")
),
disable_web_page_preview=True,
)
elif next_match:
next_page = int(next_match.group(1))
await query.message.edit(
text=top_text,
reply_markup=InlineKeyboardMarkup(
paginate_modules(next_page + 1, HELPABLE, "help")
),
disable_web_page_preview=True,
)
elif back_match:
await query.message.edit(
text=top_text,
reply_markup=InlineKeyboardMarkup(
paginate_modules(0, HELPABLE, "help")
),
disable_web_page_preview=True,
)
elif create_match:
text, keyboard = await help_parser(query)
await query.message.edit(
text=text,
reply_markup=keyboard,
disable_web_page_preview=True,
)
return await client.answer_callback_query(query.id)
if __name__ == "__main__":
install()
with closing(loop):
with suppress(asyncio.exceptions.CancelledError):
loop.run_until_complete(start_bot())
loop.run_until_complete(asyncio.sleep(3.0))
|
TechShreyash/Yukinon_Robot
|
Yukinon/__main__.py
|
__main__.py
|
py
| 13,537 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "asyncio.get_event_loop",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "lang.get_command",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "lang.get_command",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "Yukinon.plugins.ALL_MODULES",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "importlib.import_module",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "Yukinon.plugins.ALL_MODULES",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "pyrogram.idle",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "asyncio.all_tasks",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "pyrogram.types.InlineKeyboardMarkup",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "pyrogram.types.InlineKeyboardButton",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "pyrogram.types.InlineKeyboardButton",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "pyrogram.types.InlineKeyboardButton",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "pyrogram.types.InlineKeyboardButton",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "pyrogram.types.InlineKeyboardButton",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "pyrogram.types.InlineKeyboardButton",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "pyrogram.types.InlineKeyboardMarkup",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "pyrogram.types.InlineKeyboardButton",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "pyrogram.types.Message",
"line_number": 135,
"usage_type": "name"
},
{
"api_name": "Yukinon.plugins.fsub.ForceSub",
"line_number": 136,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "pyrogram.filters.command",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "pyrogram.filters",
"line_number": 133,
"usage_type": "name"
},
{
"api_name": "pyrogram.types.Message",
"line_number": 193,
"usage_type": "name"
},
{
"api_name": "Yukinon.plugins.fsub.ForceSub",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "pyrogram.types.InlineKeyboardMarkup",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "pyrogram.types.InlineKeyboardButton",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "pyrogram.types.InlineKeyboardButton",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "pyrogram.types.InlineKeyboardButton",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "pyrogram.types.InlineKeyboardMarkup",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "pyrogram.filters.command",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "pyrogram.filters",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "pyrogram.filters.regex",
"line_number": 254,
"usage_type": "call"
},
{
"api_name": "pyrogram.filters",
"line_number": 254,
"usage_type": "name"
},
{
"api_name": "pyrogram.types.InlineKeyboardMarkup",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "Yukinon.utils.paginate_modules",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "pyrogram.filters.regex",
"line_number": 298,
"usage_type": "call"
},
{
"api_name": "pyrogram.filters",
"line_number": 298,
"usage_type": "name"
},
{
"api_name": "re.match",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 315,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 317,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "pyrogram.types.InlineKeyboardButton",
"line_number": 330,
"usage_type": "call"
},
{
"api_name": "pyrogram.types.InlineKeyboardButton",
"line_number": 331,
"usage_type": "call"
},
{
"api_name": "pyrogram.types.InlineKeyboardMarkup",
"line_number": 334,
"usage_type": "call"
},
{
"api_name": "pyrogram.types.InlineKeyboardMarkup",
"line_number": 349,
"usage_type": "call"
},
{
"api_name": "Yukinon.utils.paginate_modules",
"line_number": 350,
"usage_type": "call"
},
{
"api_name": "pyrogram.types.InlineKeyboardMarkup",
"line_number": 359,
"usage_type": "call"
},
{
"api_name": "Yukinon.utils.paginate_modules",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "pyrogram.types.InlineKeyboardMarkup",
"line_number": 368,
"usage_type": "call"
},
{
"api_name": "Yukinon.utils.paginate_modules",
"line_number": 369,
"usage_type": "call"
},
{
"api_name": "pyrogram.filters.regex",
"line_number": 310,
"usage_type": "call"
},
{
"api_name": "pyrogram.filters",
"line_number": 310,
"usage_type": "name"
},
{
"api_name": "uvloop.install",
"line_number": 385,
"usage_type": "call"
},
{
"api_name": "contextlib.closing",
"line_number": 386,
"usage_type": "call"
},
{
"api_name": "contextlib.suppress",
"line_number": 387,
"usage_type": "call"
},
{
"api_name": "asyncio.exceptions",
"line_number": 387,
"usage_type": "attribute"
},
{
"api_name": "asyncio.sleep",
"line_number": 389,
"usage_type": "call"
}
] |
21124445086
|
import linecache
import math
import os
# TODO notes:
# - Revise the implementation to use numpy throughout.
# - Run multiple sets of 100 test cases each, scoring sum of squared errors
#   and average error; a small standard deviation and a smaller mean error
#   both indicate better predictions.
# - Consider cosine similarity as an alternative similarity measure.
# - Transposing the utility matrix would let the same functions serve both
#   collaborative (user-based) and item-based prediction.
# Number of user rows in the Jester ratings file (loops iterate exactly this many lines).
FILESIZE = 24983
# Number of joke columns per row; a stored rating of 99 means "not rated".
NUMJOKES = 100
# Input data set.  Column 0 of each row is skipped by every reader below
# (it appears to hold the user's rating count — confirm against the data).
FILENAME = "jester-data-1.csv"
# Scratch files written/consumed by the nearest-neighbour routines.
FILENAMENNC = "nearestNeighborsC.csv"
FILENAMENNIB = "nearestNeighborsIB.csv"
def main():
    """Smoke-test the prediction routines on user index 3, joke index 3.

    BUG FIX: the printed labels used to claim "User 500, Joke 50" while the
    calls actually pass indices (3, 3); the labels now match the arguments
    (0-based index 3 == user 4 / joke 4 of the data set).  The large block
    of commented-out exploratory calls (nearest-neighbour variants on
    user 499 / joke 49, pairwise correlations, etc.) was removed.
    """
    print("Collaborative Average - User 4, Joke 4: " + str(collaborativeAverage(3, 3)))
    print("Item Based Average - User 4, Joke 4: " + str(itemBasedAverage(3, 3)))
    print("Collaborative Weighted Sum - User 4, Joke 4: " + str(collaborativeWeightedSum(3, 3)))
    print("Item Based Weighted Sum - User 4, Joke 4: " + str(itemBasedWeightedSum(3, 3)))
    print("Collaborative Adjusted Weighted Sum - User 4, Joke 4: " + str(collaborativeAdjustedWeightedSum(3, 3)))
    print("Item Based Adjusted Weighted Sum - User 4, Joke 4: " + str(itemBasedAdjustedWeightedSum(3, 3)))
def collaborativeAverage(userNumber, itemNumber, fileName = FILENAME, fileSize = FILESIZE):
    """Mean rating of joke `itemNumber` over every user except `userNumber`.

    Ratings equal to 99 mean "not rated" and are skipped.  Pass -1 as
    userNumber to include all rows.  Raises ZeroDivisionError when nobody
    rated the joke (unchanged from the original behaviour).
    """
    total = 0
    count = 0
    for row in range(fileSize):
        # The old code kept a separate currentUser counter that always
        # equalled the loop index; compare the index directly instead.
        if row != userNumber:
            info = linecache.getline(fileName, row + 1).split(",")
            rating = float(info[itemNumber + 1])  # column 0 is not a rating
            if rating != 99:
                total += rating
                count += 1
    return total / count
def collaborativeWeightedSum(userNumber, itemNumber, fileName = FILENAME, fileSize = FILESIZE):
    """Similarity-weighted sum prediction of joke `itemNumber` for `userNumber`.

    Each other user's rating is weighted by their Pearson correlation with
    the target user and normalised by the sum of absolute similarities.
    """
    numerator = 0
    normalizer = 0
    for other in range(fileSize):
        if other == userNumber:
            continue
        fields = linecache.getline(fileName, other + 1).split(",")
        rating = float(fields[itemNumber + 1])
        if rating == 99:
            continue  # this user never rated the joke
        weight = collaborativePearsonCorrelation(userNumber, other, fileName)
        normalizer += abs(weight)
        numerator += weight * rating
    return numerator / normalizer
def collaborativeAdjustedWeightedSum(userNumber, itemNumber, fileName = FILENAME, fileSize = FILESIZE):
    """Adjusted (mean-centred) weighted-sum prediction for `userNumber`.

    The target user's mean rating plus the similarity-weighted deviations of
    other users' ratings from their own means.
    """
    numerator = 0
    normalizer = 0
    for other in range(fileSize):
        if other == userNumber:
            continue
        fields = linecache.getline(fileName, other + 1).split(",")
        rating = float(fields[itemNumber + 1])
        if rating == 99:
            continue  # unrated
        weight = collaborativePearsonCorrelation(userNumber, other, fileName)
        normalizer += abs(weight)
        numerator += weight * (rating - itemBasedAverage(other, -1, fileName))
    return itemBasedAverage(userNumber, -1, fileName) + numerator / normalizer
def collaborativePearsonCorrelation(user1Number, user2Number, fileName = FILENAME):
    """Pearson correlation between two users over jokes both have rated.

    Raises ZeroDivisionError when either user shows no variance over the
    common jokes (unchanged from the original behaviour).
    """
    row1 = linecache.getline(fileName, user1Number + 1).split(",")  # linecache is 1-indexed
    row2 = linecache.getline(fileName, user2Number + 1).split(",")
    mean1 = itemBasedAverage(user1Number, -1, fileName)  # -1: exclude no joke
    mean2 = itemBasedAverage(user2Number, -1, fileName)
    num = denom1 = denom2 = 0
    for col in range(1, len(row1)):  # column 0 is not a rating
        r1 = float(row1[col])
        r2 = float(row2[col])
        if r1 == 99 or r2 == 99:
            continue  # skip jokes either user left unrated
        d1 = r1 - mean1
        d2 = r2 - mean2
        num += d1 * d2
        denom1 += d1 ** 2
        denom2 += d2 ** 2
    return num / math.sqrt(denom1 * denom2)
def itemBasedAverage(userNumber, itemNumber, fileName = FILENAME):
    """Mean of one user's ratings, excluding joke `itemNumber` (pass -1 to keep all).

    Ratings of 99 mean "not rated" and are skipped; raises ZeroDivisionError
    when the user rated nothing (unchanged behaviour).
    """
    fields = linecache.getline(fileName, userNumber + 1).split(",")
    total = 0
    count = 0
    for col in range(1, len(fields)):
        if col == itemNumber + 1:
            continue  # skip the joke being predicted
        rating = float(fields[col])
        if rating != 99:
            total += rating
            count += 1
    return total / count
def itemBasedWeightedSum(userNumber, itemNumber, fileName = FILENAME, fileSize = FILESIZE):
    """Item-based weighted-sum prediction: the user's other ratings, each
    weighted by that joke's Pearson correlation with `itemNumber`."""
    fields = linecache.getline(fileName, userNumber + 1).split(",")
    numerator = 0
    normalizer = 0
    for col in range(1, len(fields)):
        if col == itemNumber + 1:
            continue  # never weight the target joke by itself
        rating = float(fields[col])
        if rating == 99:
            continue  # unrated
        weight = itemBasedPearsonCorrelation(itemNumber, col - 1, fileName, fileSize)
        normalizer += abs(weight)
        numerator += weight * rating
    return numerator / normalizer
def itemBasedAdjustedWeightedSum(userNumber, itemNumber, fileName = FILENAME, fileSize = FILESIZE):
    """Item-based adjusted weighted sum: the joke's mean rating plus the
    similarity-weighted, mean-centred deviations of the user's other ratings.
    """
    normalizationSum = 0
    compSum = 0
    info = linecache.getline(fileName, userNumber + 1).split(",")
    for i in range(1, len(info)):
        if i != itemNumber + 1:
            utilityItemI = float(info[i])
            if utilityItemI != 99:
                similarity = itemBasedPearsonCorrelation(itemNumber, i - 1, fileName, fileSize)
                normalizationSum += abs(similarity)
                compSum += (similarity * (utilityItemI - collaborativeAverage(-1, i - 1, fileName, fileSize)))
    # BUG FIX: this final average previously used the default FILENAME/FILESIZE,
    # silently reading the wrong file whenever a nearest-neighbour scratch
    # file was passed in; forward fileName/fileSize like every other call.
    return (collaborativeAverage(-1, itemNumber, fileName, fileSize) + (compSum/normalizationSum))
def itemBasedPearsonCorrelation(item1Number, item2Number, fileName = FILENAME, fileSize = FILESIZE):
    """Pearson correlation between two jokes over users who rated both."""
    mean1 = collaborativeAverage(-1, item1Number, fileName, fileSize)  # -1: include every user
    mean2 = collaborativeAverage(-1, item2Number, fileName, fileSize)
    num = 0
    denom1 = 0
    denom2 = 0
    for row in range(fileSize):
        fields = linecache.getline(fileName, row + 1).split(",")
        r1 = float(fields[item1Number + 1])
        r2 = float(fields[item2Number + 1])
        if r1 == 99 or r2 == 99:
            continue  # skip users who left either joke unrated
        d1 = r1 - mean1
        d2 = r2 - mean2
        num += d1 * d2
        denom1 += d1 ** 2
        denom2 += d2 ** 2
    return num / math.sqrt(denom1 * denom2)
def getNearestNeighborsCollaborative(userNumber, n):
    """Write the n users most similar to `userNumber` (Pearson correlation),
    plus the target user's own row, to the scratch file FILENAMENNC.

    The candidate list stays sorted ascending by similarity, so slot 0 always
    holds the current worst keeper and a newcomer only has to beat it.
    """
    nearestNeighbors = [[-2, -1] for _ in range(n)]  # [similarity, user row index]
    for i in range(0, FILESIZE):
        if i != userNumber:
            # (The old code also fetched this user's CSV line here and never
            # used it — that redundant linecache read is removed.)
            similarity = collaborativePearsonCorrelation(userNumber, i)
            if similarity > nearestNeighbors[0][0]:
                nearestNeighbors[0][0] = similarity
                nearestNeighbors[0][1] = i
                nearestNeighbors = sorted(nearestNeighbors, key=lambda x: x[0])
    # BUG FIX: the output file was never closed, so buffered rows could be
    # invisible to callers that immediately re-read the scratch file.
    with open(FILENAMENNC, 'w') as out:
        for j in range(0, n):
            out.write(linecache.getline(FILENAME, nearestNeighbors[j][1] + 1))
        out.write(linecache.getline(FILENAME, userNumber + 1))
    # Drop any stale cached copy of the (re-)written scratch file, otherwise
    # a second nearest-neighbour run would read the first run's lines.
    linecache.checkcache(FILENAMENNC)
def getNearestNeighborsItemBased(itemNumber, n):
    """Write scratch file FILENAMENNIB with, per user: the rated-joke count,
    the ratings of the n jokes most similar to `itemNumber`, then the rating
    of `itemNumber` itself as the last column.
    """
    nearestNeighbors = [[-2, -1] for _ in range(n)]  # [similarity, joke index]
    for i in range(0, NUMJOKES):
        if i != itemNumber:
            similarity = itemBasedPearsonCorrelation(itemNumber, i)
            if similarity > nearestNeighbors[0][0]:
                nearestNeighbors[0][0] = similarity
                nearestNeighbors[0][1] = i
            nearestNeighbors = sorted(nearestNeighbors, key=lambda x: x[0])
    # BUG FIX: use a context manager so the file is flushed and closed before
    # callers re-read it (the handle previously leaked).
    with open(FILENAMENNIB, 'w') as out:
        for i in range(0, FILESIZE):
            line = linecache.getline(FILENAME, i + 1).rstrip()
            info = line.split(',')
            count = NUMJOKES - (info[1:]).count(str(99))
            lineOut = ""
            for j in range(0, n):
                # BUG FIX: joke k's rating lives in column k + 1 (column 0 is
                # skipped by every reader); the old code wrote info[k], i.e.
                # the previous joke's rating (or column 0 for joke 0).
                lineOut += info[nearestNeighbors[j][1] + 1] + ","
            lineOut += info[itemNumber + 1]
            lineOut = str(count) + "," + lineOut + "\n"
            out.write(lineOut)
    # Drop any stale cached copy of the rewritten scratch file.
    linecache.checkcache(FILENAMENNIB)
def nearestNeighborsCollaborativeAverage(userNumber, itemNumber, n):
    """Collaborative average restricted to the n nearest neighbours of `userNumber`."""
    getNearestNeighborsCollaborative(userNumber, n)
    # In the scratch file the target user is row n, hence userNumber=n here.
    prediction = collaborativeAverage(n, itemNumber, FILENAMENNC, n + 1)
    deleteFile(FILENAMENNC)
    return prediction
def nearestNeighborsCollaborativeWeightedSum(userNumber, itemNumber, n):
    """Collaborative weighted sum restricted to the n nearest neighbours."""
    getNearestNeighborsCollaborative(userNumber, n)
    prediction = collaborativeWeightedSum(n, itemNumber, FILENAMENNC, n + 1)
    deleteFile(FILENAMENNC)
    return prediction
def nearestNeighborsCollaborativeAdjustedWeightedSum(userNumber, itemNumber, n):
    """Collaborative adjusted weighted sum over the n nearest neighbours."""
    getNearestNeighborsCollaborative(userNumber, n)
    prediction = collaborativeAdjustedWeightedSum(n, itemNumber, FILENAMENNC, n + 1)
    deleteFile(FILENAMENNC)
    return prediction
def nearestNeighborsItemBasedAverage(userNumber, itemNumber, n):
    """Item-based average over the n jokes most similar to `itemNumber`."""
    getNearestNeighborsItemBased(itemNumber, n)
    # In the scratch file the target joke is column index n.
    prediction = itemBasedAverage(userNumber, n, FILENAMENNIB)
    deleteFile(FILENAMENNIB)
    return prediction
def nearestNeighborsItemBasedWeightedSum(userNumber, itemNumber, n):
    """Item-based weighted sum over the n jokes most similar to `itemNumber`."""
    getNearestNeighborsItemBased(itemNumber, n)
    prediction = itemBasedWeightedSum(userNumber, n, FILENAMENNIB)
    deleteFile(FILENAMENNIB)
    return prediction
def nearestNeighborsItemBasedAdjustedWeightedSum(userNumber, itemNumber, n):
    """Item-based adjusted weighted sum over the n most similar jokes."""
    getNearestNeighborsItemBased(itemNumber, n)
    prediction = itemBasedAdjustedWeightedSum(userNumber, n, FILENAMENNIB, FILESIZE)
    deleteFile(FILENAMENNIB)
    return prediction
def deleteFile(fileName):
    """Best-effort removal of a scratch file; a missing file is ignored."""
    try:
        os.remove(fileName)
    # BUG FIX: the bare `except:` also swallowed programming errors such as
    # TypeError and even KeyboardInterrupt; only filesystem errors should
    # be ignored here.
    except OSError:
        pass
if __name__ == "__main__":
main()
|
R3xKyle-CP/cpe400
|
jester/prediction2.py
|
prediction2.py
|
py
| 12,781 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "linecache.getline",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "linecache.getline",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "linecache.getline",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "linecache.getline",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "linecache.getline",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "linecache.getline",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "linecache.getline",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "linecache.getline",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "linecache.getline",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "linecache.getline",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "linecache.getline",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "linecache.getline",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "linecache.getline",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 271,
"usage_type": "call"
}
] |
1909503331
|
import unittest
import mock
from hpOneView.connection import connection
from hpOneView.resources.networking.sas_logical_interconnects import SasLogicalInterconnects
from hpOneView.resources.resource import ResourceClient
class SasLogicalInterconnectsTest(unittest.TestCase):
    """Unit tests for the SasLogicalInterconnects resource facade.

    Each test patches a single ResourceClient method and verifies that the
    facade forwards the expected URI and arguments; no HTTP I/O happens.
    """
    def setUp(self):
        # The client under test is built against a dummy host; every
        # transport call is intercepted by the mock.patch decorators below.
        self.host = '127.0.0.1'
        self.connection = connection(self.host)
        self._client = SasLogicalInterconnects(self.connection)
    @mock.patch.object(ResourceClient, 'get_all')
    def test_get_all_called_once(self, mock_get_all):
        # NOTE(review): `filter` shadows the builtin, and the facade maps
        # filter->fields and sort->filter in the forwarded call — confirm
        # this mapping against SasLogicalInterconnects.get_all.
        filter = 'name=TestName'
        sort = 'name:ascending'
        self._client.get_all(2, 500, filter, sort)
        mock_get_all.assert_called_once_with(count=500, fields='name=TestName', filter='name:ascending', query='',
                                             sort='', start=2, view='')
    @mock.patch.object(ResourceClient, 'get_all')
    def test_get_all_called_once_with_default(self, mock_get_all):
        # Default arguments must translate into the documented defaults.
        self._client.get_all()
        mock_get_all.assert_called_once_with(count=-1, fields='', filter='', query='', sort='', start=0, view='')
    @mock.patch.object(ResourceClient, 'get')
    def test_get_by_id_called_once(self, mock_get):
        # get() accepts a bare resource id...
        logical_interconnect_id = "f0a0a113-ec97-41b4-83ce-d7c92b900e7c"
        self._client.get(logical_interconnect_id)
        mock_get.assert_called_once_with(logical_interconnect_id)
    @mock.patch.object(ResourceClient, 'get')
    def test_get_by_uri_called_once(self, mock_get):
        # ...or a full REST URI; both are forwarded unchanged.
        logical_interconnect_uri = "/rest/sas-logical-interconnects/f0a0a113-ec97-41b4-83ce-d7c92b900e7c"
        self._client.get(logical_interconnect_uri)
        mock_get.assert_called_once_with(logical_interconnect_uri)
    @mock.patch.object(ResourceClient, 'get_by')
    def test_get_by_called_once(self, mock_get):
        self._client.get_by("name", "value")
        mock_get.assert_called_once_with("name", "value")
    @mock.patch.object(ResourceClient, 'create')
    def test_replace_drive_enclosure_called_once(self, mock_create):
        # The payload is copied before posting to the replaceDriveEnclosure endpoint.
        drive_replacement = {
            "oldSerialNumber": "SN1100",
            "newSerialNumber": "SN1101"
        }
        self._client.replace_drive_enclosure(drive_replacement, "ad28cf21-8b15-4f92-bdcf-51cb2042db32")
        mock_create.assert_called_once_with(
            drive_replacement.copy(),
            '/rest/sas-logical-interconnects/ad28cf21-8b15-4f92-bdcf-51cb2042db32/replaceDriveEnclosure')
    @mock.patch.object(ResourceClient, 'update')
    def test_update_compliance_all_called_once(self, mock_update):
        # Bulk compliance update posts the URI list to the /compliance endpoint.
        compliance_uris = {
            "uris": [
                "/rest/sas-logical-interconnects/ad28cf21-8b15-4f92-bdcf-51cb2042db32"
            ]}
        self._client.update_compliance_all(compliance_uris)
        mock_update.assert_called_once_with(compliance_uris.copy(),
                                            '/rest/sas-logical-interconnects/compliance',
                                            timeout=-1)
    @mock.patch.object(ResourceClient, 'update_with_zero_body')
    def test_update_compliance_by_uri(self, mock_update_with_zero_body):
        logical_interconnect_uri = '/rest/sas-logical-interconnects/ad28cf21-8b15-4f92-bdcf-51cb2042db32'
        self._client.update_compliance(logical_interconnect_uri)
        mock_update_with_zero_body.assert_called_once_with(
            '/rest/sas-logical-interconnects/ad28cf21-8b15-4f92-bdcf-51cb2042db32/compliance', timeout=-1)
    @mock.patch.object(ResourceClient, 'update_with_zero_body')
    def test_update_compliance_by_id(self, mock_update_with_zero_body):
        # A bare id must resolve to the same /compliance URI as a full URI.
        mock_update_with_zero_body.return_value = {}
        logical_interconnect_id = 'ad28cf21-8b15-4f92-bdcf-51cb2042db32'
        self._client.update_compliance(logical_interconnect_id)
        mock_update_with_zero_body.assert_called_once_with(
            '/rest/sas-logical-interconnects/ad28cf21-8b15-4f92-bdcf-51cb2042db32/compliance', timeout=-1)
    @mock.patch.object(ResourceClient, 'update_with_zero_body')
    def test_update_configuration_by_uri(self, mock_update_with_zero_body):
        logical_interconnect_uri = '/rest/sas-logical-interconnects/ad28cf21-8b15-4f92-bdcf-51cb2042db32'
        self._client.update_configuration(logical_interconnect_uri)
        mock_update_with_zero_body.assert_called_once_with(
            '/rest/sas-logical-interconnects/ad28cf21-8b15-4f92-bdcf-51cb2042db32/configuration')
    @mock.patch.object(ResourceClient, 'update_with_zero_body')
    def test_update_configuration_by_id(self, mock_update_with_zero_body):
        logical_interconnect_id = 'ad28cf21-8b15-4f92-bdcf-51cb2042db32'
        self._client.update_configuration(logical_interconnect_id)
        mock_update_with_zero_body.assert_called_once_with(
            '/rest/sas-logical-interconnects/ad28cf21-8b15-4f92-bdcf-51cb2042db32/configuration')
    @mock.patch.object(ResourceClient, 'get')
    def test_get_firmware(self, mock_get):
        logical_interconnect_id = '3518be0e-17c1-4189-8f81-83f3724f6155'
        logical_interconnect_uri = "/rest/sas-logical-interconnects/" + logical_interconnect_id
        expected_uri = logical_interconnect_uri + "/firmware"
        self._client.get_firmware(logical_interconnect_id)
        mock_get.assert_called_once_with(expected_uri)
    @mock.patch.object(ResourceClient, 'update')
    def test_update_firmware(self, mock_update):
        logical_interconnect_id = '3518be0e-17c1-4189-8f81-83f3724f6155'
        fake_firmware = dict(
            command="Update",
            sppUri="/rest/firmware-drivers/Service_0Pack_0for_0ProLiant"
        )
        logical_interconnect_uri = "/rest/sas-logical-interconnects/" + logical_interconnect_id
        expected_uri = logical_interconnect_uri + "/firmware"
        self._client.update_firmware(fake_firmware, logical_interconnect_id)
        mock_update.assert_called_once_with(fake_firmware, expected_uri)
|
HewlettPackard/python-hpOneView
|
tests/unit/resources/networking/test_sas_logical_interconnects.py
|
test_sas_logical_interconnects.py
|
py
| 5,962 |
python
|
en
|
code
| 86 |
github-code
|
6
|
[
{
"api_name": "unittest.TestCase",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "hpOneView.connection.connection",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "hpOneView.resources.networking.sas_logical_interconnects.SasLogicalInterconnects",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "mock.patch.object",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "hpOneView.resources.resource.ResourceClient",
"line_number": 15,
"usage_type": "argument"
},
{
"api_name": "mock.patch",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "mock.patch.object",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "hpOneView.resources.resource.ResourceClient",
"line_number": 25,
"usage_type": "argument"
},
{
"api_name": "mock.patch",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "mock.patch.object",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "hpOneView.resources.resource.ResourceClient",
"line_number": 30,
"usage_type": "argument"
},
{
"api_name": "mock.patch",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "mock.patch.object",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "hpOneView.resources.resource.ResourceClient",
"line_number": 36,
"usage_type": "argument"
},
{
"api_name": "mock.patch",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "mock.patch.object",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "hpOneView.resources.resource.ResourceClient",
"line_number": 42,
"usage_type": "argument"
},
{
"api_name": "mock.patch",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "mock.patch.object",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "hpOneView.resources.resource.ResourceClient",
"line_number": 47,
"usage_type": "argument"
},
{
"api_name": "mock.patch",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "mock.patch.object",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "hpOneView.resources.resource.ResourceClient",
"line_number": 59,
"usage_type": "argument"
},
{
"api_name": "mock.patch",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "mock.patch.object",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "hpOneView.resources.resource.ResourceClient",
"line_number": 72,
"usage_type": "argument"
},
{
"api_name": "mock.patch",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "mock.patch.object",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "hpOneView.resources.resource.ResourceClient",
"line_number": 80,
"usage_type": "argument"
},
{
"api_name": "mock.patch",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "mock.patch.object",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "hpOneView.resources.resource.ResourceClient",
"line_number": 90,
"usage_type": "argument"
},
{
"api_name": "mock.patch",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "mock.patch.object",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "hpOneView.resources.resource.ResourceClient",
"line_number": 99,
"usage_type": "argument"
},
{
"api_name": "mock.patch",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "mock.patch.object",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "hpOneView.resources.resource.ResourceClient",
"line_number": 108,
"usage_type": "argument"
},
{
"api_name": "mock.patch",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "mock.patch.object",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "hpOneView.resources.resource.ResourceClient",
"line_number": 119,
"usage_type": "argument"
},
{
"api_name": "mock.patch",
"line_number": 119,
"usage_type": "attribute"
}
] |
21917306491
|
from django.shortcuts import render, redirect
from .models import *
from hashlib import sha1
from django.http import JsonResponse, HttpResponseRedirect
from . import user_decorator
from df_goods.models import *
def register(request):
return render(request, 'df_user/register.html')
def register_handle(request):
# 接受用户输入
post = request.POST
uname = post.get('user_name')
upwd = post.get('pwd')
upwd2 = post.get('cpwd')
uemail = post.get('email')
# 判断两次密码是否一次
if upwd != upwd2:
return redirect('/user/register/')
# 密码加密
s1 = sha1()
s1.update(upwd.encode("utf8"))
upwd3 = s1.hexdigest()
# 创建对象
user = UserInfo()
user.uname = uname
user.upwd = upwd3
user.uemail = uemail
user.save()
# 注册成功,转到登录页面
context = {'title': '用户登录', 'uname': uname, 'page_name': 1}
return render(request, 'df_user/login.html', context)
def register_exist(request):
uname = request.GET.get('uname')
print(uname)
count = UserInfo.objects.filter(uname=uname).count()
print(count)
return JsonResponse({'count': count})
def login(request):
uname = request.COOKIES.get('uname', '')
context = {'title': '用户登录', 'error_name': 0, 'error_pwd': 0, "uname": uname, 'page_name': 1}
return render(request, 'df_user/login.html', context)
def login_handle(request):
# 接受请求信息
post = request.POST
uname = post.get('username')
upwd = post.get('pwd')
remember_name = post.get('remember_name', 0)
# 根据用户名查询对象
users = UserInfo.objects.filter(uname=uname) # []
print(uname)
# 判断:如果未查到则用户名错,如果查到则判断密码是否正确,正确则转到用户中心
if len(users) == 1:
s1 = sha1()
s1.update(upwd.encode("utf8"))
if s1.hexdigest() == users[0].upwd:
url = request.COOKIES.get('url', '/')
red = HttpResponseRedirect(url)
# 记住用户名
if remember_name != 0:
red.set_cookie('uname', uname)
else:
red.set_cookie('uname', '', max_age=-1) # max_age过期时间
request.session['user_id'] = users[0].id
request.session['user_name'] = uname
return red
else:
context = {'title': '用户登录', 'error_name': 0, 'error_pwd': 1, 'uname': uname, 'upwd': upwd, 'page_name': 1}
return render(request, 'df_user/login.html', context)
else:
context = {'title': '用户登录', 'error_name': 1, 'error_pwd': 0, 'uname': uname, 'upwd': upwd, 'page_name': 1}
return render(request, 'df_user/login.html', context)
def logout(request):
request.session.clear()
return HttpResponseRedirect('/')
@user_decorator.login
def info(request):
user_email = UserInfo.objects.get(id=request.session['user_id']).uemail
# 最近浏览
goods_ids = request.COOKIES.get('goods_ids', '')
goods_ids1 = goods_ids.split(',')
goods_list = []
for goods_id in goods_ids1:
goods_list.append(GoodsInfo.objects.get(id=int(goods_id)))
context = {'title': '用户中心',
'user_email': user_email,
'user_name': request.session['user_name'],
'page_name': 1,
'goods_list': goods_list,
}
return render(request, 'df_user/user_center_info.html', context)
@user_decorator.login
def order(request):
context = {'title': '订单中心', 'page_name': 1}
return render(request, 'df_user/user_center_order.html', context)
@user_decorator.login
def site(request):
user = UserInfo.objects.get(id=request.session['user_id'])
print(user.id)
if request.method == 'POST':
post = request.POST
user.urece = post.get('urece')
user.uaddress = post.get('uaddress')
user.uzip = post.get('uzip')
user.uphone = post.get('uphone')
user.save()
context = {'title': '收货地址', 'user': user, 'page_name': 1}
return render(request, 'df_user/user_center_site.html', context)
|
junjie0825/dailyfresh
|
dailyfresh/df_user/views.py
|
views.py
|
py
| 4,210 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.shortcuts.render",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "hashlib.sha1",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "hashlib.sha1",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 127,
"usage_type": "call"
}
] |
20398018702
|
import wx
[wxID_NUMBERINGPANEL, wxID_NUMBERINGPANELALPHA, wxID_NUMBERINGPANELALPHA_PAD,
wxID_NUMBERINGPANELALPHA_UC, wxID_NUMBERINGPANELASC,
wxID_NUMBERINGPANELCOUNT, wxID_NUMBERINGPANELCOUNTBYDIR,
wxID_NUMBERINGPANELDESC, wxID_NUMBERINGPANELDIGIT,
wxID_NUMBERINGPANELDIGIT_AUTOPAD, wxID_NUMBERINGPANELDIGIT_PAD,
wxID_NUMBERINGPANELDIGIT_SETPAD, wxID_NUMBERINGPANELDOWNBOTTOM,
wxID_NUMBERINGPANELDOWNBUTTON, wxID_NUMBERINGPANELDOWNMORE,
wxID_NUMBERINGPANELORDER, wxID_NUMBERINGPANELPAD_CHAR,
wxID_NUMBERINGPANELPAD_WIDTH, wxID_NUMBERINGPANELRESET,
wxID_NUMBERINGPANELRESETDIR, wxID_NUMBERINGPANELROMAN,
wxID_NUMBERINGPANELROMAN_UC, wxID_NUMBERINGPANELSORTING,
wxID_NUMBERINGPANELSORT_TEXT, wxID_NUMBERINGPANELSTART,
wxID_NUMBERINGPANELSTARTBYITEMS, wxID_NUMBERINGPANELSTATICTEXT1,
wxID_NUMBERINGPANELSTATICTEXT2, wxID_NUMBERINGPANELSTATICTEXT5,
wxID_NUMBERINGPANELSTATICTEXT6, wxID_NUMBERINGPANELSTATICTEXT7,
wxID_NUMBERINGPANELSTEP, wxID_NUMBERINGPANELSTYLE,
wxID_NUMBERINGPANELUPBUTTON, wxID_NUMBERINGPANELUPMORE,
wxID_NUMBERINGPANELUPTOP, wxID_NUMBERINGPANELSTARTBYITEM
] = [wx.NewId() for _init_ctrls in range(37)]
class numberingPanel(wx.Panel):
def sizer(self):
#>> start style box:
sLine1 = wx.BoxSizer(wx.HORIZONTAL)
line1elements = [(self.digit,10),
(self.digit_pad,0),
(self.pad_char,5)]
if main.langLTR:
for i in line1elements:
sLine1.Add(i[0],0,wx.ALIGN_CENTER|wx.RIGHT,i[1])
else:
line1elements.reverse()
for i in line1elements:
sLine1.Add(i[0],0,wx.ALIGN_CENTER|wx.LEFT,i[1])
sLine1.Add((5,-1),0)
sLine3 = wx.BoxSizer(wx.HORIZONTAL)
if main.langLTR:
sLine3.Add(self.digit_setpad,0,wx.ALIGN_CENTER)
sLine3.Add(self.pad_width,0)
else:
sLine3.Add(self.pad_width,0)
sLine3.Add(self.digit_setpad,0,wx.ALIGN_CENTER)
sLine4 = wx.BoxSizer(wx.HORIZONTAL)
line4elements = [(self.alpha,10),
(self.alpha_uc,5),
(self.alpha_pad,10),]
if main.langLTR:
for i in line4elements:
sLine4.Add(i[0],0,wx.ALIGN_CENTER|wx.RIGHT,i[1])
else:
line4elements.reverse()
for i in line4elements:
sLine4.Add(i[0],0,wx.ALIGN_CENTER|wx.LEFT,i[1])
sLine4.Add((5,-1),0)
sLine5 = wx.BoxSizer(wx.HORIZONTAL)
if main.langLTR:
sLine5.Add(self.roman,0,wx.RIGHT,10)
sLine5.Add(self.roman_uc,0)
else:
sLine5.Add(self.roman_uc,0,wx.RIGHT,10)
sLine5.Add(self.roman,0,wx.RIGHT,5)
styleSizer = wx.StaticBoxSizer(self.style, wx.VERTICAL)
styleSizer.Add(sLine1,0,wx.TOP|wx.BOTTOM|main.alignment,7)
styleSizer.Add(self.digit_autopad,0,wx.LEFT|wx.RIGHT|main.alignment,20)
styleSizer.Add((1,7))
styleSizer.Add(sLine3,0,wx.LEFT|wx.RIGHT|main.alignment,20)
styleSizer.Add(sLine4,0,wx.BOTTOM|wx.TOP|main.alignment,25)
styleSizer.Add(sLine5,0,wx.BOTTOM|main.alignment,10)
#<< end style box
#>> start order box:
oLine1 = wx.BoxSizer(wx.HORIZONTAL)
if main.langLTR:
oLine1.Add(self.sort_text,0,wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT,5)
oLine1.Add(self.sorting,0,wx.ALIGN_CENTER)
else:
oLine1.Add((-1,-1),1)
oLine1.Add(self.sorting,0,wx.ALIGN_CENTER)
oLine1.Add(self.sort_text,0,wx.ALIGN_CENTER|wx.LEFT|wx.RIGHT,5)
oLine2 = wx.BoxSizer(wx.HORIZONTAL)
oLine2elements = [((5,-1),0),
(self.staticText2,5),
(self.upButton,2),
(self.downButton,0),
((5,-1),0),
(self.upMore,2),
(self.downMore,0),
((5,-1),0),
(self.upTop,2),
(self.downBottom,0),
((5,-1),0)]
if main.langLTR:
for i in oLine2elements:
oLine2.Add(i[0],0,wx.ALIGN_CENTER|wx.RIGHT,i[1])
else:
oLine2elements.reverse()
for i in oLine2elements:
oLine2.Add(i[0],0,wx.ALIGN_CENTER|wx.LEFT,i[1])
orderSizer = self.orderSizer = wx.StaticBoxSizer(self.order, wx.VERTICAL)
orderSizer.Add((-1,3),0)
orderSizer.Add(oLine1,0,wx.BOTTOM|wx.EXPAND,10)
orderSizer.Add(oLine2,0,wx.BOTTOM,4)
#<< end order box
#>> start count box:
countDir = wx.BoxSizer(wx.HORIZONTAL)
countDir.Add(self.asc,3)
countDir.Add((-1,-1),1)
countDir.Add(self.desc,3)
countSizer = wx.FlexGridSizer(cols=2, vgap=3, hgap=5)
countElements = [[self.staticText5,
self.start],
[(-1,-1),
self.startByItems],
[(-1,5),(-1,5)],
[self.staticText6,
countDir],
[(-1,5),(-1,5)],
[self.staticText7,
self.step],
[(-1,-1),
self.countByDir],
[(-1,15),(-1,15)],
[self.staticText1,
self.reset],
[(-1,-1),
self.resetDir],
]
for row in countElements:
if not main.langLTR:
row.reverse()
for i in row:
countSizer.Add(i,0,wx.EXPAND|main.alignment)
countBoxSizer = wx.StaticBoxSizer(self.count, wx.VERTICAL)
countBoxSizer.Add(countSizer,0,wx.ALL,7)
#<< end count box
# main sizer and finish:
mainSizer = self.mainSizer = wx.BoxSizer(wx.HORIZONTAL)
leftSizer = self.leftSizer = wx.BoxSizer(wx.VERTICAL)
leftSizer.Add(styleSizer,0,wx.EXPAND)
leftSizer.Add(orderSizer,0,wx.EXPAND|wx.TOP,10)
mainElements = [((10,-1),0),
(leftSizer,7),
((25,-1),0),
(countBoxSizer,30)]
if main.langLTR:
for i in mainElements:
mainSizer.Add(i[0],0,wx.TOP,i[1])
else:
mainElements.reverse()
mainSizer.Add((-1,-1),1)
for i in mainElements:
mainSizer.Add(i[0],0,wx.TOP,i[1])
self.SetSizerAndFit(mainSizer)
def _init_ctrls(self, prnt):
# generated method, don't edit
wx.Panel.__init__(self, id=wxID_NUMBERINGPANEL, name=u'numberingPanel',
parent=prnt, pos=wx.Point(346, 305), size=wx.Size(642, 357),
style=wx.TAB_TRAVERSAL)
self.SetClientSize(wx.Size(634, 328))
self.count = wx.StaticBox(id=wxID_NUMBERINGPANELCOUNT,
label=_(u"Counter:"), name=u'count', parent=self,
pos=wx.Point(328, 16), style=main.alignment)
self.order = wx.StaticBox(id=wxID_NUMBERINGPANELORDER, label=_(u"Item Sorting:"),
name=u'order', parent=self, pos=wx.Point(16, 208),
size=wx.Size(280, 88), style=main.alignment)
self.style = wx.StaticBox(id=wxID_NUMBERINGPANELSTYLE,
label=_(u"Style:"), name=u'style', parent=self, pos=wx.Point(16,
16), style=main.alignment)
self.digit = wx.RadioButton(id=wxID_NUMBERINGPANELDIGIT,
label=_(u"Numerical:"), name=u'digit', parent=self,
pos=wx.Point(32, 48), style=wx.RB_GROUP)
self.digit.SetValue(True)
self.digit.Bind(wx.EVT_RADIOBUTTON, self.check_styles,
id=wxID_NUMBERINGPANELDIGIT)
self.alpha = wx.RadioButton(id=wxID_NUMBERINGPANELALPHA,
label=_(u"Alphabetical:"), name=u'alpha', parent=self,
pos=wx.Point(32, 112), style=0)
self.alpha.SetValue(False)
self.alpha.Enable(True)
self.alpha.SetToolTipString(_(u"Must start at positive value: (1=a, 28=ab, etc..)"))
self.alpha.Bind(wx.EVT_RADIOBUTTON, self.check_styles)
self.roman = wx.RadioButton(id=wxID_NUMBERINGPANELROMAN,
label=_(u"Roman Numeral:"), name=u'roman', parent=self,
pos=wx.Point(32, 144), style=0)
self.roman.SetValue(False)
self.roman.SetToolTipString(_(u"Count values must be between 1 and 4999"))
self.roman.Bind(wx.EVT_RADIOBUTTON, self.check_styles)
self.digit_pad = wx.CheckBox(id=wxID_NUMBERINGPANELDIGIT_PAD,
label=_(u"Pad, using:"), name=u'digit_pad', parent=self,
pos=wx.Point(112, 48), style=0)
self.digit_pad.SetValue(True)
self.digit_pad.Bind(wx.EVT_CHECKBOX, self.check_styles)
self.pad_char = wx.TextCtrl(id=wxID_NUMBERINGPANELPAD_CHAR,
name=u'pad_char', parent=self, pos=wx.Point(185, 47),
size=wx.Size(24, -1), style=0, value='0')
self.pad_char.SetMaxLength(1)
self.pad_char.Bind(wx.EVT_TEXT, main.showPreview)
self.alpha_pad = wx.CheckBox(id=wxID_NUMBERINGPANELALPHA_PAD,
label=_(u"auto pad"), name=u'alpha_pad', parent=self,
pos=wx.Point(216, 112), style=0)
self.alpha_pad.SetValue(True)
self.alpha_pad.Enable(False)
self.alpha_pad.Bind(wx.EVT_CHECKBOX, main.showPreview)
self.pad_width = wx.SpinCtrl(id=wxID_NUMBERINGPANELPAD_WIDTH, initial=3,
max=255, min=1, name=u'pad_width', parent=self, pos=wx.Point(161,
82), size=wx.Size(55, -1), style=wx.SP_ARROW_KEYS|wx.TE_PROCESS_ENTER)
self.pad_width.SetValue(3)
self.pad_width.SetRange(1, 255)
self.pad_width.Bind(wx.EVT_TEXT_ENTER, self.OnPad_widthSpinctrl)
self.pad_width.Bind(wx.EVT_SPINCTRL, self.OnPad_widthSpinctrl)
self.roman_uc = wx.CheckBox(id=wxID_NUMBERINGPANELROMAN_UC,
label=_(u"Uppercase"), name=u'roman_uc', parent=self,
pos=wx.Point(152, 144), style=0)
self.roman_uc.SetValue(True)
self.roman_uc.Enable(False)
self.roman_uc.Bind(wx.EVT_CHECKBOX, main.showPreview)
self.sort_text = wx.StaticText(id=wxID_NUMBERINGPANELSORT_TEXT,
label=_(u"Sort all items:"), name=u'sort_text', parent=self,
pos=wx.Point(24, 232), style=0)
self.staticText2 = wx.StaticText(id=wxID_NUMBERINGPANELSTATICTEXT2,
label=_(u"Manually adjust item:"), name=u'staticText2', parent=self,
pos=wx.Point(24, 264), style=0)
self.downButton = wx.BitmapButton(bitmap=wx.Bitmap(main.realPath(u'icons/down.png'),
wx.BITMAP_TYPE_PNG), id=wxID_NUMBERINGPANELDOWNBUTTON,
name=u'downButton', parent=self, pos=wx.Point(152, 256), style=wx.BU_AUTODRAW)
self.downButton.Bind(wx.EVT_BUTTON, self.changeItemOrder,
id=wxID_NUMBERINGPANELDOWNBUTTON)
self.upButton = wx.BitmapButton(bitmap=wx.Bitmap(main.realPath(u'icons/up.png'),
wx.BITMAP_TYPE_PNG), id=wxID_NUMBERINGPANELUPBUTTON,
name=u'upButton', parent=self, pos=wx.Point(128, 256), style=wx.BU_AUTODRAW)
self.upButton.Bind(wx.EVT_BUTTON, self.changeItemOrder,
id=wxID_NUMBERINGPANELUPBUTTON)
self.upTop = wx.BitmapButton(bitmap=wx.Bitmap(main.realPath(u'icons/upAll.png'),
wx.BITMAP_TYPE_PNG), id=wxID_NUMBERINGPANELUPTOP, name=u'upTop',
parent=self, pos=wx.Point(240, 256), style=wx.BU_AUTODRAW)
self.upTop.SetToolTipString(_(u"move to top"))
self.upTop.Bind(wx.EVT_BUTTON, self.changeItemOrder,
id=wxID_NUMBERINGPANELUPTOP)
self.downBottom = wx.BitmapButton(bitmap=wx.Bitmap(main.realPath(u'icons/downAll.png'),
wx.BITMAP_TYPE_PNG), id=wxID_NUMBERINGPANELDOWNBOTTOM,
name=u'downBottom', parent=self, pos=wx.Point(264, 256), style=wx.BU_AUTODRAW)
self.downBottom.SetToolTipString(_(u"move to bottom"))
self.downBottom.Bind(wx.EVT_BUTTON, self.changeItemOrder,
id=wxID_NUMBERINGPANELDOWNBOTTOM)
self.upMore = wx.BitmapButton(bitmap=wx.Bitmap(main.realPath(u'icons/up5.png'),
wx.BITMAP_TYPE_PNG), id=wxID_NUMBERINGPANELUPMORE, name=u'upMore',
parent=self, pos=wx.Point(184, 256), style=wx.BU_AUTODRAW)
self.upMore.SetToolTipString(_(u"move by 5"))
self.upMore.Bind(wx.EVT_BUTTON, self.changeItemOrder,
id=wxID_NUMBERINGPANELUPMORE)
self.downMore = wx.BitmapButton(bitmap=wx.Bitmap(main.realPath(u'icons/down5.png'),
wx.BITMAP_TYPE_PNG), id=wxID_NUMBERINGPANELDOWNMORE,
name=u'downMore', parent=self, pos=wx.Point(208, 256), style=wx.BU_AUTODRAW)
self.downMore.SetToolTipString(_(u"move by 5"))
self.downMore.Bind(wx.EVT_BUTTON, self.changeItemOrder,
id=wxID_NUMBERINGPANELDOWNMORE)
self.sorting = wx.Choice(choices=[ _(u"Ascending"), _(u"Descending"),
_(u"Manually")], id=wxID_NUMBERINGPANELSORTING, name=u'sorting',
parent=self, pos=wx.Point(160, 224), style=0)
self.sorting.SetSelection(0)
self.sorting.Bind(wx.EVT_CHOICE, self.setSortingOptions,
id=wxID_NUMBERINGPANELSORTING)
self.staticText5 = wx.StaticText(id=wxID_NUMBERINGPANELSTATICTEXT5,
label=_(u"Start at:"), name=u'staticText5', parent=self,
pos=wx.Point(352, 43), style=0)
self.step = wx.SpinCtrl(id=wxID_NUMBERINGPANELSTEP, initial=1,
max=10000000, min=1, name=u'step', parent=self, pos=wx.Point(416,
136), size=wx.Size(168, -1), style=wx.SP_ARROW_KEYS)
self.step.SetValue(1)
self.step.SetToolTipString(_(u"A.K.A. step size"))
self.step.Bind(wx.EVT_TEXT_ENTER, main.showPreview)
self.step.Bind(wx.EVT_SPINCTRL, main.showPreview)
self.staticText7 = wx.StaticText(id=wxID_NUMBERINGPANELSTATICTEXT7,
label=_(u"Count by:"), name=u'staticText7', parent=self,
pos=wx.Point(344, 142), style=0)
self.asc = wx.RadioButton(id=wxID_NUMBERINGPANELASC, label=_(u"+"),
name=u'asc', parent=self, pos=wx.Point(504, 104),
style=wx.RB_GROUP)
self.asc.SetFont(wx.Font(17, wx.SWISS, wx.NORMAL, wx.BOLD, False))
self.asc.SetValue(True)
self.asc.SetToolTipString(_(u"Increase counting number."))
self.asc.Bind(wx.EVT_RADIOBUTTON, main.showPreview)
self.desc = wx.RadioButton(id=wxID_NUMBERINGPANELDESC, label=_(u"-"),
name=u'desc', parent=self, pos=wx.Point(552, 104), style=0)
self.desc.SetFont(wx.Font(15, wx.SWISS, wx.NORMAL, wx.BOLD, False,
u'Impact'))
self.desc.SetValue(False)
self.desc.SetToolTipString(_(u"Decrease counting number."))
self.desc.Bind(wx.EVT_RADIOBUTTON, main.showPreview)
self.staticText6 = wx.StaticText(id=wxID_NUMBERINGPANELSTATICTEXT6,
label=_(u"Count:"), name=u'staticText6', parent=self,
pos=wx.Point(360, 104), style=0)
self.alpha_uc = wx.CheckBox(id=wxID_NUMBERINGPANELALPHA_UC,
label=_(u"Uppercase"), name=u'alpha_uc', parent=self,
pos=wx.Point(136, 112), style=0)
self.alpha_uc.SetValue(False)
self.alpha_uc.Enable(False)
self.alpha_uc.Bind(wx.EVT_CHECKBOX, main.showPreview)
self.start = wx.SpinCtrl(id=wxID_NUMBERINGPANELSTART, initial=0,
max=100000000, min=0, name=u'start', parent=self,
pos=wx.Point(416, 40), size=wx.Size(168, -1),
style=wx.SP_ARROW_KEYS)
self.start.SetValue(1)
self.start.SetToolTipString(_(u"starting number or equivalent alpha/roman character"))
self.start.Bind(wx.EVT_TEXT_ENTER, main.showPreview, id=wxID_NUMBERINGPANELSTART)
self.start.Bind(wx.EVT_SPINCTRL, main.showPreview, id=wxID_NUMBERINGPANELSTART)
self.staticText1 = wx.StaticText(id=wxID_NUMBERINGPANELSTATICTEXT1,
label=_(u"Reset every:"), name=u'staticText1', parent=self,
pos=wx.Point(344, 203), style=0)
self.reset = wx.SpinCtrl(id=wxID_NUMBERINGPANELRESET, initial=0,
max=100000000, min=0, name=u'reset', parent=self,
pos=wx.Point(416, 200), size=wx.Size(168, -1),
style=wx.SP_ARROW_KEYS)
self.reset.SetValue(0)
self.reset.SetToolTipString(_(u"0 = don't reset"))
self.reset.SetRange(0, 100000000)
self.reset.Bind(wx.EVT_TEXT_ENTER, main.showPreview,
id=wxID_NUMBERINGPANELRESET)
self.reset.Bind(wx.EVT_SPINCTRL, main.showPreview,
id=wxID_NUMBERINGPANELRESET)
self.digit_autopad = wx.RadioButton(id=wxID_NUMBERINGPANELDIGIT_AUTOPAD,
label=_(u"Auto pad"), name=u'digit_autopad', parent=self,
pos=wx.Point(56, 68), style=wx.RB_GROUP)
self.digit_autopad.SetValue(True)
self.digit_autopad.Bind(wx.EVT_RADIOBUTTON, self.check_styles)
self.digit_setpad = wx.RadioButton(id=wxID_NUMBERINGPANELDIGIT_SETPAD,
label=_(u"Fixed pad width:"), name=u'digit_setpad', parent=self,
style=0)
self.digit_setpad.SetValue(False)
self.digit_setpad.Bind(wx.EVT_RADIOBUTTON, self.check_styles)
self.resetDir = wx.CheckBox(id=wxID_NUMBERINGPANELRESETDIR,
label=_(u"Reset every directory"), name=u'resetDir', parent=self,
pos=wx.Point(456, 232), style=0)
self.resetDir.SetToolTipString(_(u"Reset count to initial value when directory changes."))
self.resetDir.SetValue(False)
self.resetDir.Bind(wx.EVT_CHECKBOX, main.showPreview)
self.countByDir = wx.CheckBox(id=wxID_NUMBERINGPANELCOUNTBYDIR,
label=_(u"Count by directory"), name=u'countByDir', parent=self,
pos=wx.Point(472, 168), style=0)
self.countByDir.SetToolTipString(_(u"Only increase/decrease count when directory changes."))
self.countByDir.SetValue(False)
self.countByDir.Bind(wx.EVT_CHECKBOX, main.showPreview)
self.startByItems = wx.CheckBox(id=wxID_NUMBERINGPANELSTARTBYITEM,
label=_(u"Start at number of items"), name=u'start_by_item',
parent=self, pos=wx.Point(440, 72), style=0)
self.startByItems.SetValue(False)
self.startByItems.SetToolTipString(_(u"Use number of items as starting point for count."))
self.startByItems.Bind(wx.EVT_CHECKBOX, self.OnStartByItemsCheckbox,
id=wxID_NUMBERINGPANELSTARTBYITEM)
def __init__(self, parent, main_window):
global main
main = main_window
self._init_ctrls(parent)
self.sizer()
self.setSortingOptions(0)
# determine style:
def GetNumberStyle(self):
#digit:
style = ''
if self.digit.GetValue():
pad = self.digit_pad.GetValue()
pad_char = self.pad_char.GetValue()
if self.digit_setpad.GetValue():
pad_width = self.pad_width.GetValue()
else:
pad_width = u"auto"
style = (u"digit", pad_char, pad_width, pad)
#alphabetical:
elif self.alpha.GetValue():
style = (u"alpha", self.alpha_uc.GetValue(), self.alpha_pad.GetValue())
#roman numeral:
elif self.roman.GetValue():
style = (u"roman", self.roman_uc.GetValue())
return style
# determine parameters:
def GetNumberParams(self):
#ascending:
if self.asc.GetValue() == True:
step_dir = +int(self.step.GetValue())
#descending:
else:
step_dir = -int(self.step.GetValue())
params = (self.start.GetValue(), step_dir, self.reset.GetValue(),
self.resetDir.GetValue(), self.countByDir.GetValue(),
self.startByItems.GetValue(),)
return params
# enables/disables item position change buttons:
def setSortingOptions(self, event):
sortButtons = (self.staticText2,self.upButton, self.downButton,
self.upMore,self.downMore,self.upTop,self.downBottom,)
if self.sorting.GetSelection() == 2:
for item in sortButtons:
item.Enable(True)
else:
for item in sortButtons:
item.Enable(False)
main.showPreview(event)
#enable/disable options based on what is selected:
def check_styles(self, event):
#digit:
digit_options = (self.digit_pad, self.pad_char, self.digit_setpad,
self.digit_autopad, self.pad_width)
pad_options = (self.digit_setpad, self.digit_autopad, self.pad_char,
self.pad_width)
if self.digit.GetValue():
self.digit_pad.Enable(True)
if self.digit_pad.GetValue():
for option in pad_options:
option.Enable(True)
else:
for option in pad_options:
option.Enable(False)
if self.reset.GetValue() == 4999:
self.reset.SetValue(0)
else:
for option in digit_options:
option.Enable(False)
#roman numeral:
if self.roman.GetValue():
self.roman_uc.Enable(True)
if self.reset.GetValue() > 4999:
self.reset.SetValue(4999)
if self.start.GetValue() == 0:
self.start.SetValue(1)
else:
self.roman_uc.Enable(False)
#alphabetical:
if self.alpha.GetValue():
self.alpha_uc.Enable(True)
self.alpha_pad.Enable(True)
if self.start.GetValue() == 0:
self.start.SetValue(1)
if self.reset.GetValue() == 4999:
self.reset.SetValue(0)
else:
self.alpha_uc.Enable(False)
self.alpha_pad.Enable(False)
main.showPreview(event)
def OnStartByItemsCheckbox(self, event):
if self.startByItems.GetValue():
self.start.Enable(False)
else:
self.start.Enable(True)
main.showPreview(event)
def OnPad_widthSpinctrl(self, event):
self.digit_setpad.SetValue(True)
main.showPreview(event)
# triggered when a button to change item position is clicked
def changeItemOrder(self, event):
buttons = {
wxID_NUMBERINGPANELUPBUTTON : -1,
wxID_NUMBERINGPANELDOWNBUTTON : 1,
wxID_NUMBERINGPANELUPMORE : -5,
wxID_NUMBERINGPANELDOWNMORE : 5,
wxID_NUMBERINGPANELUPTOP : u'top',
wxID_NUMBERINGPANELDOWNBOTTOM : u'bottom',
}
change = buttons[event.GetId()]
main.changeItemOrder(change)
###### GET/SET CONFIGURATION SETTINGS: #########################################
def getSettings(self):
settings = (u"<[numbering]>",
u"digit>:>%s" %int(self.digit.GetValue()),
u"digit_pad>:>%s" %int(self.digit_pad.GetValue()),
u"pad_char>:>%s" %self.pad_char.GetValue(),
u"digit_setpad>:>%s" %int(self.digit_setpad.GetValue()),
u"digit_autopad>:>%s" %int(self.digit_autopad.GetValue()),
u"pad_width>:>%s" %self.pad_width.GetValue(),
u"alpha>:>%s" %int(self.alpha.GetValue()),
u"alpha_uc>:>%s" %int(self.alpha_uc.GetValue()),
u"alpha_pad>:>%s" %int(self.alpha_pad.GetValue()),
u"roman>:>%s" %int(self.roman.GetValue()),
u"roman_uc>:>%s" %int(self.roman_uc.GetValue()),
u"start>:>%s" %self.start.GetValue(),
u"asc>:>%s" %int(self.asc.GetValue()),
u"desc>:>%s" %int(self.desc.GetValue()),
u"step>:>%s" %int(self.step.GetValue()),
u"reset>:>%s" %int(self.reset.GetValue()),
u"resetDir>:>%s" %int(self.resetDir.GetValue()),
u"countByDir>:>%s" %int(self.countByDir.GetValue()),
u"startByItems>:>%s" %int(self.startByItems.GetValue()),
u"sorting>:>%s" %self.sorting.GetSelection(),
)
return settings
def setSettings(self,settings):
if len(settings) == 20: #make sure number of settings is correct
try:
self.digit.SetValue(int(settings[0]))
self.digit_pad.SetValue(int(settings[1]))
self.pad_char.SetValue(settings[2])
self.digit_setpad.SetValue(int(settings[3]))
self.digit_autopad.SetValue(int(settings[4]))
self.pad_width.SetValue(int(settings[5]))
self.alpha.SetValue(int(settings[6]))
self.alpha_uc.SetValue(int(settings[7]))
self.alpha_pad.SetValue(int(settings[8]))
self.roman.SetValue(int(settings[9]))
self.roman_uc.SetValue(int(settings[10]))
self.start.SetValue(int(settings[11]))
self.asc.SetValue(int(settings[12]))
self.desc.SetValue(int(settings[13]))
self.step.SetValue(int(settings[14]))
self.reset.SetValue(int(settings[15]))
self.resetDir.SetValue(int(settings[16]))
self.countByDir.SetValue(int(settings[17]))
self.startByItems.SetValue(int(settings[18]))
self.sorting.SetSelection(int(settings[19].replace(u'\n','')))
except ValueError:
return False
else:
# apply settings:
self.check_styles(0)
self.setSortingOptions(0)
return True
else:
return False
|
metamorphose/metamorphose1
|
numbering.py
|
numbering.py
|
py
| 25,919 |
python
|
en
|
code
| 7 |
github-code
|
6
|
[
{
"api_name": "wx.NewId",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "wx.Panel",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "wx.BoxSizer",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "wx.HORIZONTAL",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "wx.ALIGN_CENTER",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "wx.RIGHT",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "wx.ALIGN_CENTER",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "wx.LEFT",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "wx.BoxSizer",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "wx.HORIZONTAL",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "wx.ALIGN_CENTER",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "wx.ALIGN_CENTER",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "wx.BoxSizer",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "wx.HORIZONTAL",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "wx.ALIGN_CENTER",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "wx.RIGHT",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "wx.ALIGN_CENTER",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "wx.LEFT",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "wx.BoxSizer",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "wx.HORIZONTAL",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "wx.RIGHT",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "wx.RIGHT",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "wx.RIGHT",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "wx.StaticBoxSizer",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "wx.VERTICAL",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "wx.TOP",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "wx.BOTTOM",
"line_number": 70,
"usage_type": "attribute"
},
{
"api_name": "wx.LEFT",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "wx.RIGHT",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "wx.LEFT",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "wx.RIGHT",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "wx.BOTTOM",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "wx.TOP",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "wx.BOTTOM",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "wx.BoxSizer",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "wx.HORIZONTAL",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "wx.ALIGN_CENTER",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "wx.LEFT",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "wx.RIGHT",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "wx.ALIGN_CENTER",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "wx.ALIGN_CENTER",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "wx.ALIGN_CENTER",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "wx.LEFT",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "wx.RIGHT",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "wx.BoxSizer",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "wx.HORIZONTAL",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "wx.ALIGN_CENTER",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "wx.RIGHT",
"line_number": 102,
"usage_type": "attribute"
},
{
"api_name": "wx.ALIGN_CENTER",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "wx.LEFT",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "wx.StaticBoxSizer",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "wx.VERTICAL",
"line_number": 108,
"usage_type": "attribute"
},
{
"api_name": "wx.BOTTOM",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "wx.EXPAND",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "wx.BOTTOM",
"line_number": 111,
"usage_type": "attribute"
},
{
"api_name": "wx.BoxSizer",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "wx.HORIZONTAL",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "wx.FlexGridSizer",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "wx.EXPAND",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "wx.StaticBoxSizer",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "wx.VERTICAL",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "wx.ALL",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "wx.BoxSizer",
"line_number": 151,
"usage_type": "call"
},
{
"api_name": "wx.HORIZONTAL",
"line_number": 151,
"usage_type": "attribute"
},
{
"api_name": "wx.BoxSizer",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "wx.VERTICAL",
"line_number": 152,
"usage_type": "attribute"
},
{
"api_name": "wx.EXPAND",
"line_number": 154,
"usage_type": "attribute"
},
{
"api_name": "wx.EXPAND",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "wx.TOP",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "wx.TOP",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "wx.TOP",
"line_number": 168,
"usage_type": "attribute"
},
{
"api_name": "wx.Panel.__init__",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "wx.Panel",
"line_number": 174,
"usage_type": "attribute"
},
{
"api_name": "wx.Point",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "wx.Size",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "wx.TAB_TRAVERSAL",
"line_number": 176,
"usage_type": "attribute"
},
{
"api_name": "wx.Size",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "wx.StaticBox",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "wx.Point",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "wx.StaticBox",
"line_number": 183,
"usage_type": "call"
},
{
"api_name": "wx.Point",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "wx.Size",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "wx.StaticBox",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "wx.Point",
"line_number": 188,
"usage_type": "call"
},
{
"api_name": "wx.RadioButton",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "wx.Point",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "wx.RB_GROUP",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_RADIOBUTTON",
"line_number": 195,
"usage_type": "attribute"
},
{
"api_name": "wx.RadioButton",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "wx.Point",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "wx.EVT_RADIOBUTTON",
"line_number": 204,
"usage_type": "attribute"
},
{
"api_name": "wx.RadioButton",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "wx.Point",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "wx.EVT_RADIOBUTTON",
"line_number": 211,
"usage_type": "attribute"
},
{
"api_name": "wx.CheckBox",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "wx.Point",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "wx.EVT_CHECKBOX",
"line_number": 217,
"usage_type": "attribute"
},
{
"api_name": "wx.TextCtrl",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "wx.Point",
"line_number": 220,
"usage_type": "call"
},
{
"api_name": "wx.Size",
"line_number": 221,
"usage_type": "call"
},
{
"api_name": "wx.EVT_TEXT",
"line_number": 223,
"usage_type": "attribute"
},
{
"api_name": "wx.CheckBox",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "wx.Point",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "wx.EVT_CHECKBOX",
"line_number": 230,
"usage_type": "attribute"
},
{
"api_name": "wx.SpinCtrl",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "wx.Point",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "wx.Size",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "wx.SP_ARROW_KEYS",
"line_number": 234,
"usage_type": "attribute"
},
{
"api_name": "wx.TE_PROCESS_ENTER",
"line_number": 234,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_TEXT_ENTER",
"line_number": 237,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_SPINCTRL",
"line_number": 238,
"usage_type": "attribute"
},
{
"api_name": "wx.CheckBox",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "wx.Point",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "wx.EVT_CHECKBOX",
"line_number": 245,
"usage_type": "attribute"
},
{
"api_name": "wx.StaticText",
"line_number": 247,
"usage_type": "call"
},
{
"api_name": "wx.Point",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "wx.StaticText",
"line_number": 251,
"usage_type": "call"
},
{
"api_name": "wx.Point",
"line_number": 253,
"usage_type": "call"
},
{
"api_name": "wx.BitmapButton",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "wx.Bitmap",
"line_number": 255,
"usage_type": "call"
},
{
"api_name": "wx.BITMAP_TYPE_PNG",
"line_number": 256,
"usage_type": "attribute"
},
{
"api_name": "wx.Point",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "wx.BU_AUTODRAW",
"line_number": 257,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_BUTTON",
"line_number": 258,
"usage_type": "attribute"
},
{
"api_name": "wx.BitmapButton",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "wx.Bitmap",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "wx.BITMAP_TYPE_PNG",
"line_number": 262,
"usage_type": "attribute"
},
{
"api_name": "wx.Point",
"line_number": 263,
"usage_type": "call"
},
{
"api_name": "wx.BU_AUTODRAW",
"line_number": 263,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_BUTTON",
"line_number": 264,
"usage_type": "attribute"
},
{
"api_name": "wx.BitmapButton",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "wx.Bitmap",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "wx.BITMAP_TYPE_PNG",
"line_number": 268,
"usage_type": "attribute"
},
{
"api_name": "wx.Point",
"line_number": 269,
"usage_type": "call"
},
{
"api_name": "wx.BU_AUTODRAW",
"line_number": 269,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_BUTTON",
"line_number": 271,
"usage_type": "attribute"
},
{
"api_name": "wx.BitmapButton",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "wx.Bitmap",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "wx.BITMAP_TYPE_PNG",
"line_number": 275,
"usage_type": "attribute"
},
{
"api_name": "wx.Point",
"line_number": 276,
"usage_type": "call"
},
{
"api_name": "wx.BU_AUTODRAW",
"line_number": 276,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_BUTTON",
"line_number": 278,
"usage_type": "attribute"
},
{
"api_name": "wx.BitmapButton",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "wx.Bitmap",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "wx.BITMAP_TYPE_PNG",
"line_number": 282,
"usage_type": "attribute"
},
{
"api_name": "wx.Point",
"line_number": 283,
"usage_type": "call"
},
{
"api_name": "wx.BU_AUTODRAW",
"line_number": 283,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_BUTTON",
"line_number": 285,
"usage_type": "attribute"
},
{
"api_name": "wx.BitmapButton",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "wx.Bitmap",
"line_number": 288,
"usage_type": "call"
},
{
"api_name": "wx.BITMAP_TYPE_PNG",
"line_number": 289,
"usage_type": "attribute"
},
{
"api_name": "wx.Point",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "wx.BU_AUTODRAW",
"line_number": 290,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_BUTTON",
"line_number": 292,
"usage_type": "attribute"
},
{
"api_name": "wx.Choice",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "wx.Point",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "wx.EVT_CHOICE",
"line_number": 299,
"usage_type": "attribute"
},
{
"api_name": "wx.StaticText",
"line_number": 302,
"usage_type": "call"
},
{
"api_name": "wx.Point",
"line_number": 304,
"usage_type": "call"
},
{
"api_name": "wx.SpinCtrl",
"line_number": 306,
"usage_type": "call"
},
{
"api_name": "wx.Point",
"line_number": 307,
"usage_type": "call"
},
{
"api_name": "wx.Size",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "wx.SP_ARROW_KEYS",
"line_number": 308,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_TEXT_ENTER",
"line_number": 311,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_SPINCTRL",
"line_number": 312,
"usage_type": "attribute"
},
{
"api_name": "wx.StaticText",
"line_number": 314,
"usage_type": "call"
},
{
"api_name": "wx.Point",
"line_number": 316,
"usage_type": "call"
},
{
"api_name": "wx.RadioButton",
"line_number": 318,
"usage_type": "call"
},
{
"api_name": "wx.Point",
"line_number": 319,
"usage_type": "call"
},
{
"api_name": "wx.RB_GROUP",
"line_number": 320,
"usage_type": "attribute"
},
{
"api_name": "wx.Font",
"line_number": 321,
"usage_type": "call"
},
{
"api_name": "wx.SWISS",
"line_number": 321,
"usage_type": "attribute"
},
{
"api_name": "wx.NORMAL",
"line_number": 321,
"usage_type": "attribute"
},
{
"api_name": "wx.BOLD",
"line_number": 321,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_RADIOBUTTON",
"line_number": 324,
"usage_type": "attribute"
},
{
"api_name": "wx.RadioButton",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "wx.Point",
"line_number": 327,
"usage_type": "call"
},
{
"api_name": "wx.Font",
"line_number": 328,
"usage_type": "call"
},
{
"api_name": "wx.SWISS",
"line_number": 328,
"usage_type": "attribute"
},
{
"api_name": "wx.NORMAL",
"line_number": 328,
"usage_type": "attribute"
},
{
"api_name": "wx.BOLD",
"line_number": 328,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_RADIOBUTTON",
"line_number": 332,
"usage_type": "attribute"
},
{
"api_name": "wx.StaticText",
"line_number": 334,
"usage_type": "call"
},
{
"api_name": "wx.Point",
"line_number": 336,
"usage_type": "call"
},
{
"api_name": "wx.CheckBox",
"line_number": 338,
"usage_type": "call"
},
{
"api_name": "wx.Point",
"line_number": 340,
"usage_type": "call"
},
{
"api_name": "wx.EVT_CHECKBOX",
"line_number": 343,
"usage_type": "attribute"
},
{
"api_name": "wx.SpinCtrl",
"line_number": 345,
"usage_type": "call"
},
{
"api_name": "wx.Point",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "wx.Size",
"line_number": 347,
"usage_type": "call"
},
{
"api_name": "wx.SP_ARROW_KEYS",
"line_number": 348,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_TEXT_ENTER",
"line_number": 351,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_SPINCTRL",
"line_number": 352,
"usage_type": "attribute"
},
{
"api_name": "wx.StaticText",
"line_number": 354,
"usage_type": "call"
},
{
"api_name": "wx.Point",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "wx.SpinCtrl",
"line_number": 358,
"usage_type": "call"
},
{
"api_name": "wx.Point",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "wx.Size",
"line_number": 360,
"usage_type": "call"
},
{
"api_name": "wx.SP_ARROW_KEYS",
"line_number": 361,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_TEXT_ENTER",
"line_number": 365,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_SPINCTRL",
"line_number": 367,
"usage_type": "attribute"
},
{
"api_name": "wx.RadioButton",
"line_number": 370,
"usage_type": "call"
},
{
"api_name": "wx.Point",
"line_number": 372,
"usage_type": "call"
},
{
"api_name": "wx.RB_GROUP",
"line_number": 372,
"usage_type": "attribute"
},
{
"api_name": "wx.EVT_RADIOBUTTON",
"line_number": 374,
"usage_type": "attribute"
},
{
"api_name": "wx.RadioButton",
"line_number": 376,
"usage_type": "call"
},
{
"api_name": "wx.EVT_RADIOBUTTON",
"line_number": 380,
"usage_type": "attribute"
},
{
"api_name": "wx.CheckBox",
"line_number": 382,
"usage_type": "call"
},
{
"api_name": "wx.Point",
"line_number": 384,
"usage_type": "call"
},
{
"api_name": "wx.EVT_CHECKBOX",
"line_number": 387,
"usage_type": "attribute"
},
{
"api_name": "wx.CheckBox",
"line_number": 389,
"usage_type": "call"
},
{
"api_name": "wx.Point",
"line_number": 391,
"usage_type": "call"
},
{
"api_name": "wx.EVT_CHECKBOX",
"line_number": 394,
"usage_type": "attribute"
},
{
"api_name": "wx.CheckBox",
"line_number": 396,
"usage_type": "call"
},
{
"api_name": "wx.Point",
"line_number": 398,
"usage_type": "call"
},
{
"api_name": "wx.EVT_CHECKBOX",
"line_number": 401,
"usage_type": "attribute"
}
] |
27672728231
|
from typing import Optional
import torch
from torch import nn
from config import dt_config
from block import TransformerBlock
class DecisionTransformer(nn.Module):
def __init__(self,
cfg: dt_config,
state_dim: int,
action_dim: int) -> None:
super().__init__()
self.embedding_dropout = nn.Dropout(cfg.embedding_dropout)
self.embedding_norm = nn.LayerNorm(cfg.embedding_dim)
self.final_norm = nn.LayerNorm(cfg.embedding_dim)
self.positional_encoding = nn.Embedding(cfg.episode_length + cfg.sequence_length,
cfg.embedding_dim)
self.state_embedding = nn.Linear(state_dim, cfg.embedding_dim)
self.action_embedding = nn.Linear(action_dim, cfg.embedding_dim)
self.return_embedding = nn.Linear(1, cfg.embedding_dim)
self.blocks = nn.ModuleList([
TransformerBlock(3 * cfg.sequence_length,
cfg.embedding_dim,
cfg.num_heads,
cfg.attention_dropout,
cfg.residual_dropout) for _ in range(cfg.num_layers)
])
self.embedding_dim = cfg.embedding_dim
self.sequence_length = cfg.sequence_length
self.state_dim = state_dim
self.action_dim = action_dim
self.episode_length = cfg.episode_length
self.max_action = cfg.max_action
self.action_head = nn.Sequential(
nn.Linear(self.embedding_dim, self.action_dim),
nn.Tanh()
)
self.apply(self.reset_weights)
@staticmethod
def reset_weights(m: nn.Module):
if isinstance(m, (nn.Linear, nn.Embedding)):
nn.init.normal_(m.weight, mean=0.0, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.zeros_(m.bias)
if isinstance(m, nn.LayerNorm):
nn.init.zeros_(m.bias)
nn.init.ones_(m.weight)
def forward(self,
states: torch.Tensor,
actions: torch.Tensor,
mc_returns: torch.Tensor,
time_steps: torch.Tensor,
key_padding_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
batch_size, sequence_length = states.shape[0], states.shape[1]
pos_encoding = self.positional_encoding(time_steps)
state_embedding = self.state_embedding(states) + pos_encoding
action_embedding = self.action_embedding(actions) + pos_encoding
returns_embedding = self.return_embedding(mc_returns.unsqueeze(-1)) + pos_encoding
sequence = torch.stack((
returns_embedding, state_embedding, action_embedding
), dim=1).permute(0, 2, 1, 3).reshape(batch_size, 3 * sequence_length, self.embedding_dim)
if key_padding_mask is not None:
key_padding_mask = torch.stack((
key_padding_mask, key_padding_mask, key_padding_mask
), dim=1).permute(0, 2, 1).reshape(batch_size, 3 * sequence_length)
out = self.embedding_dropout(self.embedding_norm(sequence))
for block in self.blocks:
out = block(out, padding_mask=key_padding_mask)
out = self.final_norm(out)
return self.action_head(out[:, 1::3]) * self.max_action
|
zzmtsvv/rl_task
|
decision_transformer/model.py
|
model.py
|
py
| 3,401 |
python
|
en
|
code
| 8 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "config.dt_config",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torch.nn.LayerNorm",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "torch.nn.LayerNorm",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "torch.nn.Embedding",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "torch.nn.ModuleList",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "block.TransformerBlock",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "torch.nn.Tanh",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "torch.nn.Embedding",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.init.normal_",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "torch.nn.init.zeros_",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "torch.nn.LayerNorm",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "torch.nn.init.zeros_",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "torch.nn.init.ones_",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "torch.nn.init",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "typing.Optional",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "torch.stack",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "torch.stack",
"line_number": 77,
"usage_type": "call"
}
] |
25328118850
|
import cv2
import numpy as np
from scipy.spatial import cKDTree
from sklearn.decomposition import PCA
def sample_to_heatmap(points, x_adjust=0, y_adjust=0, z_threshold=None, nearest_k=3):
# If a threshold is provided, keep only points with a z-coordinate above this threshold
if z_threshold is not None:
points = points[points[:, 2] > z_threshold]
print('cropped')
# Compute PCA
pca = PCA(n_components=3)
pca.fit(points)
# The normal of the plane is the smallest principal component
normal = pca.components_[-1]
# The point on the plane can be the centroid of the point cloud
centroid = np.mean(points, axis=0)
# Now we can print the plane equation
# The plane equation is of the form ax + by + cz + d = 0
a, b, c = normal
d = -centroid.dot(normal)
print(f"The equation of the plane is {a:.5f}x + {b:.5f}y + {c:.5f}z + {d:.5f} = 0")
# Get x, y, z coordinates
x_coords = points[:, 0]
y_coords = points[:, 1]
z_coords = points[:, 2]
# Calculate minimum and maximum values in x and y directions
x_min, x_max = np.min(x_coords), np.max(x_coords)
y_min, y_max = np.min(y_coords), np.max(y_coords)
x_mid = (x_min + x_max) / 2 + x_adjust
y_mid = (y_min + y_max) / 2 + y_adjust
# The range of x and y values for the mesh grid
x_range = np.linspace(x_mid - 15, x_mid + 15, 514)
y_range = np.linspace(y_mid - 15, y_mid + 15, 514)
x, y = np.meshgrid(x_range, y_range)
# Compute corresponding z values for the plane
z = (-a * x - b * y - d) / c
tree = cKDTree(points)
distances = []
for point in np.vstack([x.flatten(), y.flatten(), z.flatten()]).T:
# Find the three nearest points in the point cloud
dists, idxs = tree.query(point, k=nearest_k)
nearest_points = points[idxs]
# For each nearest point, compute the distance to the point along the normal direction
ds = []
for nearest_point in nearest_points:
displacement = nearest_point - point # vector from point to nearest_point
distance = np.dot(displacement, normal) # project displacement onto normal
ds.append(distance)
distances.append(sum(ds) / len(ds))
# 这里是用最小值纠正(normalisation)矩阵
distances_array = (np.array(distances) - np.min(distances)) / 0.5 * 255
distances_reshape = distances_array.reshape((514, 514))[1:513, 1:513].astype(int)
return distances_reshape # 这个就是image 直接save就行
# Create a heatmap using seaborn
def plot_heatmap(heatmap, save_path=None):
cv2.imwrite(save_path, heatmap)
|
jichengzhi/cube-sampling
|
heatmap.py
|
heatmap.py
|
py
| 2,657 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "sklearn.decomposition.PCA",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.max",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.meshgrid",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "scipy.spatial.cKDTree",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "numpy.min",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 74,
"usage_type": "call"
}
] |
20801228142
|
from itertools import chain
y, x = [int(i) for i in input().split()]
matrix = [[] for _ in range(y)]
for i in range(y):
s = input()
for u in s:
if u == ".":
matrix[i].append(True)
elif u == "#":
matrix[i].append(False)
def step(y, x, matrix):
matrix[y][x] = True
tmp = [[y, x+1],[y, x-1],[y-1, x],[y+1, x]]
neighbors = []
for i in tmp:
try:
if matrix[i[0]][i[1]] == False:
neighbors.append(i)
except:
pass
if len(neighbors) == 0:
return
else:
for i in neighbors:
step(i[0], i[1], matrix)
count = 0
while False in chain.from_iterable(matrix):
coord = []
for i in matrix:
if False in i:
coord = [matrix.index(i), i.index(False)]
step(coord[0], coord[1], matrix)
count += 1
print(count)
|
michbogos/olymp
|
eolymp/dynamic_programming/cut_paper.py
|
cut_paper.py
|
py
| 915 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "itertools.chain.from_iterable",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "itertools.chain",
"line_number": 35,
"usage_type": "name"
}
] |
21987190006
|
import pytest
@pytest.fixture(name="fixer")
def fixer_fixture(two_to_three_test_case):
return two_to_three_test_case("methodattrs")
attrs = ["func", "self", "class"]
def test(fixer):
for attr in attrs:
b = "a.im_%s" % attr
if attr == "class":
a = "a.__self__.__class__"
else:
a = "a.__%s__" % attr
fixer.check(b, a)
b = "self.foo.im_%s.foo_bar" % attr
if attr == "class":
a = "self.foo.__self__.__class__.foo_bar"
else:
a = "self.foo.__%s__.foo_bar" % attr
fixer.check(b, a)
def test_unchanged(fixer):
for attr in attrs:
s = "foo(im_%s + 5)" % attr
fixer.unchanged(s)
s = "f(foo.__%s__)" % attr
fixer.unchanged(s)
s = "f(foo.__%s__.foo)" % attr
fixer.unchanged(s)
|
ryanwersal/crosswind
|
fixer_suites/two_to_three/tests/test_methodattrs.py
|
test_methodattrs.py
|
py
| 847 |
python
|
en
|
code
| 11 |
github-code
|
6
|
[
{
"api_name": "pytest.fixture",
"line_number": 4,
"usage_type": "call"
}
] |
27104562564
|
import os
import pickle
import uvicorn
from fastapi import FastAPI
FAKE_HASH_TABLE_DB = './database/FakeHashTable.pickle'
class FakeHashTable:
def __init__(self, bit_limitation=10):
self.limitation = 2 ** bit_limitation
self.hashtable = dict()
self.id_list = set()
self.history = list()
self.avail_id = list(range(self.limitation))
def hash(self, value, replacement=None):
"""
:param value: value to hash
:param replacement: if replacement = 'oldest' this instance will replace the object by the oldest record. If
replacement is the value that existed in hashtable, it will remove old record and replace by new value.
:return:
"""
# For user replace ID by a new value
if replacement is not None:
if replacement == 'oldest' and self.history.__len__() > 2:
old_id = self.hashtable[self.history[0]]
del self.hashtable[self.history[0]]
self.history = self.history[1:]
self.history.append(value)
self.hashtable[value] = old_id
if replacement in self.hashtable:
old_id = self.hashtable[replacement]
self.history.remove(old_id)
self.history.append(value)
del self.hashtable[replacement]
self.hashtable[value] = old_id
return old_id
return None
if value in list(self.hashtable.keys()):
return self.hashtable[value]
# If larger than 10 bit, return None
if self.hashtable.items().__len__() > self.limitation:
return None
# Add new ID
new_id = self.avail_id.pop(0)
self.history.append(value)
self.id_list.add(new_id)
self.hashtable[value] = new_id
return new_id
def remove(self, value):
if value not in self.hashtable:
return False
old_id = self.hashtable[value]
del self.hashtable[value]
self.id_list.remove(value)
self.avail_id.append(old_id)
self.history.remove(value)
def backup_htb_object(in_htb):
with open(FAKE_HASH_TABLE_DB, 'wb') as ff:
pickle.dump(in_htb, ff)
def load_htb_object():
with open(FAKE_HASH_TABLE_DB, 'rb') as ff:
data = pickle.load(ff)
return data
app = FastAPI()
if os.path.exists(FAKE_HASH_TABLE_DB):
htb = load_htb_object()
else:
htb = FakeHashTable()
@app.post("/")
async def get_machine_id(value):
iid = htb.hash(value)
backup_htb_object(htb)
return {"id": iid}
if __name__ == '__main__':
uvicorn.run(
"hash_service:app",
host='0.0.0.0',
port=8000,
reload=True,
debug=True,
workers=3
)
|
hoangperry/system-design-implementation
|
unique-id-generator/hash_service.py
|
hash_service.py
|
py
| 2,812 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "pickle.dump",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "fastapi.FastAPI",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "uvicorn.run",
"line_number": 94,
"usage_type": "call"
}
] |
6673888762
|
import bs4
import requests
url = 'https://id.carousell.com/carousell_id'
contents = requests.get(url)
response = bs4.BeautifulSoup(contents.text, 'html.parser')
data = response.find('div', attrs={'class': 'D_apq D_eZ M_gF D_fb M_gH'})
datas = data.findAll('div', attrs={'class': 'D_jg', 'class': 'D_qq', 'class': 'D_qv'})
# print(datas)
for obj in datas:
judul = obj.find('p', attrs={'class': "D_bN M_aT D_aP M_aC D_bO M_aU D_bR M_aX D_bT M_aZ D_bW M_bc "
"D_bZ M_bg D_bK"}).text
image = obj.find('img', attrs={'class': 'D_bl', 'class': 'D_bi', 'class': 'D_tO'})['src']
with open('images/' + judul + '.jpg', 'wb') as f:
img = requests.get(image)
f.write(img.content)
|
AlfaRiza/ScrapingCarousell
|
getImg.py
|
getImg.py
|
py
| 748 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 22,
"usage_type": "call"
}
] |
1798043180
|
import time
import json
import board
import busio
import adafruit_ads1x15.ads1015 as ADS
from adafruit_ads1x15.analog_in import AnalogIn
max_val = None
min_val = None
# Create the I2C bus
i2c = busio.I2C(board.SCL, board.SDA)
# Create the ADC object using the I2C bus
ads = ADS.ADS1015(i2c)
# Create single-ended input on channel 0
chan = AnalogIn(ads, ADS.P2)
baseline_check = input("Is Light Sensor Covered? (enter 'y' to proceed): ")
if baseline_check == 'y':
max_val = chan.value
print("------{:>5}\t{:>5}".format("raw", "v"))
for x in range(0, 10):
if chan.value > max_val:
max_val = chan.value
print("CHAN 2: "+"{:>5}\t{:>5.3f}".format(chan.value, chan.voltage))
time.sleep(0.5)
print('\n')
water_check = input("Does the Light Sensor receive the maximum light? (enter 'y' to proceed): ")
if water_check == 'y':
min_val = chan.value
print("------{:>5}\t{:>5}".format("raw", "v"))
for x in range(0, 10):
if chan.value < min_val:
min_val = chan.value
print("CHAN 2: "+"{:>5}\t{:>5.3f}".format(chan.value, chan.voltage))
time.sleep(0.5)
config_data = dict()
config_data["min"] = min_val
config_data["max"] = max_val
with open('light_config.json', 'w') as outfile:
json.dump(config_data, outfile)
print('\n')
print(config_data)
time.sleep(0.5)
|
pdany1116/is-iot-collector
|
helpers/light_intensity_moisture_calibration.py
|
light_intensity_moisture_calibration.py
|
py
| 1,305 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "busio.I2C",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "board.SCL",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "board.SDA",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "adafruit_ads1x15.ads1015.ADS1015",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "adafruit_ads1x15.ads1015",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "adafruit_ads1x15.analog_in.AnalogIn",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "adafruit_ads1x15.ads1015.P2",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "adafruit_ads1x15.ads1015",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 49,
"usage_type": "call"
}
] |
2374031473
|
from torchvision import models
import torch
import torch.nn as nn
from PIL import ImageGrab
import cv2
import torch.nn.functional as F
# import albumentations as A
# from albumentations.pytorch import ToTensorV2
from torchvision import transforms
import numpy as np
from PIL import Image
from input_keys import PressKey, ReleaseKey
import time
#import torch.nn.functional as F
#output = torch.randn(10, 5) # example output tensor
#softmax_result = F.softmax(output, dim=1)
labels = {0: 'a', 1: 'w', 2: 'd'}
#labels = {0: 'a', 1: 'w', 2: 'd', 3: 's'}
def ingame_predic():
test_transform = transforms.Compose(
[
# A.SmallestMaxSize(max_size=160),
transforms.Resize((640, 480)),
# A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
transforms.ToTensor()
]
)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#net = models.mobilenet_v3_large(pretrained=True, weights=models.mobilenet_v3_large(pretrained=True).weights.IMAGENET1K_V1)
net = models.mobilenet_v3_large(pretrained=True)
net.classifier[3] = nn.Linear(in_features=1280,out_features=3)
#net = models.efficientnet_b4(pretrained=True)
#net.classifier[1] = nn.Linear(in_features=1792,out_features=4)
#net.load_state_dict(torch.load('./mbmodel.pt', map_location=device))
net.load_state_dict(torch.load('./mbv3model.pt', map_location=device))
net.to(device)
net.eval()
while(True):
with torch.no_grad():
screen = ImageGrab.grab(bbox=(0, 40, 1024, 768)) # 1024, 768 화면을 받아서 Numpy Array로 전환
# screen = cv2.imread('./test_image2.jpg') # test image
# input_image = Image.fromarray(screen)
input_image = test_transform(screen).unsqueeze(0).to(device)
output = net(input_image)
softmax_result = F.softmax(output)
top_prob, top_label = torch.topk(softmax_result, 1)
prob = round(top_prob.item() * 100, 2)
label = labels.get(int(top_label))
# print(f'prob: {prob}, label: {label}')
W = 0x11
A = 0x1E
S = 0x1F
D = 0x20
if (60 < prob) and (label == 'a'):
PressKey(A)
time.sleep(0.5)
ReleaseKey(A)
elif (60 < prob) and (label == 'w'):
PressKey(W)
time.sleep(0.5)
ReleaseKey(W)
elif (60 < prob) and (label == 'd'):
PressKey(D)
time.sleep(0.5)
ReleaseKey(D)
elif (60 < prob) and (label == 's'):
PressKey(S)
time.sleep(0.5)
ReleaseKey(S)
else:
time.sleep(0.5)
print(prob,label)
#return prob, label
if __name__ == '__main__':
predic_prob, predic_label = ingame_predic()
print(predic_prob, predic_label)
|
DH-an/Metaverse_Autonomous_Driving_AI_Project
|
Data_Collecting/ingame_testing.py
|
ingame_testing.py
|
py
| 3,032 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torchvision.transforms.Compose",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Resize",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "torch.device",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "torchvision.models.mobilenet_v3_large",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "torchvision.models",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "torch.load",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "PIL.ImageGrab.grab",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "PIL.ImageGrab",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.softmax",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "torch.topk",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "input_keys.PressKey",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "input_keys.ReleaseKey",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "input_keys.PressKey",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "input_keys.ReleaseKey",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "input_keys.PressKey",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "input_keys.ReleaseKey",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "input_keys.PressKey",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "input_keys.ReleaseKey",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 89,
"usage_type": "call"
}
] |
43166848463
|
'''A simple blockchain implementation.
Inspired by https://medium.com/crypto-currently/lets-build-the-tiniest-blockchain-e70965a248b'''
from __future__ import print_function
import hashlib
import datetime
class Block:
'''Blocks of data that will create the Blockchain'''
def __init__(self, index, timestamp, data, previous_hash):
self.index = index
self.timestamp = timestamp
self.data = data
self.previous_hash = previous_hash
self.hash = self.hash_block()
def hash_block(self):
'''returns a sha256 hash of the Block's index, timestamp, data,
and previous block's hash'''
sha_hash = hashlib.sha256()
sha_hash.update(str(self.index).encode('utf-8') + str(self.timestamp).encode('utf-8') + str(self.data).encode('utf-8') + str(self.previous_hash).encode('utf-8'))
return sha_hash.hexdigest()
def create_genesis_block():
'''Create the first block in the chain'''
return Block(0, datetime.datetime.now(), "Genesis Block", "0")
def next_block(previous_block):
'''Create the next block in the chain'''
index = previous_block.index + 1
timestamp = datetime.datetime.now()
data = "I'm block {}".format(index)
return Block(index, timestamp, data, previous_block.hash)
def create_block_chain(num_of_blocks):
block_chain = [create_genesis_block()]
previous_block = block_chain[0]
for _ in range(0, num_of_blocks):
new_block = next_block(previous_block)
block_chain.append(new_block)
previous_block = new_block
print("Block #{} was added to the blockchain".format(new_block.index))
print("Hash: {}\n".format(new_block.hash))
create_block_chain(10)
|
William-Hill/UVI_Teaching_2018
|
blockchain/cruzan_coin.py
|
cruzan_coin.py
|
py
| 1,722 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "hashlib.sha256",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 33,
"usage_type": "attribute"
}
] |
36388169565
|
from fastapi import APIRouter
from api.docker.models import DockerStatsModel
from api.docker.retrieval import get_container_stats, ping_docker
router = APIRouter(
prefix="/docker",
tags=["Docker"],
)
@router.get("/", tags=["Ping"])
def get_docker_health():
status = ping_docker()
return {"status": "ok" if status else "error"}
@router.get("/stats/", response_model=DockerStatsModel)
def get_docker_stats():
return get_container_stats()
|
noahtigner/homelab
|
api/docker/router.py
|
router.py
|
py
| 462 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "fastapi.APIRouter",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "api.docker.retrieval.ping_docker",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "api.docker.retrieval.get_container_stats",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "api.docker.models.DockerStatsModel",
"line_number": 18,
"usage_type": "name"
}
] |
6969826416
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import torch
class EMA_FM():
def __init__(self, decay=0.9, first_decay=0.0, channel_size=512, f_map_size=196, is_use = False):
self.decay = decay
self.first_decay = first_decay
self.is_use = is_use
self.shadow = {}
self.epsional = 1e-5
if is_use:
self._register(channel_size=channel_size, f_map_size= f_map_size)
def _register(self, channel_size=512, f_map_size=196):
Init_FM = torch.zeros((f_map_size, channel_size),dtype=torch.float)
self.shadow['FM'] = Init_FM.cuda().clone()
self.is_first = True
def update(self, input):
B, C, _ = input.size()
if not(self.is_use):
return torch.ones((C,C), dtype=torch.float)
decay = self.first_decay if self.is_first else self.decay
####### FEATURE SIMILARITY MATRIX EMA ########
# Mu = torch.mean(input,dim=0)
self.shadow['FM'] = (1.0 - decay) * torch.mean(input,dim=0) + decay * self.shadow['FM']
self.is_first = False
return self.shadow['FM']
class Cluster_loss():
def __init__(self):
pass
def update(self, correlation, loss_mask_num, loss_mask_den, labels):
batch, channel, _ = correlation.shape
c, _, _ = loss_mask_num.shape
if labels is not None:
label_mask = (1 - labels).view(batch, 1, 1)
## smg_loss if only available for positive sample
correlation = correlation * label_mask
correlation = (correlation / batch).view(1, batch, channel, channel).repeat(c, 1, 1, 1)
new_Num = torch.sum(correlation * loss_mask_num.view(c, 1, channel, channel).repeat(1, batch, 1, 1),
dim=(1, 2, 3))
new_Den = torch.sum(correlation * (loss_mask_den).view(c, 1, channel, channel).repeat(1, batch, 1, 1),
dim=(1, 2, 3))
ret_loss = -torch.sum(new_Num / (new_Den + 1e-5))
return ret_loss
class Multiclass_loss():
def __init__(self, class_num=None):
self.class_num = class_num
def get_label_mask(self, label):
label = label.cpu().numpy()
sz = label.shape[0]
label_mask_num = []
label_mask_den = []
for i in range(self.class_num):
idx = np.where(label == i)[0]
cur_mask_num = np.zeros((sz, sz))
cur_mask_den = np.zeros((sz, sz))
for j in idx:
cur_mask_num[j][idx] = 1
cur_mask_den[j][:] = 1
label_mask_num.append(np.expand_dims(cur_mask_num, 0))
label_mask_den.append(np.expand_dims(cur_mask_den, 0))
label_mask_num = np.concatenate(label_mask_num, axis=0)
label_mask_den = np.concatenate(label_mask_den, axis=0)
return torch.from_numpy(label_mask_num).float().cuda(), torch.from_numpy(label_mask_den).float().cuda()
def update(self, fmap, loss_mask_num, label):
B, C, _, _ = fmap.shape
center, _, _ = loss_mask_num.shape
fmap = fmap.view(1, B, C, -1).repeat(center, 1, 1, 1)
mean_activate = torch.mean(torch.matmul(loss_mask_num.view(center, 1, C, C).repeat(1, B, 1, 1), fmap),
dim=(2, 3))
# cosine
mean_activate = torch.div(mean_activate, torch.norm(mean_activate, p=2, dim=0, keepdim=True) + 1e-5)
inner_dot = torch.matmul(mean_activate.permute(1, 0), mean_activate).view(-1, B, B).repeat(self.class_num, 1, 1)
label_mask, label_mask_intra = self.get_label_mask(label)
new_Num = torch.mean(inner_dot * label_mask, dim=(1, 2))
new_Den = torch.mean(inner_dot * label_mask_intra, dim=(1, 2))
ret_loss = -torch.sum(new_Num / (new_Den + 1e-5))
return ret_loss
def Cal_Center(fmap, gt):
f_1map = fmap.detach().cpu().numpy()
matrix = gt.detach().cpu().numpy()
B, C, H, W = f_1map.shape
cluster = []
visited = np.zeros(C)
for i in range(matrix.shape[0]):
tmp = []
if(visited[i]==0):
for j in range(matrix.shape[1]):
if(matrix[i][j]==1 ):
tmp.append(j)
visited[j]=1;
cluster.append(tmp)
center = []
for i in range(len(cluster)):
cur_clustet_fmap = f_1map[:,cluster[i],...]
cluster_center = np.mean(cur_clustet_fmap,axis=1)
center.append(cluster_center)
center = np.transpose(np.array(center),[1,0,2,3])
center = torch.from_numpy(center).float()
return center
|
ada-shen/icCNN
|
utils/utils.py
|
utils.py
|
py
| 4,582 |
python
|
en
|
code
| 18 |
github-code
|
6
|
[
{
"api_name": "torch.zeros",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.float",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "torch.ones",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.float",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "torch.mean",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "numpy.expand_dims",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.expand_dims",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "torch.mean",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "torch.matmul",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "torch.div",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "torch.norm",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "torch.matmul",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "torch.mean",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "torch.mean",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "torch.sum",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 107,
"usage_type": "call"
},
{
"api_name": "numpy.transpose",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 110,
"usage_type": "call"
}
] |
71601955069
|
import datetime
dogdict = {
"american staffordshire terrier": True,
"pitbull terrier": True,
"bullterrier": True,
"bullmastiff": True,
"staffordshire bullterrier": True,
"cane corso": True,
"dogo argentino": True,
"bordeaux dogge": True,
"fila brasileiro": True,
"mastin espanol": True,
"französische bulldogge": False,
"labrador": False,
"chihuahua": False,
"australian shepherd": False,
"rottweiler": False,
"border collie": False,
"golden retriever": False,
"rhodesian ridgeback": False,
"mops": False,
"berner sennenhund": False
}
citydict = {
75015: 108,
76359: 96,
69254: 78,
76275: 96,
76287: 90,
76337: 108,
76307: 66,
76327: 72,
75045: 108,
76356: 96,
76297: 84,
76344: 84,
76351: 72,
76707: 72,
76676: 60,
76689: 69,
76646: 96,
75053: 48,
75038: 90,
76703: 87,
76698: 61,
76707: 48,
68753: 96,
76661: 96,
76709: 72,
76669: 90,
76684: 75,
75059: 72
}
def serverHandler(clientString):
dogInformations = splitStringFromClient(clientString, True)
personalInformation = splitStringFromClient(clientString, False)
if(dogInformations[3].lower() == "nein"):
regionTax = getRegionTax(personalInformation)
dogRaceSeperation = getDogRaceSeperation(dogInformations)
dogTax = getDogTax(dogRaceSeperation, regionTax)
else:
dogTax = 0
return dogTax
def splitStringFromClient(clientString, state):
try:
seperations = clientString.split(',')
if(state):
dogInformation = seperations[1].split(';')
return dogInformation
else:
personalInformation = seperations[0].split(';')
return personalInformation
except:
print("Error in SplitStringFormClient")
def getRegionTax(personalInformations):
regionTax = citydict[int(personalInformations[6])]
return regionTax
def getDogTax(dogRaceSeperation,regionTax):
dogTax = None
if(dogRaceSeperation == True):
dogTax = int(regionTax) * 5
else:
dogTax = int(regionTax)
if(dogTax != None):
return dogTax
else:
return "Error in DogTax"
def getDogRaceSeperation(dogInformations):
dogRaceSeperation = dogdict[dogInformations[2]]
return dogRaceSeperation
def checkDate(value):
try:
day, month, year = map(int, value.split('.'))
geburtstag_obj = datetime.date(year, month, day)
try:
date = value.strip()
datetime.datetime.strptime(date, "%d.%m.%Y")
except ValueError:
return "Ungueltiges Datumsformat. Bitte geben Sie den Geburtstag im Format TT.MM.JJJJ ein.", False
if geburtstag_obj >= datetime.date.today():
return "Ungueltiges Datumsformat. Der Geburtstag muss in der Vergangenheit liegen.", False
if month == 2 and day > 28:
leap_year = (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0)
if (day == 29 and not leap_year) or (day > 29):
return "Ungueltiges Datumsformat. Bitte geben Sie ein gueltiges Geburtsdatum ein.", False
return "", True
except:
return "Bei der Datumsüberprüfung lief etwas schief. Bitte wenden Sie sich an einen Admin", False
def checkInput(value,nessesary, nameCheck):
if(nessesary):
if(value == None or value == ''):
return False
else:
if("Geburtstag" in nameCheck):
result = checkDate(value)
return result
elif(nameCheck == "Hunderasse"):
if(value in dogdict):
return True
else:
return False
elif(nameCheck == "Ermaessigung"):
if(value.lower() == "ja" or value.lower() == "nein"):
return True
else:
return False
elif(nameCheck == "PLZ"):
if(value.isnumeric and int(value.strip()) in citydict):
return True
else:
return False
elif(nameCheck == "Hausnummer"):
if(value.isnumeric()):
return True
else:
return False
else:
return True
else:
if(value != None):
return True
else:
return False
|
MHin504/OZG-Hundesteuer
|
Server.py
|
Server.py
|
py
| 4,498 |
python
|
de
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "datetime.date",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "datetime.date.today",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 109,
"usage_type": "attribute"
}
] |
72779432828
|
import requests
import optparse
from progressbar import *
CHUNK_SIZE = 1024
widgets = ['Downloading : ', Percentage(),
' ', Bar(marker='#',left='[',right=']'),
' ', ETA(), ' ', FileTransferSpeed()]
def download_file(url, file_name=None):
response = requests.head(url)
total_size = int(response.headers.get('content-length'))
pbar = ProgressBar(widgets=widgets, maxval=total_size)
pbar.start()
going_size = 0
if not file_name:
file_name = url.split('/')[-1]
elif os.path.isfile(file_name):
file_name += 'new_' + file_name
r = requests.get(url, stream=True)
with open(file_name, 'wb') as f:
for chunk in r.iter_content(chunk_size=CHUNK_SIZE):
going_size += CHUNK_SIZE
pbar.update(going_size)
if chunk:
f.write(chunk)
f.flush()
pbar.finish()
return local_filename
parser = optparse.OptionParser()
parser.add_option('-u', default=False, dest='url')
parser.add_option('-n', default=False, dest='name')
options, remainder = parser.parse_args()
file_ = download_file(options.url, options.name)
|
bitst0rm/video-stream-downloader
|
vid_single.py
|
vid_single.py
|
py
| 1,158 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.head",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "optparse.OptionParser",
"line_number": 38,
"usage_type": "call"
}
] |
24966970253
|
#!/usr/bin/env python
from netCDF4 import Dataset
import copyNCVariable as copync
import sys, os
import random
import pdb
import numpy as np
import datetime as dt
#
#
#
def usage():
print("Usage")
print(" "+sys.argv[0]+" [filename] [dim name]")
exit(1)
def change_time_units(var):
"""Change the time unit from epoch time to hours since 1800"""
century18 = dt.datetime(1800,1,1,0)
#for i,j in enumerate(var[:]):
# date = dt.datetime.utcfromtimestamp(j)
# seconds = (date - century18).total_seconds()
# hours = int( seconds / 60 / 60 )
# var[i] = hours
def change_unit(date):
date = dt.datetime.utcfromtimestamp(date)
seconds = (date - century18).total_seconds()
hours = int( seconds / 60 / 60 )
return hours
vfunc = np.vectorize(change_unit)
new_data = vfunc(var[:])
var[:] = new_data
setattr(var, 'standard_name', "time")
setattr(var, 'long_name', "time")
setattr(var, "units","hours since 1800-01-01 00:00:00.0")
setattr(var, "calendar", "proleptic_gregorian")
return var
def add_utc_date(nc, time_var):
""" Adds human readable date variable.
Assumes date is in seconds since epoch.
time_var is netCDF.Variable object.
"""
# Create Variable
utc = nc.createVariable('utc_time', int, ('time'))
setattr(utc, 'standard_name', "time")
setattr(utc, 'long_name', "UTC date yyyy-mm-dd hh:00:00 as yyyymmddhh")
setattr(utc, "units","Gregorian_year month day hour")
toUTC = lambda d: int(dt.datetime.fromtimestamp(d).strftime('%Y%m%d%H'))
vfunc = np.vectorize(toUTC)
utc_data = vfunc(time_var[:])
utc[:] = utc_data
def find_variables_with_dimension(nc, dim_name):
selected_vars = []
for var_name in nc.variables:
var = nc.variables[var_name]
if dim_name in var.dimensions:
selected_vars.append(var)
return selected_vars
def find_variables_without_dimension(nc, dim_name):
selected_vars = []
for var_name in nc.variables:
var = nc.variables[var_name]
if dim_name not in var.dimensions:
selected_vars.append(var)
return selected_vars
def check_if_reduce_needed(vars_to_modify):
"""Return True if variable has missing start and end"""
for var in vars_to_modify:
if len(var.dimensions) > 2 and var[0,0,:].mask.all() and \
var[-1,1,:,:].mask.all():
return True
return False
def add_time_bounds(nc, varname):
"""
Adds a time bounds variable to variable.
Assumes time dimension is called 'time'
"""
THREE_HOURS = 60*60*3 # in seconds
bnds_name = 'time_bnds'
bounds_dim = 'nv'
# Create bounds dimension
nc.createDimension(bounds_dim, 2)
# Get variable matching varname
time_var = nc.variables['time']
time_var.setncattr('bounds', bnds_name)
time_data = time_var[:]
time_length = len(time_data)
# reshape time data
bounds_data = np.dstack((time_data,time_data)).reshape(time_length,2)
for i in bounds_data:
i[0] = i[0] - (THREE_HOURS)
bounds_var = nc.createVariable(bnds_name, time_var.dtype, ('time', bounds_dim), fill_value=9999)
bounds_var[:] = bounds_data
def add_cell_methods(nc):
methods = {
'avg' : 'mean',
'accum' : 'sum',
'min' : 'minimum',
'max' : 'maximum'
}
step_str = 'GRIB_stepType'
for i in nc.variables:
var = nc.variables[i]
if step_str in var.ncattrs() and 'instant' not in var.getncattr(step_str):
if 'cell_methods' in var.ncattrs():
cur_str = var.getncattr('cell_methods')
var.setncattr('cell_methods', cur_str + " time: " + methods[var.getncattr(step_str)])
else:
pass
#var.setncattr('cell_methods', "time: " + methods[var.getncattr(step_str)])
def change_coordinates(nc):
for i in nc.variables:
var = nc.variables[i]
if 'coordinates' in var.ncattrs():
coord_str = var.getncattr('coordinates')
coord_str = coord_str.replace('valid_time', '')
coord_str = coord_str.replace('step', '')
if 'time' not in coord_str:
coord_str += " time"
coord_str = ' '.join(coord_str.split())
var.setncattr('coordinates', coord_str)
def remove_dimension(nc, dim_name, outfile=None):
vars_to_modify = find_variables_with_dimension(nc, dim_name)
vars_to_copy = find_variables_without_dimension(nc, dim_name)
reduce_needed = check_if_reduce_needed(vars_to_modify)
if outfile is None:
outfile = 'tmp' + str(random.randint(1,10000)) + '.nc'
tmp_nc = Dataset(outfile, 'w')
# First copy global attrs
copync.copy_global_attrs(nc, tmp_nc)
# Then copy dimensions minus unwanted
copync.copy_dimensions(nc, tmp_nc, ignore=['time',dim_name])
if 'step' in nc.dimensions:
if reduce_needed:
tmp_nc.createDimension('time', (nc.dimensions['time'].size * nc.dimensions['step'].size) - 2)
else:
tmp_nc.createDimension('time', nc.dimensions['time'].size * nc.dimensions['step'].size )
else:
tmp_nc.createDimension('time', nc.dimensions['time'].size)
if len(vars_to_modify) == 0: # not in dimensions, but need to get rid of step vars
err_str = "'" + dim_name + "' is not in any of the variables."
#raise Exception(err_str)
time_var = None
valid_var = None
for var in vars_to_copy:
if var.name != 'time' and var.name != 'step' and var.name != 'valid_time':
copync.copy_variable(nc, tmp_nc, var.name)
elif var.name == 'time':
time_var = var
elif var.name == 'valid_time':
valid_var = var
new_var = tmp_nc.createVariable('time', valid_var.dtype, ('time',))
copync.copy_var_attrs(valid_var, new_var)
new_var[:] = valid_var[:]
return (outfile, tmp_nc)
# Next, copy unchanged vars
time_var = None
for var in vars_to_copy:
if var.name != 'time':
copync.copy_variable(nc, tmp_nc, var.name)
else:
time_var = var
for var in vars_to_modify:
# If described by only unwanted dimension, then remove variable.
if len(var.dimensions) == 1:
# Remove variable
pass
else:
# find dim index
dims = var.dimensions
dims_list = list(dims)
shape = var.shape
shape_list = list(shape)
idx = dims.index(dim_name)
if idx == 0:
print('Need to implement')
print('Exiting.')
exit(1)
size = shape_list.pop(idx)
dims_list.pop(idx)
dims = tuple(dims_list)
shape_list[idx-1] = shape_list[idx-1]*size
new_data = var[:].reshape(*shape_list)
if reduce_needed:
if len(dims) == 1:
new_data = new_data[1:-1]
elif len(dims) > 1:
new_data = new_data[1:-1,:,:]
varname = var.name
if varname == 'valid_time':
varname = 'time'
new_var = tmp_nc.createVariable(varname, var.dtype, dims)
copync.copy_var_attrs(var, new_var)
new_var[:] = new_data
step_str = 'GRIB_stepType'
if step_str in new_var.ncattrs() and new_var.getncattr(step_str) is not 'instant':
add_time_bounds(tmp_nc, new_var.name)
return (outfile, tmp_nc)
def change_fill_value(nc, fill_value):
"""Changes fill value for all variables in file"""
outfile = 'tmp' + str(random.randint(1,100000)) + '.nc'
out_nc = copync.copy_dimensions(nc, outfile)
copync.copy_variables(nc, out_nc, new_fill_value=fill_value)
out_nc.close()
return outfile
if __name__ == '__main__':
if len(sys.argv) <= 2:
usage()
outfile = None
nc_file = sys.argv[1]
dim_name = sys.argv[2]
nc = Dataset(nc_file, 'r+')
if dim_name != "none":
outfile,nc = remove_dimension(nc, dim_name)
add_cell_methods(nc)
change_coordinates(nc)
add_utc_date(nc, nc.variables['time'])
change_time_units(nc.variables['time'])
if 'time_bnds' in nc.variables:
change_time_units(nc.variables['time_bnds'])
second_outfile = change_fill_value(nc, 9999)
nc.close()
if outfile is not None:
os.remove(outfile)
os.rename(second_outfile, nc_file)
|
NCAR/rda-dataset-curation
|
common/removeDimension.py
|
removeDimension.py
|
py
| 8,646 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "sys.argv",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.utcfromtimestamp",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "numpy.vectorize",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "numpy.vectorize",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.dstack",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 152,
"usage_type": "call"
},
{
"api_name": "netCDF4.Dataset",
"line_number": 153,
"usage_type": "call"
},
{
"api_name": "copyNCVariable.copy_global_attrs",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "copyNCVariable.copy_dimensions",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "copyNCVariable.copy_variable",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "copyNCVariable.copy_var_attrs",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "copyNCVariable.copy_variable",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "copyNCVariable.copy_var_attrs",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "copyNCVariable.copy_dimensions",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "copyNCVariable.copy_variables",
"line_number": 238,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 244,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 247,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 248,
"usage_type": "attribute"
},
{
"api_name": "netCDF4.Dataset",
"line_number": 249,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number": 261,
"usage_type": "call"
},
{
"api_name": "os.rename",
"line_number": 262,
"usage_type": "call"
}
] |
25377227491
|
import profile
from django.urls import path
from . import views
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.auth import views as auth_views
from .views import PostListView,PostDetailView,PostCreateView,PostUpdateView, PostDeleteView, ProfileView, AddFollower,RemoveFollower,CommentCreateView
urlpatterns = [
# path('',PostListView.as_view(), name='home'),
path('',views.index, name='home'),
path('profile/<int:pk>',ProfileView.as_view(),name = 'profile'),
path('post/<int:pk>/',PostDetailView.as_view(), name='post-detail'),
path('post/<int:pk>/update/',PostUpdateView.as_view(), name='post-update'),
path('post/<int:pk>/delete/',PostDeleteView.as_view(), name='post-delete'),
path('post/new/',PostCreateView.as_view(), name='post-create'),
path('user_registration/',views.register, name ='user_registration' ),
path('accounts/login/',auth_views.LoginView.as_view(template_name = 'accounts/login.html'), name = 'login'),
path('logout/', auth_views.LogoutView.as_view(template_name = 'accounts/logout.html'), name='logout'),
path('profile/<int:pk>/followers/add', AddFollower.as_view(), name='add-follower'),
path('profile/<int:pk>/followers/remove', RemoveFollower.as_view(), name='remove-follower'),
path('post/<int:pk>/comment/',CommentCreateView.as_view(), name='add_comment'),
path('search/', views.search_results, name='search_results')
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
|
Njoro410/Insta-clone
|
insta/urls.py
|
urls.py
|
py
| 1,566 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "views.index",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "views.ProfileView.as_view",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "views.ProfileView",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "views.PostDetailView.as_view",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "views.PostDetailView",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "views.PostUpdateView.as_view",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "views.PostUpdateView",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "views.PostDeleteView.as_view",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "views.PostDeleteView",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "views.PostCreateView.as_view",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "views.PostCreateView",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "views.register",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "django.urls.path",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.views.LoginView.as_view",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.views.LoginView",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.views",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.views.LogoutView.as_view",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.views.LogoutView",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.views",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "views.AddFollower.as_view",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "views.AddFollower",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "views.RemoveFollower.as_view",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "views.RemoveFollower",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "views.CommentCreateView.as_view",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "views.CommentCreateView",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.urls.path",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "views.search_results",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings.DEBUG",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.static.static",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.MEDIA_URL",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.MEDIA_ROOT",
"line_number": 28,
"usage_type": "attribute"
}
] |
40026671586
|
import requests
from lxml import etree
BASE_DOMIN = 'https://ygdy8.net'
URL = []
HEADERS = {
'Referer': 'https://c.02kdid.com/b/1/1754/22432/960X90/960X90.html',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.116 Safari/537.36'
}
def get_detail_url(url):
response = requests.get(url, headers=HEADERS)
text = response.content
html = etree.HTML(text)
detail_url = html.xpath('//table[@class="tbspan"]//a/@href')
detail_url = map(lambda url: BASE_DOMIN + url, detail_url)
return detail_url
def parse_detail_page(url):
response = requests.get(url, headers=HEADERS)
text = response.content
html = etree.HTML(text)
movie = {}
movie['title'] = html.xpath('//div[@class="title_all"]//font[@color="#07519a"]/text()')
zoom = html.xpath('//div[@id="Zoom"]')[0]
imgs = html.xpath('//img/@src')
movie['cover'] = imgs[0]
infos = zoom.xpath('.//text()')
def info_parse(text, rule):
return text.replace(rule, '').strip()
for index, info in enumerate(infos):
if info.startswith('◎年 代'):
info = info_parse(info, '◎年 代')
movie['year'] = info
elif info.startswith('◎产 地'):
info = info_parse(info, '◎产 地')
movie['plase'] = info
elif info.startswith('◎类 别'):
info = info_parse(info, '◎类 别')
movie['catergory'] = info
elif info.startswith('◎主 演'):
info = info_parse(info, '◎主 演')
actors = [info]
for x in range(index+1, len(infos)):
actor = infos[x].strip()
if actor.startswith('◎'):
break
actors.append(actor)
movie['actors'] = actors
elif info.startswith('◎简 介'):
info = info_parse(info, '◎简 介')
proflie = ''
for x in range(index+1, len(infos)):
proflie += infos[x].strip()
if infos[x+1].startswith('【下载地址】'):
break
movie['proflie'] = proflie
downloadurl = html.xpath('//td[@bgcolor="#fdfddf"]/a/@href')[0]
movie['downloadurl'] = downloadurl
return movie
def spider():
base_url = "https://ygdy8.net/html/gndy/dyzz/list_23_{}.html"
for x in range(1, 2):
url = base_url.format(x)
detail_urls = get_detail_url(url)
for detail_url in detail_urls:
movie = parse_detail_page(detail_url)
if __name__ == '__main__':
spider()
|
mirrorthink/python
|
douban/douban.py
|
douban.py
|
py
| 2,668 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "requests.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "lxml.etree.HTML",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "lxml.etree.HTML",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"line_number": 24,
"usage_type": "name"
}
] |
39746911520
|
import threading
from flask import jsonify
from dophon_cloud import enhance, micro_cell_list
a = enhance(import_name=__name__,
properties={'111': 'aaa', 'service_name': 'dxh-service', 'host': '0.0.0.0', 'port': 80,
'reg_url': 'http://127.0.0.1:8301/reg/service/'})
m_c_list = micro_cell_list(a, properties={
'dxh-service': [
{
'/test': [
'/test1'
]
}
],
'xxx-service': [
{
'/c/test': [
'/another/test'
]
}
]
})
@a.route('/b')
def enter_b():
result = m_c_list.request('dxh-service', ['/test', '/test1'])
print(result)
return jsonify(result)
@a.route('/c')
def enter_c():
result = m_c_list.request('xxx-service', ['/c/test', '/another/test'])
print(result)
return jsonify(result)
threading.Thread(target=a.run).start()
|
Ca11MeE/dophon_cloud
|
dophon_cloud/a_test.py
|
a_test.py
|
py
| 912 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "dophon_cloud.enhance",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "dophon_cloud.micro_cell_list",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 40,
"usage_type": "call"
}
] |
18842905496
|
import logging
try:
from settings import DEBUG
except ImportError:
DEBUG = True
from raven.handlers.logging import SentryHandler
from clean.infra.log.utils.colors import color_style
class RequireDebugFalse(logging.Filter):
def filter(self, record):
return not DEBUG
class RequireDebugTrue(logging.Filter):
def filter(self, record):
return DEBUG
class ColorsFormatter(logging.Formatter):
def __init__(self, *args, **kwargs):
super(ColorsFormatter, self).__init__(*args, **kwargs)
self.style = self.configure_style(color_style())
def configure_style(self, style):
style.DEBUG = style.HTTP_NOT_MODIFIED
style.INFO = style.HTTP_INFO
style.WARNING = style.HTTP_NOT_FOUND
style.ERROR = style.ERROR
style.CRITICAL = style.HTTP_SERVER_ERROR
return style
def format(self, record):
message = logging.Formatter.format(self, record)
colorizer = getattr(self.style, record.levelname, self.style.HTTP_SUCCESS)
return colorizer(message)
class CaptureError(SentryHandler):
def emit(self, record):
return super(CaptureError, self).emit(record)
|
bahnlink/pyclean
|
clean/infra/log/utils/__init__.py
|
__init__.py
|
py
| 1,182 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "settings.DEBUG",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "logging.Filter",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "settings.DEBUG",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "logging.Filter",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "settings.DEBUG",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "logging.Formatter",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "clean.infra.log.utils.colors.color_style",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "logging.Formatter.format",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "logging.Formatter",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "raven.handlers.logging.SentryHandler",
"line_number": 39,
"usage_type": "name"
}
] |
18672735980
|
import numpy as np
import pickle
import scipy.signal as sp
import matplotlib.pyplot as plt
with open('datasave', 'rb') as file:
datasym =pickle.load(file)
dataf = np.zeros((91, 1024, 1024))
ref = np.mean(datasym[:17, :, :],axis=0)
for z1 in range(1024):
for z2 in range(1024):
value1 =datasym[30:121, z1, z2]
value2 = np.multiply(np.ones((91,)),ref[z1, z2])
dataf[:, z1, z2] = np.abs(np.fft.fft(value1-value2, axis=0))
for z in range(6):
f = sp.medfilt2d(
np.log(np.mean(dataf[z*3-2:z*3, :, :],axis=0)), kernel_size=11)
plt.figure()
plt.imshow(f)
plt.show()
|
jialanxin/UED-Analysis
|
load.py
|
load.py
|
py
| 615 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pickle.load",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.multiply",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.fft.fft",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.fft",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "scipy.signal.medfilt2d",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "scipy.signal",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "numpy.log",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
}
] |
43918816491
|
from cleverhans.attacks import CarliniWagnerL2
from tools.cleverhans.adversarial_attack import AdversarialAttack
class CarliniWagnerAttack(AdversarialAttack):
def __init__(self, model, targeted=False, confidence=0, batch_size=1, learning_rate=5e-3, binary_search_steps=5,
max_iterations=1000, abort_early=True, initial_const=1e-2, clip_min=-1, clip_max=1):
super().__init__(model=model, clip_min=clip_min, clip_max=clip_max)
self._targeted = targeted
self._confidence = confidence
self._batch_size = batch_size
self._learning_rate = learning_rate
self._binary_search_steps = binary_search_steps
self._max_iterations = max_iterations
self._abort_early = abort_early
self._initial_const = initial_const
with self.graph.as_default():
self._method = CarliniWagnerL2(self._model, sess=self.session, confidence=self._confidence,
batch_size=self._batch_size, learning_rate=self._learning_rate,
binary_search_steps=self._binary_search_steps,
max_iterations=self._max_iterations, abort_early=self._abort_early,
initial_const=self._initial_const, clip_min=self._clip_min,
clip_max=self._clip_max, targeted=self._targeted)
def get_name(self):
return "{}_{}".format(self.TOOL_NAME, "C&W")
def attack_method(self, labels):
if labels is not None:
if self._targeted:
return self._method.generate(x=self._x_clean, y_target=labels)
else:
return self._method.generate(x=self._x_clean, y=labels)
return self._method.generate(x=self._x_clean)
|
GianmarcoMidena/adversarial-ML-benchmarker
|
tools/cleverhans/carlini_wagner_attack.py
|
carlini_wagner_attack.py
|
py
| 1,845 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "tools.cleverhans.adversarial_attack.AdversarialAttack",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "cleverhans.attacks.CarliniWagnerL2",
"line_number": 20,
"usage_type": "call"
}
] |
33272740574
|
from random import random
import numpy
from copy import deepcopy
from texttable import Texttable
class Ant:
def __init__(self, size):
self._size = size
self._representation = [[] for i in range(size * 2)]
self._graph = [self._representation]
self._freeSpots = size - 1
def getFreeSpotsCount(self):
return self._freeSpots
def getRepresentation(self):
return self._representation
def getGraph(self):
return self._graph[:]
def setRepresentation(self, newRepresentation):
if len(self._representation[-1]) > len(newRepresentation[-1]):
self.decreaseFreeSpots()
self._representation = deepcopy(newRepresentation)
def decreaseFreeSpots(self):
self._freeSpots -= 1
def nextPossibilities(self):
possibilities = []
for i in range(self._size * 2 * (self._freeSpots)):
newPossibility = deepcopy(self._representation)
for row in range(self._size * 2):
possibleNumbers = [i for i in range(1, self._size + 1)]
for elem in self._representation[row]:
possibleNumbers.remove(elem)
# if row >= self._size and newPossibility[row - self._size][-1] in possibleNumbers:
# possibleNumbers.remove(newPossibility[row - self._size][-1])
choice = numpy.random.choice(possibleNumbers)
newPossibility[row].append(choice)
possibleNumbers.remove(choice)
possibilities.append(newPossibility)
return possibilities
def move(self, q0, trace, alpha, beta):
nextPossibilities = self.nextPossibilities()
distances = []
if len(nextPossibilities) == 0:
return False
auxAnt = Ant(self._size)
for position in nextPossibilities:
auxAnt.setRepresentation(position)
distances.append([position, auxAnt.fitness() - self.fitness()])
for i in range(len(distances)):
index = [0, False]
while index[0] < len(trace) or index[1]:
if trace[index[0]] == distances[i][0]:
index[1] = True
index[0] += 1
if index[1]:
distances[i][1] = (distances[i][1] ** beta) * (trace(index[0]) ** alpha)
if numpy.random.random() < q0:
distances = min(distances, key=lambda elem:elem[1])
self.setRepresentation(distances[0])
self._graph.append(self._representation)
else:
suma = 0
for elem in distances:
suma += elem[1]
if suma == 0:
choice = numpy.random.randint(0, len(distances))
self.setRepresentation(distances[choice][0])
self._graph.append(self._representation)
return
distances = [[distances[i][0], distances[i][1] / suma] for i in range(len(distances))]
for i in range(len(distances)):
sum = 0
for j in range(i+1):
sum += distances[j][1]
distances[i][1] = sum
choice = numpy.random.random()
i = 0
while choice > distances[i][1]:
i += 1
self.setRepresentation(distances[i][0])
self._graph.append(self._representation)
return True
def __str__(self):
table = Texttable()
for i in range(self._size):
row = []
for j in range(len(self._representation[i])):
row.append((self._representation[i][j], self._representation[i + self._size][j]))
table.add_row(row)
return table.draw()
def fitness(self):
fitness = 0
for i in range(self._size):
for j in range(len(self._representation[i])):
if self._representation[i][j] == self._representation[i + self._size][j]:
fitness += 1
if i < len(self._representation[i]) and self._representation[j][i] == self._representation[j + self._size][i]:
fitness += 1
for i in range(self._size - 1):
for j in range(i + 1, self._size):
fitness += numpy.count_nonzero(
numpy.equal(self._representation[i + self._size], self._representation[j + self._size]))
fitness += numpy.count_nonzero(numpy.equal(self._representation[i], self._representation[j]))
for i in range(len(self._representation[-1]) - 1):
column11 = [self._representation[j][i] for j in range(self._size)]
column12 = [self._representation[j + self._size][i] for j in range(self._size)]
for j in range(i + 1, len(self._representation[i])):
column21 = [self._representation[k][j] for k in range(self._size)]
column22 = [self._representation[k + self._size][j] for k in range(self._size)]
fitness += numpy.count_nonzero(numpy.equal(column11, column21))
fitness += numpy.count_nonzero(numpy.equal(column12, column22))
return fitness
|
CMihai998/Artificial-Intelligence
|
Lab4 - ACO/models/ant.py
|
ant.py
|
py
| 5,321 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "copy.deepcopy",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.random.choice",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.random",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.random",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "texttable.Texttable",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "numpy.count_nonzero",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "numpy.equal",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "numpy.count_nonzero",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "numpy.equal",
"line_number": 122,
"usage_type": "call"
},
{
"api_name": "numpy.count_nonzero",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "numpy.equal",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "numpy.count_nonzero",
"line_number": 132,
"usage_type": "call"
},
{
"api_name": "numpy.equal",
"line_number": 132,
"usage_type": "call"
}
] |
22666826531
|
from torchmetrics import Accuracy, F1Score, Precision, Recall, AUROC
class Configs:
def __init__(self, dataset="EMG"):
# preprocess configs
if dataset == "EMG":
self.dataset_config = EMGGestureConfig()
elif dataset == "NINA":
self.dataset_config = NinaproDB5Config()
self.model_config = ModelConfig(self.dataset_config)
self.training_config = TrainingConfig(self.dataset_config)
class EMGGestureConfig:
def __init__(self):
self.url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00481/EMG_data_for_gestures-master.zip"
self.save_dir = "dataset/EMGGesture"
self.batch_size = 256
self.partition = [0.8, 0., 0.2]
self.sampling_freq = 1000
self.pass_band = 200
self.classes = [1, 2, 3, 4, 5, 6]
self.window_length = 256
self.window_padding = 32
self.window_step = 64
self.threshold = 0
self.channels = 8
self.num_classes = len(self.classes)
self.jitter_ratio = 0.1
self.scaling_ratio = 0.1
self.num_permute = 8
class NinaproDB5Config:
def __init__(self):
self.url = [
"http://ninapro.hevs.ch/download/file/fid/457",
"http://ninapro.hevs.ch/download/file/fid/458",
"http://ninapro.hevs.ch/download/file/fid/459",
"http://ninapro.hevs.ch/download/file/fid/467",
"http://ninapro.hevs.ch/download/file/fid/461",
"http://ninapro.hevs.ch/download/file/fid/462",
"http://ninapro.hevs.ch/download/file/fid/463",
"http://ninapro.hevs.ch/download/file/fid/464",
"http://ninapro.hevs.ch/download/file/fid/465",
"http://ninapro.hevs.ch/download/file/fid/466",
]
self.save_dir = "dataset/Ninapro_DB5"
self.batch_size = 256
self.partition = [0.6, 0, 0.4]
self.sampling_freq = 200
self.pass_band = None
self.classes = [0, 6, 13, 14, 15, 16]
self.window_length = 512
self.window_padding = 32
self.window_step = 64
self.threshold = 0
self.channels = 8
self.num_classes = len(self.classes)
self.jitter_ratio = 0.1
self.scaling_ratio = 0.1
self.num_permute = 8
self.frequency_masking_ratio = 0.01
self.frequency_masking_damp = 0.5
class ModelConfig:
def __init__(self, dataset_config):
# (B, C, T)
self.span = dataset_config.window_length # keeping up with window length
self.input_channels = dataset_config.channels
self.kernel_size = 8
self.stride = 1
self.final_out_channels = 128
self.num_classes = dataset_config.num_classes
self.dropout = 0.35
self.conv_output_dim = self.span // 8
self.feature_len = 128
self.hidden_dim = 100
self.timesteps = self.conv_output_dim // 4
self.loss_temperature = 0.2
self.classifier_hidden = [512, self.num_classes]
self.classifier_dropout = 0.15
class TrainingConfig:
def __init__(self, config):
self.bag_of_metrics = {
"accuracy": Accuracy(
task="multiclass",
num_classes=config.num_classes,
average="micro",
),
"f1": F1Score(
task="multiclass",
num_classes=config.num_classes,
average="macro",
),
"precision": Precision(
task="multiclass",
num_classes=config.num_classes,
average="macro",
),
"recall": Recall(
task="multiclass",
num_classes=config.num_classes,
average="macro",
),
"auroc": AUROC(
task="multiclass",
num_classes=config.num_classes,
average="macro",
),
}
self.log_save_dir = "run1"
self.experiment_name = "TSTCC"
self.mode = "pretrain_finetune"
self.seed = 42
self.pretrain_epoch = 100
self.finetune_epoch = 100
self.lr = 3e-4
self.classifier_lr = 1e-4
self.classifier_weight_decay = 3e-3
self.per_class_samples = 100
self.version = f"samples_{self.per_class_samples}_pe_{self.pretrain_epoch}_fe_{self.finetune_epoch}_seed_{self.seed}"
|
3rd-Musketeer/UAF-PyTorch
|
configs/TSTCC_configs.py
|
TSTCC_configs.py
|
py
| 4,479 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torchmetrics.Accuracy",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "torchmetrics.F1Score",
"line_number": 106,
"usage_type": "call"
},
{
"api_name": "torchmetrics.Precision",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "torchmetrics.Recall",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "torchmetrics.AUROC",
"line_number": 121,
"usage_type": "call"
}
] |
14807523398
|
import multiprocessing
import time
import os
'''
import os
print("work进程编号",os.getpid())
'''
def dance(nums,names):
print("dance进程id:"+str(os.getpid()))
print("dance父进程id:"+str(os.getppid()))
for i in range(nums):
print(names+"跳舞")
time.sleep(0.5)
def sing(nums,names):
print("sing进程id:"+str(os.getpid()))
print("sing进程父id:"+str(os.getppid()))
for i in range(nums):
print(names+"唱歌")
time.sleep(0.5)
if __name__ == "__main__":
print("多进程")
sing_process = multiprocessing.Process(target=sing,args=(5,"小米"))
dance_process = multiprocessing.Process(target=dance,kwargs={"names":"小茗","nums":6})
print("主进程id:"+str(os.getpid()))
sing_process.start()
dance_process.start()
|
kids0cn/leetcode
|
Python语法/python多线程多进程/3.获取进程编号.py
|
3.获取进程编号.py
|
py
| 808 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "os.getpid",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.getppid",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.getpid",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.getppid",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Process",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Process",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.getpid",
"line_number": 30,
"usage_type": "call"
}
] |
23399964442
|
import nextcord
from nextcord.ext import commands, application_checks
from nextcord import Interaction, SlashOption
from config.config_handler import ConfigHandler
from middlewares.server_verification import ServerVerification
from components.modals.nuke_model import NukeModel
from middlewares.bot_permissions import BotPermissions
from utils.server_utils import ServerUtils
class Nuke(commands.Cog):
config_handler = ConfigHandler()
verifications = ServerVerification()
permissions = BotPermissions()
utils = ServerUtils()
def __init__(self, bot: commands.Bot):
self.bot = bot
@nextcord.slash_command(
name = "nuke",
description = "Ataque total sobre un servidor.",
guild_ids = config_handler.get_CaC_server_id()
)
@application_checks.has_role(config_handler.get_executor_rol_id())
async def nuke_command(self, ctx: Interaction,
id: str = SlashOption(required = True, description = "Id del servidor objtivo.")):
guild = self.utils.get_server(int(id), self.bot)
self.verifications.check_command_execution_in_allowed_server(guild.id)
self.verifications.check_bag(guild)
if (not self.permissions.has_nuke_permissions(guild)
and not self.permissions.has_administrator_permission):
raise commands.BotMissingPermissions(["8"])
modal = NukeModel(guild, self.bot)
await ctx.response.send_modal(modal)
def setup(client):
client.add_cog(Nuke(client))
|
Worcer/ASF
|
amanecer sin fronteras/src/commands/raid/nuke.py
|
nuke.py
|
py
| 1,639 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "nextcord.ext.commands.Cog",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "nextcord.ext.commands",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "config.config_handler.ConfigHandler",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "middlewares.server_verification.ServerVerification",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "middlewares.bot_permissions.BotPermissions",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "utils.server_utils",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "utils.server_utils.ServerUtils",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "nextcord.ext.commands.Bot",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "nextcord.ext.commands",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "nextcord.Interaction",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "nextcord.SlashOption",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "nextcord.ext.commands.BotMissingPermissions",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "nextcord.ext.commands",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "components.modals.nuke_model.NukeModel",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "nextcord.slash_command",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "nextcord.ext.application_checks.has_role",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "nextcord.ext.application_checks",
"line_number": 27,
"usage_type": "name"
}
] |
70279501309
|
import os
import six
import logging
from django.utils import timezone
logger = logging.getLogger(__name__)
def queryset_csv_export(qs, fields, cache_funcs=None, filepath=None, fileobj=None, delimiter='|'):
import csv
import inspect
from django.db.models.query import QuerySet
if not filepath:
raise Exception("expecting a filepath")
file_dir = os.path.dirname(filepath)
if not os.path.exists(file_dir):
os.makedirs(file_dir)
csvfile = fileobj or open(filepath, 'w') # will write to disk by default
writer = csv.writer(csvfile, delimiter=delimiter)
def to_string(val):
if val is None:
val = ""
if callable(val):
val = val()
if not isinstance(val, six.string_types):
val = str(val)
# try:
# val = ascii_encode(val)
# except:
# val = str(val)
return ("%r" % val)[1:-1]
def get_arg_count(fn):
from functools import partial
if type(fn) == partial:
return len(inspect.getargspec(fn.func)[0]) - len(fn.args)
return len(inspect.getargspec(fn)[0])
header_names = []
rows = []
import types
if isinstance(qs, list):
total_count = len(qs)
elif isinstance(qs, QuerySet):
total_count = qs.count()
elif isinstance(qs, types.GeneratorType):
total_count = "unknown (generator)"
else:
raise Exception("No one has shown me how to get the count of a %s" % type(qs))
logger.debug("# of rows in qs = %s" % total_count)
count = 0
for obj in qs:
count += 1
start_time = timezone.now()
row = []
cache_dict = {}
if cache_funcs:
def is_cache_evaluated():
all_cache_keys = [cache_func[0] for cache_func in cache_funcs]
return all([cache_key in cache_dict for cache_key in all_cache_keys])
while not is_cache_evaluated():
for cache_func_tpl in cache_funcs:
cache_key, cache_func = cache_func_tpl[0], cache_func_tpl[1]
cache_dependency = cache_func_tpl[2] if len(cache_func_tpl) > 2 else None
if cache_key in cache_dict or (cache_dependency is not None and cache_dependency not in cache_dict):
continue
cache_func_arg_count = get_arg_count(cache_func)
if cache_func_arg_count == 1:
cache_dict[cache_key] = cache_func(obj)
elif cache_func_arg_count == 2:
cache_dict[cache_key] = cache_func(obj, cache_dict)
else:
raise Exception("invalid number of args for cache function")
for field in fields:
if isinstance(field, six.string_types):
if field not in header_names:
header_names.append(field)
if isinstance(obj, dict):
val = obj.get(field, "")
else:
val = getattr(obj, field, "")
row.append(to_string(val)) # append the value as a raw text value to keep linebreaks \r\n on a single line
elif isinstance(field, tuple):
if len(field) != 2:
raise Exception("invalid computed field length of %s. Field value = %s" % (len(field), field))
computed_header_name, fn = field
if computed_header_name not in header_names:
header_names.append(computed_header_name)
fn_arg_count = get_arg_count(fn)
if fn_arg_count == 1:
row.append(to_string(fn(obj)))
elif fn_arg_count == 2:
row.append(to_string(fn(obj, cache_dict)))
else:
raise Exception("expecting 1 or 2 args. actual # = %s" % fn_arg_count)
else:
raise Exception("invalid field type of %s, field value = %s" % (type(field), field))
rows.append(row)
end_time = timezone.now()
logger.debug("finished %s of %s. time = %s" % (count, total_count, str(end_time - start_time)))
writer.writerow(header_names)
writer.writerows(rows)
if fileobj:
return fileobj
# def ascii_encode(string):
# import unicodedata
# return unicodedata.normalize('NFKD', unicode(string)).encode('ascii', 'ignore')
|
mirusresearch/mirus_django_csv
|
mirus_django_csv.py
|
mirus_django_csv.py
|
py
| 4,477 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "six.string_types",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "functools.partial",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "inspect.getargspec",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "inspect.getargspec",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "django.db.models.query.QuerySet",
"line_number": 49,
"usage_type": "argument"
},
{
"api_name": "types.GeneratorType",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "six.string_types",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 105,
"usage_type": "name"
}
] |
4272453202
|
from django.shortcuts import render, get_object_or_404
from django.http import JsonResponse
from .models import Pokemon
def pokemon_list(request):
pokemon = Pokemon.objects.all()
data = {"results": list(pokemon.values(
"name",
"apiId",
"chainId",
"healtPoint",
"attack",
"defense",
"specialAttack",
"specialDefense",
"speed",
"height",
"weight",
"evolution"
))
}
return JsonResponse(data)
def pokemon_detail(request, name):
pokemon = get_object_or_404(Pokemon, name=name)
chain = Pokemon.objects.values('name', 'apiId', 'evolution').filter(
chainId=pokemon.chainId).exclude(name=pokemon.name)
data = {"Pokemon": {
"name": pokemon.name,
"apiId": pokemon.apiId,
"chainId": pokemon.chainId,
"healtPoint": pokemon.healtPoint,
"attack": pokemon.attack,
"defense": pokemon.defense,
"specialAttack": pokemon.specialAttack,
"specialDefense": pokemon.specialDefense,
"speed": pokemon.speed,
"height": pokemon.height,
"weight": pokemon.weight,
"evolution": pokemon.evolution
},
"evolution": []
}
for i in chain:
et = ""
if i["evolution"] > pokemon.evolution:
et = "Evolution"
elif i["evolution"] < pokemon.evolution:
et = "Preevolution"
else:
et = "Alternate"
related = {
"apiId": i["apiId"],
"name": i["name"],
"type": et
}
data['evolution'].append(related)
return JsonResponse(data)
|
hitolv4/poketest
|
pokemon/views.py
|
views.py
|
py
| 1,667 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "models.Pokemon.objects.all",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "models.Pokemon.objects",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "models.Pokemon",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.get_object_or_404",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "models.Pokemon",
"line_number": 28,
"usage_type": "argument"
},
{
"api_name": "models.Pokemon.objects.values",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "models.Pokemon.objects",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "models.Pokemon",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "django.http.JsonResponse",
"line_number": 64,
"usage_type": "call"
}
] |
38390581306
|
import logging
from queue import Queue
from logging.handlers import QueueListener, QueueHandler, RotatingFileHandler
from contextlib import contextmanager
from django.conf import settings
@contextmanager
def prepare_background_logging(log_path):
logger = logging.getLogger()
logger.handlers = []
log_queue = Queue(-1)
queue_handler = QueueHandler(log_queue)
logger.addHandler(queue_handler)
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S %Z')
console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter)
console_handler.setLevel(logging.DEBUG)
file_handler = RotatingFileHandler(log_path, maxBytes=1024*1024*1024, backupCount=12)
file_handler.setFormatter(formatter)
file_handler.setLevel(logging.INFO)
listener = QueueListener(log_queue, console_handler, file_handler)
listener.start()
try:
yield
except Exception as e:
logger.error(str(e))
raise e
finally:
listener.stop()
|
Shvidkiy-Dima/checker
|
background_service/utils.py
|
utils.py
|
py
| 1,090 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "queue.Queue",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.handlers.QueueHandler",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "logging.Formatter",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "logging.StreamHandler",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "logging.handlers.RotatingFileHandler",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "logging.handlers.QueueListener",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "contextlib.contextmanager",
"line_number": 8,
"usage_type": "name"
}
] |
8214750417
|
from flask import request
from flask_restx import Resource, Namespace, abort
from marshmallow import ValidationError
from implemented import user_service
from tools.jwt_token import JwtSchema, JwtToken
from views.users import LoginValidator
auth_ns = Namespace('auth')
@auth_ns.route('/')
class AuthView(Resource):
def post(self):
try:
data = LoginValidator().load(request.json)
user = user_service.get_by_name(data["username"])
if not user:
abort(404)
token_data = JwtSchema().load({"user_id": user.id, "role": user.role})
return JwtToken(token_data).get_tokens(), 201
except ValidationError:
abort(400)
def put(self):
try:
refresh_token = request.json["refresh_token"]
data = JwtToken.decode_token(refresh_token)
data.pop("exp", None)
token_data = JwtSchema().load(data)
user = user_service.get_one(token_data["user_id"])
if not user:
abort(404)
token_data = JwtSchema().load({"user_id": user.id, "role": user.role})
return JwtToken(token_data).get_tokens(), 201
except Exception as e:
abort(400)
|
Mariyatm/-lesson19_project_hard_source
|
views/auth.py
|
auth.py
|
py
| 1,256 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask_restx.Namespace",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask_restx.Resource",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "views.users.LoginValidator",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "flask.request.json",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "implemented.user_service.get_by_name",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "implemented.user_service",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "flask_restx.abort",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "tools.jwt_token.JwtSchema",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "tools.jwt_token.JwtToken",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "marshmallow.ValidationError",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "flask_restx.abort",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "flask.request.json",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "tools.jwt_token.JwtToken.decode_token",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "tools.jwt_token.JwtToken",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "tools.jwt_token.JwtSchema",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "implemented.user_service.get_one",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "implemented.user_service",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "flask_restx.abort",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "tools.jwt_token.JwtSchema",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "tools.jwt_token.JwtToken",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "flask_restx.abort",
"line_number": 37,
"usage_type": "call"
}
] |
75066299708
|
import argparse
def parse_args(argv=None):
    """Parse command-line arguments for gendiff.

    Args:
        argv: Optional list of argument strings to parse. Defaults to
            ``None``, in which case argparse falls back to ``sys.argv[1:]``
            (preserving the original behavior). Accepting an explicit list
            makes the function testable and reusable.

    Returns:
        Tuple ``(first_file, second_file, format)`` where *format* is one
        of ``'stylish'``, ``'plain'`` or ``'json'`` (default ``'stylish'``).
    """
    # description
    parser = argparse.ArgumentParser(
        description='Compares two configuration files and shows a difference.')
    # positional arguments:
    parser.add_argument('first_file')
    parser.add_argument('second_file')
    # optional arguments:
    parser.add_argument('-f', '--format',
                        default="stylish",
                        choices=['stylish', 'plain', 'json'],
                        help='set format of output')
    # assign an argument
    args = parser.parse_args(argv)
    return args.first_file, args.second_file, args.format
|
slovohot/python-project-50
|
gendiff/logic/argparser.py
|
argparser.py
|
py
| 620 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 6,
"usage_type": "call"
}
] |
25006136605
|
from typing import TYPE_CHECKING, List, NamedTuple, Optional
import boto3
if TYPE_CHECKING:
from mypy_boto3_ec2.type_defs import FilterTypeDef
from typing_extensions import TypedDict
from aec.util.config import Config
class Image(TypedDict, total=False):
    """Subset of EC2 image attributes assembled by describe()."""
    # Human-readable image name; describe() stores None when absent.
    Name: Optional[str]
    ImageId: str
    # ISO timestamp string; describe() sorts results by this field.
    CreationDate: str
    RootDeviceName: Optional[str]
    # Volume size (GiB) of the first block device mapping.
    Size: int
    # optional -- only populated when describe(show_snapshot_id=True)
    SnapshotId: str
class AmiMatcher(NamedTuple):
    """Pairs an AMI owner account id with a name pattern used to locate
    the latest matching image (see fetch())."""
    owner: str
    match_string: str
# AWS account ids used as AMI owners for the keyword shortcuts below
# (presumably the Amazon Linux and Canonical publisher accounts --
# NOTE(review): confirm against AWS documentation).
amazon_base_account_id = "137112412989"
canonical_account_id = "099720109477"

# Friendly-name shortcuts resolved by fetch() to the latest AMI whose
# name matches the pattern and whose owner is the given account.
ami_keywords = {
    "amazon2": AmiMatcher(amazon_base_account_id, "amzn2-ami-hvm*x86_64-gp2"),
    "ubuntu1604": AmiMatcher(canonical_account_id, "ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64"),
    "ubuntu1804": AmiMatcher(canonical_account_id, "ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64"),
    "ubuntu2004": AmiMatcher(canonical_account_id, "ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64"),
}
def fetch(config: Config, ami: str) -> Image:
    """Fetch details for a single AMI.

    *ami* may be a keyword from ``ami_keywords`` (resolved to the latest
    image matching that keyword's pattern/owner) or a literal AMI id.

    Raises:
        RuntimeError: if no matching image is found. Raised ``from None``
            so the internal IndexError (empty describe() result) does not
            pollute the traceback shown to the user.
    """
    ami_matcher = ami_keywords.get(ami)
    if ami_matcher:
        try:
            # lookup the latest ami by name match
            ami_details = describe(config, owner=ami_matcher.owner, name_match=ami_matcher.match_string)[0]
        except IndexError:
            raise RuntimeError(
                f"Could not find ami with name matching {ami_matcher.match_string} owned by account {ami_matcher.owner}"
            ) from None
    else:
        try:
            # lookup by ami id
            ami_details = describe(config, ami=ami)[0]
        except IndexError:
            raise RuntimeError(f"Could not find {ami}") from None
    return ami_details
def describe(
    config: Config,
    ami: Optional[str] = None,
    owner: Optional[str] = None,
    name_match: Optional[str] = None,
    show_snapshot_id: bool = False,
) -> List[Image]:
    """List AMIs."""
    client = boto3.client("ec2", region_name=config.get("region", None))

    if ami:
        # A concrete AMI id was given: look up exactly that image.
        response = client.describe_images(ImageIds=[ami])
    else:
        # Resolve which owner accounts to query: explicit argument first,
        # then the config entry, finally the calling account itself.
        if owner:
            owners_filter: List[str] = [owner]
        else:
            configured_owners = config.get("describe_images_owners", None)
            if not configured_owners:
                owners_filter = ["self"]
            elif isinstance(configured_owners, str):
                owners_filter = [configured_owners]
            else:
                owners_filter = configured_owners

        # Fall back to the configured name filter when none was passed.
        if name_match is None:
            name_match = config.get("describe_images_name_match", None)

        if name_match is None:
            filters: List[FilterTypeDef] = []
        else:
            filters = [{"Name": "name", "Values": [f"*{name_match}*"]}]

        print(f"Describing images owned by {owners_filter} with name matching {name_match if name_match else '*'}")
        response = client.describe_images(Owners=owners_filter, Filters=filters)

    # Project each raw EC2 image record onto our Image shape.
    images = []
    for record in response["Images"]:
        entry: Image = {
            "Name": record.get("Name", None),
            "ImageId": record["ImageId"],
            "CreationDate": record["CreationDate"],
            "RootDeviceName": record["RootDeviceName"] if "RootDeviceName" in record else None,
            "Size": record["BlockDeviceMappings"][0]["Ebs"]["VolumeSize"],
        }
        if show_snapshot_id:
            entry["SnapshotId"] = record["BlockDeviceMappings"][0]["Ebs"]["SnapshotId"]
        images.append(entry)

    # Newest first.
    return sorted(images, key=lambda img: img["CreationDate"], reverse=True)
def delete(config: Config, ami: str) -> None:
    """Deregister an AMI and delete its snapshot."""
    client = boto3.client("ec2", region_name=config.get("region", None))
    # Capture the snapshot id before the image is deregistered,
    # otherwise it can no longer be looked up via the AMI.
    details = describe(config, ami, show_snapshot_id=True)
    client.deregister_image(ImageId=ami)
    client.delete_snapshot(SnapshotId=details[0]["SnapshotId"])
def share(config: Config, ami: str, account: str) -> None:
    """Share an AMI with another account.

    Grants *account* launch permission on *ami* via
    ModifyImageAttribute.
    """
    ec2_client = boto3.client("ec2", region_name=config.get("region", None))
    # NOTE(review): Value="string" looks like a leftover placeholder from
    # the API documentation -- confirm whether it is actually required
    # alongside the LaunchPermission/OperationType/UserIds arguments.
    ec2_client.modify_image_attribute(
        ImageId=ami,
        LaunchPermission={"Add": [{"UserId": account}]},
        OperationType="add",
        UserIds=[account],
        Value="string",
        DryRun=False,
    )
|
DENE-dev/dene-dev
|
RQ1-data/exp2/552-seek-oss@aec-dc5825f8ca2f88df7f4eba38362ffbcf90bf17bb/src/aec/command/ami.py
|
ami.py
|
py
| 4,241 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "typing_extensions.TypedDict",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "typing.NamedTuple",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "aec.util.config.Config",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "aec.util.config.Config",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "boto3.client",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "mypy_boto3_ec2.type_defs.FilterTypeDef",
"line_number": 87,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "aec.util.config.Config",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "boto3.client",
"line_number": 111,
"usage_type": "call"
},
{
"api_name": "aec.util.config.Config",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "boto3.client",
"line_number": 123,
"usage_type": "call"
}
] |
19303998344
|
# -*- coding: utf-8 -*-
import pygame, os, sys
import pygame_functions as pyf
import constants as c
import time
import shuffle
import bfs
import dfs
import it_dfs
import a_star
import utils
class Game_Interface:
    """Pygame UI for an n-puzzle solver.

    Draws the tile sprites plus control buttons (shuffle, +/- iteration
    count, and one button per search algorithm: BFS, DFS, iterative DFS,
    and A* with two heuristics), then runs an event loop that reacts to
    clicks, runs the chosen algorithm, animates the solution and reports
    elapsed time and memory usage.
    """

    def __init__(self, nmax, filename):
        # Control variables
        self.nmax = nmax                      # board side length (nmax x nmax tiles)
        self.mouse_state_plus = False         # debounce flags for the +/- buttons
        self.mouse_state_minus = False
        #self.alg
        self.sprite_list = []                 # tile sprites; index 0 is the blank tile
        self.shuffler = shuffle.Shuffle(self.nmax)
        self.imagesize = c.IMAGE_SIZE         # pixel size of one tile
        self.time_elapsed = 0

        # Pygame initialisation
        pyf.screenSize(c.SCREEN_WIDTH, c.SCREEN_HEIGHT)
        pyf.setBackgroundColour(c.GRAY)

        # Instantiate the tile sprite list (one image per tile value)
        for i in range(0, nmax*nmax):
            self.sprite_list.append(pyf.makeSprite("images/" + filename + str(i) + ".png"))

        # Load the standard UI sprites (buttons and labels)
        self.plus = pyf.makeSprite("images/plus.png")
        self.minus = pyf.makeSprite("images/minus.png")
        self.shuffle_button = pyf.makeSprite("images/shuffle.png")
        self.BFS_button = pyf.makeSprite("images/BFS.png")
        self.DFS_button = pyf.makeSprite("images/DFS.png")
        self.DFS_IT_button = pyf.makeSprite("images/BFS_IT.png")
        self.A1_button = pyf.makeSprite("images/A_H1.png")
        self.A2_button = pyf.makeSprite("images/A_H2.png")
        self.text_shuffler_label = pyf.makeLabel(u"Número de iterações: ", 30, 50, 690, "black", "Arial", "clear")
        self.text_time = pyf.makeLabel(u"Tempo de execução: ", 30, 700, 400, "black", "Arial", "clear")
        self.text_time2 = pyf.makeLabel("segundos", 30, 980, 400, "black", "Arial", "gray")
        self.text_memory = pyf.makeLabel(u"Memória utilizada: ", 30, 735, 450, "black", "Arial", "clear")
        #self.text_moves = pyf.makeLabel("Movimentos Realizados: ", 30, 735, 500, "black", "Arial", "clear")
        #self.text_moves2 = pyf.makeLabel("", 30, 735, 500, "black", "Arial", "gray")
        self.text_memory2 = pyf.makeLabel("bytes", 30, 980, 450, "black", "Arial", "gray")
        self.number_shuffler_label = pyf.makeLabel(str(c.IT), 30, 332, 692, "black", "Arial", "clear")

        # Scale the tile sprites for boards larger than 3x3
        if self.nmax > 3:
            self.initial_transformation()

        # Position sprites
        self.initial_position()
        pyf.moveSprite(self.shuffle_button, 570, 710, True)
        pyf.moveSprite(self.plus, 515, 710, True)
        pyf.moveSprite(self.minus, 460, 710, True)
        pyf.moveSprite(self.BFS_button, 800, 100, True)
        pyf.moveSprite(self.DFS_button, 1010, 100, True)
        pyf.moveSprite(self.DFS_IT_button, 900, 210, True)
        pyf.moveSprite(self.A1_button, 800, 320, True)
        pyf.moveSprite(self.A2_button, 1010, 320, True)

        # Show sprites on screen
        for i in range(0, nmax*nmax):
            pyf.showSprite(self.sprite_list[i])
            # print(i)
        pyf.showSprite(self.shuffle_button)
        pyf.showSprite(self.plus)
        pyf.showSprite(self.minus)
        pyf.showLabel(self.text_shuffler_label)
        pyf.showLabel(self.number_shuffler_label)
        pyf.showLabel(self.BFS_button)
        pyf.showLabel(self.DFS_button)
        pyf.showLabel(self.DFS_IT_button)
        pyf.showLabel(self.A1_button)
        pyf.showLabel(self.A2_button)
        pyf.showLabel(self.text_time)
        pyf.showLabel(self.text_time2)
        pyf.showLabel(self.text_memory)
        pyf.showLabel(self.text_memory2)
        #pyf.showLabel(self.text_moves)
        #pyf.showLabel(self.text_moves2)
        pyf.transformSprite(self.shuffle_button, 0, 0.25)
        pyf.transformSprite(self.plus, 0, 0.25)
        pyf.transformSprite(self.minus, 0, 0.1)

    def initial_position(self):
        """Place all tiles in the solved layout, with the blank (index 0)
        at the bottom-right corner."""
        ini_pos = self.imagesize/2 + c.SPRITE_BORDER
        count_index = 1
        for i in range(0, self.nmax):
            for j in range(0, self.nmax):
                pyf.moveSprite(self.sprite_list[count_index], ini_pos + (j * self.imagesize), ini_pos + (i * self.imagesize), True)
                count_index += 1
                # Stop once every numbered tile has been placed.
                if count_index == self.nmax*self.nmax:
                    break
        pyf.moveSprite(self.sprite_list[0], ini_pos + ((self.nmax - 1) * self.imagesize), ini_pos + ((self.nmax - 1) * self.imagesize), True)

    def initial_transformation(self):
        """Scale the tile sprites so the whole board fits in a 600px area,
        updating self.imagesize accordingly."""
        factor = (600.0/self.nmax) / self.imagesize
        self.imagesize = self.imagesize * factor
        for i in range(0, self.nmax * self.nmax):
            pyf.transformSprite(self.sprite_list[i], 0, factor)

    def run(self):
        """Main event loop: runs until the ESC key is pressed."""
        keys = pygame.key.get_pressed()
        current_time = pygame.time.get_ticks()
        waittime = 0
        while not keys[pygame.K_ESCAPE]:
            current_time = pygame.time.get_ticks()
            # Poll the keyboard at most every 20 ms.
            if current_time > waittime:
                pygame.event.clear()
                keys = pygame.key.get_pressed()
                waittime += 20

            # Increment the shuffle iteration count (step grows with magnitude)
            if pyf.spriteClicked(self.plus):
                if not self.mouse_state_plus:      # debounce: act once per click
                    self.mouse_state_plus = True
                    if c.IT >= 1000:
                        c.IT += 1000
                    elif c.IT >= 100:
                        c.IT += 100
                    elif c.IT >= 10:
                        c.IT += 10
                    else:
                        c.IT += 1
                    pyf.changeLabel(self.number_shuffler_label, str(c.IT))
            else:
                self.mouse_state_plus = False

            # Decrement the shuffle iteration count
            if pyf.spriteClicked(self.minus):
                if not self.mouse_state_minus:     # debounce: act once per click
                    self.mouse_state_minus = True
                    if c.IT > 1000:
                        c.IT -= 1000
                    elif c.IT > 100:
                        c.IT -= 100
                    elif c.IT > 10:
                        c.IT -= 10
                    elif c.IT > 0:
                        c.IT -= 1
                    pyf.changeLabel(self.number_shuffler_label, str(c.IT))
            else:
                self.mouse_state_minus = False

            # Shuffle button
            if pyf.spriteClicked(self.shuffle_button):  # on click, reset layout and shuffle
                self.initial_position()
                self.shuffler_method(c.IT)

            # Algorithm buttons
            move_list = []
            # BFS
            if pyf.spriteClicked(self.BFS_button):
                bfs_alg = bfs.BFS(self.shuffler.get_matrix(), self.nmax)
                start = time.time()
                bfs_alg.BFS_algorithm()
                end = time.time()
                # Display ms for sub-second runs, seconds otherwise.
                if end - start < 1:
                    pyf.changeLabel(self.text_time2, "{0:.3f}".format((end - start) * 1000) + "ms")
                else:
                    pyf.changeLabel(self.text_time2, "{0:.3f}".format(end - start) + "s")
                pyf.changeLabel(self.text_memory2, "{0:.0f}".format(bfs_alg.get_memory_usage()) + " bytes")
                move_list = bfs_alg.get_solution_path()
                self.move_numbers(move_list, True)
                self.shuffler.reset_matrix()

            # DFS
            if pyf.spriteClicked(self.DFS_button):
                dfs_alg = dfs.DFS(self.shuffler.get_matrix(), self.nmax)
                start = time.time()
                dfs_alg.DFS_algorithm()
                end = time.time()
                if end - start < 1:
                    pyf.changeLabel(self.text_time2, "{0:.3f}".format((end - start) * 1000) + "ms")
                else:
                    pyf.changeLabel(self.text_time2, "{0:.3f}".format(end - start) + "s")
                pyf.changeLabel(self.text_memory2, "{0:.0f}".format(dfs_alg.get_memory_usage()) + "bytes")
                move_list = dfs_alg.get_solution_path()
                self.move_numbers(move_list, True)
                self.shuffler.reset_matrix()

            # Iterative-deepening DFS
            if pyf.spriteClicked(self.DFS_IT_button):
                # manually modify the initial maximum depth (inside IT_DFS)
                dfs_it_alg = it_dfs.IT_DFS(self.shuffler.get_matrix(), self.nmax)
                start = time.time()
                dfs_it_alg.IT_DFS_algorithm()
                end = time.time()
                if end - start < 1:
                    pyf.changeLabel(self.text_time2, "{0:.3f}".format((end - start) * 1000) + "ms")
                else:
                    pyf.changeLabel(self.text_time2, "{0:.3f}".format(end - start) + "s")
                pyf.changeLabel(self.text_memory2, "{0:.0f}".format(dfs_it_alg.get_memory_usage()) + "bytes")
                move_list = dfs_it_alg.get_solution_path()
                self.move_numbers(move_list, True)
                self.shuffler.reset_matrix()

            # A* with heuristic 1 (chessboard distance)
            if pyf.spriteClicked(self.A1_button):
                astar_alg = a_star.A_STAR(self.shuffler.get_matrix(), self.nmax)
                start = time.time()
                astar_alg.a_star_algorithm(utils.chessboard_heuristic)
                end = time.time()
                if end - start < 1:
                    pyf.changeLabel(self.text_time2, "{0:.3f}".format((end - start) * 1000) + "ms")
                else:
                    pyf.changeLabel(self.text_time2, "{0:.3f}".format(end - start) + "s")
                pyf.changeLabel(self.text_memory2, "{0:.0f}".format(astar_alg.get_memory_usage()) + "bytes")
                move_list = astar_alg.get_solution_path()
                self.move_numbers(move_list, True)
                self.shuffler.reset_matrix()

            # A* with heuristic 2 (Manhattan distance)
            if pyf.spriteClicked(self.A2_button):
                astar_alg = a_star.A_STAR(self.shuffler.get_matrix(), self.nmax)
                start = time.time()
                astar_alg.a_star_algorithm(utils.manhattan_heuristic)
                end = time.time()
                if end - start < 1:
                    pyf.changeLabel(self.text_time2, "{0:.3f}".format((end - start) * 1000) + "ms")
                else:
                    pyf.changeLabel(self.text_time2, "{0:.3f}".format(end - start) + "s")
                pyf.changeLabel(self.text_memory2, "{0:.0f}".format(astar_alg.get_memory_usage()) + "bytes")
                move_list = astar_alg.get_solution_path()
                self.move_numbers(move_list, True)
                self.shuffler.reset_matrix()

        pyf.endWait()

    def shuffler_method(self, n_moves):
        """Shuffle the board with n_moves random moves and animate them."""
        self.shuffler.shuffle_algorithm(n_moves)
        moves_list = self.shuffler.get_moves_list()
        self.move_numbers(moves_list, False)

    def change_position(self, m, flag):  # m = tile number to swap with the blank
        """Swap tile m with the blank tile on screen.

        When flag is True the move is animated in 5px steps with a small
        delay; when False the tile jumps straight to its new cell.
        """
        pos_correction = self.imagesize/2
        n0_x, n0_y = self.sprite_list[0].getPosition()   # x/y of the blank tile
        x_pos, y_pos = self.sprite_list[m].getPosition()  # x/y of the tile swapped with the blank
        x_temp, y_temp = self.sprite_list[m].getPosition()  # temporary copy for comparisons
        # Convert top-left sprite coordinates to tile centres.
        n0_y += pos_correction
        n0_x += pos_correction
        y_temp = y_temp+pos_correction
        x_temp = x_temp+pos_correction
        y_pos += pos_correction
        x_pos += pos_correction
        pyf.moveSprite(self.sprite_list[0], x_pos, y_pos, True)  # move the blank into the tile's cell
        if flag:
            # Animated move: slide the tile towards the blank's old cell.
            if n0_y > y_temp:
                for x in range(0, int(self.imagesize/5)):
                    y_pos += 5
                    pyf.moveSprite(self.sprite_list[m], x_pos, y_pos, True)
                    if flag:
                        time.sleep(c.TIME_CONST)
            elif n0_y < y_temp:
                for x in range(0, int(self.imagesize/5)):
                    y_pos -= 5
                    pyf.moveSprite(self.sprite_list[m], x_pos, y_pos, True)
                    if flag:
                        time.sleep(c.TIME_CONST)
            elif n0_x > x_temp:
                for x in range(0, int(self.imagesize/5)):
                    x_pos += 5
                    pyf.moveSprite(self.sprite_list[m], x_pos, y_pos, True)
                    if flag:
                        time.sleep(c.TIME_CONST)
            elif n0_x < x_temp:
                for x in range(0, int(self.imagesize/5)):
                    x_pos -= 5
                    pyf.moveSprite(self.sprite_list[m], x_pos, y_pos, True)
                    if flag:
                        time.sleep(c.TIME_CONST)
        else:
            # Instant move: place the tile directly in the blank's old cell.
            if n0_y > y_temp:
                pyf.moveSprite(self.sprite_list[m], x_pos, y_pos + self.imagesize, True)
            elif n0_y < y_temp:
                pyf.moveSprite(self.sprite_list[m], x_pos, y_pos - self.imagesize, True)
            elif n0_x > x_temp:
                pyf.moveSprite(self.sprite_list[m], x_pos + self.imagesize, y_pos, True)
            else:
                pyf.moveSprite(self.sprite_list[m], x_pos - self.imagesize, y_pos, True)

    def move_numbers(self, moves, flag):
        """Apply a sequence of tile moves (animated when flag is True)."""
        for move in moves:
            self.change_position(move, flag)

    def text_objects(self, text, font, color_text):
        """Render *text* with *font* and return (surface, rect)."""
        text_surface = font.render(text, True, color_text)
        return text_surface, text_surface.get_rect()
# Script entry point: guard so importing this module does not open a
# pygame window. Alternative boards are kept below as reference.
if __name__ == '__main__':
    game = Game_Interface(3, c.FILENAME_MAT)
    #game = Game_Interface(3, c.FILENAME_JAC)
    #game = Game_Interface(3, c.FILENAME_STD)
    #game = Game_Interface(4, c.FILENAME_STD)
    #game = Game_Interface(5, c.FILENAME_STD)
    game.run()
|
pHgon/8Puzzle-FIA
|
Interface/main.py
|
main.py
|
py
| 13,432 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "shuffle.Shuffle",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "constants.IMAGE_SIZE",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "pygame_functions.screenSize",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "constants.SCREEN_WIDTH",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "constants.SCREEN_HEIGHT",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "pygame_functions.setBackgroundColour",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "constants.GRAY",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "pygame_functions.makeSprite",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pygame_functions.makeSprite",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pygame_functions.makeSprite",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pygame_functions.makeSprite",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pygame_functions.makeSprite",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pygame_functions.makeSprite",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pygame_functions.makeSprite",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pygame_functions.makeSprite",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pygame_functions.makeSprite",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pygame_functions.makeLabel",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pygame_functions.makeLabel",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pygame_functions.makeLabel",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pygame_functions.makeLabel",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "pygame_functions.makeLabel",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "pygame_functions.makeLabel",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "constants.IT",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "pygame_functions.moveSprite",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "pygame_functions.moveSprite",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "pygame_functions.moveSprite",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "pygame_functions.moveSprite",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "pygame_functions.moveSprite",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "pygame_functions.moveSprite",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "pygame_functions.moveSprite",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "pygame_functions.moveSprite",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "pygame_functions.showSprite",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "pygame_functions.showSprite",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "pygame_functions.showSprite",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "pygame_functions.showSprite",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "pygame_functions.showLabel",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "pygame_functions.showLabel",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "pygame_functions.showLabel",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "pygame_functions.showLabel",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "pygame_functions.showLabel",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "pygame_functions.showLabel",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "pygame_functions.showLabel",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "pygame_functions.showLabel",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "pygame_functions.showLabel",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "pygame_functions.showLabel",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "pygame_functions.showLabel",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "pygame_functions.transformSprite",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "pygame_functions.transformSprite",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "pygame_functions.transformSprite",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "constants.SPRITE_BORDER",
"line_number": 92,
"usage_type": "attribute"
},
{
"api_name": "pygame_functions.moveSprite",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "pygame_functions.moveSprite",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "pygame_functions.transformSprite",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "pygame.key.get_pressed",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "pygame.key",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.get_ticks",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 114,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_ESCAPE",
"line_number": 116,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.get_ticks",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 117,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.clear",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 119,
"usage_type": "attribute"
},
{
"api_name": "pygame.key.get_pressed",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "pygame.key",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "pygame_functions.spriteClicked",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "constants.IT",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "constants.IT",
"line_number": 128,
"usage_type": "attribute"
},
{
"api_name": "constants.IT",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "constants.IT",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "constants.IT",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "constants.IT",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "constants.IT",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "pygame_functions.changeLabel",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "constants.IT",
"line_number": 135,
"usage_type": "attribute"
},
{
"api_name": "pygame_functions.spriteClicked",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "constants.IT",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "constants.IT",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "constants.IT",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "constants.IT",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "constants.IT",
"line_number": 146,
"usage_type": "attribute"
},
{
"api_name": "constants.IT",
"line_number": 147,
"usage_type": "attribute"
},
{
"api_name": "constants.IT",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "constants.IT",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "pygame_functions.changeLabel",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "constants.IT",
"line_number": 150,
"usage_type": "attribute"
},
{
"api_name": "pygame_functions.spriteClicked",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "constants.IT",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "pygame_functions.spriteClicked",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "bfs.BFS",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 164,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 166,
"usage_type": "call"
},
{
"api_name": "pygame_functions.changeLabel",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "pygame_functions.changeLabel",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "pygame_functions.changeLabel",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "pygame_functions.spriteClicked",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "dfs.DFS",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "pygame_functions.changeLabel",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "pygame_functions.changeLabel",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "pygame_functions.changeLabel",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "pygame_functions.spriteClicked",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "it_dfs.IT_DFS",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 202,
"usage_type": "call"
},
{
"api_name": "pygame_functions.changeLabel",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "pygame_functions.changeLabel",
"line_number": 207,
"usage_type": "call"
},
{
"api_name": "pygame_functions.changeLabel",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "pygame_functions.spriteClicked",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "a_star.A_STAR",
"line_number": 216,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "utils.chessboard_heuristic",
"line_number": 218,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "pygame_functions.changeLabel",
"line_number": 222,
"usage_type": "call"
},
{
"api_name": "pygame_functions.changeLabel",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "pygame_functions.changeLabel",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "pygame_functions.spriteClicked",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "a_star.A_STAR",
"line_number": 233,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 234,
"usage_type": "call"
},
{
"api_name": "utils.manhattan_heuristic",
"line_number": 235,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 236,
"usage_type": "call"
},
{
"api_name": "pygame_functions.changeLabel",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "pygame_functions.changeLabel",
"line_number": 241,
"usage_type": "call"
},
{
"api_name": "pygame_functions.changeLabel",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "pygame_functions.endWait",
"line_number": 248,
"usage_type": "call"
},
{
"api_name": "pygame_functions.moveSprite",
"line_number": 267,
"usage_type": "call"
},
{
"api_name": "pygame_functions.moveSprite",
"line_number": 272,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "constants.TIME_CONST",
"line_number": 274,
"usage_type": "attribute"
},
{
"api_name": "pygame_functions.moveSprite",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 280,
"usage_type": "call"
},
{
"api_name": "constants.TIME_CONST",
"line_number": 280,
"usage_type": "attribute"
},
{
"api_name": "pygame_functions.moveSprite",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 286,
"usage_type": "call"
},
{
"api_name": "constants.TIME_CONST",
"line_number": 286,
"usage_type": "attribute"
},
{
"api_name": "pygame_functions.moveSprite",
"line_number": 290,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "constants.TIME_CONST",
"line_number": 292,
"usage_type": "attribute"
},
{
"api_name": "pygame_functions.moveSprite",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "pygame_functions.moveSprite",
"line_number": 297,
"usage_type": "call"
},
{
"api_name": "pygame_functions.moveSprite",
"line_number": 299,
"usage_type": "call"
},
{
"api_name": "pygame_functions.moveSprite",
"line_number": 301,
"usage_type": "call"
},
{
"api_name": "constants.FILENAME_MAT",
"line_number": 313,
"usage_type": "attribute"
}
] |
2542885352
|
from os import path
import warnings
import copy
import cv2
from PIL import Image
from infy_field_extractor.internal.constants import Constants
class ExtractorHelper():
"""Helper class for data extraction"""
    @staticmethod
    def extract_with_text_coordinates(
            image, bboxes_text, get_text_provider, file_data_list, additional_info,
            fieldboxes, logger, temp_folderpath, field="checkbox"):
        """
        Method to help extract fields using image, imagepath, bounding boxes coordinates of the text and
        bounding boxes coordinates of the field.

        Groups the fieldboxes into horizontal "lines", then delegates each
        line to get_status_for_each_line and merges the per-line results.
        Bounding boxes are [x, y, w, h] lists indexed via Constants.BB_*.
        Returns (result dict, list of consumed key texts/bboxes).
        # assumes `image` is a 2-D (grayscale) array: image.shape unpacks to two values -- TODO confirm
        """
        _, width = image.shape
        # filter fieldboxes from bboxes_text
        bboxes_text = ExtractorHelper.filter_fieldboxes_from_ocr_words(
            fieldboxes, bboxes_text)
        # getting phrases
        # bboxes_text = ocr_parser_object.get_tokens_from_ocr(
        #     token_type_value=3, ocr_word_list=bboxes_text)
        additional_info['word_bbox_list'] = bboxes_text
        bboxes_text = get_text_provider.get_tokens(
            3, image, [], file_data_list, additional_info, temp_folderpath)
        # dividing a series of horizontal fieldboxes as each bounding box
        # Y_SCALE/H_SCALE pad each line band above/below the fieldbox.
        Y_SCALE = 6
        H_SCALE = 3
        c = fieldboxes[0]
        # appends the first line in bboxes_line_list
        bboxes_line_list = [[0, c[Constants.BB_Y]-c[Constants.BB_H]//Y_SCALE,
                             width, c[Constants.BB_H]+c[Constants.BB_H]//H_SCALE]]
        # list to add a new line
        temp_list = []
        # to track the count of the number of lines in bboxes_line_list
        count = 0
        # to track if any new word found which is not present in any bboxes_line_list
        flag = False
        for f in fieldboxes:
            for i in bboxes_line_list:
                count += 1
                # if words already there in the bboxes_list_line then set flag
                # as True and moves to the next line
                if(i[Constants.BB_Y] <= f[Constants.BB_Y] <= i[Constants.BB_Y]+i[Constants.BB_H]):
                    flag = True
                elif(flag is False and count == len(bboxes_line_list)):
                    # fieldbox matched no existing line: queue a new line band
                    temp_list.append(
                        [0, f[Constants.BB_Y]-f[Constants.BB_H]//Y_SCALE, width,
                         f[Constants.BB_H]+f[Constants.BB_H]//H_SCALE])
            bboxes_line_list = bboxes_line_list + temp_list
            temp_list = []
            flag = False
            count = 0
        # getting the final result
        # for each line divided calls the __get_status_for_each_line method
        result = {}
        done_fields_dList = []
        count = 0
        for bbox_line in bboxes_line_list:
            count += 1
            logger.info(
                "Extracting checkboxes from line "+str(count)+":")
            r, done_fieldsList = ExtractorHelper.get_status_for_each_line(
                bbox_line, bboxes_text, fieldboxes, image, logger, field)
            done_fields_dList = done_fields_dList+done_fieldsList
            result.update(r)
        return result, done_fields_dList
@staticmethod
def get_status_for_each_line(bbox_line, bboxes_text, fieldboxes, image, logger, field):
    """
    It returns a dictionary with text as key and the field's status or bbox as value.

    bbox_line: [x, y, w, h] horizontal band of the page holding one row of fields.
    bboxes_text: phrase tokens, each a dict with "text" and "bbox" keys.
    fieldboxes: [x, y, w, h] boxes of the detected checkboxes/radio buttons.
    image: page image, used to read the checkbox state.
    field: "checkbox" -> value in the result is the checked status (bool);
           "radio"    -> value in the result is the fieldbox itself.
    Also returns the list of token dicts that were consumed as keys.
    """
    # stores the x,y,width and height of the bbox of the line
    # (x and width are discarded; only the vertical extent of the band matters)
    _ = bbox_line[Constants.BB_X]
    y_l = bbox_line[Constants.BB_Y]
    _ = bbox_line[Constants.BB_W]
    h_l = bbox_line[Constants.BB_H]
    # filter fieldboxes present in bbox_line (top edge inside the band)
    fieldboxes_line = []
    for c in fieldboxes:
        if(y_l <= c[Constants.BB_Y] <= y_l+h_l):
            fieldboxes_line.append(c)
    # filter texts present in bbox_line
    texts_line = []
    for t in bboxes_text:
        # gets all the text even if a small region of the text is in the line,
        # therefore matches both the y-coordinate and y-coordinate+height of
        # the text lying inside the line's bbox
        if((y_l <= t.get("bbox")[Constants.BB_Y] <= (y_l+h_l)) or
                (y_l <= (t.get("bbox")[Constants.BB_Y]+t.get("bbox")[Constants.BB_H]) <= (y_l+h_l))):
            texts_line.append(t)
    # check if the fieldboxes are at the right or left side of the texts.
    # initializing isfieldRight as True, assuming that the last bbox in the
    # line is of the field; any text found to the right of it flips the flag.
    isfieldRight = True
    last_field = fieldboxes_line[len(fieldboxes_line)-1]
    y_c = last_field[Constants.BB_Y]
    h_c = last_field[Constants.BB_H]
    for t in texts_line:
        x_t = t.get("bbox")[Constants.BB_X]
        y_t = t.get("bbox")[Constants.BB_Y]
        # if the last bbox in the line is a phrase then fieldboxes are on the left side
        if((y_c-(h_c//2) <= y_t <= y_c + h_c) and x_t > last_field[Constants.BB_X]):
            isfieldRight = False
    logger.info(
        "Fieldboxes on the right side of value:"+str(isfieldRight))
    result = {}
    # get the final result
    # the variable adds dictionary with key as the text used for the field
    # and value as its bbox
    done_fields_dList = []
    for f in fieldboxes_line:
        # declare closest variable to consider the key for the fieldbox
        # which is closest to it
        closest = texts_line[0]
        # if keys are to the right of fields, the closest text to the right
        # of the field is the key for that field
        if(isfieldRight is False):
            for t in texts_line:
                x_t = t.get("bbox")[Constants.BB_X]
                # horizontal gap from the field's right edge to the candidate text
                t_dist = x_t - (f[Constants.BB_X]+f[Constants.BB_W])
                close_dist = closest.get(
                    "bbox")[Constants.BB_X] - (f[Constants.BB_X]+f[Constants.BB_W])
                # negative distance means the current "closest" is on the
                # wrong side; take the candidate unconditionally
                if(close_dist < 0):
                    closest = t
                if(close_dist > 0 and t_dist > 0 and t_dist < close_dist):
                    closest = t
        # if keys are to the left of fields, the closest text to the left
        # of the field is the key for that field
        else:
            for t in texts_line:
                x_t = t.get("bbox")[Constants.BB_X]
                w_t = t.get("bbox")[Constants.BB_W]
                # horizontal gap from the candidate text's right edge to the field
                t_dist = f[Constants.BB_X] - x_t - w_t
                close_dist = f[Constants.BB_X] - (
                    closest.get("bbox")[Constants.BB_X]+closest.get("bbox")[Constants.BB_W])
                if(close_dist < 0):
                    closest = t
                if(close_dist > 0 and t_dist > 0 and t_dist < close_dist):
                    closest = t
        text = closest.get("text")
        done_fields_dList.append(closest)
        # if two phrases arranged vertically are meant for that field, look
        # for a second text with almost the same x-coordinate as the chosen
        # one but vertically separated from it; join the two in top-down order
        X_SCALE = 2
        Y_SCALE = 2
        for t in texts_line:
            x_t = t.get("bbox")[Constants.BB_X]
            y_t = t.get("bbox")[Constants.BB_Y]
            w_t = t.get("bbox")[Constants.BB_W]
            h_t = t.get("bbox")[Constants.BB_H]
            x_ct = closest.get("bbox")[Constants.BB_X]
            y_ct = closest.get("bbox")[Constants.BB_Y]
            # compares the closest text's y-coordinates with the current text
            # which should differ by more than half the phrase height, while
            # the x-coordinates should be almost equal
            if((x_t-w_t//X_SCALE) <= x_ct <= (x_t+w_t//X_SCALE) and (Y_SCALE*abs(y_t - y_ct) > h_t)):
                done_fields_dList.append(t)
                if(y_ct < y_t):
                    text = closest.get("text") + " " + t.get("text")
                else:
                    text = t.get("text") + " " + closest.get("text")
                break
        # if the field is a checkbox then resolve whether it is ticked
        if(field == "checkbox"):
            isCheck = ExtractorHelper.check_if_true(
                image, f, field)
            result[text] = isCheck
        # if the field is radio then return the text as key and the
        # radio button bbox as value (selection is resolved elsewhere)
        elif(field == "radio"):
            result[text] = f
    return result, done_fields_dList
@staticmethod
def check_if_true(
        image, field_bbox, field, field_coordinate=[], debug_mode_check=False,
        temp_folderpath=None, img_name=None):
    """
    checks the status of the checkbox/radio using contour detection method.

    image: page image (indexable as image[y:y+h, x:x+w]).
    field_bbox: [x, y, w, h] of the field region to inspect.
    field: "radio" or "checkbox" - selects the detection strategy.
    field_coordinate: for "radio" only - [center_x, center_y, radius] in
        page coordinates. NOTE: mutable default ([]) is harmless here
        because the list is only read, never mutated.
    debug_mode_check: when True, writes a contour-overlay debug image.
    Returns True when the field is filled/ticked, False otherwise
    (implicitly None for any other ``field`` value).
    """
    # to get the image of only the field
    x, y, w, h = field_bbox[Constants.BB_X], field_bbox[Constants.BB_Y], \
        field_bbox[Constants.BB_W], field_bbox[Constants.BB_H]
    img = image[y:y+h, x:x+w]
    # binarize (inverted) so marks/borders become foreground for contours
    _, threshold = cv2.threshold(
        img, 170, 255, cv2.THRESH_BINARY_INV)
    contours, _ = cv2.findContours(
        threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if(debug_mode_check is True):
        img = cv2.drawContours(img, contours, -1, (180, 105, 255), 3)
        cv2.imwrite(f'{temp_folderpath}\\{img_name}_contours.png', img)
    if(field == "radio"):
        isCheck = False
        # translate the radio center into the cropped image's local
        # coordinates; r is the button radius
        x, y, r = field_coordinate[0] - \
            x, field_coordinate[1] - y, field_coordinate[2]
        for i in range(0, len(contours)):
            cnt = contours[i]
            # a contour point inside the inner third of the circle means
            # something is drawn near the center, i.e. the button is filled
            # (contour points appear to be [[x, y]] - c[0][0] is x; confirm
            # against cv2.findContours output shape)
            for c in cnt:
                if(int(x-r/3) < c[0][0] < int(x+r/3) and int(y - r/3) < c[0][1] < int(y+r/3)):
                    isCheck = True
        return isCheck
    elif(field == "checkbox"):
        MY_CONS_1 = 6
        cv2.drawContours(img, contours, -1, (100, 255, 150), 5)
        # x,y of the outermost boundary of the checkbox - the crop's origin
        x, y = 0, 0
        # to count the number of squares (box-border contours)
        count = 0
        # to count junk contours (tiny specks relative to the field area)
        count_false_cnt = 0
        for i in range(0, len(contours)):
            cnt = contours[i]
            epsilon = 0.04*cv2.arcLength(cnt, True)
            approx = cv2.approxPolyDP(cnt, epsilon, True)
            x1, _, w1, h1 = cv2.boundingRect(cnt)
            # counts contours that have four edges and whose x-coordinate lies
            # in the range of the x-coordinate of the checkbox's outer boundary
            if (len(approx) == Constants.RECT_EDGES and x-(w//MY_CONS_1) <= x1 <= x+(w//MY_CONS_1)):
                count += 1
            elif w1*h1 < 0.05*w*h:
                count_false_cnt += 1
        # if there is any contour other than the box margins and the junk
        # specks, it must be the tick mark, so report the box as checked
        if(len(contours)-count - count_false_cnt > 0):
            return True
        else:
            return False
@staticmethod
def filter_fieldboxes_from_ocr_words(fieldboxes, bboxes_text):
    """Return ``bboxes_text`` minus the word boxes that overlap a fieldbox.

    fieldboxes: list of [x, y, w, h] boxes of detected fields.
    bboxes_text: list of token dicts with a "bbox" key ([x, y, w, h]).
    Returns a new list preserving the order of ``bboxes_text``.

    Fix over the original: after a token matched a fieldbox it kept
    scanning the remaining fieldboxes (``continue`` only advanced the
    inner loop), appending the same token once per overlapping fieldbox.
    A ``break`` after the first match avoids the duplicate appends and
    the wasted comparisons; the filtered result is unchanged.
    """
    filter_list = []
    for t in bboxes_text:
        # token geometry is loop-invariant w.r.t. fieldboxes; hoist it
        x_t = t.get("bbox")[Constants.BB_X]
        y_t = t.get("bbox")[Constants.BB_Y]
        w_t = t.get("bbox")[Constants.BB_W]
        h_t = t.get("bbox")[Constants.BB_H]
        for c in fieldboxes:
            # overlap test 1: the fieldbox origin lies inside the token box,
            # or the token origin lies inside the fieldbox
            if((x_t <= c[Constants.BB_X] <= (x_t+w_t) and
                    y_t <= c[Constants.BB_Y] <= (y_t+h_t)) or
                    (c[Constants.BB_X] <= x_t <= (c[Constants.BB_X]+c[Constants.BB_W]) and
                        c[Constants.BB_Y] <= y_t <= (c[Constants.BB_Y]+c[Constants.BB_H]))):
                filter_list.append(t)
                break
            # overlap test 2: mixed containment - x inside one box while
            # y is inside the other (diagonal overlap cases)
            if((x_t <= c[Constants.BB_X] <= (x_t+w_t) and
                    c[Constants.BB_Y] <= y_t <= (c[Constants.BB_Y]+c[Constants.BB_H])) or
                    (c[Constants.BB_X] <= x_t <= (c[Constants.BB_X]+c[Constants.BB_W]) and
                        y_t <= c[Constants.BB_Y] <= (y_t+h_t))):
                filter_list.append(t)
                break
    return [x for x in bboxes_text if x not in filter_list]
@staticmethod
def check_image_dpi(imagepath, logger):
    """Warn (via ``warnings`` and the logger) when the image dpi metadata
    is below the Tesseract minimum, or cannot be read at all."""
    img_obj = Image.open(imagepath)
    try:
        dpi_value = img_obj.info['dpi']
        both_low = (dpi_value[0] < Constants.TESSERACT_MIN_DPI
                    and dpi_value[1] < Constants.TESSERACT_MIN_DPI)
        if both_low:
            warning = "The result might be not accurate due to low dpi"
            warnings.warn(warning)
            logger.warning(warning)
    except Exception:
        # dpi key missing or malformed - fall back to a generic caution
        warning = ("Dpi of the image cannot be extracted: "
                   "The result might be not accurate if the dpi is less than 300")
        warnings.warn(warning)
        logger.warning(warning)
@staticmethod
def read_image(image_path, logger, temp_folderpath, coordinates=None):
    """Load an image as grayscale and optionally crop it to ``coordinates``.

    image_path: path of the image file; raises Exception when missing.
    temp_folderpath: directory where a crop, if requested, is written.
    coordinates: optional [x, y, w, h] list, or a one-element list of
        dicts carrying the box under the "bbox" key. Empty/None means
        no cropping. (Fix: the original used a mutable default ``[]``.)
    Returns (image array, path of the possibly-cropped image, image name).
    """
    if not path.exists(image_path):
        logger.error("property imagepath not found")
        raise Exception("property imagepath not found")
    img_name = path.splitext(path.split(image_path)[1])[0]
    image = cv2.imread(image_path)
    try:
        # colour pages are converted to grayscale; if the file is already
        # single-channel, cvtColor raises and the image is used as-is
        img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    except Exception:
        img = image
    if coordinates:
        if isinstance(coordinates[0], dict):
            coordinates = coordinates[0]["bbox"]
        x_img, y_img, width, height = coordinates[Constants.BB_X], coordinates[
            Constants.BB_Y], coordinates[Constants.BB_W], coordinates[Constants.BB_H]
        image = img[y_img:y_img+height, x_img:x_img+width]
        imagepath = temp_folderpath + "//" + img_name + '_crop.jpg'
        # save via PIL so a 300 dpi tag is embedded for downstream OCR
        PILimage = Image.fromarray(image)
        PILimage.save(imagepath, dpi=(300, 300))
    else:
        image = img
        imagepath = image_path
    return (image, imagepath, img_name)
@staticmethod
def get_closest_fieldbox(fieldboxes, field_pos, phrase_bbox):
    """Return the fieldbox nearest to ``phrase_bbox`` in a given direction.

    fieldboxes: non-empty list of [x, y, w, h] candidate boxes.
    field_pos: "right", "left", "bottom" or "top" relative to the phrase;
        any other value selects by smallest absolute x offset, ties
        broken by y offset.
    phrase_bbox: [x, y, w, h] of the anchor phrase.
    """
    closest = fieldboxes[0]
    if(field_pos == "right"):
        for c in fieldboxes:
            # gap between the candidate's left edge and the phrase's right
            # edge (x + width).
            # BUGFIX: the original added BB_Y (the y coordinate) instead of
            # BB_W, which neither mirrors the "left" branch nor measures a
            # horizontal gap.
            c_dist = c[Constants.BB_X] - \
                (phrase_bbox[Constants.BB_X] + phrase_bbox[Constants.BB_W])
            close_dist = closest[Constants.BB_X] - \
                (phrase_bbox[Constants.BB_X] + phrase_bbox[Constants.BB_W])
            closest = ExtractorHelper.closest_fieldbox_if_left_right(
                phrase_bbox, c, close_dist, c_dist, closest)
    elif(field_pos == "left"):
        for c in fieldboxes:
            # gap between the phrase's left edge and the candidate's right edge
            c_dist = phrase_bbox[Constants.BB_X] - \
                (c[Constants.BB_X] + c[Constants.BB_W])
            close_dist = phrase_bbox[Constants.BB_X] - \
                (closest[Constants.BB_X]+closest[Constants.BB_W])
            closest = ExtractorHelper.closest_fieldbox_if_left_right(
                phrase_bbox, c, close_dist, c_dist, closest)
    elif(field_pos == "bottom"):
        for c in fieldboxes:
            # gap between the candidate's top edge and the phrase's bottom edge
            c_dist = c[Constants.BB_Y] - \
                (phrase_bbox[Constants.BB_Y] + phrase_bbox[Constants.BB_H])
            close_dist = closest[Constants.BB_Y] - \
                (phrase_bbox[Constants.BB_Y] + phrase_bbox[Constants.BB_H])
            closest = ExtractorHelper.closest_fieldbox_if_top_bottom(
                phrase_bbox, c, close_dist, c_dist, closest)
    elif(field_pos == "top"):
        for c in fieldboxes:
            # gap between the phrase's top edge and the candidate's bottom edge
            c_dist = phrase_bbox[Constants.BB_Y] - \
                (c[Constants.BB_Y] + c[Constants.BB_H])
            close_dist = phrase_bbox[Constants.BB_Y] - \
                (closest[Constants.BB_Y]+closest[Constants.BB_H])
            closest = ExtractorHelper.closest_fieldbox_if_top_bottom(
                phrase_bbox, c, close_dist, c_dist, closest)
    else:
        # no direction given: nearest by |dx|, ties by |dy|. min() is
        # stable for ties, matching the original sort-then-first behavior.
        return min(
            fieldboxes,
            key=lambda f: (abs(f[Constants.BB_X] - phrase_bbox[Constants.BB_X]),
                           abs(f[Constants.BB_Y] - phrase_bbox[Constants.BB_Y])))
    return closest
@staticmethod
def closest_fieldbox_if_top_bottom(
        phrase_bbox, fieldbox, closest_fieldbox_dist, fieldbox_dist, closest_fieldbox):
    """Choose between ``fieldbox`` and the current best for a field
    expected above or below the phrase.

    The candidate wins when it overlaps the phrase horizontally AND
    either the current best lies behind the phrase (negative distance)
    or the candidate lies in front and is at least as near.
    Returns the winning fieldbox.

    (Refactor: the original duplicated the 3-way overlap test verbatim
    in both distance branches; it is now computed once.)
    """
    p_x = phrase_bbox[Constants.BB_X]
    p_right = p_x + phrase_bbox[Constants.BB_W]
    c_x = fieldbox[Constants.BB_X]
    c_right = c_x + fieldbox[Constants.BB_W]
    # horizontal overlap: phrase fully inside candidate, candidate's left
    # edge inside the phrase, or candidate's right edge inside the phrase
    overlaps = ((p_x >= c_x and p_right <= c_right) or
                (p_x <= c_x <= p_right) or
                (p_x <= c_right <= p_right))
    if closest_fieldbox_dist < 0 and overlaps:
        return fieldbox
    if (closest_fieldbox_dist > 0 and fieldbox_dist > 0 and
            fieldbox_dist <= closest_fieldbox_dist and overlaps):
        return fieldbox
    return closest_fieldbox
@staticmethod
def closest_fieldbox_if_left_right(phrase_bbox, fieldbox, closest_fieldbox_dist, fieldbox_dist, closest_fieldbox):
    """Choose between ``fieldbox`` and the current best for a field
    expected left or right of the phrase.

    The candidate wins when it overlaps the phrase vertically AND either
    the current best lies behind the phrase (negative distance) or the
    candidate lies in front and is at least as near.
    Returns the winning fieldbox.

    BUGFIX: the original computed the candidate's bottom edge as
    y + BB_W (width) instead of y + BB_H (height); the sibling
    ``closest_fieldbox_if_top_bottom`` uses the symmetric (correct)
    x + BB_W form, confirming the intent.
    """
    p_y = phrase_bbox[Constants.BB_Y]
    p_bottom = p_y + phrase_bbox[Constants.BB_H]
    c_y = fieldbox[Constants.BB_Y]
    c_bottom = c_y + fieldbox[Constants.BB_H]
    # vertical overlap: phrase fully inside candidate, candidate's top
    # edge inside the phrase, or candidate's bottom edge inside the phrase
    overlaps = ((p_y >= c_y and p_bottom <= c_bottom) or
                (p_y <= c_y <= p_bottom) or
                (p_y <= c_bottom <= p_bottom))
    if closest_fieldbox_dist < 0 and overlaps:
        return fieldbox
    if (closest_fieldbox_dist > 0 and fieldbox_dist > 0 and
            fieldbox_dist <= closest_fieldbox_dist and overlaps):
        return fieldbox
    return closest_fieldbox
@staticmethod
def get_box_region(
        image, img_name, debug_mode_check, temp_folderpath,
        MIN_BOX_HEIGHT, MIN_BOX_WIDTH, MAX_BOX_HEIGHT=None, MAX_BOX_WIDTH=None):
    """Detect rectangular box regions (tables, checkboxes, frames).

    Extracts the vertical and horizontal line structure via morphological
    erode/dilate, recombines it, and returns the bounding boxes
    [x, y, w, h] of contours whose size passes the min/max filters.
    MAX limits are optional; each applies only when provided.
    (Fix: the original crashed with a None comparison when exactly one
    of the two MAX limits was given, and duplicated the filter branch.)
    Debug mode writes the intermediate line images and each region crop.
    """
    img = image.copy()
    (_, img_bin) = cv2.threshold(img, 128, 255,
                                 cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    # invert: lines/ink become foreground for the morphology below
    img_bin = 255-img_bin
    # Defining a kernel length relative to the page width
    kernel_length = img.shape[1]//200
    # A verticle kernel of (1 X kernel_length), which will detect
    # all the verticle lines from the image.
    verticle_kernel = cv2.getStructuringElement(
        cv2.MORPH_RECT, (1, kernel_length))
    # A horizontal kernel of (kernel_length X 1), which will help
    # to detect all the horizontal lines from the image.
    hori_kernel = cv2.getStructuringElement(
        cv2.MORPH_RECT, (kernel_length, 1))
    # A (3 X 3) kernel used when recombining the two line images.
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    # Morphological operation to detect verticle lines from the image
    img_temp1 = cv2.erode(img_bin, verticle_kernel, iterations=3)
    verticle_lines_img = cv2.dilate(
        img_temp1, verticle_kernel, iterations=3)
    # Morphological operation to detect horizontal lines from the image
    img_temp2 = cv2.erode(img_bin, hori_kernel, iterations=3)
    horizontal_lines_img = cv2.dilate(img_temp2, hori_kernel, iterations=3)
    # Weighting parameters: blend the two line images equally
    alpha = 0.5
    beta = 1.0 - alpha
    img_final_bin = cv2.addWeighted(
        verticle_lines_img, alpha, horizontal_lines_img, beta, 0.0)
    img_final_bin = cv2.erode(~img_final_bin, kernel, iterations=2)
    (_, img_final_bin) = cv2.threshold(img_final_bin, 128,
                                       255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    if(debug_mode_check is True):
        cv2.imwrite(temp_folderpath+"//"+img_name +
                    "verticle_line.png", verticle_lines_img)
        cv2.imwrite(temp_folderpath+"//"+img_name +
                    "horizontal_line.png", horizontal_lines_img)
        cv2.imwrite(temp_folderpath+"//"+img_name +
                    "img_final_bin.png", img_final_bin)
    # Find contours for the image, which will detect all the boxes
    contours, _ = cv2.findContours(
        img_final_bin, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    bboxes_region = []
    for c in contours:
        # Returns the location and width,height for every contour
        x, y, w, h = cv2.boundingRect(c)
        # unified size filter (the original had two near-identical branches)
        if w < MIN_BOX_WIDTH or h < MIN_BOX_HEIGHT:
            continue
        if MAX_BOX_WIDTH is not None and w > MAX_BOX_WIDTH:
            continue
        if MAX_BOX_HEIGHT is not None and h > MAX_BOX_HEIGHT:
            continue
        bboxes_region.append([x, y, w, h])
        if(debug_mode_check is True):
            new_img = img[y:y+h, x:x+w]
            cv2.imwrite(temp_folderpath+"//"+img_name+str(x) +
                        '_'+str(y) + '.png', new_img)
    return bboxes_region
@staticmethod
def get_updated_within_box(within_bbox, scaling_factor):
if(len(within_bbox) > 0):
for i in [0, 2]:
within_bbox[i] = round(
within_bbox[i] * scaling_factor.get('hor', 1))
for i in [1, 3]:
within_bbox[i] = round(
within_bbox[i] * scaling_factor.get('ver', 1))
return within_bbox
@staticmethod
def get_updated_text_bbox(text_bboxes, scaling_factor):
if(len(text_bboxes) > 0):
for bbox in text_bboxes:
for i in [0, 2]:
bbox['bbox'][i] = round(
bbox['bbox'][i] * scaling_factor.get('hor', 1))
for i in [1, 3]:
bbox['bbox'][i] = round(
bbox['bbox'][i] * scaling_factor.get('ver', 1))
return text_bboxes
@staticmethod
def get_invalid_keys(truth_dict, test_dict) -> list:
"""Compare two dictionary objects and return invalid keys by using one of them as reference
Args:
truth_dict (dict): The object containing all valid keys
test_dict (dict): The object to evaluate for presence of invalid keys
Returns:
list: The list of invalid keys
"""
def __get_all_keys_recursively(parent_key, dict_obj):
all_keys = []
for k, val in dict_obj.items():
key = k if parent_key is None or len(
parent_key) == 0 else f"{parent_key}->{k}"
if not key in all_keys:
all_keys.append(key)
if isinstance(val, dict):
all_keys += __get_all_keys_recursively(key, val)
return all_keys
truth_keys = __get_all_keys_recursively(None, truth_dict)
test_keys = __get_all_keys_recursively(None, test_dict)
return list(set(test_keys)-set(truth_keys))
@staticmethod
def get_updated_config_dict(from_dict, default_dict):
config_dict_temp = copy.deepcopy(default_dict)
for key in from_dict:
if isinstance(from_dict[key], dict):
if config_dict_temp.get(key) is None:
config_dict_temp[key] = from_dict[key]
else:
config_dict_temp[key] = ExtractorHelper.get_updated_config_dict(
from_dict[key], config_dict_temp[key])
else:
if config_dict_temp.get(key) is None:
config_dict_temp[key] = from_dict[key]
return config_dict_temp
|
Infosys/Document-Extraction-Libraries
|
infy_field_extractor/src/infy_field_extractor/internal/extractor_helper.py
|
extractor_helper.py
|
py
| 26,643 |
python
|
en
|
code
| 6 |
github-code
|
6
|
[
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_H",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_H",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_H",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_H",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_H",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 82,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 83,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 83,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_W",
"line_number": 84,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_H",
"line_number": 85,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 85,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 89,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 99,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_H",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 106,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 106,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_H",
"line_number": 107,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 107,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 109,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 109,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 110,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 112,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 129,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_W",
"line_number": 130,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_W",
"line_number": 132,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 141,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 141,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_W",
"line_number": 142,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 144,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 145,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_W",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 158,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_W",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 159,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_H",
"line_number": 160,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 160,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 161,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 162,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 192,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 192,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_W",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 193,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_H",
"line_number": 193,
"usage_type": "attribute"
},
{
"api_name": "cv2.threshold",
"line_number": 195,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY_INV",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "cv2.findContours",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "cv2.RETR_TREE",
"line_number": 198,
"usage_type": "attribute"
},
{
"api_name": "cv2.CHAIN_APPROX_SIMPLE",
"line_number": 198,
"usage_type": "attribute"
},
{
"api_name": "cv2.drawContours",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 201,
"usage_type": "call"
},
{
"api_name": "cv2.drawContours",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "cv2.arcLength",
"line_number": 224,
"usage_type": "call"
},
{
"api_name": "cv2.approxPolyDP",
"line_number": 225,
"usage_type": "call"
},
{
"api_name": "cv2.boundingRect",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.RECT_EDGES",
"line_number": 229,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 229,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 247,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 247,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 248,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 248,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_W",
"line_number": 249,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 249,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_H",
"line_number": 250,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 250,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 254,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 254,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 255,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 255,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 256,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 256,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_W",
"line_number": 256,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 257,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 257,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_H",
"line_number": 257,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 263,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 263,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 264,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 264,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_H",
"line_number": 264,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 265,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 265,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_W",
"line_number": 265,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 266,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 266,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 274,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 274,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.TESSERACT_MIN_DPI",
"line_number": 277,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 277,
"usage_type": "name"
},
{
"api_name": "warnings.warn",
"line_number": 279,
"usage_type": "call"
},
{
"api_name": "warnings.warn",
"line_number": 284,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 289,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 289,
"usage_type": "name"
},
{
"api_name": "os.path.splitext",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 292,
"usage_type": "name"
},
{
"api_name": "os.path.split",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 293,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 295,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 295,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 301,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 301,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 302,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 302,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_W",
"line_number": 302,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_H",
"line_number": 302,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 305,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 305,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 318,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 318,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 319,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 319,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 319,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 320,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 320,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 321,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 321,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 321,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 326,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 326,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 327,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 327,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_W",
"line_number": 327,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 328,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 328,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 329,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 329,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_W",
"line_number": 329,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 334,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 334,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 335,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 335,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_H",
"line_number": 335,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 336,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 336,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 337,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 337,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_H",
"line_number": 337,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 342,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 342,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 343,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 343,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_H",
"line_number": 343,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 344,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 344,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 345,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 345,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_H",
"line_number": 345,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 354,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 354,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 356,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 356,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 370,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 370,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 371,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 371,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_W",
"line_number": 371,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 373,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 373,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_W",
"line_number": 373,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 375,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 375,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_W",
"line_number": 375,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 378,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 378,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 379,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 379,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_W",
"line_number": 379,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 381,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 381,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_W",
"line_number": 381,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_X",
"line_number": 383,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 383,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_W",
"line_number": 383,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 394,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 394,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 395,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 395,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_H",
"line_number": 395,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 397,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 397,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_H",
"line_number": 397,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 399,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 399,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_W",
"line_number": 399,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_H",
"line_number": 399,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 402,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 402,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 403,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 403,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_H",
"line_number": 403,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 405,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 405,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_H",
"line_number": 405,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_Y",
"line_number": 407,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants",
"line_number": 407,
"usage_type": "name"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_W",
"line_number": 407,
"usage_type": "attribute"
},
{
"api_name": "infy_field_extractor.internal.constants.Constants.BB_H",
"line_number": 407,
"usage_type": "attribute"
},
{
"api_name": "cv2.threshold",
"line_number": 417,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 418,
"usage_type": "attribute"
},
{
"api_name": "cv2.THRESH_OTSU",
"line_number": 418,
"usage_type": "attribute"
},
{
"api_name": "cv2.getStructuringElement",
"line_number": 426,
"usage_type": "call"
},
{
"api_name": "cv2.MORPH_RECT",
"line_number": 427,
"usage_type": "attribute"
},
{
"api_name": "cv2.getStructuringElement",
"line_number": 430,
"usage_type": "call"
},
{
"api_name": "cv2.MORPH_RECT",
"line_number": 431,
"usage_type": "attribute"
},
{
"api_name": "cv2.getStructuringElement",
"line_number": 434,
"usage_type": "call"
},
{
"api_name": "cv2.MORPH_RECT",
"line_number": 434,
"usage_type": "attribute"
},
{
"api_name": "cv2.erode",
"line_number": 435,
"usage_type": "call"
},
{
"api_name": "cv2.dilate",
"line_number": 436,
"usage_type": "call"
},
{
"api_name": "cv2.erode",
"line_number": 439,
"usage_type": "call"
},
{
"api_name": "cv2.dilate",
"line_number": 440,
"usage_type": "call"
},
{
"api_name": "cv2.addWeighted",
"line_number": 448,
"usage_type": "call"
},
{
"api_name": "cv2.erode",
"line_number": 450,
"usage_type": "call"
},
{
"api_name": "cv2.threshold",
"line_number": 451,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 452,
"usage_type": "attribute"
},
{
"api_name": "cv2.THRESH_OTSU",
"line_number": 452,
"usage_type": "attribute"
},
{
"api_name": "cv2.imwrite",
"line_number": 454,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 456,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 458,
"usage_type": "call"
},
{
"api_name": "cv2.findContours",
"line_number": 461,
"usage_type": "call"
},
{
"api_name": "cv2.RETR_TREE",
"line_number": 462,
"usage_type": "attribute"
},
{
"api_name": "cv2.CHAIN_APPROX_SIMPLE",
"line_number": 462,
"usage_type": "attribute"
},
{
"api_name": "cv2.boundingRect",
"line_number": 468,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 475,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 483,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 539,
"usage_type": "call"
}
] |
23158389057
|
import os
import argparse
import torch
from torchvision import datasets, transforms
from torch.utils.data.sampler import WeightedRandomSampler
from sklearn.model_selection import train_test_split
from collections import Counter
import numpy as np
# Transforms: augment (train only) and convert images to tensors in [0.0, 1.0].
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),   # random 32x32 crop after 4px padding
    transforms.RandomHorizontalFlip(),      # flip horizontally with probability 0.5
    transforms.ToTensor()                   # HWC uint8 -> CHW float in [0, 1]
])
# Test-time transform: tensor conversion only, no augmentation.
transform_test = transforms.Compose([transforms.ToTensor()])
# Cifar10 classes
cifar10_classes = ('airplane 0', 'automobile 1', 'bird 2', 'cat 3',
            'deer 4', 'dog 5', 'frog 6', 'horse 7', 'ship 8', 'truck 9')
def to_numpy(tensor):
    '''Convert a torch tensor to a numpy array, detaching first when it tracks gradients.'''
    if tensor.requires_grad:
        return tensor.detach().cpu().numpy()
    return tensor.cpu().numpy()
def index_to_label(index_dict):
    '''Return a new dict whose class-index keys are replaced by class-label keys.

    Assumes the keys of index_dict are the contiguous class indexes 0..len-1.
    '''
    return {cifar10_classes[idx]: index_dict[idx] for idx in range(len(index_dict))}
def prepareTrainset(args, X_train, y_train):
    '''
    Prepare the training set, limiting the number of samples in some classes.

    Classes listed in ``args.classes_to_limit`` keep only their first
    ``args.data_limit_in_classes`` samples; every other class keeps all of
    its samples.

    Parameters:
        args:
            args.classes_to_limit: classes that need fewer samples, e.g. [2,4,9] -> ['bird', 'deer', 'truck']
            args.data_limit_in_classes: per-class sample limit, e.g. 2400
            args.verbose: if truthy, print the resulting class distribution
        X_train: original training data (images), HWC uint8 arrays
        y_train: original training targets (class indexes)

    Returns:
        list of (image, label) tuples where images are float32 CHW arrays in [0, 1]
    '''
    # HWC -> CHW, then normalize pixel values into [0.0, 1.0].
    X_train = np.rollaxis(X_train, 3, 1)
    X_train = (X_train/255.0)
    X_train = X_train.astype(np.float32)
    train_idx = []
    for i in range(10):
        indexes = [idx for idx in range(len(y_train)) if y_train[idx] == i]
        # Truncate only the limited classes; all indexes are extended either way,
        # so the old duplicated else-branch is unnecessary.
        if i in args.classes_to_limit:
            indexes = indexes[:args.data_limit_in_classes]
        train_idx.extend(indexes)
    trainset = [(X_train[i], y_train[i]) for i in train_idx]
    if args.verbose:
        # Report the post-limit class distribution (avoid shadowing builtin `id`).
        y_train = [y_train[j] for j in train_idx]
        print(f'\nTraining dataset: \n{len(y_train)}\n{index_to_label(dict(Counter(y_train)))}')
    return trainset
def prepareValset(args, X_val, y_val):
    '''Prepare the validation set of 1,000 samples (100 images per class).'''
    # HWC -> CHW, normalize into [0, 1] (float64), then downcast to float32.
    images = (np.rollaxis(X_val, 3, 1) / 255.0).astype(np.float32)
    valset = [(img, y_val[i]) for i, img in enumerate(images)]
    # Verbose
    if args.verbose:
        print(f'\nValidation dataset: \n{len(y_val)}\n{index_to_label(dict(Counter(y_val)))}')
    return valset
def train_data_sampler(args, y_train):
    '''Sampling strategy for the training batches.

    Weighted over-sampling: each observation is drawn with probability
    inversely proportional to the size of its class, so batches see a
    balanced class distribution on average.
    '''
    # Collect the indices that survive the per-class limit (mirrors prepareTrainset).
    train_idx = []
    for cls in range(10):
        cls_indexes = [idx for idx, label in enumerate(y_train) if label == cls]
        if cls in args.classes_to_limit:
            cls_indexes = cls_indexes[:args.data_limit_in_classes]
        train_idx.extend(cls_indexes)
    train_targets = [y_train[i] for i in train_idx]
    # Per-sample weight = 1 / (size of that sample's class).
    class_sample_count = np.unique(train_targets, return_counts=True)[1]
    weight = 1. / class_sample_count
    samples_weight = torch.from_numpy(weight[train_targets])
    return WeightedRandomSampler(samples_weight, num_samples=len(samples_weight), replacement=False)
def loadCIFAR10(args):
    ''' Prepare the train, val, test data loaders.

    # Training set : `args.classes_to_limit` classes will have `args.data_limit_in_classes` samples, other classes will have 4900 samples
    # Validation set: 1,000 samples (making sure that 100 images are in each class)
    # Test set : 10,000 (By default 1000 images are in each class)

    Raises:
        ValueError: if args.data_sampling is neither None nor 'weightedOverSampling'.
    '''
    if args.verbose:
        print('\n***** CIFAR-10 DATASET')
    # path to save CIFAR10 data
    path = f'{os.path.dirname(os.path.dirname(__file__))}/data'
    # Download and load the CIFAR10 dataset
    train_val_set = datasets.CIFAR10(path, download = True, train = True, transform = transform_train)
    testset = datasets.CIFAR10(path, download = True, train = False, transform = transform_test)
    # Divide the CIFAR10 training samples into training and validation set;
    # stratified split keeps 100 validation images per class.
    X_train, X_val, y_train, y_val = train_test_split(train_val_set.data, train_val_set.targets, test_size=0.02, train_size=0.98, stratify=train_val_set.targets, shuffle=True, random_state=42)
    trainset = prepareTrainset(args, X_train, y_train)
    valset = prepareValset(args, X_val, y_val)
    # Train, Val, Test Dataset Loaders
    if args.data_sampling is None:
        trainLoader = torch.utils.data.DataLoader(trainset, batch_size = args.batch_size, shuffle=True)
    elif args.data_sampling == 'weightedOverSampling':
        # Weighted Oversampler for trainLoader
        train_sampler = train_data_sampler(args, y_train)
        trainLoader = torch.utils.data.DataLoader(trainset, batch_size = args.batch_size, sampler=train_sampler)
    else:
        # Fail fast with a clear message instead of crashing later on an
        # unbound trainLoader (the old code raised NameError here).
        raise ValueError(f'Unknown data_sampling option: {args.data_sampling!r}')
    valLoader = torch.utils.data.DataLoader(valset, batch_size = args.batch_size, shuffle = True)
    testLoader = torch.utils.data.DataLoader(testset, batch_size = args.batch_size, shuffle = True)
    if args.verbose:
        print(f'\nTest dataset: \n{len(testset.targets)}\n{index_to_label(dict(Counter(testset.targets)))}')
    return trainLoader, valLoader, testLoader
def loadCIFAR10_testset(batch_size = 100):
    '''Return a shuffled DataLoader over the CIFAR10 test set (downloading the data if needed).'''
    # path to save CIFAR10 data
    data_dir = f'{os.path.dirname(os.path.dirname(__file__))}/data'
    # Download and load the testset
    testset = datasets.CIFAR10(data_dir, download = True, train = False, transform = transform_test)
    return torch.utils.data.DataLoader(testset, batch_size = batch_size, shuffle = True)
# To check and visualize dataset independently
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Dataset_loader')
    parser.add_argument('--batch_size', default=64, type=int)
    parser.add_argument('--classes_to_limit', default=[2, 4, 9], choices=[i for i in range(10)])
    parser.add_argument('--data_limit_in_classes', default=2450, type=int)
    # NOTE(review): argparse `type=bool` treats any non-empty string as True;
    # rely on the defaults or switch to store_true flags if CLI control is needed.
    parser.add_argument('--verbose', default=True, type=bool)
    parser.add_argument('--visualize_a_batch', default=True, type=bool)
    parser.add_argument('--data_sampling', default='weightedOverSampling', type=str,
                        choices=['weightedOverSampling', None],
                        help='Data sampling to tackle imbalanced dataset')
    args = parser.parse_args()
    # Cifar10-dataset data loaders
    trainLoader, valLoader, testLoader = loadCIFAR10(args)
    if args.visualize_a_batch:
        print('\n***** Visualize a batch')
        dataiter = iter(trainLoader)
        # Use the builtin next(): the DataLoader iterator's .next() method was
        # removed in newer PyTorch releases.
        images, labels = next(dataiter)
        print(images.shape, labels.shape)
        print(f'Pixel Values are B/W: [{torch.min(images).item()}, {torch.max(images).item()}]')
        print('\n***** Visualize some batches to see class distributions after applying weighted data over sampling')
        class_distribution = []
        for i, data in enumerate(trainLoader):
            _, labels = data
            class_distribution.append(np.unique(labels, return_counts=True)[1])
            print(class_distribution[i])
            if i > 9:
                break
        print('\n**** class-wise average distribution in batches after applying weighted data over sampling')
        class_distribution = np.array(class_distribution)
        class_distribution_avg = np.average(class_distribution, axis=0)
        print(f'{np.round(class_distribution_avg, decimals=2)}\n')
|
minkeshtu/Imbalanced-Cifar-10-classification
|
dataset/cifar10Loader.py
|
cifar10Loader.py
|
py
| 8,218 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "torchvision.transforms.Compose",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.RandomCrop",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.RandomHorizontalFlip",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.rollaxis",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "collections.Counter",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.rollaxis",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line_number": 80,
"usage_type": "attribute"
},
{
"api_name": "collections.Counter",
"line_number": 85,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.sampler.WeightedRandomSampler",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 124,
"usage_type": "attribute"
},
{
"api_name": "torchvision.datasets.CIFAR10",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets",
"line_number": 127,
"usage_type": "name"
},
{
"api_name": "torchvision.datasets.CIFAR10",
"line_number": 128,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets",
"line_number": 128,
"usage_type": "name"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 133,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 139,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 143,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 143,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 144,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 145,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "collections.Counter",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "torchvision.datasets.CIFAR10",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "torchvision.datasets",
"line_number": 158,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 159,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 159,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "torch.min",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "torch.max",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "numpy.average",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "numpy.round",
"line_number": 198,
"usage_type": "call"
}
] |
26889276534
|
import torch
def iou_score(output, target):
    '''Compute the smoothed IoU between a prediction and a ground-truth mask.

    Torch tensors are converted to CPU numpy arrays first (predictions are
    passed through a sigmoid); both arrays are then binarized at 0.5.
    '''
    smooth = 1e-5
    if torch.is_tensor(output):
        output = torch.sigmoid(output).data.cpu().numpy()
    if torch.is_tensor(target):
        target = target.data.cpu().numpy()
    pred_mask = output > 0.5
    true_mask = target > 0.5
    intersection = (pred_mask & true_mask).sum()
    union = (pred_mask | true_mask).sum()
    return (intersection + smooth) / (union + smooth)
|
krishnakaushik25/Medical-Image-Segmentation-DL
|
modular_code/src/ML_Pipeline/iou.py
|
iou.py
|
py
| 423 |
python
|
en
|
code
| 7 |
github-code
|
6
|
[
{
"api_name": "torch.is_tensor",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torch.sigmoid",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.is_tensor",
"line_number": 10,
"usage_type": "call"
}
] |
72966516669
|
# encoding=utf8
from Source import *
import logging,re,mylex
logging.basicConfig(format=' %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %H:%M:%S',level=logging.DEBUG)
index=0
# token_stream="""INT IDENTIFIER '(' ')' '{' IDENTIFIER ';' IDENTIFIER '=' CONSTANT ';' WHILE '(' SIZEOF '(' INT ')' ')' '{' IDENTIFIER '=' CONSTANT ';' '}' RETURN CONSTANT ';' '}''""".split(" ")
token_stream="""INT IDENTIFIER '(' ')' '{' IDENTIFIER ';' IDENTIFIER ';' IDENTIFIER ';' IDENTIFIER ';' IDENTIFIER '=' CONSTANT ';' '}' """.split(" ")
# INT IDENTIFIER '(' ')' '{' IDENTIFIER ';' IDENTIFIER ';' IDENTIFIER ';' '}'
error = []
def main():
    """Entry point: print the lexer output and the hard-coded token stream,
    then parse the stream starting from the 'translation_unit' production."""
    # Read in the token sequence
    s=mylex.get_s()
    print(s)
    print(token_stream)
    # Kick off recursive-descent parsing via reader()
    print(reader('translation_unit',CONTROLLER))
    # t=get_terminals()
    # print(t)
    # tt=get_terminals()
    # print(tt)
    # derive_controller(1)
    # print(index)
def reader(key, num_to_choose):
    """Dispatch one derivation step.

    key: name of the production to invoke.
    num_to_choose: index of the alternative to pick; when it equals
        Source.CONTROLLER this acts as a dispatcher that tries every
        alternative of the production, otherwise the alternative
        Source.c_dict[key][num_to_choose] is applied directly.
    Module globals used:
        index: cursor into token_stream, pointing at the next token to read.
        token_stream: the token sequence fed to the parser.
    Returns True if the derivation matched, False otherwise."""
    if (num_to_choose == CONTROLLER):
        return derive_controller(key)
    else:
        return derive(key, num_to_choose)
def derive_controller(key):
    """Try every alternative of production `key` in order (backtracking).

    Restores the token cursor before each attempt; returns True as soon as
    one alternative matches, False if none does or the key is unknown.
    """
    global index
    # logging.info("derive_controller called with key:------"+key+"--------at index:"+str(index)+" token:"+str(token_stream[index]))
    if (c_dict.get(key) is None):
        # Unknown production name: record the error and fail this branch.
        logging.error("error when parsing!No such key in dictionary.产生式出现了不可解决的异常")
        error_process(key,"产生式出现了不可解决的异常")
        return False
    else:
        derived_result = c_dict[key]
        # logging.info("derive_controller::::::"+key+"->"+str(derived_result))
        # Remember the cursor so each alternative starts from the same token.
        index_save=index
        for i in range(0,len(derived_result)):
            # Backtrack before trying the next alternative.
            index=index_save
            result=derive(key,i)
            if(result ==True):
                if derived_result[i]!="":
                    logging.info("匹配成功\t"+"<"+key+"> -> "+derived_result[i])
                return result
            else:
                continue
        # logging.error("没有在便利所有产生式后找到合适的产生式:key:"+key+"\t derive_result:"+str(derived_result))
        return False
def derive(key, num_to_choose):
    """Apply alternative `num_to_choose` of production `key` against the
    token stream, advancing the global cursor on each matched terminal.

    Returns True when the whole alternative matches, False otherwise.
    """
    global index
    derive_list=c_dict.get(key)
    if(num_to_choose>len(derive_list)-1):
        logging.error("fatal error!产生式种类不全!")
        error_process(key,"fatal error!产生式种类不全!")
    derive_sentence=derive_list[num_to_choose]
    # logging.info("derive called with options: deriving :--------"+derive_sentence+"------------")
    # Case: the alternative is a single non-terminal.
    if derive_sentence in c_dict.keys():
        return derive_controller(derive_sentence)
    else:
        # Case: the alternative is a single terminal.
        if derive_sentence in get_terminals():
            if derive_sentence=="":
                # Case: the production derives the empty string.
                # logging.info("产生式选择问为空")
                return True
            if derive_sentence==token_stream[index]:
                index+=1
            else:
                return False
            logging.info(key+"推出了一个终结符"+derive_sentence)
            return True
        # Case: the alternative is a space-separated sequence of symbols;
        # match each symbol in turn.
        derive_sentence_list=re.split(r'\s+',derive_sentence)
        for i in range(0,len(derive_sentence_list)):
            if derive_sentence_list[i] in c_dict.keys():
                result=derive_controller(derive_sentence_list[i])
            elif derive_sentence_list[i] in get_terminals():
                # Matched a terminal?
                # TODO should inc index?
                if derive_sentence_list[i]=="":
                    result=True
                else:
                    if derive_sentence_list[i]==token_stream[index]:
                        # logging.info("匹配终结符"+token_stream[index])
                        index+=1
                        result=True
                    else:
                        result=False
            else:
                result=False
            if result==False:
                # logging.info("this is not the path.选择了错误的产生式:"+key+"->"+ str(derive_sentence_list))
                return False
        # logging.info("成功匹配产生式"+str({"key":key,"value":derive_sentence}))
        return True
def term(token):
    '''Return True when the token at the current cursor position equals `token`.'''
    current = token_stream[index]
    return current == token
def error_process(key, error_info):
    '''Record a parse error: production name, message, and token position.'''
    entry = {'key': key, 'error_info': error_info, "位置": index}
    error.append(entry)
# Script entry point.
if __name__ == "__main__":
    main()
|
zhaoguoquan94/compiler_syntax_analysor
|
systax_analysisor.py
|
systax_analysisor.py
|
py
| 4,919 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "logging.basicConfig",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "mylex.get_s",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "re.split",
"line_number": 91,
"usage_type": "call"
}
] |
1558159854
|
from enum import IntEnum
# Scale constants (pixel sizes); the derived values below are fixed ratios.
SCALE_2X: bool = False  # presumably toggles 2x upscaled rendering — confirm against renderer
WORLD_SCALE: int = 32  # base tile size
OBJECT_SCALE: int = WORLD_SCALE // 2  # objects use half a tile
SPRITE_SCALE: int = WORLD_SCALE * 2  # sprites span two tiles
ANIMATION_NUM_FRAMES: int = 4  # frames per animation cycle
RESOLUTION_X: int = WORLD_SCALE * 20  # horizontal resolution: 20 tiles wide
RESOLUTION_Y: int = int(RESOLUTION_X * 0.625) # 640x400 aspect ratio
# Three-tone (dark/mid/light) clothes palettes; names suggest terrain themes
# (snow/grass/stone/ember) — confirm against the sprite-tinting code.
SPRITE_CLOTHES_COLORS = ['#42200f', '#834222', '#9d633d']
SNOW_CLOTHES_COLORS = ['#8996c6', '#aac2ff', '#a5acc4']
GRASS_CLOTHES_COLOR = ['#0c2618', '#123924', '#266e48']
STONE_CLOTHES_COLOR = ['#4a4a4a', '#8c8c8c', '#adadad']
EMBER_CLOTHES_COLOR = ['#ad0021', '#ef6221', '#efce21']
class ObjectType(IntEnum):
    """Closed set of object categories (int-valued)."""
    FOOD = 0
    DANGER = 1
    BONUS = 2
    WEAPON = 3
OBJECT_RADIUS: float = 0.25  # object radius in world units (presumably for collision — confirm)
OBJECT_NUM_VERSIONS: int = 6  # variants per object type (presumably sprite variants — confirm)
|
cgloeckner/prehistoric_guy
|
core/constants.py
|
constants.py
|
py
| 713 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "enum.IntEnum",
"line_number": 21,
"usage_type": "name"
}
] |
27264929540
|
#!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
import argparse
import configparser
import json
import logging
import textwrap
import argcomplete
from las import Client, Credentials
from las.credentials import MissingCredentials, read_from_file
from .__version__ import __version__
from .util import NotProvided
from .parser import (
create_app_clients_parser,
create_assets_parser,
create_datasets_parser,
create_deployment_environments_parser,
create_documents_parser,
create_logs_parser,
create_models_parser,
create_organizations_parser,
create_payment_methods_parser,
create_plans_parser,
create_predictions_parser,
create_roles_parser,
create_secrets_parser,
create_transitions_parser,
create_users_parser,
create_workflows_parser,
)
def create_parser():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description=textwrap.dedent('''
Command Line Interface for Cradl API, see --help for more info or visit https//docs.cradl.ai. To use tab
completion make sure you have global completion activated. See argcomplete docs for more information:
https://kislyuk.github.io/argcomplete/
'''),
)
parser.add_argument('--profile', '-p')
parser.add_argument('--verbose', '-v', action='count', default=0)
parser.add_argument('--version', action='version', version=f'%(prog)s {__version__}')
subparsers = parser.add_subparsers()
create_app_clients_parser(subparsers)
create_assets_parser(subparsers)
create_datasets_parser(subparsers)
create_deployment_environments_parser(subparsers)
create_documents_parser(subparsers)
create_logs_parser(subparsers)
create_models_parser(subparsers)
create_organizations_parser(subparsers)
create_payment_methods_parser(subparsers)
create_plans_parser(subparsers)
create_predictions_parser(subparsers)
create_roles_parser(subparsers)
create_secrets_parser(subparsers)
create_transitions_parser(subparsers)
create_users_parser(subparsers)
create_workflows_parser(subparsers)
argcomplete.autocomplete(parser)
return parser
def set_verbosity(verbose):
verbosity_levels = [logging.WARNING, logging.INFO, logging.DEBUG]
verbosity = verbosity_levels[min(verbose, len(verbosity_levels) - 1)]
logging.getLogger().setLevel(verbosity)
logging.getLogger('las').setLevel(verbosity)
def main():
parser = create_parser()
args = vars(parser.parse_args())
set_verbosity(args.pop('verbose'))
profile = args.pop('profile', None)
try:
cmd = args.pop('cmd')
except:
parser.print_help()
exit(1)
try:
if profile:
credentials = Credentials(*read_from_file(section=profile))
args['las_client'] = Client(credentials)
else:
args['las_client'] = Client()
except (configparser.NoOptionError, configparser.NoSectionError, MissingCredentials) as e:
logging.exception(e)
print('Could not locate credentials.')
return
kwargs = {k: v for k, v in args.items() if v != NotProvided}
if kwargs:
result = cmd(**kwargs)
result = json.dumps(result, indent=2) if isinstance(result, dict) else result
print(result)
else:
parser.print_help()
exit(1)
if __name__ == '__main__':
main()
|
LucidtechAI/las-cli
|
lascli/__main__.py
|
__main__.py
|
py
| 3,452 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentDefaultsHelpFormatter",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "textwrap.dedent",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "parser.add_argument",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "parser.add_argument",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "parser.add_argument",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "__version__.__version__",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "parser.add_subparsers",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "parser.create_app_clients_parser",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "parser.create_assets_parser",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "parser.create_datasets_parser",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "parser.create_deployment_environments_parser",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "parser.create_documents_parser",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "parser.create_logs_parser",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "parser.create_models_parser",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "parser.create_organizations_parser",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "parser.create_payment_methods_parser",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "parser.create_plans_parser",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "parser.create_predictions_parser",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "parser.create_roles_parser",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "parser.create_secrets_parser",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "parser.create_transitions_parser",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "parser.create_users_parser",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "parser.create_workflows_parser",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "argcomplete.autocomplete",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "logging.WARNING",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "logging.INFO",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "logging.DEBUG",
"line_number": 72,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "parser.parse_args",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "parser.print_help",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "las.Credentials",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "las.credentials.read_from_file",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "las.Client",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "las.Client",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "configparser.NoOptionError",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "configparser.NoSectionError",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "las.credentials.MissingCredentials",
"line_number": 96,
"usage_type": "name"
},
{
"api_name": "logging.exception",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "util.NotProvided",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "parser.print_help",
"line_number": 107,
"usage_type": "call"
}
] |
36766531397
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
import time
data = pd.read_csv('C:/Users/SwetaMankala/Desktop/Assignments/EAI6000/ma_statewide_2020_04_01.csv',low_memory= False)
data.head(10)
# Checking the shape of the data set
data.shape
data['location'].unique()
# Using the commqand below we can see what exactly our columns look like
data.columns
data['county_name'].unique()
data['subject_race'].unique()
data.info()
n=data['raw_row_number'].count()
x=data['subject_race'].value_counts()
print('Numerical columns:',data.select_dtypes(include=np.number).columns)
print('Categorical columns:',data.select_dtypes(include='object').columns)
df = pd.DataFrame(data)
median1 = df['subject_age'].median()
df['subject_age'].fillna(median1, inplace = True)
#Segregate the values based on the categories, remove the nulls and normalize the data column
df['race'] = pd.Series(len(df['subject_race']), index=df.index)
df['race'] = 0
#To assign null values
df.loc[(df['subject_race'] != 'hispanic') |
(df['subject_race'] != 'white') |
(df['subject_race'] != 'black') |
(df['subject_race'] != 'asian/pacific islander') |
(df['subject_race'] != 'other') |
(df['subject_race'].isnull() == True), 'race'] = np.nan
#To assign the categorical values to the dataframe 'race'
df.loc[(df['subject_race'] == 'hispanic') |
(df['subject_race'] == 'white') |
(df['subject_race'] == 'black') |
(df['subject_race'] == 'other') |
(df['subject_race'] == 'asian/pacific islander'), 'race'] = df['subject_race']
race_copy = df['race'].copy(deep = True)
# Fill NaN values.
df['race'].fillna(value = 1, inplace = True)
# Obtain values for every race.Axis=0 for rows
race_copy.dropna(axis = 0, inplace = True)
sorted_race = race_copy.value_counts(normalize = True).sort_index()
# Fill one values for individual person with randomly picked from random choice.
df['race'] = df['race'].apply(lambda x: np.random.choice([x for x in sorted_race.index],
replace = True, p = sorted_race) if (x == 1) else x).astype(str)
#Normalize=True prints the relative frequency of the values
print("\nFilled NaNs normalized:\n", df['race'].value_counts(normalize = True))
df['subject_race'] = df['race']
df['subject_race'].value_counts()
#Segregate the values based on the categories, remove the nulls and normalize the data column
df['sex'] = pd.Series(len(df['subject_sex']), index = df.index)
df['sex'] = 0
# Randomly stick sex to every user with NaN value.
df.loc[(df['subject_sex'] != 'male') |
(df['subject_sex'] != 'female') |
(df['subject_sex'].isnull() == True), 'sex'] = np.nan
df.loc[(df['subject_sex'] == 'male') |
(df['subject_sex'] == 'female'), 'sex'] = df['subject_sex']
# Create a copy to calculate proportions.
sex_copy = df['sex'].copy(deep = True)
# Fill NaN values.
df['sex'].fillna(value = 1, inplace = True)
# Obtain values for every sex.
sex_copy.dropna(axis = 0, inplace = True)
sorted_sex = sex_copy.value_counts(normalize = True).sort_index()
# Fill one values in suspector_sex_rand with randomly picked from random choice.
df['sex'] = df['sex'].apply(lambda x: np.random.choice([x for x in sorted_sex.index],
replace = True, p = sorted_sex) if (x == 1) else x).astype(str)
print("Gender proportions after filled NaNs: \n", df['sex'].value_counts(normalize = True))
df['subject_sex'] = df['sex']
df['subject_sex'].value_counts()
#Segregate the values based on the categories, remove the nulls and normalize the data column
df['outcome_v'] = pd.Series(len(df['outcome']), index = df.index)
df['outcome_v'] = 0
# Randomly stick sex to every user with NaN value.
df.loc[(df['outcome'] != 'citation') |
(df['outcome'] != 'warning') |
(df['outcome'] != 'arrest') |
(df['outcome'].isnull() == True), 'outcome_v'] = np.nan
df.loc[(df['outcome'] != 'citation') |
(df['outcome'] != 'warning') |
(df['outcome'] != 'arrest'), 'outcome_v'] = df['outcome']
# Create a copy to calculate proportions.
outcome_copy = df['outcome_v'].copy(deep = True)
# Fill NaN values.
df['outcome_v'].fillna(value = 1, inplace = True)
outcome_copy.dropna(axis = 0, inplace = True)
sorted_outcome = outcome_copy.value_counts(normalize = True).sort_index()
# Fill one values in suspector_sex_rand with randomly picked from random choice.
df['outcome_v'] = df['outcome_v'].apply(lambda x: np.random.choice([x for x in sorted_outcome.index],
replace = True, p = sorted_outcome) if (x == 1) else x).astype(str)
print("Outcome proportions after filled NaNs: \n", df['outcome_v'].value_counts(normalize = True))
df['outcome'] = df['outcome_v']
df['outcome'].value_counts()
#Segregate the values based on the categories, remove the nulls and normalize the data column
df['vehicle'] = pd.Series(len(df['vehicle_type']), index = df.index)
df['vehicle'] = 0
df.loc[(df['vehicle_type'] != 'Commerical') |
(df['vehicle_type'] != 'Passenger') |
(df['vehicle_type'] != 'Motorcycle') |
(df['vehicle_type'] != 'Taxi/Livery') |
(df['vehicle_type'] != 'Trailer') |
(df['vehicle_type'].isnull() == True), 'vehicle'] = np.nan
df.loc[(df['vehicle_type'] != 'Commerical') |
(df['vehicle_type'] != 'Passenger') |
(df['vehicle_type'] != 'Motorcycle') |
(df['vehicle_type'] != 'Taxi/Livery') |
(df['vehicle_type'] != 'Trailer'), 'vehicle'] = df['vehicle_type']
# Create a copy to calculate proportions.
outcome_copy = df['vehicle'].copy(deep = True)
# Fill NaN values.
df['vehicle'].fillna(value = 1, inplace = True)
outcome_copy.dropna(axis = 0, inplace = True)
sorted_outcome = outcome_copy.value_counts(normalize = True).sort_index()
# Fill one values in suspector_sex_rand with randomly picked from random choice.
df['vehicle'] = df['vehicle'].apply(lambda x: np.random.choice([x for x in sorted_outcome.index],
replace = True, p = sorted_outcome) if (x == 1) else x).astype(str)
print("Vehicle Type proportions after filled NaNs: \n", df['vehicle'].value_counts(normalize = True))
df['vehicle_type'] = df['vehicle']
df['vehicle_type'].value_counts()
print(df.isnull().sum())
#Convert the object type variables to string
df['subject_sex'] = df['subject_sex'].astype(str)
df['subject_race'] = df['subject_race'].astype(str)
df['type'] = df['type'].astype(str)
df['arrest_made'] = df['arrest_made'].astype(str)
df['citation_issued'] = df['citation_issued'].astype(str)
df['outcome'] = df['outcome'].astype(str)
df['contraband_found'] = df['contraband_found'].astype(str)
df['contraband_drugs'] = df['contraband_drugs'].astype(str)
df['warning_issued'] = df['warning_issued'].astype(str)
df['contraband_weapons'] = df['contraband_weapons'].astype(str)
df['contraband_alcohol'] = df['contraband_alcohol'].astype(str)
df['contraband_other'] = df['contraband_other'].astype(str)
df['frisk_performed'] = df['frisk_performed'].astype(str)
df['search_conducted'] = df['search_conducted'].astype(str)
df['search_basis'] = df['search_basis'].astype(str)
df['reason_for_stop'] = df['reason_for_stop'].astype(str)
df['vehicle_type'] = df['vehicle_type'].astype(str)
df['vehicle_registration_state'] = df['vehicle_registration_state'].astype(str)
df['raw_Race'] = df['raw_Race'].astype(str)
data[data.subject_sex == "male"].location.value_counts()
data[data.subject_sex == "female"].location.value_counts()
# If we want to see number of violations per gender with respect to their race
race = data.groupby(["subject_sex"]).subject_race.value_counts(normalize= True).unstack()
race
plt.figure(figsize=(12, 8))
race.black.plot(kind="bar")
plt.figure(figsize=(12, 8))
race.white.plot(kind="bar")
plt.figure(figsize=(12, 8))
race.hispanic.plot(kind="bar")
# We want to check which year had the least number of stops
data.date
data['date'] = pd.to_datetime(data.date, format="%Y-%M-%d")
data["year"] = data.date.dt.year
import math
import seaborn as sns
sns.set_style('whitegrid')
# Rounding the integer to the next hundredth value plus an offset of 100
def round(x):
return 100 + int(math.ceil(x / 100.0)) * 100
sns.catplot("subject_sex", col = "reason_for_stop", col_wrap = 3,data = data[data.reason_for_stop.notnull()],kind = "count")
# Get current axis on current figure
axis = plt.gca()
# ylim max value to be set
max_y = data['subject_sex'].value_counts().max()
axis.set_ylim([0, round(max_y)])
# Iterate through the list of axes' patches
for p in axis.patches:
axis.text(p.get_x() + p.get_width()/2., p.get_height(), '%d' % int(p.get_height()),
fontsize=12, color='red', ha='center', va='bottom')
plt.show()
from sklearn.preprocessing import LabelEncoder
label_encoder = LabelEncoder()
df['subject_race'] = label_encoder.fit_transform(df['subject_race'])
df['arrest_made'] = label_encoder.fit_transform(df['arrest_made'])
df['citation_issued'] = label_encoder.fit_transform(df['citation_issued'])
df['outcome'] = label_encoder.fit_transform(df['outcome'])
df['contraband_found'] = label_encoder.fit_transform(df['contraband_found'])
df['contraband_drugs'] = label_encoder.fit_transform(df['contraband_drugs'])
df['contraband_weapons'] = label_encoder.fit_transform(df['contraband_weapons'])
df['contraband_alcohol'] = label_encoder.fit_transform(df['contraband_alcohol'])
df['contraband_other'] = label_encoder.fit_transform(df['contraband_other'])
corr = df.corr()
corr.style.background_gradient(cmap='coolwarm')
|
anirudh0809/fundamentals_of_ai
|
linear_models/eda.py
|
eda.py
|
py
| 9,689 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.number",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.choice",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "pandas.Series",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.choice",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "pandas.Series",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 105,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.choice",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 121,
"usage_type": "attribute"
},
{
"api_name": "pandas.Series",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 137,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.choice",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 193,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 196,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "pandas.to_datetime",
"line_number": 205,
"usage_type": "call"
},
{
"api_name": "seaborn.set_style",
"line_number": 210,
"usage_type": "call"
},
{
"api_name": "math.ceil",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "seaborn.catplot",
"line_number": 215,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.gca",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 217,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 227,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 227,
"usage_type": "name"
},
{
"api_name": "sklearn.preprocessing.LabelEncoder",
"line_number": 230,
"usage_type": "call"
}
] |
39509933349
|
import argparse
from torch.nn import BatchNorm2d
from cifar.norm_layers import MyBatchNorm, BatchInstance, MyLayerNorm, MyGroupNorm, MyInstanceNorm
from cifar.dataset import get_data_loaders,get_default_device
from cifar.train import train
from cifar.model import MyResnet
# python3 train_cifar.py --normalization [ bn | in | bin | ln | gn | nn | torch_bn] --data_dir <directory_containing_data> --output_file <path to the trained model> --n [1 | 2 | 3 ]
my_parser = argparse.ArgumentParser(allow_abbrev=False)
my_parser.add_argument('--normalization', required=True, type=str, action='store',
choices=('bn', 'in', 'bin', 'ln', 'gn', 'nn', 'torch_bn'))
my_parser.add_argument('--data_dir', required=True, type=str, action='store')
my_parser.add_argument('--output_file', required=True, type=str, action='store')
my_parser.add_argument('--n', required=True, type=int, action='store', choices=(1, 2, 3))
args = my_parser.parse_args()
option_to_norm = {'bn': MyBatchNorm, 'in': MyInstanceNorm, 'bin': BatchInstance, 'ln': MyLayerNorm, 'gn': MyGroupNorm,
'nn': None, 'torch_bn': BatchNorm2d}
norm_layer = option_to_norm[args.normalization]
device = get_default_device()
if norm_layer == MyLayerNorm:
train_loader, val_loader, test_loader = get_data_loaders(args.data_dir, device, drop_last=True)
else:
train_loader, val_loader, test_loader = get_data_loaders(args.data_dir, device)
n = args.n
r = 10
resnet_model = MyResnet(n, r, norm=norm_layer)
model = resnet_model.to(device)
train(device, model, train_loader, val_loader, model_save_path=args.output_file, already_trained=False,
learning_rate=0.1, momentumValue=0.9, wieghtDecayValue=0.0001)
|
aps1310/COL_870
|
Assignment 1/2020MCS2448_2020MCS2468/train_cifar.py
|
train_cifar.py
|
py
| 1,757 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cifar.norm_layers.MyBatchNorm",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "cifar.norm_layers.MyInstanceNorm",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "cifar.norm_layers.BatchInstance",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "cifar.norm_layers.MyLayerNorm",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "cifar.norm_layers.MyGroupNorm",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "cifar.dataset.get_default_device",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "cifar.norm_layers.MyLayerNorm",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "cifar.dataset.get_data_loaders",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "cifar.dataset.get_data_loaders",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "cifar.model.MyResnet",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "cifar.train.train",
"line_number": 42,
"usage_type": "call"
}
] |
40033915805
|
#import necessary modules
import numpy as np
from numpy import load
from sklearn.model_selection import StratifiedKFold
from sklearn.svm import SVC
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer, LabelEncoder, MinMaxScaler, StandardScaler
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
data = load('trained_faces_enc.npz')
encodings, labels = data['arr_0'], data['arr_1']
n_folds = 10
kf = StratifiedKFold(n_splits=n_folds, shuffle=True)
accuracies = []
precisions = []
recalls = []
f1_scores = []
for train_index, val_index in kf.split(encodings, labels):
X_train, X_val = encodings[train_index], encodings[val_index]
y_train, y_val = labels[train_index], labels[val_index]
in_encoder = Normalizer(norm='l2')
in_encoder.fit(X_train)
trainX = in_encoder.transform(X_train)
valX = in_encoder.transform(X_val)
lb = LabelEncoder()
lb.fit(y_train)
y_train = lb.transform(y_train)
y_val = lb.transform(y_val)
svm = make_pipeline(MinMaxScaler(), SVC(kernel='rbf', C=1, gamma=0.01, probability=True))
svm.fit(trainX, y_train)
y_pred = svm.predict(X_val)
accuracy = accuracy_score(y_val, y_pred)
precision = precision_score(y_val, y_pred, average='weighted')
recall = recall_score(y_val, y_pred, average='weighted')
f1 = f1_score(y_val, y_pred, average='weighted')
accuracies.append(accuracy)
precisions.append(precision)
recalls.append(recall)
f1_scores.append(f1)
avg_accuracy = np.mean(accuracies)
avg_precision = np.mean(precisions)
avg_recall = np.mean(recalls)
avg_f1_score = np.mean(f1_scores)
print("Average Accuracy:", avg_accuracy*100)
print("Average Precision:", avg_precision*100)
print("Average Recall:", avg_recall*100)
print("Average F1 Score:", avg_f1_score*100)
|
karth1ksr/Face-Recognition-with-Facenet
|
cross validation.py
|
cross validation.py
|
py
| 1,907 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "numpy.load",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.StratifiedKFold",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.Normalizer",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.LabelEncoder",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "sklearn.pipeline.make_pipeline",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.MinMaxScaler",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "sklearn.svm.SVC",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.precision_score",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.recall_score",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.f1_score",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 55,
"usage_type": "call"
}
] |
35428261847
|
"""
This script detects laughter within all audio files contained in the directory
`root_dir/audio/raw`, and save one pickle file for each audio file with
laughter timecodes in the directory `root_dir/audio/laughter`.
"""
import argparse
import os
import os.path as osp
import pickle
from laughter_detection.core.laughter_detector import LaughterDetector
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument(
"root_dir", type=str, help="Path to the root of FunnyNet dataset"
)
parser.add_argument(
"--embedding-name",
"-e",
type=str,
help="embedding model to use.",
default="byola",
)
parser.add_argument(
"--laughter-dir",
"-l",
type=str,
help="Path to the directory to save detected laughters",
default=None,
)
parser.add_argument(
"--n-clusters", "-n", type=int, help="Number of clusters", default=3
)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_arguments()
embedding_name = args.embedding_name
root_dir = args.root_dir
laughter_dir = args.laughter_dir
n_clusters = args.n_clusters
if not laughter_dir:
laughter_dir = osp.join(root_dir, "audio", "laughter", embedding_name)
if not osp.exists(laughter_dir):
os.makedirs(laughter_dir)
raw_dir = osp.join(root_dir, "audio", "raw")
audio_filenames = sorted(os.listdir(raw_dir))
laughter_detector = LaughterDetector(
embedding_name, root_dir, num_workers=6, n_clusters=n_clusters
)
pred_timecodes = laughter_detector.detect_laughters()
for current_filename, current_timecodes in pred_timecodes.items():
laughter_filename = f"{current_filename[:-4]}.pk"
laughter_path = osp.join(laughter_dir, laughter_filename)
# Save laughter timecodes
with open(laughter_path, "wb") as f:
pickle.dump(current_timecodes, f)
|
robincourant/FunnyNet
|
laughter_detection/scripts/detect_laughters.py
|
detect_laughters.py
|
py
| 1,990 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "argparse.ArgumentParser",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "os.makedirs",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "os.listdir",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "laughter_detection.core.laughter_detector.LaughterDetector",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "pickle.dump",
"line_number": 70,
"usage_type": "call"
}
] |
11833093627
|
import qrcode
from django.db import models
from django.utils.text import slugify
from django.utils.html import mark_safe
from cms.models import Title
from django.contrib.sites.models import Site
import uuid
class QrCodeUrlPost(models.Model):
TITLE_URLS = [(o.path, o.title) for o in Title.objects.filter(publisher_is_draft=False).exclude(path='')]
uuid_field = models.UUIDField(blank=True, editable=False)
page_url = models.CharField(blank=True, max_length=20, choices=TITLE_URLS)
url = models.CharField(blank=True, max_length=255, verbose_name="Url")
thumbnail = models.ImageField(blank=True, upload_to='qrCode')
slug = models.SlugField(max_length=255, unique=True, blank=True)
name = models.CharField(max_length=255, unique=True, verbose_name="Name")
last_updated = models.DateTimeField(auto_now=True)
start_date = models.DateField(blank=True, null=True)
activate = models.BooleanField(default=False, verbose_name="Activate")
created_at = models.DateTimeField(auto_now=True)
def save(self, *args, **kwargs):
current_site = Site.objects.get_current()
if not self.uuid_field:
self.uuid_field = uuid.uuid4()
self.slug = self.uuid_field
if self.page_url:
self.url = str(current_site.domain) + '/' + str(self.page_url)
# self.url = 'http://127.0.0.1:8000/' + str(self.page_url)
img = qrcode.make(self.url)
type(img) # qrcode.image.pil.PilImage
img_name = str(self.slug) + '.png'
img.save('./media/qrCode/' + img_name)
self.thumbnail = img_name
super().save(*args, **kwargs)
def img_preview(self):
return mark_safe(f'<img src = "/media/qrCode/{self.thumbnail}" width = "150"/>')
def link_preview(self):
return mark_safe(f'<a href="{self.url}" target=_blank>{self.uuid_field}</a>')
|
vazvieirafrederic67/qrCodePlugin
|
models.py
|
models.py
|
py
| 1,878 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.db.models.Model",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "cms.models.Title.objects.filter",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cms.models.Title.objects",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "cms.models.Title",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.db.models.UUIDField",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.db.models.ImageField",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.db.models.SlugField",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateField",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "django.db.models.BooleanField",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "django.db.models.DateTimeField",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "django.contrib.sites.models.Site.objects.get_current",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.contrib.sites.models.Site.objects",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.sites.models.Site",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "uuid.uuid4",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "qrcode.make",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "django.utils.html.mark_safe",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "django.utils.html.mark_safe",
"line_number": 49,
"usage_type": "call"
}
] |
13006866992
|
import numpy as np
import pandas as pd
import cv2
from PIL import Image
import joblib
import cv2
import numpy as np
import time
import pandas as pd
import imagehash
import multiprocessing as mp
import logging
import os
from dataclasses import dataclass, field
from typing import List, Dict
from slideextract.config import ocr_feature_names, ocr_new_slide_clf, DATA_FILE_PATH
from slideextract.comparator.docDiffBuilder import docDiffBuilder, doc_diff_comparator
from slideextract.processing.ffmpeg_sample_video import sample_video
logger = logging.getLogger('baseSlide')
@dataclass
class BaseSlideExtractor:
"""
base class for slide extractor,
all slide extractor should inherit from this class
have general methods to extract images from video
compare slides based on imagebased feature and ocr feature
"""
name = "baseSlideExtractor"
data_file_path = None
def __init__(self, *args, **kwargs) -> None:
pass
def extract_slides(self, mp4_file_path: str):
NotImplementedError
def extract_slides_from_file(self, mp4_file_path: str, threads: int = 0):
self.data_file_path = os.path.join(DATA_FILE_PATH, os.path.basename(mp4_file_path).split(".")[0], self.name)
output_path = os.path.join(self.data_file_path, "frames")
frames_data = sample_video(mp4_file_path, output_path=output_path, threads=threads)
return self.extract_slides_from_frames(frames_data)
def extract_slides_from_frames(self, frames_data: dict):
NotImplementedError
def _generate_ocr_doc_feature(self, ocr_paragraph1: str, ocr_paragraph2: str, doc_diff_comparator: docDiffBuilder=doc_diff_comparator):
"""
generate feature feature based on ocr results
"""
doc1 = ocr_paragraph1
doc2 = ocr_paragraph2
doc_compare_dict = doc_diff_comparator.compare(doc1, doc2)
doc_compare_dict['frame_token_ct'] = max([len(doc1), len(doc2)])
# need to test if dataframe results results
feature_df = pd.DataFrame([doc_compare_dict])
feature_df = feature_df.rename(columns={'letter_dis':'letter_dissim'})
return feature_df[ocr_feature_names]
def _compare_ocr_results(self, ocr_slide_indices: List, ocr_paragraphs: Dict, clf_model) -> pd.DataFrame:
ocr_slide_record = []
for index in ocr_slide_indices:
if index > 0:
feature_df = \
self._generate_ocr_doc_feature(
ocr_paragraph1=ocr_paragraphs[index], ocr_paragraph2=ocr_paragraphs[index-1])
ocr_is_new_slide = ocr_new_slide_clf.predict(feature_df)[0]
ocr_slide_record.append((index, ocr_is_new_slide))
ocr_new_slide_df = pd.DataFrame(ocr_slide_record)
ocr_new_slide_df.columns = ['index', 'ocr_is_new_slide']
return ocr_new_slide_df
def _classify_if_ocr_same(self, feature_df: pd.DataFrame, clf_model) -> bool:
"""
classify if ocr results are the same
"""
return clf_model.predict(feature_df)[0]
@classmethod
def compare_frames(frames, comparators):
"""
Use the output of 1, and a list of python callable[(image1, image2), float],
return the dataframe with the following columns:
index: index of the frame
phash: percetual hash of the image with previous frame (create phash comparater)
dhash: dhash diff of the image with previous frame (create dhash comparater)
"""
data = []
prev_frame = None
for index, frame in frames.items():
row = {"index": index}
if prev_frame is not None:
for comparator, name in comparators:
row[name] = comparator(prev_frame, frame)
data.append(row)
prev_frame = frame
return pd.DataFrame(data)
|
shex1627/slideextract
|
src/slideextract/slide_extractors/baseSlideExtractor.py
|
baseSlideExtractor.py
|
py
| 4,003 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "logging.getLogger",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "slideextract.config.DATA_FILE_PATH",
"line_number": 42,
"usage_type": "argument"
},
{
"api_name": "os.path",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "slideextract.processing.ffmpeg_sample_video.sample_video",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "slideextract.comparator.docDiffBuilder.docDiffBuilder",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "slideextract.comparator.docDiffBuilder.doc_diff_comparator",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "slideextract.comparator.docDiffBuilder.doc_diff_comparator.compare",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "slideextract.comparator.docDiffBuilder.doc_diff_comparator",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "slideextract.config.ocr_feature_names",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "slideextract.config.ocr_new_slide_clf.predict",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "slideextract.config.ocr_new_slide_clf",
"line_number": 70,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 24,
"usage_type": "name"
}
] |
26869499538
|
from flask import Flask, redirect, request, session, Response, jsonify
from flask_login import LoginManager
from flask_mail import Mail
from myapp.models.base import db
# 初始化 Loginmanager
login_manager = LoginManager()
mail = Mail()
def create_app():
app = Flask(__name__)
app.config.from_object('myapp.secure')
app.config.from_object('myapp.setting')
# 注册flask-login
login_manager.init_app(app)
# 登录页面
login_manager.login_view = 'web.login'
login_manager.login_message = '请先进行登陆'
# 邮件注册
mail.init_app(app)
# 注册蓝图
register_blueprint(app)
# 注册SQLAlchemy
db.init_app(app)
db.create_all(app=app)
app.response_class = AutoJsonifyResponse
return app
def register_blueprint(app):
# 注册book里web的蓝图
from myapp.controller import api
app.register_blueprint(api)
class AutoJsonifyResponse(Response):
@classmethod
def force_type(cls, response, environ=None):
if isinstance(response, (list, dict)):
response = jsonify(response)
return super(Response, cls).force_type(response, environ)
|
102244653/WebByFlask
|
myapp/__init__.py
|
__init__.py
|
py
| 1,159 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask_login.LoginManager",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask_mail.Mail",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "myapp.models.base.db.init_app",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "myapp.models.base.db",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "myapp.models.base.db.create_all",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "myapp.models.base.db",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "myapp.controller.api",
"line_number": 35,
"usage_type": "argument"
},
{
"api_name": "flask.Response",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "flask.Response",
"line_number": 43,
"usage_type": "argument"
}
] |
34105917071
|
import cv2 as cv
import numpy as np
def diferenca():
captura = cv.VideoCapture(0)
while True:
ret, frame = captura.read()
frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
cv.imshow("Video", np.subtract(frame, quarto))
k = cv.waitKey(30) & 0xff
if k == 27:
break
captura.release()
cv.destroyAllWindows()
if __name__ == '__main__':
diferenca()
|
gabrielga-dev/visao-computacional-2022
|
s6/main.py
|
main.py
|
py
| 419 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "cv2.VideoCapture",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.subtract",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 19,
"usage_type": "call"
}
] |
23856904095
|
'''
Created by Han Xu
email:[email protected]
'''
import xml.etree.ElementTree as ET
def read_configuration(xml_file):
# 解析 XML 文件
tree = ET.parse(xml_file)
root = tree.getroot()
# 获取camvid数据集路径
camvid_path = root.find('camvid_path').text
# 获取模型路径
HANet_oneHAM_path = root.find('HANet_oneHAM_path').text
HANet_twoHAM_path = root.find('HANet_twoHAM_path').text
# 获取保存模型的路径
save_path = root.find('save_path').text
return camvid_path, HANet_oneHAM_path, HANet_twoHAM_path, save_path
# 用于测试的 XML 文件路径
xml_file_path = "conf.xml"
# 读取配置信息
import os
# 获取当前脚本所在的目录
current_directory = os.path.dirname(os.path.abspath(__file__))
# 切换到该目录
os.chdir(current_directory)
camvid_path, HANet_oneHAM_path, HANet_twoHAM_path, save_path = read_configuration(xml_file_path)
if __name__ == "__main__":
# 打印读取到的信息
print(f"camvid_path: {camvid_path}")
print(f"HANet_oneHAM_path: {HANet_oneHAM_path}")
print(f"HANet_twoHAM_path: {HANet_twoHAM_path}")
print(f"save_path: {save_path}")
|
UnderTurrets/HeightDriven_DoubleAttentions_Net
|
conf/__init__.py
|
__init__.py
|
py
| 1,167 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 35,
"usage_type": "call"
}
] |
40017294295
|
import mysql.connector
import matplotlib.pyplot as plt
import argparse
import os
import random
import pandas as pd
import datetime
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
def createDatabase(dataBase="smartBottle"):
"""连接数据库,创建database"""
mydb = mysql.connector.connect(
host='localhost',
user='root',
passwd='123456'
)
mycursor = mydb.cursor()
mycursor.execute(f"CREATE DATABASE {dataBase}")
mydb.commit()
return mydb
def connectDatabase(dataBase="smartBottle"):
"""连接mysql"""
mydb = mysql.connector.connect(
host='localhost',
user='root',
passwd='123456',
database=dataBase)
return mydb
def createTable(mydb, tableName="drinkData"):
"""创建表"""
mycursor = mydb.cursor()
# mycursor.execute(f"drop table {tableName}")
# mydb.commit()
mycursor.execute(f"CREATE TABLE `{tableName}` \
(`id` int(11) NOT NULL AUTO_INCREMENT,\
`data_extract` varchar(1000),\
`label` varchar(1000),\
`saveTime` datetime,\
`user_id` int(11),\
PRIMARY KEY (`id`)\
);")
mydb.commit()
return mydb
def insertData(mydb, tableName = "drinkData"):
"""插入数据"""
mycursor = mydb.cursor()
sql = f"INSERT INTO {tableName} (user_id, data, label, saveTime) VALUES(%s, %s, %s, %s);"
with open('../data/80ml.txt', 'r') as f:
con = f.readlines()
# print(len(con)/3)
for i in range(int(len(con)/3)):
i *= 3
sql = "INSERT INTO drinkData (user_id, data_y, data_z, label) VALUES(%s, %s, %s, %s);"
y = con[i]
z = con[i+1]
label = con[i+2]
val = (1, y, z, label)
mycursor.execute(sql, val)
mydb.commit()
def readFromExcel(mydb, path):
files = os.scandir(path)
mycursor = mydb.cursor()
for f in files:
df = pd.read_excel(f.path)
start, end = int(df.loc[0, 'S']), int(df.loc[0, 'E'])
drink = f.path.split('/')[-1].split('_')[0]
if drink not in ['Stable', 'Walk']:
if start == end:
continue
drink = int(drink[:-2])
else:
drink = 0
y = df.loc[:, 'Y']
z = df.loc[:, 'Z']
y = [str(round(num/1080, 3)) for num in y]
z = [str(round(num/1080, 3)) for num in z]
label = ['0' for i in range(len(y))]
for i in range(start-1, end-1):
label[i] = str(round(drink / (end - start), 3))
data_y = ','.join(y)
data_z = ','.join(z)
label = ','.join(label)
sql = "insert into drinkData (user_id, data_y, data_z, label) VALUES(%s, %s, %s, %s)"
val = (1, data_y, data_z, label)
mycursor.execute(sql, val)
mydb.commit()
return f'success save {len(files)} data'
def readFromExcel2(path):
df = pd.read_excel(path)
y = df.loc[:, 'Y']
z = df.loc[:, 'Z']
y = [str(round(num/1080, 3)) for num in y]
z = [str(round(num/1080, 3)) for num in z]
cob = np.array([y, z])
data = featExtra(cob.T)
print(f'data {data.shape}')
return data.reshape(1, -1)[:, :70], y, z
def dataAug(mydb, length):
"""
params:
length: data length after augrement
num: nums of data generated
"""
pass
def randomDeletePointDA(data, label, length):
"""data, label长度在dataAug中判断, """
dataLen = len(data)
indices = list(range(dataLen))
random.shuffle(indices)
remove_indices = indices[:dataLen-length]
new_data = [data[i] for i in range(dataLen) if i not in remove_indices]
new_label = [label[i] for i in range(dataLen) if i not in remove_indices]
return new_data, new_label
def crossDA():
pass
def featExtra(input):
"""
输入前将数据处理成numpy
return np
"""
# 创建PCA对象 感觉可以放到前面 不过先别想那么多
pca = PCA(n_components=1)
result = pca.fit_transform(input)
return result.flatten()
# for j in range(10):
# sql = "INSERT INTO drinkData (user_id, data, label, saveTime) VALUES(%s, %s, %s, %s);"
# data =[str(random.randint(0, 100)) for i in range(100)]
# label = [str(random.choice([0, 5])) for i in range(100)]
# data = ','.join(data)
# label = ','.join(label)
# now = datetime.datetime.now()
# now = now.strftime("%Y-%m-%d %H:%M:%S")
# val = (1, data, label, now)
# mycursor.execute(sql, val)
# print(mycursor.rowcount, 'record inserted.')
# mydb.commit()
def getAllData(mydb, tableName="drinkData"):
"""读取数据"""
mycursor = mydb.cursor()
mycursor.execute(f"SELECT * FROM {tableName}")
myresult = mycursor.fetchall()
# for x in myresult:
# print(x)
return myresult
def visual(y, z, label):
y = y.split(',')
z = z.split(',')
label = label.split(',')
y = [float(i) for i in y]
z = [float(i) for i in z]
label = [float(i) for i in label]
time = [i for i in range(len(label))]
plt.figure(1)
plt.subplot(2, 1, 1)
plt.plot(time, y, label='y')
plt.plot(time, z, label='z')
plt.xlabel("time")
plt.ylabel("angle")
plt.legend()
plt.figure(1)
plt.subplot(2, 1, 2)
plt.plot(time, label)
plt.xlabel("time")
plt.ylabel("drink")
print(f'total drink {sum(label)}')
plt.show()
def visual2(data, label):
time = [i for i in range(len(label))]
plt.figure(1)
plt.subplot(2, 1, 1)
plt.plot(time, data, label='data')
plt.xlabel("time")
plt.ylabel("angle")
plt.legend()
plt.figure(1)
plt.subplot(2, 1, 2)
plt.plot(time, label)
plt.xlabel("time")
plt.ylabel("drink")
print(f'total drink {sum(label)}')
plt.show()
def list2str(arr):
new_arr = [str(round(i,3)) for i in arr]
return ','.join(new_arr)
def raw2new():
"""
delete all data in new_database,
raw_data -> dataAug(size=70) -> new_data
"""
mydb = connectDatabase("smartBottle")
result = getAllData(mydb)
# save data from raw_data to new_data (得处理一下之后导入都是只导入新的)
scaler = MinMaxScaler(feature_range=(-1, 1))
mycursor = mydb.cursor()
# delete old database
# sql = "delete from drinkDataEnhan"
# mycursor.execute(sql)
# mydb.commit()
#
for line in result:
y = [float(num) for num in line[2].split(',')]
z = [float(num) for num in line[3].split(',')]
label = [float(num) for num in line[4].split(',')]
data = np.array([y, z])
data = featExtra(data.T)
#NOTE - change data to y
y = np.array(y)
data = scaler.fit_transform(y.reshape(-1, 1))
data = data.flatten().tolist()
for i in range(30):
if len(data) <= 70:
break
if len(data) != len(label):
break
new_data, new_label = randomDeletePointDA(data, label, 70)
sql = "INSERT INTO drinkDataEnhan (user_id, data_extract, label) VALUES(%s, %s, %s);"
val = (1, list2str(new_data), list2str(new_label))
mycursor.execute(sql, val)
mydb.commit()
def checkData():
mydb = connectDatabase("smartBottle")
result = getAllData(mydb, "drinkDataEnhan")
cnt = dict()
for line in result:
label = [float(num) for num in line[2].split(',')]
drink = sum(label)
k = str(int(drink/10))
if k in cnt.keys():
value = cnt[k]
cnt.update({k: value+1})
else:
cnt[k] = 1
print(f'drink label {sum(label)}')
print(f'data nums {len(result)}')
print(f'dict {cnt}')
if __name__ == "__main__":
# 参数
# parser = argparse.ArgumentParser()
# parser.add_argument('--dataBaseName', type=str, default="smartBottle",
# help='name of the database')
# config = parser.parse_args()
"""
raw_data -> dataAug -> new_data
raw_data: id, data_x, data_y, data_z, label, save_time, user_id
new_data: id, data, label, save_time, user_id
"""
raw2new()
checkData()
# createTable(mydb, "drinkDataEnhan")
# result =
# result = getAllData(mydb)
# visual(result[0][2], result[0][3], result[0][4])
|
YuTheon/NUS_AIOT_web2
|
setMysqlData.py
|
setMysqlData.py
|
py
| 7,386 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "mysql.connector.connector.connect",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "mysql.connector.connector",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "mysql.connector.connector.connect",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "mysql.connector.connector",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "os.scandir",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 97,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 119,
"usage_type": "call"
},
{
"api_name": "sklearn.decomposition.PCA",
"line_number": 135,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 170,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 170,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 171,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 171,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 173,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 174,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 174,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 175,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 175,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 176,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 178,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 178,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 180,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 180,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 182,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 185,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 185,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 189,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 189,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 190,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 190,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 192,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 193,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 193,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.legend",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 194,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 196,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 196,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 197,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 198,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 199,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 200,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 200,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 203,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "sklearn.preprocessing.MinMaxScaler",
"line_number": 219,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 233,
"usage_type": "call"
}
] |
74535254588
|
import seaborn as sns
from matplotlib import pyplot as plt
import tools
import numpy as np
import pandas as pd
def plot_missrate_comp():
processed_row = tools.load_pkl('outputs/feature_explore[ards@origin]/row_missrate.pkl').flatten()
processed_col = tools.load_pkl('outputs/feature_explore[ards@origin]/col_missrate.pkl').flatten()
raw_row = tools.load_pkl('outputs/feature_explore[raw@version]/row_missrate.pkl').flatten()
raw_col = tools.load_pkl('outputs/feature_explore[raw@version]/col_missrate.pkl').flatten()
row_data = np.concatenate([processed_row, raw_row], axis=0)
col_data = np.concatenate([processed_col, raw_col], axis=0)
for data, label in zip([row_data, col_data], ['row', 'col']):
df = pd.DataFrame(data, columns=['data'])
df['source'] = 'raw'
lens = len(processed_row) if label == 'row' else len(processed_col)
df.loc[:lens, 'source'] = 'processed'
sns.histplot(df, x='data', hue='source', bins=20, stat='proportion', common_norm=False, shrink=0.95, element='bars', edgecolor=None)
plt.xlabel(f'{label} missrate')
plt.savefig(f'test_plot/{label}_missrate.png')
plt.close()
if __name__ == '__main__':
plot_missrate_comp()
|
on1262/sepsisdataprocessing
|
test.py
|
test.py
|
py
| 1,239 |
python
|
en
|
code
| 2 |
github-code
|
6
|
[
{
"api_name": "tools.load_pkl",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "tools.load_pkl",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "tools.load_pkl",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "tools.load_pkl",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "seaborn.histplot",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.xlabel",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 22,
"usage_type": "name"
}
] |
3989954821
|
from typing import TYPE_CHECKING, Any, Dict, List, Self, Union, cast
from attrs import define as _attrs_define
from attrs import field as _attrs_field
from ..types import UNSET, Unset
if TYPE_CHECKING:
from ..schemas.deposit import Deposit
from ..schemas.recent_2_result_type_1 import Recent2ResultType1
@_attrs_define
class Recent2:
"""
Attributes:
result (Union['Deposit', 'Recent2ResultType1', Unset]):
error (Union[Unset, List[str]]):
"""
result: Union["Deposit", "Recent2ResultType1", Unset] = UNSET
error: Union[Unset, List[str]] = UNSET
additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
from ..schemas.deposit import Deposit
result: Union[Dict[str, Any], Unset]
if isinstance(self.result, Unset):
result = UNSET
elif isinstance(self.result, Deposit):
result = UNSET
if not isinstance(self.result, Unset):
result = self.result.to_dict()
else:
result = UNSET
if not isinstance(self.result, Unset):
result = self.result.to_dict()
error: Union[Unset, List[str]] = UNSET
if not isinstance(self.error, Unset):
error = self.error
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update({})
if result is not UNSET:
field_dict["result"] = result
if error is not UNSET:
field_dict["error"] = error
return field_dict
@classmethod
def from_dict(cls: Self, src_dict: Dict[str, Any]) -> Self:
from ..schemas.deposit import Deposit
from ..schemas.recent_2_result_type_1 import Recent2ResultType1
d = src_dict.copy()
def _parse_result(
data: object,
) -> Union["Deposit", "Recent2ResultType1", Unset]:
if isinstance(data, Unset):
return data
try:
if not isinstance(data, dict):
raise TypeError()
_result_type_0 = data
result_type_0: Union[Unset, Deposit]
if isinstance(_result_type_0, Unset):
result_type_0 = UNSET
else:
result_type_0 = Deposit.from_dict(_result_type_0)
return result_type_0
except: # noqa: E722
pass
if not isinstance(data, dict):
raise TypeError()
_result_type_1 = data
result_type_1: Union[Unset, Recent2ResultType1]
if isinstance(_result_type_1, Unset):
result_type_1 = UNSET
else:
result_type_1 = Recent2ResultType1.from_dict(_result_type_1)
return result_type_1
result = _parse_result(d.pop("result", UNSET))
error = cast(List[str], d.pop("error", UNSET))
recent_2 = cls(
result=result,
error=error,
)
recent_2.additional_properties = d
return recent_2
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
|
tlg7c5/kraken-connector
|
kraken_connector/schemas/recent_2.py
|
recent_2.py
|
py
| 3,654 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "types.Unset",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "types.UNSET",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "types.Unset",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "types.UNSET",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "attrs.field",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "types.Unset",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "types.Unset",
"line_number": 29,
"usage_type": "argument"
},
{
"api_name": "types.UNSET",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "schemas.deposit.Deposit",
"line_number": 32,
"usage_type": "argument"
},
{
"api_name": "types.UNSET",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "types.Unset",
"line_number": 34,
"usage_type": "argument"
},
{
"api_name": "types.UNSET",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "types.Unset",
"line_number": 39,
"usage_type": "argument"
},
{
"api_name": "typing.Union",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "types.Unset",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "types.UNSET",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "types.Unset",
"line_number": 43,
"usage_type": "argument"
},
{
"api_name": "typing.Dict",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "types.UNSET",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "types.UNSET",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "typing.Self",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "types.Unset",
"line_number": 66,
"usage_type": "argument"
},
{
"api_name": "typing.Union",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "types.Unset",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "schemas.deposit.Deposit",
"line_number": 72,
"usage_type": "name"
},
{
"api_name": "types.Unset",
"line_number": 73,
"usage_type": "argument"
},
{
"api_name": "types.UNSET",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "schemas.deposit.Deposit.from_dict",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "schemas.deposit.Deposit",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "types.Unset",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "schemas.recent_2_result_type_1.Recent2ResultType1",
"line_number": 84,
"usage_type": "name"
},
{
"api_name": "types.Unset",
"line_number": 85,
"usage_type": "argument"
},
{
"api_name": "types.UNSET",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "schemas.recent_2_result_type_1.Recent2ResultType1.from_dict",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "schemas.recent_2_result_type_1.Recent2ResultType1",
"line_number": 88,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "types.Unset",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "types.UNSET",
"line_number": 92,
"usage_type": "argument"
},
{
"api_name": "typing.cast",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "types.UNSET",
"line_number": 94,
"usage_type": "argument"
},
{
"api_name": "typing.List",
"line_number": 105,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 108,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 111,
"usage_type": "name"
},
{
"api_name": "attrs.define",
"line_number": 13,
"usage_type": "name"
}
] |
32661755491
|
"""
A basic CNN model
/**
* @author Xinping Wang
* @email [[email protected]]
* @create date 2021-09-11 09:32:41
* @modify date 2021-09-11 09:32:41
* @desc [description]
*/
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import numpy as np
from imageio import imread
classes = ("W", "R", "N1", "N2", "N3")
class Passthrough(nn.Module):
def __init__(self, n_channels=8, n_classes=5):
super().__init__()
self.conv1 = nn.Conv1d(8, 16, 15)
self.conv2 = nn.Conv1d(16, 32, 9)
self.conv3 = nn.Conv1d(32, 64, 5)
self.conv4 = nn.Conv1d(64, 128, 3)
self.conv5 = nn.Conv1d(128, 128, 3)
self.flat = nn.Flatten(1, -1)
self.fc = nn.Linear(128*5, 5)
self.maxpool = nn.MaxPool1d(5, stride=5)
self.leakyrelu = nn.LeakyReLU(0.01)
self.dropout = nn.Dropout(p=0.1)
self.softmax = nn.LogSoftmax()
self.avgpool = nn.AdaptiveAvgPool1d(5)
def forward(self, x):
x = self.dropout(self.maxpool(self.leakyrelu(self.conv1(x))))
x = self.dropout(self.maxpool(self.leakyrelu(self.conv2(x))))
x = self.dropout(self.maxpool(self.leakyrelu(self.conv3(x))))
x = self.dropout(self.maxpool(self.leakyrelu(self.conv4(x))))
x = self.avgpool(self.conv5(x))
x = self.softmax(self.fc(self.flat(x)))
# x = nn.MaxPool1d(5,5)
# x = F.relu(self.conv2(x))
# x = F.relu(self.conv3(x))
# x = F.relu(self.conv4(x))
# x = F.relu(self.conv5(x))
# x = F.softmax(self.fc(self.flat(x)), dim=1)
return x
|
CillianWang/ENN
|
Library/Models/CNN.py
|
CNN.py
|
py
| 1,777 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "torch.nn.Module",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv1d",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv1d",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv1d",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv1d",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv1d",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "torch.nn.Flatten",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "torch.nn.MaxPool1d",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "torch.nn.LeakyReLU",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "torch.nn.Dropout",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "torch.nn.LogSoftmax",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "torch.nn.AdaptiveAvgPool1d",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 44,
"usage_type": "name"
}
] |
10623839028
|
import random
from discord import Colour
"""
These are some presets configs, that are predefined
and normally dont need any changes (Thats why they are not in the config file
"""
bottest = True # decides if the bot checks other bots messages
ignorfiles = ['image/gif', 'image/jpeg'] # Content types to ignor. Check out https://en.wikipedia.org/wiki/Media_type
checkorange = 1 # if more or equal than that checks are positive the embed will be orange
checkred = 3 # if more or equal than that checks are positive the embed will be red
helpembedcolour = Colour(random.randint(0, 16777215))
|
veni-vidi-code/VirusTotalDiscordBot
|
Cogs/settings.py
|
settings.py
|
py
| 599 |
python
|
en
|
code
| 3 |
github-code
|
6
|
[
{
"api_name": "discord.Colour",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 18,
"usage_type": "call"
}
] |
72255274108
|
from __future__ import annotations
import asyncio
import datetime
import time
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import asyncpg
import discord
from discord.ext import commands
from utils import (
AvatarsPageSource,
AvatarView,
FieldPageSource,
Pager,
format_bytes,
human_timedelta,
to_bytes,
format_status,
BlankException,
)
from ._base import CogBase
if TYPE_CHECKING:
from bot import Bot
from cogs.context import Context
class UserCommands(CogBase):
@commands.command(name="avatar", aliases=("pfp", "avy", "av"))
async def avatar(
self,
ctx: Context,
*,
user: Optional[Union[discord.Member, discord.User]] = commands.Author,
):
"""Gets the avatar of a user"""
user = user or ctx.author
embed = discord.Embed(
color=self.bot.embedcolor
if user.color == discord.Color.default()
else user.color
)
embed.set_author(name=str(user), icon_url=user.display_avatar.url)
embed.set_image(url=user.display_avatar.url)
sql = """SELECT created_at FROM avatars WHERE user_id = $1 ORDER BY created_at DESC"""
latest_avatar = await self.bot.pool.fetchval(sql, user.id)
if latest_avatar:
embed.timestamp = latest_avatar
embed.set_footer(text="Avatar changed")
await ctx.send(
embed=embed,
view=AvatarView(ctx, user, embed, user.display_avatar),
check_ref=True,
)
@commands.command(name="avatars", aliases=("pfps", "avys", "avs"))
async def avatars(
self, ctx: Context, user: Union[discord.Member, discord.User] = commands.Author
):
"""Shows all of a users avatars"""
sql = """SELECT * FROM avatars WHERE user_id = $1 ORDER BY created_at DESC"""
results = await self.bot.pool.fetch(sql, user.id)
if results == []:
raise ValueError("User has no avatar history saved.")
entries: List[Tuple[str, datetime.datetime, int]] = [
(
r["avatar"],
r["created_at"],
r["id"],
)
for r in results
]
source = AvatarsPageSource(entries=entries)
source.embed.color = (
self.bot.embedcolor if user.color == discord.Color.default() else user.color
)
source.embed.title = f"Avatars for {user}"
pager = Pager(source, ctx=ctx)
await pager.start(ctx)
@commands.command(name="banner")
async def banner(self, ctx: Context, *, user: discord.User = commands.Author):
"""Shows a users banner"""
user = await ctx.bot.fetch_user(user.id)
if user.banner is None:
raise TypeError("This user has no banner.")
file = await user.banner.to_file(
filename=f'banner.{"gif" if user.banner.is_animated() else "png"}'
)
embed = discord.Embed()
embed.set_author(name=f"{str(user)}'s banner", icon_url=user.display_avatar.url)
embed.set_image(
url=f'attachment://banner.{"gif" if user.banner.is_animated() else "png"}'
)
await ctx.send(file=file, embed=embed)
async def _index_member(self, guild: discord.Guild, member: discord.Member) -> bool:
sql = """
INSERT INTO member_join_logs (member_id, guild_id, time)
SELECT $1, $2, $3
WHERE NOT EXISTS (
SELECT 1
FROM member_join_logs
WHERE member_id = $1 AND guild_id = $2 AND time = $3
);
"""
await self.bot.pool.execute(
sql,
member.id,
guild.id,
member.joined_at,
)
return True
@commands.group(name="joins", invoke_without_command=True)
async def joins(
self,
ctx: Context,
*,
user: Union[discord.Member, discord.User] = commands.Author,
):
"""Shows how many times a user joined a server
Note: If they joined before I was added then I will not have any data for them."""
guild = ctx.guild
results: Optional[int] = await self.bot.pool.fetchval(
"SELECT COUNT(member_id) FROM member_join_logs WHERE member_id = $1 AND guild_id = $2",
user.id,
guild.id,
)
if not results:
if isinstance(user, discord.Member):
results = await self._index_member(guild, user)
if results:
results = 1
else:
return await ctx.send(f"I have no join records for {user} in {guild}")
await ctx.send(
f"{user} has joined {guild} {results:,} time{'s' if results > 1 else ''}."
)
@commands.command(name="uptime")
async def uptime(self, ctx: Context, *, member: Optional[discord.Member]):
"""Shows how long a user has been online."""
bot = self.bot
me = bot.user
if me is None or bot.uptime is None:
return
if member is None or member and member.id == me.id:
return await ctx.send(
f"Hello, I have been awake for {human_timedelta(bot.uptime, suffix=False)}."
)
if "uptime" in await self.bot.redis.smembers(f"opted_out:{member.id}"):
raise BlankException(f"Sorry, {member} has opted out from uptime logging.")
results: Optional[datetime.datetime] = await bot.pool.fetchval(
"SELECT time FROM uptime_logs WHERE user_id = $1", member.id
)
message = (
f"{member} has been {format_status(member)} for {human_timedelta(results, suffix=False)}."
if results
else f"{member} has been {format_status(member)} as long as I can tell."
)
await ctx.send(message)
@commands.command(name="usernames", aliases=("names",))
async def usernames(self, ctx: Context, user: discord.User = commands.Author):
results = await self.bot.pool.fetch(
"SELECT * FROM username_logs WHERE user_id = $1 ORDER BY created_at DESC",
user.id,
)
if results == []:
await ctx.send(f"I have no username records for {user}.")
return
entries = [
(
r["username"],
f'{discord.utils.format_dt(r["created_at"], "R")} | {discord.utils.format_dt(r["created_at"], "d")} | `ID: {r["id"]}`',
)
for r in results
]
source = FieldPageSource(entries=entries)
source.embed.color = self.bot.embedcolor
source.embed.title = f"Usernames for {user}"
pager = Pager(source, ctx=ctx)
await pager.start(ctx)
@commands.command(name="discrims", aliases=("discriminators",))
async def discrims(self, ctx: Context, user: discord.User = commands.Author):
"""Shows all discriminators a user has had.
This is the numbers after your username."""
results = await self.bot.pool.fetch(
"SELECT * FROM discrim_logs WHERE user_id = $1 ORDER BY created_at DESC",
user.id,
)
if results == []:
await ctx.send(f"I have no discriminator records for {user}")
return
entries = [
(
f'#{r["discrim"]}',
f'{discord.utils.format_dt(r["created_at"], "R")} | {discord.utils.format_dt(r["created_at"], "d")} | `ID: {r["id"]}`',
)
for r in results
]
source = FieldPageSource(entries=entries)
source.embed.color = self.bot.embedcolor
source.embed.title = f"Discriminators for {user}"
pager = Pager(source, ctx=ctx)
await pager.start(ctx)
@commands.command(name="nicknames", aliases=("nicks",))
async def nicknames(
self,
ctx: Context,
*,
user: discord.User = commands.Author,
):
"""Shows all nicknames a user has had in a guild."""
if ctx.guild is None:
return
results = await self.bot.pool.fetch(
"SELECT * FROM nickname_logs WHERE user_id = $1 AND guild_id = $2 ORDER BY created_at DESC",
user.id,
ctx.guild.id,
)
if results == []:
await ctx.send(f"I have no nickname records for {user} in {ctx.guild}")
return
entries = [
(
r["nickname"],
f'{discord.utils.format_dt(r["created_at"], "R")} | {discord.utils.format_dt(r["created_at"], "d")} | `ID: {r["id"]}`',
)
for r in results
]
source = FieldPageSource(entries=entries)
source.embed.title = f"Nicknames for {user} in {ctx.guild}"
source.embed.color = self.bot.embedcolor
pager = Pager(source, ctx=ctx)
await pager.start(ctx)
@commands.group(
name="avatarhistory",
aliases=("avyh", "pfph", "avh"),
invoke_without_command=True,
)
async def avatar_history(
self, ctx: Context, *, user: discord.User = commands.Author
):
"""Shows the avatar history of a user.
This will only show the first 100, to view them all and in HD run the command `avatars`"""
async with ctx.typing():
sql = """
SELECT * FROM avatars WHERE user_id = $1
ORDER BY created_at DESC LIMIT 100
"""
records: List[asyncpg.Record] = await self.bot.pool.fetch(
sql,
user.id,
)
if records == []:
await ctx.send(f"{user} has no avatar history on record.")
return
avatars = await asyncio.gather(
*[to_bytes(ctx.session, row["avatar"]) for row in records]
)
fp = await format_bytes(ctx.guild.filesize_limit, avatars)
file = discord.File(
fp,
f"{user.id}_avatar_history.png",
)
if len(records) >= 100:
first_avatar: datetime.datetime = await self.bot.pool.fetchval(
"""SELECT created_at FROM avatars WHERE user_id = $1 ORDER BY created_at ASC""",
user.id,
)
else:
first_avatar = records[-1]["created_at"]
embed = discord.Embed(timestamp=first_avatar)
embed.set_footer(text="First avatar saved")
embed.set_author(
name=f"{user}'s avatar history", icon_url=user.display_avatar.url
)
embed.set_image(url=f"attachment://{user.id}_avatar_history.png")
await ctx.send(embed=embed, file=file)
@avatar_history.command(name="server", aliases=("guild",))
async def avatar_history_guild(
self,
ctx: Context,
guild: Optional[discord.Guild] = None,
*,
member: discord.Member = commands.Author,
):
"""Shows the server avatar history of a user."""
guild = guild or ctx.guild
async with ctx.typing():
sql = """
SELECT * FROM guild_avatars WHERE member_id = $1 AND guild_id = $2
ORDER BY created_at DESC LIMIT 100
"""
fetch_start = time.perf_counter()
records: List[asyncpg.Record] = await self.bot.pool.fetch(
sql, member.id, guild.id
)
fetch_end = time.perf_counter()
if records == []:
raise ValueError(f"{member} has no server avatar history on record.")
avatars = await asyncio.gather(
*[to_bytes(ctx.session, row["avatar"]) for row in records]
)
gen_start = time.perf_counter()
fp = await format_bytes(guild.filesize_limit, avatars)
file = discord.File(
fp,
f"{member.id}_avatar_history.png",
)
gen_end = time.perf_counter()
if len(records) == 100:
sql = """SELECT created_at FROM guild_avatars WHERE member_id = $1 AND guild_id = $1 ORDER BY created_at ASC"""
first_avatar: datetime.datetime = await self.bot.pool.fetchval(
sql, member.id, guild.id
)
else:
first_avatar = records[-1]["created_at"]
embed = discord.Embed(timestamp=first_avatar)
embed.set_footer(text="First avatar saved")
embed.set_author(
name=f"{member}'s guild avatar history", icon_url=member.display_avatar.url
)
embed.description = f"`Fetching :` {round(fetch_end - fetch_start, 2)}s\n`Generating:` {round(gen_end - gen_start, 2)}s"
embed.set_image(url=f"attachment://{member.id}_avatar_history.png")
await ctx.send(embed=embed, file=file)
|
LeoCx1000/fish
|
src/cogs/discord_/user.py
|
user.py
|
py
| 12,878 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "_base.CogBase",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "cogs.context.Context",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "discord.Member",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "discord.User",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.Author",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "discord.Embed",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "discord.Color.default",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "discord.Color",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "utils.AvatarView",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "cogs.context.Context",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "discord.Member",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "discord.User",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.Author",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "utils.AvatarsPageSource",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "discord.Color.default",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "discord.Color",
"line_number": 86,
"usage_type": "attribute"
},
{
"api_name": "utils.Pager",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "cogs.context.Context",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "discord.User",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.Author",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 93,
"usage_type": "name"
},
{
"api_name": "discord.Embed",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "discord.Guild",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "discord.Member",
"line_number": 110,
"usage_type": "attribute"
},
{
"api_name": "cogs.context.Context",
"line_number": 132,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "discord.Member",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "discord.User",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.Author",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 134,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 142,
"usage_type": "name"
},
{
"api_name": "discord.Member",
"line_number": 149,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.group",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "cogs.context.Context",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 163,
"usage_type": "name"
},
{
"api_name": "discord.Member",
"line_number": 163,
"usage_type": "attribute"
},
{
"api_name": "bot.user",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "bot.uptime",
"line_number": 168,
"usage_type": "attribute"
},
{
"api_name": "utils.human_timedelta",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "bot.uptime",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "utils.BlankException",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "typing.Optional",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 179,
"usage_type": "attribute"
},
{
"api_name": "bot.pool.fetchval",
"line_number": 179,
"usage_type": "call"
},
{
"api_name": "bot.pool",
"line_number": 179,
"usage_type": "attribute"
},
{
"api_name": "utils.format_status",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "utils.human_timedelta",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "utils.format_status",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "cogs.context.Context",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "discord.User",
"line_number": 192,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.Author",
"line_number": 192,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 192,
"usage_type": "name"
},
{
"api_name": "discord.utils.format_dt",
"line_number": 206,
"usage_type": "call"
},
{
"api_name": "discord.utils",
"line_number": 206,
"usage_type": "attribute"
},
{
"api_name": "utils.FieldPageSource",
"line_number": 211,
"usage_type": "call"
},
{
"api_name": "utils.Pager",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 191,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 191,
"usage_type": "name"
},
{
"api_name": "cogs.context.Context",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "discord.User",
"line_number": 218,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.Author",
"line_number": 218,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 218,
"usage_type": "name"
},
{
"api_name": "discord.utils.format_dt",
"line_number": 235,
"usage_type": "call"
},
{
"api_name": "discord.utils",
"line_number": 235,
"usage_type": "attribute"
},
{
"api_name": "utils.FieldPageSource",
"line_number": 240,
"usage_type": "call"
},
{
"api_name": "utils.Pager",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 217,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 217,
"usage_type": "name"
},
{
"api_name": "cogs.context.Context",
"line_number": 249,
"usage_type": "name"
},
{
"api_name": "discord.User",
"line_number": 251,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.Author",
"line_number": 251,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 251,
"usage_type": "name"
},
{
"api_name": "discord.utils.format_dt",
"line_number": 270,
"usage_type": "call"
},
{
"api_name": "discord.utils",
"line_number": 270,
"usage_type": "attribute"
},
{
"api_name": "utils.FieldPageSource",
"line_number": 275,
"usage_type": "call"
},
{
"api_name": "utils.Pager",
"line_number": 278,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.command",
"line_number": 246,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 246,
"usage_type": "name"
},
{
"api_name": "cogs.context.Context",
"line_number": 287,
"usage_type": "name"
},
{
"api_name": "discord.User",
"line_number": 287,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.Author",
"line_number": 287,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 287,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 299,
"usage_type": "name"
},
{
"api_name": "asyncpg.Record",
"line_number": 299,
"usage_type": "attribute"
},
{
"api_name": "asyncio.gather",
"line_number": 308,
"usage_type": "call"
},
{
"api_name": "utils.to_bytes",
"line_number": 309,
"usage_type": "call"
},
{
"api_name": "utils.format_bytes",
"line_number": 312,
"usage_type": "call"
},
{
"api_name": "discord.File",
"line_number": 313,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 319,
"usage_type": "attribute"
},
{
"api_name": "discord.Embed",
"line_number": 326,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.group",
"line_number": 281,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 281,
"usage_type": "name"
},
{
"api_name": "cogs.context.Context",
"line_number": 338,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 339,
"usage_type": "name"
},
{
"api_name": "discord.Guild",
"line_number": 339,
"usage_type": "attribute"
},
{
"api_name": "discord.Member",
"line_number": 341,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.Author",
"line_number": 341,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 341,
"usage_type": "name"
},
{
"api_name": "time.perf_counter",
"line_number": 352,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 353,
"usage_type": "name"
},
{
"api_name": "asyncpg.Record",
"line_number": 353,
"usage_type": "attribute"
},
{
"api_name": "time.perf_counter",
"line_number": 356,
"usage_type": "call"
},
{
"api_name": "asyncio.gather",
"line_number": 361,
"usage_type": "call"
},
{
"api_name": "utils.to_bytes",
"line_number": 362,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 365,
"usage_type": "call"
},
{
"api_name": "utils.format_bytes",
"line_number": 366,
"usage_type": "call"
},
{
"api_name": "discord.File",
"line_number": 367,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 371,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 375,
"usage_type": "attribute"
},
{
"api_name": "discord.Embed",
"line_number": 381,
"usage_type": "call"
}
] |
40693675853
|
import argparse
import glob
import os
import shutil
import subprocess # noqa: S404
import sys
from collections import namedtuple
from types import MappingProxyType
from typing import Iterable, List, Optional
HOST_BUILD_CTX = '/tmp/magma_orc8r_build' # noqa: S108
HOST_MAGMA_ROOT = '../../../.'
IMAGE_MAGMA_ROOT = os.path.join('src', 'magma')
GOLINT_FILE = '.golangci.yml'
TEST_RESULT_DIR = 'orc8r/cloud/test-results'
MODULES = (
'orc8r',
'lte',
'feg',
'cwf',
'dp',
)
DEPLOYMENT_TO_MODULES = MappingProxyType({
'all': MODULES,
'orc8r': ('orc8r'),
'fwa': ('orc8r', 'lte'),
'ffwa': ('orc8r', 'lte', 'feg'),
'cwf': ('orc8r', 'lte', 'feg', 'cwf'),
})
DEPLOYMENTS = DEPLOYMENT_TO_MODULES.keys()
EXTRA_COMPOSE_FILES = (
'docker-compose.metrics.yml',
# For now, logging is left out of the build because the fluentd daemonset
# and forwarder pod shouldn't change very frequently - we can build and
# push locally when they need to be updated.
# We can integrate this into the CI pipeline if/when we see the need for it
# 'docker-compose.logging.yml',
)
MagmaModule = namedtuple('MagmaModule', ['name', 'host_path'])
def main() -> None:
"""
Run docker-compose script
"""
_check_assumptions()
args = _parse_args()
mods = _get_modules(DEPLOYMENT_TO_MODULES[args.deployment])
if not args.extras:
_create_build_context(mods)
if args.mount:
_run(['build', 'test'])
_run(['run', '--rm'] + _get_mnt_vols(mods) + ['test', 'bash'])
_down(args)
elif args.generate:
_run(['build', 'test'])
_run(['run', '--rm'] + _get_mnt_vols(mods) + ['test', 'make fullgen'])
_down(args)
elif args.lint:
_run(['build', 'test'])
_run(['run', '--rm'] + _get_mnt_vols(mods) + ['test', 'make lint'])
_down(args)
elif args.tidy:
_run(['build', 'test'])
_run(['run', '--rm'] + _get_mnt_vols(mods) + ['test', 'make tidy'])
_down(args)
elif args.precommit:
_run(['build', 'test'])
_run(['run', '--rm'] + _get_mnt_vols(mods) + ['test', 'make precommit'])
_down(args)
elif args.coverage:
_run(['up', '-d', 'postgres_test'])
_run(['build', 'test'])
_run(['run', '--rm'] + _get_mnt_vols(mods) + ['test', 'make cover'])
_down(args)
elif args.tests:
_run(['up', '-d', 'postgres_test'])
_run(['build', 'test'])
_run(['run', '--rm'] + _get_test_result_vol() + ['test', 'make test'])
_down(args)
elif args.build_service:
_run(['build', args.build_service])
else:
d_args = _get_default_file_args(args) + _get_default_build_args(args)
_run(d_args)
def _check_assumptions():
"""Check assumptions about environment."""
cwd = os.path.dirname(os.path.realpath(__file__))
if cwd != os.getcwd():
sys.exit("Must run from orc8r/cloud/docker directory")
if 'PWD' not in os.environ:
msg = (
"$PWD environment variable must be set.\n"
"Normally this is set by your shell. Try running without sudo, "
"then try explicitly setting the PWD env var."
)
sys.exit(msg)
def _get_modules(mods: Iterable[str]) -> Iterable[MagmaModule]:
"""
Read the modules config file and return all modules specified.
"""
modules = []
for m in mods:
abspath = os.path.abspath(os.path.join(HOST_MAGMA_ROOT, m))
module = MagmaModule(name=m, host_path=abspath)
modules.append(module)
return modules
def _create_build_context(modules: Iterable[MagmaModule]) -> None:
""" Clear out the build context from the previous run """
shutil.rmtree(HOST_BUILD_CTX, ignore_errors=True)
os.mkdir(HOST_BUILD_CTX)
print("Creating build context in '%s'..." % HOST_BUILD_CTX)
for m in modules:
_copy_module(m)
def _down(args: argparse.Namespace) -> None:
if not args.up:
_run(['down'])
def _run(cmd: List[str]) -> None:
""" Run the required docker compose command """
cmd = ['docker', 'compose', '--compatibility'] + cmd
print("Running '%s'..." % ' '.join(cmd))
try:
subprocess.run(cmd, check=True) # noqa: S603
except subprocess.CalledProcessError as err:
sys.exit(err.returncode)
def _get_mnt_vols(modules: Iterable[MagmaModule]) -> List[str]:
""" Return the volumes argument for docker compose commands """
vols = [
# .golangci.yml file
'-v', '%s:%s' % (
os.path.abspath(os.path.join(HOST_MAGMA_ROOT, GOLINT_FILE)),
os.path.join(os.sep, IMAGE_MAGMA_ROOT, GOLINT_FILE),
),
]
# Per-module directory mounts
for m in modules:
vols.extend(['-v', '%s:%s' % (m.host_path, _get_module_image_dst(m))])
return vols
def _get_test_result_vol() -> List[str]:
"""Return the volume argment to mount TEST_RESULT_DIR
Returns:
List[str]: -v command to mount TEST_RESULT_DIR
"""
return [
'-v', '%s:%s' % (
os.path.abspath(os.path.join(HOST_MAGMA_ROOT, TEST_RESULT_DIR)),
os.path.join(os.sep, IMAGE_MAGMA_ROOT, TEST_RESULT_DIR),
),
]
def _get_default_file_args(args: argparse.Namespace) -> List[str]:
def make_file_args(fs: Optional[Iterable[str]] = None) -> List[str]:
if fs is None:
return []
fs = ['docker-compose.yml'] + \
list(fs) + ['docker-compose.override.yml']
ret = []
for f in fs:
ret.extend(['-f', f])
return ret
if args.all:
return make_file_args(EXTRA_COMPOSE_FILES)
# Default implicitly to docker-compose.yml + docker-compose.override.yml
return make_file_args()
def _get_default_build_args(args: argparse.Namespace) -> List[str]:
mods = DEPLOYMENT_TO_MODULES[args.deployment]
ret = [
'build',
'--build-arg', 'MAGMA_MODULES=%s' % ' '.join(mods),
]
if args.nocache:
ret.append('--no-cache')
return ret
def _copy_module(module: MagmaModule) -> None:
""" Copy module directory into the build context """
build_ctx = _get_module_host_dst(module)
def copy_to_ctx(d: str) -> None:
shutil.copytree(
os.path.join(module.host_path, d),
os.path.join(build_ctx, d),
)
if module.name == 'nms':
copy_to_ctx('scripts')
else:
copy_to_ctx('cloud')
# Orc8r module also has lib/ and gateway/
if module.name == 'orc8r':
copy_to_ctx('lib')
copy_to_ctx('gateway')
# Optionally copy cloud/configs/
# Converts e.g. lte/cloud/configs/ to configs/lte/
if os.path.isdir(os.path.join(module.host_path, 'cloud', 'configs')):
shutil.copytree(
os.path.join(module.host_path, 'cloud', 'configs'),
os.path.join(HOST_BUILD_CTX, 'configs', module.name),
)
# Copy the go.mod file for caching the go downloads
# Preserves relative paths between modules
for f in glob.iglob(build_ctx + '/**/go.mod', recursive=True):
gomod = f.replace(
HOST_BUILD_CTX, os.path.join(HOST_BUILD_CTX, 'gomod'),
)
print(gomod)
os.makedirs(os.path.dirname(gomod))
shutil.copyfile(f, gomod)
def _get_module_image_dst(module: MagmaModule) -> str:
"""
Given a path to a module on the host, return the intended destination
in the final image.
Parameters:
module: Magma module
Returns:
str: destination in the final image
"""
return os.path.join(os.sep, IMAGE_MAGMA_ROOT, module.name)
def _get_module_host_dst(module: MagmaModule) -> str:
"""
Given a path to a module on the host, return the intended destination
in the build context.
Parameters:
module: Magma module
Returns:
str: destination in the build context
"""
return os.path.join(HOST_BUILD_CTX, IMAGE_MAGMA_ROOT, module.name)
def _parse_args() -> argparse.Namespace:
""" Parse the command line args """
# There are multiple ways to invoke finer-grained control over which
# images are built.
#
# (1) How many images to build
#
# all: all images
# default: images required for minimum functionality
# - excluding metrics images
# - including postgres, proxy, etc
#
# (2) Of the core orc8r images, which modules to build
#
# Defaults to all modules, but can be further specified by targeting a
# deployment type.
parser = argparse.ArgumentParser(description='Orc8r build tool')
# Run something
parser.add_argument(
'--tests', '-t',
action='store_true',
help='Run unit tests',
)
parser.add_argument(
'--mount', '-m',
action='store_true',
help='Mount the source code and create a bash shell',
)
parser.add_argument(
'--generate', '-g',
action='store_true',
help='Mount the source code and regenerate generated files',
)
parser.add_argument(
'--precommit', '-c',
action='store_true',
help='Mount the source code and run pre-commit checks',
)
parser.add_argument(
'--lint', '-l',
action='store_true',
help='Mount the source code and run the linter',
)
parser.add_argument(
'--tidy', '-i',
action='store_true',
help='Mount the source code and run go mod tidy',
)
parser.add_argument(
'--coverage', '-o',
action='store_true',
help='Generate test coverage statistics',
)
# Build something
parser.add_argument(
'--all', '-a',
action='store_true',
help='Build all containers',
)
parser.add_argument(
'--extras', '-e',
action='store_true',
help='Build extras (non-essential) images (i.e. no proxy or lte)',
)
parser.add_argument(
'--deployment', '-d',
action='store',
default='all',
help='Build deployment type: %s' % ','.join(DEPLOYMENTS),
)
parser.add_argument(
'--build-service', '-b',
help='Build particular service',
)
# How to do it
parser.add_argument(
'--nocache', '-n',
action='store_true',
help='Build the images with no Docker layer caching',
)
parser.add_argument(
'--up', '-u',
action='store_true',
help='Leave containers up after running tests',
)
args = parser.parse_args()
return args
if __name__ == '__main__':
main()
|
magma/magma
|
orc8r/cloud/docker/build.py
|
build.py
|
py
| 10,627 |
python
|
en
|
code
| 1,605 |
github-code
|
6
|
[
{
"api_name": "os.path.join",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "types.MappingProxyType",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 99,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 99,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 103,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 109,
"usage_type": "call"
},
{
"api_name": "typing.Iterable",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "os.path.abspath",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 118,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 118,
"usage_type": "call"
},
{
"api_name": "typing.Iterable",
"line_number": 124,
"usage_type": "name"
},
{
"api_name": "shutil.rmtree",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "argparse.Namespace",
"line_number": 134,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 139,
"usage_type": "name"
},
{
"api_name": "subprocess.run",
"line_number": 144,
"usage_type": "call"
},
{
"api_name": "subprocess.CalledProcessError",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "typing.Iterable",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "os.path.abspath",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 154,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 154,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 155,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 149,
"usage_type": "name"
},
{
"api_name": "os.path.abspath",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 172,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 173,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 164,
"usage_type": "name"
},
{
"api_name": "argparse.Namespace",
"line_number": 178,
"usage_type": "attribute"
},
{
"api_name": "typing.Optional",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "typing.Iterable",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 179,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 178,
"usage_type": "name"
},
{
"api_name": "argparse.Namespace",
"line_number": 196,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 196,
"usage_type": "name"
},
{
"api_name": "shutil.copytree",
"line_number": 212,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 213,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 213,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 214,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 214,
"usage_type": "attribute"
},
{
"api_name": "os.path.isdir",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 229,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 229,
"usage_type": "call"
},
{
"api_name": "shutil.copytree",
"line_number": 230,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 231,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 231,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 232,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 232,
"usage_type": "attribute"
},
{
"api_name": "glob.iglob",
"line_number": 237,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 239,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 239,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 242,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 242,
"usage_type": "attribute"
},
{
"api_name": "shutil.copyfile",
"line_number": 243,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 257,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 257,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 271,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 271,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 292,
"usage_type": "call"
},
{
"api_name": "argparse.Namespace",
"line_number": 274,
"usage_type": "attribute"
}
] |
72532300669
|
# pylint: disable=redefined-outer-name
# pylint: disable=unused-argument
import json
from pathlib import Path
from typing import Any, Iterable
from unittest.mock import AsyncMock, call
from uuid import UUID, uuid4
import pytest
from models_library.projects import NodesDict, ProjectID
from models_library.projects_networks import NetworksWithAliases
from models_library.projects_nodes import Node
from pydantic import BaseModel, PositiveInt
from pytest_mock.plugin import MockerFixture
from simcore_service_director_v2.modules.projects_networks import (
_get_networks_with_aliases_for_default_network,
_send_network_configuration_to_dynamic_sidecar,
)
# UTILS
class MockedCalls(BaseModel):
detach: list[Any]
attach: list[Any]
class Example(BaseModel):
existing_networks_with_aliases: NetworksWithAliases
new_networks_with_aliases: NetworksWithAliases
expected_calls: MockedCalls
@classmethod
def using(
cls,
existing: dict[str, Any],
new: dict[str, Any],
detach: list[Any],
attach: list[Any],
) -> "Example":
return cls(
existing_networks_with_aliases=NetworksWithAliases.parse_obj(existing),
new_networks_with_aliases=NetworksWithAliases.parse_obj(new),
expected_calls=MockedCalls(detach=detach, attach=attach),
)
def _node_id(number: int) -> str:
return f"{UUID(int=number)}"
def _node_alias(number: int) -> str:
return f"node_alias_{number}"
def _network_name(number: int) -> str:
return f"network_{number}"
@pytest.fixture
def examples_factory(mock_scheduler: AsyncMock, project_id: ProjectID) -> list[Example]:
return [
# nothing exists
Example.using(
existing={},
new={
_network_name(1): {
_node_id(1): _node_alias(1),
_node_id(2): _node_alias(2),
}
},
detach=[],
attach=[
call.attach_project_network(
node_id=UUID(_node_id(2)),
project_network=_network_name(1),
network_alias=_node_alias(2),
),
call.attach_project_network(
node_id=UUID(_node_id(1)),
project_network=_network_name(1),
network_alias=_node_alias(1),
),
],
),
# with existing network, remove node 2
Example.using(
existing={
_network_name(1): {
_node_id(1): _node_alias(1),
_node_id(2): _node_alias(2),
}
},
new={
_network_name(1): {
f"{_node_id(1)}": _node_alias(1),
}
},
detach=[
call.detach_project_network(
node_id=UUID(_node_id(2)),
project_network=_network_name(1),
),
],
attach=[],
),
# remove node 2 and add node 2 with different alias
Example.using(
existing={
_network_name(1): {
_node_id(1): _node_alias(1),
_node_id(2): _node_alias(2),
}
},
new={
_network_name(1): {
_node_id(1): _node_alias(1),
_node_id(2): _node_alias(3),
}
},
detach=[
call.detach_project_network(
node_id=UUID(_node_id(2)),
project_network=_network_name(1),
),
],
attach=[
call.attach_project_network(
node_id=UUID(_node_id(2)),
project_network=_network_name(1),
network_alias=_node_alias(3),
),
],
),
# nothing happens when updates with the same content
Example.using(
existing={
_network_name(1): {
_node_id(1): _node_alias(1),
_node_id(2): _node_alias(2),
}
},
new={
_network_name(1): {
_node_id(1): _node_alias(1),
_node_id(2): _node_alias(2),
}
},
detach=[],
attach=[],
),
]
@pytest.fixture
def mock_scheduler() -> AsyncMock:
return AsyncMock()
@pytest.fixture
def mock_director_v0_client() -> AsyncMock:
return AsyncMock()
@pytest.fixture
def rabbitmq_client() -> AsyncMock:
return AsyncMock()
@pytest.fixture
def project_id() -> ProjectID:
return uuid4()
@pytest.fixture
def dy_workbench_with_networkable_labels(mocks_dir: Path) -> NodesDict:
dy_workbench_template = mocks_dir / "fake_dy_workbench_template.json"
assert dy_workbench_template.exists()
dy_workbench = json.loads(dy_workbench_template.read_text())
parsed_workbench: NodesDict = {}
for node_uuid, node_data in dy_workbench.items():
node_data["label"] = f"label_{uuid4()}"
parsed_workbench[node_uuid] = Node.parse_obj(node_data)
return parsed_workbench
@pytest.fixture
def fake_project_id() -> ProjectID:
return uuid4()
@pytest.fixture
def user_id() -> PositiveInt:
return 1
@pytest.fixture
def mock_docker_calls(mocker: MockerFixture) -> Iterable[dict[str, AsyncMock]]:
requires_dynamic_sidecar_mock = AsyncMock()
requires_dynamic_sidecar_mock.return_value = True
class_base = "simcore_service_director_v2.modules.dynamic_sidecar.scheduler._task.DynamicSidecarsScheduler"
mocked_items = {
"attach": mocker.patch(f"{class_base}.attach_project_network", AsyncMock()),
"detach": mocker.patch(f"{class_base}.detach_project_network", AsyncMock()),
"requires_dynamic_sidecar": mocker.patch(
"simcore_service_director_v2.modules.projects_networks.requires_dynamic_sidecar",
requires_dynamic_sidecar_mock,
),
}
yield mocked_items
async def test_send_network_configuration_to_dynamic_sidecar(
mock_scheduler: AsyncMock,
project_id: ProjectID,
examples_factory: list[Example],
mock_docker_calls: dict[str, AsyncMock],
) -> None:
for example in examples_factory:
await _send_network_configuration_to_dynamic_sidecar(
scheduler=mock_scheduler,
project_id=project_id,
new_networks_with_aliases=example.new_networks_with_aliases,
existing_networks_with_aliases=example.existing_networks_with_aliases,
)
mock_scheduler.assert_has_calls(example.expected_calls.attach, any_order=True)
mock_scheduler.assert_has_calls(example.expected_calls.detach, any_order=True)
async def test_get_networks_with_aliases_for_default_network_is_json_serializable(
mock_director_v0_client: AsyncMock,
fake_project_id: ProjectID,
dy_workbench_with_networkable_labels: dict[str, Any],
user_id: PositiveInt,
rabbitmq_client: AsyncMock,
mock_docker_calls: dict[str, AsyncMock],
) -> None:
assert await _get_networks_with_aliases_for_default_network(
project_id=fake_project_id,
user_id=user_id,
new_workbench=dy_workbench_with_networkable_labels,
director_v0_client=mock_director_v0_client,
rabbitmq_client=rabbitmq_client,
)
|
ITISFoundation/osparc-simcore
|
services/director-v2/tests/unit/test_modules_project_networks.py
|
test_modules_project_networks.py
|
py
| 7,545 |
python
|
en
|
code
| 35 |
github-code
|
6
|
[
{
"api_name": "pydantic.BaseModel",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "pydantic.BaseModel",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "models_library.projects_networks.NetworksWithAliases",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "models_library.projects_networks.NetworksWithAliases",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "models_library.projects_networks.NetworksWithAliases.parse_obj",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "models_library.projects_networks.NetworksWithAliases",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "models_library.projects_networks.NetworksWithAliases.parse_obj",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "models_library.projects_networks.NetworksWithAliases",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "uuid.UUID",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "unittest.mock.AsyncMock",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "models_library.projects.ProjectID",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "unittest.mock.call.attach_project_network",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "unittest.mock.call",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "uuid.UUID",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "unittest.mock.call.attach_project_network",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "unittest.mock.call",
"line_number": 80,
"usage_type": "name"
},
{
"api_name": "uuid.UUID",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "unittest.mock.call.detach_project_network",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "unittest.mock.call",
"line_number": 101,
"usage_type": "name"
},
{
"api_name": "uuid.UUID",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "unittest.mock.call.detach_project_network",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "unittest.mock.call",
"line_number": 123,
"usage_type": "name"
},
{
"api_name": "uuid.UUID",
"line_number": 124,
"usage_type": "call"
},
{
"api_name": "unittest.mock.call.attach_project_network",
"line_number": 129,
"usage_type": "call"
},
{
"api_name": "unittest.mock.call",
"line_number": 129,
"usage_type": "name"
},
{
"api_name": "uuid.UUID",
"line_number": 130,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "unittest.mock.AsyncMock",
"line_number": 158,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 156,
"usage_type": "attribute"
},
{
"api_name": "unittest.mock.AsyncMock",
"line_number": 157,
"usage_type": "name"
},
{
"api_name": "unittest.mock.AsyncMock",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 161,
"usage_type": "attribute"
},
{
"api_name": "unittest.mock.AsyncMock",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "unittest.mock.AsyncMock",
"line_number": 168,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 166,
"usage_type": "attribute"
},
{
"api_name": "unittest.mock.AsyncMock",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "uuid.uuid4",
"line_number": 173,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 171,
"usage_type": "attribute"
},
{
"api_name": "models_library.projects.ProjectID",
"line_number": 172,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "models_library.projects.NodesDict",
"line_number": 183,
"usage_type": "name"
},
{
"api_name": "uuid.uuid4",
"line_number": 186,
"usage_type": "call"
},
{
"api_name": "models_library.projects_nodes.Node.parse_obj",
"line_number": 187,
"usage_type": "call"
},
{
"api_name": "models_library.projects_nodes.Node",
"line_number": 187,
"usage_type": "name"
},
{
"api_name": "pytest.fixture",
"line_number": 176,
"usage_type": "attribute"
},
{
"api_name": "models_library.projects.NodesDict",
"line_number": 177,
"usage_type": "name"
},
{
"api_name": "uuid.uuid4",
"line_number": 194,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 192,
"usage_type": "attribute"
},
{
"api_name": "models_library.projects.ProjectID",
"line_number": 193,
"usage_type": "name"
},
{
"api_name": "pytest.fixture",
"line_number": 197,
"usage_type": "attribute"
},
{
"api_name": "pydantic.PositiveInt",
"line_number": 198,
"usage_type": "name"
},
{
"api_name": "pytest_mock.plugin.MockerFixture",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "unittest.mock.AsyncMock",
"line_number": 204,
"usage_type": "call"
},
{
"api_name": "unittest.mock.AsyncMock",
"line_number": 208,
"usage_type": "call"
},
{
"api_name": "unittest.mock.AsyncMock",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 202,
"usage_type": "attribute"
},
{
"api_name": "typing.Iterable",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "unittest.mock.AsyncMock",
"line_number": 203,
"usage_type": "name"
},
{
"api_name": "unittest.mock.AsyncMock",
"line_number": 220,
"usage_type": "name"
},
{
"api_name": "models_library.projects.ProjectID",
"line_number": 221,
"usage_type": "name"
},
{
"api_name": "unittest.mock.AsyncMock",
"line_number": 223,
"usage_type": "name"
},
{
"api_name": "simcore_service_director_v2.modules.projects_networks._send_network_configuration_to_dynamic_sidecar",
"line_number": 226,
"usage_type": "call"
},
{
"api_name": "unittest.mock.AsyncMock",
"line_number": 238,
"usage_type": "name"
},
{
"api_name": "models_library.projects.ProjectID",
"line_number": 239,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 240,
"usage_type": "name"
},
{
"api_name": "pydantic.PositiveInt",
"line_number": 241,
"usage_type": "name"
},
{
"api_name": "unittest.mock.AsyncMock",
"line_number": 242,
"usage_type": "name"
},
{
"api_name": "unittest.mock.AsyncMock",
"line_number": 243,
"usage_type": "name"
},
{
"api_name": "simcore_service_director_v2.modules.projects_networks._get_networks_with_aliases_for_default_network",
"line_number": 245,
"usage_type": "call"
}
] |
4452602501
|
import os
import sys
import mock
import unittest
import pkg_resources
from pybkick.kick import kick, main as kick_main, MissingSourceCode
from pybkick.pyboard import Pyboard
class TestKick(unittest.TestCase):
"""Test that we can kick code over to the PyBoard
"""
def testBasicKick(self):
test_data_path = pkg_resources.resource_filename(__name__, 'test_data')
kick(
port='/dev/ttyACM0',
src=test_data_path,
entry_point=None
)
def testKickFromCommandLine(self):
test_data_path = pkg_resources.resource_filename(__name__, 'test_data')
fake_argv = [sys.argv[0], '--src=%s' % test_data_path, '--dst=tmp']
with mock.patch('sys.argv', fake_argv):
fake_kick = mock.Mock()
with mock.patch('pybkick.kick', fake_kick):
kick_main()
fake_kick.assert_called_once()
def testKickMissingDirectory(self):
missing_test_data_path = os.path.join(pkg_resources.resource_filename(__name__, 'test_data'), 'missing')
with self.assertRaises(MissingSourceCode):
kick(
port='/dev/ttyACM0',
src=missing_test_data_path,
dst='tmp',
entry_point=None
)
def testKickTestData(self):
test_dir = pkg_resources.resource_filename(__name__, 'test_data')
port = '/dev/ttyACM0'
kick(port=port,
src=test_dir
)
pb = Pyboard(port)
with pb.raw_repl():
for filename in ['a.txt', 'b.txt']:
self.assertTrue(pb.file_exists(filename))
pb.rm(filename)
self.assertFalse(pb.file_exists(filename))
|
salimfadhley/pybkick
|
src/pybkick_tests/test_kick.py
|
test_kick.py
|
py
| 1,878 |
python
|
en
|
code
| 5 |
github-code
|
6
|
[
{
"api_name": "unittest.TestCase",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "pkg_resources.resource_filename",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pybkick.kick.kick",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pkg_resources.resource_filename",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "mock.patch",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "mock.Mock",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "mock.patch",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pybkick.kick.main",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "pkg_resources.resource_filename",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pybkick.kick.MissingSourceCode",
"line_number": 35,
"usage_type": "argument"
},
{
"api_name": "pybkick.kick.kick",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "pkg_resources.resource_filename",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "pybkick.kick.kick",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "pybkick.pyboard.Pyboard",
"line_number": 50,
"usage_type": "call"
}
] |
13523037639
|
from flask import Flask
import os.path
app = Flask(__name__)
@app.route("/")
def hello():
if os.path.exists('/volume/test'):
return "Hello from pvc!"
return "Hello World!"
if __name__ == "__main__":
app.run()
|
prgcont/workshop-OKD
|
cz/lekce-2/demoapp/app.py
|
app.py
|
py
| 236 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "flask.Flask",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "os.path.path.exists",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "name"
}
] |
13923936476
|
import os
import random
from os import listdir
from os.path import splitext
from tqdm import tqdm
import numpy as np
import cv2
from utils.FileOperator import *
# img_tf_flip(r'E:\Project\Unet-vanilla\data\img_backup',r'E:\Project\Unet-vanilla\data\mask_backup')
# img_tf_flip(r'../data/backup/img', r'../data/backup/mask')
def dir_bit_or(img_dir, mask_dir,dst_dir):
'''
将CPU生成图所在文件夹与GPU生成图所在文件夹中所有的图对应地进行按位或操作
:param img_dir: CPU生成图路径
:param mask_dir: GPU生成图路径
:return:
'''
img_ids = get_files_name(img_dir)
mask_ids = get_files_name(mask_dir)
length = len(img_ids)
for i in tqdm(range(0, length)):
img = cv2.imread(fr'{img_dir}/{img_ids[i]}.png')
t,img = cv2.threshold(img,210,255,cv2.THRESH_BINARY)
mask = cv2.imread(fr'{mask_dir}/{mask_ids[i]}.png')
dst = cv2.bitwise_or(img, mask)
pth = os.path.join(dst_dir,f'{img_ids[i]}_bitor.png')
ret = cv2.imwrite(pth, dst)
assert ret, 'save failed'
def copy_mask():
'''
复制mask
:return:
'''
img_ids = get_files_name(r'..\data\masks-backup')
length = len(img_ids)
for i in tqdm(range(0, length)):
# print(img_ids[i])
img = cv2.imread(fr'../data/masks-backup/{img_ids[i]}.png')
ret = cv2.imwrite(fr'../data/masks/{img_ids[i]}_scratch.png', img)
assert ret, 'save failed'
ret = cv2.imwrite(fr'../data/masks/{img_ids[i]}_stain.png', img)
assert ret, 'save failed'
ret = cv2.imwrite(fr'../data/masks/{img_ids[i]}_dot.png', img)
# img = cv2.imread(fr'../data/masks-backup/background.png')
# ret = cv2.imwrite(fr'../data/temp/{i}_background.png', img)
assert ret, 'save failed'
# print(i)
print('done')
import PIL #'6.2.1'
import cv2 #'4.1.1'
def patchit(root_dir,dst_dir):
auto_make_directory(dst_dir)
file_paths = get_files_pth(root_dir)
for pth in file_paths:
img = cv2.imread(pth,0)
auto_make_directory(dst_dir)
_,img = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
cv2.imwrite(os.path.join(dst_dir,os.path.basename(pth)),img, [cv2.IMWRITE_PNG_BILEVEL, 1])
|
ssocean/UNet-Binarization
|
utils/Augmentation.py
|
Augmentation.py
|
py
| 2,259 |
python
|
en
|
code
| 4 |
github-code
|
6
|
[
{
"api_name": "tqdm.tqdm",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "cv2.threshold",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cv2.bitwise_or",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "cv2.imwrite",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "cv2.imwrite",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "cv2.threshold",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "cv2.imwrite",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 68,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "cv2.IMWRITE_PNG_BILEVEL",
"line_number": 68,
"usage_type": "attribute"
}
] |
34382591529
|
from django.contrib import admin
app_name = 'admin'
# Register your models here.
#当前目录下models
from myapp.models import Grades, Students
#创建班级的时候同时创建2个学生
class StudentInfo(admin.TabularInline):
model = Students
extra = 2
@admin.register(Grades)
class GradesAdmin(admin.ModelAdmin):
inlines = [StudentInfo,]
list_display = ('pk', 'gname', 'gdate','ggirlnum','gboynum','isDelete',)
list_filter = ('gname',)
search_fields = ('gname',)
list_per_page = 5
#添加修改页属性
fields = ('ggirlnum', 'gboynum', 'gname', 'gdate',)
#fieldsets = []
@admin.register(Students)
class StudentsAdmin(admin.ModelAdmin):
def gender(self):
if self.sgender:
return '男'
else:
return '女'
#列名
gender.short_description = '性别'
list_display = ('pk', 'sname', 'sage',gender,'scontend', 'sgrade', 'isDelete',)
list_per_page = 5
#actions_on_top = Flase
#admin.site.register(Grades, GradesAdmin)
#admin.site.register(Students,StudentsAdmin)
|
pyslin/project01
|
myapp/admin.py
|
admin.py
|
py
| 1,071 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.contrib.admin.TabularInline",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "myapp.models.Students",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.register",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "myapp.models.Grades",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "django.contrib.admin",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.register",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "myapp.models.Students",
"line_number": 22,
"usage_type": "argument"
},
{
"api_name": "django.contrib.admin",
"line_number": 22,
"usage_type": "name"
}
] |
35987644586
|
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import argopandas as argo
wmos = [4902596, 4902597]
var_names = ['PRES', 'TEMP', 'PSAL', 'DOXY', 'CHLA', 'BBP700']
for wmo in wmos:
ix = argo.float(wmo).synthetic_prof
up = ix.subset_direction('asc')
down = ix.subset_direction('desc')
up['CYCLE'] = [int(f[-6:-3]) for f in up.file]
down['CYCLE'] = [int(f[-7:-4]) for f in down.file]
cycles = set(up.CYCLE.unique()).intersection(down.CYCLE.unique())
for cycle in cycles:
fig, ax = plt.subplots()
up_sub = up.loc[up.CYCLE == cycle]
down_sub = down.loc[down.CYCLE == cycle]
up_data = up_sub.levels
down_data = down_sub.levels
sns.lineplot(data=up_data, x='DOXY', y='PRES', sort=False, estimator=None, ax=ax)
sns.lineplot(data=down_data, x='DOXY', y='PRES', sort=False, estimator=None, ax=ax)
ax.set_ylim((200,0))
|
cgrdn/argo-sci
|
src/pac-provor/initial_plot.py
|
initial_plot.py
|
py
| 951 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "argopandas.float",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "seaborn.lineplot",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "seaborn.lineplot",
"line_number": 31,
"usage_type": "call"
}
] |
33689496961
|
from string import ascii_lowercase
from behave import given, then
from toolium.utils.dataset import map_param
from common.utils import assert_arrays_equal, payload_to_table_format, replace_param
@given("there is {chore_types:d} chore type")
@given("there are {chore_types:d} chore types")
def step_create_chore_types(context, chore_types):
for i in range(chore_types):
raw_data = {
"id": f"ct-{ascii_lowercase[i]}",
"name": f"ct-{ascii_lowercase[i]}",
"description": f"description{i+1}",
}
context.execute_steps(
f"""
Given I use the admin API key
When I send a request to the Api resource "createChoreType" with body params
{payload_to_table_format(raw_data)}
Then the response status code is "200"
And I clear the token
"""
)
@then('the response contains the chores "{ids}"')
def step_response_contains_chores(context, ids):
ids = replace_param(ids)
if not ids:
ids = []
elif isinstance(ids, str):
ids = list(map(int, ids.replace(" ", "").split(",")))
elif isinstance(ids, int):
ids = [ids]
original = map_param("[CONF:examples.simple_chore_types]")
res_json = context.res.json()
for field_name in ("completed_at", "created_at"):
for item in res_json:
del item[field_name]
expected = [original[x - 1] for x in ids]
assert_arrays_equal(expected, res_json)
|
sralloza/chore-management-api
|
test/steps/aliases/chore_types.py
|
chore_types.py
|
py
| 1,503 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "string.ascii_lowercase",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "string.ascii_lowercase",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "common.utils.payload_to_table_format",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "behave.given",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "behave.given",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "common.utils.replace_param",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "toolium.utils.dataset.map_param",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "common.utils.assert_arrays_equal",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "behave.then",
"line_number": 29,
"usage_type": "call"
}
] |
16708430730
|
import networkx as nx
import matplotlib.pyplot as plt
import pandas as pd
# nodes=['A','B','C','D','E','F','1']
dataset = pd.read_csv("./topology.csv", header=None)
lenth = dataset.shape[0]
G=nx.Graph()
edges=[]
for i in range(lenth):
edges.append((dataset.values[i, 0], dataset.values[i, 1]))
r=G.add_edges_from(edges)
# shortest_way=nx.shortest_path(G,"F","D")
# print(shortest_way)
#
# nx.draw(G, with_labels=True,node_color='r', node_size=50,)
# plt.show()
options = {"node_color": "red", "node_size": 300, "linewidths": 0, "width": 0.1, "with_labels": True}
pos = nx.spring_layout(G, random_state=1969) # Seed for reproducible layout
nx.draw(G, pos, **options)
plt.show()
|
GAVIN-YAN/FinTechauthon2022
|
topology/topology.py
|
topology.py
|
py
| 712 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "networkx.Graph",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "networkx.spring_layout",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "networkx.draw",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 26,
"usage_type": "name"
}
] |
43585654615
|
"""fitnesspro URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import re_path, include
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import re_path
from fitnessproapp import views
urlpatterns = [
re_path('admin/', admin.site.urls),
re_path(r'^$', views.login, name='login'),
re_path(r'^Forgot_password$',views.Forgot_password, name='Forgot_password'),
re_path(r'^index/$', views.index, name='index'),
re_path(r'^User_profile/$',views.User_profile,name='User_profile'),
re_path(r'^User_edit_profile/$',views.User_edit_profile,name='User_edit_profile'),
re_path(r'^about/$', views.about, name='about'),
re_path(r'^classes/$', views.classes, name='classes'),
re_path(r'^train/$', views.train, name='train'),
re_path(r'^selecttrainer/$', views.selecttrainer, name='selecttrainer'),
re_path(r'^shedule/$', views.shedule, name='shedule'),
re_path(r'^contact/$', views.contact, name='contact'),
re_path(r'^signup/$', views.signup, name='signup'),
re_path(r'^userpaymentpage/$', views.userpaymentpage, name='userpaymentpage'),
re_path(r'^online_training/$', views.online_training, name='online_training'),
re_path(r'^offline_training/$', views.offline_training, name='offline_training'),
re_path(r'^onlin/$', views.onlin, name='onlin'),
re_path(r'^onedit/(?P<i_id>[0-9]+)/$', views.onedit, name='onedit'),
re_path(r'^onlineedit/(?P<oned_id>[0-9]+)/$', views.onlineedit, name='onlineedit'),
re_path(r'^offlin/$', views.offlin, name='offlin'),
re_path(r'^Usert_profile/$',views.Usert_profile,name='Usert_profile'),
re_path(r'^Usert_edit_profile/$',views.Usert_edit_profile,name='Usert_edit_profile'),
re_path(r'^offedit/(?P<i_id>[0-9]+)/$', views.offedit, name='offedit'),
re_path(r'^offlineedit/(?P<offd_id>[0-9]+)/$', views.offlineedit, name='offlineedit'),
re_path(r'^staffd/$', views.staffd, name='staffd'),
re_path(r'^maint/$', views.maint, name='maint'),
re_path(r'^admhome/$', views.admhome, name='admhome'),
re_path(r'^admreg/$', views.admreg, name='admreg'),
re_path(r'^admregedit/(?P<i_id>[0-9]+)/$', views.admregedit, name='admregedit'),
re_path(r'^admregistration/(?P<reg_id>[0-9]+)/$', views.admregistration, name='admregistration'),
re_path(r'^admintimetable/$', views.admintimetable, name='admintimetable'),
re_path(r'^admin_view_timetable/$', views.admin_view_timetable, name='admin_view_timetable'),
re_path(r'^admin_edit_timetable/(?P<i_id>[0-9]+)/$', views.admin_edit_timetable, name='admin_edit_timetable'),
re_path(r'^admin_editpage/(?P<timet_id>[0-9]+)/$', views.admin_editpage, name='admin_editpage'),
re_path(r'^delete_batch/(?P<p_id>[0-9]+)/$', views.delete_batch, name='delete_batch'),
re_path(r'^admin_userpayment/$', views.admin_userpayment, name='admin_userpayment'),
re_path(r'^admin_payment/$', views.admin_payment, name='admin_payment'),
re_path(r'^admin_pay_page/$', views.admin_pay_page, name='admin_pay_page'),
re_path(r'^Trainee_logout/$', views.Trainee_logout, name='Trainee_logout'),
re_path(r'^Trainer_logout/$', views.Trainer_logout, name='Trainer_logout'),
re_path(r'^SuperAdmin_logout/$', views.SuperAdmin_logout, name='SuperAdmin_logout'),
]
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
sanjaymurali1910/fitnessclub
|
fitnesspro/urls.py
|
urls.py
|
py
| 4,115 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "django.urls.re_path",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.login",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.Forgot_password",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.index",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.User_profile",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.User_edit_profile",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.about",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.classes",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.train",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.selecttrainer",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.shedule",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.contact",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.signup",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.userpaymentpage",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.online_training",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.offline_training",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.onlin",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.onedit",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.onlineedit",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.offlin",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.Usert_profile",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.Usert_edit_profile",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.offedit",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 49,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.offlineedit",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.staffd",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.maint",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.admhome",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.admreg",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 54,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.admregedit",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.admregistration",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.admintimetable",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.admin_view_timetable",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.admin_edit_timetable",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 59,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.admin_editpage",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.delete_batch",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.admin_userpayment",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 62,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.admin_payment",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 63,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.admin_pay_page",
"line_number": 64,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 64,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.Trainee_logout",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.Trainer_logout",
"line_number": 66,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 66,
"usage_type": "name"
},
{
"api_name": "django.urls.re_path",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "fitnessproapp.views.SuperAdmin_logout",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "fitnessproapp.views",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "django.conf.urls.static.static",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.STATIC_URL",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.STATIC_ROOT",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "django.conf.urls.static.static",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.MEDIA_URL",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 76,
"usage_type": "name"
},
{
"api_name": "django.conf.settings.MEDIA_ROOT",
"line_number": 76,
"usage_type": "attribute"
}
] |
72096598268
|
import pygame
import random
import rospy
import math
from geometry_msgs.msg import TwistStamped
from geometry_msgs.msg import PoseStamped
from armf import armtakeoff
rospy.init_node('make_a_circle', anonymous=True)
current_pos = PoseStamped()
def main():
pygame.init()
screen = pygame.display.set_mode((640, 480))
clock = pygame.time.Clock()
x_curser1 = 160
y_curser1 = 240
x_curser2 = 480
y_curser2 = 240
mode= ''
count = 0
radius = 10
screen.fill((0, 0, 0))
color_curser1 = (0,255,0)
color_curser2 = (0,255,0)
pygame.draw.circle(screen, color_curser1, (x_curser1, y_curser1), radius)
pygame.draw.circle(screen, color_curser2, (x_curser2, y_curser2), radius)
arm = armtakeoff()
arm.arm()
arm.takeoff()
while True:
screen.fill((0, 0, 0))
x_curser1 = 160
y_curser1 = 240
x_curser2 = 480
y_curser2 = 240
pressed = pygame.key.get_pressed()
alt_held = pressed[pygame.K_LALT] or pressed[pygame.K_RALT]
ctrl_held = pressed[pygame.K_LCTRL] or pressed[pygame.K_RCTRL]
for event in pygame.event.get():
# determin if X was clicked, or Ctrl+W or Alt+F4 was used
if event.type == pygame.QUIT:
return
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_w and ctrl_held:
return
if event.key == pygame.K_F4 and alt_held:
return
if event.key == pygame.K_ESCAPE:
return
# determine if a letter key was pressed
if event.key == pygame.K_w:
mode = 'up'
elif event.key == pygame.K_s:
mode = 'down'
elif event.key == pygame.K_d:
mode = 'right'
elif event.key == pygame.K_a:
mode = 'left'
elif event.key == pygame.K_SPACE:
mode = 'jump'
elif event.key == pygame.K_q:
mode = 'yaw'
elif event.key == pygame.K_e:
mode = 'yawri8'
elif event.key == pygame.K_LCTRL:
mode = 'low'
elif event.key == pygame.K_h:
mode = 'hold'
color_curser1 = (255,0,0)
color_curser2 = (255,0,0)
color_curser3 = (0,0,255)
color_curser4 = (0,0,255)
pygame.draw.circle(screen, color_curser1, (x_curser1, y_curser1), radius)
pygame.draw.circle(screen, color_curser2, (x_curser2, y_curser2), radius)
x_curser1,y_curser1,x_curser2,y_curser2 = curserControl(screen,x_curser1,y_curser1,x_curser2,y_curser2,mode,count,color_curser1,color_curser1,radius)
pygame.draw.circle(screen, color_curser3, (x_curser1, y_curser1), radius)
pygame.draw.circle(screen, color_curser4, (x_curser2, y_curser2), radius)
pygame.display.flip()
clock.tick(60)
def curserControl(screen,x_curser1,y_curser1,x_curser2,y_curser2,mode,count,color_curser1,color_curser2,radius):
publish_velocity=rospy.Publisher('/mavros/setpoint_velocity/cmd_vel', TwistStamped,queue_size=20)
vel=TwistStamped()
if mode == 'up':
vel.twist.linear.x= 0.8
vel.twist.linear.z= 0
publish_velocity.publish(vel)
vel.twist.linear.x= 0
y_curser1= y_curser1 -20
pygame.draw.circle(screen, color_curser1, (x_curser1, y_curser1), radius)
print("up")
elif mode == 'down':
vel.twist.linear.x= -0.8
vel.twist.linear.z= 0
publish_velocity.publish(vel)
vel.twist.linear.x= 0
y_curser1= y_curser1 +20
pygame.draw.circle(screen, color_curser1, (x_curser1, y_curser1), radius)
print("down")
elif mode == 'right':
vel.twist.linear.z= 0
vel.twist.linear.y= -0.8
publish_velocity.publish(vel)
vel.twist.linear.y= 0
x_curser1= x_curser1 +20
pygame.draw.circle(screen, color_curser1, (x_curser1, y_curser1), radius)
print("right")
elif mode == 'left':
vel.twist.linear.y= 0.8
vel.twist.linear.z= 0
publish_velocity.publish(vel)
vel.twist.linear.y= 0
x_curser1= x_curser1 -20
pygame.draw.circle(screen, color_curser1, (x_curser1, y_curser1), radius)
print("left")
elif mode == 'jump':
vel.twist.linear.z= 1
publish_velocity.publish(vel)
vel.twist.linear.z= 0
y_curser2= y_curser2 -20
pygame.draw.circle(screen, color_curser2, (x_curser2, y_curser2), radius)
print("jump")
elif mode == 'low':
vel.twist.linear.z= -0.5
publish_velocity.publish(vel)
vel.twist.linear.z= 0
y_curser2= y_curser2 +20
pygame.draw.circle(screen, color_curser2, (x_curser2, y_curser2), radius)
print("low")
elif mode == 'yaw':
vel.twist.angular.z= 0.8
vel.twist.linear.z= 0
publish_velocity.publish(vel)
vel.twist.linear.z= 0
x_curser2= x_curser2 -20
pygame.draw.circle(screen, color_curser2, (x_curser2, y_curser2), radius)
print("yawleft")
elif mode == 'yawri8':
vel.twist.angular.z= -0.8
vel.twist.linear.z= 0
publish_velocity.publish(vel)
vel.twist.linear.z= 0
x_curser2= x_curser2 +20
pygame.draw.circle(screen, color_curser2, (x_curser2, y_curser2), radius)
print("yawri8")
elif mode == 'hold':
vel.twist.angular.z= 0
publish_velocity.publish(vel)
print("hold")
return x_curser1, y_curser1 ,x_curser2, y_curser2
main()
|
DarkcrusherX/indoor_nav
|
src/transmitter.py
|
transmitter.py
|
py
| 5,866 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "rospy.init_node",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "geometry_msgs.msg.PoseStamped",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pygame.init",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.Clock",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.circle",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.circle",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "armf.armtakeoff",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pygame.key.get_pressed",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "pygame.key",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_LALT",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_RALT",
"line_number": 48,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_LCTRL",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_RCTRL",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "pygame.event.get",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "pygame.event",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "pygame.QUIT",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "pygame.KEYDOWN",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_w",
"line_number": 57,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_F4",
"line_number": 59,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_ESCAPE",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_w",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_s",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_d",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_a",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_SPACE",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_q",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_e",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_LCTRL",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "pygame.K_h",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.circle",
"line_number": 90,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 90,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.circle",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 91,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.circle",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.circle",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 96,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.flip",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "rospy.Publisher",
"line_number": 104,
"usage_type": "call"
},
{
"api_name": "geometry_msgs.msg.TwistStamped",
"line_number": 104,
"usage_type": "argument"
},
{
"api_name": "geometry_msgs.msg.TwistStamped",
"line_number": 105,
"usage_type": "call"
},
{
"api_name": "pygame.draw.circle",
"line_number": 115,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 115,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.circle",
"line_number": 123,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 123,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.circle",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.circle",
"line_number": 140,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 140,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.circle",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 148,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.circle",
"line_number": 157,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 157,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.circle",
"line_number": 167,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.circle",
"line_number": 177,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 177,
"usage_type": "attribute"
}
] |
6323653516
|
import asyncio
from typing import *
from urllib.parse import urlencode
from datetime import datetime
from pprint import pformat as pf
import logging
logging.basicConfig(format='%(asctime)s %(message)s')
import jikanpy
from enum import Enum
from copy import copy, deepcopy
from pprint import pprint
import traceback
import aiohttp
import dotenv
import asyncio
from fuzzywuzzy import fuzz
from expiring_dict import ExpiringDict
from core import getLogger, stopwatch
log = getLogger(__name__)
class MALRatings(Enum):
g = "G - All Ages"
pg = "PG - Children"
pg_13 = "PG-13 - Teens 13 or older"
r = "R - 17+ (violence & profanity) "
r_plus = "R+ - Mild Nudity 17+"
rx = "Rx - Hentai 18+"
class MALTypes(Enum):
ANIME = 1
MANGA = 2
class MyAnimeListAIOClient:
"""Wrapper for MyAnimeList API Endpoint"""
client_id: str = ""
TTL = 60*60
response_cache = ExpiringDict(ttl=TTL)
def __init__(
self,
client_id: str = None,
):
"""A wrapper for the Non-user based mal api endpoints (-> no oauth needed)"""
self.log = logging.getLogger(__name__)
self.log.setLevel(logging.INFO)
self._id = client_id or self.client_id or dotenv.dotenv_values()["ID"]
if not self._id and not self.client_id:
raise RuntimeError(
"Client id has to be passed into the constructor or in the .env file under key `ID`. Consider calling `set_credentails`"
)
self._base_url = r"https://api.myanimelist.net/v2"
self._session = None
@classmethod
def set_credentials(cls, client_id: str):
""""set the client id"""
cls.client_id = client_id
@property
def session(self) -> aiohttp.ClientSession:
"""Get AioHTTP session by creating it if it doesn't already exist"""
if not self._session or self._session.closed:
self._session = aiohttp.ClientSession()
return self._session
async def _make_request(
self,
endpoint: str,
value: Optional[str] = None,
optional_query: Dict[str, str] = None,
) -> Dict[str, Any]:
query = None
if value and not value.startswith("/"):
value = "/" + value
if optional_query:
query = f"?{urlencode(optional_query)}"
url = f"{self._base_url}/{endpoint}{value or ''}{query or ''}"
async with self.session.get(url, headers=self.headers) as resp:
json = await resp.json(encoding="utf-8")
await self._session.close()
self.log.debug(f"request: {url}")
self.log.debug(f"response: {pf(json)}")
if not resp.ok:
raise RuntimeError(f"{url} returned status code {resp.status}")
return json
@property
def headers(self) -> Dict[str, str]:
if not self._id:
raise RuntimeError("Client id has to be passed into the constructor or in the .env file under key `ID`")
return {"X-MAL-CLIENT-ID": self._id}
async def fetch_anime(
self,
id: int
) -> Dict[str, Any]:
"""fetch an Anime by it's ID
Args:
-----
id : int
the mal ID of that anime
"""
fields = (
"id,title,main_picture,alternative_titles,"
"start_date,end_date,synopsis,mean,rank,popularity,"
"num_list_users,num_scoring_users,nsfw,created_at,"
"updated_at,media_type,status,genres,my_list_status,"
"num_episodes,start_season,broadcast,source,"
"average_episode_duration,rating,pictures,background,"
"related_anime,related_manga,recommendations,studios,statistics,"
"average_episode_duration,opening_themes,ending_themes"
)
resp = await self._make_request(
endpoint="anime",
value=str(id),
optional_query={"fields": fields}
)
return resp
async def _search(self):
pass
    async def search_anime(self, query: str, include_nsfw=True, fallback: bool = False) -> Dict[str, Any]:
        """search for anime by name

        Args:
        -----
        query : str
            the query to search for
        include_nsfw : bool
            whether to include nsfw results
        fallback : bool
            whether or not to limit the query to 50 chars

        Returns:
        --------
        Dict[str, Any]
            the response json (``None`` when even the shortened fallback query fails)
        """
        # Serve from the TTL cache when possible; deepcopy so callers can
        # mutate the result without corrupting the cached entry.
        try:
            resp = self.response_cache[query]
            return deepcopy(resp)
        except KeyError:
            pass
        fields = (
            "id,title,main_picture,alternative_titles,"
            "start_date,end_date,synopsis,mean,rank,popularity,"
            "num_list_users,num_scoring_users,nsfw,created_at,"
            "updated_at,media_type,status,genres,my_list_status,"
            "num_episodes,start_season,broadcast,source,"
            "average_episode_duration,rating,pictures,background,"
            "related_anime,related_manga,recommendations,studios,statistics,"
            "average_episode_duration,opening_themes,ending_themes"
        )
        a = datetime.now()  # start time, used only for the duration log below
        kwargs = {"nsfw": "true" if include_nsfw else "false"}
        try:
            resp = await self._make_request(
                endpoint="anime",
                optional_query={
                    "q": query,
                    "fields":fields,
                    "limit":"50",
                    **kwargs
                })
        except RuntimeError as e:
            if fallback:
                # Already retried once with a shortened query — give up.
                log.warning(f"Error while fetching anime - title len = {len(query)}")
                log.warning(traceback.format_exc())
                return None
            else:
                # Retry once with the query capped at 50 chars
                # (presumably the API rejects longer queries — TODO confirm).
                log.warning(f"fallback search for title {query}")
                return await self.search_anime(query[:50], include_nsfw, True)
        log.info(f"fetched {len(resp['data'])} anime in {(datetime.now() - a).total_seconds():.2f}s")
        self.response_cache.ttl(query, deepcopy(resp), self.TTL)
        return deepcopy(resp)
|
zp33dy/inu
|
inu/utils/rest/my_anime_list.py
|
my_anime_list.py
|
py
| 6,129 |
python
|
en
|
code
| 1 |
github-code
|
6
|
[
{
"api_name": "logging.basicConfig",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "core.getLogger",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "enum.Enum",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "enum.Enum",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "expiring_dict.ExpiringDict",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "dotenv.dotenv_values",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "aiohttp.ClientSession",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "aiohttp.ClientSession",
"line_number": 67,
"usage_type": "attribute"
},
{
"api_name": "urllib.parse.urlencode",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "pprint.pformat",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 162,
"usage_type": "name"
},
{
"api_name": "traceback.format_exc",
"line_number": 176,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 181,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 181,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 182,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 183,
"usage_type": "call"
}
] |
26529327586
|
#유기농 배추
from collections import deque
def bfs(x, y):
    """Flood-fill one connected patch of cabbages (1s) starting at (x, y),
    clearing visited cells to 0 in the module-level `graph`."""
    frontier = deque([(x, y)])
    graph[x][y] = 0
    while frontier:
        cx, cy = frontier.popleft()
        for step_x, step_y in zip(dx, dy):
            nx, ny = cx + step_x, cy + step_y
            if 0 <= nx < n and 0 <= ny < m and graph[nx][ny] == 1:
                graph[nx][ny] = 0
                frontier.append((nx, ny))
graph = []
# Movement deltas: up, down, left, right.
dx = [-1,1, 0, 0]
dy = [0,0,-1,1]
t = int(input())  # number of test cases
for _ in range(t):
    cnt = 0
    # m = width (columns), n = height (rows), k = number of cabbages
    m, n, k = map(int, input().split())
    graph = [[0 for _ in range(m)] for _ in range(n)]
    for _ in range(k):
        # Input gives coordinates as (x, y) = (column, row), hence graph[y][x].
        x, y = map(int, input().split())
        graph[y][x] = 1
    # Each BFS launch clears one connected patch; the number of launches
    # is the number of worms needed.
    for i in range(n):
        for j in range(m):
            if graph[i][j] == 1:
                bfs(i,j)
                cnt += 1
    print(cnt)
|
Jaeheon-So/baekjoon-algorithm
|
DFS, BFS/1012.py
|
1012.py
|
py
| 822 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "collections.deque",
"line_number": 4,
"usage_type": "call"
}
] |
18100605374
|
"""
https://leetcode.com/problems/maximum-ice-cream-bars/
1833. Maximum Ice Cream Bars
It is a sweltering summer day, and a boy wants to buy some ice cream bars.
At the store, there are n ice cream bars. You are given an array costs of length n, where costs[i] is the price of the ith ice cream bar in coins. The boy initially has coins coins to spend, and he wants to buy as many ice cream bars as possible.
Return the maximum number of ice cream bars the boy can buy with coins coins.
Note: The boy can buy the ice cream bars in any order.
"""
from typing import List
class Solution:
    def maxIceCream(self, costs: List[int], coins: int) -> int:
        """Return the maximum number of ice cream bars purchasable with `coins`.

        Greedy: always buy the cheapest remaining bar. Implemented as a
        counting sort over bar prices — O(n + max_cost) time, avoiding a
        comparison sort.

        Args:
            costs: price of each bar (non-negative integers).
            coins: available budget.

        Returns:
            Maximum number of bars that can be bought.
        """
        # Guard: max() on an empty list would raise ValueError.
        if not costs:
            return 0
        answer = 0
        freq = [0] * (max(costs) + 1)
        # Histogram of prices: freq[c] = number of bars costing c.
        for cost in costs:
            freq[cost] += 1
        for cost, amount in enumerate(freq):
            if amount == 0:
                continue
            # Every bar in this price tier fits the budget: take them all.
            if amount * cost <= coins:
                coins -= amount * cost
                answer += amount
                continue
            # Budget covers only part of this tier; buy what we can and stop.
            # (cost > 0 here: a zero-cost tier always hits the branch above.)
            answer += coins // cost
            break
        return answer
|
hirotake111/leetcode_diary
|
leetcode/1833/solution.py
|
solution.py
|
py
| 1,538 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "typing.List",
"line_number": 14,
"usage_type": "name"
}
] |
41457482585
|
from sqlite3 import IntegrityError, Connection
import schema
from flask import g
from flask import request
from flask import Blueprint
from flask.views import MethodView
from werkzeug.exceptions import abort
from flaskr import settings
from flaskr.utils.auth import login_required
from flaskr.utils.db import get_db, get_all_posts, get_post, get_all_comments
# Blueprints: posts index, post CRUD, and comment endpoints, all under /api.
bp_index = Blueprint("index", __name__, url_prefix='/api')
bp = Blueprint("post", __name__, url_prefix="/api/post")
bp_comment = Blueprint("comment", __name__, url_prefix='/api/comment')
class Index(MethodView):
    """GET /api/ — list every post."""

    def get(self):
        """Return all posts.

        :return json 200: {"posts": List[Post]}
        """
        posts = get_all_posts()
        return dict(posts=posts)
class Post(MethodView):
    """Collection endpoints for posts: GET /api/post (list) and POST /api/post (create)."""

    def get(self):
        """
        Get posts.
        :return json 200: List[Post]
        """
        return dict(posts=get_all_posts())

    @staticmethod
    def post_schema(from_dict: dict) -> dict:
        """Validate the create-post form: `title` and `body` must be strings
        within the configured length bounds; aborts with 400 otherwise."""
        try:
            data = schema.Schema({
                "title": schema.And(str, lambda x: settings.POST_TITLE_MIN_WORDS_LENGTH <= len(x) <= settings.POST_TITLE_MAX_WORDS_LENGTH),
                "body": schema.And(str, lambda x: settings.POST_BODY_MIN_WORDS_LENGTH <= len(x) <= settings.POST_BODY_MAX_WORDS_LENGTH)
            }).validate(from_dict)
        except schema.SchemaError:
            raise abort(400, "Wrong args.")
        else:
            return data

    @login_required
    def post(self):
        """
        Create a post
        :param form title: post title
        :param form body: post content text
        :return json 201: Post
        :raise 400: on invalid form data or duplicate content
        """
        data = self.post_schema(dict(request.form))
        db: Connection = get_db()
        try:
            # `with db` commits on success and rolls back on exception.
            with db:
                cur = db.cursor()
                cur.execute(
                    "INSERT INTO post (title, body, author_id) VALUES (?, ?, ?)",
                    (data['title'], data['body'], g.user["id"]),
                )
        except IntegrityError:
            raise abort(400, "Duplicated data")
        # Re-read the freshly inserted row so the response reflects DB defaults.
        post = get_post(post_id=cur.lastrowid)
        return dict(post), 201
class PostId(MethodView):
    """GET /api/post/<post_id> — fetch one post."""

    def get(self, post_id: int):
        """Fetch a single post by its id.

        :param int post_id: id of post
        :return json 200: Post
        :raise 404: when no post has that id
        """
        found = get_post(post_id=post_id)
        if found is not None:
            return found
        abort(404, f"Post id {post_id} doesn't exist.")
def comment_schema(form_dict: dict) -> dict:
    """Validate a comment form: `body` must be a str within the configured
    length bounds; aborts with 400 on invalid input."""
    validator = schema.Schema({
        "body": schema.And(
            str,
            lambda x: settings.COMMENT_BODY_MIN_WORDS_LENGTH <= len(x) <= settings.COMMENT_BODY_MAX_WORDS_LENGTH
        )
    })
    try:
        return validator.validate(form_dict)
    except schema.SchemaError:
        raise abort(400, "Wrong args.")
class PostIdComment(MethodView):
    """Comment endpoints scoped to a post: GET/POST /api/post/<post_id>/comment."""

    def get(self, post_id: int):
        """
        Get all comments of a post
        :param: int post_id:
        :return json 200: {"comments": List[Comment]}
        """
        return dict(comments=get_all_comments(post_id=post_id))

    @login_required
    def post(self, post_id: int):
        """
        Add a top-level comment to a post (parent_id is NULL).
        Login required.
        :param int post_id:
        :param form body: comment content text
        :return json 201: Post
        """
        data = comment_schema(dict(request.form))
        db = get_db()
        # NOTE(review): post_id is not checked for existence here, unlike the
        # parent check in CommentIdComment — confirm orphan comments are intended.
        with db:
            cur = db.cursor()
            cur.execute("INSERT INTO comment (author_id, body, parent_id, post_id) VALUES (?, ?, ?, ?)",
                        (g.user['id'], data['body'], None, post_id))
        return get_post(post_id), 201
class CommentId(MethodView):
    """GET /api/comment/<comment_id> — fetch one comment."""

    def get(self, comment_id: int):
        """
        Get a comment.
        :param int comment_id:
        :return json 200: Comment
        :raise 400: when the comment does not exist
        """
        row = get_db().execute("SELECT id, post_id, created, body FROM comment WHERE id=?", (comment_id, )).fetchone()
        if not row:
            # NOTE(review): a missing resource would conventionally be 404, not 400 —
            # confirm before changing, since API clients may rely on the current code.
            raise abort(400, f'Comment {comment_id} not found.')
        return dict(row)
class CommentIdComment(MethodView):
    """POST /api/comment/<comment_id>/comment — reply to an existing comment."""

    @login_required
    def post(self, comment_id: int):
        """
        Add a comment of a comment (a reply).
        Login required
        :param int comment_id: id of the parent comment
        :param form body: comment content text
        :raise 401 Login required:
        :raise 400: when the parent comment does not exist
        :return json 201: Post
        """
        data = comment_schema(dict(request.form))
        db = get_db()
        # Look up the parent so the reply inherits its post_id.
        comment = db.execute(
            "SELECT post_id FROM comment WHERE id=?",
            (comment_id, )
        ).fetchone()
        if not comment:
            # Plain literal — the original used an f-string with no placeholders (F541).
            raise abort(400, "Comment not found.")
        with db:
            cur = db.cursor()
            cur.execute("INSERT INTO comment (author_id, body, parent_id, post_id) VALUES (?, ?, ?, ?)",
                        (g.user['id'], data['body'], comment_id, comment['post_id']))
        return get_post(comment['post_id']), 201
# Route registration: bind each MethodView to its URL on the matching blueprint.
bp_index.add_url_rule('/', view_func=Index.as_view('Index'))
bp.add_url_rule('/<int:post_id>/comment', view_func=PostIdComment.as_view('PostIdComment'))
bp.add_url_rule('/<int:post_id>', view_func=PostId.as_view('PostId'))
bp.add_url_rule('/', view_func=Post.as_view('Post'))
bp_comment.add_url_rule('/<int:comment_id>/comment',
                        view_func=CommentIdComment.as_view('PostIdCommentIdComment'))
bp_comment.add_url_rule('/<int:comment_id>', view_func=CommentId.as_view('PostIdCommentId'))
|
MioYvo/unlimited-level-messages
|
backend/flaskr/views/post.py
|
post.py
|
py
| 5,566 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "flask.Blueprint",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask.Blueprint",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask.Blueprint",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "flask.views.MethodView",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "flaskr.utils.db.get_all_posts",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "flask.views.MethodView",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "flaskr.utils.db.get_all_posts",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "schema.Schema",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "schema.And",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "flaskr.settings.POST_TITLE_MIN_WORDS_LENGTH",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "flaskr.settings",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "flaskr.settings.POST_TITLE_MAX_WORDS_LENGTH",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "schema.And",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "flaskr.settings.POST_BODY_MIN_WORDS_LENGTH",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "flaskr.settings",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "flaskr.settings.POST_BODY_MAX_WORDS_LENGTH",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "schema.SchemaError",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "werkzeug.exceptions.abort",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "sqlite3.Connection",
"line_number": 58,
"usage_type": "name"
},
{
"api_name": "flaskr.utils.db.get_db",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "sqlite3.IntegrityError",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "werkzeug.exceptions.abort",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "flaskr.utils.db.get_post",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "flaskr.utils.auth.login_required",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "flask.views.MethodView",
"line_number": 74,
"usage_type": "name"
},
{
"api_name": "flaskr.utils.db.get_post",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "werkzeug.exceptions.abort",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "schema.Schema",
"line_number": 91,
"usage_type": "call"
},
{
"api_name": "schema.And",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "flaskr.settings.COMMENT_BODY_MIN_WORDS_LENGTH",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "flaskr.settings",
"line_number": 94,
"usage_type": "name"
},
{
"api_name": "flaskr.settings.COMMENT_BODY_MAX_WORDS_LENGTH",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "schema.SchemaError",
"line_number": 97,
"usage_type": "attribute"
},
{
"api_name": "werkzeug.exceptions.abort",
"line_number": 98,
"usage_type": "call"
},
{
"api_name": "flask.views.MethodView",
"line_number": 103,
"usage_type": "name"
},
{
"api_name": "flaskr.utils.db.get_all_comments",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "flask.request.form",
"line_number": 120,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 120,
"usage_type": "name"
},
{
"api_name": "flaskr.utils.db.get_db",
"line_number": 121,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 125,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 125,
"usage_type": "name"
},
{
"api_name": "flaskr.utils.db.get_post",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "flaskr.utils.auth.login_required",
"line_number": 112,
"usage_type": "name"
},
{
"api_name": "flask.views.MethodView",
"line_number": 130,
"usage_type": "name"
},
{
"api_name": "flaskr.utils.db.get_db",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "werkzeug.exceptions.abort",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "flask.views.MethodView",
"line_number": 143,
"usage_type": "name"
},
{
"api_name": "flask.request.form",
"line_number": 155,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 155,
"usage_type": "name"
},
{
"api_name": "flaskr.utils.db.get_db",
"line_number": 156,
"usage_type": "call"
},
{
"api_name": "werkzeug.exceptions.abort",
"line_number": 162,
"usage_type": "call"
},
{
"api_name": "flask.g.user",
"line_number": 167,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 167,
"usage_type": "name"
},
{
"api_name": "flaskr.utils.db.get_post",
"line_number": 169,
"usage_type": "call"
},
{
"api_name": "flaskr.utils.auth.login_required",
"line_number": 144,
"usage_type": "name"
}
] |
5026987486
|
# -*- coding:utf-8 -*-
# Demo: render a score label with pygame, save it as an image, and blit it
# onto the game background.
my_name = "分数"
import pygame
pygame.init()
# 66pt SimSun font; render black text on a white background surface.
my_font = pygame.font.SysFont("simSun", 66)
name_surface = my_font.render(u'分数', True, (0, 0, 0), (255, 255, 255))
pygame.image.save(name_surface, "name.png")
enemy_hit_dict = dict()
score = 0
ENEMY_SCORE = 100  # points awarded per downed enemy
# Example scoring usage kept from the original (commented out):
# enemy_hit_dict = pygame.sprite.groupcollide(enemy_group, hero.bullets, True, True)
# score += len(enemy_hit_dict) * ENEMY_SCORE; # compute the score
# enemy_hit_group.add(enemy_hit_dict)
screen = pygame.display.set_mode((480, 700))
bg = pygame.image.load("./images/background.png")
# 2. Draw the background at position (0, 0) with blit.
screen.blit(bg, (0, 0))
screen.blit(name_surface, (20, 20))
# 3. Refresh the display.
pygame.display.update()
|
xinlongOB/python_docment
|
飞机大战/字体.py
|
字体.py
|
py
| 737 |
python
|
en
|
code
| 0 |
github-code
|
6
|
[
{
"api_name": "pygame.init",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pygame.font.SysFont",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.save",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.update",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 28,
"usage_type": "attribute"
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.