seq_id (stringlengths 4-11) | text (stringlengths 113-2.92M) | repo_name (stringlengths 4-125, ⌀) | sub_path (stringlengths 3-214) | file_name (stringlengths 3-160) | file_ext (stringclasses, 18 values) | file_size_in_byte (int64, 113-2.92M) | program_lang (stringclasses, 1 value) | lang (stringclasses, 93 values) | doc_type (stringclasses, 1 value) | stars (int64, 0-179k, ⌀) | dataset (stringclasses, 3 values) | pt (stringclasses, 78 values) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
8855381653
|
import re
import string
import pyarabic.araby as ab
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
# nltk.download('stopwords')
stopwords = stopwords.words('arabic')  # NLTK language ids are lowercase
tw = []
tweets = 0
class pre_processing:
def clean_data(self):
for tweets in self:
tashkel_removed = ab.strip_diacritics(tweets)
            # strip emoji and pictographic symbols
            emotion_removed = re.sub('['
                '\U0001F600-\U0001F92F\U0001F300-\U0001F5FF\U0001F680-\U0001F6FF\U0001F190-\U0001F1FF\U00002702'
                '-\U000027B0\U0001F926-\U0001FA9F\u200d\u2640-\u2642\u2600-\u2B55\u23cf\u23e9\u231a\ufe0f'
                ']+', '', tashkel_removed)
# remove english words
eng_removed = re.sub(r'[a-zA-Z]', '', emotion_removed)
pattern = r'[' + string.punctuation + ']'
# Remove special characters from the string
spchar_removed = re.sub(pattern, '', eng_removed)
# to remove stop words
text_tokens = word_tokenize(spchar_removed)
remove_sw = ' '.join([i for i in text_tokens if i not in stopwords])
# Remove digits
            digit_removed = re.sub(r"\d+", "", remove_sw)
tw.append(digit_removed)
return tw
def display(self):
for i in self:
print(i, sep='\n')
def count_punct(self):
count = sum([1 for char in self if char in string.punctuation])
return round(count / (len(self) - self.count(" ")), 3) * 100
|
Minaaa01/Arabic-Tweets-Classification
|
preprocessing.py
|
preprocessing.py
|
py
| 1,601 |
python
|
en
|
code
| 0 |
github-code
|
50
|
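The class above is written so that its methods receive the list of tweets as `self`. A minimal usage sketch (an assumption, not part of the Minaaa01/Arabic-Tweets-Classification repository) might look like the following, provided the NLTK stopword and tokenizer data have been downloaded; the sample tweets are made up.

```python
# Hypothetical driver for the pre_processing class above; sample tweets are illustrative only.
import nltk
nltk.download('stopwords')   # needed once for stopwords.words('arabic')
nltk.download('punkt')       # needed once for word_tokenize

sample_tweets = ["هذا مثالٌ بسيط 123 :) hello", "تغريدة أخرى للاختبار!"]

cleaned = pre_processing.clean_data(sample_tweets)  # methods take the tweet list as `self`
pre_processing.display(cleaned)
print(pre_processing.count_punct("Hi, there!!"))    # share of punctuation characters, in percent
```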
21542153332
|
N = int(input())
v = [0]
f = 0
for i in range(N):
v.append(int(input()))
for i in range(1, N+1):
for j in range(i+1, N+1):
lie = []
a = [1]*(N+1)
a[i] = a[j] = -1
for k in range(1, N+1):
if v[k]*a[abs(v[k])] < 0:
lie.append(k)
if len(lie) == 2 and a[lie[0]]+a[lie[1]] == 0:
print(i, j)
f = 1
break
if f == 1:
break
if f == 0:
print('No Solution')
|
hurttttr/MyPythonCode
|
PAT/1089 狼人杀-简单版.py
|
1089 狼人杀-简单版.py
|
py
| 475 |
python
|
en
|
code
| 3 |
github-code
|
50
|
41512522344
|
# -*- coding: utf-8 -*-
"""
@author:XuMing([email protected])
@description: pip install fastapi uvicorn
"""
import argparse
import uvicorn
import sys
import os
from fastapi import FastAPI, Query
from starlette.middleware.cors import CORSMiddleware
from loguru import logger
sys.path.append('..')
from nerpy import NERModel
pwd_path = os.path.abspath(os.path.dirname(__file__))
# Use fine-tuned model
parser = argparse.ArgumentParser()
parser.add_argument("--model_name_or_path", type=str, default="shibing624/bert4ner-base-chinese",
help="Model save dir or model name")
args = parser.parse_args()
s_model = NERModel('bert', args.model_name_or_path)
# define the app
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"])
@app.get('/')
async def index():
return {"message": "index, docs url: /docs"}
@app.get('/entity')
async def entity(q: str = Query(..., min_length=1, max_length=128, title='query')):
try:
preds, outputs, entities = s_model.predict([q], split_on_space=False)
result_dict = {'entity': entities}
logger.debug(f"Successfully get sentence entity, q:{q}")
return result_dict
except Exception as e:
logger.error(e)
        return {'status': False, 'msg': str(e)}, 400  # str() so the error message is JSON serializable
if __name__ == '__main__':
uvicorn.run(app=app, host='0.0.0.0', port=8001)
|
shibing624/nerpy
|
examples/server_demo.py
|
server_demo.py
|
py
| 1,439 |
python
|
en
|
code
| 84 |
github-code
|
50
|
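A possible client call against the `/entity` endpoint above, assuming the demo server from shibing624/nerpy has been started locally (it binds to port 8001 in `uvicorn.run`); the query sentence is arbitrary.

```python
# Hypothetical client for examples/server_demo.py; requires the server running on localhost:8001.
import requests

resp = requests.get(
    "http://127.0.0.1:8001/entity",
    params={"q": "北京是中国的首都"},  # any 1-128 character sentence accepted by the Query validator
)
resp.raise_for_status()
print(resp.json())  # expected shape: {"entity": ...} from the handler's result_dict
```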
40094658430
|
from __future__ import print_function
import os
import sys
import FWCore.ParameterSet.Config as cms
# for json support
try: # FUTURE: Python 2.6, prior to 2.6 requires simplejson
import json
except:
try:
import simplejson as json
except:
print("Please use lxplus or set an environment (for example crab) with json lib available")
sys.exit(1)
inputfiles = os.environ["ALIGNMENT_INPUTFILES"].split(" ")
iteration = int(os.environ["ALIGNMENT_ITERATION"])
jobnumber = int(os.environ["ALIGNMENT_JOBNUMBER"])
mapplots = (os.environ["ALIGNMENT_MAPPLOTS"] == "True")
segdiffplots = (os.environ["ALIGNMENT_SEGDIFFPLOTS"] == "True")
curvatureplots = (os.environ["ALIGNMENT_CURVATUREPLOTS"] == "True")
globaltag = os.environ["ALIGNMENT_GLOBALTAG"]
inputdb = os.environ["ALIGNMENT_INPUTDB"]
trackerconnect = os.environ["ALIGNMENT_TRACKERCONNECT"]
trackeralignment = os.environ["ALIGNMENT_TRACKERALIGNMENT"]
trackerAPEconnect = os.environ["ALIGNMENT_TRACKERAPECONNECT"]
trackerAPE = os.environ["ALIGNMENT_TRACKERAPE"]
trackerBowsconnect = os.environ["ALIGNMENT_TRACKERBOWSCONNECT"]
trackerBows = os.environ["ALIGNMENT_TRACKERBOWS"]
gprcdconnect = os.environ["ALIGNMENT_GPRCDCONNECT"]
gprcd = os.environ["ALIGNMENT_GPRCD"]
iscosmics = (os.environ["ALIGNMENT_ISCOSMICS"] == "True")
station123params = os.environ["ALIGNMENT_STATION123PARAMS"]
station4params = os.environ["ALIGNMENT_STATION4PARAMS"]
cscparams = os.environ["ALIGNMENT_CSCPARAMS"]
minTrackPt = float(os.environ["ALIGNMENT_MINTRACKPT"])
maxTrackPt = float(os.environ["ALIGNMENT_MAXTRACKPT"])
minTrackP = float(os.environ["ALIGNMENT_MINTRACKP"])
maxTrackP = float(os.environ["ALIGNMENT_MAXTRACKP"])
minTrackerHits = int(os.environ["ALIGNMENT_MINTRACKERHITS"])
maxTrackerRedChi2 = float(os.environ["ALIGNMENT_MAXTRACKERREDCHI2"])
allowTIDTEC = (os.environ["ALIGNMENT_ALLOWTIDTEC"] == "True")
twoBin = (os.environ["ALIGNMENT_TWOBIN"] == "True")
weightAlignment = (os.environ["ALIGNMENT_WEIGHTALIGNMENT"] == "True")
minAlignmentHits = int(os.environ["ALIGNMENT_MINALIGNMENTHITS"])
combineME11 = (os.environ["ALIGNMENT_COMBINEME11"] == "True")
maxEvents = int(os.environ["ALIGNMENT_MAXEVENTS"])
skipEvents = int(os.environ["ALIGNMENT_SKIPEVENTS"])
maxResSlopeY = float(os.environ["ALIGNMENT_MAXRESSLOPEY"])
preFilter = (os.environ["ALIGNMENT_PREFILTER"] == "True")
muonCollectionTag = os.environ["ALIGNMENT_MUONCOLLECTIONTAG"]
maxDxy = float(os.environ["ALIGNMENT_MAXDXY"])
minNCrossedChambers = int(os.environ["ALIGNMENT_MINNCROSSEDCHAMBERS"])
# optionally: create ntuples along with tmp files
createAlignNtuple = False
envNtuple = os.getenv("ALIGNMENT_CREATEALIGNNTUPLE")
if envNtuple is not None:
if envNtuple=='True': createAlignNtuple = True
# optionally: create a ntuple with MapPlot plugin
createMapNtuple = False
envNtuple = os.getenv("ALIGNMENT_CREATEMAPNTUPLE")
if envNtuple is not None:
if envNtuple=='True': createMapNtuple = True
# optionally do selective DT or CSC alignment
doDT = True
doCSC = True
envDT = os.getenv("ALIGNMENT_DO_DT")
envCSC = os.getenv("ALIGNMENT_DO_CSC")
if envDT is not None and envCSC is not None:
if envDT=='True' and envCSC=='False':
doDT = True
doCSC = False
if envDT=='False' and envCSC=='True':
doDT = False
doCSC = True
# optionally use JSON file for good lumi mask
good_lumis = []
json_file = os.getenv("ALIGNMENT_JSON")
#json_file = 'Cert_136035-144114_7TeV_StreamExpress_Collisions10_JSON.txt'
if json_file is not None and json_file != '':
    jsonfile = open(json_file, 'r')  # the Python 2 file() builtin no longer exists
jsondict = json.load(jsonfile)
runs = sorted(jsondict.keys())
for run in runs:
blocks = sorted(jsondict[run])
prevblock = [-2,-2]
for lsrange in blocks:
if lsrange[0] == prevblock[1]+1:
#print "Run: ",run,"- This lumi starts at ", lsrange[0], " previous ended at ", prevblock[1]+1, " so I should merge"
prevblock[1] = lsrange[1]
good_lumis[-1] = str("%s:%s-%s:%s" % (run, prevblock[0], run, prevblock[1]))
else:
good_lumis.append(str("%s:%s-%s:%s" % (run, lsrange[0], run, lsrange[1])))
prevblock = lsrange
process = cms.Process("GATHER")
process.load("Geometry.MuonNumbering.muonNumberingInitialization_cfi")
process.load("Geometry.DTGeometry.dtGeometry_cfi")
process.load("Geometry.RPCGeometry.rpcGeometry_cfi")
process.load("Geometry.CSCGeometry.cscGeometry_cfi")
process.load("Geometry.CommonTopologies.bareGlobalTrackingGeometry_cfi")
#add TrackDetectorAssociator lookup maps to the EventSetup
process.load("TrackingTools.TrackAssociator.DetIdAssociatorESProducer_cff")
from TrackingTools.TrackAssociator.DetIdAssociatorESProducer_cff import *
from TrackingTools.TrackAssociator.default_cfi import *
process.load("Configuration.StandardSequences.Reconstruction_cff")
process.MuonNumberingInitialization = cms.ESProducer("MuonNumberingInitialization")
process.MuonNumberingRecord = cms.ESSource( "EmptyESSource",
recordName = cms.string( "MuonNumberingRecord" ),
iovIsRunNotTime = cms.bool( True ),
firstValid = cms.vuint32( 1 )
)
process.load("Configuration.StandardSequences.GeometryDB_cff")
process.load('Configuration.StandardSequences.MagneticField_cff')
if len(good_lumis)>0:
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(*inputfiles),
skipEvents = cms.untracked.uint32(skipEvents),
lumisToProcess = cms.untracked.VLuminosityBlockRange(*good_lumis))
else:
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(*inputfiles),
skipEvents = cms.untracked.uint32(skipEvents))
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(maxEvents))
#process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
process.MessageLogger = cms.Service("MessageLogger",
destinations = cms.untracked.vstring("cout"),
cout = cms.untracked.PSet(threshold = cms.untracked.string("ERROR"),
ERROR = cms.untracked.PSet(limit = cms.untracked.int32(10))))
process.load("Alignment.MuonAlignmentAlgorithms.MuonAlignmentFromReference_cff")
process.looper.ParameterBuilder.Selector.alignParams = cms.vstring("MuonDTChambers,%s,stations123" % station123params, "MuonDTChambers,%s,station4" % station4params, "MuonCSCChambers,%s" % cscparams)
# TODO : uncomment the line below when AlignmentProducer is updated:
#process.looper.muonCollectionTag = cms.InputTag(muonCollectionTag)
process.looper.algoConfig.writeTemporaryFile = "alignment%04d.tmp" % jobnumber
process.looper.algoConfig.doAlignment = False
process.looper.algoConfig.muonCollectionTag = cms.InputTag(muonCollectionTag)
process.looper.algoConfig.minTrackPt = minTrackPt
process.looper.algoConfig.maxTrackPt = maxTrackPt
process.looper.algoConfig.minTrackP = minTrackP
process.looper.algoConfig.maxTrackP = maxTrackP
process.looper.algoConfig.maxDxy = maxDxy
process.looper.algoConfig.minTrackerHits = minTrackerHits
process.looper.algoConfig.maxTrackerRedChi2 = maxTrackerRedChi2
process.looper.algoConfig.allowTIDTEC = allowTIDTEC
process.looper.algoConfig.minNCrossedChambers = minNCrossedChambers
process.looper.algoConfig.twoBin = twoBin
process.looper.algoConfig.weightAlignment = weightAlignment
process.looper.algoConfig.minAlignmentHits = minAlignmentHits
process.looper.algoConfig.combineME11 = combineME11
process.looper.algoConfig.maxResSlopeY = maxResSlopeY
#process.looper.algoConfig.createNtuple = createAlignNtuple
process.looper.algoConfig.minDT13Hits = 7
process.looper.algoConfig.doDT = doDT
process.looper.algoConfig.doCSC = doCSC
process.looper.monitorConfig = cms.PSet(monitors = cms.untracked.vstring())
if mapplots:
process.load("Alignment.CommonAlignmentMonitor.AlignmentMonitorMuonSystemMap1D_cfi")
process.looper.monitorConfig.monitors.append("AlignmentMonitorMuonSystemMap1D")
process.looper.monitorConfig.AlignmentMonitorMuonSystemMap1D = process.AlignmentMonitorMuonSystemMap1D
process.looper.monitorConfig.AlignmentMonitorMuonSystemMap1D.muonCollectionTag = cms.InputTag(muonCollectionTag)
process.looper.monitorConfig.AlignmentMonitorMuonSystemMap1D.minTrackPt = minTrackPt
process.looper.monitorConfig.AlignmentMonitorMuonSystemMap1D.maxTrackPt = maxTrackPt
process.looper.monitorConfig.AlignmentMonitorMuonSystemMap1D.minTrackP = minTrackP
process.looper.monitorConfig.AlignmentMonitorMuonSystemMap1D.maxTrackP = maxTrackP
process.looper.monitorConfig.AlignmentMonitorMuonSystemMap1D.maxDxy = maxDxy
process.looper.monitorConfig.AlignmentMonitorMuonSystemMap1D.minTrackerHits = minTrackerHits
process.looper.monitorConfig.AlignmentMonitorMuonSystemMap1D.maxTrackerRedChi2 = maxTrackerRedChi2
process.looper.monitorConfig.AlignmentMonitorMuonSystemMap1D.allowTIDTEC = allowTIDTEC
process.looper.monitorConfig.AlignmentMonitorMuonSystemMap1D.minNCrossedChambers = process.looper.algoConfig.minNCrossedChambers
process.looper.monitorConfig.AlignmentMonitorMuonSystemMap1D.minDT13Hits = process.looper.algoConfig.minDT13Hits
process.looper.monitorConfig.AlignmentMonitorMuonSystemMap1D.minDT2Hits = process.looper.algoConfig.minDT2Hits
process.looper.monitorConfig.AlignmentMonitorMuonSystemMap1D.minCSCHits = process.looper.algoConfig.minCSCHits
process.looper.monitorConfig.AlignmentMonitorMuonSystemMap1D.doDT = doDT
process.looper.monitorConfig.AlignmentMonitorMuonSystemMap1D.doCSC = doCSC
process.looper.monitorConfig.AlignmentMonitorMuonSystemMap1D.createNtuple = createMapNtuple
if segdiffplots:
process.load("Alignment.CommonAlignmentMonitor.AlignmentMonitorSegmentDifferences_cfi")
process.looper.monitorConfig.monitors.append("AlignmentMonitorSegmentDifferences")
process.looper.monitorConfig.AlignmentMonitorSegmentDifferences = process.AlignmentMonitorSegmentDifferences
process.looper.monitorConfig.AlignmentMonitorSegmentDifferences.muonCollectionTag = cms.InputTag(muonCollectionTag)
process.looper.monitorConfig.AlignmentMonitorSegmentDifferences.minTrackPt = minTrackPt
process.looper.monitorConfig.AlignmentMonitorSegmentDifferences.minTrackP = minTrackP
process.looper.monitorConfig.AlignmentMonitorSegmentDifferences.maxDxy = maxDxy
process.looper.monitorConfig.AlignmentMonitorSegmentDifferences.minTrackerHits = minTrackerHits
process.looper.monitorConfig.AlignmentMonitorSegmentDifferences.maxTrackerRedChi2 = maxTrackerRedChi2
process.looper.monitorConfig.AlignmentMonitorSegmentDifferences.allowTIDTEC = allowTIDTEC
process.looper.monitorConfig.AlignmentMonitorSegmentDifferences.minNCrossedChambers = process.looper.algoConfig.minNCrossedChambers
process.looper.monitorConfig.AlignmentMonitorSegmentDifferences.minDT13Hits = process.looper.algoConfig.minDT13Hits
process.looper.monitorConfig.AlignmentMonitorSegmentDifferences.minDT2Hits = process.looper.algoConfig.minDT2Hits
process.looper.monitorConfig.AlignmentMonitorSegmentDifferences.minCSCHits = process.looper.algoConfig.minCSCHits
process.looper.monitorConfig.AlignmentMonitorSegmentDifferences.doDT = doDT
process.looper.monitorConfig.AlignmentMonitorSegmentDifferences.doCSC = doCSC
if curvatureplots:
process.load("Alignment.CommonAlignmentMonitor.AlignmentMonitorMuonVsCurvature_cfi")
process.looper.monitorConfig.monitors.append("AlignmentMonitorMuonVsCurvature")
process.looper.monitorConfig.AlignmentMonitorMuonVsCurvature = process.AlignmentMonitorMuonVsCurvature
process.looper.monitorConfig.AlignmentMonitorMuonVsCurvature.muonCollectionTag = cms.InputTag(muonCollectionTag)
process.looper.monitorConfig.AlignmentMonitorMuonVsCurvature.minTrackPt = minTrackPt
#process.looper.monitorConfig.AlignmentMonitorMuonVsCurvature.minTrackP = minTrackP
process.looper.monitorConfig.AlignmentMonitorMuonVsCurvature.maxDxy = maxDxy
process.looper.monitorConfig.AlignmentMonitorMuonVsCurvature.minTrackerHits = minTrackerHits
process.looper.monitorConfig.AlignmentMonitorMuonVsCurvature.maxTrackerRedChi2 = maxTrackerRedChi2
process.looper.monitorConfig.AlignmentMonitorMuonVsCurvature.allowTIDTEC = allowTIDTEC
process.looper.monitorConfig.AlignmentMonitorMuonVsCurvature.minNCrossedChambers = process.looper.algoConfig.minNCrossedChambers
process.looper.monitorConfig.AlignmentMonitorMuonVsCurvature.minDT13Hits = process.looper.algoConfig.minDT13Hits
process.looper.monitorConfig.AlignmentMonitorMuonVsCurvature.minDT2Hits = process.looper.algoConfig.minDT2Hits
process.looper.monitorConfig.AlignmentMonitorMuonVsCurvature.minCSCHits = process.looper.algoConfig.minCSCHits
process.looper.monitorConfig.AlignmentMonitorMuonVsCurvature.doDT = doDT
process.looper.monitorConfig.AlignmentMonitorMuonVsCurvature.doCSC = doCSC
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = cms.string(globaltag)
process.looper.applyDbAlignment = True
process.load("RecoVertex.BeamSpotProducer.BeamSpot_cfi")
process.load("Alignment.MuonAlignmentAlgorithms.MuonAlignmentPreFilter_cfi")
process.MuonAlignmentPreFilter.minTrackPt = minTrackPt
process.MuonAlignmentPreFilter.minTrackP = minTrackP
process.MuonAlignmentPreFilter.minTrackerHits = minTrackerHits
process.MuonAlignmentPreFilter.allowTIDTEC = allowTIDTEC
if iscosmics:
process.MuonAlignmentPreFilter.tracksTag = cms.InputTag("ALCARECOMuAlGlobalCosmics:GlobalMuon")
if preFilter: process.Path = cms.Path(process.offlineBeamSpot * process.MuonAlignmentPreFilter * process.MuonAlignmentFromReferenceGlobalCosmicRefit)
else: process.Path = cms.Path(process.offlineBeamSpot * process.MuonAlignmentFromReferenceGlobalCosmicRefit)
process.looper.tjTkAssociationMapTag = cms.InputTag("MuonAlignmentFromReferenceGlobalCosmicRefit:Refitted")
else:
#process.MuonAlignmentPreFilter.tracksTag = cms.InputTag("ALCARECOMuAlCalIsolatedMu:GlobalMuon")
process.MuonAlignmentPreFilter.tracksTag = cms.InputTag("globalMuons")
process.MuonAlignmentFromReferenceGlobalMuonRefit.Tracks = cms.InputTag("globalMuons")
if preFilter: process.Path = cms.Path(process.offlineBeamSpot * process.MuonAlignmentPreFilter * process.MuonAlignmentFromReferenceGlobalMuonRefit)
else: process.Path = cms.Path(process.offlineBeamSpot * process.MuonAlignmentFromReferenceGlobalMuonRefit)
process.looper.tjTkAssociationMapTag = cms.InputTag("MuonAlignmentFromReferenceGlobalMuonRefit:Refitted")
if len(muonCollectionTag) > 0: # use Tracker Muons
process.Path = cms.Path(process.offlineBeamSpot * process.newmuons)
process.MuonAlignmentFromReferenceInputDB.connect = cms.string("sqlite_file:%s" % inputdb)
process.MuonAlignmentFromReferenceInputDB.toGet = cms.VPSet(cms.PSet(record = cms.string("DTAlignmentRcd"), tag = cms.string("DTAlignmentRcd")),
cms.PSet(record = cms.string("CSCAlignmentRcd"), tag = cms.string("CSCAlignmentRcd")))
if trackerconnect != "":
from CondCore.DBCommon.CondDBSetup_cfi import *
process.TrackerAlignmentInputDB = cms.ESSource("PoolDBESSource",
CondDBSetup,
connect = cms.string(trackerconnect),
toGet = cms.VPSet(cms.PSet(record = cms.string("TrackerAlignmentRcd"), tag = cms.string(trackeralignment))))
process.es_prefer_TrackerAlignmentInputDB = cms.ESPrefer("PoolDBESSource", "TrackerAlignmentInputDB")
if trackerAPEconnect != "":
from CondCore.DBCommon.CondDBSetup_cfi import *
process.TrackerAlignmentErrorInputDB = cms.ESSource("PoolDBESSource",
CondDBSetup,
connect = cms.string(trackerAPEconnect),
toGet = cms.VPSet(cms.PSet(cms.PSet(record = cms.string("TrackerAlignmentErrorExtendedRcd"), tag = cms.string(trackerAPE)))))
process.es_prefer_TrackerAlignmentErrorInputDB = cms.ESPrefer("PoolDBESSource", "TrackerAlignmentErrorInputDB")
if trackerBowsconnect != "":
from CondCore.DBCommon.CondDBSetup_cfi import *
process.TrackerSurfaceDeformationInputDB = cms.ESSource("PoolDBESSource",
CondDBSetup,
connect = cms.string(trackerBowsconnect),
toGet = cms.VPSet(cms.PSet(cms.PSet(record = cms.string("TrackerSurfaceDeformationRcd"), tag = cms.string(trackerBows)))))
process.es_prefer_TrackerSurfaceDeformationInputDB = cms.ESPrefer("PoolDBESSource", "TrackerSurfaceDeformationInputDB")
if gprcdconnect != "":
from CondCore.DBCommon.CondDBSetup_cfi import *
process.GlobalPositionInputDB = cms.ESSource("PoolDBESSource",
CondDBSetup,
connect = cms.string(gprcdconnect),
toGet = cms.VPSet(cms.PSet(record = cms.string("GlobalPositionRcd"), tag = cms.string(gprcd))))
process.es_prefer_GlobalPositionInputDB = cms.ESPrefer("PoolDBESSource", "GlobalPositionInputDB")
## the following was needed for Nov 2010 alignment to pick up new lorentz angle and strip conditions for tracker
#process.poolDBESSourceLA = cms.ESSource("PoolDBESSource",
# BlobStreamerName = cms.untracked.string('TBufferBlobStreamingService'),
# DBParameters = cms.PSet(
# messageLevel = cms.untracked.int32(0),
# authenticationPath = cms.untracked.string('.')
# #messageLevel = cms.untracked.int32(2),
# #authenticationPath = cms.untracked.string('/path/to/authentication')
# ),
# timetype = cms.untracked.string('runnumber'),
# connect = cms.string('frontier://PromptProd/CMS_COND_31X_STRIP'),
# toGet = cms.VPSet(cms.PSet(
# record = cms.string('SiStripLorentzAngleRcd'),
# tag = cms.string('SiStripLorentzAngle_GR10_v2_offline')
# ))
#)
#process.es_prefer_LA = cms.ESPrefer('PoolDBESSource','poolDBESSourceLA')
#
#process.poolDBESSourceBP = cms.ESSource("PoolDBESSource",
# BlobStreamerName = cms.untracked.string('TBufferBlobStreamingService'),
# DBParameters = cms.PSet(
# messageLevel = cms.untracked.int32(0),
# authenticationPath = cms.untracked.string('.')
# #messageLevel = cms.untracked.int32(2),
# #authenticationPath = cms.untracked.string('/path/to/authentication')
# ),
# timetype = cms.untracked.string('runnumber'),
# connect = cms.string('frontier://PromptProd/CMS_COND_31X_STRIP'),
# toGet = cms.VPSet(cms.PSet(
# record = cms.string('SiStripConfObjectRcd'),
# tag = cms.string('SiStripShiftAndCrosstalk_GR10_v2_offline')
# ))
#)
#process.es_prefer_BP = cms.ESPrefer('PoolDBESSource','poolDBESSourceBP')
process.looper.saveToDB = False
process.looper.saveApeToDB = False
del process.PoolDBOutputService
process.TFileService = cms.Service("TFileService", fileName = cms.string("plotting%03d.root" % jobnumber))
|
cms-sw/cmssw
|
Alignment/MuonAlignmentAlgorithms/python/gather_cfg.py
|
gather_cfg.py
|
py
| 19,154 |
python
|
en
|
code
| 985 |
github-code
|
50
|
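gather_cfg.py above takes every setting from `ALIGNMENT_*` environment variables rather than command-line arguments, so a driver has to export them before handing the config to `cmsRun`. A rough sketch of such a wrapper follows; all values are placeholders, and the full variable list is the one read at the top of the config.

```python
# Illustrative wrapper only; values are placeholders, not defaults of the alignment workflow.
import os
import subprocess

os.environ.update({
    "ALIGNMENT_INPUTFILES": "file:one.root file:two.root",
    "ALIGNMENT_ITERATION": "1",
    "ALIGNMENT_JOBNUMBER": "0",
    "ALIGNMENT_MAPPLOTS": "True",
    "ALIGNMENT_SEGDIFFPLOTS": "False",
    "ALIGNMENT_CURVATUREPLOTS": "False",
    "ALIGNMENT_GLOBALTAG": "<global tag>",
    "ALIGNMENT_INPUTDB": "inputdb.db",
    "ALIGNMENT_ISCOSMICS": "False",
    "ALIGNMENT_PREFILTER": "False",
    # ...every other ALIGNMENT_* variable read at the top of gather_cfg.py must be set as well,
    # or the os.environ[...] lookups there will raise KeyError.
})
subprocess.check_call(["cmsRun", "gather_cfg.py"])
```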
22647988119
|
from typing import List
import bisect
class Solution:
    # Solution 1: two pointers
def twoSum1(self, numbers: List[int], target: int) -> List[int]:
left, right = 0, len(numbers) - 1
while not left == right:
if numbers[left] + numbers[right] < target:
left += 1
elif numbers[left] + numbers[right] > target:
right -= 1
else:
return left + 1, right + 1
    # Solution 2: binary search
def twoSum2(self, numbers: List[int], target: int) -> List[int]:
for i, number in enumerate(numbers):
left, right = i + 1, len(numbers) - 1
expected = target - number
            # binary search for the complementary value
while left <= right:
mid = left + (right - left) // 2
if numbers[mid] < expected:
left = mid + 1
elif numbers[mid] > expected:
right = mid - 1
else:
return [i + 1, mid + 1]
    # Solution 3: bisect module + slicing
def twoSum3(self, numbers: List[int], target: int) -> List[int]:
for i, number in enumerate(numbers):
expected = target - number
index = bisect.bisect_left(numbers[i + 1:], expected)
if index < len(numbers[i + 1:]) and \
numbers[index + i + 1] == expected:
return [i + 1, index + i + 2]
    # Solution 4: bisect module with minimized slicing
def twoSum4(self, numbers: List[int], target: int) -> List[int]:
for i, number in enumerate(numbers):
expected = target - number
nums = numbers[i + 1:]
index = bisect.bisect_left(nums, expected)
if index < len(nums) and numbers[index + i + 1] == expected:
return [i + 1, index + i + 2]
    # Solution 5: bisect module without slicing
def twoSum5(self, numbers: List[int], target: int) -> List[int]:
for i, number in enumerate(numbers):
expected = target - number
index = bisect.bisect_left(numbers, expected, i + 1)
if index < len(numbers) and numbers[index] == expected:
return [i + 1, index + 1]
|
Wooyongjeong/python-algorithm-interview
|
18 이진 검색/68 두 수의 합 2.py
|
68 두 수의 합 2.py
|
py
| 2,272 |
python
|
en
|
code
| 0 |
github-code
|
50
|
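A quick self-check (not in the original file) confirming that the five variants above return the same 1-indexed positions on the classic sorted-array example.

```python
# All five solutions should agree on the standard "two sum II" example.
sol = Solution()
numbers, target = [2, 7, 11, 15], 9
for method in (sol.twoSum1, sol.twoSum2, sol.twoSum3, sol.twoSum4, sol.twoSum5):
    print(method.__name__, list(method(numbers, target)))  # expected: [1, 2] (1-indexed)
```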
26187968939
|
import os
import sys
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
)
from megatron.training import forward_step
from megatron.utils import setup_for_inference_or_eval, init_wandb
from megatron.logging import tb_wandb_log
from eval_tasks import run_eval_harness
from pprint import pprint
from datetime import datetime
import json
def main():
model, neox_args = setup_for_inference_or_eval(use_cache=False)
results = run_eval_harness(
model,
forward_step,
neox_args,
eval_tasks=neox_args.eval_tasks,
bootstrap_iters=10000,
)
if neox_args.rank == 0:
init_wandb(neox_args=neox_args)
# log to wandb
for k, v in results["results"].items():
if isinstance(v, dict):
for k2, v2 in v.items():
k3 = "_".join([k, k2])
tb_wandb_log(
f"eval/{k3}",
v2,
neox_args.iteration,
use_wandb=neox_args.use_wandb,
)
else:
tb_wandb_log(
f"eval/{k}",
v,
neox_args.iteration,
use_wandb=neox_args.use_wandb,
)
pprint(results)
results_path = (
f'eval_results_{datetime.now().strftime("%m-%d-%Y-%H-%M-%S")}.json'
)
if neox_args.eval_results_prefix:
results_path = f"{neox_args.eval_results_prefix}_{results_path}"
with open(results_path, "w") as f:
json.dump(results, f, indent=4)
if __name__ == "__main__":
main()
|
EleutherAI/gpt-neox
|
evaluate.py
|
evaluate.py
|
py
| 1,707 |
python
|
en
|
code
| 6,191 |
github-code
|
50
|
35209467789
|
import logging, py_libgit.settings
logger = logging.getLogger(__name__)
from py_libgit.core.commit_blob import CommitBlob
from py_libgit.core.repo import Repo
class Commit:
def __init__(self):
logger.info('Create the Commit object')
self.repo = Repo()
def create_commit(self, author, commit_message, root_tree_entry):
'''Create a new commit under the object directory to save the current working history
Return:
The hash of the commit entry
'''
commit_hash = self.repo.create_commit(author, commit_message, root_tree_entry)
return commit_hash
|
tony-yang/e-libgit
|
py_libgit/py_libgit/api/commit.py
|
commit.py
|
py
| 619 |
python
|
en
|
code
| 0 |
github-code
|
50
|
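A hypothetical call to the wrapper above, assuming a py_libgit repository has already been initialised so that `Repo()` can locate its object store; the author string and tree hash are placeholders.

```python
# Hypothetical usage of py_libgit's Commit wrapper; all argument values are placeholders.
commit = Commit()
commit_hash = commit.create_commit(
    author="Jane Doe <jane@example.com>",
    commit_message="Initial commit",
    root_tree_entry="<hash of the root tree object>",
)
print(commit_hash)
```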
41851812122
|
import cv2
import numpy as np
import smtplib
import threading
import geocoder
from datetime import datetime
# Global variables
Alarm_Status = False
Email_Status = False
Fire_Reported = 0
# Function to send an email with incident location and screenshot
def send_mail_function(lat, lon, screenshot_path):
recipientEmail = "[email protected]"
recipientEmail = recipientEmail.lower()
    # Use the incident location passed in by the caller
    location = (lat, lon)
try:
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.login("[email protected]", 'madhan2004')
# Construct the email message with location information
message = f"Warning: A Fire Accident has been reported at the following location:\nLatitude: {location[0]}\nLongitude: {location[1]}"
# Attach the screenshot to the email
with open(screenshot_path, 'rb') as screenshot_file:
screenshot_data = screenshot_file.read()
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.base import MIMEBase
from email import encoders
msg = MIMEMultipart()
msg.attach(MIMEText(message, 'plain'))
image = MIMEImage(screenshot_data, name="screenshot.jpg")
msg.attach(image)
# Set email subject and recipients
msg['Subject'] = "Fire Report"
msg['From'] = "[email protected]"
msg['To'] = recipientEmail
# Send the email
server.sendmail('[email protected]', recipientEmail, msg.as_string())
print("Sent to {}".format(recipientEmail))
server.close()
except Exception as e:
print(e)
# Function to dehaze the frame
def dehaze(frame, t=0.1):
I = frame / 255.0 # Normalize the frame
dark_channel = np.min(I, axis=2)
A = 1 - t * dark_channel
A[A < 0.1] = 0.1 # Minimum value for A to avoid extreme dehazing
J = (I - A[:, :, np.newaxis]) / np.maximum(A[:, :, np.newaxis], 0.1)
J = (J * 255).astype(np.uint8)
return J
# Function to detect fire and draw bounding boxes
def detect_fire(frame):
frame = cv2.resize(frame, (960, 540))
# Dehaze the frame (you can use the dehaze function from a previous response)
dehazed_frame = dehaze(frame)
blur = cv2.GaussianBlur(dehazed_frame, (21, 21), 0)
hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
lower = [18, 50, 50]
upper = [35, 255, 255]
lower = np.array(lower, dtype="uint8")
upper = np.array(upper, dtype="uint8")
mask = cv2.inRange(hsv, lower, upper)
no_red = cv2.countNonZero(mask)
if int(no_red) > 15000:
# Find contours of the fire region
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if contours:
for contour in contours:
# Draw a bounding box around the fire region
x, y, w, h = cv2.boundingRect(contour)
cv2.rectangle(dehazed_frame, (x, y), (x + w, y + h), (0, 0, 255), 5)
# Add text 'Fire'
cv2.putText(dehazed_frame, 'Fire', (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)
return True, dehazed_frame
else:
return False, dehazed_frame
# Capture video from a file or camera (change the source as needed)
video = cv2.VideoCapture("C:/Users/User/Downloads/fire/4.mp4") # Replace with your video source
while True:
(grabbed, frame) = video.read()
if not grabbed:
break
fire_detected, frame = detect_fire(frame)
if fire_detected:
Fire_Reported += 1
if Fire_Reported >= 1:
if Alarm_Status == False:
# Get and pass the current location to the send_mail_function
g = geocoder.ip('me')
location = g.latlng
# Capture a screenshot of the current frame
timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
screenshot_path = f"screenshot_{timestamp}.jpg"
cv2.imwrite(screenshot_path, frame)
# Send the email with the screenshot
threading.Thread(target=send_mail_function, args=(location[0], location[1], screenshot_path)).start()
Alarm_Status = True
cv2.imshow("output", frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cv2.destroyAllWindows()
video.release()
|
dharaneesh-202/fire-dehazing
|
mail_fire.py
|
mail_fire.py
|
py
| 4,702 |
python
|
en
|
code
| 0 |
github-code
|
50
|
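To exercise `detect_fire` above without a video stream, SMTP account or geocoder lookup, a single frame can be fed to it directly. The sketch below is an assumption (the image path is a placeholder), not part of mail_fire.py.

```python
# Single-frame smoke test for detect_fire(); 'test_frame.jpg' is a placeholder path.
import cv2

frame = cv2.imread("test_frame.jpg")
if frame is None:
    raise SystemExit("could not read test_frame.jpg")

found, annotated = detect_fire(frame)   # returns (bool, dehazed frame with boxes if fire found)
print("fire detected:", found)
cv2.imwrite("annotated.jpg", annotated)
```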
31040691362
|
import torch
import torch.nn as nn
from ..gnn.nf import NFGNN
from ..readout.sum_and_max import SumAndMax
__all__ = ['NFPredictor']
# pylint: disable=W0221
class NFPredictor(nn.Module):
"""Neural Fingerprint (NF) for regression and classification on graphs.
NF is introduced in `Convolutional Networks on Graphs for Learning Molecular
Fingerprints <https://arxiv.org/abs/1509.09292>`__. This model can be used for
regression and classification on graphs.
After updating node representations, we perform a sum and max pooling on them
and concatenate the output of the two operations, which is then fed into an
MLP for final prediction.
For classification tasks, the output will be logits, i.e.
values before sigmoid or softmax.
Parameters
----------
in_feats : int
Number of input node features.
n_tasks : int
Number of tasks, which is also the output size. Default to 1.
hidden_feats : list of int, optional
``hidden_feats[i]`` gives the size of node representations after the i-th NF layer.
``len(hidden_feats)`` equals the number of NF layers. By default, we use
``[64, 64]``.
max_degree : int
The maximum node degree to consider when updating weights. Default to be 10.
activation : list of activation functions or None
If not None, ``activation[i]`` gives the activation function to be used for
the i-th NF layer. ``len(activation)`` equals the number of NF layers.
By default, ReLU is applied for all NF layers.
batchnorm : list of bool, optional
``batchnorm[i]`` decides if batch normalization is to be applied on the output of
the i-th NF layer. ``len(batchnorm)`` equals the number of NF layers. By default,
batch normalization is applied for all NF layers.
dropout : list of float, optional
``dropout[i]`` decides the dropout to be applied on the output of the i-th NF layer.
``len(dropout)`` equals the number of NF layers. By default, dropout is not applied
for all NF layers.
    predictor_hidden_size : int
        Size for hidden representations in the output MLP predictor. Default to be 128.
    predictor_batchnorm : bool
        Whether to apply batch normalization in the output MLP predictor. Default to be True.
predictor_dropout : float
The dropout probability in the output MLP predictor. Default to be 0.
predictor_activation : activation function
The activation function in the output MLP predictor. Default to be Tanh.
"""
def __init__(self, in_feats, n_tasks=1, hidden_feats=None, max_degree=10, activation=None,
batchnorm=None, dropout=None, predictor_hidden_size=128, predictor_batchnorm=True,
predictor_dropout=0., predictor_activation=torch.tanh):
super(NFPredictor, self).__init__()
self.gnn = NFGNN(in_feats, hidden_feats, max_degree, activation, batchnorm, dropout)
gnn_out_feats = self.gnn.gnn_layers[-1].out_feats
self.node_to_graph = nn.Linear(gnn_out_feats, predictor_hidden_size)
if predictor_batchnorm:
self.predictor_bn = nn.BatchNorm1d(predictor_hidden_size)
else:
self.predictor_bn = None
if predictor_dropout > 0:
self.predictor_dropout = nn.Dropout(predictor_dropout)
else:
self.predictor_dropout = None
self.readout = SumAndMax()
self.predictor_activation = predictor_activation
self.predict = nn.Linear(2 * predictor_hidden_size, n_tasks)
def reset_parameters(self):
"""Reinitialize model parameters."""
self.gnn.reset_parameters()
self.node_to_graph.reset_parameters()
if self.predictor_bn is not None:
self.predictor_bn.reset_parameters()
def forward(self, g, feats):
"""Update node representations.
Parameters
----------
g : DGLGraph
DGLGraph for a batch of graphs
feats : FloatTensor of shape (N, M1)
* N is the total number of nodes in the batch of graphs
* M1 is the input node feature size, which equals in_feats in initialization
Returns
-------
FloatTensor of shape (B, n_tasks)
* Predictions on graphs
* B for the number of graphs in the batch
"""
feats = self.gnn(g, feats)
feats = self.node_to_graph(feats)
if self.predictor_bn is not None:
feats = self.predictor_bn(feats)
if self.predictor_dropout is not None:
feats = self.predictor_dropout(feats)
graph_feats = self.readout(g, feats)
if self.predictor_activation is not None:
graph_feats = self.predictor_activation(graph_feats)
return self.predict(graph_feats)
|
awslabs/dgl-lifesci
|
python/dgllife/model/model_zoo/nf_predictor.py
|
nf_predictor.py
|
py
| 4,890 |
python
|
en
|
code
| 641 |
github-code
|
50
|
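A rough forward-pass sketch for the predictor above (not taken from the dgl-lifesci repository); it requires torch and dgl and uses the `NFPredictor` class defined above. The graph and features are random, and only the output shape is of interest.

```python
# Minimal shape check for NFPredictor; graph structure and features are arbitrary.
import torch
import dgl

g = dgl.graph((torch.tensor([0, 1, 2]), torch.tensor([1, 2, 0])), num_nodes=3)
bg = dgl.batch([g, g])                    # batch of B = 2 graphs
feats = torch.randn(bg.num_nodes(), 16)   # (N, in_feats)

model = NFPredictor(in_feats=16, n_tasks=2)
out = model(bg, feats)
print(out.shape)                          # expected: torch.Size([2, 2]), i.e. (B, n_tasks)
```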
40200792380
|
import FWCore.ParameterSet.Config as cms
TrackerDTCAnalyzer_params = cms.PSet (
InputTagAccepted = cms.InputTag( "TrackerDTCProducer", "StubAccepted" ), # dtc passed stubs selection
InputTagLost = cms.InputTag( "TrackerDTCProducer", "StubLost" ), # dtc lost stubs selection
InputTagTTStubDetSetVec = cms.InputTag( "TTStubsFromPhase2TrackerDigis", "StubAccepted" ), # original TTStub selection
InputTagTTClusterDetSetVec = cms.InputTag( "TTClustersFromPhase2TrackerDigis", "ClusterInclusive" ), # original TTCluster selection
InputTagTTClusterAssMap = cms.InputTag( "TTClusterAssociatorFromPixelDigis", "ClusterAccepted" ), # tag of AssociationMap between TTCluster and TrackingParticles
  UseMCTruth = cms.bool( True ) # enables analysis of TPs
)
|
cms-sw/cmssw
|
L1Trigger/TrackerDTC/python/Analyzer_cfi.py
|
Analyzer_cfi.py
|
py
| 991 |
python
|
en
|
code
| 985 |
github-code
|
50
|
27211452073
|
# Difference-array utility class
class Difference(object):
def __init__(self):
self.diff = []
    # Take an initial array; the range operations below are applied on top of it
def difference(self, nums):
assert len(nums) > 0
m = len(nums)
        # build the difference array from the initial values
        self.diff = [0] * (m+1)  # all-zero array, one element longer than nums
        nums.insert(0, 0)  # prepend a 0 so the loop below can index nums[i - 1]
for i in range(m + 1):
self.diff[i] = nums[i] - nums[i - 1]
nums.pop(0)
self.diff.pop(0)
    # add val (may be negative) to every element in the closed interval [i, j]
def increment(self, i, j, val):
        self.diff[i] += val  # +val at position i
        if j + 1 < len(self.diff):  # -val at j+1 so the prefix reconstruction stops adding val after j
self.diff[j+1] -= val
print(self.diff)
    # rebuild the array from the difference array
    # returns the result array
def result(self, nums):
# res = [0] * len(self.diff)
# res = nums
        res = self.diff  # bug1: must be self.diff; starting from nums would lose the change at index 0
        # build the result array from the diff values
for i in range(1, len(self.diff)):
res[i] = res[i-1] + self.diff[i]
return res # ??
# print(res)
# Range Addition
# Problem description:
"""
https://labuladong.gitee.io/algo/2/21/56/
"""
# Related tags: difference array
# Approach:
"""
Using the Difference utility class provided above, refresh the difference array
after every update operation and then rebuild nums from it.
"""
# Run result:
"""
"""
def getModifiedArray(length, updates):
nums = [0] * length
diff = Difference()
for update in updates:
i, j, val = update[0], update[1], update[2]
diff.difference(nums)
diff.increment(i, j, val)
nums = diff.result(nums)
print(nums)
if __name__ == '__main__':
    # test the Difference utility class
# diff = Difference()
# nums = [8, 2, 6, 3, 1]
# i, j, val = 1, 3, 3
# diff.difference(nums)
# diff.increment(i, j, val)
# diff.result(nums)
# test getModifiedArray
length = 5
updates = [[1, 3, 2], [2, 4, 3], [0, 2, -2]]
getModifiedArray(length, updates)
|
zranguai/leetcode-solution
|
LeetCode/数组题/370.区间加法(差分数组).py
|
370.区间加法(差分数组).py
|
py
| 2,257 |
python
|
en
|
code
| 1 |
github-code
|
50
|
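For comparison with the annotated exercise above, a compact reference implementation of the same difference-array idea (an addition, not part of the original file) gives the expected answer for the test case used in `__main__`.

```python
# Reference version of LeetCode 370 with a difference array, for checking the exercise above.
def get_modified_array(length, updates):
    diff = [0] * (length + 1)
    for i, j, val in updates:
        diff[i] += val         # start adding val at index i
        diff[j + 1] -= val     # stop adding val after index j
    res, running = [], 0
    for x in diff[:length]:
        running += x
        res.append(running)
    return res

print(get_modified_array(5, [[1, 3, 2], [2, 4, 3], [0, 2, -2]]))  # [-2, 0, 3, 5, 3]
```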
74781837275
|
class Settings:
def __init__(self):
self.screen_with = 1200
self.screen_height = 800
self.bg_color = (230, 230, 230)
self.ship_speed = 1.5
        # bullet settings
self.bullet_speed = 1.0
self.bullet_width = 3
self.bullet_height = 15
self.bullet_color = (60, 60, 60)
self.bullet_allowed = 3
        # alien settings
self.alien_speed = 1.0
self.fleet_drop_speed = 10
        # fleet_direction of 1 means move right, -1 means move left
self.fleet_direction = 1
|
eatureide/python
|
alien_invasion/settings.py
|
settings.py
|
py
| 558 |
python
|
en
|
code
| 0 |
github-code
|
50
|
72961458395
|
from os import path
from config import load_config
import subprocess
def valid_signature(fpath, sig_fpath):
command = ['gpg', '--verify', sig_fpath, fpath]
rc = subprocess.call(command)
return not rc
def sign_file(fpath, sig_fpath):
command = ['gpg', '--output', sig_fpath, '--detach-sig', fpath]
rc = subprocess.call(command)
if rc:
        raise SignatureFailedError('Failed with return code: {rc}'.format(rc=rc))
class SignatureFailedError(OSError):
pass
if __name__ == "__main__":
(tzdata_loc, iana_sig_loc, du_sig_loc) = load_config(
('tzdata_loc', 'iana_sig_loc', 'du_sig_loc')
)
from tzdata_files import get_tzdata_files, get_sig_files
from tzdata_files import load_directory, data_key, sig_key
d_flist = load_directory(tzdata_loc)
s_flist = load_directory(iana_sig_loc)
data_files = get_tzdata_files(d_flist)
data_files = get_sig_files(s_flist, c_dict=data_files)
invalid_sigs = []
for version, subdict in data_files.items():
try:
dfname = subdict[data_key]
except KeyError:
continue
try:
sfname = subdict[sig_key]
except KeyError:
continue
data_path = path.join(tzdata_loc, dfname)
sig_path = path.join(iana_sig_loc, sfname)
du_sig_path = path.join(du_sig_loc, sfname)
if path.exists(du_sig_path) and valid_signature(data_path, du_sig_path):
continue
if valid_signature(data_path, sig_path):
sign_file(data_path, du_sig_path)
else:
invalid_sigs.append(sig_path)
if len(invalid_sigs):
err_out = 'validation_errors.log'
with open(err_out, 'w') as f:
for inv_sig in invalid_sigs:
print(inv_sig, file=f)
print("Some invalid signatures were found. See {err_out} for details".format(err_out=err_out))
|
dateutil/tzdata
|
generate_signatures.py
|
generate_signatures.py
|
py
| 1,919 |
python
|
en
|
code
| 1 |
github-code
|
50
|
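The two helpers above can also be driven directly; the sketch below uses placeholder file names and assumes a local `gpg` with the relevant keys in its keyring.

```python
# Hypothetical direct use of valid_signature() / sign_file(); file names are placeholders.
data_file = "tzdata-release.tar.gz"
iana_sig  = "tzdata-release.tar.gz.asc"
our_sig   = "tzdata-release.tar.gz.sig"

if valid_signature(data_file, iana_sig):
    sign_file(data_file, our_sig)  # raises SignatureFailedError if gpg returns non-zero
else:
    print("IANA signature did not verify; not re-signing")
```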
11981791114
|
# coding=utf-8
import os
import shutil
import requests
import img2pdf
from tqdm import tqdm
from zipfile import ZipFile
from PyPDF2 import PdfFileWriter, PdfFileReader
from threading import Thread
from config import Config
headers = {
'User-Agent': '环球银幕HD 2.2 rv:1.0 (iPad; iOS 12.1.3; zh_CN)'.encode('utf-8'),
'Host': Config.host,
'Connection': 'keep-alive',
'Accept-Encoding': 'gzip'
}
def make_dir(path):
isExists = os.path.exists(path)
if not isExists:
os.mkdir(path)
def download_zip(path, file, file_url):
make_dir(path)
r = requests.get(file_url, headers=headers)
with open(os.path.join(path, file), 'wb') as zip_file:
zip_file.write(r.content)
def get_zipfile_namelist(path, file):
with ZipFile(os.path.join(path, file), 'r') as zipObj:
namelist = zipObj.namelist()
return namelist
def extract_zipfile(path, file):
with ZipFile(os.path.join(path, file), 'r') as zipObj:
infolist = zipObj.infolist()
if len(infolist) == 2:
if infolist[0].file_size > infolist[1].file_size:
zipObj.extract(infolist[0].filename, 'unzip')
else:
zipObj.extract(infolist[1].filename, 'unzip')
elif len(infolist) == 1:
zipObj.extract(infolist[0].filename, 'unzip')
else:
            print('Unexpected archive contents, please check and retry: ' + file)
def convert_img2pdf(path, filename, file):
with open(os.path.join(path, filename), "wb") as f:
f.write(img2pdf.convert(file))
# Download mix_online.zip
mix_url = 'http://' + Config.host + '/items/' + Config.path + '/mix_online.zip'
download_zip('temp', 'mix_online.zip', mix_url)
# Build the per-page download links from mix_online.zip
listOfFileNames = get_zipfile_namelist('temp', 'mix_online.zip')
pages = len(listOfFileNames)
if pages > 10:
    print('This issue has %s pages. Generating download links...' % (pages - 1))
download_links = []
host = headers.get('Host')
for i in range(1, pages):
download_links.append('http://%s/items/%s/layout_%s.zip' % (host, Config.path, i))
else:
    print('Please check mix_online.zip and retry.')
os.remove(os.path.join('temp', 'mix_online.zip'))
# Download layout_X.zip
threads = []
num = 1
print('Starting page downloads.')
for link in download_links:
pass
file_name = 'layout_' + str(num) + '.zip'
t = Thread(target=download_zip, args=['temp', file_name, link])
t.start()
threads.append(t)
num += 1
for t in tqdm(threads):
t.join()
print('Downloads finished')
# Extract the ZIP files
file_list = os.listdir('temp')
print('Extracting files')
for file in file_list:
try:
extract_zipfile('temp', file)
except:
        print(str(file) + ' failed to extract, retrying')
download_zip('temp', file, 'http://' + Config.host + '/items/' + Config.path + '/' + file)
extract_zipfile('temp', file)
        print(str(file) + ' extracted successfully')
print('Extraction finished')
# Rename files and convert images to PDF
file_list = []
for root, dirs, files in os.walk('unzip'):
if len(dirs) == 0:
file_list.append(os.path.join(root, files[0]))
print('Processing pages')
make_dir('pdf')
for file in file_list:
# P000XXX -> XXX
# pXXX -> XXX
page = int(file.split('/')[1].replace('P', '').replace('p', ''))
# XXX -> XXX.pdf
filename = str(page) + '.pdf'
if os.path.splitext(file)[-1] == '.pdf':
# rename pdf
os.rename(file, os.path.join('pdf', filename))
else:
        # handle images (usually the front and back covers)
convert_img2pdf('pdf', filename, file)
# Merge the PDFs
PDF_output = PdfFileWriter()
for i in range(pages - 1):
WorkPath = 'pdf/' + str(i) + '.pdf'
PDF_input = PdfFileReader(WorkPath)
addNext = PDF_input.getPage(0)
PDF_output.addPage(addNext)
os.remove(WorkPath)
PDF_output.write(open(Config.path.split('/')[1] + '.pdf', 'wb'))
shutil.rmtree('unzip')
shutil.rmtree('pdf')
shutil.rmtree('temp')
print("合并完成")
|
moriwang/WorldScreen
|
main.py
|
main.py
|
py
| 4,014 |
python
|
en
|
code
| 0 |
github-code
|
50
|
40343609822
|
"""Chatbot101 with AWS Lambda Console Script.
Copyright (c) 2020 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Cisco Sample
Code License, Version 1.1 (the "License"). You may obtain a copy of the
License at
https://developer.cisco.com/docs/licenses
"""
import requests
import json
from webexteamsbot import TeamsBot
from models import Response
from functools import partial
import argparse
import getpass
#Set Token Statically
TOKEN = ""
#Create Token flag
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument("--token", type=str, help="The webex bot token", default="")
args = parser.parse_args()
if TOKEN:
pass
elif args.token:
#set Token to TOKEN via --token flag
TOKEN = args.token
else:
#Get Token via cli input
TOKEN = getpass.getpass("Enter Webex Token: ")
# An example using a Response object. Response objects allow more complex
# replies including sending files, html, markdown, or text. Response objects
# can also set a roomId to send the response to a different room from where
# the incoming message was received.
def ret_message(incoming_msg):
"""
Sample function that uses a Response object for more options.
:param incoming_msg: The incoming message object from Teams
:return: A Response object based reply
"""
# Create a object to create a reply.
response = Response()
# Set the text of the reply.
response.text = "Here's a fun little meme."
# Craft a URL for a file to attach to message
u = "https://sayingimages.com/wp-content/uploads/"
u = u + "aaaaaalll-righty-then-alrighty-meme.jpg"
response.files = u
return response
def current_time(bot, incoming_msg):
"""
Sample function that returns the current time for a provided timezone
:param incoming_msg: The incoming message object from Teams
:return: A Response object based reply
"""
# Extract the message content, without the command "/time"
timezone = bot.extract_message("/time", incoming_msg.text).strip()
timezone = timezone or "est"
# Craft REST API URL to retrieve current time
# Using API from http://worldclockapi.com
u = "http://worldclockapi.com/api/json/{timezone}/now".format(
timezone=timezone)
r = requests.get(u).json()
# If an invalid timezone is provided, the serviceResponse will include
# error message
if r["serviceResponse"]:
return "Error: " + r["serviceResponse"]
# Format of returned data is "YYYY-MM-DDTHH:MM<OFFSET>"
# Example "2018-11-11T22:09-05:00"
returned_data = r["currentDateTime"].split("T")
cur_date = returned_data[0]
cur_time = returned_data[1][:5]
timezone_name = r["timeZoneName"]
# Craft a reply string.
reply = "In {TZ} it is currently {TIME} on {DATE}.".format(
TZ=timezone_name, TIME=cur_time, DATE=cur_date
)
return reply
def instatiate_bot(event, debug=True):
return TeamsBot("WebexBot", event, TOKEN, debug=debug)
def default(message):
return "Thank you for submitting your question or comment.\
Our team is hard at work, however we will respond to your question or comment within 1 business day.\
Until then, here’s CX Cloud FedRAMP Asked & Answered to review while we work hard to answer your question."
def hey(message):
return f"Hey {message.personEmail}"
def main(event, context):
webex_bot = instatiate_bot(event)
# add commands here
webex_bot.add_command("/time", "A default msg", partial(current_time, webex_bot))
webex_bot.set_help_message("Howdy")
webex_bot.set_greeting(default)
# Respond to message
reply = webex_bot.process_incoming_message()
print(reply)
msg = {"message": reply}
return {
'statusCode': 200,
'body': json.dumps(msg)
}
|
coleyr/Lambda_webex_bot
|
app.py
|
app.py
|
py
| 3,967 |
python
|
en
|
code
| 0 |
github-code
|
50
|
74187664474
|
from insanonym_utils.runner import Runner
from insanonym_utils.utils import _readModel
from insanonym_utils.models import FileConfigModel, DeleteAlgorithm, DeleteOptions
from os import getcwd
from pandas import isnull
def test_create_dataframe():
model = _readModel(getcwd(), 'parser.cfg')
df = Runner(model)
assert df.dataframe.empty == False
def test_delete_columns():
model = _readModel(getcwd(), 'parser.cfg')
deleteAlgorithm = DeleteAlgorithm(name='delete', options=DeleteOptions(columns=['date', 'latitude']))
model.algorithms = [ deleteAlgorithm ]
df = Runner(model)
df.execute()
assert isnull(df.dataframe.iloc[:,1]).any()
assert isnull(df.dataframe.iloc[:,2]).any()
|
danymat/INSAnonym-utils
|
tests/test_anon.py
|
test_anon.py
|
py
| 719 |
python
|
en
|
code
| 8 |
github-code
|
50
|
41735345882
|
from tkinter import *
from PIL import ImageTk, Image
import random
import numpy as np
root=Tk()
########SCORES####################
global scored7
scored7=None
global task1_score
global task2_score
global task3_score
global task4_score
global task5_score
global task6_score
global task_7_score
global task_8_score
global task_9_score
global task10_score
task1_score=0
task2_score=0
task3_score=0
task4_score=0
task5_score=0
task6_score=0
task7_score=3
task8_score=11
task9_score=0
task10_score=0
#status='incomplete'
words=['man', 'cat', 'dog', 'mango', 'sunday', 'sit', 'dress', 'break', 'door', 'drag', 'dance', 'zoo', 'shirt', 'queen', 'country', 'yam', 'trust', 'grease', 'click', 'rest', 'mate', 'ship', 'car', 'salt', 'lock', 'train' ]
select_words=random.sample(words,k=5)
#constants for task 5
drag_data={'item':None, 'x':0, 'y':0}
s1,s2,s3,s4,s5=None,None,None,None,None
#for task 7
background=['green', 'blue', 'red','yellow','red', 'green', 'blue', 'red','yellow','black']
back_list_num=1
task_7_score=7
#for task 8
img_index=1
#for task 10
emotions=['happy', 'angry','sad','friendly', 'sick','shy','proud','surprised']
task10_img_index=0
def change_colour(b):
if b['text'] in select_words:
b['bg']='green'
global task3_score
task3_score+=1
else:
b['bg']='white'
def MousePress(event):
drag_data['item']=canvas.find_closest(event.x,event.y)[0]
drag_data['x']=event.x
drag_data['y']=event.y
def MouseRelease(event):
drag_data['item']=None
drag_data['x']=0
drag_data['y']=0
def drag(event):
if drag_data['item'] in [s1,s2,s3,s4,s5]:
canvas.move(drag_data['item'], event.x-drag_data['x'],event.y-drag_data['y'])
drag_data['x']=event.x
drag_data['y']=event.y
def show_time(time,t):
canvas.create_text(50*time,100, text=f'{time} sec..', font='Tahoma 12 ')
if time==10:
if t=='t1':
button=Button(canvas,text='click to continue to next task', font='Tahoma 15 bold', bg='red' )
button.place(x=300,y=100)
button['command']=lambda b=button:task_2(b)
if t=='t2':
button=Button(canvas,text='click to continue to next task', font='Tahoma 15 bold', bg='red' )
button.place(x=300,y=100)
button['command']=lambda b=button:task_3(b)
if t=='t3':
label=Label(canvas, text='Can you remember the five words shown to you earlier?\nIf you can find them here, click them.', font='Tahoma 20 bold', bg='green')
label.place(x=20, y=100)
            button = [None] * 25
for i in range (25):
button[i]=Button(canvas,text=words[i], font='Tahoma 15 bold', bg='blue', width=6, compound='c')
button[i].place(x=100*(i%8)+10,y=np.floor(i/8)*50+400)
button[i]['command']=lambda b=button[i]:change_colour(b)
next_button=Button(canvas,text='click to continue to next task', font='Tahoma 15 bold', bg='red' )
next_button.place(x=300,y=50)
next_button['command']=lambda b=button, b_=next_button, l=label:task_4(b,b_,l)
def task_2(button):
button.destroy()
instruction=canvas.create_text(380,150, text='Learning and memory: Task 2\n\nTake note of these alphabets, you will be asked to recall them (later).', font='Tahoma 15 bold')
    # letters to be shown
letters=list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
number=random.sample(letters, k=5)
global letter_task
letter_task=list(number)
#show number
number1=Label(canvas, text=number[0],font='Tahoma 20 bold', bg='yellow' )
number1.place(x=330, y=270)
number2=Label(canvas, text=number[1],font='Tahoma 20 bold', bg='yellow' )
number2.place(x=360, y=270)
number3=Label(canvas, text=number[2],font='Tahoma 20 bold', bg='yellow' )
number3.place(x=390, y=270)
number4=Label(canvas, text=number[3],font='Tahoma 20 bold', bg='yellow' )
number4.place(x=420, y=270)
number5=Label(canvas, text=number[4],font='Tahoma 20 bold', bg='yellow' )
number5.place(x=450, y=270)
#show time
root.after(1000, lambda:show_time(1,'t2'))
root.after(2000, lambda:show_time(2,'t2'))
root.after(3000, lambda:show_time(3,'t2'))
root.after(4000, lambda:show_time(4,'t2'))
root.after(5000, lambda:show_time(5,'t2'))
root.after(6000, lambda:show_time(6,'t2'))
root.after(7000, lambda:show_time(7,'t2'))
root.after(8000, lambda:show_time(8,'t2'))
root.after(9000, lambda:show_time(9,'t2'))
root.after(10000, lambda:show_time(10,'t2'))
###remove everything after 10 seconds
number1.after(10000, lambda:number1.destroy())
number2.after(10000, lambda:number2.destroy())
number3.after(10000, lambda:number3.destroy())
number4.after(10000, lambda:number4.destroy())
number5.after(10000, lambda:number5.destroy())
root.after(10000, lambda: canvas.delete('all'))
def task_3(button):
button.destroy()
instruction=canvas.create_text(350,150, text='Learning and memory: Task 3\n\nTake note of these words, you will be asked to recall them (later).', font='Tahoma 15 bold')
    # words to be shown
number=select_words
#show number
number1=Label(canvas, text=number[0],font='Tahoma 20 bold', bg='yellow' )
number1.place(x=300, y=270)
number2=Label(canvas, text=number[1],font='Tahoma 20 bold', bg='yellow' )
number2.place(x=400, y=270)
number3=Label(canvas, text=number[2],font='Tahoma 20 bold', bg='yellow' )
number3.place(x=500, y=270)
number4=Label(canvas, text=number[3],font='Tahoma 20 bold', bg='yellow' )
number4.place(x=350, y=350)
number5=Label(canvas, text=number[4],font='Tahoma 20 bold', bg='yellow' )
number5.place(x=450, y=350)
#show time
root.after(1000, lambda:show_time(1,'t3'))
root.after(2000, lambda:show_time(2,'t3'))
root.after(3000, lambda:show_time(3,'t3'))
root.after(4000, lambda:show_time(4,'t3'))
root.after(5000, lambda:show_time(5,'t3'))
root.after(6000, lambda:show_time(6,'t3'))
root.after(7000, lambda:show_time(7,'t3'))
root.after(8000, lambda:show_time(8,'t3'))
root.after(9000, lambda:show_time(9,'t3'))
root.after(10000, lambda:show_time(10,'t3'))
###remove everything after 10 seconds
number1.after(10000, lambda:number1.destroy())
number2.after(10000, lambda:number2.destroy())
number3.after(10000, lambda:number3.destroy())
number4.after(10000, lambda:number4.destroy())
number5.after(10000, lambda:number5.destroy())
root.after(10000, lambda: canvas.delete('all'))
def task_4(b,b_,l):
l.destroy()
b_.destroy()
for i in b:
i.destroy()
instruction=canvas.create_text(400,150, text='Learning: Task 1 (Naming)\n\nIn this task you are to match the pictures below with a word describing them.\nYou would need to pick your words from list below', font='Tahoma 15 bold')
#photos={'guiter','umbrella', 'pot', 'frying-pan', 'pencil','carrot', 'horse', 'mosque' }
root.carrot=carrot=ImageTk.PhotoImage(Image.open('./Images/carrot.jpg'))
root.frying_pan=frying_pan=ImageTk.PhotoImage(Image.open('./Images/frying-pan.jpg'))
root.guiter=guiter=ImageTk.PhotoImage(Image.open('./Images/guitar.jpg'))
root.pot=pot=ImageTk.PhotoImage(Image.open('./Images/pot.jpg'))
root.pencil=pencil=ImageTk.PhotoImage(Image.open('./Images/pencil.jpg'))
root.horse=horse=ImageTk.PhotoImage(Image.open('./Images/horse.jpg'))
root.mosque=mosque=ImageTk.PhotoImage(Image.open('./Images/mosque.jpg'))
root.plane=plane=ImageTk.PhotoImage(Image.open('./Images/plane.jpg'))
#root.church=church=ImageTk.PhotoImage(Image.open('./Images/church.png'))
canvas.create_image(150,(230+(50)), image=horse)
canvas.create_image(150,(230+(110)), image=mosque)
canvas.create_image(150,(230+(170)), image=plane)
canvas.create_image(150,(230+(230)), image=pencil)
l_horse, l_mosque, l_plane, l_pencil=Entry(canvas, font='Tahoma 15 bold', width=10),Entry(canvas, font='Tahoma 15 bold', width=10),Entry(canvas, font='Tahoma 15 bold', width=10),Entry(canvas, font='Tahoma 15 bold', width=10)
l_horse.place(x=200,y=230+(30))
l_mosque.place(x=200,y=230+(90))
l_plane.place(x=200,y=230+(160))
l_pencil.place(x=200,y=230+(220))
l_carrot, l_frying_pan, l_guitar, l_pot=Entry(canvas, font='Tahoma 15 bold', width=10),Entry(canvas, font='Tahoma 15 bold', width=10),Entry(canvas, font='Tahoma 15 bold', width=10),Entry(canvas, font='Tahoma 15 bold', width=10)
l_carrot.place(x=450,y=230+(30))
l_frying_pan.place(x=450,y=230+(90))
l_guitar.place(x=450,y=230+(160))
l_pot.place(x=450,y=230+(220))
canvas.create_image(400,(230+(50)), image=carrot)
canvas.create_image(400,(230+(110)), image=frying_pan)
canvas.create_image(400,(230+(170)), image=guiter)
canvas.create_image(400,(230+(230)), image=pot)
l1=Label(canvas, text='GUITAR UMBRELLA RATS POT FRYING-PAN PENCIL CARROT',font='Tahoma 15 bold', bg='yellow')
l1.place(x=40,y=550)
l2=Label(canvas, text='HORSE MONKEY PLANE BIRD SCHOOL HOUSE CHURCH MOSQUE ',font='Tahoma 15 bold', bg='yellow')
l2.place(x=40,y=580)
finito=Button(canvas,text='COMPLETE', font='Tahoma 15 bold', bg='green' )
finito.place(x=300,y=500)
finito['command']=lambda f=finito:task_5(f,l1,l2,l_horse, l_mosque, l_plane, l_pencil, l_carrot, l_frying_pan, l_guitar, l_pot)
def task_5(f,l1,l2,l_horse, l_mosque, l_plane, l_pencil, l_carrot, l_frying_pan, l_guitar, l_pot):
global task4_score
if l_horse.get().upper()=='HORSE':
task4_score+=1
if l_mosque.get().upper()=='MOSQUE':
task4_score+=1
if l_plane.get().upper()=='PLANE':
task4_score+=1
if l_pencil.get().upper()=='PENCIL':
task4_score+=1
if l_carrot.get().upper()=='CARROT':
task4_score+=1
if l_frying_pan.get().upper()=='FRYING-PAN':
task4_score+=1
if l_guitar.get().upper()=='GUITAR':
task4_score+=1
if l_pot.get().upper()=='POT':
task4_score+=1
print(task4_score)
f.destroy()
l1.destroy()
l2.destroy()
l_horse.destroy()
l_mosque.destroy()
l_plane.destroy()
l_pencil.destroy()
l_carrot.destroy()
l_frying_pan.destroy()
l_guitar.destroy()
l_pot.destroy()
canvas.delete('all')
instruction=canvas.create_text(450,50, text='Learning: Task 2 (Comprehension)\n\nIn this task you are to match the group of pictures with a suitable descriptive word.\nYou would need to pick your words from list below', font='Tahoma 15 bold')
root.cat=cat=ImageTk.PhotoImage(Image.open('./Images/cat.jpg'))
root.dog=dog=ImageTk.PhotoImage(Image.open('./Images/dog.jpg'))
root.bird=bird=ImageTk.PhotoImage(Image.open('./Images/bird.jpg'))
root.tortoise=tortoise=ImageTk.PhotoImage(Image.open('./Images/tortoise.jpg'))
root.lion=lion=ImageTk.PhotoImage(Image.open('./Images/lion.jpg'))
root.car=car=ImageTk.PhotoImage(Image.open('./Images/car.jpg'))
root.ship=ship=ImageTk.PhotoImage(Image.open('./Images/ship.jpg'))
root.plane=plane=ImageTk.PhotoImage(Image.open('./Images/plane.jpg'))
root.train=train=ImageTk.PhotoImage(Image.open('./Images/train.jpg'))
root.truck=truck=ImageTk.PhotoImage(Image.open('./Images/truck.jpg'))
root.mango=mango=ImageTk.PhotoImage(Image.open('./Images/mango.jpg'))
root.apple=apple=ImageTk.PhotoImage(Image.open('./Images/apple.jpg'))
root.bananna=bananna=ImageTk.PhotoImage(Image.open('./Images/bananna.jpg'))
root.cucumber=cucumber=ImageTk.PhotoImage(Image.open('./Images/cucumber.jpg'))
root.van=van=ImageTk.PhotoImage(Image.open('./Images/van.jpg'))
root.house=house=ImageTk.PhotoImage(Image.open('./Images/house.jpg'))
root.freezer=freezer=ImageTk.PhotoImage(Image.open('./Images/freezer.jpg'))
root.drawer=drawer=ImageTk.PhotoImage(Image.open('./Images/drawer.jpg'))
root.phone=phone=ImageTk.PhotoImage(Image.open('./Images/phone.jpg'))
root.letters=letters=ImageTk.PhotoImage(Image.open('./Images/letters.jpg'))
root.microphone=microphone=ImageTk.PhotoImage(Image.open('./Images/microphone.jpg'))
root.email=email=ImageTk.PhotoImage(Image.open('./Images/email.jpg'))
canvas.create_image(100,(100+(50)), image=cat)
canvas.create_image(170,(100+(50)), image=dog)
canvas.create_image(240,(100+(50)), image=bird)
canvas.create_image(310,(100+(50)), image=tortoise)
canvas.create_image(380,(100+(50)), image=lion)
canvas.create_image(100,(180+(50)), image=car)
canvas.create_image(170,(180+(50)), image=ship)
canvas.create_image(240,(180+(50)), image=plane)
canvas.create_image(310,(180+(50)), image=train)
canvas.create_image(380,(180+(50)), image=truck)
canvas.create_image(100,(260+(50)), image=mango)
canvas.create_image(170,(260+(50)), image=apple)
canvas.create_image(240,(260+(50)), image=bananna)
canvas.create_image(310,(260+(50)), image=cucumber)
canvas.create_image(100,(340+(50)), image=van)
canvas.create_image(170,(340+(50)), image=house)
canvas.create_image(240,(340+(50)), image=freezer)
canvas.create_image(310,(340+(50)), image=drawer)
canvas.create_image(100,(420+(50)), image=phone)
canvas.create_image(170,(420+(50)), image=letters)
canvas.create_image(240,(420+(50)), image=microphone)
canvas.create_image(310,(420+(50)), image=email)
group1, group2, group3, group4, group5=Entry(canvas, font='Tahoma 15 bold', width=10),Entry(canvas, font='Tahoma 15 bold', width=10),Entry(canvas, font='Tahoma 15 bold', width=10),Entry(canvas, font='Tahoma 15 bold', width=10),Entry(canvas, font='Tahoma 15 bold', width=10)
group1.place(x=460,y=90+(50))
group2.place(x=460,y=170+(50))
group3.place(x=460,y=250+(50))
group4.place(x=460,y=330+(50))
group5.place(x=460,y=410+(50))
l1=Label(canvas, text='ANIMAL PETS FOOD VEGETABLES FRUITS TRANSPORT ',font='Tahoma 15 bold', bg='yellow')
l1.place(x=40,y=550)
l2=Label(canvas, text=' INFORMATION STORAGE COMMUNICATION SOCIALIZATION ',font='Tahoma 15 bold', bg='yellow')
l2.place(x=40,y=580)
finito=Button(canvas,text='COMPLETE', font='Tahoma 15 bold', bg='green' )
finito.place(x=300,y=500)
finito['command']=lambda f=finito:task_query(f,l1,l2,group1, group2, group3, group4, group5)
def task_query(f,l1,l2,group1, group2, group3, group4, group5):
global task5_score
if group1.get().upper()=='ANIMAL':
task5_score+=1
if group2.get().upper()=='TRANSPORT':
task5_score+=1
if group3.get().upper()=='FRUITS':
task5_score+=1
if group4.get().upper()=='STORAGE':
task5_score+=1
if group5.get().upper()=='COMMUNICATION':
task5_score+=1
f.destroy()
l1.destroy()
l2.destroy()
group1.destroy()
group2.destroy()
group3.destroy()
group4.destroy()
group5.destroy()
canvas.delete('all')
instruction=canvas.create_text(450,50, text='Learning & Memory: \n\nCan you remember the numbers that were shown to you earlier? What were they?', font='Tahoma 15 bold')
label_num=Label(root, text='Numbers:',font='Tahoma 25 bold',width=10)
label_num.place(x=200, y=200)
answer_num1=Entry(root, font='Tahoma 25 bold',width=1)
answer_num1.place(x=400, y=200)
answer_num2=Entry(root, font='Tahoma 25 bold',width=1)
answer_num2.place(x=430, y=200)
answer_num3=Entry(root, font='Tahoma 25 bold',width=1)
answer_num3.place(x=460, y=200)
answer_num4=Entry(root, font='Tahoma 25 bold',width=1)
answer_num4.place(x=490, y=200)
answer_num5=Entry(root, font='Tahoma 25 bold',width=1)
answer_num5.place(x=520, y=200)
label_lett=Label(root, text='Letters:',font='Tahoma 25 bold',width=10)
label_lett.place(x=200, y=270)
answer_let1=Entry(root, font='Tahoma 25 bold',width=1)
answer_let1.place(x=400, y=270)
answer_let2=Entry(root, font='Tahoma 25 bold',width=1)
answer_let2.place(x=430, y=270)
answer_let3=Entry(root, font='Tahoma 25 bold',width=1)
answer_let3.place(x=460, y=270)
answer_let4=Entry(root, font='Tahoma 25 bold',width=1)
answer_let4.place(x=490, y=270)
answer_let5=Entry(root, font='Tahoma 25 bold',width=1)
answer_let5.place(x=520, y=270)
finito=Button(canvas,text='COMPLETE', font='Tahoma 15 bold', bg='green' )
finito.place(x=300,y=500)
finito['command']=lambda f=finito:task_6(f,label_lett,answer_let1,answer_let2,answer_let3,answer_let4,answer_let5, label_num,answer_num1,answer_num2,answer_num3,answer_num4,answer_num5)
def task_6(f,label_lett,answer_let1,answer_let2,answer_let3,answer_let4,answer_let5, label_num,answer_num1,answer_num2,answer_num3,answer_num4,answer_num5):
######conduct scoring for the two tasks
global task1_score
global task2_score
task1_score=0
task2_score=0
if answer_num1.get() in number_task:
task1_score+=1
if answer_num1.get() == number_task[0]:
task1_score+=1
if answer_num2.get() in number_task:
task1_score+=1
if answer_num2.get() == number_task[1]:
task1_score+=1
if answer_num3.get() in number_task:
task1_score+=1
if answer_num3.get() == number_task[2]:
task1_score+=1
if answer_num4.get() in number_task:
task1_score+=1
if answer_num4.get() == number_task[3]:
task1_score+=1
if answer_num5.get() in number_task:
task1_score+=1
if answer_num5.get() == number_task[4]:
task1_score+=1
if answer_let1.get().upper() in letter_task:
task2_score+=1
if answer_let1.get().upper() == letter_task[0]:
task2_score+=1
if answer_let2.get().upper() in letter_task:
task2_score+=1
if answer_let2.get().upper() == letter_task[1]:
task2_score+=1
if answer_let3.get().upper() in letter_task:
task2_score+=1
if answer_let3.get().upper() == letter_task[2]:
task2_score+=1
if answer_let4.get().upper() in letter_task:
task2_score+=1
if answer_let4.get().upper() == letter_task[3]:
task2_score+=1
if answer_let5.get().upper() in letter_task:
task2_score+=1
if answer_let5.get().upper() == letter_task[4]:
task2_score+=1
print(task1_score)
f.destroy()
label_lett.destroy()
answer_let1.destroy()
answer_let2.destroy()
answer_let3.destroy()
answer_let4.destroy()
answer_let5.destroy()
label_num.destroy()
answer_num1.destroy()
answer_num2.destroy()
answer_num3.destroy()
answer_num4.destroy()
answer_num5.destroy()
canvas.delete('all')
######task 7 code starts here##########
instruction=canvas.create_text(450,50, text='Executive:\n\nPlease arrange these cards from the smallest to the largest number', font='Tahoma 15 bold')
card_set=[f'{i}.jpg' for i in range(1,31)]
temp=random.sample(card_set,k=5)
global select_cards
select_cards=temp
root.pic_a=pic_a=ImageTk.PhotoImage(Image.open(f'./Images/{select_cards[0]}'))
root.pic_b=pic_b=ImageTk.PhotoImage(Image.open(f'./Images/{select_cards[1]}'))
root.pic_c=pic_c=ImageTk.PhotoImage(Image.open(f'./Images/{select_cards[2]}'))
root.pic_d=pic_d=ImageTk.PhotoImage(Image.open(f'./Images/{select_cards[3]}'))
root.pic_e=pic_e=ImageTk.PhotoImage(Image.open(f'./Images/{select_cards[4]}'))
global s1,s2,s3,s4,s5
s1=canvas.create_image(100,(300+(50)), image=pic_a)
s2=canvas.create_image(250,(300+(50)), image=pic_b)
s3=canvas.create_image(400,(300+(50)), image=pic_c)
s4=canvas.create_image(550,(300+(50)), image=pic_d)
s5=canvas.create_image(700,(300+(50)), image=pic_e)
for i in range (5):
canvas.create_rectangle(50+(i*150),100, 150+(i*150),200)
canvas.bind('<ButtonPress>',MousePress)
canvas.bind('<ButtonRelease>', MouseRelease)
canvas.bind('<Motion>', drag)
finito=Button(canvas,text='COMPLETE', font='Tahoma 15 bold', bg='green' )
finito.place(x=300,y=500)
finito['command']=lambda f=finito:task_7(f)
def task_7(f):
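    # Scoring for the card-ordering task: read the final x-coordinate of each dragged
    # card, sort the card values by that x position to recover the order the user laid
    # them out in, and award 2 points for every card that sits in the correct ascending slot.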
result=({select_cards[0].split('.')[0]:canvas.coords(s1)[0], select_cards[1].split('.')[0]:canvas.coords(s2)[0], select_cards[2].split('.')[0]:canvas.coords(s3)[0], select_cards[3].split('.')[0]:canvas.coords(s4)[0], select_cards[4].split('.')[0]:canvas.coords(s5)[0]})
result=dict(sorted(result.items(),key=lambda item:item[1])).keys()
result=[int(item) for item in result]
answer=sorted(result)
for i in range (5):
if answer[i]==result[i]:
global task6_score
task6_score+=2
f.destroy()
canvas.delete('all')
#####Task 7 starts here#######################
instruction=canvas.create_text(400,50, text='Psychomotor/attention/concentration: Task 1\n\nIn this task, images of different colours appear for 5 seconds\nTap on colour ‘RED’ anytime it pops up', font='Tahoma 15 bold')
finito=Button(canvas,text=' \n \n ', font='Tahoma 15 bold', bg=background[0])
finito.place(x=300,y=250)
canvas.after(5000,lambda:task_7b(finito))
def task_7b(f):
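    # Every 5 seconds the previous swatch is destroyed and the next colour from the
    # `background` list is shown; clicking a swatch calls task_7_scorer, which adds a
    # point for a click on red and deducts one otherwise. After 10 colours the task ends
    # and a button to move on to the next task is shown.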
global back_list_num
f.destroy()
finito=Button(canvas,text=' \n \n ', font='Tahoma 15 bold', bg=background[back_list_num])
finito.place(x=300,y=250)
finito['command']=lambda:task_7_scorer(finito)
back_list_num+=1
if back_list_num>9:
global scored7
scored7=False
finito.destroy()
canvas.delete('all')
next=Button(canvas,text='Click to move to the next task', font='Tahoma 15 bold', bg='red')
next.place(x=300,y=100)
next['command']=lambda f=next:task_8(f)
else:
canvas.after(5000,lambda:task_7b(finito))
def task_7_scorer(f):
    global task7_score
    global scored7
    if scored7==False:
        if f['background']=='red':
            task7_score+=1
            scored7=True
        else:
            task7_score-=1
            scored7=True
def task_8(f):
    print('Score 7', task7_score)
f.destroy()
canvas.delete('all')
    instruction=canvas.create_text(450,50, text='Psychomotor/attention/concentration: Task 2\n\nIn this task, images of different objects and numbers appear for 5 seconds\nTap on the images or numbers that begin with the letter \'F\' when they appear', font='Tahoma 15 bold')
global s1
global s2
global s3
global s4
global s5
global s6
global s7
global s8
global s9
global s10
global s11
global s12
global s13
global s14
global s15
global s16
global s17
global s18
global s19
global s20
image_task8=['five.jpg', 'two.jpg', 'eight.jpg', 'flute.jpg','phone.jpg','facial.jpg', 'chair.jpg','58.jpg','fork.jpg', '85.jpg', 'book.jpg', 'house.jpg','face-cap.jpg', 'fingerprint.jpg', 'television.jpg', 'fan.jpg', 'pencil.jpg','frame.jpg', 'car.jpg', 'dog.jpg']
root.s1=s1=ImageTk.PhotoImage(Image.open(f'./Images/{image_task8[0]}'))
root.s2=s2=ImageTk.PhotoImage(Image.open(f'./Images/{image_task8[1]}'))
root.s3=s3=ImageTk.PhotoImage(Image.open(f'./Images/{image_task8[2]}'))
root.s4=s4=ImageTk.PhotoImage(Image.open(f'./Images/{image_task8[3]}'))
root.s5=s5=ImageTk.PhotoImage(Image.open(f'./Images/{image_task8[4]}'))
root.s6=s6=ImageTk.PhotoImage(Image.open(f'./Images/{image_task8[5]}'))
root.s7=s7=ImageTk.PhotoImage(Image.open(f'./Images/{image_task8[6]}'))
root.s8=s8=ImageTk.PhotoImage(Image.open(f'./Images/{image_task8[7]}'))
root.s9=s9=ImageTk.PhotoImage(Image.open(f'./Images/{image_task8[8]}'))
root.s10=s10=ImageTk.PhotoImage(Image.open(f'./Images/{image_task8[10]}'))
root.s11=s11=ImageTk.PhotoImage(Image.open(f'./Images/{image_task8[11]}'))
root.s12=s12=ImageTk.PhotoImage(Image.open(f'./Images/{image_task8[12]}'))
root.s13=s13=ImageTk.PhotoImage(Image.open(f'./Images/{image_task8[13]}'))
root.s14=s14=ImageTk.PhotoImage(Image.open(f'./Images/{image_task8[14]}'))
root.s15=s15=ImageTk.PhotoImage(Image.open(f'./Images/{image_task8[15]}'))
root.s16=s16=ImageTk.PhotoImage(Image.open(f'./Images/{image_task8[16]}'))
root.s17=s17=ImageTk.PhotoImage(Image.open(f'./Images/{image_task8[17]}'))
root.s18=s18=ImageTk.PhotoImage(Image.open(f'./Images/{image_task8[18]}'))
root.s19=s19=ImageTk.PhotoImage(Image.open(f'./Images/{image_task8[19]}'))
root.s20=s20=ImageTk.PhotoImage(Image.open(f'./Images/{image_task8[9]}'))
global s
s=[s1,s2,s3,s4,s5,s6,s7,s8,s9,s10,s11,s12,s13,s14,s15,s16,s17,s18,s19,s20]
finito=Button(canvas,image=s1)
finito.place(x=300,y=250)
finito['command']=lambda:task_8_scorer(img_index)
canvas.after(5000,lambda:task_8b(finito))
def task_8b(f):
global img_index
f.destroy()
finito=Button(canvas,image=s[img_index])
finito.place(x=300,y=250)
finito['command']=lambda:task_8_scorer(img_index)
img_index+=1
if img_index>19:
finito.destroy()
canvas.delete('all')
next=Button(canvas,text='Click to move to the next task', font='Tahoma 15 bold', bg='red')
next.place(x=300,y=100)
next['command']=lambda f=next:task_9(f)
else:
canvas.after(5000,lambda:task_8b(finito))
def task_8_scorer(sj):
global task8_score
if sj in [1,4,6,8,9,13,14,16,18]:
task8_score+=1
else:
task8_score-=1
def task_9(f,):
f.destroy()
canvas.delete('all')
instruction=canvas.create_text(420,100, text='Attention /concentration:\n\nPlease provide the answers to these questions.', font='Tahoma 18 bold', )
q1a=Label(canvas, text=' T U\n 3\n+\n 5', font='times 17 bold')
q1a.place(x=50, y=200)
q1_answer=Entry(canvas, font='times 17 bold', width=6)
q1_answer.place(x=60,y=310)
q2a=Label(canvas, text=' T U\n 2\n+\n 7', font='times 17 bold')
q2a.place(x=150, y=200)
q2_answer=Entry(canvas, font='times 17 bold', width=6)
q2_answer.place(x=160,y=310)
q3a=Label(canvas, text=' T U\n 6\n-\n 4', font='times 17 bold')
q3a.place(x=250, y=200)
q3_answer=Entry(canvas, font='times 17 bold', width=6)
q3_answer.place(x=260,y=310)
q4a=Label(canvas, text=' T U\n 8\n+\n 7', font='times 17 bold')
q4a.place(x=350, y=200)
q4_answer=Entry(canvas, font='times 17 bold', width=6)
q4_answer.place(x=360,y=310)
q5a=Label(canvas, text=' T U\n 5\n+\n 9', font='times 17 bold')
q5a.place(x=450, y=200)
q5_answer=Entry(canvas, font='times 17 bold', width=6)
q5_answer.place(x=460,y=310)
q6a=Label(canvas, text=' T U\n 1 6\n-\n 7', font='times 17 bold')
q6a.place(x=550, y=200)
q6_answer=Entry(canvas, font='times 17 bold', width=6)
q6_answer.place(x=560,y=310)
q7a=Label(canvas, text=' T U\n 4 1\n-\n 2 4', font='times 17 bold')
q7a.place(x=650, y=200)
q7_answer=Entry(canvas, font='times 17 bold', width=6)
q7_answer.place(x=660,y=310)
q8a=Label(canvas, text=' T U\n 3 3\n+\n 2 7', font='times 17 bold')
q8a.place(x=750, y=200)
q8_answer=Entry(canvas, font='times 17 bold', width=6)
q8_answer.place(x=760,y=310)
q9a=Label(canvas, text=' T U\n 6 4\n-\n 3 5', font='times 17 bold')
q9a.place(x=850, y=200)
q9_answer=Entry(canvas, font='times 17 bold', width=6)
q9_answer.place(x=860,y=310)
q10a=Label(canvas, text=' T U\n 8 7\n-\n 6 5', font='times 17 bold')
q10a.place(x=50, y=380)
q10_answer=Entry(canvas, font='times 17 bold', width=6)
q10_answer.place(x=60,y=490)
finito=Button(canvas,text='COMPLETE', font='Tahoma 15 bold', bg='green' )
finito.place(x=400,y=550)
finito['command']=lambda f=finito:task_10(f,[q1a,q2a,q3a,q4a,q5a,q6a,q7a,q8a,q9a,q10a],[q1_answer,q2_answer,q3_answer,q4_answer,q5_answer,q6_answer,q7_answer,q8_answer,q9_answer,q10_answer])
def task_10(f,ls,ls2):
global task9_score
    # Expected answers follow the sums and differences displayed on screen; one point is
    # given for attempting a question and a second point for a correct numeric answer.
    expected_answers = [8, 9, 2, 15, 14, 9, 17, 60, 29, 22]
    for entry, expected in zip(ls2, expected_answers):
        value = entry.get().strip()
        if len(value) > 0:
            task9_score += 1
            try:
                if int(value) == expected:
                    task9_score += 1
            except ValueError:
                pass
#Task 10 starts here
for item in ls:
item.destroy()
for item in ls2:
item.destroy()
f.destroy()
canvas.delete('all')
instruction=canvas.create_text(450,100, text='Social Cognition:\n\nPlease click on the word that matches the emotion/state \ndescribed in the picture.', font='Tahoma 18 bold', )
global s1
global s2
global s3
global s4
global s5
global s6
global s7
global s8
root.s1=s1=ImageTk.PhotoImage(Image.open('./Images/happy.jpg'))
root.s2=s2=ImageTk.PhotoImage(Image.open('./Images/angry.jpg'))
root.s3=s3=ImageTk.PhotoImage(Image.open(f'./Images/sad.jpg'))
root.s4=s4=ImageTk.PhotoImage(Image.open(f'./Images/friendly.jpg'))
root.s5=s5=ImageTk.PhotoImage(Image.open(f'./Images/sick.jpg'))
root.s6=s6=ImageTk.PhotoImage(Image.open(f'./Images/shy.jpg'))
root.s7=s7=ImageTk.PhotoImage(Image.open(f'./Images/proud.jpg'))
root.s8=s8=ImageTk.PhotoImage(Image.open(f'./Images/surprised.jpg'))
global s
s=[s1,s2,s3,s4,s5,s6,s7,s8]
finito=Label(canvas,image=s1)
finito.place(x=300,y=250)
b_happy=Button(canvas,text='happy', font='Tahoma 15 bold', bg='yellow', width=7)
b_happy.place(x=850,y=250)
b_angry=Button(canvas,text='angry', font='Tahoma 15 bold', bg='yellow', width=7)
b_angry.place(x=700,y=250)
b_sad=Button(canvas,text='sad', font='Tahoma 15 bold', bg='yellow', width=7)
b_sad.place(x=700,y=350)
b_friendly=Button(canvas,text='friendly', font='Tahoma 15 bold', bg='yellow', width=7)
b_friendly.place(x=850,y=400)
b_sick=Button(canvas,text='sick', font='Tahoma 15 bold', bg='yellow', width=7)
b_sick.place(x=850,y=300)
b_shy=Button(canvas,text='shy', font='Tahoma 15 bold', bg='yellow', width=7)
b_shy.place(x=700,y=300)
b_proud=Button(canvas,text='proud', font='Tahoma 15 bold', bg='yellow', width=7)
b_proud.place(x=850,y=350)
b_surprised=Button(canvas,text='surprised', font='Tahoma 15 bold', bg='yellow', width=7)
b_surprised.place(x=700,y=400)
b_happy['command']=lambda:task_10b(finito,b_happy,[b_happy,b_angry,b_sad,b_friendly,b_sick,b_shy,b_proud,b_surprised])
b_angry['command']=lambda:task_10b(finito,b_angry,[b_happy,b_angry,b_sad,b_friendly,b_sick,b_shy,b_proud,b_surprised])
b_sad['command']=lambda:task_10b(finito,b_sad,[b_happy,b_angry,b_sad,b_friendly,b_sick,b_shy,b_proud,b_surprised])
b_friendly['command']=lambda:task_10b(finito,b_friendly,[b_happy,b_angry,b_sad,b_friendly,b_sick,b_shy,b_proud,b_surprised])
b_sick['command']=lambda:task_10b(finito,b_sick,[b_happy,b_angry,b_sad,b_friendly,b_sick,b_shy,b_proud,b_surprised])
b_shy['command']=lambda:task_10b(finito,b_shy,[b_happy,b_angry,b_sad,b_friendly,b_sick,b_shy,b_proud,b_surprised])
b_proud['command']=lambda:task_10b(finito,b_proud,[b_happy,b_angry,b_sad,b_friendly,b_sick,b_shy,b_proud,b_surprised])
b_surprised['command']=lambda:task_10b(finito,b_surprised,[b_happy,b_angry,b_sad,b_friendly,b_sick,b_shy,b_proud,b_surprised])
def task_10b(f,pressed,b):
global task10_img_index
global emotions
global task10_score
if pressed['text']==emotions[task10_img_index]:
task10_score+=1
task10_img_index+=1
print(task10_score)
f.destroy()
for butt in b:
butt.destroy()
###Task 10 starts here
finito=Label(canvas,image=s[task10_img_index])
finito.place(x=300,y=250)
b_happy=Button(canvas,text='happy', font='Tahoma 15 bold', bg='yellow', width=7)
b_happy.place(x=850,y=250)
b_angry=Button(canvas,text='angry', font='Tahoma 15 bold', bg='yellow', width=7)
b_angry.place(x=700,y=250)
b_sad=Button(canvas,text='sad', font='Tahoma 15 bold', bg='yellow', width=7)
b_sad.place(x=700,y=350)
b_friendly=Button(canvas,text='friendly', font='Tahoma 15 bold', bg='yellow', width=7)
b_friendly.place(x=850,y=400)
b_sick=Button(canvas,text='sick', font='Tahoma 15 bold', bg='yellow', width=7)
b_sick.place(x=850,y=300)
b_shy=Button(canvas,text='shy', font='Tahoma 15 bold', bg='yellow', width=7)
b_shy.place(x=700,y=300)
b_proud=Button(canvas,text='proud', font='Tahoma 15 bold', bg='yellow', width=7)
b_proud.place(x=850,y=350)
b_surprised=Button(canvas,text='surprised', font='Tahoma 15 bold', bg='yellow', width=7)
b_surprised.place(x=700,y=400)
if task10_img_index<5:
b_happy['command']=lambda:task_10b(finito,b_happy,[b_happy,b_angry,b_sad,b_friendly,b_sick,b_shy,b_proud,b_surprised])
b_sad['command']=lambda:task_10b(finito,b_sad,[b_happy,b_angry,b_sad,b_friendly,b_sick,b_shy,b_proud,b_surprised])
b_angry['command']=lambda:task_10b(finito,b_angry,[b_happy,b_angry,b_sad,b_friendly,b_sick,b_shy,b_proud,b_surprised])
b_sick['command']=lambda:task_10b(finito,b_sick,[b_happy,b_angry,b_sad,b_friendly,b_sick,b_shy,b_proud,b_surprised])
b_friendly['command']=lambda:task_10b(finito,b_friendly,[b_happy,b_angry,b_sad,b_friendly,b_sick,b_shy,b_proud,b_surprised])
b_shy['command']=lambda:task_10b(finito,b_shy,[b_happy,b_angry,b_sad,b_friendly,b_sick,b_shy,b_proud,b_surprised])
b_proud['command']=lambda:task_10b(finito,b_proud,[b_happy,b_angry,b_sad,b_friendly,b_sick,b_shy,b_proud,b_surprised])
b_surprised['command']=lambda:task_10b(finito,b_surprised,[b_happy,b_angry,b_sad,b_friendly,b_sick,b_shy,b_proud,b_surprised])
else:
finito.destroy()
for butt in [b_happy,b_angry,b_sad,b_friendly,b_sick,b_shy,b_proud,b_surprised]:
butt.destroy()
canvas.delete('all')
next=Button(canvas,text='Click to complete test', font='Tahoma 15 bold', bg='blue')
next.place(x=300,y=100)
next['command']=lambda f=next:felicitation(f)
def felicitation(f):
f.destroy()
canvas.create_text(450,150, text=f'Thanks for completing the assessment', font='Tahoma 18 bold', )
    canvas.create_text(450,300, text=f'\n\nYour score is:\nLearning and memory: {task1_score+task2_score+task3_score}/25\nLanguage: {task4_score+task5_score}/18\nExecutive: {task6_score}/10\nPsychomotor: {task7_score+task8_score}/30\nAttention/concentration: {task9_score}/10\nSocial Cognition: {task10_score}/10\n\nYour total score: {task1_score+task2_score+task3_score+task4_score+task5_score+task6_score+task7_score+task8_score+task9_score+task10_score} ', font='Tahoma 18 bold', )
######################################################################################################################################################
#create canvas
canvas=Canvas(root, height=600, width=1000)
instruction=canvas.create_text(470,150, text='LagosCogniTool\n\nThe purpose of this tool is to assess your memory function to determine if your memory is normal for your age. \nThere are about 6 sections with an estimate of 2 minutes to complete each section, making a total of 12 minutes to complete. \nYou need to look for a quiet place without distraction to complete the test. \nTo correctly determine your memory function, it is important you answer these questions on your own without seeking help \nfrom another person or any other source like online materials or use of calculators. \n\nThe test will be based on correct answers and the time taken to perform the tasks. \nThanks\n\n\nYou have 2 minutes to complete each activity ', font='Times 13 bold')
finito=Button(canvas,text='Start Test', font='Tahoma 15 bold', bg='green' )
finito.place(x=400,y=550)
finito['command']=lambda f=finito:start_task(f)
def start_task(f):
f.destroy()
canvas.delete('all')
instruction=canvas.create_text(350,150, text='Learning and memory: Task 1\nTake note of these numbers, you will be asked to recall them (later).', font='Tahoma 15 bold')
#numbers to be shown
number=random.randint(10000,99999)
number=str(number)
global number_task
number_task=list(number)
#show number
number1=Label(canvas, text=number[0],font='Tahoma 20 bold', bg='yellow' )
number1.place(x=330, y=270)
number2=Label(canvas, text=number[1],font='Tahoma 20 bold', bg='yellow' )
number2.place(x=360, y=270)
number3=Label(canvas, text=number[2],font='Tahoma 20 bold', bg='yellow' )
number3.place(x=390, y=270)
number4=Label(canvas, text=number[3],font='Tahoma 20 bold', bg='yellow' )
number4.place(x=420, y=270)
number5=Label(canvas, text=number[4],font='Tahoma 20 bold', bg='yellow' )
number5.place(x=450, y=270)
#show time
root.after(1000, lambda:show_time(1,'t1'))
root.after(2000, lambda:show_time(2,'t1'))
root.after(3000, lambda:show_time(3,'t1'))
root.after(4000, lambda:show_time(4,'t1'))
root.after(5000, lambda:show_time(5,'t1'))
root.after(6000, lambda:show_time(6,'t1'))
root.after(7000, lambda:show_time(7,'t1'))
root.after(8000, lambda:show_time(8,'t1'))
root.after(9000, lambda:show_time(9,'t1'))
root.after(10000, lambda:show_time(10,'t1'))
###remove everything after 10 seconds
number1.after(10000, lambda:number1.destroy())
number2.after(10000, lambda:number2.destroy())
number3.after(10000, lambda:number3.destroy())
number4.after(10000, lambda:number4.destroy())
number5.after(10000, lambda:number5.destroy())
root.after(10000, lambda: canvas.delete('all'))
#pack canvas
canvas.pack()
root.mainloop()
# ===== end of logosCogni.py (repo: allstemconsults/MemoryApp) =====

# Question 1
import numpy as np
class Question1():
def __init__(self):
self.cluster1 = np.empty((20,2)) #cluster variable
self.cluster2 = np.empty((20,2)) #cluster variable
self.cluster3 = np.empty((20,2)) #cluster variable
self.cluster4 = np.empty((20,2)) #cluster variable
self.clusters = np.empty((80, 2)) #combined clusters
self.generateClusters()
def generateClusters(self):
self.cluster1 = np.random.normal((1,-1), (1,1), (20,2))
self.cluster2 = np.random.normal((-1,1), (1,1), (20,2))
self.cluster3 = np.random.normal((-1,-1), (1,1), (20,2))
self.cluster4 = np.random.normal((1,1), (1,1), (20,2))
return self.cluster1, self.cluster2, self.cluster3, self.cluster4
def getClusters(self):
self.clusters = np.concatenate((self.cluster1,self.cluster2,self.cluster3,self.cluster4), axis=0)
return self.clusters
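# Minimal usage sketch (illustrative only; the variable names below are placeholders):
# q1 = Question1()               # four 20-point Gaussian clusters are drawn on construction
# points = q1.getClusters()      # (80, 2) array combining all four clusters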
# ===== end of Quesion1.py (repo: AceAtomz/ML-Clustering-Tut) =====

# encoding: UTF-8
from collections import defaultdict
import functools
from itertools import chain
from twisted.python import log
from twisted.internet import defer, reactor
from .. import command
from . import ircutil
"""
The auth module provides the Auth plugin, which handles authorization of irc
users. It identifies irc users by the response code 330 from a whois, commonly
used to supply the username the user is logged in with.
The plugin hooks incoming irc events and adds a function to the event object:
has_permission(). This function takes two parameters: a permission string, and
a channel, and returns a deferred which fires with a boolean value indicating
if the user that initiated the event has that permission in that channel or
not. (if channel is irrelevant for the permission, it should be None,
indicating a global permission)
Permission strings are heirarchical strings delimited by dots. If a user has a
permission, they also have all sub-permissions of that permission. For example,
if a user has permission "foo.bar", these calls return true:
has_permission("foo.bar")
has_permission("foo.bar.baz")
Groups are also implemented and are fairly simple. Group names start with a %,
and groups are assigned permissions just the same as users. Users are then
assigned to groups, and the users inherit all the permissions of that group, as
well as their individually assigned permissions. Pretty standard.
For example, a group has permission irc.op, then all users of that group are
granted irc.op. Groups are assigned permissions, and users are assigned
membership in a group.
Groups do not cascade. You cannot assign a group to another group.
Permission commands fall under the !permission command. Group commands are
under the !group command. Most permission manipulation commands require the
permission auth.edit. The group manipulation commands are different. They
require the auth.edit.group.<groupname> permission (which is auto granted if
you have auth.edit). This is so you can grant permission to edit a specific
group, while not requiring having access to edit all groups or all permissions.
"""
def satisfies(user_perm, auth_perm):
"""Does the user permission satisfy the required auth_perm?
If auth_perm is some permission required by a command, and user_perm is
some permission that a user has, this function returns True if user_perm
grants access to auth_perm
Permission strings are hierarchical. Granting admin will grant admin,
admin.op1, admin.op2, etc.
Simple globs are supported, and do not transcend dots. Granting
admin.*.foo will allow admin.bar.foo and admin.baz.foo, but not
admin.bar.baz.foo
Globs at the end of permissions match as expected from the above rules.
Granting admin.* will allow admin.foo, admin.bar, admin.foo.baz, etc. (but
NOT 'admin' by itself!)
The super-user's permission is simply *
Partial globbing is not allowed. Only entire permission elements may be
globs. "admin.foo*bar" is not allowed. Instead, split it up to
"admin.foo.*.bar"
"""
user_parts = user_perm.split(".")
auth_parts = auth_perm.split(".")
if len(user_parts) > len(auth_parts):
# The auth required is more general than the user permission. There's
# no way for this to be satisfied.
return False
# Check all the corresponding elements match.
for userelem, authelem in zip(user_parts, auth_parts):
if userelem == "*" or authelem == "*":
continue
if userelem != authelem:
return False
# There may be more elements of auth_parts, but that's okay. It just means
# the user has a more general (more powerful) permission than is required.
return True
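# A few concrete checks of the matching rules described in the docstring (illustrative only):
#   satisfies("foo.bar", "foo.bar")                -> True
#   satisfies("foo.bar", "foo.bar.baz")            -> True   (sub-permissions are implied)
#   satisfies("admin.*.foo", "admin.bar.foo")      -> True
#   satisfies("admin.*.foo", "admin.bar.baz.foo")  -> False  (globs do not span dots)
#   satisfies("admin.*", "admin")                  -> False  (the bare parent is not granted)
#   satisfies("*", "anything.at.all")              -> True   (the super-user permission)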
class Auth(command.CommandPluginSuperclass):
"""Auth plugin.
Provides a reliable set of permissions other plugins can rely on. For
certain irc events, installs a has_permission() callback which can be used
to query if a user has a particular permission.
"""
REQUIRES = ["ircutil.IRCWhois"]
DEFAULT_CONFIG = {
# Maps authnames to a list of (channel, perm string) tuples
"perms": {},
# A list of (channel, perm string) tuples
"defaultperms": [],
# Maps authnames to a list of groups they're in
"groups": {},
}
def start(self):
super(Auth, self).start()
# Install a middleware hook for all irc events
self.install_middleware("irc.on_*")
# maps hostmasks to authenticated usernames, or None to indicate the
# user doesn't have any auth information
self.authd_users = {}
permgroup = self.install_cmdgroup(
grpname="permission",
permission="auth.edit",
helptext="Permission manipulation commands",
)
permgroup.install_command(
cmdname="grant",
cmdmatch="add|grant",
argmatch=r"(?P<name>%?\w+) (?P<perm>[^ ]+)(?: (?P<channel>[^ ]+))?$",
callback=self.permission_add,
cmdusage="<authname | %groupname> <permission> [channel]",
helptext="Grants a user the specified permission, either globally or in the specified channel. If authname starts with an %, it indicates a group",
)
permgroup.install_command(
cmdname="revoke",
cmdmatch="revoke|remove",
argmatch=r"(?P<name>%?\w+) (?P<perm>[^ ]+)(?: (?P<channel>[^ ]+))?$",
callback=self.permission_revoke,
cmdusage="<authname | %groupname> <permission> [channel]",
helptext="Revokes the specified permission from the user, either globally or in the specifed channel. If authname starts with an %, it indicates a group",
)
permgroup.install_command(
cmdname="list",
argmatch=r"(?P<name>%?[\w.]+)?$",
callback=self.permission_list,
cmdusage="[authname | %groupname]",
helptext="Lists the permissions granted to the given or current user. If authname starts with an %, it indicates a group",
)
permgroup.install_command(
cmdname="default add",
argmatch="(?P<perm>[^ ]+)(?: (?P<channel>[^ ]+))?$",
callback=self.add_default,
cmdusage="<permission> [channel]",
helptext="Adds a default permission; a permission that everyone implicitly has, either globally or in the specified channel",
)
permgroup.install_command(
cmdname="default revoke",
argmatch="(?P<perm>[^ ]+)(?: (?P<channel>[^ ]+))?$",
callback=self.revoke_default,
cmdusage="<permission> [channel]",
helptext="Revokes a default permission, either globally or in the specified channel",
)
permgroup.install_command(
cmdname="default list",
callback=self.list_default,
cmdusage="<permission>",
helptext="Lists the default permissions",
)
### Install group commands
groupgroup = self.install_cmdgroup(
grpname="group",
permission="auth.edit.group.*",
helptext="Authentication group manipulation commands",
)
groupgroup.install_command(
cmdname="add",
argmatch=r"(?P<user>[^ ]+) (?P<group>%\w+)$",
callback=self.group_add,
cmdusage="<user> <%group>",
helptext="Adds a user to the named group. Group names start with a %",
)
groupgroup.install_command(
cmdname="remove",
argmatch=r"(?P<user>[^ ]+) (?P<group>%\w+)$",
callback=self.group_remove,
cmdusage="<user> <%group>",
helptext="Removes a user from the named group. Group names start with a %",
)
groupgroup.install_command(
cmdname="list",
argmatch=r"(?P<group>%\w+)?$",
callback=self.group_list,
cmdusage="[%group]",
helptext="Lists the members of the specified group, or list all the groups. Group names start with a %",
)
# Top level command
self.install_command(
cmdname="whoami",
permission=None,
callback=self.permission_list,
helptext="Tells you who you're auth'd as and lists your permissions.",
)
def received_middleware_event(self, event):
"""For events that are applicable, install a handler one can call to
see if a user has a particular permission.
This way, auth permissions aren't checked until a plugin actually wants
to verify identity.
"""
if event.eventtype in [
"irc.on_privmsg",
"irc.on_mode_changed",
"irc.on_user_joined",
"irc.on_action",
"irc.on_topic_updated",
]:
event.has_permission = functools.partial(self._has_permission, event.user)
event.where_permission = functools.partial(self._where_permission, event.user)
return event
@defer.inlineCallbacks
def _get_permissions(self, hostmask):
"""This function returns the permissions granted to the given user,
identifying them in the process by doing a whois lookup if necessary.
It returns a deferred object which fires with an iterable over
(channel, permissionstr) tuples the user has, or an empty list of the
user does not have any permissions or the user could not be identified.
It does NOT include any default permissions, only permissions
explicitly granted to the user (along with any groups the user is in).
This method may send a whois to the server, in which case it looks for
an IRC 330 command back from the server indicating the user's authname
"""
# Check if the user is already identified by a previous whois
if hostmask in self.authd_users:
authname = self.authd_users[hostmask]
else:
# No cached entry for that hostmask in authd_users. Do a whois and look
# it up.
log.msg("Permission request for %s, but I don't know the authname. Doing a whois" % (hostmask,))
nick = hostmask.split("!")[0]
try:
whois_info = (yield self.transport.issue_request("irc.whois", nick))
except ircutil.WhoisError as e:
log.msg("Whois failed: %s" % e)
whois_info = {}
if "330" not in whois_info:
# No auth information. Cache this value for one minute
authname = None
self.authd_users[hostmask] = None
def cacheprune():
if hostmask in self.authd_users and self.authd_users[hostmask] == None:
del self.authd_users[hostmask]
reactor.callLater(60, cacheprune)
else:
authname = self.authd_users[hostmask] = whois_info["330"][1]
# if authname is none at this point, it indicates the whois didn't
# return any auth info. Remember this method does not account for
# default permissions, so just return an empty set
perms = set()
if authname:
perms.update(tuple(x) for x in self.permissions[authname])
# Now dereference perms from any groups the user is in
for group in self.config['groups'].get(authname, []):
perms.update(tuple(x) for x in self.permissions[group])
defer.returnValue(perms)
@defer.inlineCallbacks
def _has_permission(self, hostmask, permission, channel):
"""Asks if the user identified by hostmask has the given permission
string `permission` in the given channel. Channel can be None to
indicate a global permission is required.
This function is installed as event.has_permission() by the Auth
plugin, and is partially evaluated with the hostname already filled in,
so only the remaining arguments are specified when calling.
It returns a deferred object which passes to its callback a boolean
value: True if the user has access, and False if the user does not.
"""
if permission == None:
defer.returnValue(True)
return
user_perms = (yield self._get_permissions(hostmask))
for perm_channel, user_perm in chain(user_perms, self.config['defaultperms']):
# Does perm_channel apply to `channel`?
if not (
# One of these must be true for this permission to
# apply here.
perm_channel is None or
perm_channel == channel
):
continue
# Does user_perm satisfy `permission`?
if satisfies(user_perm, permission):
defer.returnValue(True)
return
defer.returnValue(False)
@defer.inlineCallbacks
def _where_permission(self, hostmask, permission):
"""This is a call made specifically for help-related plugins. It
returns a list of channels where the given user has the given
permission.
This function is installed on event objects as
event.where_permission(), partially evaluated with the hostname, so it
only needs the permission.
This returns a deferred. It produces a set of channels that have
`permission`, or an empty list if the user doesn't have the permission
anywhere.
"""
if permission == None:
defer.returnValue([None])
return
user_perms = (yield self._get_permissions(hostmask))
channels = set()
for perm_channel, user_perm in chain(user_perms, self.config['defaultperms']):
# If the user's permission user_perm grants `permission`, add
# `perm_channel` to the channel set
if satisfies(user_perm, permission):
channels.add(perm_channel)
defer.returnValue(channels)
### Reload event
def reload(self):
super(Auth, self).reload()
self.config['perms'] = defaultdict(list, self.config['perms'])
self.permissions = self.config['perms']
# Also turn groups into a defaultdict
self.config['groups'] = defaultdict(list, self.config['groups'])
### The command plugin callbacks, installed above
def permission_add(self, event, match):
groupdict = match.groupdict()
name = groupdict['name']
perm = groupdict['perm']
channel = groupdict.get("channel", None)
# This must be a list, even though a tuple is more appropriate, because
# they come back from json as a list. If it's changed to a tuple, you
# must convert them on reload and also change the .remove() method in
# permission_revoke()
self.permissions[name].append([channel, perm])
self.config.save()
if channel:
event.reply("Permission {0} granted for {usergroup} {1} in channel {2}".format(
perm, name, channel,
usergroup = "group" if name.startswith("%") else "user",
))
else:
event.reply("Permission {0} granted globally for {usergroup} {1}".format(perm, name,
usergroup = "group" if name.startswith("%") else "user",
))
def permission_revoke(self, event, match):
groupdict = match.groupdict()
name = groupdict['name']
perm = groupdict['perm']
channel = groupdict.get("channel", None)
try:
self.permissions[name].remove([channel, perm])
except ValueError:
# keyerror if the user doesn't have any, valueerror if the user has
# some but not this one
if channel:
event.reply("{usergroup} {0} doesn't have permission {1} in channel {2}!".format(
name, perm, channel,
usergroup = "Group" if name.startswith("%") else "User",
))
else:
event.reply("{usergroup} {0} doesn't have the global permission {1}!".format(
name, perm,
usergroup = "Group" if name.startswith("%") else "User",
))
else:
self.config.save()
if channel:
event.reply("Permission {0} revoked for {usergroup} {1} in channel {2}".format(
perm, name, channel,
usergroup = "group" if name.startswith("%") else "user",
))
else:
event.reply("Global permission {0} revoked for user {1}".format(perm, name,
usergroup = "group" if name.startswith("%") else "user",
))
@defer.inlineCallbacks
def permission_list(self, event, match):
name = match.groupdict().get('name', None)
if name:
perms = set(tuple(x) for x in self.permissions[name])
if name.startswith("%"):
msgstr = "group {0} has".format(name)
else:
msgstr = "user {0} has".format(name)
groups = self.config['groups'][name]
else:
# Get info about the current user
perms = set((yield self._get_permissions(event.user)))
if self.authd_users.get(event.user, None):
event.reply("You are identified as %s" % self.authd_users[event.user])
groups = self.config['groups'][self.authd_users[event.user]]
else:
event.reply("I don't know who you are")
groups = []
msgstr = "you have"
# dereference groups
for group in groups:
perms.update(tuple(x) for x in self.permissions[group])
# Maps channels to the permissions `user` holds in that channel
perms_map = defaultdict(set)
for perm_chan, perm in perms:
perms_map[perm_chan].add(perm)
globalperms = perms_map.pop(None, set())
if globalperms:
event.reply("%s these global permissions: %s" % (
msgstr.capitalize(), ", ".join(globalperms)))
else:
event.reply("%s no global permissions =(" % (msgstr,))
# If this isn't a direct message, don't show all the other channels
if event.direct:
for perm_chan, perms in perms_map.items():
event.reply("In channel %s %s: %s" % (
perm_chan, msgstr,
", ".join(perms)
))
elif perms_map:
this_chan = perms_map.pop(event.channel, None)
if this_chan:
event.reply("In channel %s %s: %s" % (
event.channel, msgstr,
", ".join(this_chan)
))
if perms_map:
event.reply("Also, %s some permissions in other channels. (Ask me in private to see them)" %
msgstr)
### Default permission callbacks
def add_default(self, event, match):
groupdict = match.groupdict()
permission = groupdict['perm']
channel = groupdict.get("channel", None)
if [channel, permission] not in self.config['defaultperms']:
self.config['defaultperms'].append([channel, permission])
self.config.save()
if channel:
event.reply("Done! Everybody now has %s in %s!" % (permission, channel))
else:
event.reply("Done! Everybody now has %s globally!" % (permission,))
else:
event.reply("That's already a default permission. Idiot.")
def revoke_default(self, event, match):
groupdict = match.groupdict()
permission = groupdict['perm']
channel = groupdict.get("channel", None)
try:
self.config['defaultperms'].remove([channel, permission])
except ValueError:
event.reply("That permission is not in the default list")
else:
self.config.save()
event.reply("Done. Revoked.")
def list_default(self, event, match):
perms_map = defaultdict(set)
for perm_chan, perm in self.config['defaultperms']:
perms_map[perm_chan].add(perm)
globalperms = perms_map.pop(None, set())
if globalperms:
event.reply("Default global permissions: %s" %
", ".join(globalperms))
else:
event.reply("No global permissions")
for perm_chan, perms in perms_map.items():
event.reply("Default permissions for channel %s: %s" % (
perm_chan,
", ".join(perms)))
@defer.inlineCallbacks
def group_add(self, event, match):
gd = match.groupdict()
user = gd['user']
group = gd['group']
if not (yield self._has_permission(event.user,
"auth.edit.group.{0}".format(group), None)):
event.reply("You do not have permissions to modify that group")
return
permlist = self.config['groups'][user]
if group in permlist:
event.reply("User {0} is already a member of group {1}".format(
user, group))
else:
permlist.append(group)
self.config.save()
event.reply("User {0} added as a member of group {1}".format(
user, group))
@defer.inlineCallbacks
def group_remove(self, event, match):
gd = match.groupdict()
user = gd['user']
group = gd['group']
if not (yield self._has_permission(event.user,
"auth.edit.group.{0}".format(group), None)):
event.reply("You do not have permissions to modify that group")
return
permlist = self.config['groups'][user]
if group not in permlist:
event.reply("User {0} is not a member of group {1}".format(
user, group))
else:
permlist.remove(group)
self.config.save()
event.reply("User {0} removed from group {1}".format(
user, group))
@defer.inlineCallbacks
def group_list(self, event, match):
gd = match.groupdict()
group = gd['group']
if group:
# Request to list a group. First make sure the user has access to the group.
if not (yield self._has_permission(event.user,
"auth.edit.group.{0}".format(group), None)):
event.reply("You do not have permissions to view that group")
return
members = set()
for user, groups in self.config['groups'].items():
if group in groups:
members.add(user)
members = sorted(members)
event.reply("Members in group {0}: {1}".format(group, ", ".join(members)),
notice=True, direct=True)
else:
# List all groups the user has access to.
allgroups = set()
for user, groups in self.config['groups'].items():
allgroups.update(groups)
# Filter the groups the user has access to
access_groups = []
for g in allgroups:
if (yield self._has_permission(event.user,
"auth.edit.group.{0}".format(g), None)):
access_groups.append(g)
access_groups.sort()
log.msg(access_groups)
event.reply("Groups you have access to: {0}".format(", ".join(access_groups)))
# ===== end of abbott/plugins/auth.py (repo: brownan/abbott) =====

import math
# Typing <variable>. and then pressing Tab twice will show you all the methods you can call on it
radius_str = input("Enter the radius of the circle: ")
radius_int = int(radius_str)
circumference = 2 * math.pi * radius_int
area = math.pi * (radius_int ** 2)
print("The circumference is: ", circumference, " and the area is: ", area)
# ===== end of deitel_exercises/class_works/area.py (repo: SamAdesoba/Python-codes) =====

import os
class Recorder:
def __init__(self, snake, food):
"""
Constructor for the recorder, initializes it's variables
:param snake: the starting snake of the game - to be recorded
:param food: the starting food of the game - to be recorded
"""
self.snakes = [snake] # list of the snakes
self.foods = [food] # list of the foods
def update(self, snake, food):
"""
updates the lists of the recorded information
:param snake: the current snake to be recorded
:param food: the current food (of the same state) to be recorded
:return: None
"""
# updating the lists with the new information - the next state of the game
self.snakes.append(snake)
self.foods.append(food)
def hard_save(self, game_name=None, dir_name='human_history'):
"""
saves a RAZ file of the recording
        :param game_name: the name of the game; if None it will automatically be "Game{number}.RAZ"
        :param dir_name: the directory to save the game in, automatically set to "human_history"
        :return: None
"""
if not game_name:
n = len(os.listdir(os.path.join(os.path.dirname(__file__), f'{dir_name}')))
game_name = f'Game{n}'
path = os.path.join(os.path.dirname(__file__), f'{dir_name}/{game_name}.RAZ')
while os.path.exists(path):
            game_name = input('Name already exists, enter another one:')
path = os.path.join(os.path.dirname(__file__), f'{dir_name}/{game_name}.RAZ')
with open(path, 'w') as f:
f.write(str(self.snakes))
f.write('\n')
f.write(str(self.foods))
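# Minimal usage sketch (snake_state and food_state are hypothetical placeholders for the
# caller's game objects; anything with a sensible str() works, since hard_save writes
# str(...) of the recorded lists):
# rec = Recorder(snake_state, food_state)
# rec.update(next_snake_state, next_food_state)   # call once per recorded game tick
# rec.hard_save()                                 # writes human_history/Game<N>.RAZ next to this module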
# ===== end of game_viewer/recorder.py (repo: OrelAvraham/Final-Project-Snake) =====

import sys
n, m = map(int, sys.stdin.readline().rstrip().split())
parents = [i for i in range(n+1)]
def find(node):
if parents[node] == node: return node
else:
parents[node] = find(parents[node])
        # path compression: memoize the root for future lookups
return parents[node]
def union(node1, node2):
root1, root2 = find(node1), find(node2)
parents[root2] = root1
def is_union(node1, node2):
root1, root2 = find(node1), find(node2)
if root1 == root2: return True
else: return False
for _ in range(m):
cmd, node1, node2 = map(int, sys.stdin.readline().rstrip().split())
if cmd == 0:
        # union the two sets
union(node1, node2)
else:
        # membership check: are the two nodes in the same set?
if is_union(node1, node2): print('YES')
else: print('NO')
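# Worked example (hypothetical stdin input):
#   3 4        -> 3 elements (1..3), 4 operations
#   0 1 2      -> union the sets containing 1 and 2
#   1 1 2      -> prints YES (same set)
#   1 1 3      -> prints NO
#   0 2 3      -> union the sets containing 2 and 3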
# ===== end of Baekjoon/1717_집합의 표현.py (repo: PJunyeong/Coding-Test) =====

try:
import numpy
except ImportError:
pass
else:
import unittest
from contracts import decorate, new_contract, ContractNotRespected
new_contract('rgb', 'array[HxWx3],H>0,W>0')
new_contract('rgba', 'array[HxWx4],H>0,W>0')
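    # The contract strings are shape constraints: 'array[HxWx3],H>0,W>0' accepts any numpy
    # array of shape (H, W, 3) with positive H and W (an RGB image); 'rgba' is the
    # 4-channel equivalent.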
def blend_function(image1, image2, bug=False):
"""
Blends two RGB or RGBA images together.
:param image1: The first image to blend.
:type image1: (rgb|rgba),array[HxWx*]
:param image2: The second image to blend.
:type image2: (rgb|rgba),array[HxWx*]
:param bug: Introduce a bug to check the contracts.
:type bug: bool
:return: The blended image.
:rtype: rgb,array[HxWx3]
"""
H, W = image1.shape[0], image1.shape[1]
if bug:
# if we want to show a bug, return a different shape
W += 1
result = numpy.zeros((H, W, 3), 'uint8')
# put here the actual function
image2
return result
im_float = numpy.zeros((10, 10, 3), dtype='float32')
rgb_small = numpy.zeros((10, 10, 3), dtype='uint8')
rgb_large = numpy.zeros((20, 20, 3), dtype='uint8')
rgba_small = numpy.zeros((10, 10, 3), dtype='uint8')
rgba_large = numpy.zeros((20, 20, 3), dtype='uint8')
class ArrayTest(unittest.TestCase):
def setUp(self):
self.blend = decorate(blend_function)
def test_correct_behavior(self):
self.blend(rgb_small, rgb_small)
self.blend(rgb_small, rgba_small)
self.blend(rgba_small, rgb_small)
self.blend(rgb_large, rgba_large)
self.blend(rgba_large, rgb_large)
def test_incorrect1(self):
self.assertRaises(ContractNotRespected, self.blend, None, None)
def test_incorrect2(self):
self.assertRaises(ContractNotRespected, self.blend,
None, rgb_small)
def test_incorrect3(self):
self.assertRaises(ContractNotRespected, self.blend,
rgb_small, None)
def test_incorrect4(self):
self.assertRaises(ContractNotRespected, self.blend,
rgb_small, rgb_large)
def test_incorrect5(self):
self.assertRaises(ContractNotRespected, self.blend,
rgb_small, rgb_large)
def test_incorrect6(self):
# check that rtype checking works, introduce a bug
self.assertRaises(ContractNotRespected, self.blend, rgb_small,
rgb_small, bug=True)
# ===== end of src/contracts/testing/array_extended_test.py (repo: AndreaCensi/contracts) =====

from collections import defaultdict
def readFile(input):
infile = open(input, 'r').read().split('\n\n')
return infile
def parseFields(fields):
classes = defaultdict(list)
separated = fields.split('\n')
for field in separated:
name, ranges = field.split(': ')
separatedRanges = ranges.split(' or ')
for ind_range in separatedRanges:
l, r = ind_range.split('-')
classes[name].append([int(l), int(r)])
return classes
def parseTickets(ticketlist):
tickets = ticketlist.split('\n')
ticketList = []
for ticket in range(1, len(tickets)):
ticketList.append(list(map(int, tickets[ticket].split(','))))
return ticketList
def getCardsAndCalcErrorRate(fields, tickets):
errorRate = 0
errorcards = set()
for i, ticket in enumerate(tickets):
for number in ticket:
seen = False
for key in fields.keys():
if not seen:
ranges = fields[key]
for l, r in ranges:
if number >= l and number <= r:
seen = True
break
if seen:
break
if not seen:
errorRate += number
errorcards.add(i)
return errorcards, errorRate
def getClassTranslation(ticketClasses, validTickets):
potentTranslate = defaultdict(list)
for ticket in validTickets:
for i, number in enumerate(ticket):
for key in ticketClasses.keys():
if not key in potentTranslate:
potentTranslate[key] = [0 for _ in range(len(ticketClasses))]
ranges = ticketClasses[key]
if potentialClassValidity(number, ranges):
potentTranslate[key][i] += 1
translation = refinePotentialTranslation(potentTranslate, len(validTickets))
return translation
def refinePotentialTranslation(potentials, needed_nums):
finalTranslation = {}
seennums = set()
while len(finalTranslation) < len(potentials):
for key in potentials.keys():
if key not in finalTranslation:
pots = potentials[key]
valids = []
for i, x in enumerate(pots):
if i not in seennums:
if x == needed_nums:
valids.append(i)
if len(valids) == 1:
finalTranslation[key] = valids[0]
seennums.add(valids[0])
return finalTranslation
def potentialClassValidity(number, ranges):
for l, r in ranges:
if number >= l and number <= r:
return True
return False
def getDepartureMultiplication(translation, ticket):
needed = ['departure location', 'departure station', 'departure platform', 'departure track', 'departure date', 'departure time']
out = 1
for field in needed:
out *= ticket[translation[field]]
return out
def main():
fields, myTicket, ticketList = readFile('input.txt')
ticketClasses = parseFields(fields)
otherTickets = parseTickets(ticketList)
errorcards, errorRate = getCardsAndCalcErrorRate(ticketClasses, otherTickets)
validTickets = [ticket for i, ticket in enumerate(otherTickets) if i not in errorcards]
translation = getClassTranslation(ticketClasses, validTickets)
personalTicket = parseTickets(myTicket)
print(errorRate)
print(getDepartureMultiplication(translation, personalTicket[0]))
if __name__ == "__main__":
main()
# ===== end of day16/ticket_translation.py (repo: ryanlberg/AdventOfCode2020) =====

from flask_wtf import FlaskForm
from govuk_frontend_wtf.wtforms_widgets import GovRadioInput, GovSelect, GovSubmitInput
from wtforms.fields import RadioField, SelectField, SubmitField
from wtforms.validators import AnyOf, InputRequired
class CookiesForm(FlaskForm):
functional = RadioField(
"Do you want to accept functional cookies?",
widget=GovRadioInput(),
validators=[
InputRequired(message="Select yes if you want to accept functional cookies")
],
choices=[("no", "No"), ("yes", "Yes")],
default="no",
)
analytics = RadioField(
"Do you want to accept analytics cookies?",
widget=GovRadioInput(),
validators=[
InputRequired(message="Select yes if you want to accept analytics cookies")
],
choices=[("no", "No"), ("yes", "Yes")],
default="no",
)
save = SubmitField("Save cookie settings", widget=GovSubmitInput())
class DownloadForm(FlaskForm):
file_format = SelectField(
"File type",
widget=GovSelect(),
validators=[AnyOf(["json", "xlsx"])],
choices=[
("xlsx", "XSLX (Excel)"),
("json", "JSON"),
],
default=None,
)
download = SubmitField("Download", widget=GovSubmitInput())
# ===== end of app/main/forms.py (repo: communitiesuk/funding-service-design-post-award-data-frontend) =====

# -*- coding: utf-8 -*-
"""
Name: Royston Marian Mascarenhas
USC email: [email protected]
EE559 Final Project
Spring 2019
@author: royma
"""
from util import *
class Transform():
def __init__(self,data,labels,tdata,tlabels):
self.data = data
self.labels = labels
self.tdata = tdata
self.tlabels = tlabels
self.n = self.data.shape[0]
self.nf = self.data.shape[1]
self.col = self.data.columns
def perfPCA(self,nc=3,ncflag=True,lowvar=0.9,thresh=0.5,upvar=0.95):
if (ncflag==True):
self.pcaobj = PCA(n_components=nc)
self.transdata = self.pcaobj.fit_transform(self.data)
print("Number of principal components: "+str(self.transdata.shape[1]))
print("Variance of each principal component: ")
print(self.pcaobj.explained_variance_ratio_)
result_var = sum (self.pcaobj.explained_variance_ratio_)
print("Total variance covered by principal components: "+ str(result_var))
            self.transdata = pd.DataFrame(self.transdata)
return self.transdata
else:
endflag = 0
varlst = []
complst = []
threshdata = int(np.floor(thresh*self.data.shape[1]))
print("number of components needed "+str(threshdata))
for ivar in tqdm(np.arange(lowvar,0.98,0.005)):
self.pcaobj = PCA(ivar)
self.pcaobj.fit(self.data)
self.transdata = self.pcaobj.transform(self.data)
result_var = sum (self.pcaobj.explained_variance_ratio_)
print("Total variance covered by principal components: "+ str(result_var)+" for"+
str(self.transdata.shape[1])+" components ")
complst.append(self.transdata.shape[1])
varlst.append(result_var)
if (ivar > upvar and self.transdata.shape[1] < threshdata):
endflag = 1
break
else:
print("discarded")
if (endflag == 0 ):
print("Best variance/component combination not found")
self.pcaobj = PCA(lowvar)
self.pcaobj.fit(self.data)
self.transdata = self.pcaobj.transform(self.data)
#self.transdatat = self.pcaobj.transform(self.tdata)
result_var = sum (self.pcaobj.explained_variance_ratio_)
else:
#self.transdatat = self.pcaobj.transform(self.tdata)
plt.figure(1)
plt.title("Variance v/s number of components graph")
plt.xlabel("Number of components")
plt.ylabel("Increasing variance")
plt.plot(complst,varlst)
plt.show()
print("Final number of principal components: "+str(self.transdata.shape[1]))
print("Variance of each principal component: ")
print(self.pcaobj.explained_variance_ratio_)
result_var = sum (self.pcaobj.explained_variance_ratio_)
print("Final total variance covered by principal components: "+ str(result_var))
self.transdata = pd.DataFrame(self.transdata)
return self.transdata
def hot_encode(self):
enc = OneHotEncoder(handle_unknown='ignore')
enc.fit(self.labels)
enc.transform(self.labels)
def pca_transform(self,data):
return (self.pcaobj.transform(data))
'''def visualize(self):
pcaobj = PCA(n_components=3)
self.transdata = pcaobj.fit_transform(self.data)
result_var = sum (pcaobj.explained_variance_ratio_)
print("Fidelity factor of visualization: "+ str(result_var))'''
# ===== end of Transform.py (repo: rmmasc/Course-Project---Prediction-of-Air-Pressure-System-Failure-at-Scania-Trucks) =====

class Solution:
def reverse(self, x: int) -> int:
ans_max = (2**31 - 1) // 10
ans_min = -2**31//10 + 1
b = abs(x)
ans = 0
while True:
if ans_min <= ans <= ans_max:
ans = b % 10 + ans * 10
else:
return 0
if b // 10 == 0:
break
else:
b = b // 10
return ans if x > 0 else -ans
test_int = -2147483641
res = Solution().reverse(test_int)
print(res)
| steve3ussr/PyCharmProject | LeetCode/LeetCode100/reverse_opt.py | reverse_opt.py | py | 511 | python | fr | code | 0 | github-code | 50 | 28569555137 |
# The solution has a time complexity of O(n), where n is the length of the input strings. This is because the solution iterates over the characters of both strings once to count their occurrences and then compares the counts.
class Solution:
def isAnagram(self, s: str, t: str) -> bool:
# check if length of both strings is same or not
if len(s) != len(t):
return False
# initialize empty hashmaps for each strings
count_S, count_T = {}, {}
# build the hashmaps
# For each character at index i, it increments the count for that character in the respective dictionary using count_S.get(s[i], 0) and count_T.get(t[i], 0). The get method returns the count of the character if it exists in the dictionary or 0 if it doesn't.
for i in range(len(s)):
count_S[s[i]] = 1 + count_S.get(s[i], 0)
count_T[t[i]] = 1 + count_T.get(t[i], 0)
# iterate through both hashmaps and check if count for each key is same
# current_char is the index for keys in the hashmap
for current_char in count_S:
if count_S[current_char] != count_T.get(current_char, 0):
return False
# otherwise, both hashmaps are same, t is anagram of s
return True
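# Equivalent sketch (not part of the original solution): the same O(n) counting
# idea expressed with collections.Counter instead of hand-built dicts.
from collections import Counter

def is_anagram_counter(s: str, t: str) -> bool:
    # Counter builds the character-frequency maps in one pass each
    return Counter(s) == Counter(t)

assert is_anagram_counter("anagram", "nagaram") is True
assert is_anagram_counter("rat", "car") is False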
| aakashmanjrekar11/leetcode | 4. String/242. Valid Anagram.py | 242. Valid Anagram.py | py | 1,211 | python | en | code | 0 | github-code | 50 | 74529773914 |
from flask import Flask, request, jsonify
from flask_cors import CORS
import numpy as np
import base64
from PIL import Image
from io import BytesIO
import onnxruntime
app = Flask(__name__)
CORS(app)
ort_session = onnxruntime.InferenceSession("captcha_reader_model5.onnx")
@app.route('/', methods=["POST"])
def main_func():
# get image data
data = request.get_json()
base64_data = data.get('image', None)
    if base64_data is None:
return jsonify({"captcha_value":"Image Not found"})
# changing base64 img data to image array
_, encoded_data = base64_data.split(',', 1)
image_bytes = base64.b64decode(encoded_data)
try:
image = Image.open(BytesIO(image_bytes))
except:
return jsonify({"captcha_value": "Image Not found"})
# preprocessing
threshold = 192
process_image = np.array(image)
process_image = np.mean(process_image, axis=-1)
process_image = np.where(process_image < threshold, 0, 255).astype(np.uint8)
# separating each letter
img_list = separate(process_image)
# loading classifier
np_arr = np.array(img_list).reshape(5, 1, 28, 28)
ort_inputs = {'input.1': np_arr.astype('float32')}
val = ort_session.run(None, ort_inputs)
output = np.argmax(val, axis=-1)[0]
ans = "".join([int2label_dict[i] for i in output])
return jsonify({"captcha_value": ans})
# ------- function to separate each character ---------
def separate(full_image):
# vertical cutting
prev = False
prev2 = False
arr = []
for i in range(full_image[0].shape[0]):
if np.all(full_image[:, i] < 127) and not prev:
arr.append(i)
prev = True
prev2 = False
elif not np.all(full_image[:, i] < 127) and not prev2:
arr.append(i)
prev2 = True
prev = False
# --------------
crop_img = []
for i in range((len(arr) - 1) // 2):
image = full_image[:, arr[2*i + 1]: arr[2*i + 2]]
# horizontal cutting
prev = False
prev2 = False
arr2 = []
for j in range(image.shape[0]):
if np.all(image[j, :] < 127) and not prev:
arr2.append(j)
prev = True
prev2 = False
elif not np.all(image[j, :] < 127) and not prev2:
arr2.append(j)
prev2 = True
prev = False
image = image[arr2[1]: arr2[-1], :]
# --------------
# padding
pad_width = int((image.shape[0] - image.shape[1]) / 2)
pad_size = 5
if pad_width > 0:
image = np.pad(image, ((pad_size, pad_size), (pad_width + pad_size, pad_width + pad_size)), mode="constant")
else:
image = np.pad(image, ((pad_size - pad_width, pad_size - pad_width), (pad_size, pad_size)), mode="constant")
# ----------
image = Image.fromarray(image)
resized_image = image.resize((28, 28))
resized_image = np.array(resized_image)
crop_img.append(resized_image)
return crop_img
# dictionary to change label to int and int to label
label2int_dict = {}
for i in range(62):
if i < 10:
label2int_dict[str(i)] = i
elif i <36:
label2int_dict[chr(97 + i - 10)] = i
else:
label2int_dict[chr(65 + i - 36)] = i
int2label_dict = {}
for key in label2int_dict:
int2label_dict[label2int_dict[key]] = key
if __name__ == "__main__":
app.run(host="0.0.0.0")
| himanshukumargupta11012/AIMS_captcha_autofill | flask-api/app.py | app.py | py | 3,534 | python | en | code | 1 | github-code | 50 | 11155030454 |
import matplotlib
import pandas as pd
import torch
from matplotlib import pyplot as plt
from pandas import read_csv
from torch import nn, optim
from torch.utils.data import DataLoader, TensorDataset
matplotlib.use('TkAgg')
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def init_data(csv_path: str, mode='CPU') -> TensorDataset:
"""
读取并初始化数据
:param csv_path: csv文件的路径
:param mode: 模式,需指定使用CPU或GPU计算,并据此将数据转换为对应类型的tensor类型
:return: 用TensorDataset包装起来的数据集
"""
df = read_csv(csv_path)
X = df.iloc[:, :-1]
y = df.iloc[:, -1]
if mode == 'GPU':
X = torch.tensor(X.values, dtype=torch.float32, device=device)
y = torch.tensor(y.values, dtype=torch.float32, device=device)
else:
X = torch.tensor(X.values, dtype=torch.float32)
y = torch.tensor(y.values, dtype=torch.float32)
return TensorDataset(X, y)
def train_test_split(data: TensorDataset, test_size=0.2) -> tuple[DataLoader, DataLoader]:
"""
划分训练集和测试集,并以DataLoader的形式分别返回。由于本次实验均采用20%留出法,所以test_size默认为0.2
:param data: 原始数据集,需要用TensorDataset的形式包装起来
:param test_size: 测试集占比,默认0.2
:return: 依次返回训练集和测试集
"""
data, test_data = torch.utils.data.random_split(
data, [len(data) - int(len(data) * test_size), int(len(data) * test_size)])
train_loader = DataLoader(data, batch_size=16, shuffle=True)
test_loader = DataLoader(test_data, batch_size=16, shuffle=True)
return train_loader, test_loader
# Linear regression model
class LinearRegression(nn.Module):
    def __init__(self):
        super(LinearRegression, self).__init__()
        self.fc1 = nn.Linear(1, 10)  # input layer: input dim 1, output dim 10
        self.fc2 = nn.Linear(10, 10)  # hidden layer
        self.fc3 = nn.Linear(10, 1)  # output layer: back down to dim 1
    def forward(self, x):  # forward pass with ReLU activations
        x = torch.relu(self.fc1(x))
        x = torch.relu(self.fc2(x))
        x = self.fc3(x)
        return x
# Logistic regression (classification) model
class Classifier(nn.Module):
    def __init__(self):
        super(Classifier, self).__init__()
        self.fc1 = nn.Linear(2, 10)  # input layer: input dim 2, output dim 10
        self.fc2 = nn.Linear(10, 10)  # hidden layer
        self.fc3 = nn.Linear(10, 10)  # hidden layer
        self.fc4 = nn.Linear(10, 2)  # output layer: back down to dim 2
    def forward(self, x):  # forward pass with ReLU activations
        x = torch.relu(self.fc1(x))
        x = torch.relu(self.fc2(x))
        x = torch.relu(self.fc3(x))
        x = self.fc4(x)
        return x
def regression():
    """
    Problem 1: linear regression
    :return: None
    """
    dataset = init_data('./ex1data.csv')  # read the data
    # define the model, loss function and optimizer
    net = LinearRegression()
    criterion = nn.MSELoss()
    optimizer = optim.Adam(net.parameters(), lr=0.01)
    dataset_train, dataset_test = train_test_split(dataset, 0.2)
    # train the model: 100 epochs, computing train and test loss once per epoch
    loss_train, loss_test = [], []
    for epoch in range(100):
        # compute the training loss
        running_loss = 0.0
        for i, data in enumerate(dataset_train):
            X_train, y_train = data
            optimizer.zero_grad()
            y_pred = net(X_train).squeeze()
            loss = criterion(y_pred, y_train)  # mean squared error loss
            loss.backward()  # backpropagation
            optimizer.step()  # update parameters
            running_loss += loss.item()
        loss_train.append(running_loss / len(dataset_train))  # record the training loss
        running_loss = 0.0  # reset the test loss
        with torch.no_grad():  # the test set is only used for the loss, no backprop needed
            for i, data in enumerate(dataset_test):
                X_test, y_test = data
                y_pred = net(X_test).squeeze()  # predict
                loss = criterion(y_pred, y_test)  # mean squared error loss
                running_loss += loss.item()
            loss_test.append(running_loss / len(dataset_test))
    # visualization
    plt.plot(loss_train, label='train', linewidth=3)
    plt.plot(loss_test, label='test', linewidth=3)
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.show()
    # save the data
    df = pd.DataFrame({'train': loss_train, 'test': loss_test})
    df.to_csv('Regression loss.csv', index=False)
def classify():
    """
    Problem 2: logistic regression (classification)
    :return: None
    """
    dataset = init_data('./ex1data2.csv')  # read the data
    # define the model, loss function and optimizer
    net = Classifier()
    criterion = nn.CrossEntropyLoss()  # cross-entropy loss
    optimizer = optim.Adam(net.parameters(), lr=0.01)
    dataset_train, dataset_test = train_test_split(dataset, 0.2)
    # train the model: 400 epochs, computing train loss and test error once per epoch
    loss_train, loss_test = [], []
    for epoch in range(400):
        # compute the training loss
        running_loss = 0.0
        for i, data in enumerate(dataset_train):
            X_train, y_train = data
            optimizer.zero_grad()
            y_pred = net(X_train)
            loss = criterion(y_pred.squeeze(), y_train.squeeze().long())  # cross-entropy loss
            loss.backward()  # backpropagation
            optimizer.step()  # update parameters
            running_loss += loss.item()
        loss_train.append(running_loss / len(dataset_train))  # record the training loss
        correct_num, count_num = 0, 0  # reset the test counters
        with torch.no_grad():
            for i, data in enumerate(dataset_test):
                X_test, y_test = data
                y_pred = net(X_test)  # predict
                _, y_pred = torch.max(y_pred.data, dim=1)
                count_num += y_test.size(0)  # accumulate the number of test samples
                correct_num += (y_pred == y_test.squeeze()).sum().item()  # accumulate the number of correct predictions
            loss_test.append(1 - correct_num / count_num)  # record the test "loss"; for classification this is simply the error rate
    # visualization
    plt.plot(loss_train, label='train', linewidth=3)
    plt.plot(loss_test, label='test', linewidth=3)
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.show()
    # save the data
    df = pd.DataFrame({'train': loss_train, 'test': loss_test})
    df.to_csv('Classification loss.csv', index=False)
if __name__ == "__main__":
# regression()
classify()
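# Illustrative only (not part of the original script): how a trained Classifier
# could be applied to new 2-D points; an untrained net would also run, just with
# meaningless predictions.
def predict_points(net: nn.Module, points):
    net.eval()
    with torch.no_grad():
        logits = net(torch.tensor(points, dtype=torch.float32))
    return torch.argmax(logits, dim=1).tolist()
# e.g. predict_points(Classifier(), [[0.5, 1.2], [-0.3, 0.8]]) -> a list of class ids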
| Steven-Zhl/YNU_ISE_Courses | MachineLearning/Experiments/Exp4/Code/neural_network.py | neural_network.py | py | 6,747 | python | en | code | 3 | github-code | 50 | 10627039940 |
from create_node import Node
class LinkedList:
def __init__(self):
self.head = None
def push(self, name, value):
#if new list
newNode = Node(name, value)
if not self.head:
self.head = newNode
else:
#save head's current position
temp = self.head
#move head's position until you find a None value for
#nextNode
while(temp.next):
temp = temp.next
#set the none value to the new node
temp.next = newNode
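# Usage sketch (not in the original file); assumes create_node.Node exposes
# .name, .value and .next (initially None), which push() relies on.
if __name__ == "__main__":
    ll = LinkedList()
    for name, value in [("a", 1), ("b", 2), ("c", 3)]:
        ll.push(name, value)
    node = ll.head
    while node:                       # walk the chain head -> tail
        print(node.name, node.value)  # a 1 / b 2 / c 3
        node = node.next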
| ChadMcintire/data_structures | ll.py | ll.py | py | 562 | python | en | code | 0 | github-code | 50 | 9639247764 |
import tkinter as tk
root = tk.Tk()
COLS_COUNT = 4
STATES_COLORS = dict({
'gut': 'green',
'schlecht': 'red',
'repair': 'yellow'
})
def create_grid_from_list(data):
"""
Creates a multi-dimensional array from a flat array,
used for visualizing a grid
:param data: One dimensional array
:return: Multi dimensional array
"""
rows = (len(data) // COLS_COUNT) + 1
values = []
for i in range(rows):
values.append([])
for j in range(COLS_COUNT):
index = i * COLS_COUNT + j
if index >= len(data):
break
if data[index]['state']:
values[i].append(data[index])
else:
values[i].append(data[index])
return values
def start_grid_vis(data):
"""
Function to create grid and start tkinter
:param data: One dimensional array with results from classifier
:return: None
"""
values = create_grid_from_list(data)
for i in range(len(values)):
for j in range(len(values[i])):
state = values[i][j]['state']
meta = values[i][j]['tool']
button = tk.Button(root, text=state, bg=STATES_COLORS[state], padx=40, pady=40)
button.grid(row=i, column=j, padx=10, pady=10, sticky="nsew")
root.mainloop()
def start_grid_vis_val(data, real_data):
"""
Function to create grid and start tkinter
:param data: One dimensional array with results from classifier
:return: None
"""
values = create_grid_from_list(data)
label = tk.Label(root, text='Predictions').grid(row=0, sticky="ew")
for i in range(len(values)):
for j in range(len(values[i])):
state = values[i][j]['state']
meta = values[i][j]['tool']
button = tk.Button(root, text=str(i + 1), bg=STATES_COLORS[state], padx=40, pady=40)
button.grid(row=i + 1, column=j, padx=10, pady=10, sticky="nsew")
real_values = create_grid_from_list(real_data)
label = tk.Label(root, text='Labels').grid(row=len(values), sticky="ew")
for i in range(len(real_values)):
for j in range(len(real_values[i])):
state = real_values[i][j]['state']
meta = real_values[i][j]['tool']
button = tk.Button(root, text=str(i + 1 + len(real_values)), bg=STATES_COLORS[state], padx=40, pady=40)
button.grid(row=i + 1 + len(real_values), column=j, padx=10, pady=10, sticky="nsew")
root.mainloop()
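# Invented sample data (not in the original file) matching the dict structure the
# functions above expect: 'state' must be a STATES_COLORS key, 'tool' is free-form.
if __name__ == "__main__":
    sample = [
        {"state": "gut", "tool": "drill-01"},
        {"state": "schlecht", "tool": "drill-02"},
        {"state": "repair", "tool": "mill-03"},
        {"state": "gut", "tool": "mill-04"},
        {"state": "gut", "tool": "lathe-05"},
    ]
    print(create_grid_from_list(sample))  # 5 items -> rows of up to COLS_COUNT columns
    # start_grid_vis(sample)              # would open a Tk window with one button per item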
| OverDriveGain/yazan-project | GUI/start.py | start.py | py | 2,496 | python | en | code | 0 | github-code | 50 | 9919933010 |
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Read the data
df = pd.read_csv('adatok2.csv')
# Display the first five rows
print(df.head())
# Use Matplotlib for a simple chart
plt.figure(figsize=(10, 6))
plt.plot(df['YEAR'], df['ESTIMATE'])
plt.title('Matplotlib diagram')
plt.xlabel('YEAR')
plt.ylabel('ESTIMATE')
plt.show()
# Use Seaborn for a more elaborate chart
sns.set(style="whitegrid")
plt.figure(figsize=(10, 6))
sns.barplot(x='YEAR', y='ESTIMATE', data=df)
plt.title('Seaborn diagram')
plt.show()
# Create a histogram
plt.figure(figsize=(10, 6))
plt.hist(df['ESTIMATE'], bins=10, edgecolor='black')
plt.title('Histogram of ESTIMATE')
plt.xlabel('ESTIMATE')
plt.ylabel('Frequency')
plt.show()
# Create a boxplot
sns.set(style="whitegrid")
plt.figure(figsize=(10, 6))
sns.boxplot(y='ESTIMATE', data=df)
plt.title('Boxplot of ESTIMATE')
plt.show()
| wodor1/python_data_vis | app.py | app.py | py | 968 | python | hu | code | 0 | github-code | 50 | 36484915030 |
import sys
ans = sys.maxsize
# Find the maximum/minimum of an integer list arr
# Find the minimum value
arr= [162,5784,789321,75364757]
for i in arr:
    if ans>i:
        ans = i
print(ans)
# Base conversion
# from base 10 to base n
bin(100) # binary
oct(100) # octal
hex(100) # hexadecimal
# outputs, respectively:
#0b1100100
#0o144
#0x64
# base n to base 10
int('0b1100100',2)
int('0o144',8)
int('0x64',16)
# Baekjoon #2729: binary addition
import sys
input = sys.stdin.readline
for _ in range(int(input())):
t1, t2 = map(str,input().split())
a1 = int('0b'+t1,2)
a2 = int('0b'+t2,2)
print( str(bin(a1+a2))[2:])
# Baekjoon #11005: base conversion
import sys
input = sys.stdin.readline
N,B = map(int, input().split())
arr = []
while (N !=0):
    tempr = N%B # store the remainder
    N = int(N/B) # cast explicitly since / returns a float
if (tempr<10):
arr.append(str(tempr))
else: arr.append( chr(tempr+55) )
arr.reverse()
print( ''.join(arr))
| Mullan2020/python_practice | test_d3.py | test_d3.py | py | 1,003 | python | ko | code | 0 | github-code | 50 | 10101806680 |
"""Test setup for integration and functional tests.
When we import PloneTestCase and then call setupPloneSite(), all of
Plone's products are loaded, and a Plone site will be created. This
happens at module level, which makes it faster to run each test, but
slows down test runner startup.
"""
from Products.Five import zcml
from Products.Five import fiveconfigure
from Testing import ZopeTestCase as ztc
from Products.PloneTestCase import PloneTestCase as ptc
from Products.PloneTestCase.layer import onsetup
# When ZopeTestCase configures Zope, it will *not* auto-load products
# in Products/. Instead, we have to use a statement such as:
# ztc.installProduct('SimpleAttachment')
# This does *not* apply to products in eggs and Python packages (i.e.
# not in the Products.*) namespace. For that, see below.
# All of Plone's products are already set up by PloneTestCase.
@onsetup
def setup_product():
"""Set up the package and its dependencies.
The @onsetup decorator causes the execution of this body to be
deferred until the setup of the Plone site testing layer. We could
have created our own layer, but this is the easiest way for Plone
integration tests.
"""
# Load the ZCML configuration for the example.tests package.
# This can of course use <include /> to include other packages.
fiveconfigure.debug_mode = True
import Products.Ploneboard
import Products.PloneboardNotify
zcml.load_config('configure.zcml', Products.Ploneboard)
zcml.load_config('configure.zcml', Products.PloneboardNotify)
fiveconfigure.debug_mode = False
ztc.installProduct('Ploneboard')
ztc.installProduct('PloneboardNotify')
# The order here is important: We first call the (deferred) function
# which installs the products we need for this product. Then, we let
# PloneTestCase set up this product on installation.
setup_product()
# Below I don't use Products.xxx for Plone 2.5 compatibility
ptc.setupPloneSite(products=['Products.CMFPlacefulWorkflow',
'Products.PloneboardNotify'])
class TestCase(ptc.PloneTestCase):
"""We use this base class for all the tests in this package. If
necessary, we can put common utility or setup code in here. This
applies to unit test cases.
"""
class FunctionalTestCase(ptc.FunctionalTestCase):
"""We use this class for functional integration tests that use
doctest syntax. Again, we can put basic common utility or setup
code in here.
"""
def afterSetUp(self):
self.portal.portal_membership.addMember('root', 'secret',
('Manager','Owner'), [],
properties={'fullname': 'The Admin',
'email': '[email protected]'})
self.portal.portal_membership.addMember('member', 'secret',
('Member',), [],
properties={'fullname': 'The Member',
'email': '[email protected]'})
self.portal.portal_membership.addMember('another_member', 'secret',
('Member',), [],
properties={'fullname': 'Another Member',
'email': '[email protected]'})
def enableForumGlobally(self):
portal_types = self.portal.portal_types
forum_type = portal_types.getTypeInfo('PloneboardForum')
forum_type.global_allow = True
| collective/Products.PloneboardNotify | Products/PloneboardNotify/tests/base.py | base.py | py | 3,649 | python | en | code | 0 | github-code | 50 | 4806969389 |
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import VarianceThreshold
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
from sklearn.feature_selection import SelectFromModel
###################################################
############ Removing Features with low var ###
def remove_with_var_thresh(X,thresh):
sel = VarianceThreshold(threshold=thresh)
return sel.fit_transform(X)
def Univariate_feature_selection(X,y):
return SelectKBest(chi2, k=2).fit_transform(X, y)
def recursive_feature_elim(X,y):
y = np.round(y*10)
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
def L1_based_selection(X,y):
y = np.round(y*10)
lsvc = LinearSVC(C=0.01, penalty="l1", dual=False).fit(X, y)
model = SelectFromModel(lsvc, prefit=True)
return model.transform(X)
def tree_selection(X,y):
y = np.round(y*10)
clf = ExtraTreesClassifier(n_estimators=50)
clf = clf.fit(X, y)
# clf.feature_importances_
model = SelectFromModel(clf, prefit=True)
return model.transform(X)
def save_numpy(arr,filename):
np.save(filename, arr)
if __name__ == "__main__":
thresh =(.01 * (1 - .01))
# print(thresh)
X = np.load('bined_x.npy')
y = np.load('bined_y.npy')
print('shapes of raw data',X.shape,y.shape)
var_thresh_data = remove_with_var_thresh(X,thresh)
save_numpy(var_thresh_data,'var_thresh_data')
print('shape after var thresh',var_thresh_data.shape)
# uni_data = Univariate_feature_selection(X,y) #cannot work with negative data
# print('shape of uni data',uni_data.shape)
    L1_data = L1_based_selection(X,y)
save_numpy(L1_data,'L1_data')
print('shape of L1 data',L1_data.shape)
tree_data = tree_selection(X,y)
print('shape of tree data',tree_data.shape)
save_numpy(tree_data,'tree_data')
# recursive_feature_elim(X,y) #plots stuff instead of returning
| antreashp/DataMining_2020 | dim_reduction.py | dim_reduction.py | py | 2,762 | python | en | code | 0 | github-code | 50 | 10670426989 |
import argparse
import numpy as np
from matplotlib import pyplot
from matplotlib.backends.backend_pdf import PdfPages
pp = PdfPages('expt1.pdf')
pyplot.figure()
pyplot.clf()
parser = argparse.ArgumentParser()
parser.add_argument('-file', '--file', type=str, default='expt1.out', help='path to the results file to plot')
args = parser.parse_args()
with open(args.file, 'r') as f:
data = f.read().split('\n')[:-1]
points = np.zeros([len(data), 7])
for i, d in enumerate(data):
points[i][0] = i + 1
points[i][1:] = np.array([float(x) for x in d.split()])
for i in range(1, 7):
pyplot.plot(points[:, 0], points[:, i], label="State %s" % i)
pyplot.legend()
pyplot.xlabel('updates')
pyplot.ylabel('value function')
pyplot.yscale('symlog')
pp.savefig()
pp.close()
# pyplot.show()
| martiansideofthemoon/cs747-assignments | assign4/plot.py | plot.py | py | 779 | python | en | code | 0 | github-code | 50 | 25367093919 |
#!/usr/bin/env python3
from ev3dev2.motor import MoveTank, OUTPUT_A, OUTPUT_D
import time
import math
import sys
#Functions Setup
def Convert(DesDist, diam): #Conversion from DesDist (Desired Distance) to number of rotations based on the physical diameter of the wheels
NumRot = DesDist/(math.pi*diam)
return NumRot
def RotVari(diam, WheelDist): #Calculates number of rotations to achieve 360° EV3 spin. Based on wheel diameter and distance between the wheels
Circumf = math.pi*diam #Calculate circumference of the wheels
TurnCircumf = WheelDist*math.pi #Circumference of the invisible circle the EV3 wheels travel upon when rotating
    NeccRot = TurnCircumf/Circumf #Rotations required to achieve a full 360° rotation of the circle in "TurnCircumf"
return NeccRot
def RotateEV3(DesAng, NeccRot): #Rotate the EV3 to the desired angle. Calculates based on the necessary rotations determined from the "RotVari" function
AngMove = DesAng/360 #Ratio between desired turning angle and 360° of a full circle
NumRot = AngMove*NeccRot #Multiply necessary rotations and turn ratio to determine number of rotations for specific desired input
tank_pair.on_for_rotations(left_speed=-MotorSpeed/1.5, right_speed=MotorSpeed/1.5, rotations=NumRot) #Rotates at lower speed than default to reduce slippage.
##----------------------------SETUPS----------------------------##
tank_pair = MoveTank(OUTPUT_A, OUTPUT_D) #Assign Tank pairs based on physical EV3 build
MotorSpeed = 40 #Default program speed for motors
diam = 0.055 #Diameter of the wheels, in meters
WheelDist = 0.1 #Distance between the centers of the wheels (axle length)
CalibAng = 1.3 #Calibration value to rotate more/less based on terrain, slippage, etc. Increasing the value increases the rotation
NeccRot = RotVari(diam, WheelDist) #Calculate variables for EV3 rotation
#Assigning arguments received from main script to communicate EV3 movement
rotAng = float(sys.argv[1])*CalibAng #Angle to rotate can be positive (clockwise) or negative (counterclockwise). Adjust CalibAng as necessary
distMove = float(sys.argv[2]) #Distance to move. Also positive or negative
#---------Main Code---------#
NumRot = Convert(distMove, diam) #Calculate number of rotations necessary to move desired distance
RotateEV3(rotAng, NeccRot) #Rotate the EV3
time.sleep(.2)
tank_pair.on_for_rotations(left_speed=MotorSpeed, right_speed=MotorSpeed, rotations=NumRot) #Move EV3 linearly
print("\n Your EV3 has completed the desired movement\n")
| jjaram117/EV3-Lidar | RunEV3.py | RunEV3.py | py | 2,585 | python | en | code | 0 | github-code | 50 | 40806260266 |
# python3
import itertools
n, m = map(int, input().split())
edges = [ list(map(int, input().split())) for i in range(m) ]
# Reduce the Hamiltonian-path question to SAT: variable number i*n + j encodes
# "vertex i occupies position j of the path". The clauses below force every
# vertex onto exactly one position, every position onto exactly one vertex, and
# consecutive positions onto adjacent vertices.
clauses = []
vertices = range(1,n+1)
paths = range(1,n+1)
s_p = range(1,n)
adj = [[] for _ in range(n)]
for i, j in edges:
adj[i-1].append(j-1)
adj[j-1].append(i-1)
def varnum(i,j):
return i*n + j
def ex1of(options):
clauses.append([o for o in options])
for pair in itertools.combinations(options, 2):
clauses.append([-p for p in pair])
def printEquisatisfiableSatFormula():
for v in s_p:
ex1of([varnum(v,j) for j in paths])
for p in paths:
ex1of([varnum(p,j) for j in s_p])
for j in range(n):
for i, nodes in enumerate(adj):
clauses.append([-varnum(i, j)] + [varnum(n, j+1) for n in nodes])
print(len(clauses), n*n)
for c in clauses:
c.append(0)
print(' '.join(map(str, c)))
printEquisatisfiableSatFormula()
import itertools
n, m = map(int, input().split())
edges = [ list(map(int, input().split())) for i in range(m) ]
clauses = []
positions = range(1, n+1)
adj = [[] for _ in range(n)]
for i, j in edges:
adj[i-1].append(j-1)
adj[j-1].append(i-1)
def var_number(i, j):
return n*i + j
def exactly_One_Of(literals):
clauses.append([l for l in literals])
for pair in itertools.combinations(literals, 2):
clauses.append([-l for l in pair])
for i in range(n):
exactly_One_Of([var_number(i, j) for j in positions])
for j in positions:
exactly_One_Of([var_number(i, j) for i in range(n)])
for j in positions[:-1]:
for i, nodes in enumerate(adj):
clauses.append([-var_number(i, j)] + [var_number(n, j+1) for n in nodes])
print(len(clauses), n*n)
for c in clauses:
c.append(0)
print(' '.join(map(str, c)))
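# Illustration (not part of the solution) of what exactly_One_Of / ex1of emit for
# literals [1, 2, 3]:
#   [1, 2, 3]                      -- at least one of the three is true
#   [-1, -2], [-1, -3], [-2, -3]   -- no two are true at the same time
# i.e. 1 + C(3, 2) = 4 clauses per "exactly one" constraint.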
| AYUSHNSUT/Algorithms-Specialization-Coursera | Advanced-Algorithms-and-Complexity/w3/cleaning_apartment/cleaning_apartment.py | cleaning_apartment.py | py | 1,938 | python | en | code | 0 | github-code | 50 | 8426369810 |
import sys
import abc
import numpy as np
from scipy.stats import geom, nbinom, poisson
import torch
from torch.autograd import Variable
from utils.helper import preprocess_gradients, get_step_loss
from utils.common import get_batch_functions, get_func_loss
from utils.helper import tensor_and, tensor_any, LessOrEqual
class BatchHandler(object):
__metaclass__ = abc.ABCMeta
# static class variable to count the batches
id = 0
@abc.abstractmethod
def cuda(self):
pass
@abc.abstractmethod
def __call__(self, *args, **kwargs):
pass
@abc.abstractmethod
def backward(self, *args):
pass
class ACTBatchHandler(BatchHandler):
def __init__(self, exper, is_train, optimizees=None):
self.is_train = is_train
self.type_prior = exper.type_prior
self.prior_shape_param1 = exper.config.ptT_shape_param
self.learner = exper.args.learner
        # TODO: temporarily added self.version to test different batch loss computations
self.version = exper.args.version
if self.is_train:
self.functions = get_batch_functions(exper)
self.horizon = exper.config.T
else:
self.horizon = exper.config.max_val_opt_steps
if optimizees is None:
raise ValueError("Parameter -optimizees- can't be None. Please provide a test set")
self.functions = optimizees
self.func_is_nn_module = torch.nn.Module in self.functions.__class__.__bases__
self.batch_size = self.functions.num_of_funcs
# will be intensively used to select the functions that still need to be optimzed after t steps
self.bool_mask = Variable(torch.ones(self.batch_size, 1).type(torch.ByteTensor))
self.float_mask = Variable(torch.ones(self.batch_size, 1))
self.tensor_one = Variable(torch.ones(1).double())
self.max_T = Variable(torch.FloatTensor([self.horizon]).expand_as(self.float_mask))
if self.is_train:
self.one_minus_eps = torch.FloatTensor(self.max_T.size()).uniform_(0, 1.).double()
self.one_minus_eps = Variable(self.tensor_one.data.expand_as(self.one_minus_eps) - self.one_minus_eps)
else:
# during evaluation we fix the threshold
self.one_minus_eps = Variable(torch.zeros(self.max_T.size()).double())
self.one_minus_eps[:] = exper.config.qt_threshold
# IMPORTANT, this tensor needs to have the same size as exper.epoch_stats["opt_step_hist"][exper.epoch] which
# is set in train_optimizer.py in the beginning of the epoch
self.halting_steps = Variable(torch.zeros(self.max_T.size()))
self.counter_compare = Variable(torch.zeros(self.max_T.size()))
self.q_t = Variable(torch.zeros(self.batch_size, self.horizon).double())
# array of rho_t values that we'll need to calculate the qt-values qt=\prod_{i=1}{t-1} (1-rho_i) rho_t
self.rho_t = Variable(torch.zeros(self.batch_size, self.horizon).double())
# IMPORTANT: we're using compare_probs only for comparison in the WHILE loop in order to determine when to stop
self.compare_probs = Variable(torch.zeros(self.max_T.size()).double())
self.cumulative_probs = Variable(torch.zeros(self.max_T.size()).double())
self.qt_last_step = Variable(torch.zeros(self.max_T.size()).double())
self.time_step = 0
self.step = 0
self.loss_sum = 0
self.kl_term = 0
self.batch_step_losses = []
self.backward_ones = torch.ones(self.batch_size)
# only used during evaluation to capture the last time step when at least one optimizee still needed processing
self.eval_last_step_taken = 0.
self.eps = 1e-320
self.iterations = Variable(torch.zeros(self.batch_size, 1))
self.qt_remainders = Variable(torch.zeros(self.batch_size, 1).double())
self.penalty_term = 0.
self.test_result_scores = []
# added for truncated BPTT
self.did_bptt_reset = False
self.forward_steps = 0
self.total_opt_loss = 0
self.total_kl_term = 0
self.total_sum_grads = 0
self.last_backward_step = 0
self.num_of_backwards = 0
# tinkering, add this numpy array to store the step losses for the MLP experiment in order to compute stddev
self.np_step_losses = np.zeros(self.horizon + 1)
self.verbose = False
if exper.args.cuda:
self.cuda()
def cuda(self):
self.bool_mask = self.bool_mask.cuda()
self.float_mask = self.float_mask.cuda()
self.max_T = self.max_T.cuda()
self.one_minus_eps = self.one_minus_eps.cuda()
self.halting_steps = self.halting_steps.cuda()
self.counter_compare = self.counter_compare.cuda()
self.q_t = self.q_t.cuda()
self.rho_t = self.rho_t.cuda()
self.compare_probs = self.compare_probs.cuda()
self.cumulative_probs = self.cumulative_probs.cuda()
self.tensor_one = self.tensor_one.cuda()
self.backward_ones = self.backward_ones.cuda()
self.qt_last_step = self.qt_last_step.cuda()
self.iterations = self.iterations.cuda()
self.qt_remainders = self.qt_remainders.cuda()
def act_step(self, exper, meta_optimizer):
"""
Subtleties of the batch processing. TODO: NEED TO EXPLAIN THIS!!!
:param exper:
:param meta_optimizer:
:return: IMPORTANT RETURNS LOSS STEP THAT IS NOT MULTIPLIED BY QT-VALUE! NUMPY FLOAT32
"""
self.init_trunc_bptt(exper, meta_optimizer)
loss = get_func_loss(exper, self.functions, average=False)
# make the forward step, which depends heavily on the experiment we're performing (MLP or Regression(T))
delta_param, rho_probs, eval_par_new = self.forward(loss, exper, meta_optimizer)
# then, apply the previous batch mask, although we did the forward pass with all functions, we filter here
delta_param = torch.mul(delta_param, self.float_mask)
# moved to method >>> compute_probs <<<
# rho_new = torch.mul(rho_probs, self.float_mask)
# compute new probability values, based on the cumulative probs construct new batch mask
# note that we also save the new rho_t values in the array self.rho_t (in the method compute_probs)
if self.learner == "meta_act":
# in Graves ACT model, the qt values are probabilities already : sigmoid(W^T h_t + bias) values
# so we don't use any stick-breaking here (meaning the rho_t values that we transfer in the act_sb
new_probs = torch.mul(rho_probs.double(), self.float_mask.double())
else:
# stick-breaking approach: transform RNN output rho_t values to probabilities
# method compute_probs multiplies with the float_mask object!
new_probs = self.compute_probs(rho_probs)
# we need to determine the indices of the functions that "stop" in this time step (new_funcs_mask)
funcs_that_stop = torch.le(self.compare_probs + new_probs, self.one_minus_eps)
less_or_equal = LessOrEqual()
# NOTE: ALSO for the Graves ACT model we increase all functions that participated in THIS ITERATION and
# therefore we compare with self.compare_probs BEFORE we increase them with the new probs
iterations = less_or_equal(self.compare_probs, self.one_minus_eps)
if self.learner == "meta_act":
self.halting_steps += iterations
else:
# and we increase the number of time steps TAKEN with the previous float_mask object to keep track of the steps
self.halting_steps += self.float_mask
self.iterations += iterations
new_batch_mask = tensor_and(funcs_that_stop, self.bool_mask)
new_float_mask = new_batch_mask.float()
# IMPORTANT: although above we constructed the new mask (for the next time step) we use the previous mask for the
# increase of the cumulative probs which we use in the WHILE loop to determine when to stop the complete batch
self.compare_probs += torch.mul(new_probs, self.float_mask.double())
# IMPORTANT UPDATE OF OPTIMIZEE PARAMETERS
if self.func_is_nn_module:
# awkward but true: delta_param has [batch_dim, num_params] due to multiplication with float masks, but
# must have transpose shape for this update
par_new = self.functions.get_flat_params() - delta_param.permute(1, 0)
else:
par_new = self.functions.params - delta_param
# increase the number of steps taken for the functions that are still in the race, we need these to compare
# against the maximum allowed number of steps
self.counter_compare += new_float_mask
# generate object that holds time_step_condition (which function has taken more than MAX time steps allowed)
# Note that self.max_T = HORIZON
time_step_condition = torch.lt(self.counter_compare, self.max_T)
# we then generate the mask that determines which functions will finally be part of the next round
next_iteration_condition = tensor_and(new_batch_mask, time_step_condition)
final_func_mask = (self.bool_mask - next_iteration_condition) == 1
# set the cumulative_probs for all functions that participate in the next time step
# Subtlety here: so for an optimizee stopping in this step, we don't want the cum-prob to be increased
# because we'll use the previous (step-1) cum-probs to determine the q_T value...the final
# q_t value for the halting step which gets assigned the REST prob mass
# we use this object for determining the rest-probability for the functions that stop after this time step
# the mask "final_func_mask" holds the indices of the functions that stop after this time step
# the mask "next_iteration_condition" holds the indices of the functions that continue in the next time step
self.cumulative_probs += torch.mul(new_probs, next_iteration_condition.double())
self.set_qt_values(new_probs, next_iteration_condition, final_func_mask)
# set the masks for the next time step
self.float_mask = new_float_mask
self.bool_mask = new_float_mask.type(torch.cuda.ByteTensor) if exper.args.cuda else \
new_float_mask.type(torch.ByteTensor)
# compute the new step loss
if self.is_train:
loss_step = self.step_loss(par_new, exper, average_batch=True)
else:
loss_step = self.step_loss(eval_par_new, exper, average_batch=True)
batch_loss = self.batch_step_losses[-1]
# we don't "have" any variance when batch size is 1
if self.batch_size > 1:
exper.add_step_loss_variance(batch_loss, self.step+1)
elif exper.args.problem == "mlp":
# batch_size is always one, but we want to compute stddev later, so just save all losses for the function
# we calculate the stddev in Experiment.eval() method
self.np_step_losses[self.step+1] = batch_loss.data.cpu().squeeze().numpy()[0]
# update batch functions parameter for next step
if self.is_train:
self.functions.set_parameters(par_new)
else:
self.functions.set_parameters(eval_par_new)
# MLP which is a torch.nn.module or regression function (which is just a normal Class object)
if self.func_is_nn_module:
self.functions.zero_grad()
else:
self.functions.params.grad.data.zero_()
return loss_step.data.cpu().squeeze().numpy()[0]
def forward(self, loss, exper, meta_optimizer):
# Note: splitting the logic between the different experiments:
# (1) MLP
# (2) All others (Regression & Regression_T)
# compute gradients of optimizee which will need for the meta-learner
if exper.args.problem == "mlp":
loss.backward()
if self.is_train:
delta_param, rho_probs = meta_optimizer.forward(Variable(torch.cat((preprocess_gradients(
self.functions.get_flat_grads().data),
self.functions.get_flat_params().data), 1)))
else:
delta_param, rho_probs = meta_optimizer.forward(Variable(torch.cat((preprocess_gradients(
self.functions.get_flat_grads().data),
self.functions.get_flat_params().data),
1), volatile=True))
if not self.is_train:
# during evaluation we keep ALL parameters generated in order to be able to compute new loss values
# for all optimizees
eval_par_new = Variable(self.functions.get_flat_params().data - delta_param.data.unsqueeze(1))
else:
eval_par_new = None
# we have no batch dimension, so we add one in order to adjust to the
# rho_probs has shape [num_of_flat_parameters, ]. Note, batch dimension is dim0
# for the rho_probs we can just add the 2nd dim where ever we want, because we take the mean anyway [1 x 1]
# for the delta_param the situation is unfortunately more confusing. In the act_step method [1 x num-params]
# would be appreciated because of the float-masks which have batch dim as dim0. But the set_parameter method
# of MLP object expects [num_params, 1]...
delta_param = delta_param.unsqueeze(0)
rho_probs = rho_probs.unsqueeze(1)
rho_probs = torch.mean(rho_probs, 0, keepdim=True)
else:
loss.backward(self.backward_ones)
param_size = self.functions.params.grad.size()
if self.is_train:
flat_grads = Variable(self.functions.params.grad.data.view(-1))
delta_param, rho_probs = meta_optimizer.forward(flat_grads)
else:
# IMPORTANT! BECAUSE OTHERWISE RUNNING INTO MEMORY ISSUES - PASS GRADS NOT IN A NEW VARIABLE
delta_param, rho_probs = meta_optimizer.forward(self.functions.params.grad.view(-1))
# (1) reshape parameter tensor (2) take mean to compute qt values
# try to produce ones
delta_param = delta_param.view(param_size)
rho_probs = torch.mean(rho_probs.view(*param_size), 1, keepdim=True)
if not self.is_train:
# during evaluation we keep ALL parameters generated in order to be able to compute new loss values
# for all optimizees
eval_par_new = Variable(self.functions.params.data - delta_param.data)
else:
eval_par_new = None
# register the baseline loss at step 0
if self.step == 0:
self.process_step0(exper, loss)
return delta_param, rho_probs, eval_par_new
def __call__(self, exper, epoch_obj, meta_optimizer, final_batch=False):
self.step = 0
if self.is_train:
do_continue = tensor_any(tensor_and(torch.le(self.compare_probs, self.one_minus_eps),
torch.lt(self.counter_compare, self.max_T))).data.cpu().numpy()[0]
else:
do_continue = True if self.step < self.horizon-1 else False
while do_continue:
# IMPORTANT! avg_loss_step is NOT MULTIPLIED BY THE qt-value!!!!
avg_loss_step = self.act_step(exper, meta_optimizer)
if self.is_train:
do_continue = tensor_any(
tensor_and(torch.le(self.compare_probs, self.one_minus_eps),
torch.lt(self.counter_compare, self.max_T))).data.cpu().numpy()[0]
else:
do_continue = True if self.step < self.horizon-1 else False
if self.eval_last_step_taken == 0:
num_next_iter = torch.sum(self.float_mask).data.cpu().squeeze().numpy()[0]
if num_next_iter == 0. and self.eval_last_step_taken == 0.:
self.eval_last_step_taken = self.step + 1
if self.is_train and exper.args.trunc_bptt and \
(self.forward_steps == exper.args.truncated_bptt_step or not do_continue):
loss_sum, sum_grads = self.backward(epoch_obj, meta_optimizer, exper.optimizer,
retain_graph=False,
trunc_bptt=do_continue)
self.num_of_backwards += 1
self.total_opt_loss += loss_sum
self.total_kl_term += self.kl_term
self.total_sum_grads += sum_grads
self.last_backward_step = self.step + 1
# important increase step after "do_continue" stuff but before adding step losses
self.step += 1
exper.add_step_loss(avg_loss_step, self.step, is_train=self.is_train)
exper.add_opt_steps(self.step, is_train=self.is_train)
epoch_obj.add_step_loss(avg_loss_step, last_time_step=not do_continue)
# print("final qt-probs")
# print(self.q_t[0:2, 0:self.step].data.cpu().numpy())
exper.add_step_qts(self.q_t[:, 0:self.step].data.cpu().numpy(), is_train=self.is_train)
exper.add_halting_steps(self.halting_steps, is_train=self.is_train)
# set the class variable if we reached a new maximum time steps
if epoch_obj.get_max_time_steps_taken(self.is_train) < self.step:
epoch_obj.set_max_time_steps_taken(self.step, self.is_train)
if not self.is_train:
if self.eval_last_step_taken == 0:
self.eval_last_step_taken = self.step
epoch_obj.set_max_time_steps_taken(self.eval_last_step_taken, self.is_train)
if exper.args.problem == "mlp" and final_batch:
# evaluate the last MLP that we optimized
accuracy = self.functions.test_model(exper.dta_set, exper.args.cuda, quick_test=True)
# exper.meta_logger.info("Epoch {}: last batch - accuracy of last MLP {:.4f}".format(exper.epoch, accuracy))
self.test_result_scores.append(accuracy)
def init_trunc_bptt(self, exper, meta_optimizer):
if exper.args.trunc_bptt and self.is_train:
# Keep states for truncated BPTT
if self.step % exper.args.truncated_bptt_step == 0:
if self.step > exper.args.truncated_bptt_step - 1:
keep_states = True
else:
keep_states = False
# exper.meta_logger.info("DEBUG@step %d - Resetting LSTM" % self.step)
self.forward_steps = 1
meta_optimizer.reset_lstm(keep_states=keep_states)
self.functions.reset_params()
self.loss_sum = 0
else:
self.forward_steps += 1
elif self.step == 0:
# ALL OTHER MODELS: only for first step reset LSTM
self.forward_steps = 1
# initialize LSTM
meta_optimizer.reset_lstm(keep_states=False)
self.functions.reset_params()
self.loss_sum = 0
def bptt_reset(self):
self.qt_last_step = Variable(self.qt_last_step.data)
self.q_t = Variable(self.q_t.data)
self.batch_step_losses = [Variable(tensor.data) for tensor in self.batch_step_losses]
self.compare_probs = Variable(self.compare_probs.data)
self.halting_steps = Variable(self.halting_steps.data)
self.rho_t = Variable(self.rho_t.data)
if not self.did_bptt_reset:
self.did_bptt_reset = True
def compute_probs(self, new_rho_t):
# if this is the first time we broke the stick, qts are just our probs
if self.step == 0:
probs = new_rho_t.double()
else:
# stick-breaking procedure: the new probs = \prod_{i=1}^{t-1} (1 - rho_i) rho_t
# actually we transform the product into a sum of logs and transform back to probs with torch.exp for
# numerical stability
one_minus_rho = torch.log((self.tensor_one.expand_as(new_rho_t) - self.rho_t[:, 0:self.step]))
probs = torch.exp(torch.sum(one_minus_rho, 1, keepdim=True) + torch.log(new_rho_t.double() + self.eps))
# previous compute style -- archive
# probs = torch.mul(torch.prod(self.tensor_one.expand_as(new_rho_t) - self.rho_t[:, 0:self.step], 1,
# keepdim=True), new_rho_t.double())
probs = torch.mul(probs, self.float_mask.double())
self.rho_t[:, self.step] = torch.mul(new_rho_t, self.float_mask).squeeze().double()
return probs
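    # Worked illustration of the stick-breaking construction above (values invented):
    # with per-step halting outputs rho = [0.2, 0.3, 0.5] for one optimizee,
    #   q_1 = 0.2
    #   q_2 = (1 - 0.2) * 0.3          = 0.24
    #   q_3 = (1 - 0.2)(1 - 0.3) * 0.5 = 0.28
    # leaving a remainder 1 - (0.2 + 0.24 + 0.28) = 0.28, which set_qt_values assigns
    # to the halting step when the horizon is reached.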
def backward(self, epoch_obj, meta_optimizer, optimizer, loss_sum=None, retain_graph=False, trunc_bptt=False):
if len(self.batch_step_losses) == 0:
raise RuntimeError("No batch losses accumulated. Can't execute backward() on object")
if loss_sum is None:
self.compute_batch_loss(weight_regularizer=epoch_obj.weight_regularizer)
else:
self.loss_sum = loss_sum
self.loss_sum.backward(retain_graph=retain_graph)
optimizer.step()
# print("Sum grads {:.4f}".format(meta_optimizer.sum_grads(verbose=True)))
# we don't want to reset all our internal meta learner variables when using efficient sampling
if not retain_graph:
meta_optimizer.reset_final_loss()
if trunc_bptt:
self.bptt_reset()
sum_grads = meta_optimizer.sum_grads()
meta_optimizer.zero_grad()
return self.loss_sum.data.cpu().squeeze().numpy()[0], sum_grads
def step_loss(self, new_parameters, exper, average_batch=True):
# Note: for the ACT step loss, we're only summing over the number of samples (dim1), for META model
# we also sum over dim0 - the number of functions. But for ACT we need the losses per function in the
# final_loss calculation (multiplied with the qt values, which we collect also for each function
loss = get_step_loss(self.functions, new_parameters, avg_batch=False, exper=exper, is_train=self.is_train)
if loss.dim() == 1:
loss = loss.unsqueeze(1)
self.batch_step_losses.append(loss)
if average_batch:
return torch.mean(loss)
else:
return loss
def compute_batch_loss(self, weight_regularizer=1., variational=True, original_kl=False, mean_field=True):
# get the q_t value for all optimizees for their halting step. NOTE: halting step has to be decreased with
# ONE because the index of the self.q_t array starts with 0 right
if self.version == "V3.1" or self.version == 'V3.2':
original_kl = False
variational = True
mean_field = True
elif self.version == "V1":
variational = True
original_kl = True
mean_field = False
idx_last_step = Variable(self.halting_steps.data.type(torch.LongTensor) - 1)
        if self.q_t.is_cuda:
idx_last_step = idx_last_step.cuda()
q_T_values = torch.gather(self.q_t, 1, idx_last_step)
# q_T_values = self.compute_last_qt(idx_last_step)
# compute KL divergence term, take the mean over the mini-batch dimension 0
if variational:
if original_kl:
# construct the individual priors for each batch function, return DoubleTensor[batch_size, self.steps]
g_priors = self.construct_priors_v2()
kl_term = weight_regularizer * torch.mean(self.approximate_kl_div(q_T_values, g_priors), 0)
else:
# construct the individual priors for each batch function, return DoubleTensor[batch_size, self.steps]
if self.version == 'V3.2':
g_priors = self.construct_priors_v1(truncate=True)
else:
g_priors = self.construct_priors_v1()
qts = self.q_t[:, 0:self.step].double()
# compute KL divergence term, take the mean over the mini-batch dimension 0
kl_term = weight_regularizer * torch.mean(torch.sum(torch.mul(qts,
self.approximate_kl_div_with_sum(qts, g_priors))
, 1)
, 0)
else:
kl_term = weight_regularizer * self.compute_stochastic_ponder_cost()
# get the loss value for each optimizee for the halting step
if mean_field:
# get the loss value for each optimizee for the halting step
loss_matrix = torch.cat(self.batch_step_losses, 1).double()
# last_step_loss = torch.mean(torch.gather(loss_matrix, 1, idx_last_step), 0)
losses = torch.mean(torch.sum(torch.mul(self.q_t[:, 0:self.step], loss_matrix), 1), 0) # + last_step_loss
else:
loss_matrix = torch.cat(self.batch_step_losses, 1)
# REMEMBER THIS IS act_sbV2 where we multiply q_t with the log-likelihood
# losses = torch.mean(torch.sum(torch.mul(q_T_values, loss_matrix.double()), 1), 0)
# and this is act_sbV1
losses = torch.mean(torch.gather(loss_matrix, 1, idx_last_step), 0)
# compute final loss, in which we multiply each loss by the qt time step values
# remainder = torch.mean(self.iterations).double()
if self.learner == "act_sb" and (self.version == "V3.1"):
remainder = weight_regularizer * torch.mean(self.qt_remainders).double()
self.loss_sum = (losses.double() + remainder).squeeze()
self.penalty_term = remainder.data.cpu().squeeze().numpy()[0]
self.kl_term = 0.
else:
self.loss_sum = (losses.double() + kl_term).squeeze()
self.kl_term = kl_term.data.cpu().squeeze().numpy()[0]
def compute_last_qt(self, halting_idx):
qt = Variable(torch.zeros(self.rho_t.size(0)))
        if self.rho_t.is_cuda:
qt = qt.cuda()
i = 0
for idx in halting_idx.data.cpu().squeeze().numpy():
if int(idx) == 0:
qt[i] = self.rho_t[i, int(idx)] + (self.tensor_one - self.rho_t[i, int(idx)])
else:
qt[i] = torch.prod(self.tensor_one - self.rho_t[i, 0:int(idx)], 0, keepdim=True)
i += 1
return qt.unsqueeze(1)
def set_qt_values(self, new_probs, next_iteration_condition=None, final_func_mask=None):
qt = Variable(torch.zeros(new_probs.size(0)).double())
if new_probs.is_cuda:
qt = qt.cuda()
all_finals = torch.sum(final_func_mask).data.cpu().squeeze().numpy()[0]
if self.version == "V3.2":
final_due_to_fixed_horizon = final_func_mask
else:
final_due_to_fixed_horizon = torch.eq(self.counter_compare, self.max_T)
num_of_finals = torch.sum(final_due_to_fixed_horizon).data.cpu().squeeze().numpy()[0]
final_idx_due_to_fixed_horizon = final_due_to_fixed_horizon.data.squeeze().nonzero().squeeze()
qt[:] = new_probs
# Note: we only add the remainder of the probability mass for those optimizees that reached the MAX number of
# time steps condition. In this case we reached the theoretical INFINITE horizon and therefore we make sure the
# prob-mass adds up to one
self.qt_remainders = Variable(torch.zeros(self.batch_size, 1).double())
if new_probs.is_cuda:
self.qt_remainders = self.qt_remainders.cuda()
if num_of_finals > 0:
# subtlety here: we compute \prod_{i=1}^{t-1} (1 - rho_i) = remainder.
# indexing starts at 0 = first step. self.step starts counting at 0
# so when self.step=1 (we're actually in step 2 then) we only compute (1-rho_1)
# which should be correct in step 2 (because step 1 = rho_1)
self.qt_remainders[final_idx_due_to_fixed_horizon] = (self.tensor_one.expand_as(self.compare_probs) -
self.compare_probs)[final_idx_due_to_fixed_horizon]
# self.qt_remainders = (self.tensor_one.expand_as(self.compare_probs) - self.compare_probs)
# qt_remainder = torch.prod(self.tensor_one.expand_as(qt) - self.rho_t[:, 0:self.step], 1, keepdim=True)
qt[final_idx_due_to_fixed_horizon] = (new_probs + self.qt_remainders)[final_idx_due_to_fixed_horizon]
if self.verbose:
idx = final_idx_due_to_fixed_horizon[0]
print("new_prob ", new_probs[idx].data.cpu().squeeze().numpy()[0])
print("Remainder ", self.qt_remainders[idx].data.cpu().squeeze().numpy()[0])
print("qt-values")
if all_finals > 0 and self.version == "V3.1":
self.qt_remainders = (self.tensor_one.expand_as(self.compare_probs) - self.compare_probs)
self.q_t[:, self.step] = qt
if self.verbose and num_of_finals > 0:
print("self.q_t.size(1) {} and self.step {}".format(self.q_t.size(1), self.step))
print(self.q_t[idx, -10:].data.cpu().squeeze().numpy())
print("Sum probs: ", np.sum(self.q_t[idx, 0:self.step + 1].data.cpu().squeeze().numpy()))
def construct_priors_v2(self):
"""
Just get the p(t) values for the mini-batch using the indices of the "halting step" vector.
Note: the prior is not truncated
:return: prior values for optimizees at time step = halting step
"""
if self.type_prior == "geometric":
g_priors = geom.pmf(self.halting_steps.data.cpu().numpy(), p=(1-self.prior_shape_param1))
# g_priors = nbinom.pmf(self.halting_steps.data.cpu().numpy(), 50, p=0.3)
else:
raise ValueError("Unknown prior distribution {}. Only 1) geometric and 2) neg-binomial "
"are supported".format(self.type_prior))
g_priors = Variable(torch.from_numpy(g_priors).double())
if self.rho_t.is_cuda:
g_priors = g_priors.cuda()
return g_priors
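    # Example of the geometric prior used here (comment only): with shape parameter
    # 0.9 the success probability is p = 1 - 0.9 = 0.1, and
    #   geom.pmf([1, 2, 3], p=0.1) = [0.1, 0.09, 0.081]
    # i.e. p(t) = (1 - p)^(t - 1) * p, evaluated at each optimizee's halting step.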
def construct_priors_v1(self, truncate=False):
# construct array of arrays with different length of ranges, we need this to construct a 2D matrix that will be
# passed to the geometric PMF function of scipy
# !!! NOTE: halting_steps are already increased but not our step
current_step = self.step + 1
R = np.array([np.arange(1, i + 1) for i in self.halting_steps.data.cpu().numpy()])
if current_step > 1:
R = np.vstack([np.lib.pad(a, (0, (current_step - len(a))), 'constant', constant_values=0) for a in R])
if self.type_prior == "geometric":
g_priors = geom.pmf(R, p=(1 - self.prior_shape_param1))
else:
raise ValueError("Unknown prior distribution {}. Only 1) geometric and 2) neg-binomial "
"are supported".format(self.type_prior))
# print("Halting steps {}".format(np.array_str(self.halting_steps.data.cpu().numpy())))
# print("R")
# print("{}".format(np.array(R)))
if truncate:
g_priors = 1. / np.sum(g_priors) * g_priors
g_priors = Variable(torch.from_numpy(g_priors).double())
if self.q_t.is_cuda:
g_priors = g_priors.cuda()
return g_priors
def process_step0(self, exper, loss):
# in the MLP experiments the loss has size [1x1], we don't want to take the mean then
if loss.size(0) > 1:
baseline_loss = torch.mean(loss, 0).data.cpu().squeeze().numpy()[0]
else:
baseline_loss = loss.data.cpu().squeeze().numpy()[0]
exper.add_step_loss(baseline_loss, self.step, is_train=self.is_train)
exper.add_opt_steps(self.step, is_train=self.is_train)
# only calculate batch variance during evaluation and when our batch size is greater than 1 (not the case for MLP)
if not self.is_train and self.batch_size > 1:
exper.add_step_loss_variance(loss, self.step)
elif not self.is_train and exper.args.problem == "mlp":
# batch_size is always one, but we want to compute stddev later
self.np_step_losses[self.step] = baseline_loss
def compute_stochastic_ponder_cost(self):
"""
According to Graves paper a possible ponder cost when working with a stochastic ACT approach (see footnote 1
page 5): \sum_{n=1}^{N(t)} n p_t^{(n)}
:return:
"""
R = np.array([np.arange(1, i + 1) for i in self.halting_steps.data.cpu().numpy()])
R = np.vstack([np.lib.pad(a, (0, (self.horizon - len(a))), 'constant', constant_values=0) for a in R])
R = Variable(torch.from_numpy(R).double())
if self.q_t.is_cuda:
R = R.cuda()
C = torch.mul(R, self.q_t)
return torch.mean(torch.sum(C, 1), 0)
def approximate_kl_div(self, q_probs, prior_probs):
try:
kl_div = torch.sum(torch.log(q_probs + self.eps) - torch.log(prior_probs + self.eps), 1)
except RuntimeError:
print("q_probs.size ", q_probs.size())
print("prior_probs.size ", prior_probs.size())
raise RuntimeError("Running away from here...")
return kl_div
def approximate_kl_div_with_sum(self, q_probs, prior_probs, verbose=False):
"""
NOTE: only difference with the same method from parent class is that we're NOT taking the sum over the
time steps here, because we are going to multiply each kl-term(t) with the appropriate rho_t value
:param q_probs:
:param prior_probs:
:return:
"""
# we need a ByteTensor mask because q_probs has size [batch_size, self.step] and contains zeros for all
# steps of an optimizee after the halting step. Hence we need to multiply the result by the mask, passed as
# double()
mask = q_probs > 0.
try:
kl_div = torch.mul(torch.log(q_probs + self.eps) - torch.log(prior_probs + self.eps), mask.double())
if verbose:
max_steps = torch.sum(q_probs[0] > 0).data.cpu().numpy()[0]
if max_steps > 30:
print("q_probs length {}".format(max_steps))
print(q_probs[0].data.cpu().squeeze().numpy())
print(prior_probs[0].data.cpu().squeeze().numpy())
print("Sum KL-div for optimizee[0]")
print(np.sum(kl_div[0].data.cpu().squeeze().numpy()))
except RuntimeError:
print("q_probs.size ", q_probs.size())
print("prior_probs.size ", prior_probs.size())
raise RuntimeError("Running away from here...")
return kl_div
class MACTBatchHandler(ACTBatchHandler):
def __init__(self, exper, is_train, optimizees=None):
super(MACTBatchHandler, self).__init__(exper, is_train, optimizees)
self.one_minus_eps[:] = exper.config.qt_threshold
def cuda(self):
super(MACTBatchHandler, self).cuda()
def compute_batch_loss(self, weight_regularizer=1., loss_type="mean_field"):
# loss_type: (1) mean_field (2) final_step (3) combined
ponder_cost = self.compute_ponder_cost(tau=weight_regularizer)
if loss_type == "mean_field":
loss_matrix = torch.cat(self.batch_step_losses, 1).double()
# qts = self.q_t[:, 0:loss_matrix.size(1)]
# losses = torch.mean(torch.sum(torch.mul(qts, loss_matrix), 1), 0)
qts = self.q_t[:, self.last_backward_step:loss_matrix.size(1)]
losses = torch.mean(torch.sum(torch.mul(qts, loss_matrix[:, self.last_backward_step:]), 1), 0)
elif loss_type == "combined":
loss_matrix = torch.cat(self.batch_step_losses, 1).double()
qts = self.q_t[:, 0:loss_matrix.size(1)]
idx_last_step = Variable(self.halting_steps.data.type(torch.LongTensor) - 1)
            if self.halting_steps.is_cuda:
idx_last_step = idx_last_step.cuda()
last_losses = torch.mean(torch.gather(loss_matrix, 1, idx_last_step), 0)
losses = torch.mean(torch.sum(torch.mul(qts, loss_matrix), 1), 0) + last_losses
elif loss_type == "final_step":
idx_last_step = Variable(self.halting_steps.data.type(torch.LongTensor) - 1)
            if self.halting_steps.is_cuda:
idx_last_step = idx_last_step.cuda()
loss_matrix = torch.cat(self.batch_step_losses, 1)
losses = torch.mean(torch.gather(loss_matrix, 1, idx_last_step), 0)
else:
raise ValueError("Parameter loss_type {} not supported by this implementation".format(loss_type))
self.loss_sum = (losses.double() + ponder_cost).squeeze()
self.kl_term = ponder_cost.data.cpu().squeeze().numpy()[0]
def compute_ponder_cost(self, tau=5e-2):
"""
ponder cost for ONE time sequence according to the Graves ACT paper: c = N(t) + R(t)
where N(t) is the number of steps taken (in our case the halting step) plus R(t) the remainder of the
probability = (1 - \sum_{n=1}^{t-1} q_t) in our case
Important note on tau parameter:
According to Graves, they conducted grid-search for the synthetic tasks: tau = i x 10-j
i between 1-10 and j between 1-4 (see page 7)
:param tau: hyperparameter to scale the cost
:return:
"""
c = tau * torch.sum(torch.mean(self.halting_steps).double() + torch.mean(self.qt_last_step))
return c
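    # Comment-only example of the cost above: with tau = 0.05, a mean halting step of 12
    # and a mean qt remainder of 0.3, c = 0.05 * (12 + 0.3) = 0.615.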
def set_qt_values(self, new_probs, next_iteration_condition, final_func_mask):
qt = Variable(torch.zeros(new_probs.size()).double())
next_iter = torch.sum(next_iteration_condition).data.cpu().numpy()[0]
finals = torch.sum(final_func_mask).data.cpu().squeeze().numpy()[0]
next_idx = next_iteration_condition.data.squeeze().nonzero().squeeze()
final_idx = final_func_mask.data.squeeze().nonzero().squeeze()
if new_probs.is_cuda:
qt = qt.cuda()
if next_iter > 0:
qt[next_idx] = new_probs[next_idx]
# print("{} step next/final {}/{}".format(self.step, next, finals))
if finals > 0:
# subtlety here: we compute \prod_{i=1}^{t-1} (1 - rho_i) = remainder.
# indexing starts at 0 = first step. self.step starts counting at 0
# so when self.step=1 (we're actually in step 2 then) we only compute (1-rho_1)
# which should be correct in step 2 (because step 1 = rho_1)
qt_remainder = self.tensor_one.expand_as(self.compare_probs) - self.compare_probs
self.qt_last_step[final_idx] = (new_probs + qt_remainder)[final_idx]
qt[final_idx] = self.qt_last_step[final_idx]
if self.verbose:
idx = final_idx[0]
print("-----------------------------------------------")
print("Before sum probs: ", np.sum(self.q_t[idx, 0:self.step + 1].data.cpu().squeeze().numpy()))
print("new_prob ", new_probs[idx].data.cpu().squeeze().numpy()[0])
print("Remainder ", qt_remainder[idx].data.cpu().squeeze().numpy()[0])
print("Function idx {}".format(idx))
print("Cumulative probs ", self.cumulative_probs[idx].data.cpu().squeeze().numpy()[0])
print(self.q_t[idx, :self.step + 1].data.cpu().squeeze().numpy())
self.q_t[:, self.step] = qt
if self.verbose and finals > 0:
print("func: {} self.q_t.size(1) {} and self.step {}".format(idx, self.q_t.size(1), self.step))
print(self.q_t[idx, :self.step+1].data.cpu().squeeze().numpy())
print("Sum probs: ", np.sum(self.q_t[idx, 0:self.step + 1].data.cpu().squeeze().numpy()))
class ACTGravesBatchHandler(ACTBatchHandler):
def __init__(self, exper, is_train, optimizees=None):
super(ACTGravesBatchHandler, self).__init__(exper, is_train, optimizees)
self.one_minus_eps[:] = exper.config.qt_threshold
def cuda(self):
super(ACTGravesBatchHandler, self).cuda()
def compute_batch_loss(self, weight_regularizer=1., loss_type="mean_field"):
# loss_type: (1) mean_field (2) final_step (3) combined
ponder_cost = self.compute_ponder_cost(tau=weight_regularizer)
if loss_type == "mean_field":
loss_matrix = torch.cat(self.batch_step_losses, 1).double()
qts = self.q_t[:, 0:loss_matrix.size(1)]
losses = torch.mean(torch.sum(torch.mul(qts, loss_matrix), 1), 0)
elif loss_type == "combined":
loss_matrix = torch.cat(self.batch_step_losses, 1).double()
qts = self.q_t[:, 0:loss_matrix.size(1)]
idx_last_step = Variable(self.halting_steps.data.type(torch.LongTensor) - 1)
            if self.halting_steps.is_cuda:
idx_last_step = idx_last_step.cuda()
last_losses = torch.mean(torch.gather(loss_matrix, 1, idx_last_step), 0)
losses = torch.mean(torch.sum(torch.mul(qts, loss_matrix), 1), 0) + last_losses
elif loss_type == "final_step":
idx_last_step = Variable(self.halting_steps.data.type(torch.LongTensor) - 1)
            if self.halting_steps.is_cuda:
idx_last_step = idx_last_step.cuda()
loss_matrix = torch.cat(self.batch_step_losses, 1)
losses = torch.mean(torch.gather(loss_matrix, 1, idx_last_step), 0)
else:
raise ValueError("Parameter loss_type {} not supported by this implementation".format(loss_type))
self.loss_sum = (losses.double() + ponder_cost).squeeze()
self.kl_term = ponder_cost.data.cpu().squeeze().numpy()[0]
def compute_ponder_cost(self, tau=5e-2):
"""
ponder cost for ONE time sequence according to the Graves ACT paper: c = N(t) + R(t)
where N(t) is the number of steps taken (in our case the halting step) plus R(t) the remainder of the
probability = (1 - \sum_{n=1}^{t-1} q_t) in our case
Important note on tau parameter:
According to Graves, they conducted grid-search for the synthetic tasks: tau = i x 10-j
i between 1-10 and j between 1-4 (see page 7)
:param tau: hyperparameter to scale the cost
:return:
"""
c = tau * torch.sum(torch.mean(self.halting_steps).double() + torch.mean(self.qt_last_step))
return c
def set_qt_values(self, new_probs, next_iteration_condition, final_func_mask):
qt = Variable(torch.zeros(new_probs.size()).double())
next_iter = torch.sum(next_iteration_condition).data.cpu().numpy()[0]
finals = torch.sum(final_func_mask).data.cpu().squeeze().numpy()[0]
next_idx = next_iteration_condition.data.squeeze().nonzero().squeeze()
final_idx = final_func_mask.data.squeeze().nonzero().squeeze()
if new_probs.is_cuda:
qt = qt.cuda()
if next_iter > 0:
qt[next_idx] = new_probs[next_idx]
# print("{} step next/final {}/{}".format(self.step, next, finals))
if finals > 0:
# subtlety here: we compute \prod_{i=1}^{t-1} (1 - rho_i) = remainder.
# indexing starts at 0 = first step. self.step starts counting at 0
# so when self.step=1 (we're actually in step 2 then) we only compute (1-rho_1)
# which should be correct in step 2 (because step 1 = rho_1)
qt_remainder = self.tensor_one.expand_as(self.compare_probs) - self.compare_probs
self.qt_last_step[final_idx] = (new_probs + qt_remainder)[final_idx]
qt[final_idx] = self.qt_last_step[final_idx]
if self.verbose:
print("remainders", qt_remainder.size())
print("in final mask? ", final_func_mask[10].data.cpu().numpy()[0])
if final_func_mask[10].data.cpu().numpy()[0] == 1:
print("*** yes in final mask")
print("new_prob ", new_probs[10].data.cpu().squeeze().numpy()[0])
print("in qt ", qt[10].data.cpu().squeeze().numpy()[0])
print("Remainder ", qt_remainder[10].data.cpu().squeeze().numpy()[0])
self.q_t[:, self.step] = qt
if self.verbose and finals > 0 and final_func_mask[10].data.cpu().numpy()[0] == 1:
print(self.q_t[10, 0:self.step + 1].data.cpu().squeeze().numpy())
print("Sum probs: ", np.sum(self.q_t[10, 0:self.step + 1].data.cpu().squeeze().numpy()))
class MPACTBatchHandler(ACTBatchHandler):
def __init__(self, exper, is_train, optimizees=None):
super(MPACTBatchHandler, self).__init__(exper, is_train, optimizees)
def compute_batch_loss(self, weight_regularizer=1., variational=True, original_kl=False, mean_field=True):
        # construct the (truncated) priors over the halting steps used in the KL term below
g_priors = self.construct_priors_v1(truncate=True)
loss_matrix = torch.cat(self.batch_step_losses, 1).double()
qts = self.q_t[:, self.last_backward_step:loss_matrix.size(1)].double()
g_priors = g_priors[:, self.last_backward_step:loss_matrix.size(1)]
# compute KL divergence term, take the mean over the mini-batch dimension 0
kl_term = weight_regularizer * torch.mean(torch.sum(torch.mul(qts,
self.approximate_kl_div_with_sum(qts, g_priors))
, 1)
, 0)
# get the loss value for each optimizee for the halting step
losses = torch.mean(torch.sum(torch.mul(qts, loss_matrix[:, self.last_backward_step:]), 1), 0) # + last_step_loss
# compute final loss, in which we multiply each loss by the qt time step values
self.loss_sum = (losses.double() + kl_term).squeeze()
self.kl_term = kl_term.data.cpu().squeeze().numpy()[0]
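# Hedged illustration of the "mean_field" objective used by the handlers above.
# This is a minimal, self-contained sketch with invented shapes and values, not
# the project's actual training code: per-step losses of shape [batch, steps]
# are weighted by the halting distribution q_t, summed over steps and averaged
# over the mini-batch.
def _mean_field_loss_sketch():
    import torch
    loss_matrix = torch.rand(4, 6).double()       # per-step losses, [batch, steps]
    weights = torch.rand(4, 6).double()
    q_t = weights / weights.sum(1, keepdim=True)  # halting probabilities, rows sum to 1
    # expected loss under the halting distribution, averaged over the batch
    return torch.mean(torch.sum(q_t * loss_matrix, 1), 0)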
|
toologicbv/meta_learner
|
utils/batch_handler.py
|
batch_handler.py
|
py
| 46,663 |
python
|
en
|
code
| 0 |
github-code
|
50
|
5922189513
|
"""A syllable written with IPA symbols."""
import enum
from typing import NamedTuple
class SyllableRegion(enum.Enum):
"""A region of a syllable."""
ONSET = enum.auto()
NUCLEUS = enum.auto()
CODA = enum.auto()
class SyllableAtom(NamedTuple):
"""The smallest block of a syllable."""
phoneme: str
region: SyllableRegion
class Syllable:
"""A collection of SyllableAtoms constituting a single syllable.
Attributes:
atoms: A list of SyllableAtoms. Concatenating the atoms and reading the
phonemes from each atom gives the pronunciation of the syllable.
"""
def __init__(self, onset, nucleus, coda):
"""Creates a Syllable.
Args:
onset: A list of strings. Each string is a phoneme in the onset.
nucleus: A string specifying the IPA symbols for the nucleus.
coda: A list of strings. Each string is a phoneme in the coda.
"""
self._atoms = []
for phoneme in onset:
self._atoms.append(SyllableAtom(phoneme, SyllableRegion.ONSET))
if nucleus != "":
self._atoms.append(SyllableAtom(nucleus, SyllableRegion.NUCLEUS))
for phoneme in coda:
self._atoms.append(SyllableAtom(phoneme, SyllableRegion.CODA))
def __str__(self):
return "".join([phoneme for phoneme, _ in self._atoms])
def map_atoms(self, atom_tuples_to_obj):
"""Map the phonemes of this syllable to objects.
Args:
atom_tuples_to_obj: A dictionary where the keys are tuples of
SyllableAtoms. There's no guarantee on what the values are.
Returns:
A list containing only elements which are values from the input
dictionary. The list is formed by iterating this syllable's
SyllableAtoms and at each step checking for the longest key in
`atom_tuples_to_obj` that matches the next `n` elements (where `n`
is the length of that specific key); those SyllableAtoms are then
mapped to the corresponding value in `atom_tuples_to_obj`.
"""
objs = []
max_tuple_length = len(max(atom_tuples_to_obj, key=len))
start = 0
while start < len(self._atoms):
found_match = False
for length in range(max_tuple_length + 1, 0, -1):
end = start + length
if end > len(self._atoms):
continue
obj = atom_tuples_to_obj.get(tuple(self._atoms[start:end]), None)
if obj is not None:
objs.append(obj)
start += length
found_match = True
break # Break out of the inner loop
if not found_match:
print(f"No match for {self._atoms[start]}")
return None
return objs
def is_last_phoneme_s(self):
"""Return True if the last phoneme of this syllable is 's'."""
if len(self._atoms) == 0:
return False
return self._atoms[-1].phoneme == "s"
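# Hedged usage sketch (not part of the original module): it shows the greedy
# longest-match behaviour documented in Syllable.map_atoms. The mapped output
# strings ("STR", "I", "NG") are invented placeholders, not real steno output.
if __name__ == "__main__":
    syllable = Syllable(onset=["s", "t", "r"], nucleus="i", coda=["ng"])
    mapping = {
        (SyllableAtom("s", SyllableRegion.ONSET),
         SyllableAtom("t", SyllableRegion.ONSET),
         SyllableAtom("r", SyllableRegion.ONSET)): "STR",
        (SyllableAtom("i", SyllableRegion.NUCLEUS),): "I",
        (SyllableAtom("ng", SyllableRegion.CODA),): "NG",
    }
    print(syllable)                     # -> string
    print(syllable.map_atoms(mapping))  # -> ['STR', 'I', 'NG']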
|
AndrewHess/steno-tools
|
generator/syllable.py
|
syllable.py
|
py
| 3,120 |
python
|
en
|
code
| 6 |
github-code
|
50
|
7523605773
|
import pandas as pd
import csv
schedule_df = pd.read_csv(r"C:\Users\rober\OneDrive\Documents\Roberts Side Projects\May 22 output\2018-2019 actual_schedule_different_format.csv")
output_file_path = r"C:\Users\rober\OneDrive\Documents\Roberts Side Projects\May 22 output\back_to_back_output.csv"
def home_and_away_one_day(games):
    home_list = []
    away_list = []
    for game in games:
if '@' in game:
game_list = game.split("@")
away_team = game_list[0]
away_list.append(away_team)
home_team = game_list[1]
home_list.append(home_team)
teams_playing = away_list + home_list
return teams_playing
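# Worked example (hedged; the matchups are invented): for a day column holding
# ["BOS@LAL", "MIA@DEN"], home_and_away_one_day returns
# ["BOS", "MIA", "LAL", "DEN"] -- the away teams first, then the home teams.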
team_list = list(schedule_df.iloc[:,0])
final_output = []
final_output.append(["day", "back_to_back_team"])
for col in range(len(schedule_df.columns)):
for team in team_list:
schedule_day = list(schedule_df.iloc[:,col])
schedule_day = [x for x in schedule_day if str(x) != 'nan']
teams_playing = home_and_away_one_day(schedule_day)
# so out of bounds does not occur
if col < len(schedule_df.columns)-1:
schedule_day_plus_one = list(schedule_df.iloc[:,col+1])
schedule_day_plus_one = [x for x in schedule_day_plus_one if str(x) != 'nan']
teams_playing_tomorrow = home_and_away_one_day(schedule_day_plus_one)
if team in teams_playing and team in teams_playing_tomorrow and col > 0:
inner_output = []
inner_output.append(col)
inner_output.append(team)
final_output.append(inner_output)
with open(output_file_path, "w", newline = '') as csv_output:
wr = csv.writer(csv_output)
wr.writerows(final_output)
|
robertforderer/nba_schedule_site
|
count_back_to_backs.py
|
count_back_to_backs.py
|
py
| 1,779 |
python
|
en
|
code
| 0 |
github-code
|
50
|
5696964000
|
menosVinte = 0
maiorDeIdade = 0
homens = 0
while True:
print('-'*30)
print(' CADASTRE UMA PESSOA')
print('-'*30)
idade = int(input('Idade: '))
sexo = str(input('Sexo [M/F]: '))
    while sexo not in ('M', 'm', 'F', 'f'):
sexo = str(input('Sexo [M/F]: '))
if idade > 18:
maiorDeIdade += 1
if sexo in 'Mm':
homens += 1
if sexo in 'Ff' and idade < 20:
menosVinte += 1
print('-'*30)
continua = str(input('Quer continuar? [S/N]: '))
    while continua not in ('S', 's', 'N', 'n'):
continua = str(input('Quer continuar? [S/N]: '))
if continua in 'Nn':
break
print('='*6,'FIM DO PROGRAMA','='*6)
print(f"""Total de pessoas com mais de 18 anos: {maiorDeIdade}
Ao todo temos {homens} homens cadastrados
E temos {menosVinte} mulheres com menos de 20 anos""")
|
LeandroAlves05/python-cev
|
desafios/d069.py
|
d069.py
|
py
| 817 |
python
|
pt
|
code
| 0 |
github-code
|
50
|
14605221762
|
# -*- coding: utf-8 -*-
"""
"""
__version__ = "1.0"
__author__ = "si wen wei"
import io
import sys
import argparse
import pytest
from sevenautotest import settings
sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding='utf-8')
class TestRunner(object):
CMD_MODEL_ARG_NAME = '-cmdmode'
def __init__(self):
self.parser = argparse.ArgumentParser()
self.parser.add_argument(self.CMD_MODEL_ARG_NAME, action="store_true", help="控制是否从命令行获取pytest运行参数")
known_args, unknown_args = self.parser.parse_known_args()
self.cmd_args = (known_args, unknown_args)
@property
def cmdmode(self):
return self.cmd_args[0].cmdmode
@property
def pytest_args_from_cmd_line(self):
return self.cmd_args[1].copy()
def run(self, args=None, plugins=None, mode='auto'):
""" return exit code, after performing an in-process test run.
@param args: list of command line arguments.
@param plugins: list of plugin objects to be auto-registered during
initialization.
@see pytest.main(list, list)
"""
mode = mode.lower()
if mode == 'cmdline' or (mode == 'auto' and self.cmdmode):
args = self.pytest_args_from_cmd_line
return pytest.main(args, plugins)
if __name__ == "__main__":
TestRunner().run(settings.PYTEST_COMMANDS)
|
ssofnh/SevenPytest_first_linwei_2021-01-24
|
SevenPytest/TestRunner.py
|
TestRunner.py
|
py
| 1,424 |
python
|
en
|
code
| 0 |
github-code
|
50
|
28859937876
|
'''module dedicated to the biject class'''
class Biject:
'''class representing a bijection'''
def __init__(self, start_dict=None):
'''start_dict can be provided to
fill the bijection with something
otherwise, an empty bijection is created'''
self._left = {}
self._right = {}
if start_dict:
for l, r in start_dict.items():
self[l] = r
def __getitem__(self, key):
'''get the object corresponding to 'key'''
if key in self._left:
return self._left[key]
else:
return self._right[key]
def __setitem__(self, key, value):
'''create a pair with 'key' and 'value'
if key or value is already in use,
        a ValueError is raised'''
try:
current_value = self[key]
if current_value is not value:
raise ValueError("Key already assigned to different value")
except KeyError:
pass
try:
current_key = self[value]
if current_key is not key:
raise ValueError("Value already assigned to different key")
except KeyError:
pass
self._left[key] = value
self._right[value] = key
def __delitem__(self, key):
'''delete 'key'
raises KeyError if 'key' is not
in bijection'''
if key in self._left:
value = self._left[key]
del self._left[key]
del self._right[value]
else:
value = self._right[key]
del self._right[key]
del self._left[value]
def __contains__(self, key):
'''returns true if 'key' is in bijection'''
        return key in self._left or key in self._right
def __repr__(self):
'''return a representation of the bijection'''
if self._left:
return "Biject(%r)" % self._left
else:
return "Biject()"
def __iter__(self):
'''iterate over (key, value) in bijection'''
for left, right in self._left.items():
yield (left, right)
def __len__(self):
'''overriding len() to return number of pairs'''
return len(self._left)
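# Hedged usage sketch (not part of the original module); the values are invented
# and the expected results are shown in the trailing comments.
if __name__ == "__main__":
    pairing = Biject({"red": 1})
    pairing["green"] = 2
    print(pairing["red"], pairing[2])        # -> 1 green
    print("green" in pairing, 3 in pairing)  # -> True False
    del pairing[1]                           # removes the ("red", 1) pair
    print(len(pairing))                      # -> 1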
|
ufosc/swampymud
|
swampymud/util/biject.py
|
biject.py
|
py
| 2,237 |
python
|
en
|
code
| 21 |
github-code
|
50
|
40759311695
|
from pyspark import SparkContext, SparkConf
from datetime import datetime
import numpy as np
import csv
import base64
from math import sqrt
def remove_header(csv):
csv_header = csv.first()
header = sc.parallelize([csv_header])
return csv.subtract(header)
conf = SparkConf().setAppName("YelpReviews").setMaster("local")
sc = SparkContext(conf=conf)
folder_name = "./data/"
input_file_name = "yelp_top_users_friendship_graph.csv"
output_file_name = "result_1.csv"
yelp_top_users_friendship_graph = remove_header(sc.textFile(folder_name + input_file_name)).map(lambda line: line.split(','))
yelp_top_users_friendship_graph.cache()
yelp_top_users_friendship_graph_rdd = yelp_top_users_friendship_graph.map(lambda fields: (fields[0], fields[1]))
nodes_out_degrees = yelp_top_users_friendship_graph_rdd.map(lambda k: (k[0], 1)).reduceByKey(lambda x, y: x + y)
nodes_in_degrees = yelp_top_users_friendship_graph_rdd.map(lambda k: (k[1], 1)).reduceByKey(lambda x, y: x + y)
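# Worked example (hedged; the edges are invented): for rows [(A, B), (A, C), (B, C)]
# the two RDDs above hold out-degrees {A: 2, B: 1} and in-degrees {B: 1, C: 2}.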
""" b """
total_connections = nodes_in_degrees.values().sum()
count_out = nodes_out_degrees.distinct().count()
count_in = nodes_in_degrees.distinct().count()
mean_out = total_connections / count_out
mean_in = total_connections / count_in
median_out = np.median(nodes_out_degrees.values().collect())
median_in = np.median(nodes_in_degrees.values().collect())
yelp_top_users_friendship_graph.unpersist()
print("a1) Top 10 nodes (out degrees): {}".format(nodes_out_degrees.takeOrdered(10, key=lambda x: -x[1])))
print("a2) Top 10 nodes (in degrees): {}".format(nodes_in_degrees.takeOrdered(10, key=lambda x: -x[1])))
print("b1) Mean of in degrees in friendships graph: {}".format(mean_in))
print("b2) Mean of out degrees in friendships graph: {}".format(mean_out))
print("b3) Median of in degrees in friendships graph: {}".format(median_in))
print("b4) Median of out degrees in friendships graph: {}".format(median_out))
|
mathiasnh/TDT4305-project
|
project_part1/task4.py
|
task4.py
|
py
| 1,911 |
python
|
en
|
code
| 1 |
github-code
|
50
|
40125488520
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.MCTunes2017.PythiaCP5Settings_cfi import *
generator = cms.EDFilter("Pythia8ConcurrentGeneratorFilter",
pythiaPylistVerbosity = cms.untracked.int32(0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
maxEventsToPrint = cms.untracked.int32(0),
comEnergy = cms.double(14000.0),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CP5SettingsBlock,
processParameters = cms.vstring(
'Charmonium:states(3S1) = 100443',
'Charmonium:O(3S1)[3S1(1)] = 0.76',
'Charmonium:O(3S1)[3S1(8)] = 0.0050',
'Charmonium:O(3S1)[1S0(8)] = 0.004',
'Charmonium:O(3S1)[3P0(8)] = 0.004',
'Charmonium:gg2ccbar(3S1)[3S1(1)]g = on',
'Charmonium:gg2ccbar(3S1)[3S1(1)]gm = on',
'Charmonium:gg2ccbar(3S1)[3S1(8)]g = on',
'Charmonium:qg2ccbar(3S1)[3S1(8)]q = on',
'Charmonium:qqbar2ccbar(3S1)[3S1(8)]g = on',
'Charmonium:gg2ccbar(3S1)[1S0(8)]g = on',
'Charmonium:qg2ccbar(3S1)[1S0(8)]q = on',
'Charmonium:qqbar2ccbar(3S1)[1S0(8)]g = on',
'Charmonium:gg2ccbar(3S1)[3PJ(8)]g = on',
'Charmonium:qg2ccbar(3S1)[3PJ(8)]q = on',
'Charmonium:qqbar2ccbar(3S1)[3PJ(8)]g = on',
'100443:onMode = off',
'100443:onIfMatch = 443 211 -211',
'443:onMode = off',
'443:onIfMatch = 13 -13',
'PhaseSpace:pTHatMin = 10.'
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CP5Settings',
'processParameters',
)
)
)
# Filter with high pT cut on dimuon, trying to accomodate trigger requirements.
psi2SIDfilter = cms.EDFilter("PythiaFilter",
ParticleID = cms.untracked.int32(100443),
MinPt = cms.untracked.double(0.0),
MinEta = cms.untracked.double(-2.4),
MaxEta = cms.untracked.double(2.4),
Status = cms.untracked.int32(2)
)
jpsifilter = cms.EDFilter("PythiaFilter",
ParticleID = cms.untracked.int32(443),
MinPt = cms.untracked.double(0.0),
MinEta = cms.untracked.double(-2.4),
MaxEta = cms.untracked.double(2.4),
Status = cms.untracked.int32(2)
)
# Next two muon filter are derived from muon reconstruction
muminusfilter = cms.EDFilter("PythiaDauVFilter",
MotherID = cms.untracked.int32(0),
MinPt = cms.untracked.vdouble(2.),
ParticleID = cms.untracked.int32(443),
ChargeConjugation = cms.untracked.bool(False),
MinEta = cms.untracked.vdouble(-2.4),
MaxEta = cms.untracked.vdouble(2.4),
NumberDaughters = cms.untracked.int32(1),
DaughterIDs = cms.untracked.vint32(-13)
)
muplusfilter = cms.EDFilter("PythiaDauVFilter",
MotherID = cms.untracked.int32(0),
MinPt = cms.untracked.vdouble(2.),
ParticleID = cms.untracked.int32(443),
ChargeConjugation = cms.untracked.bool(False),
MinEta = cms.untracked.vdouble(-2.4),
MaxEta = cms.untracked.vdouble(2.4),
NumberDaughters = cms.untracked.int32(1),
DaughterIDs = cms.untracked.vint32(13)
)
# two pion filter
piminusfilter = cms.EDFilter("PythiaDauVFilter",
MotherID = cms.untracked.int32(0),
MinPt = cms.untracked.vdouble(0.0),
ParticleID = cms.untracked.int32(100443),
ChargeConjugation = cms.untracked.bool(False),
MinEta = cms.untracked.vdouble(-2.4), # or 3.0 ?
MaxEta = cms.untracked.vdouble(2.4), # or 3.0 ?
NumberDaughters = cms.untracked.int32(1),
DaughterIDs = cms.untracked.vint32(-211)
)
piplusfilter = cms.EDFilter("PythiaDauVFilter",
MotherID = cms.untracked.int32(0),
MinPt = cms.untracked.vdouble(0.0),
ParticleID = cms.untracked.int32(100443),
ChargeConjugation = cms.untracked.bool(False),
MinEta = cms.untracked.vdouble(-2.4), # or 3.0 ?
MaxEta = cms.untracked.vdouble(2.4), # or 3.0 ?
NumberDaughters = cms.untracked.int32(1),
DaughterIDs = cms.untracked.vint32(211)
)
ProductionFilterSequence = cms.Sequence(generator*psi2SIDfilter*jpsifilter*muminusfilter*muplusfilter*piminusfilter*piplusfilter)
|
cms-sw/cmssw
|
Configuration/Generator/python/Psi2SToJPsiPiPi_14TeV_TuneCP5_pythia8_cfi.py
|
Psi2SToJPsiPiPi_14TeV_TuneCP5_pythia8_cfi.py
|
py
| 4,371 |
python
|
en
|
code
| 985 |
github-code
|
50
|
7739158161
|
from qgis.PyQt.QtWidgets import QTableWidgetItem
def add_row(table):
row = table.rowCount()
table.setRowCount(table.rowCount() + 1)
table.setItem(row, 0, QTableWidgetItem())
table.setItem(row, 1, QTableWidgetItem())
def remove_rows(table):
for item in table.selectedItems():
table.removeRow(item.row())
def query_params(table):
res = []
for row in range(table.rowCount()):
key, value = table.item(row, 0).text(), table.item(row, 1).text()
res.append(dict(key=key, value=value))
return res
|
infogeo54/CartoGIS54-config
|
utils/server.py
|
server.py
|
py
| 547 |
python
|
en
|
code
| 0 |
github-code
|
50
|
70679378075
|
from __future__ import print_function
# TODO : Write a test that does the split+merge to see if you get the identity map.
# TODO : implement a variant that doesn't do the exponential thing because it's hard to reconcile on the first layer
# Note that this splitting pattern will affect all the firsts from input to first layer,
# but then it will affect only half of the weights in all the other transitions.
# Because of that, we will be unable to perform the merge without having
# access to the voltron weights. This is because only half of the weights
# are being updated. The lion trajectories contain only half of the information.
import numpy as np
import h5py
def indices_partition(N, k, K):
# Out of indices {0..N-1}, shuffle the list
# and pick the k-th group out of K groups.
# Might return less elements when the
# last grouping doesn't have that many elements.
    M = (N + (K - 1)) // K  # ceil(N / K) as an integer group size
E = np.arange(N, dtype=np.intc)
np.random.shuffle(E)
assert k*M < E.shape[0]
return np.sort(E[k*M:(k+1)*M])
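# Hedged illustration (the concrete indices depend on the seeded shuffle): with
# N=10 and K=3 the group size is M = (10 + 2) // 3 = 4, so the groups returned
# for k = 0, 1, 2 have sizes 4, 4 and 2 and together cover every index exactly once.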
def run(nbr_of_splits,
seed,
i,
maractus_params_hdf5_input,
maractus_params_hdf5_output):
# we want to crash before we create the files, if we have illegal values here
assert i < nbr_of_splits
assert maractus_params_hdf5_input[-5:] == ".hdf5"
assert maractus_params_hdf5_output[-5:] == ".hdf5"
h5file_input = h5py.File(maractus_params_hdf5_input, "r")
h5file_output = h5py.File(maractus_params_hdf5_output, "w")
np.random.seed(seed)
k = 0
L_conv2d_layer_name = []
L_dense_layer_name = []
while True:
if '%d_lasagne.layers.Conv2DLayer' % k in h5file_input.keys():
L_conv2d_layer_name.append('%d_lasagne.layers.Conv2DLayer' % k)
elif '%d_lasagne.layers.DenseLayer' % k in h5file_input.keys():
L_dense_layer_name.append('%d_lasagne.layers.DenseLayer' % k)
else:
K = k
break
k = k + 1
for (k, layer_name) in enumerate(L_conv2d_layer_name + L_dense_layer_name):
if layer_name in L_conv2d_layer_name:
layer_name = '%d_lasagne.layers.Conv2DLayer' % k
group = h5file_input[layer_name]
W = np.copy(group['W'])
b = np.copy(group['b'])
W_momentum = np.copy(group['W_momentum']) if 'W_momentum' in group.keys() else None
b_momentum = np.copy(group['b_momentum']) if 'b_momentum' in group.keys() else None
(dim_out, dim_in, h, w) = W.shape
if k == 0:
# We don't touch the very first input layer.
# All trajectories see the input layer completely.
indices_out = indices_partition(dim_out, i, nbr_of_splits)
indices_in = np.arange(dim_in, dtype=np.intc)
else:
indices_out = indices_partition(dim_out, i, nbr_of_splits)
indices_in = indices_partition(dim_in, i, nbr_of_splits)
W_sub = W[indices_out,:,:,:][:,indices_in,:,:]
b_sub = b[indices_out,:,:]
W_momentum_sub = W_momentum[indices_out,:,:,:][:,indices_in,:,:] if (W_momentum is not None) else None
b_momentum_sub = b_momentum[indices_out,:,:] if (b_momentum is not None) else None
elif layer_name in L_dense_layer_name:
layer_name = '%d_lasagne.layers.DenseLayer' % k
group = h5file_input[layer_name]
W = np.copy(group['W'])
b = np.copy(group['b'])
W_momentum = np.copy(group['W_momentum']) if 'W_momentum' in group.keys() else None
b_momentum = np.copy(group['b_momentum']) if 'b_momentum' in group.keys() else None
# warning : the dimensions are NOT in the same order as
# in the case for the filters in the Conv2DLayer
(dim_in, dim_out) = W.shape
if k == 0:
                raise RuntimeError("This was not part of the plan. Maractus doesn't have a dense layer 0.")
elif k == K-1:
# keep all the indices_out for the last layer
indices_out = np.arange(dim_out, dtype=np.intc)
indices_in = indices_partition(dim_in, i, nbr_of_splits)
else:
indices_out = indices_partition(dim_out, i, nbr_of_splits)
indices_in = indices_partition(dim_in, i, nbr_of_splits)
W_sub = W[:,indices_out][indices_in,:]
b_sub = b[indices_out]
W_momentum_sub = W_momentum[:,indices_out][indices_in,:] if (W_momentum is not None) else None
b_momentum_sub = b_momentum[indices_out] if (b_momentum is not None) else None
grp = h5file_output.create_group(layer_name)
grp.create_dataset('W', data=W_sub)
grp.create_dataset('b', data=b_sub)
if W_momentum_sub is not None:
grp.create_dataset('W_momentum', data=W_momentum_sub)
if b_momentum_sub is not None:
grp.create_dataset('b_momentum', data=b_momentum_sub)
# Add information about the indices used in those intermediate steps.
grp.create_dataset('indices_out', data=indices_out)
grp.create_dataset('indices_in', data=indices_in)
grp.create_dataset('original_W_shape', data=W.shape)
grp.create_dataset('original_b_shape', data=b.shape)
h5file_input.close()
h5file_output.close()
import sys, os
import getopt
def usage():
print("")
def main(argv):
"""
"""
try:
opts, args = getopt.getopt(sys.argv[1:], "hv", ["nbr_of_splits=",
"seed=",
"i=",
"maractus_params_hdf5_input=",
"maractus_params_hdf5_output="])
except getopt.GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
usage()
sys.exit(2)
nbr_of_splits = None
seed = None
i = None
#scale_weights_factor = 1.0
maractus_params_hdf5_input = None
maractus_params_hdf5_output = None
verbose = False
for o, a in opts:
if o == "-v":
verbose = True
elif o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("--nbr_of_splits"):
nbr_of_splits = int(a)
elif o in ("--seed"):
seed = int(a)
elif o in ("--i"):
i = int(a)
#elif o in ("--scale_weights_factor"):
# scale_weights_factor = float(a)
elif o in ("--maractus_params_hdf5_input"):
maractus_params_hdf5_input = a
elif o in ("--maractus_params_hdf5_output"):
maractus_params_hdf5_output = a
else:
assert False, "unhandled option"
assert nbr_of_splits is not None
assert seed is not None
assert i is not None
#assert type(scale_weights_factor) == float
assert maractus_params_hdf5_input
assert maractus_params_hdf5_output
run(nbr_of_splits,
seed,
i,
maractus_params_hdf5_input,
maractus_params_hdf5_output)
if __name__ == "__main__":
main(sys.argv)
"""
python split_maractus.py --nbr_of_splits=2 --seed=10 --i=0 --maractus_params_hdf5_input=/home/gyomalin/ML/tmp/maractus_exp10_01.hdf5 --maractus_params_hdf5_output=/home/gyomalin/ML/tmp/maractus_exp10_01_split_0.hdf5
python split_maractus.py --nbr_of_splits=2 --seed=10 --i=1 --maractus_params_hdf5_input=/home/gyomalin/ML/tmp/maractus_exp10_01.hdf5 --maractus_params_hdf5_output=/home/gyomalin/ML/tmp/maractus_exp10_01_split_1.hdf5
"""
|
gyom/ift6266h15
|
code/lasagne/split_maractus.py
|
split_maractus.py
|
py
| 8,107 |
python
|
en
|
code
| 0 |
github-code
|
50
|
22578365660
|
def zl(st):
    # Step 1: normalise the quote characters -- string handling with replace()
st = st.replace("''",'"')
print(st)
    # Step 2: strip the enclosing square brackets -- string slicing [start:end]
st = st[2:-2]
print(st)
    # Step 3: turn the string into a list -- split it with .split() into a new list variable
st_li = st.split('" , "')
print(st_li)
    # Step 4: keep only the number part of each card -- loop over every value in the list and slice it
st_dict = {}
for i in st_li:
i_key = i[1:]
print(i_key)
        # Step 5: count how many times each number appears, using a dict
if (i_key not in st_dict):
st_dict[i_key] = 1
else:
st_dict[i_key] +=1
print(st_dict)
    # Step 6: check whether the hand has both three of a kind and a pair (if checks)
    v1 = 0  # becomes 1 if some key has a count of 3, otherwise stays 0
    v2 = 0  # becomes 1 if some key has a count of 2, otherwise stays 0
for key in st_dict:
if(st_dict[key] == 3):
v1 = 1
if(st_dict[key] == 2):
v2 = 1
if (v1 == 1 and v2 == 1):
print("这把牌可以三带二")
else:
print("只能炸了")
# open is a Python built-in that opens a file; argument 1 is the file path and argument 2 is the mode: r read-only, w write, a append
# "with open() as f" works like "f = open()" but also releases the file even when the code inside the with block raises an error
with open("D:\\softwaredata\\pychrm\\untitledgy1906A\\demo\\day-04\\cards.txt") as f:
    # Read the file: readlines() loads the whole file line by line into a list, while read() would return it as one string
f_li=f.readlines()
    # Loop over the list of lines
for line in f_li:
line = line.replace("\n","")
zl(line)
# print(line)
# print(f_li)
# zl('''["C2" , "D13" , "D2" , "H2" , "H9" , "S13"]''')
|
zhangli1229/gy-1906A
|
demo/day-04/practise.py
|
practise.py
|
py
| 1,977 |
python
|
zh
|
code
| 0 |
github-code
|
50
|
16321492868
|
from bs4 import BeautifulSoup
import time
import json
htmlTinkoff = 'tinkoff.html'
htmlSber = 'sberbank.html'
htmls = [htmlTinkoff, htmlSber]
res = []
def getData():
for i in range(0, len(htmls)):
print(htmls[i])
with open(htmls[i], 'r') as f:
contents = f.read()
soup = BeautifulSoup(contents, 'html.parser')
allNews = soup.findAll('div', {'class': 'document__title'})
for news in allNews:
# time.sleep(4)
link = news.find('a').get('href')
bank = {
'bankName': htmls[i],
'link': link
}
res.append(bank)
# time.sleep(20)
print(res)
with open('res.json', 'w') as file:
json.dump(res, file, indent=2, ensure_ascii=False)
getData()
# print(soup.prettify())
#soup.find('li', {'class': 'search-item'}).find('div', {'class': 'document i-bem'}).find('h2', {'document__head'}).find('div', {'class': 'document__title'}).find('a', href=True)
|
injirez/bankNews
|
parserNewsHtml.py
|
parserNewsHtml.py
|
py
| 1,065 |
python
|
en
|
code
| 0 |
github-code
|
50
|
15074024125
|
import tables
import numpy
import numpy.lib.recfunctions
import multiprocessing as mp
import collections
import logging
import signal
import shutil
import time
import os
import sys
import functools
import json
from tqdm import tqdm
from queue import Empty
from .db import Database
from .models import ProteinEntry
from .tablefmt import ProteinCacheInfo
from os.path import commonprefix, split
logger = logging.getLogger(__name__)
Protein = collections.namedtuple("Protein", ("entry_nr", "hog_id", "group"))
def signal_handler(signum, frame):
logger.info("received signal " + str(signum))
raise KeyboardInterrupt(signum)
def length_limited_set_formatter(s):
if len(s) > 10:
x = s.pop()
s.add(x)
return "Set(size={}, ex: {})".format(len(s), x)
else:
return str(s)
def are_orthologous(a: Protein, b: Protein):
if a.entry_nr == b.entry_nr:
return False
prefix = commonprefix((a.hog_id, b.hog_id))
if "." in prefix and prefix[-1].isdigit():
return False
return True
class CacheBuilderWorker(mp.Process):
def __init__(self, db_fpath, in_queue, out_queue, **kwargs):
super(CacheBuilderWorker, self).__init__(**kwargs)
self.db_fpath = db_fpath
self.in_queue = in_queue
self.out_queue = out_queue
self.h5 = None
self.db = None
def run(self):
self.db = Database(self.db_fpath)
self.h5 = self.db.get_hdf5_handle()
timelimit_get_from_in_queue = functools.partial(self.in_queue.get, timeout=120)
try:
try:
for job in iter(timelimit_get_from_in_queue, None):
fun, params = job
res = getattr(self, fun)(*params)
logger.debug("result for job ({}) ready".format(job))
self.out_queue.put((job, res))
except Empty:
logger.warning(
"No item nor termination signal received in Queue. Giving up"
)
logger.exception("Work-queue is empty")
self.out_queue.put("DONE")
except KeyboardInterrupt:
logger.info("received interrupt. Terminating")
self.db.close()
logger.info("terminating worker process {}".format(self.name))
def load_fam_members(self, fam):
members = []
hog_range = [self.db.format_hogid(x).encode("utf-8") for x in (fam, fam + 1)]
for row in self.h5.get_node("/Protein/Entries").where(
"({!r} <= OmaHOG) & (OmaHOG < {!r})".format(*hog_range)
):
members.append(
Protein(row["EntryNr"], row["OmaHOG"].decode(), row["OmaGroup"])
)
return members
def load_vps(self, entry_nr):
return self.db.get_vpairs(entry_nr)["EntryNr2"]
def load_grp_members(self, group):
return [
row["EntryNr"]
for row in self.h5.get_node("/Protein/Entries").where(
"OmaGroup == {:d}".format(group)
)
]
def analyse_fam(self, fam):
logger.debug("analysing family {}".format(fam))
fam_members = self.load_fam_members(fam)
logger.debug("family {} with {} members".format(fam, len(fam_members)))
grp_members = {
grp: set(self.load_grp_members(grp))
for grp in set(z.group for z in fam_members if z.group > 0)
}
counts = numpy.zeros(
len(fam_members), dtype=tables.dtype_from_descr(ProteinCacheInfo)
)
for i, p1 in tqdm(
enumerate(fam_members),
disable=len(fam_members) < 500,
desc="fam {}".format(fam),
):
vps = set(self.load_vps(p1.entry_nr))
ind_orth = set(p2.entry_nr for p2 in fam_members if are_orthologous(p1, p2))
grp = grp_members.get(p1.group, set([])) - set([p1.entry_nr])
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
"entry {}: vps: {} ipw: {} grp: {} any: {}".format(
p1.entry_nr,
length_limited_set_formatter(vps),
length_limited_set_formatter(ind_orth),
length_limited_set_formatter(grp),
length_limited_set_formatter(vps | ind_orth | grp),
)
)
counts[i]["EntryNr"] = p1.entry_nr
counts[i]["NrPairwiseOrthologs"] = len(vps)
counts[i]["NrHogInducedPWOrthologs"] = len(ind_orth)
counts[i]["NrHogInducedPWParalogs"] = len(fam_members) - len(ind_orth) - 1
counts[i]["NrOMAGroupOrthologs"] = len(grp)
counts[i]["NrAnyOrthologs"] = len(vps | ind_orth | grp)
return counts
def analyse_singleton(self, entry_nr, group_nr):
logger.debug("analysing singleton {} (grp {})".format(entry_nr, group_nr))
vps = set(self.load_vps(entry_nr))
grp_members = set([])
if group_nr > 0:
grp_members = set(self.load_grp_members(group_nr))
counts = numpy.array(
[(entry_nr, len(vps), 0, 0, len(grp_members), len(vps | grp_members))],
dtype=tables.dtype_from_descr(ProteinCacheInfo),
)
return counts
def compute_familydata_json(self, fam):
famhog_id = self.db.format_hogid(fam)
logger.debug("analysing family {}".format(fam))
fam_members = self.load_fam_members(fam)
logger.debug("family {} with {} members".format(fam, len(fam_members)))
if len(fam_members) > 50000:
# this will likely fail to compute the MDS for so many points
# let's skip it for now.
genes_null_similarity = set(p.entry_nr for p in fam_members)
else:
try:
(
genes_null_similarity,
gene_similarity_vals,
) = self.db.get_gene_similarities_hog(famhog_id)
except Exception as e:
print("gene_similarity failed for {}: {}".format(fam, e))
raise
final_json_output = []
for p1 in fam_members:
to_append = {}
protein = ProteinEntry(self.db, p1.entry_nr)
to_append["id"] = p1.entry_nr
to_append["protid"] = protein.omaid
to_append["sequence_length"] = protein.sequence_length
to_append["taxon"] = protein.genome.species_and_strain_as_dict
to_append["xrefid"] = protein.canonicalid
to_append["gc_content"] = protein.gc_content
to_append["nr_exons"] = protein.nr_exons
if p1.entry_nr in genes_null_similarity:
to_append["gene_similarity"] = None
else:
to_append["gene_similarity"] = gene_similarity_vals[p1.entry_nr]
final_json_output.append(to_append)
return json.dumps(final_json_output)
class ConsistenceyError(Exception):
pass
class ResultHandler:
def __init__(self, cache_file):
self.cache_file = cache_file
self.ortholog_count_result = []
self.family_json_offset = []
self.in_memory_json_buffer = []
self.buffer_offset = 0
self.jobs = []
self._last_cache_timestamp = time.time()
self._offset_dtype = [("Fam", "i4"), ("offset", "i8"), ("length", "i4")]
def load_cache(self):
def resilient_load_data_node(h5, node):
try:
res = [h5.get_node(node).read()]
except tables.NoSuchNodeError:
res = []
return res
with tables.open_file(self.cache_file) as cache:
self.ortholog_count_result = resilient_load_data_node(
cache, "/ortholog_counts"
)
self.family_json_offset = resilient_load_data_node(
cache, "/family_json/offset"
)
self.buffer_offset = len(cache.root.family_json.buffer)
self.jobs = cache.root.pending_jobs.read(0)[0]
def add_jobs(self, jobs):
self.jobs.extend(jobs)
def handle_result(self, job, result):
if job[0] == "compute_familydata_json":
self.store_familydata_json_result(job, result)
elif job[0] in ("analyse_fam", "analyse_singleton"):
self.store_ortholog_count_result(job, result)
else:
raise ValueError("Unexpected result type")
self.jobs.remove(job)
if (
time.time() - self._last_cache_timestamp > 300
or sum(len(z) for z in self.in_memory_json_buffer) > 100e6
):
self.write_to_disk()
def store_familydata_json_result(self, job, result):
fam = job[1][0]
encoded_json = result.encode("utf-8")
json_as_np = numpy.ndarray(
(len(encoded_json),), buffer=encoded_json, dtype=tables.StringAtom(1)
)
self.in_memory_json_buffer.append(json_as_np)
self.family_json_offset.append(
numpy.array(
[(fam, self.buffer_offset, len(encoded_json))], dtype=self._offset_dtype
)
)
self.buffer_offset += len(encoded_json)
def store_ortholog_count_result(self, job, result):
self.ortholog_count_result.append(result)
def write_to_disk(self):
logger.info("writing a milestone to disk...")
transfer_data = False
if os.path.exists(self.cache_file):
os.replace(self.cache_file, self.cache_file + ".0")
transfer_data = True
with tables.open_file(self.cache_file, "w") as h5:
buf = h5.create_earray(
"/family_json",
"buffer",
tables.StringAtom(1),
(0,),
createparents=True,
expectedrows=1e9,
)
if transfer_data:
with tables.open_file(self.cache_file + ".0", "r") as prev:
buf.append(prev.root.family_json.buffer.read())
for el in self.in_memory_json_buffer:
buf.append(el)
buf.flush()
if len(self.family_json_offset) > 0:
off = numpy.lib.recfunctions.stack_arrays(
self.family_json_offset, usemask=False
)
h5.create_table("/family_json", "offset", None, obj=off)
if len(self.ortholog_count_result) > 0:
cnts = numpy.lib.recfunctions.stack_arrays(
self.ortholog_count_result, usemask=False
)
h5.create_table("/", "ortholog_counts", ProteinCacheInfo, obj=cnts)
a = h5.create_vlarray("/", "pending_jobs", tables.ObjectAtom())
a.append(self.jobs)
h5.flush()
if len(buf) != self.buffer_offset:
raise ConsistenceyError(
"buffer has unexpeced length: {}vs{}".format(
len(buf), self.buffer_offset
)
)
self.in_memory_json_buffer = []
self._last_cache_timestamp = time.time()
logger.info("finished writing milestone to {}".format(self.cache_file))
def build_cache(db_fpath, nr_procs=None, from_cache=None):
request_queue = mp.Queue()
result_queue = mp.Queue()
nr_procs = nr_procs if nr_procs else mp.cpu_count()
db = Database(db_fpath)
nr_entries = len(db.get_hdf5_handle().get_node("/Protein/Entries"))
result_handler = ResultHandler(from_cache)
if from_cache is not None and os.path.isfile(from_cache):
result_handler.load_cache()
jobs = result_handler.jobs
logger.debug(jobs)
else:
nr_fams = db.get_nr_toplevel_hogs()
singletons = [
(int(r["EntryNr"]), int(r["OmaGroup"]))
for r in db.get_hdf5_handle()
.get_node("/Protein/Entries")
.where('OmaHOG == b""')
]
logger.info(
"found {} hog and {} singleton jobs to be computed".format(
nr_fams, len(singletons)
)
)
jobs = [("analyse_fam", (fam + 1,)) for fam in range(nr_fams)]
jobs.extend([("compute_familydata_json", (fam + 1,)) for fam in range(nr_fams)])
jobs.extend([("analyse_singleton", singleton) for singleton in singletons])
logger.info(
"nr of jobs: {} (expected {})".format(
len(jobs), 2 * nr_fams + len(singletons)
)
)
result_handler.add_jobs(jobs)
db.close()
workers = []
for i in range(nr_procs):
w = CacheBuilderWorker(db_fpath, request_queue, result_queue, daemon=True)
w.start()
workers.append(w)
pending_jobs = set([])
for job in jobs:
request_queue.put(job)
pending_jobs.add(job)
# Sentinel objects to allow clean shutdown: 1 per worker.
for i in range(nr_procs):
request_queue.put(None)
finished = 0
try:
logger.info("start to receive results")
last_cache_timestamp = time.time()
for reply in tqdm(iter(result_queue.get, None), total=len(jobs)):
if reply == "DONE":
finished += 1
logger.info("{} workers finished".format(finished))
if finished == nr_procs:
if len(pending_jobs) > 0:
logger.error(
"still {} pending jobs...: {}".format(
len(pending_jobs), pending_jobs
)
)
break
else:
job, res = reply
logger.debug("received result for job {})".format(job))
result_handler.handle_result(job, res)
result_handler.write_to_disk()
logger.info("exit receiver loop. joining workers...")
for w in workers:
w.join()
logger.debug("all workers joined")
except KeyboardInterrupt as e:
logger.info("recived interrupt. writeing out temp results")
result_handler.write_to_disk()
sys.exit(99)
ret = numpy.lib.recfunctions.stack_arrays(
result_handler.ortholog_count_result, usemask=False
)
ret.sort(order="EntryNr")
logger.info("sorted results: {}".format(ret))
assert check_all_there(nr_entries, ret)
return ret
def check_all_there(nr_prots, cache):
if len(cache) == nr_prots:
return True
missings = set(range(1, nr_prots + 1)) - set(cache["EntryNr"])
logger.error("Missing cache value for {}".format(missings))
return False
def compute_and_store_cached_data(db_fpath, nr_procs=None, force=False, tmp_cache=None):
ortholog_cnts_cache_path = "/Protein/OrthologsCountCache"
if tmp_cache is None:
tmp_cache = "/tmp/compute_cache.h5"
with tables.open_file(db_fpath, "a") as h5:
try:
n = h5.get_node(ortholog_cnts_cache_path)
if force:
h5.remove_node(ortholog_cnts_cache_path)
else:
return
except tables.NoSuchNodeError:
pass
signal.signal(signal.SIGUSR2, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
cache = build_cache(db_fpath, nr_procs=nr_procs, from_cache=tmp_cache)
update_hdf5_from_cachefile(db_fpath, tmp_cache)
def update_hdf5_from_cachefile(db_fpath, tmp_cache):
ortholog_cnts_cache_path = "/Protein/OrthologsCountCache"
path, name = split(ortholog_cnts_cache_path)
with tables.open_file(db_fpath, "a") as h5, tables.open_file(
tmp_cache, "r"
) as cache_h5:
cached_cnts = cache_h5.get_node("/ortholog_counts").read()
cached_cnts.sort(order="EntryNr")
tab = h5.create_table(
path, name, ProteinCacheInfo, createparents=True, obj=cached_cnts
)
tab.colinstances["EntryNr"].create_csindex()
json_off = cache_h5.get_node("/family_json/offset").read()
json_off.sort(order="Fam")
tab = h5.root.RootHOG.MetaData.read()
for fam, off, length in json_off:
if tab[fam - 1]["FamNr"] != fam:
raise ConsistenceyError("table not properly ordered")
tab[fam - 1]["FamDataJsonOffset"] = off
tab[fam - 1]["FamDataJsonLength"] = length
h5.root.RootHOG.MetaData.modify_column(
column=tab["FamDataJsonOffset"], colname="FamDataJsonOffset"
)
h5.root.RootHOG.MetaData.modify_column(
column=tab["FamDataJsonLength"], colname="FamDataJsonLength"
)
json_in_buf = cache_h5.root.family_json.buffer
json_in_buf._f_copy(
h5.root.RootHOG, "JsonBuffer", expectedrows=len(json_in_buf)
)
|
DessimozLab/pyoma
|
pyoma/browser/compute_cache.py
|
compute_cache.py
|
py
| 16,864 |
python
|
en
|
code
| 0 |
github-code
|
50
|
5940123823
|
from transformers import Trainer, TrainingArguments
from transformers import T5Model, T5ForConditionalGeneration, AutoTokenizer
import wandb
from torch.utils.data import Dataset
from tokenizers import Tokenizer
from tokenizers import decoders
import pandas as pd
import torch
import json
wandb.init(project="tester", entity="codeblack")
model = T5ForConditionalGeneration.from_pretrained("t5-small")
args = TrainingArguments(
report_to="wandb",
output_dir="../output",
num_train_epochs=1,
per_device_train_batch_size=16,
per_device_eval_batch_size=64,
warmup_steps=500,
weight_decay=0.001,
logging_dir="./logs",
logging_steps=10,
learning_rate=1e-4
)
class ParaSentences(Dataset):
def __init__(self, tok_file = "../tokenizers/paraBert.json", pdata="../data/clean-data/train.json"):
super(ParaSentences, self).__init__()
self.tokenizer = Tokenizer.from_file(tok_file)
self.tokenizer.decoder = decoders.WordPiece()
self.tokenizer.enable_padding(pad_token="<pad>", pad_id=self.tokenizer.token_to_id("<pad>"), length=50)
self.decoder_input_ids = self.tokenizer.encode("<pad>").ids
with open(pdata) as f:
self.data = json.load(f)
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
phrase = self.tokenizer.encode(self.data[idx]['phrase']).ids
pphrase = self.tokenizer.encode(self.data[idx]['paraphrase']).ids
return {
"input_ids": torch.tensor(phrase),
"decoder_input_ids": self.decoder_input_ids,
"labels": torch.tensor(pphrase)
}
train_ds = ParaSentences()
val_ds = ParaSentences(pdata="../data/clean-data/valid.json")
trainer = Trainer(model=model, args=args, train_dataset=train_ds, eval_dataset=val_ds)
trainer.train()
|
haresh121/Multi-Lingual-Paraphraser
|
scripts/main_trainer.py
|
main_trainer.py
|
py
| 1,835 |
python
|
en
|
code
| 0 |
github-code
|
50
|
30078284715
|
import json
import requests
r = requests.get('http://localhost:3000')
data = r.json()
stringArray = []
for p in data:
sentence = "{0} is color: {1}".format(p["name"], p["color"])
stringArray.append(sentence)
print(sentence)
print(stringArray)
|
UndecidedTech/it3038c-scripts
|
Labs/Lab9/test.py
|
test.py
|
py
| 258 |
python
|
en
|
code
| 1 |
github-code
|
50
|
71635261915
|
import numpy as np
from pydub import AudioSegment
from PIL import Image
import requests
from pathlib import Path
import base64
import os
import random
from riffusion.spectrogram_converter import SpectrogramConverter
from riffusion.spectrogram_params import SpectrogramParams
from riffusion.spectrogram_image_converter import SpectrogramImageConverter
from riffusion.util import image_util
#!!! have to set path to your AI_Model folder
path_to_curr_dir = "/Users/willsaliba/Documents/Topics/MusicMagic1.0/AI_Model"
def generate_with_input_audio(prompt: str, randomness: str, path: str):
# Load audio file
segment = AudioSegment.from_file(path)
# Creation of audio to spectrogram Converter
params = SpectrogramParams()
converter = SpectrogramImageConverter(params=params)
# Convert audio to spectrogram
spectrogram_image = converter.spectrogram_image_from_audio(segment)
# Save the spectrogram image for use with the RiffusionPredictor server
#!!! have to manually set path to seed image
spectrogram_image_path_str = path_to_curr_dir + "/seed_images/spectrogram.png"
spectrogram_image.save(spectrogram_image_path_str)
# Convert the string path to a pathlib.Path object for JSON
spectrogram_image_path = Path(spectrogram_image_path_str)
data = {
"start": {
"prompt": prompt,
"seed": random.randint(1, 100),
"denoising": float(randomness),
},
"end": {
"prompt": prompt,
"seed": random.randint(1, 100),
"denoising": float(randomness),
},
"alpha": 0.5, # Latent space interpolation of start image and end image
"num_inference_steps": 50, # number of steps in diffusion process (should be 50 in production)
"seed_image_id": spectrogram_image_path.stem
}
response = requests.post("http://127.0.0.1:3013/run_inference/", json=data)
newTrackPath = handle_response(response)
return newTrackPath
def generate_without_input_audio(prompt: str, randomness: str):
data = {
"start": {
"prompt": prompt,
"seed": random.randint(1, 100),
"denoising": float(randomness),
},
"end": {
"prompt": prompt,
"seed": random.randint(1, 100),
"denoising": float(randomness),
},
"alpha": 0.5,
"num_inference_steps": 50, #should be 50 in production
}
response = requests.post("http://127.0.0.1:3013/run_inference/", json=data)
newTrackPath = handle_response(response, with_image=False)
return newTrackPath
def handle_response(response, with_image=True):
if response.status_code == 200:
output = response.json()
generated_audio = output['audio']
newTrackPath = path_to_curr_dir + "/outputs/generated_clip.mp3"
with open(newTrackPath, 'wb') as audio_file:
audio_file.write(base64.b64decode(generated_audio.split(',')[1]))
return "SUCCESS"
else:
print(f"Error: {response.text}")
return "error"
|
willsaliba/MusicMagicPlugin1.0
|
AI_Model/scripts/generate.py
|
generate.py
|
py
| 3,094 |
python
|
en
|
code
| 0 |
github-code
|
50
|
43659718119
|
import sys
import pandas as pd
import geocoder as g
if __name__ == "__main__":
print("Caution! The number of requests is highly limited. Please, double-check the input arguments and write \"Okay, proceed\"")
proceed = input(": ")
if proceed == "Okay, proceed":
if len(sys.argv) <= 4:
print("Invalid arguments")
exit(1)
df = pd.read_csv(sys.argv[1])
if '--first-run' in sys.argv:
df.insert(len(df.columns), 'x', None, True)
df.insert(len(df.columns), 'y', None, True)
l, r = int(sys.argv[2]), int(sys.argv[3])
for i in range(l, 1 + r):
print(f"Proceeding row: {i}\r")
coordinates = g.google(df.iloc[i, :]['Адрес ОЖФ'], key="")
df.at[i, 'y'] = coordinates.latlng[0]
df.at[i, 'x'] = coordinates.latlng[1]
df.to_csv(f"{sys.argv[1].split('.csv')[0]}_coordinates.csv")
else:
print("Aborting...")
|
DaniilOkrug/postomats-breach-department
|
ldt_model/collectors/apartments.py
|
apartments.py
|
py
| 966 |
python
|
en
|
code
| 0 |
github-code
|
50
|
36559632589
|
# doublingtime.py
# Name: Brittany Kyncl
# Date: 9.5.22
# Course: CSD205
# Mod 7 Assignment: Time it will take to double investment
# main program purpose message
print('\nWelcome, lets calculate how long it will take for your investment to double!\nFirst, please enter your information below...')
while True:
# declare variables
principal = float(input('\nEnter your initial investment: '))
    APR = float(input('Enter your annual interest rate: '))
time = 0
final_amount = principal
# while loop to determine years to double p
while final_amount < principal * 2:
final_amount = final_amount + final_amount * float(APR)/100
time = time + 1
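    # Sanity check (hedged, invented example): at APR = 6 the loop runs 12 times,
    # since 1.06 ** 12 is roughly 2.01, which matches the Rule-of-72 estimate
    # 72 / 6 = 12 years.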
# Message to show calculation results
print(f"\nAt a {APR}% annual interst rate, your investment of ${principal:0.2f} doubles in {time} years.\n")
print(f"Ending amount in {time} years: ${final_amount:0.2f}\n")
# cont. # loop to re-run main program
while True:
cont = str(input('Perform new calculation? (y/n): '))
if cont.lower() in ('y', 'n'):
break
print('Invalid Entry')
if cont.lower() == 'y':
continue
else:
print('Goodbye')
break
|
bkyncl/Python-Projects-CSD-200
|
Investment Doublin/doublingtime.py
|
doublingtime.py
|
py
| 1,219 |
python
|
en
|
code
| 0 |
github-code
|
50
|
31704470383
|
import tkinter as tk
class Application(tk.Frame):
'''Sample tkinter application class'''
def __init__(self, master=None, title='<application>', **kwargs):
'''Create root window with frame, tune weight and resize'''
super().__init__(master, **kwargs)
self.master.title(title)
self.master.columnconfigure(0, weight=1)
self.master.rowconfigure(0, weight=1)
self.grid(sticky='NEWS')
self.create_widgets()
for column in range(self.grid_size()[0]):
self.columnconfigure(column, weight=1)
for row in range(self.grid_size()[1]):
self.rowconfigure(row, weight=1)
def create_widgets(self):
'''Create all the widgets'''
class App(Application):
def create_widgets(self):
self.figures = ['oval', 'rectangle', 'line']
self.fig_help = tk.StringVar()
self.move = False
self.T = tk.Text(self, undo=True, font='fixed', borderwidth=2, relief='groove')
self.T.bind('<Motion>', self.mouse)
self.T.tag_configure('good', foreground='green')
self.T.tag_configure('bad', foreground='red')
self.T.grid(row=0, column=0, sticky='NEWS')
self.B = tk.LabelFrame(self)
self.B.grid(row=1, columnspan=2, sticky='NEWS')
self.L = tk.Label(self.B, textvar=self.fig_help)
self.L.grid(row=1, column=1)
self.Q = tk.Button(self.B, text='Quit', command=self.master.quit)
self.C = tk.Canvas(self, borderwidth=2, relief='groove')
self.C.grid(row=0, column=1, sticky='NEWS')
self.CC = tk.Button(self.B, text='CLEAR', command=self.clearCanvas, relief='groove')
self.R = tk.Button(self.B, text='RUN', command=self.text2canvas, relief='groove')
for O in self.L, self.CC, self.R, self.Q:
O.grid(row=1, column=self.B.grid_size()[0], padx=20)
def clearCanvas(self):
self.C.delete('all')
def addTag(self, new_tag, line_num, length):
if new_tag:
old_tag = 'bad' if new_tag == 'good' else 'good'
else:
return
self.T.tag_remove(old_tag, '{}.0'.format(line_num), '{}.0 + {} chars'.format(line_num, length))
self.T.tag_add(new_tag, '{}.0'.format(line_num), '{}.0 + {} chars'.format(line_num, length))
def text2canvas(self):
self.clearCanvas()
text = self.T.get(1.0, tk.END).split('\n')
for i, line in enumerate(text):
if not line.lstrip():
continue
elif line[0] == '#':
self.addTag('good', i + 1, len(line))
continue
figure = line.split()[0]
newline = ', '.join(line.split()[1:])
print('self.C.create_{}({})'.format(figure, newline))
try:
eval('self.C.create_{}({})'.format(figure, newline))
self.addTag('good', i + 1, len(line))
except Exception:
self.addTag('bad', i + 1, len(line))
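    # Example input for the text pane (hedged; the lines are invented): each
    # non-comment line is "<figure> <args>" and is evaluated as
    # self.C.create_<figure>(<args>), e.g.
    #   oval 10 10 120 80 fill='red'
    #   line 0 0 200 150 width=3
    #   rectangle 20 20 60 60 outline='blue'
    # Lines starting with '#' are treated as comments and tagged 'good'.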
def mouse(self, event):
coords = '@' + str(event.x) + ',' + str(event.y)
        check_y = self.T.index(coords).split('.')[0]  # line part of the "line.column" index
text = self.T.get('1.0', tk.END).split('\n')[int(check_y) - 1]
if ''.join(ch for ch in text if ch.isalnum()) == '':
self.fig_help.set('Nothing')
elif text.startswith('#'):
self.fig_help.set('Comment')
elif text.split()[0] in self.figures:
figure = text.split()[0]
help_line = '{} x0, y0, x1, y1,\n width=\'width\', fill=\'color\''.format(figure)
if figure != 'line':
help_line += ', outline=\'color\''
self.fig_help.set(help_line)
else:
self.fig_help.set('Error')
app = App(title='20210322_1')
app.mainloop()
|
alexey-kaz/pythonprac
|
20210322_1/task1.py
|
task1.py
|
py
| 3,780 |
python
|
en
|
code
| 0 |
github-code
|
50
|
38741574975
|
#================================================
from functions import *
from manager.GManager import GManager
from manager.GCode import GCode
#================================================
class GCalculator(GManager):
#================================================
def __init__(self):
GManager.__init__(self)
self.m_expression = ""
self.m_result = ""
#================================================
def serialize(self, _code = "calculator"):
lDom = GCode()
lDom.createDoc()
lDom.addDatas(_code, "expression", utf8_to_b64(self.m_expression))
lDom.addDatas(_code, "result", self.m_result)
return lDom.toString()
#================================================
def deserialize(self, _data, _code = "calculator"):
super().deserialize(_data)
lDom = GCode()
lDom.loadXml(_data)
self.m_expression = b64_to_utf8(lDom.getDatas(_code, "expression"))
self.m_result = lDom.getDatas(_code, "result")
#================================================
def run(self, _data):
self.deserialize(_data)
if self.m_method == "":
self.m_logs.addError("La méthode est obligatoire.")
elif self.m_method == "run_calculator":
self.onRunCalculator(_data)
else:
self.m_logs.addError("La méthode est inconnue.")
#================================================
def onRunCalculator(self, _data):
self.m_result = str(eval(self.m_expression))
self.m_logs.addData(self.serialize())
#================================================
|
gkesse/ReadyCode
|
app/python/server/code/src/manager/GCalculator.py
|
GCalculator.py
|
py
| 1,641 |
python
|
en
|
code
| 0 |
github-code
|
50
|
28077901072
|
# -*- coding: utf-8 -*-
"""
@Author 坦克手贝塔
@Date 2023/1/20 19:49
"""
from collections import deque
from typing import List
"""
病毒扩散得很快,现在你的任务是尽可能地通过安装防火墙来隔离病毒。
假设世界由 m x n 的二维矩阵 isInfected 组成, isInfected[i][j] == 0 表示该区域未感染病毒,而 isInfected[i][j] == 1 表示
该区域已感染病毒。可以在任意 2 个相邻单元之间的共享边界上安装一个防火墙(并且只有一个防火墙)。
每天晚上,病毒会从被感染区域向相邻未感染区域扩散,除非被防火墙隔离。现由于资源有限,每天你只能安装一系列防火墙来隔离其中一个
被病毒感染的区域(一个区域或连续的一片区域),且该感染区域对未感染区域的威胁最大且 保证唯一 。
你需要努力使得最后有部分区域不被病毒感染,如果可以成功,那么返回需要使用的防火墙个数; 如果无法实现,则返回在世界被病毒全部感染时
已安装的防火墙个数。
示例 1:
输入: isInfected = [[0,1,0,0,0,0,0,1],[0,1,0,0,0,0,0,1],[0,0,0,0,0,0,0,1],[0,0,0,0,0,0,0,0]]
输出: 10
解释:一共有两块被病毒感染的区域。
在第一天,添加 5 墙隔离病毒区域的左侧。病毒传播后的状态是:
第二天,在右侧添加 5 个墙来隔离病毒区域。此时病毒已经被完全控制住了。
示例 2:
输入: isInfected = [[1,1,1],[1,0,1],[1,1,1]]
输出: 4
解释: 虽然只保存了一个小区域,但却有四面墙。
注意,防火墙只建立在两个不同区域的共享边界上。
示例 3:
输入: isInfected = [[1,1,1,0,0,0,0,0,0],[1,0,1,0,1,1,1,1,1],[1,1,1,0,0,0,0,0,0]]
输出: 13
解释: 在隔离右边感染区域后,隔离左边病毒区域只需要 2 个防火墙。
"""
"""
思路:我们首先可以对矩阵 isInfected 进行广度优先搜索,具体地,当我们遍历到 isInfected 中的一个 1 时,就从这个 1 对应的位置开始
进行广度优先搜索,这样就可以得到连续的一块被病毒感染的区域。在搜索的过程中,如果当前是第 idx (idx≥1) 块被病毒感染的区域,我们就
把这些 1 都赋值成 −idx,这样就可以防止重复搜索,并且可以和非病毒区域 0 区分开来。同时,由于我们每次需要选择“对未感染区域的威胁
最大”的区域设置防火墙,因此我们还需要存储:
该区域相邻的未感染区域(即 0)的位置和个数;
如果需要位该区域设置防火墙,那么需要防火墙的个数。
对于前者,我们在广度优先搜索的过程中,只要在扩展 1 时搜索相邻的 0,就可以把这个 0 对应的位置放在一个哈希集合中。这里使用哈希集合
的原因是同一个 0 可能会和多个 1 相邻,可以防止重复计算。同时,由于多个 1 可能出现在不同的感染区域中,如果通过修改矩阵 isInfected
的形式来标记这些 0,会使得代码编写较为麻烦。
对于后者,计算的方法是类似的,在扩展 1 时如果搜索到相邻的 0,那么我们就需要在 1 和 0 之间的这条网格边上建一个防火墙。同一个 0 和
多个 1 相邻,就需要建立多个防火墙,因此我们只需要使用一个变量在广度优先搜索的过程中计数即可,无需考虑重复的情况。
在广度优先搜索完成后,如果我们没有发现任何感染区域,说明区域内不存在病毒,我们直接返回 0 作为答案。否则,我们需要找到“对未感染
区域的威胁最大”的区域,这里只需要找出对应的哈希集合的大小最大的那块区域即可。
在确定了区域(假设是第 idx 块区域)后,我们把矩阵中所有的 −idx 都变成 2,这样可以不影响任何搜索和判断;除此之外的所有负数都恢复
成 1。此外,所有哈希集合中存储的(除了第 idx 块区域对应的以外)所有相邻位置都需要从 0 变成 1,表示病毒的传播。
最后,如果我们发现区域一共只有一块,那么这次防火墙建立后,不会再有病毒传播,可以返回答案;否则我们还需要继续重复执行上述的所有步骤。
"""
class Solution:
@staticmethod
def containVirus(isInfected: List[List[int]]) -> int:
dirs = [(-1, 0), (1, 0), (0, -1), (0, 1)]
m, n = len(isInfected), len(isInfected[0])
ans = 0
while True:
neighbors, firewalls = list(), list()
for i in range(m):
for j in range(n):
if isInfected[i][j] == 1:
                        # number regions from 1: mark the 1st component with -1, the 2nd with -2, and so on
q = deque([(i, j)])
neighbor = set()
firewall, idx = 0, len(neighbors) + 1
isInfected[i][j] = -idx
while q:
x, y = q.popleft()
for d in range(4):
nx, ny = x + dirs[d][0], y + dirs[d][1]
if 0 <= nx < m and 0 <= ny < n:
                                    # cells still equal to 1 belong to the current region: keep expanding
if isInfected[nx][ny] == 1:
q.append((nx, ny))
isInfected[nx][ny] = -idx
                                    # an adjacent uninfected cell: one wall needed, remember the neighbour
elif isInfected[nx][ny] == 0:
firewall += 1
neighbor.add((nx, ny))
neighbors.append(neighbor)
firewalls.append(firewall)
if not neighbors:
break
            # pick the region that threatens the most uninfected cells
idx = 0
for i in range(1, len(neighbors)):
if len(neighbors[i]) > len(neighbors[idx]):
idx = i
            # restore the other regions to 1; mark the walled-off region as 2
ans += firewalls[idx]
for i in range(m):
for j in range(n):
if isInfected[i][j] < 0:
if isInfected[i][j] != -idx - 1:
isInfected[i][j] = 1
else:
isInfected[i][j] = 2
            # regions that were not walled off spread into their neighbouring cells
for i, neighbor in enumerate(neighbors):
if i != idx:
for x, y in neighbor:
isInfected[x][y] = 1
if len(neighbors) == 1:
break
return ans
|
TankManBeta/LeetCode-Python
|
problem749_hard.py
|
problem749_hard.py
|
py
| 6,811 |
python
|
zh
|
code
| 0 |
github-code
|
50
|
4019027960
|
import re
def lexer(input_string):
keywords = ['if', 'else', 'while', 'for', 'int', 'float']
operators = ['+', '-', '*', '/', '=', '==', '<', '>', '<=', '>=']
symbols = ['(', ')', '{', '}', ',', ';']
token_patterns = [
(r'\b(' + '|'.join(keywords) + r')\b', 'PALABRA_CLAVE'),
(r'\b\d+\b', 'NUMERO_ENTERO'),
(r'[+\-*/=<>:]', 'OPERADOR'),
(r'\b[a-zA-Z_][a-zA-Z0-9_]*\b', 'IDENTIFICADOR'),
(r'[(),;]', 'SIMBOLO'),
        (r'\s+', None)  # ignore whitespace
]
tokens = []
while input_string:
match = None
for pattern, token_type in token_patterns:
regex = re.compile(pattern)
match = regex.match(input_string)
if match:
value = match.group(0)
if token_type:
tokens.append((value, token_type))
break
if not match:
print("Error: Carater no reconocido '{}'".format(input_string[0]))
break
input_string = input_string[len(match.group(0)):].lstrip()
return tokens
# Example usage
codigo = "if x > 0: resultado = 2 * x"
tokens = lexer(codigo)
for token, tipo_token in tokens:
print(f"Token: {token}, Tipo: {tipo_token}")
print("\nLista de Tokens:")
for token, _ in tokens:
print(token)
|
NexusAOD/Proyecto-Traductores-de-Lenguaje-II
|
Etapa del proyecto analizador léxico completo/Analizador Lexico.py
|
Analizador Lexico.py
|
py
| 1,390 |
python
|
es
|
code
| 0 |
github-code
|
50
|
22925276009
|
#python program to print all prime numbers in a given interval
lower = int(input("enter lower value:"))
upper = int(input("enter upper value:"))
print("Prime numbers between", lower, "and", upper, "are:")
for num in range(lower, upper + 1):
if num > 1:
for i in range(2, num):
if (num % i) == 0:
break
else:
print(num)
|
aryngpt11/PythonProgramming
|
lab2python/primeno2.py
|
primeno2.py
|
py
| 391 |
python
|
en
|
code
| 2 |
github-code
|
50
|
32732784672
|
import sys
from PyQt5.QtWidgets import *
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from PyQt5.QtCore import pyqtSignal, pyqtSlot
import re
from mytreeview import MySortFilterProxyModel, ArticleViewer, SourceArticleDBModel
from comparator import ComparatorTableModel, ComparatorViewer, Comparator
from article import *
class EditTabPageWidget(QtWidgets.QWidget):
"""The tab of addtab2()"""
def __init__(self, entry=ArticleEntry(-1)):
super(EditTabPageWidget, self).__init__()
self.tempArticle = entry
topFiller = QWidget()
topFiller.setMaximumWidth(900) # Max width
topFiller.setMinimumHeight(1200)
layout = QGridLayout(topFiller)
# ##############################################################################################################
# Domain labels
# qlabel_uid = QLabel('uid')
qlabel_title = QLabel('title')
qlabel_nickname = QLabel('nickname')
qlabel_year = QLabel('year')
qlabel_venue = QLabel('venue')
qlabel_authors = QLabel('authors')
qlabel_tags = QLabel('tags')
# qlabel_createTime = QLabel('createTime')
# qlabel_lastReadTime = QLabel('lastReadTime')
# qlabel_lastModTime = QLabel('lastModTime')
qlabel_background = QLabel('background')
qlabel_pastWork = QLabel('pastWork')
qlabel_gap = QLabel('gap/problems')
qlabel_contribution = QLabel('contribution')
qlabel_mainMethod = QLabel('mainMethod')
qlabel_overview = QLabel('Overview')
qlabel_myFocus = QLabel('myFocus')
qlabel_doubts = QLabel('doubts')
qlabel_miscellaneous = QLabel('miscellaneous')
qlabel_year.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
qlabel_venue.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
# ##############################################################################################################
# Their widgets:
# qLine_uid = QLineEdit()
self.qLine_title = QLineEdit()
self.qLine_nickname = QLineEdit()
self.qSpin_year = QSpinBox()
self.qSpin_year.setMaximumWidth(100)
self.qLine_venue = QLineEdit()
self.qLine_authors = QLineEdit()
self.qLine_tags = QLineEdit()
self.qText_overview = QPlainTextEdit()
self.qText_background = QPlainTextEdit()
self.qText_pastWork = QPlainTextEdit()
self.qText_gap = QPlainTextEdit()
self.qText_contribution = QPlainTextEdit()
self.qText_mainMethod = QPlainTextEdit()
self.qText_myFocus = QPlainTextEdit()
self.qText_doubts = QPlainTextEdit()
self.qText_miscellaneous = QPlainTextEdit()
# ##############################################################################################################
# Layouts
# layout.addWidget()
qlabel_title.setBuddy(self.qLine_title)
layout.addWidget(qlabel_title, 0, 0, 1, 1)
layout.addWidget(self.qLine_title, 0, 1, 1, 7)
qlabel_nickname.setBuddy(self.qLine_nickname)
layout.addWidget(qlabel_nickname, 1, 0, 1, 1)
layout.addWidget(self.qLine_nickname, 1, 1, 1, 2)
qlabel_venue.setBuddy(self.qLine_venue)
layout.addWidget(qlabel_venue, 1, 3, 1, 1)
layout.addWidget(self.qLine_venue, 1, 4, 1, 2)
qlabel_year.setBuddy(self.qSpin_year)
layout.addWidget(qlabel_year, 1, 6, 1, 1)
layout.addWidget(self.qSpin_year, 1, 7, 1, 1)
self.qSpin_year.setRange(1950, 2050)
self.qSpin_year.setValue(datetime.datetime.utcnow().year)
qlabel_authors.setBuddy(self.qLine_authors)
layout.addWidget(qlabel_authors, 2, 0, 1, 1)
layout.addWidget(self.qLine_authors, 2, 1, 1, 7)
self.qLine_authors.setPlaceholderText("\";\" separated")
qlabel_tags.setBuddy(self.qLine_tags)
layout.addWidget(qlabel_tags, 3, 0, 1, 1)
layout.addWidget(self.qLine_tags, 3, 1, 1, 7)
self.qLine_tags.setPlaceholderText("\";\" separated")
qlabel_overview.setBuddy(self.qText_overview)
self.qText_overview.setTabChangesFocus(True)
layout.addWidget(qlabel_overview, 4, 0, 1, 1)
layout.addWidget(self.qText_overview, 4, 1, 1, 7)
self.qText_overview.setMaximumHeight(120)
qlabel_background.setBuddy(self.qText_background)
self.qText_background.setTabChangesFocus(True)
layout.addWidget(qlabel_background, 5, 0, 1, 1)
layout.addWidget(self.qText_background, 5, 1, 1, 7)
self.qText_background.setMaximumHeight(55)
qlabel_pastWork.setBuddy(self.qText_pastWork)
self.qText_pastWork.setTabChangesFocus(True)
layout.addWidget(qlabel_pastWork, 6, 0, 1, 1)
layout.addWidget(self.qText_pastWork, 6, 1, 1, 7)
self.qText_pastWork.setMaximumHeight(75)
qlabel_gap.setBuddy(self.qText_gap)
self.qText_gap.setTabChangesFocus(True)
layout.addWidget(qlabel_gap, 8, 0, 1, 1)
layout.addWidget(self.qText_gap, 8, 1, 1, 7)
self.qText_gap.setMaximumHeight(75)
qlabel_contribution.setBuddy(self.qText_contribution)
self.qText_contribution.setTabChangesFocus(True)
layout.addWidget(qlabel_contribution, 10, 0, 1, 1)
layout.addWidget(self.qText_contribution, 10, 1, 1, 7)
self.qText_contribution.setMaximumHeight(120)
qlabel_mainMethod.setBuddy(self.qText_mainMethod)
self.qText_mainMethod.setTabChangesFocus(True)
layout.addWidget(qlabel_mainMethod, 12, 0, 1, 1)
layout.addWidget(self.qText_mainMethod, 12, 1, 1, 7)
self.qText_mainMethod.setMaximumHeight(240)
qlabel_myFocus.setBuddy(self.qText_myFocus)
self.qText_myFocus.setTabChangesFocus(True)
layout.addWidget(qlabel_myFocus, 16, 0, 1, 1)
layout.addWidget(self.qText_myFocus, 16, 1, 1, 7)
self.qText_myFocus.setMaximumHeight(120)
qlabel_doubts.setBuddy(self.qText_doubts)
self.qText_doubts.setTabChangesFocus(True)
layout.addWidget(qlabel_doubts, 18, 0, 1, 1)
layout.addWidget(self.qText_doubts, 18, 1, 1, 7)
self.qText_doubts.setMaximumHeight(120)
qlabel_miscellaneous.setBuddy(self.qText_miscellaneous)
self.qText_miscellaneous.setTabChangesFocus(True)
layout.addWidget(qlabel_miscellaneous, 20, 0, 1, 1)
layout.addWidget(self.qText_miscellaneous, 20, 1, 1, 7)
self.qText_miscellaneous.setMaximumHeight(80)
# ##############################################################################################################
# layout.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignTop)
buttonLineFiller = QWidget()
# layout.addWidget(buttonLineFiller, 8, 0, 1, 3)
buttonLineLayout = QHBoxLayout(buttonLineFiller)
# buttonLineLayout.setAlignment(QtCore.Qt.AlignRight)
# buttonLineLayout.setAlignment(QtCore.Qt.AlignLeft)
self.btnSave = QPushButton("Save")
self.btnCancel = QPushButton("Cancel")
self.btnClose = QPushButton("Close")
buttonLineLayout.addWidget(self.btnSave)
buttonLineLayout.addWidget(self.btnCancel)
buttonLineLayout.addWidget(self.btnClose)
layout.addWidget(buttonLineFiller, 22, 4, 1, 4)
scroll = QScrollArea()
scroll.setWidget(topFiller)
scroll.setWidgetResizable(True)
scroll.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignTop) # set the topFiller's pose in the Area
vbox = QVBoxLayout()
vbox.addWidget(scroll)
self.setLayout(vbox)
self.setDisplayData()
def setDisplayData(self):
if self.tempArticle.uid < 0:
return
self.qLine_title.setText(self.tempArticle.title)
self.qLine_nickname.setText(self.tempArticle.nickname)
self.qSpin_year.setValue(self.tempArticle.year)
self.qLine_venue.setText(self.tempArticle.venue)
self.qLine_authors.setText("; ".join(self.tempArticle.authors))
self.qLine_tags.setText("; ".join(self.tempArticle.tags))
self.qText_background.setPlainText(self.tempArticle.background)
self.qText_pastWork.setPlainText(self.tempArticle.pastWork)
self.qText_gap.setPlainText(self.tempArticle.gap)
self.qText_contribution.setPlainText(self.tempArticle.contribution)
self.qText_mainMethod.setPlainText(self.tempArticle.mainMethod)
self.qText_overview.setPlainText(self.tempArticle.overview)
self.qText_myFocus.setPlainText(self.tempArticle.myFocus)
self.qText_doubts.setPlainText(self.tempArticle.doubts)
self.qText_miscellaneous.setPlainText(self.tempArticle.miscellaneous)
def setTitle(self):
self.tempArticle.title = self.qLine_title.text()
def setNickname(self):
self.tempArticle.nickname = self.qLine_nickname.text()
def setYear(self):
self.tempArticle.year = self.qSpin_year.value()
def setVenue(self):
self.tempArticle.venue = self.qLine_venue.text()
def setAuthors(self):
a = self.qLine_authors.text().strip()
b = re.split("\s*;\s*", a)
temp = []
for i in b:
if i:
temp.append(i)
self.tempArticle.authors = temp
def setTags(self):
a = self.qLine_tags.text().strip()
b = re.split("\s*;\s*", a)
temp = []
for i in b:
if i:
print("TAG")
temp.append(i)
self.tempArticle.tags = temp
def setBackground(self):
self.tempArticle.background = str(self.qText_background.toPlainText())
def setPastwork(self):
self.tempArticle.pastWork = str(self.qText_pastWork.toPlainText())
def setGap(self):
self.tempArticle.gap = str(self.qText_gap.toPlainText())
def setContribution(self):
self.tempArticle.contribution = str(self.qText_contribution.toPlainText())
def setMainmethod(self):
self.tempArticle.mainMethod = str(self.qText_mainMethod.toPlainText())
def setOverview(self):
self.tempArticle.overview = str(self.qText_overview.toPlainText())
def setMyfocus(self):
self.tempArticle.myFocus = str(self.qText_myFocus.toPlainText())
def setDoubts(self):
self.tempArticle.doubts = str(self.qText_doubts.toPlainText())
def setMiscellaneous(self):
self.tempArticle.miscellaneous = str(self.qText_miscellaneous.toPlainText())
class ComparatorTabPageWidget(QtWidgets.QWidget):
"""The tab of addtab1"""
def __init__(self):
super(ComparatorTabPageWidget, self).__init__()
hbox = QHBoxLayout()
gbox = QGridLayout()
# Deal with the side bar
leftFrame = QFrame()
leftFrame.setFrameShape(QFrame.StyledPanel)
leftFrame.setMinimumSize(200, 600)
leftFrame.setMaximumWidth(300) # Max width
leftFrame.setLayout(gbox)
compNameLabel = QtWidgets.QLabel("&Comparator Name:")
self.compNameLineEdit = QtWidgets.QLineEdit()
self.compNameLineEdit.setMaxLength(32)
compNameLabel.setBuddy(self.compNameLineEdit)
gbox.addWidget(compNameLabel, 0, 0, 1, 2)
gbox.addWidget(self.compNameLineEdit, 1, 0, 1, 2)
compCommentLabel = QtWidgets.QLabel("My Comment:")
self.compCommentText = QPlainTextEdit()
self.compCommentText.setTabChangesFocus(True)
compNameLabel.setBuddy(self.compCommentText)
gbox.addWidget(compCommentLabel, 2, 0, 1, 2)
gbox.addWidget(self.compCommentText, 3, 0, 1, 2)
groupFilter = QGroupBox("Entry Filter", leftFrame)
groupFilter.setMaximumHeight(250)
groupGridLayout = QGridLayout()
groupFilter.setLayout(groupGridLayout)
self.filterCaseSensitivityCheckBox = QtWidgets.QCheckBox("Case sensitive")
self.filterCaseSensitivityCheckBox.setChecked(False)
self.filterSyntaxCheckBox = QtWidgets.QCheckBox("Use Regex")
self.filterSyntaxCheckBox.setChecked(True)
self.filterPatternLineEdit = QtWidgets.QLineEdit()
self.filterDomainCombo = QtWidgets.QComboBox()
self.filterDomainCombo.addItem("All", searchableDomainIndex)
for i in searchableDomainIndex:
self.filterDomainCombo.addItem(headerNames[i], [i])
filterComboLabel = QtWidgets.QLabel("Filter Domain:")
filterComboLabel.setBuddy(self.filterDomainCombo)
groupGridLayout.addWidget(filterComboLabel, 0, 0, 1, 4)
groupGridLayout.addWidget(self.filterDomainCombo, 1, 0, 1, 4)
filterPatternLabel = QtWidgets.QLabel("&Filter pattern:")
filterPatternLabel.setBuddy(self.filterPatternLineEdit)
groupGridLayout.addWidget(filterPatternLabel, 2, 0, 1, 4)
groupGridLayout.addWidget(self.filterPatternLineEdit, 3, 0, 1, 4)
groupGridLayout.addWidget(self.filterCaseSensitivityCheckBox, 4, 0, 1, 4)
groupGridLayout.addWidget(self.filterSyntaxCheckBox, 5, 0, 1, 4)
gbox.addWidget(groupFilter, 4, 0, 1, 2)
self.btnRemove = QPushButton("Remove from comparator")
self.btnExpandSelected = QPushButton("Expand")
self.btnCollapseSelected = QPushButton("Collapse")
self.btnMoveUp = QPushButton("Move up")
self.btnMoveDown = QPushButton("Move Down")
self.btnSave = QPushButton("Save")
self.btnCancel = QPushButton("Cancel")
gbox.addWidget(self.btnRemove, 5, 0, 1, 2)
gbox.addWidget(self.btnExpandSelected, 6, 0, 1, 1)
gbox.addWidget(self.btnCollapseSelected, 6, 1, 1, 1)
gbox.addWidget(self.btnMoveUp, 8, 0, 1, 1)
gbox.addWidget(self.btnMoveDown, 8, 1, 1, 1)
gbox.addWidget(self.btnSave, 9, 0, 1, 1)
gbox.addWidget(self.btnCancel, 9, 1, 1, 1)
# deal with the main tree view
# TODO: think about reset data/load data
self.qCompViewer = ArticleViewer()
self.comp = Comparator()
self.originalComp = None
srcModel = SourceArticleDBModel(self.comp)
self.qCompViewer.setSourceModel(srcModel)
self.qCompViewer.setDefaultHeaderView()
self.qCompViewer.proxyModel.setDynamicSortFilter(False)
# finally the splitter
splitter1 = QSplitter(QtCore.Qt.Horizontal)
splitter1.addWidget(leftFrame)
splitter1.addWidget(self.qCompViewer)
splitter1.setSizes([100, 200])
hbox.addWidget(splitter1)
self.setLayout(hbox)
self.filterPatternLineEdit.textChanged.connect(self.textFilterChanged)
self.filterSyntaxCheckBox.toggled.connect(self.textFilterChanged)
self.filterCaseSensitivityCheckBox.toggled.connect(self.textFilterChanged)
self.filterDomainCombo.currentIndexChanged.connect(self.textFilterDomainChanged)
self.btnExpandSelected.clicked.connect(self.qCompViewer.onClickExpandRows)
self.btnCollapseSelected.clicked.connect(self.qCompViewer.onClickCollapseRows)
self.btnRemove.clicked.connect(self.onClicked_RemoveFromComparator)
# self.btnMoveUp.clicked.connect(self.onClicked_MoveUp) # FIXME
def setComparatorToShow(self, cp):
if cp:
self.comp = Comparator() # The one to be modified
self.originalComp = cp # The pointer to the original Comparator
self.comp.updateFromGiven(cp)
srcModel = SourceArticleDBModel(self.comp)
self.qCompViewer.setSourceModel(srcModel)
self.qCompViewer.loadHeaderState()
self.compNameLineEdit.setText(self.comp.name)
self.compCommentText.setPlainText(self.comp.comment)
def textFilterDomainChanged(self):
nd = self.filterDomainCombo.itemData(self.filterDomainCombo.currentIndex())
self.qCompViewer.proxyModel.setFilterDomains(nd)
self.textFilterChanged()
def textFilterChanged(self):
print(self.filterPatternLineEdit.text())
if self.filterSyntaxCheckBox.isChecked():
syntax = QtCore.QRegExp.PatternSyntax(QtCore.QRegExp.RegExp)
else:
syntax = QtCore.QRegExp.PatternSyntax(QtCore.QRegExp.FixedString)
caseSensitivity = (
self.filterCaseSensitivityCheckBox.isChecked()
and QtCore.Qt.CaseSensitive or QtCore.Qt.CaseInsensitive)
regExp = QtCore.QRegExp(self.filterPatternLineEdit.text(), caseSensitivity, syntax)
self.qCompViewer.proxyModel.setFilterRegExp(regExp)
# @pyqtSlot()
# def onClicked_MoveUp(self):
# indexes = self.qCompViewer.proxyView.selectedIndexes()
# rowsToMove = set()
# for idx in indexes:
# rowsToMove.add(idx.row())
# itemIndexes = sorted(list(rowsToMove))
# for no, itemIndex in enumerate(itemIndexes):
# if itemIndex == 0:
# continue
# if no == 0 or itemIndexes[no - 1] < itemIndex - 1:
# dest = itemIndex - 1
# else:
# dest = itemIndex
# self.qCompViewer.proxyModel.beginMoveRows(QtCore.QModelIndex(), itemIndex, itemIndex,
# QtCore.QModelIndex(), dest)
# self.qCompViewer.proxyModel.moveRow(QtCore.QModelIndex(), itemIndex, QtCore.QModelIndex(), dest)
# self.qCompViewer.proxyModel.endMoveRows()
@pyqtSlot()
def onClicked_RemoveFromComparator(self):
uids = self.qCompViewer.getSelectedItemUids()
if uids:
self.qCompViewer.proxyView.clearSelection()
for uid in uids:
srcRow = self.qCompViewer.proxyModel.sourceModel().ADB.uidList.index(uid)
self.qCompViewer.proxyModel.sourceModel().removeItem(srcRow)
return
|
lewisjiang/PaperMatrix
|
src/tabpagewidgets.py
|
tabpagewidgets.py
|
py
| 17,897 |
python
|
en
|
code
| 4 |
github-code
|
50
|
25781649937
|
# Fibonacci sequence
# demonstrates the use of yield
def fab2(max):
n,a,b=0,0,1
fab_list=[]
for i in range(max):
fab_list.append(b)
yield fab_list
a,b=b,a+b
for n in fab2(40):
    print(n)
|
domclass/Pyhton_Exercise_Codes
|
斐波那契数列.py
|
斐波那契数列.py
|
py
| 216 |
python
|
en
|
code
| 0 |
github-code
|
50
|
75303305435
|
from grai_schemas.v1.source import SourceV1
from .base import IntegrationAdapter
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from grai_source_mssql.base import MsSQLIntegration
class MssqlAdapter(IntegrationAdapter):
def get_integration(self) -> "MsSQLIntegration":
from grai_source_mssql.base import MsSQLIntegration
metadata = self.run.connection.metadata
secrets = self.run.connection.secrets
source = SourceV1.from_spec(
{
"id": self.run.source.id,
"name": self.run.source.name,
}
)
integration = MsSQLIntegration(
source=source,
user=metadata.get("user"),
password=secrets.get("password"),
database=metadata.get("database"),
host=metadata.get("host"),
port=metadata.get("port"),
driver=metadata.get("driver"),
namespace=self.run.connection.namespace,
additional_connection_strings=["TrustServerCertificate=yes"],
)
return integration
|
grai-io/grai-core
|
grai-server/app/connections/adapters/mssql.py
|
mssql.py
|
py
| 1,087 |
python
|
en
|
code
| 241 |
github-code
|
50
|
6578498398
|
from collections import deque
class Solution:
#Function to return list containing vertices in Topological order.
def topoSort(self, V, adj):
        # Kahn's algorithm
# Code here
q = deque()
indegree = [0] * V
for i in range(V):
for it in adj[i]:
indegree[it] += 1
for i in range(V):
if not indegree[i]:
q.append(i)
topo = []
while q:
node = q.popleft()
topo.append(node)
for it in adj[node]:
indegree[it] -= 1
if not indegree[it]:
q.append(it)
return topo
#Function to return list containing vertices in Topological order.
#SC - O(n) + O(n) -> stack + vis array
#TC - V + E for directed graph
# def topoSort(self, V, adj):
# # Code here
# vis = [0] * V
# stack = []
# for i in range(V):
# if not vis[i]:
# self.dfs(i, vis, stack, adj)
# res = []
# while stack:
# res.append(stack[-1])
# stack.pop()
# return res
# def dfs(self, node, vis, stack, adj):
# vis[node] = 1
# for it in adj[node]:
# if not vis[it]:
# self.dfs(it, vis, stack, adj)
# stack.append(node)
#{
# Driver Code Starts
# Driver Program
import sys
sys.setrecursionlimit(10**6)
def check(graph, N, res):
if N!=len(res):
return False
map=[0]*N
for i in range(N):
map[res[i]]=i
for i in range(N):
for v in graph[i]:
if map[i] > map[v]:
return False
return True
if __name__=='__main__':
t = int(input())
for i in range(t):
e,N = list(map(int, input().strip().split()))
adj = [[] for i in range(N)]
for i in range(e):
u,v=map(int,input().split())
adj[u].append(v)
ob = Solution()
res = ob.topoSort(N, adj)
if check(adj, N, res):
print(1)
else:
print(0)
# Contributed By: Harshit Sidhwa
# } Driver Code Ends
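# A minimal usage sketch, not part of the original driver code. It assumes the
# adjacency-list format used above, i.e. adj[u] lists the vertices reachable
# from u by a directed edge; vertex count and edges here are made up.
#
# ob = Solution()
# adj = [[1, 2], [3], [3], []]        # edges 0->1, 0->2, 1->3, 2->3
# print(ob.topoSort(4, adj))          # one valid order: [0, 1, 2, 3]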
|
dhruvv173/Leetcode
|
Topological sort - GFG/topological-sort.py
|
topological-sort.py
|
py
| 2,255 |
python
|
en
|
code
| 1 |
github-code
|
50
|
18952296654
|
'''This module contains the following:
Controller
A class for Keras (Tensorflow backend) based OpenAI gym controllers.
Models
A class implementing and supplying Keras models to the Controller
class.
ActionTransformations
A container class for methods that transform the controller (Keras model)
output (action) to a representation suitable for the OpenAI gym
environment.
action_transformations
A dictionary that links the action transformation to the specific
environment name. The Controller.fitness method accesses this
dictionary.
EarlyStop
A class containing a method and dictionary that enable
the controller.fitness evaluation to be prematurely terminated if a
    candidate controller's performance is poor. This reduces computational cost.
'''
import numpy as np
import gym
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Activation, Convolution2D, Flatten
from keras.utils import plot_model
class Controller(object):
'''Class for Keras (Tensorflow backend) based OpenAI gym controllers.'''
def __init__(self, modelFunctionHandle, env,
episode_length, device='/cpu:0', render=False,
force_action_space=None):
'''Initialize a controller.
Args:
modelFunctionHandle (function):
Function that returns a keras model and tensorflow default
graph (threading). The function takes the input and output
dimensions of the keras model as an argument.
env (str):
                An OpenAI gym environment name.
episode_length (int):
Number of frames to process.
device (str, optional):
String that specifies the tensorflow device to use.
render (bool, optional):
Boolean to indicate whether the environment is rendered.
force_action_space (int, optional):
Whenever the gym environment is not correctly implemented
( `type(env.action_space_low)!=np.ndarray` ) use this input to
manually specify action_space dimension.
'''
        # get observation/action space dims; this was not always correctly
        # implemented in gym, hence the ugliness
self.env = gym.make(env)
self.observation_space_low = self.env.observation_space.low
self.action_space_low = self.env.action_space.sample()
# get the model
if type(self.action_space_low) == np.ndarray:
self.model, self.graph = modelFunctionHandle(
self.observation_space_low.shape[0], len(self.action_space_low))
# whenever gym would not work properly, set using additional parameter
else:
self.model, self.graph = modelFunctionHandle(
self.observation_space_low.shape[0], force_action_space)
self.stacked_weights = self.model.get_weights()
# save some useful things
self.env_name = env
self.episode_length = episode_length
self.device = device
self.frame_count = 0
self.render = render
# save weight sizes for output as column vector
self.weight_sizes = [(x.shape, x.size) for x in self.stacked_weights]
# save the dimension by simply adding all sizes
self.n = sum([x.size for x in self.stacked_weights])
def fitness(self, flat_weights):
'''Sample the cumulative return of one episode acting according to
current weights.
Args:
            flat_weights (numpy.ndarray): Vector of length self.n specifying the
weights of the controller to sample.
Returns:
float: Cumulative reward after an episode
of length self.episode_length.
'''
# convert weight vector to keras structure and set
self.set_weights(flat_weights)
# reset environment
observation = self.env.reset()
fitness = 0
# loop over steps
for step in range(self.episode_length):
# check rendering
if self.render:
self.env.render()
# be sure to use preferred device
with tf.device(self.device):
# resolves error in multithreading
with self.graph.as_default():
# get controller output
action = self.model.predict(np.array([observation]))
# convert action to gym format
action = action_transformations[self.env_name](action)
# act
observation, reward, done, info = self.env.step(action)
fitness += reward
self.frame_count += 1
# check for early stopping
if done or EarlyStop.check(step, fitness, self.env_name):
# inverse fitness for minimizing algorithms
return -fitness
# inverse fitness for minimizing algorithms
return -fitness
def set_weights(self, flat_weights):
'''Convert the weight vector from optimizer friendly format
        to a layerwise representation and set the model weights accordingly.
Args:
flat_weights (numpy.ndarray): A vector of shape (self.n,) holding
the weights the controller should be set to.
'''
# resolves threading error
with self.graph.as_default():
i = 0
j = 0
# get layer representation
for weight_size in self.weight_sizes:
self.stacked_weights[j] = np.reshape(
flat_weights[i:i+weight_size[1]], weight_size[0])
j += 1
i += weight_size[1]
# set keras model weights
self.model.set_weights(self.stacked_weights)
def get_weights(self):
'''Just a wrapper for the standard methods that returns the
stacked (layerwise) weights of the Keras model.
Returns:
Stacked model weights.
'''
return self.model.get_weights()
class Models(object):
'''Container for methods that return a Keras model and the
tensorflow default graph.
The method must take the dimensionality of the state space as well
as the dimensionality of the action space as arguments.
'''
@staticmethod
def smallModel(input_dim, output_dim):
model = Sequential()
model.add(Dense(10, input_dim=input_dim))
model.add(Activation('elu'))
model.add(Dense(output_dim))
model.add(Activation('sigmoid'))
# resolves error in multithreading
graph = tf.get_default_graph()
return model, graph
@staticmethod
def bipedalModel(input_dim, output_dim):
model = Sequential()
model.add(Dense(30, input_dim=input_dim))
model.add(Activation('elu'))
model.add(Dense(30))
model.add(Activation('elu'))
model.add(Dense(15))
model.add(Activation('elu'))
model.add(Dense(10))
model.add(Activation('elu'))
model.add(Dense(output_dim))
model.add(Activation('sigmoid'))
# resolves error in multithreading
graph = tf.get_default_graph()
return model, graph
@staticmethod
def robopongModel(input_dim, output_dim):
model = Sequential()
model.add(Dense(30, input_dim=input_dim))
model.add(Activation('elu'))
model.add(Dense(30))
model.add(Activation('elu'))
model.add(Dense(15))
model.add(Activation('elu'))
model.add(Dense(10))
model.add(Activation('elu'))
model.add(Dense(output_dim))
model.add(Activation('sigmoid'))
graph=tf.get_default_graph()
return model, graph
@staticmethod
def acrobotModel(input_dim, output_dim):
input_dim=input_dim[0]
model=Sequential()
model.add(Dense(30, input_dim=input_dim))
model.add(Activation('elu'))
model.add(Dense(30))
model.add(Activation('elu'))
model.add(Dense(10))
model.add(Activation('elu'))
model.add(Dense(output_dim))
model.add(Activation('sigmoid'))
graph=tf.get_default_graph()
return model, graph
class ActionTransformations(object):
'''Container for methods that transform the controller (Keras model)
output (action) to a representation suitable for the OpenAI gym
environment.
Typically the method is implemented to suit a specific
controller-environment configuration.
'''
@staticmethod
def cartPoleV0(action):
return int(action[0, 0])
@staticmethod
def carRacingV0(action):
return action[0]
@staticmethod
def bipedalWalkerV2(action):
return (action[0]-[0.5, 0.5, 0.5, 0.5])*2
@staticmethod
def breakoutRamV0(action):
return np.argmax(action[0])
@staticmethod
def roboschoolPongV1(action):
return (action[0]-[0.5, 0.5])*2
@staticmethod
def acrobotV1(action):
#print(action,np.argmax(action[0]))
return np.argmax(action[0])
action_transformations={'CartPole-v0': ActionTransformations.cartPoleV0,
'CarRacing-v0': ActionTransformations.carRacingV0,
'BipedalWalker-v2': ActionTransformations.bipedalWalkerV2,
'Breakout-ram-v0': ActionTransformations.breakoutRamV0,
'RoboschoolPong-v1': ActionTransformations.roboschoolPongV1,
'Acrobot-v1': ActionTransformations.acrobotV1}
'''dict: Links the action transformation to the specific environment name.
The fitness method accesses this dictionary.'''
class EarlyStop(object):
'''Contains a method and dictionary that enable
the controller.fitness evaluation to be prematurely terminated if a
    candidate controller's performance is poor. This reduces computational cost.
    If a given controller falls short of reaching the specified cumulative
reward within the corresponding number of timesteps, the evaluation
in controller.fitness is prematurely terminated in order to reduce
the runtime. The interface to the controller.fitness is given by the
EarlyStop.check method.
'''
step_fitness_dict = {'CartPole-v0': [],
'CarRacing-v0': [],
'Breakout-ram-v0': [],
'BipedalWalker-v2': [(190, 15), (300, 30), (400, 40), (600, 50),
(700, 65), (800, 80)],
'RoboschoolPong-v1': [],
'Acrobot-v1': []}
'''dict: A dictionary specifying corresponding fitness and
    time-step thresholds for the environments.'''
@classmethod
def check(cls, step, fitness, env_name):
'''The interface to the controller.fitness.
Here the check is performed.
Args:
step (int): The current time-step.
fitness (float): The current cumulative reward.
env_name (str): The environments name.
Returns:
bool: Indicating whether evaluation should be prematurely
terminated.
'''
for i in range( len(cls.step_fitness_dict[env_name]) ):
if ( step > cls.step_fitness_dict[env_name][i][0] ) and\
( fitness < cls.step_fitness_dict[env_name][i][1] ):
return True
return False
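# A minimal, hedged usage sketch of the classes documented in the module
# docstring above; it is not part of the original module. It assumes a working
# gym installation with 'CartPole-v0' available, and force_action_space=1 is
# only an illustrative choice to match the cartPoleV0 transformation.
#
# controller = Controller(Models.smallModel, 'CartPole-v0',
#                         episode_length=200, force_action_space=1)
# weights = np.zeros(controller.n)       # flat weight vector of length n
# print(controller.fitness(weights))     # negated cumulative reward of one episode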
|
NiMlr/High-Dim-ES-RL
|
applications/control/gymcontrollers.py
|
gymcontrollers.py
|
py
| 11,832 |
python
|
en
|
code
| 25 |
github-code
|
50
|
42597415594
|
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
import google.auth
import io
from googleapiclient.http import MediaIoBaseDownload, MediaFileUpload
from pathlib import Path
from dotenv import load_dotenv
SCOPES = ["https://www.googleapis.com/auth/drive"]
class CCGPDrive:
"""Class for interacting with CCGP Data Wrangling drive."""
def __init__(self) -> None:
"""Gets credentials and builds google drive service."""
load_dotenv()
self.creds, _ = google.auth.default(scopes=SCOPES)
self.service = build("drive", "v3", credentials=self.creds)
def _get_files_list_response(self, query: str) -> list[dict]:
""" "Helper function that sends creates and send query to Drive API and returns response."""
page_token = None
result = []
while True:
response = (
self.service.files()
.list(
q=query,
spaces="drive",
fields="nextPageToken, files(id, name, modifiedTime, mimeType)",
pageToken=page_token,
)
.execute()
)
files = response.get("files", [])
for file in files:
result.append(file)
page_token = response.get(
"nextPageToken", None
) # Drive API iterates each page of the drive, so we have to do this to make sure we search each page. Doesn't matter for this case but its best practices
if page_token is None:
break
return result
def get_folder_id(self, folder: str) -> str:
query = f"name = '{folder}' and mimeType = 'application/vnd.google-apps.folder'"
found = self._get_files_list_response(query)
if len(found) == 0:
return False
result = found[0].get("id")
return result
def list_files_from_folder(self, folder: str) -> list[dict]:
folder_id = self.get_folder_id(folder)
query = f"'{folder_id}' in parents"
found = self._get_files_list_response(query)
return found
def download_files(self, *files: dict) -> None:
"""Downloads files to current directory."""
for file in files:
if Path(file["name"]).exists():
print(
"File: "
+ "'"
+ file["name"]
+ "'"
+ " already exists, skipping download."
)
continue
if file.get("mimeType") == "application/vnd.google-apps.spreadsheet":
request = self.service.files().export_media(
fileId=file.get("id"), mimeType="text/tab-separated-values"
)
file["name"]
else:
request = self.service.files().get_media(fileId=file.get("id"))
fh = io.FileIO(file.get("name"), "wb")
downloader = MediaIoBaseDownload(fh, request)
done = False
print("Downloading file " + "'" + file["name"] + "," + file["id"] + "'")
while done is False:
status, done = downloader.next_chunk()
def upload_file(
self, file: Path, folder_name: str = None, folder_id: str = None
) -> None:
if folder_name is not None and folder_id is None:
folder_id = self.get_folder_id(folder_name)
file_metadata = {"name": file.name, "parents": [folder_id]}
if folder_id == False:
folder_id = self.create_folder(folder_name)
file_metadata = {"name": file.name, "parents": [folder_id]}
elif folder_id is not None:
file_metadata = {"name": file.name, "parents": [folder_id]}
else:
file_metadata = {"name": file.name}
media = MediaFileUpload(file)
up = (
self.service.files()
.create(body=file_metadata, media_body=media, fields="id")
.execute()
)
print("Uploaded file: " + "'" + file.name + "'")
def create_folder(self, folder_name: str, parent_id: str = None) -> str:
""" "Creates folder and returns id"""
if parent_id is None:
parent_id = self.get_folder_id("Project Results")
file_metadata = {
"name": folder_name,
"parents": [parent_id],
"mimeType": "application/vnd.google-apps.folder",
}
file = (
self.service.files()
.create(body=file_metadata, fields="id", supportsAllDrives=True)
.execute()
)
return file.get("id")
def main():
g = CCGPDrive()
hi = g.create_folder("hi")
bye = g.create_folder("bye", hi)
if __name__ == "__main__":
main()
|
cademirch/ccgp-data-wrangling
|
utils/gdrive.py
|
gdrive.py
|
py
| 4,861 |
python
|
en
|
code
| 1 |
github-code
|
50
|
34242307761
|
import pandas as pd
import numpy as np
from os import listdir
from pickle import load
from sklearn.preprocessing import LabelEncoder
def label_encoder_data(data):
columns_to_encode = list(data.select_dtypes(include=['object']))
le = LabelEncoder()
for feature in columns_to_encode:
data[feature] = le.fit_transform(data[feature])
return data
def load_model(path):
with open(path, "rb") as f:
loaded_model = load(f)
return loaded_model
def predict(model_path, file, check):
if check == True:
model = load_model(model_path)
data = pd.read_csv(file)
encoded_data = label_encoder_data(data)
        df = encoded_data.drop("Label", axis=1)
df = df.sample(frac=1).reset_index(drop=True)
predictions = model.predict(df)
predictions = np.around(predictions, decimals=0)
predictions = np.where(predictions == 2., 1., predictions)
new_data = df.copy()
try:
new_data['Predict'] = predictions
new_data.to_csv('dataset/classification/results.csv', index=False)
except:
print('Bad request')
else:
model = load_model(model_path)
data = pd.read_csv(file)
encoded_data = label_encoder_data(data)
        df = encoded_data.drop("Label", axis=1)
df = df.sample(frac=1).reset_index(drop=True)
predictions = model.predict(df)
predictions = np.around(predictions, decimals=0)
new_data = df.copy()
try:
new_data['Predict'] = predictions
new_data.to_csv('dataset/classification/results.csv', index=False)
except:
print('Bad request')
return df, predictions
def get_files(folder_name, file_extension):
files = listdir(folder_name)
files = list(filter(lambda x: x.endswith(file_extension), files))
return files
def allowed_file(filename, ALLOWED_EXTENSIONS):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
|
hurtishka/Machine-Learning-App
|
classification.py
|
classification.py
|
py
| 2,081 |
python
|
en
|
code
| 0 |
github-code
|
50
|
15405978308
|
import tensorflow as tf
from tensorflow.keras.layers import (
Input,
)
from tensorflow.keras.models import Model
from tensorflow.keras.applications import MobileNetV2
from indian_mobilnet_unet.mobileunet import decoder_block, loss_IoU
print("TF Version: ", tf.__version__)
def build_mobilenetv2_unet2(input_shape=(256, 256, 3)): # (512, 512, 3)
"""Input"""
inputs = Input(shape=input_shape)
""" Pre-trained MobileNetV2 """
encoder = MobileNetV2(
include_top=False, weights=None, input_tensor=inputs, alpha=0.35
)
encoder.trainable = True
""" Encoder """
s1 = encoder.get_layer("input_1").output # (512 x 512)
s2 = encoder.get_layer("block_1_expand_relu").output # (256 x 256)
s3 = encoder.get_layer("block_3_expand_relu").output # (128 x 128)
s4 = encoder.get_layer("block_6_expand_relu").output # (64 x 64)
""" Bridge """
b1 = encoder.get_layer("block_13_expand_relu").output # (32 x 32)
""" Decoder """
d1 = decoder_block(b1, s4, 512) # (64 x 64)
d2 = decoder_block(d1, s3, 256) # (128 x 128)
# d3 = decoder_block(d2, s2, 128) # (256 x 256)
# d4 = decoder_block(d3, s1, 64) # (512 x 512)
""" Output """
outputs = []
for i in range(10):
if i == 0:
outputs.append(
tf.keras.layers.Conv2D(
1,
(3, 3),
padding="same",
name="foot_mask",
activation="sigmoid",
)(d2)
)
elif i == 1:
outputs.append(
tf.keras.layers.Conv2D(
1,
(3, 3),
padding="same",
name="leg_mask",
activation="sigmoid",
)(d2)
)
else:
outputs.append(
tf.keras.layers.Conv2D(
1,
(3, 3),
padding="same",
name="keypoints" + str(i - 2),
activation="sigmoid",
)(d2)
)
# outputs.append(
# tf.keras.layers.Conv2D(2, (1, 1), activation="sigmoid", name="classOut")(d2)
# )
model = tf.keras.Model(inputs=[inputs], outputs=outputs, name="MobileNetV2_U-Net")
opt = tf.keras.optimizers.Adam(learning_rate=0.001)
model.compile(
optimizer=opt,
loss={
"foot_mask": [loss_IoU],
"leg_mask": [loss_IoU],
"keypoints0": [loss_IoU],
"keypoints1": [loss_IoU],
"keypoints2": [loss_IoU],
"keypoints3": [loss_IoU],
"keypoints4": [loss_IoU],
"keypoints5": [loss_IoU],
"keypoints6": [loss_IoU],
"keypoints7": [loss_IoU],
},
metrics=["accuracy"],
)
return model
if __name__ == "__main__":
model = build_mobilenetv2_unet2((256, 256, 3))
model.summary()
|
sugartechnology/foot-detector-tracker-public
|
mobileunet2.py
|
mobileunet2.py
|
py
| 2,991 |
python
|
en
|
code
| 0 |
github-code
|
50
|
34467643685
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
class SoftmaxClassifier(object):
'''
Basic implementation of a softmax classifier.
Parameters
----------
random_state: int (default: 1337)
Defines the random state of numpy for a particular class instance.
n_classes: int (default: 2)
Number of classes used for the classification.
epochs: int (default: 500)
Number of epochs to train the model. This should be validated with a
validation set in real world problems to avoid overtraining or
stopping too early.
learning_rate: float (default: 0.1)
        Initial learning rate. Also has to be validated to achieve a fast
convergence. A learning rate decay schedule still needs to be
implemented.
'''
def __init__(self,
random_state=None,
n_classes=2,
epochs=500,
learning_rate=.1):
        # seed numpy's global RNG so that results are reproducible
        if random_state is None:
            np.random.seed(1337)
        else:
            np.random.seed(random_state)
self._is_fitted = False
self._W = None
self._b = None
self._n_classes = n_classes
self.epochs = epochs
self.learning_rate = learning_rate
def fit(self, Xs, Ys, init_params=True):
'''
Fits the model for given `Xs` and `Ys` and initializes all the
parameters if needed.
Parameters
----------
Xs: array of shape (n_points, n_features)
Two dimensional array containing data points with all features.
Ys: array of shape (n_points, n_classes)
            Two dimensional array containing the labels of all data points in a
one-hot structure.
init_params: bool
If True the weights and biases get initialized, otherwise the
class attributes _W and _b are used.
'''
self._fit(Xs=Xs, Ys=Ys, init_params=init_params)
self._is_fitted = True
return self
def _fit(self, Xs, Ys, init_params):
if init_params:
n_classes = self._n_classes
n_features = Xs.shape[1]
self._W, self._b = self._init_params(n_features, n_classes)
self.costs = []
        if self._W is None or self._b is None:
raise AttributeError(
'Initialize weights and biases before fitting.')
# Implement batch use here! For the moment one batch = all data
for i in range(self.epochs):
scores = self._scores(Xs, self._W, self._b)
probs = self._softmax(scores)
cost = self._cost(probs, Ys)
self.costs.append(cost)
dW, db = self._calc_gradient(Xs, Ys, probs)
# The gradient can also be calculated numerically to check
# the analytical calculation
# dW_, db_ = self._calc_numeric_gradient(Xs, Ys)
# print(dW, db)
# print(dW_, db_)
self._W += -self.learning_rate * dW
self._b += -self.learning_rate * db
def predict(self, Xs):
'''
Predict the class of data points `Xs` given the current model.
Parameters
----------
Xs: array of shape (n_points, n_features)
Two dimensional array containing data points with all features.
Returns
-------
array of shape (n_points,)
The returned array contains the predicted classlabel for every data
point in `Xs`.
'''
if not self._is_fitted:
raise AttributeError('Model is not fitted, yet!')
return self._predict(Xs)
def _predict(self, Xs):
scores = self._scores(Xs, self._W, self._b)
probs = self._softmax(scores)
return self._to_classlabel(probs)
def _calc_gradient(self, Xs, Ys, probs):
diff = -(Ys - probs)
diff /= diff.shape[0]
dW = Xs.T.dot(diff)
db = np.sum(diff, axis=0)
return dW, db
def _calc_numeric_gradient(self, Xs, Ys):
from copy import deepcopy as cp
dW = np.zeros(self._W.shape)
db = np.zeros(self._b.shape)
h = self.learning_rate
it = np.nditer(self._W, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
ix = it.multi_index
Wph = cp(self._W)
Wph[ix] += h
yp_pred = self._softmax(self._scores(Xs, Wph, self._b))
cph = self._cost(yp_pred, Ys)
Wmh = cp(self._W)
Wmh[ix] -= h
ym_pred = self._softmax(self._scores(Xs, Wmh, self._b))
cmh = self._cost(ym_pred, Ys)
dW[ix] = (cph - cmh) / (2 * h)
it.iternext()
it = np.nditer(self._b, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
ix = it.multi_index
bph = cp(self._b)
bph[ix] += h
yp_pred = self._softmax(self._scores(Xs, self._W, bph))
cph = self._cost(yp_pred, Ys)
bmh = cp(self._b)
bmh[ix] -= h
ym_pred = self._softmax(self._scores(Xs, self._W, bmh))
cmh = self._cost(ym_pred, Ys)
db[ix] = (cph - cmh) / (2 * h)
it.iternext()
return dW, db
def _init_params(self, n_features, n_classes):
weight_shape = (n_features, n_classes)
bias_shape = (n_classes,)
W = np.random.rand(n_features * n_classes) / \
np.sqrt(n_features * n_classes)
W = W.reshape(weight_shape)
b = np.zeros(bias_shape)
return W, b
def _scores(self, xs, W, b):
scores = np.matmul(xs, W) + b
return scores
def _softmax(self, scores):
scores -= np.max(scores)
probs = (np.exp(scores).T / np.sum(np.exp(scores), axis=1)).T
return probs
def _to_classlabel(self, probs):
return np.argmax(probs, axis=1)
def _cross_entropy(self, y_pred, y_true):
y_pred = np.clip(y_pred, 1e-10, 1)
cross_entropy = -np.sum(y_true * np.log(y_pred), axis=1)
return cross_entropy
def _cost(self, y_pred, y_true):
'''
This function calculates the actual cost,
        which makes it possible to add extra terms to the objective,
e.g. L2 regularization.
'''
return np.mean(self._cross_entropy(y_pred, y_true))
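    # Hedged sketch, not in the original class: if the L2 regularization
    # mentioned in the docstring above were desired, _cost could be extended
    # roughly like this, where `reg` would be a new hyperparameter:
    #
    #     def _cost(self, y_pred, y_true, reg=1e-3):
    #         data_loss = np.mean(self._cross_entropy(y_pred, y_true))
    #         return data_loss + 0.5 * reg * np.sum(self._W ** 2)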
def generate_easy_data_(self, N_points=50):
Xs_0_mean = [2., 10.]
Xs_0_cov = [[1., 0.], [0., 1.]]
Xs_0 = np.random.multivariate_normal(Xs_0_mean,
Xs_0_cov,
size=N_points)
Xs_1_mean = [5., 5.]
Xs_1_cov = [[1., 0.], [0., 1.]]
Xs_1 = np.random.multivariate_normal(Xs_1_mean,
Xs_1_cov,
size=N_points)
Xs = np.append(Xs_0, Xs_1, axis=0)
n_classes = 2
        Ys_0 = np.zeros(N_points, dtype=int)
        Ys_1 = np.ones(N_points, dtype=int)
Ys_ = np.append(Ys_0, Ys_1)
Ys = np.eye(n_classes)[Ys_]
return Xs, Ys
def generate_spiral_data(self, N_points=50):
n_classes = self._n_classes
n_features = 2
Xs = np.zeros((N_points * n_classes, n_features))
        Ys = np.zeros((N_points * n_classes), dtype=int)
for i in range(n_classes):
ix = range(N_points * i, N_points * (i + 1))
r = np.linspace(0., 1., N_points)
t = np.linspace(i * 4, (i + 1) * 4, N_points) + \
np.random.randn(N_points) * 0.2
Xs[ix] = np.c_[r * np.sin(t), r * np.cos(t)]
Ys[ix] = i
Ys = np.eye(n_classes)[Ys]
return Xs, Ys
def generate_two_pop(self):
P0 = np.load('data/P0.npy')
P1 = np.load('data/P1.npy')
Xs = np.append(P0.T, P1.T, axis=0)
        Ys = np.append(np.zeros(P0.shape[1], dtype=int),
                       np.ones(P1.shape[1], dtype=int))
Ys = np.eye(2)[Ys]
return Xs, Ys
def plot_decision_regions(Xs, Ys, classifier,
ax=None, res=.01, colors=None):
if ax is None:
ax = plt.gca()
if colors is None:
colors = sns.color_palette()
x_min, x_max = np.min(Xs[:, 0]) - .1, np.max(Xs[:, 0]) + .1
y_min, y_max = np.min(Xs[:, 1]) - .1, np.max(Xs[:, 1]) + .1
xx, yy = np.meshgrid(np.arange(x_min, x_max, res),
np.arange(y_min, y_max, res))
Z = classifier.predict(np.array([xx.ravel(), yy.ravel()]).T)
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z,
alpha=.3,
levels=np.arange(Z.max() + 2) - .5,
colors=colors)
ax.axis(xmin=x_min, xmax=x_max, ymin=y_min, ymax=y_max)
for ix in np.unique(np.argmax(Ys, axis=1)):
ax.scatter(x=Xs[np.argmax(Ys, axis=1) == ix, 0],
y=Xs[np.argmax(Ys, axis=1) == ix, 1],
alpha=.8,
label='Class {}'.format(int(ix)),
c=colors[ix])
ax.legend(loc='lower right', fancybox=True)
if __name__ == '__main__':
sc = SoftmaxClassifier()
Xs, Ys = sc.generate_two_pop()
sc.fit(Xs, Ys)
plot_decision_regions(Xs, Ys, sc)
plt.savefig('2_pop.pdf')
sc = SoftmaxClassifier(n_classes=3)
Xs, Ys = sc.generate_spiral_data()
sc.fit(Xs, Ys)
plot_decision_regions(Xs, Ys, sc)
plt.savefig('spiral_data.pdf')
|
mxmeier/smd_examples
|
examples/softmax_regression.py
|
softmax_regression.py
|
py
| 9,594 |
python
|
en
|
code
| 0 |
github-code
|
50
|
12064032600
|
from django import forms
from django.core.mail import send_mail
from django.core.validators import validate_email
from meetings.models import Template
class MultiEmailField(forms.Field):
def to_python(self, value):
"""Normalize data to a list of strings."""
# Return an empty list if no input was given.
if not value:
return []
return [item.strip() for item in value.split(',')]
def validate(self, value):
"""Check if value consists only of valid emails."""
# Use the parent's handling of required fields, etc.
super().validate(value)
for email in value:
validate_email(email)
class InviteForm(forms.Form):
members = MultiEmailField(label="Mitglieder",
widget=forms.TextInput(attrs={'class': "input"}))
guests = MultiEmailField(label="Gäste", widget=forms.TextInput(attrs={'class': "input"}), required=False)
subject = forms.CharField(label="Betreff", initial="Einladung zur Sitzung",
widget=forms.TextInput(attrs={'class': "input"}), required=True)
message = forms.CharField(label="Nachricht", widget=forms.Textarea(attrs={'class': "textarea"}), required=True)
def __init__(self, *args, **kwargs):
meeting = kwargs.pop('meeting')
template = Template.objects.get(slug='invitation')
super().__init__(*args, **kwargs)
self.fields['guests'].initial = ', '.join(meeting.guests.split(','))
self.fields['message'].initial = template.content.format(date=meeting.date.strftime('%d.%m.%Y'), agenda=meeting.agenda)
def send_mail(self):
send_mail(
self.cleaned_data['subject'],
self.cleaned_data['message'],
None,
self.cleaned_data['members'] + self.cleaned_data['guests'],
)
|
timptner/farafmb
|
meetings/forms.py
|
forms.py
|
py
| 1,857 |
python
|
en
|
code
| 0 |
github-code
|
50
|
6214229480
|
#!/usr/bin/python
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import GObject, Gtk
from os import path
from utils import uri_to_path
class CreateWindow(GObject.GObject):
__gsignals__ = {
'refresh': (GObject.SIGNAL_RUN_FIRST, None, ())
}
def __init__(self, window, file, uri):
GObject.GObject.__init__(self)
self._builder = Gtk.Builder()
self._builder.add_from_resource('/com/nautilus/create-desktop-entry/ui/create.ui')
self._builder.connect_signals(self)
self._window = self._builder.get_object("window")
self._window.set_transient_for(window)
self._build_window(file, uri)
self._window.show_all()
def _build_window(self, file, uri):
filename = file.get_name()
if filename.startswith("."):
filename = filename[1:]
self._name = self._builder.get_object("name")
self._name.set_text(filename.split(".")[0])
self._description = self._builder.get_object("description")
self._command = self._builder.get_object("command")
self._command.set_text(uri)
self._named_icon = self._builder.get_object("named-icon")
self._custom_icon = self._builder.get_object("custom-icon")
self._named_icon_entry = self._builder.get_object("named-icon-entry")
self._file_chooser = self._builder.get_object("file-chooser")
self._create_button = self._builder.get_object("create-button")
def on_required_changed(self, args):
if not self._name.get_text().strip() or not self._command.get_text().strip():
self._create_button.set_sensitive(False)
else:
self._create_button.set_sensitive(True)
def on_change_icon_type(self, args):
if self._named_icon.get_active():
self._named_icon_entry.set_sensitive(True)
self._file_chooser.set_sensitive(False)
else:
self._named_icon_entry.set_sensitive(False)
self._file_chooser.set_sensitive(True)
def on_named_icon_entry_changed(self, args):
self._named_icon_entry.set_property("primary_icon_name", self._named_icon_entry.get_text())
def on_create(self, args):
name = self._name.get_text().strip()
f = open(path.expanduser("~/.local/share/applications/" + name + ".desktop"), "w")
f.write("[Desktop Entry]\n")
f.write("Name=" + name + "\n")
f.write("Comment=" + self._description.get_text().strip()+ "\n")
f.write("Exec=" + self._command.get_text().strip()+ "\n")
f.write("Type=Application\n")
if self._named_icon.get_active():
icon = self._named_icon_entry.get_text()
else:
icon = uri_to_path(self._file_chooser.get_uri())
f.write("Icon=" + icon.strip())
self._window.destroy()
def on_destroy(self, args):
self._window.destroy()
|
luaVolk/nautilus-create-desktop-entry
|
src/windows/create.py
|
create.py
|
py
| 2,924 |
python
|
en
|
code
| 0 |
github-code
|
50
|
22896711019
|
import os
import json
import tornado
import memcache
import htmlmin
from config import config, base_path
from handler import Handler
ioloop = tornado.ioloop.IOLoop.instance()
class Application(object):
def __init__(self, urls):
urle = []
for url, method in urls:
handler = type(method.__name__ + 'Handler', (Handler,),
dict(handler=tornado.gen.coroutine(method)))
urle.append((url, handler))
print(urle)
self.app = tornado.web.Application(
urle, cookie_secret=config['cookie_secret'])
self.template_dir = config['template_dir'] if config['template_dir'].startswith(base_path) \
else os.path.join(base_path, config['template_dir'])
self.template_files = []
if config['use_memcache']:
self.load_templates()
def load_templates(self, path=None):
memcache_url = '{host}:{port}'.format(**config['memcache'])
mem_client = memcache.Client([memcache_url], debug=0)
if not path:
path = self.template_dir
try:
templates = os.listdir(path)
except OSError as error:
print('Error while loading template files: {}'.format(error))
return
for item in templates:
_item = os.path.join(base_path, path, item)
if os.path.isfile(_item):
template_name = _item.replace(self.template_dir, '')[1:]
key = 'template:{template_path}'.format(
template_path=template_name)
value = {
"content": "",
"filename": template_name
}
with open(_item) as tpl_file:
value["content"] = htmlmin.minify(tpl_file.read())
mem_client.set(key, json.dumps(value))
elif os.path.isdir(_item):
self.load_templates(_item)
else:
print('Do nothing, shouldn\'t happen!')
def start(self):
self.app.listen(config['listen_port'])
ioloop.start()
|
deceq/t4p
|
t4p/application.py
|
application.py
|
py
| 2,125 |
python
|
en
|
code
| 0 |
github-code
|
50
|
31974298904
|
from django.db.models.signals import post_save, post_delete
from models import LogWorks
def my_callback(sender, **kwargs):
"""
    Record every model operation (creation/editing/deletion)
    as an entry in the LogWorks model.
"""
if sender._meta.object_name == 'LogWorks':
return None
sign = LogWorks()
work = 'deletion'
if 'created' in kwargs.keys():
work = 'creation' if kwargs['created'] else 'editing'
sign.mod_name = sender._meta.object_name
sign.work = work
sign.save()
post_save.connect(my_callback)
post_delete.connect(my_callback)
|
myar/FortyTwoTestTask
|
apps/hello/signals.py
|
signals.py
|
py
| 605 |
python
|
en
|
code
| null |
github-code
|
50
|
37957507223
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def binaryTreePaths(self, root):
"""
:type root: TreeNode
:rtype: List[str]
"""
if root == None:
return []
ans = []
dfs(root, ans, str(root.val))
return ans
def dfs(root, ans, temp):
if root.left:
dfs(root.left, ans, temp + "->"+str(root.left.val))
if root.right:
dfs(root.right, ans, temp + "->"+str(root.right.val))
if root.right == None and root.left == None:
ans.append(temp)
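# Hedged usage sketch, not part of the original snippet: it assumes the
# TreeNode class sketched in the comment at the top has been defined.
#
# root = TreeNode(1)
# root.left, root.right = TreeNode(2), TreeNode(3)
# root.left.right = TreeNode(5)
# print(Solution().binaryTreePaths(root))   # ['1->2->5', '1->3']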
|
ruizhang84/LeetCode-OJ
|
binaryTreePaths.py
|
binaryTreePaths.py
|
py
| 751 |
python
|
en
|
code
| 0 |
github-code
|
50
|
26146602117
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'mysite.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
(r'^admin/', include(admin.site.urls)),
(r'^hello/$','mysite.views.hello'),
(r'^books/$','books.views.booklist'),
(r'^polls/',include('polls.urls')),
)
|
jarod-chan/mysite
|
mysite/urls.py
|
urls.py
|
py
| 416 |
python
|
en
|
code
| 0 |
github-code
|
50
|
6923251571
|
import pandas
import time
import random
import csv
from google_search import Search
import os
from os import listdir
from os.path import isfile, join
from difflib import SequenceMatcher
# Paths for the data to be analyzed
BASE_PATH = os.path.dirname(os.path.realpath(__file__))
ARTICLES_PATH = BASE_PATH + "/data/datasets/train-articles/"
ARTICLE_LABELS_PATH = ""
#Google search result title length is 70 characters
MAX_TITLE_LENGTH = 70
def similarity(title, queryTitle):
title = title[0:MAX_TITLE_LENGTH]
return SequenceMatcher(None, title, queryTitle).ratio()
def getBestMatch(article):
similarities = []
for x in [x for x in Search.query_google(article[1]) if not x[1] == '']:
similarities.append((x[0], x[1], similarity(article[1], x[0])))
if len(similarities) > 0:
return max(similarities,key=lambda item:item[2])
return ('', 'NULL', 0)
articleFiles = [f for f in listdir(ARTICLES_PATH) if isfile(join(ARTICLES_PATH, f))]
articles = []
for article in articleFiles:
title = ""
with open(ARTICLES_PATH + article, 'r') as file:
for line in file:
if line in ['\r\n', '\n']:
break
else:
title += line
articles.append((article.split("article")[1].split(".")[0],title))
results = []
for i in range(365,len(articles)):
article = articles[i]
print("Analyzing " + str(i) + "/" + str(len(articles)) + " TITLE = " +article[1])
bestMatch = getBestMatch(article)
print(bestMatch)
if bestMatch[2] > 0.2:
results.append((article[0], bestMatch[1]))
else:
results.append((article[0], 'NULL'))
with open(BASE_PATH + '/dates.csv','w') as out:
csv_out=csv.writer(out)
csv_out.writerow(['id','date'])
for row in results:
csv_out.writerow(row)
|
IliassAymaz/Propaganda-detector-political-polarization
|
date_annotation/analyze_data.py
|
analyze_data.py
|
py
| 1,822 |
python
|
en
|
code
| 0 |
github-code
|
50
|
19847824669
|
# def Nhap():
# n = int(input("Nhập n : "))
# m = int(input("Nhập m : "))
# a = []
# for i in range(n):
# k = [0]*m
# for j in range(m):
# k[j] = float (input('a[{}][{}] = '.format(i, j)))
# a.append(k)
# return a
#
# def write_file(a):
# f = open('D:/MATRIX.txt', mode='w')
# f.write(str(len(a)) + " ")
# f.write(str(len(a[0])) + "\n")
#
# for i in range(len(a)):
# for j in range (len(a[0])):
# f.write(str(a[i][j]) + ' ')
# f.write('\n')
# f.close ()
#
# a = Nhap()
# print(a)
# write_file(a)
#
"""
Read a matrix a(n x m) of real numbers from the keyboard and write it to a text file in the format:
Line 1: contains the two integers n, m
Remaining lines: contain the rows of the matrix
"""
def Nhap():
n = int(input('Nhập số dòng : '))
m = int(input('Nhập số cột : '))
a = []
for i in range(n):
k = [0] * m
for j in range(m):
k[j] = float(input('Ma trận a[{},{}] = '.format(i + 1, j + 1)))
a.append(k)
return a
def WriteFile(a):
f = open('D:/MATRIX.txt', mode='w')
    f.write('n ='+str(len(a)) + ' ')  # write the number of rows
    f.write('m = '+str(len(a[0])) + '\n')  # write the number of columns
for i in range(len(a)):
for j in range(len(a[0])):
f.write(str(a[i][j]) + ' ')
f.write('\n')
f.close()
a = Nhap()
print(a)
WriteFile(a)
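
# Addition for illustration (not part of the original exercise): a minimal sketch
# that reads the matrix back, assuming the file was produced by WriteFile above,
# i.e. the first line is the "n = ... m = ..." header and every following line
# holds one whitespace-separated matrix row.
def ReadFile(path='D:/MATRIX.txt'):
    with open(path) as f:
        f.readline()  # skip the header line with n and m
        return [[float(x) for x in line.split()] for line in f if line.strip()]

# Example: b = ReadFile(); print(b)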
|
linhlukar/PYTHON
|
Python/TH4/41.py
|
41.py
|
py
| 1,478 |
python
|
vi
|
code
| 0 |
github-code
|
50
|
71425539355
|
from flask import Flask, jsonify
from rpc_publisher import RpcClient
from notification_publisher import publish
import json
from apscheduler.schedulers.background import BackgroundScheduler
app = Flask(__name__)
scheduler = BackgroundScheduler()
scheduler.start()
@scheduler.scheduled_job('cron', day='*', hour='18', minute='00')
def first_job():
data = {'target_function':'assigned_zero'}
rpcclient = RpcClient()
response = rpcclient.call(json.dumps(data))
job_count = response.decode('utf-8')
if int(job_count) > 0:
subject = "First Mail"
mail_body = str(job_count)+" numbers of tasks are available."
receiver_list = "[email protected]","[email protected]"
data = {'subject':subject, 'mail_body':mail_body, 'receiver_list':receiver_list}
publish(data)
@scheduler.scheduled_job('cron', day='*', hour='21', minute='00')
def second_job():
data = {'target_function':'assigned_one'}
rpcclient = RpcClient()
response = rpcclient.call(json.dumps(data))
job_count = response.decode('utf-8')
if int(job_count) > 0:
subject = "Second Mail"
mail_body = "No task available!"
receiver_list = "[email protected]","[email protected]"
data = {'subject':subject, 'mail_body':mail_body,'receiver_list':receiver_list}
publish(data)
if __name__ == '__main__':
app.run(debug=True, use_reloader=False,
host='0.0.0.0', port="5000", threaded=True)
|
cdchinmoy/CRON_RabbitMQ_Redis_Flask_Application
|
cron/app.py
|
app.py
|
py
| 1,489 |
python
|
en
|
code
| 0 |
github-code
|
50
|
30413359151
|
""" Environment Map Class for storing information about the simulation
environment. Contains lists of environmental features in the form of
(x, y) coordinate pairs. """
class EnvMap:
""" Container entity for environmental features. """
def __init__(self, width, height, air, surface, dead,
doors, windows, handwash, arrival, other):
self.width = width # x resolution
self.height = height # y resolution
self.air = air # list of air cells
self.surface = surface # list of contactable surface cells
self.dead = dead # list of inaccessible environment cells
self.doors = doors # list of doors
self.windows = windows # list of windows
self.handwash = handwash # list of handwashing stations
self.arrival = arrival # list of arrival/departure cells
self.other = other # list of unknown cells (errors?)
return
|
bcwarner/covid-modeling
|
image_mapping/envmap.py
|
envmap.py
|
py
| 971 |
python
|
en
|
code
| 0 |
github-code
|
50
|
26478914076
|
#!/usr/bin/env python
# _*_coding:utf-8_*_
"""
@Time : 2019/5/22 11:44
@Author : Damon
@Email : [email protected]
@File : test_playPlan.py
@Software : PyCharm
"""
import ast
import json
import time
import unittest
from ddt import ddt, data
from common.logger import logger
from common import do_excel
from common import contants
from common import context
from common import do_mysql
from common.http_request import HttpRequest2
from common.context import Context
@ddt
class PlanTasksTest(unittest.TestCase):
    excel = do_excel.DoExcel(contants.case_file, 'shop_add')  # test cases for the shop_add interface
cases = excel.get_case()
@classmethod
def setUpClass(cls):
logger.info('准备测试前置')
cls.http_request = HttpRequest2()
cls.mysql = do_mysql.DoMysql()
@data(*cases)
def test_plan_tasks(self, case):
logger.info('开始测试{}'.format(case.title))
case.data = context.replace(case.data)
resp = self.http_request.http_request(case.method, case.url, case.data)
results = json.loads(resp.text)
try:
sql = f"SELECT plan_id FROM alading_jdcs.al_en_plan WHERE goodsId = '{goodsId}';"
            saleprice = self.mysql.fetch_one(sql)['salePrice']  # look up the sale price of the product
self.assertEqual(results["data"]["shelves"]["salePrice"], saleprice)
        except AssertionError as e:
self.excel.writer_result(case.case_id + 1, resp.text, "FAIL")
logger.error('报错了{}'.format(e))
raise e
logger.info('测试结束:{}'.format(case.title))
@classmethod
def tearDownClass(cls):
logger.info('测试后置处理')
cls.http_request.close()
|
likangming/Ala_autotest
|
test_case/shop/test_shop.py
|
test_shop.py
|
py
| 1,724 |
python
|
en
|
code
| 0 |
github-code
|
50
|
37683019587
|
import numpy as np
import torch
from torch.utils.data import DataLoader
from solution.data import SCDataset
def run_epoch(epoch, train_loader, model, loss_fn, optimizer, device):
loss_acc = 0
for batch, (x, y) in enumerate(train_loader):
x = x.to(device=device)
y = y.to(device=device)
y_hat = model(x)
loss = loss_fn(y_hat, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_acc = loss_acc * 0.5 + loss.item() * 0.5
print('Epoch {}, Batch {}, Loss {}'.format(epoch, batch, loss_acc))
return loss_acc
def accuracy(pred, labels):
pred_labels = torch.round(torch.sigmoid(pred))
acc = (pred_labels == labels).sum().float()/labels.shape[0]
return acc
def validate(model, loader, device, loss_fn):
    # this runs very slowly
losses = []
acc = []
with torch.no_grad():
for k, (x, y) in enumerate(loader):
y_hat = model(x.to(device=device)).cpu()
losses.append(loss_fn(y_hat, y).item())
acc.append(accuracy(y_hat, y).item())
return np.array(losses), np.array(acc)
def prepare_loaders(ref_dataset_csv_path, transform, transform_label):
train_dataset = SCDataset(ref_dataset_csv_path, subset='train',
transform=transform, transform_label=transform_label)
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
valid_dataset = SCDataset(ref_dataset_csv_path, subset='valid',
transform=transform, transform_label=transform_label)
valid_loader = DataLoader(valid_dataset, batch_size=64, shuffle=True)
test_dataset = SCDataset(ref_dataset_csv_path, subset='test',
transform=transform, transform_label=transform_label)
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=True)
return train_loader, valid_loader, test_loader
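
# Illustrative composition of the helpers above (everything here is a placeholder:
# the csv path, transforms, model, loss_fn, optimizer and device are not defined
# in this module):
#
#   train_loader, valid_loader, test_loader = prepare_loaders(
#       'speech_commands.csv', transform, transform_label)
#   for epoch in range(num_epochs):
#       run_epoch(epoch, train_loader, model, loss_fn, optimizer, device)
#       val_losses, val_accs = validate(model, valid_loader, device, loss_fn)
#       print('valid loss', val_losses.mean(), 'valid acc', val_accs.mean())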
|
nikolaims/speech_command_detector
|
solution/learning.py
|
learning.py
|
py
| 1,955 |
python
|
en
|
code
| 0 |
github-code
|
50
|
18669324787
|
'''
from pywget import wget
Linux
source /opt/intel/openvino/bin/setupvars.sh
# Linux
cd /opt/intel/openvino/deployment_tools/tools/model_downloader
Model Downloader
python3 downloader.py --name face-detection-adas-0001 --precisions FP32 -o /home/workspace
python3 downloader.py --name gaze-estimation-adas-0002 -o /home/thomas/Models
# python3 face_detection.py --model models/face-detection-retail-0004 --video demo.mp4
# mac
sudo python3 downloader.py --name face-detection-adas-0001 -o /Users/pro/Documents/GitHub/Lightbox/models/2021
# Windows
C:\Program Files (x86)\IntelSWTools\openvino\deployment_tools\tools\model_downloader>
python3 downloader.py --name face-detection-adas-0001 --precisions FP32
# wget https://download.01.org/opencv/2021/openvinotoolkit/2021.2/open_model_zoo/models_bin/3/face-detection-0200/FP16/face-detection-0200.xml
# wget https://download.01.org/opencv/2021/openvinotoolkit/2021.2/open_model_zoo/models_bin/3/face-detection-0200/FP16/face-detection-0200.bin
'''
import wget
import os
extension = ['bin', 'xml']
path = 'https://download.01.org/opencv/2021/openvinotoolkit/2021.2/open_model_zoo/models_bin/3/face-detection-0200/FP16/face-detection-0200.'
for i in extension:
new_path = path+i
print (new_path)
filename = wget.download(new_path)
|
ET-Technologies/stepbystep
|
Openvino/download_model_openvino.py
|
download_model_openvino.py
|
py
| 1,295 |
python
|
en
|
code
| 0 |
github-code
|
50
|
36064317392
|
from typing import List

class Solution:
    def kClosest(self, points: List[List[int]], k: int) -> List[List[int]]:
        origin = [0, 0]
        result = []
        output = []
        # squared distance of every point from the origin
        for i in range(0, len(points)):
            n = points[i][0] - origin[0]
            m = points[i][1] - origin[1]
            result.append(n ** 2 + m ** 2)
        # pick the k points with the smallest squared distances
        newresult = result.copy()
        used = [False] * len(points)
        while len(output) < k:
            # smallest squared distance not yet claimed
            a = min(newresult)
            # take exactly one unused point at that distance
            for x in range(0, len(result)):
                if result[x] == a and not used[x]:
                    output.append(points[x])
                    used[x] = True
                    break
            newresult.remove(a)
        return output
|
nicole-mulela/Competitive-Programming
|
KClosestPointstoOrigin.py
|
KClosestPointstoOrigin.py
|
py
| 829 |
python
|
en
|
code
| 0 |
github-code
|
50
|
33997388755
|
import speech_recognition as sr
import pyaudio
def Listen():
r=sr.Recognizer()
with sr.Microphone() as source:
print("")
print("Listening... ")
r.pause_threshold=1
audio=r.listen(source)
try:
print("Recognizing..")
query=r.recognize_google(audio,language="en-in")
print(f"You Said:{query}")
except Exception as e:
print("Say that again")
return ""
query=str(query)
return query.lower()
|
Ethancoder012/AI-Assistant
|
listen.py
|
listen.py
|
py
| 486 |
python
|
en
|
code
| 0 |
github-code
|
50
|
8712881705
|
from django.urls import path
from . import views
app_name = 'secMes'
urlpatterns = [
path('', views.home, name='home'),
path('login/', views.login, name='login'),
path('logout/', views.logout, name='logout'),
path('signup/', views.signup, name='signup'),
path('sendMsg/', views.sendMsg, name='sendMsg'),
path('msgPage/', views.msgPage, name='msgPage'),
path('findUser/', views.findUser, name='findUser'),
path('aboutCreator/', views.aboutCreator, name='aboutCreator'),
path('sendToId/<str:unique_id>', views.sendToId, name='sendToId'),
path('outputMessage/', views.outputMessage, name='outputMessage'),
path('createAccount/', views.createAccount, name='createAccount'),
]
|
ROHIT318/secret-message
|
secMes/urls.py
|
urls.py
|
py
| 684 |
python
|
en
|
code
| 0 |
github-code
|
50
|
16903369385
|
import heapq
class Solution(object):
def kthSmallest(self, matrix, k):
"""
:type matrix: List[List[int]]
:type k: int
:rtype: int
"""
vals = [val for row in matrix for val in row]
heapq.heapify(vals)
for _ in range(k):
ans = heapq.heappop(vals)
return ans
|
LYoung-Hub/Algorithm-Data-Structure
|
kthSmallestInMatrix.py
|
kthSmallestInMatrix.py
|
py
| 346 |
python
|
en
|
code
| 0 |
github-code
|
50
|
73700328796
|
'''
The soldier 许三多 has an AK47
A soldier can open fire
A gun can shoot bullets
A gun can be loaded with bullets
'''
# Gun class
class Gun:
def __init__(self, model):
self.model = model
self.bullte_count = 0
def add_bullte(self, count):
self.bullte_count += count
def shoot(self):
if self.bullte_count <= 0:
print('%s没有子弹了' %self.model)
return
self.bullte_count -= 1
print('射击了一枪, 剩余子弹数量: %d' % (self.bullte_count))
# Soldier class
class Soldier:
def __init__(self, name):
self.name = name
self.gun = None
def fire(self):
        # open fire
if self.gun is None:
print("%s 还没有枪" % self.name)
return
self.gun.add_bullte(50)
self.gun.shoot()
print("%s射中了敌人" % self.name)
AK47 = Gun('AK47')
xusanduo = Soldier('许三多')
# print(xusanduo.gun)
xusanduo.gun = AK47
# print(xusanduo.gun)
xusanduo.fire()
|
zhaofangfang1991/algorithm
|
python_heima/code/面向对象4.py
|
面向对象4.py
|
py
| 990 |
python
|
en
|
code
| 0 |
github-code
|
50
|
941138109
|
import matplotlib.pyplot as plt
import numpy as np
def main():
x = [-3,-2,-1,0,1,2,3,4]
y_jieyi = list(map(lambda a: jieyi(a, 0.5), x))
y_hinge = list(map(lambda a: hinge(a), x))
import pdb;pdb.set_trace()
plt.figure(1)
plt.plot(x,y_jieyi)
plt.plot(x,y_hinge)
plt.show()
def jieyi(x, p):
if x <= 1:
y = p*((1-x)**2 + (1-x))
else:
y =0
return y
def hinge(x):
if x <= 1:
return 1-x
else:
return 0
if __name__ =="__main__":
main()
|
jiechenyi/elastic-svm
|
func_pic.py
|
func_pic.py
|
py
| 529 |
python
|
en
|
code
| 0 |
github-code
|
50
|
8457312840
|
import json
import sys
import time
from selenium import webdriver
from bs4 import BeautifulSoup as soup
import openpyxl
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
PATH = r'C:\Program Files (x86)\chromedriver.exe'
driver = webdriver.Chrome(PATH)
driver.get('https://www.lineups.com/nba/nba-player-minutes-per-game')
# id=ngb-dd-items_per_page
element = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.ID, "ngb-dd-items_per_page")))
item_list = driver.find_element_by_id('ngb-dd-items_per_page')
#print('drop down found')
item_list.click()
players_num_list = driver.find_elements_by_css_selector("label[class='m-0']")
max_players = players_num_list[-1]
max_players.click()
time.sleep(1)
page_soup = soup(driver.page_source, "html.parser")
mins = page_soup.findAll("td", {"data-title": "Projected Minutes"})
names = page_soup.findAll("td", {"data-title": "Name"})
nameslist = []
with open('playerNames.json') as f:
player_names = json.load(f)
for name in names:
textname = name.find("span", {'class': 'player-name-col-lg'}).text
nameslist.append(player_names[textname.strip()])
player_mins = []
for i in range(0, len(mins)) :
player_mins.append({'NAME': nameslist[i], 'MINs': mins[i].span.text})
#print(player_mins[4])
driver.close()
my_file = "C:\\Users\\brose32\\Documents\\" + sys.argv[1]
wb = openpyxl.load_workbook(my_file)
mins_sheet = wb.create_sheet("MINUTES_PROJ")
mins_sheet.append(("Name", "Minutes"))
for player in player_mins:
mins_sheet.append((player['NAME'], float(player['MINs'])))
wb.save(my_file)
|
brose32/dfsprojections
|
MinutesScrape.py
|
MinutesScrape.py
|
py
| 1,741 |
python
|
en
|
code
| 0 |
github-code
|
50
|
17913518180
|
#!/usr/bin/python3
import functools
import pytest
from brownie.test import coverage
@pytest.fixture
def branch_results(coverage_mode, evmtester):
build = evmtester._build
yield functools.partial(_get_branch_results, build)
# organizes branch results based on if they evaluated True or False
def _get_branch_results(build):
branch_false, branch_true = [
sorted(i) for i in list(coverage.get_coverage_eval().values())[0]["EVMTester"]["0"][1:]
]
coverage.clear()
branch_results = {True: [], False: []}
for i in branch_true:
key, map_ = _get_branch(build, i, True)
branch_results[key].append(map_)
for i in branch_false:
key, map_ = _get_branch(build, i, False)
branch_results[key].append(map_)
return branch_results
def _get_branch(build, idx, jump):
cov_map = build["coverageMap"]["branches"]["0"]
result = next((y for v in cov_map.values() for x, y in v.items() if int(x) == idx), None)
if result:
return result[-1] == jump, list(result[:-1])
raise ValueError("Branch map index does not exist")
|
eth-brownie/brownie
|
tests/test/coverage/conftest.py
|
conftest.py
|
py
| 1,103 |
python
|
en
|
code
| 2,541 |
github-code
|
50
|
11600354569
|
from flask import Flask, render_template, request, redirect, make_response
app = Flask(__name__)
@app.route('/')
def index():
if request.cookies.get("gamemode"):
resp = make_response(render_template('index.html'))
resp.set_cookie('gamemode', expires=0)
return resp
return render_template('index.html')
@app.route('/game')
def game():
return render_template('game.html')
@app.route('/setcookie')
def setcookie():
gamemode = request.args.get("gamemode")
redirect_to_index = redirect('/game')
response = make_response(redirect_to_index)
response.set_cookie('gamemode', value=gamemode)
return response
if __name__ == '__main__':
app.run(debug=True)
|
evajanka/js-game
|
server.py
|
server.py
|
py
| 709 |
python
|
en
|
code
| 0 |
github-code
|
50
|
28077871482
|
# -*- coding: utf-8 -*-
"""
@Author 坦克手贝塔
@Date 2022/1/31 16:55
"""
"""
给你两个单词 word1 和 word2, 请返回将 word1 转换成 word2 所使用的最少操作数 。
你可以对一个单词进行如下三种操作:
插入一个字符
删除一个字符
替换一个字符
输入:word1 = "horse", word2 = "ros"
输出:3
解释:
horse -> rorse (将 'h' 替换为 'r')
rorse -> rose (删除 'r')
rose -> ros (删除 'e')
输入:word1 = "intention", word2 = "execution"
输出:5
解释:
intention -> inention (删除 't')
inention -> enention (将 'i' 替换为 'e')
enention -> exention (将 'n' 替换为 'x')
exention -> exection (将 'n' 替换为 'c')
exection -> execution (插入 'u')
"""
"""
思路:用dp,dp[i][j]表示word1的前i个变成word2的前j个所用的最少步数,考虑到字符串为空的情况,加入了'',初始化的时候第一行为
空字符串变成word2,即增加操作,第一列为word1变成空串的情况,即删除操作。dp[i][j]=min(dp[i-1][j-1],dp[i-1][j],dp[i][j-1])+1,
如果word1[i-1]==word2[j-1],说明当前字符相同,所以只需操作前面的字符即可,即dp[i][j]=dp[i-1][j-1]
"""
class Solution(object):
def minDistance(self, word1, word2):
"""
:type word1: str
:type word2: str
:rtype: int
"""
m = len(word1)
n = len(word2)
if m * n == 0:
return m + n
dp = [[0 for _ in range(n+1)] for _ in range(m+1)]
for i in range(0, m+1):
dp[i][0] = i
for j in range(0, n+1):
dp[0][j] = j
for i in range(1, m+1):
for j in range(1, n+1):
dp[i][j] = min(dp[i-1][j-1], dp[i-1][j], dp[i][j-1]) + 1
if word1[i-1] == word2[j-1]:
dp[i][j] = dp[i-1][j-1]
return dp[m][n]
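
# Quick self-check against the two examples in the problem statement above
# (this driver block is an illustrative addition, not part of the original solution):
if __name__ == "__main__":
    solver = Solution()
    print(solver.minDistance("horse", "ros"))            # expected: 3
    print(solver.minDistance("intention", "execution"))  # expected: 5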
|
TankManBeta/LeetCode-Python
|
problem72_hard.py
|
problem72_hard.py
|
py
| 1,907 |
python
|
zh
|
code
| 0 |
github-code
|
50
|
22200119262
|
"""Helpers for writing rules under //pods."""
__all__ = [
'App',
'Mount',
'SystemdUnitGroup',
'Volume',
'define_pod',
'make_pod_journal_watcher_content',
'make_pod_oneshot_content',
'make_pod_service_content',
'make_timer_content',
]
import dataclasses
import itertools
import logging
import typing
from pathlib import Path
import foreman
from g1 import scripts
from g1.bases.assertions import ASSERT
from g1.containers import models as ctr_models
from g1.operations.cores import models as ops_models
import shipyard2
import shipyard2.rules
from shipyard2.rules import releases
from shipyard2.rules import images as _images
# Re-export these.
App = ctr_models.PodConfig.App
Mount = ctr_models.PodConfig.Mount
SystemdUnitGroup = ops_models.PodDeployInstruction.SystemdUnitGroup
Volume = ops_models.PodDeployInstruction.Volume
LOG = logging.getLogger(__name__)
@dataclasses.dataclass(frozen=True)
class PodRules:
build: foreman.Rule
def define_pod(
*,
name: str,
apps: typing.List[App] = (),
images: typing.List[str] = (),
mounts: typing.List[Mount] = (),
volumes: typing.List[Volume] = (),
systemd_unit_groups: typing.List[SystemdUnitGroup] = (),
token_names: typing.Mapping[str, str] = None,
):
"""Define a pod.
This defines:
* Parameter: name/version.
* Rule: name/build. NOTE: This rule is generally run in the host
system, not inside a builder pod.
"""
ASSERT(len(images) <= 1, 'expect at most one image per pod for now: {}')
# Let's require absolute release labels (because it is quite hard to
# derive label path for images and volumes from pod label).
ASSERT.all(images, lambda label: label.startswith('//'))
ASSERT.all(volumes, lambda volume: volume.label.startswith('//'))
ASSERT.unique(map(_get_label_name, images))
ASSERT.unique(_get_label_name(volume.label) for volume in volumes)
name_prefix = shipyard2.rules.canonicalize_name_prefix(name)
parameter_version = name_prefix + 'version'
rule_build = name_prefix + 'build'
(foreman.define_parameter(parameter_version)\
.with_doc('pod version'))
images = list(map(foreman.Label.parse, images))
@foreman.rule(rule_build)
@foreman.rule.depend('//pods/bases:build')
@foreman.rule.depend('//releases:build')
def build(parameters):
version = ASSERT.not_none(parameters[parameter_version])
pod_dir_path = releases.get_output_dir_path(parameters, name, version)
if (
pod_dir_path / \
shipyard2.POD_DIR_RELEASE_METADATA_FILENAME
).exists():
LOG.info('skip: build pod: %s %s', name, version)
return
LOG.info('build pod: %s %s', name, version)
try:
scripts.mkdir(pod_dir_path)
releases.generate_release_metadata(
parameters,
pod_dir_path / shipyard2.POD_DIR_RELEASE_METADATA_FILENAME,
)
_generate_deploy_instruction(
parameters=parameters,
pod_dir_path=pod_dir_path,
name=name,
version=version,
apps=apps,
images=images,
mounts=mounts,
volumes=volumes,
systemd_unit_groups=systemd_unit_groups,
token_names=token_names,
)
_link_images(parameters, pod_dir_path, images)
_link_volumes(parameters, pod_dir_path, volumes)
except Exception:
# Roll back on error.
scripts.rm(pod_dir_path, recursive=True)
raise
for label in images:
build.depend(str(_images.derive_rule(label)))
return PodRules(build=build)
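
# Hypothetical invocation sketch (illustrative only: the pod name and image label
# below are made up, and apps/mounts/volumes are omitted because their field
# definitions live outside this file):
#
#   example_pod = define_pod(
#       name='example-pod',
#       images=['//examples/example-pod:example-image'],
#   )
#
# As described in the docstring above, this defines the example-pod/version
# parameter and the example-pod/build rule.
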
def _get_label_name(label):
return foreman.Label.parse(label).name
def _generate_deploy_instruction(
*,
parameters,
pod_dir_path,
name,
version,
apps,
images,
mounts,
volumes,
systemd_unit_groups,
token_names,
):
releases.dump(
ops_models.PodDeployInstruction(
label=str(releases.get_output_label(name)),
pod_config_template=ctr_models.PodConfig(
name=name,
version=version,
apps=apps,
images=[
ctr_models.PodConfig.Image(
name=shipyard2.BASE,
version=_images.get_image_version(
parameters,
shipyard2.BASE_LABEL,
),
),
*(
ctr_models.PodConfig.Image(
name=str(image.name),
version=_images.get_image_version(
parameters,
image,
),
) for image in images
),
],
mounts=mounts,
),
volumes=volumes,
systemd_unit_groups=systemd_unit_groups,
token_names=token_names or {},
),
pod_dir_path / shipyard2.POD_DIR_DEPLOY_INSTRUCTION_FILENAME,
)
def _link_images(parameters, pod_dir_path, images):
scripts.mkdir(pod_dir_path / shipyard2.POD_DIR_IMAGES_DIR_NAME)
_link(
shipyard2.POD_DIR_IMAGES_DIR_NAME,
parameters,
pod_dir_path,
shipyard2.BASE_LABEL,
None,
)
for label in images:
_link(
shipyard2.POD_DIR_IMAGES_DIR_NAME,
parameters,
pod_dir_path,
label,
None,
)
def _link_volumes(parameters, pod_dir_path, volumes):
scripts.mkdir(pod_dir_path / shipyard2.POD_DIR_VOLUMES_DIR_NAME)
for volume in volumes:
_link(
shipyard2.POD_DIR_VOLUMES_DIR_NAME,
parameters,
pod_dir_path,
foreman.Label.parse(volume.label),
volume.version,
)
def _link(sub_dir_name, parameters, pod_dir_path, label, version):
if sub_dir_name == shipyard2.POD_DIR_IMAGES_DIR_NAME:
derive = lambda ps, l, _: _images.derive_image_path(ps, l)
else:
ASSERT.equal(sub_dir_name, shipyard2.POD_DIR_VOLUMES_DIR_NAME)
derive = _derive_volume_path
target_path = ASSERT.predicate(
derive(parameters, label, version),
Path.is_file,
)
scripts.make_relative_symlink(
target_path,
pod_dir_path / sub_dir_name / label.name / target_path.name,
)
def _derive_volume_path(parameters, label, version):
return (
parameters['//releases:root'] / \
shipyard2.RELEASE_VOLUMES_DIR_NAME /
label.path /
label.name /
version /
shipyard2.VOLUME_DIR_VOLUME_FILENAME
)
_POD_ONESHOT = '''\
[Unit]
Description={description}
[Service]
Slice=machine.slice
Type=oneshot
{exec_starts}\
'''
def make_pod_oneshot_content(
*,
description,
default_exec_starts=('/usr/local/bin/ctr pods run-prepared ${pod_id}', ),
extra_exec_starts=(),
):
return _POD_ONESHOT.format(
description=description,
exec_starts=''.join(
'ExecStart=%s\n' % exec_start for exec_start in itertools.chain(
default_exec_starts,
extra_exec_starts,
)
),
)
# The pod is after time-sync.target because our pods generally will
# behave incorrectly if the clock is far off.
#
# We set StartLimitIntervalSec and StartLimitBurst to prevent the unit
# being trapped in repeated crashes. To manually restart the unit after
# this rate counter was exceeded, run `systemctl reset-failed`.
_POD_SERVICE = '''\
[Unit]
Description={description}
PartOf=machines.target
Before=machines.target
After=time-sync.target
[Service]
Slice=machine.slice
ExecStart=/usr/local/bin/ctr pods run-prepared ${{pod_id}}
KillMode=mixed
Restart=always
StartLimitIntervalSec=30s
StartLimitBurst=4
[Install]
WantedBy=machines.target
'''
def make_pod_service_content(*, description):
return _POD_SERVICE.format(description=description)
# The journal watcher is after machines.target so that pods are started
# before the watchers.
_POD_JOURNAL_WATCHER = '''\
[Unit]
Description={description}
After=machines.target network.target systemd-resolved.service
[Service]
ExecStart=/usr/local/bin/ops alerts watch-journal ${{pod_id}}
KillMode=mixed
Restart=always
StartLimitIntervalSec=30s
StartLimitBurst=4
[Install]
WantedBy=multi-user.target
'''
def make_pod_journal_watcher_content(*, description):
return _POD_JOURNAL_WATCHER.format(description=description)
_TIMER = '''\
[Unit]
Description={description}
[Timer]
{timer_section}
[Install]
WantedBy=multi-user.target
'''
def make_timer_content(*, description, timer_section):
return _TIMER.format(description=description, timer_section=timer_section)
|
clchiou/garage
|
shipyard2/shipyard2/rules/pods.py
|
pods.py
|
py
| 8,964 |
python
|
en
|
code
| 3 |
github-code
|
50
|
43718505374
|
# define a function
# the code inside a function will not run unless the function is called
def multiply(n1, n2):
# print (n1*n2)
return n1*n2
# call the function
value= multiply (3,4)+ multiply (10,12)
print (value)
# multiply (3,4)
# multiply (10,12)
# packaging code into a function: the same logic can be reused
def calculate(max):
sum=0
for i in range(1, max+1):
sum = sum + i
print(sum)
calculate(10)
calculate(20)
|
jielingl11/PythonLearning
|
L9_FunctionBasic.py
|
L9_FunctionBasic.py
|
py
| 432 |
python
|
zh
|
code
| 0 |
github-code
|
50
|
71231050075
|
import socket
s = socket.socket()
host = socket.gethostname()
port = 12345
s.connect((host,port))
while True:
print('Server : ', end='')
msg = s.recv(1024).decode('ascii')
print(msg)
if msg == 'bye':
print('The chat has ended')
s.send("bye".encode('ascii'))
break
else:
print('Client : ', end="")
msg = input()
s.send(msg.encode('ascii'))
s.close()
|
azimsurani/Client-Server-Chat-Program
|
client.py
|
client.py
|
py
| 435 |
python
|
en
|
code
| 1 |
github-code
|
50
|
36795515994
|
#!/usr/bin/python3
import re
import sqlite3
import argparse
import queue
import time
import threading
from multiprocessing.pool import ThreadPool
import traceback
import sys
import math
# variable for collating the multi-line output of route planning commands
routeList = None
# our SQLite database
database = None
DB_THREAD_ID = None
DEFAULT_DB_NAME = 'tw2002.db'
dbqueue = queue.Queue()
settings = {}
# verbosity level for parser output
verbose = 0
class PortStatus:
source = None
operation = None
units = None
prev_their_offer = None
prev_our_offer = None
final_offer = False
port_status = PortStatus()
# synchronizer, so the ZTM algorithm can know when the last route was processed and it can move on to the next
route_saved = threading.Event()
# sync flag for threads to exit
QUITTING_TIME = False
port_class_numbers = {'BBS':1, 'BSB':2, 'SBB':3, 'SSB':4, 'SBS':5, 'BSS':6, 'SSS':7, 'BBB':8}
port_class_sales = {1:'BBS', 2:'BSB', 3:'SBB', 4:'SSB', 5:'SBS', 6:'BSS', 7:'SSS', 8:'BBB'}
# auto-login for twgs
bbsNameEntryRe = re.compile("^Please enter your name \(ENTER for none\):")
gameSelectRe = re.compile("^Selection \(\? for menu\):")
gamePassRe = re.compile("^This is a private game. Please enter a password:")
# pattern matching the port list from Computer Interrogation Mode (CIM)
portListRe = re.compile('^(?P<sector>[ 0-9]{3}[0-9]) (?P<ore_bs>[ -]) (?P<ore_amt>[ 0-9]{3}[0-9]) (?P<ore_pct>[ 0-9]{2}[0-9])% (?P<org_bs>[ -]) (?P<org_amt>[ 0-9]{3}[0-9]) (?P<org_pct>[ 0-9]{2}[0-9])% (?P<equ_bs>[ -]) (?P<equ_amt>[ 0-9]{3}[0-9]) (?P<equ_pct>[ 0-9]{2}[0-9])%$')
# pattern to match so we know what sector we're looking at if we see a Trader or Planet
workingSectorRe = re.compile("^Sector : (?P<sector>[0-9]+) in .*\.$")
# pattern to match the list of warps out of each known sector from the CIM report or Computer Warps report (C, I)
warpListFromCIMRe = re.compile('^(?P<sector>[ 0-9]{3}[0-9])(?P<warps>(?: [ 0-9]{3}[0-9])+)$')
warpListFromCIRe = re.compile("^Sector (?P<sector>[0-9]+) has warps to sector\(s\) : (?P<warps>[0-9 \-]+)$")
# various patterns to match route planning, either via Computer Interrogation Mode (CIM) or Computer -> F Course Plotter (CF) mode
routeListFromCIMRe = re.compile("^FM > [0-9]+$")
routeListFromCFRe = re.compile("^The shortest path .* from sector [0-9]+ to sector [0-9]+ is:$")
routeListRestRe = re.compile("^(?: TO)?[0-9 ()>]+$")
routeListCompleteCIMRe = re.compile("^FM > [0-9]+ TO > [0-9]+ (?P<route>[0-9 ()>]+)$")
routeListCompleteCFRe = re.compile("^The shortest path .* from sector [0-9]+ to sector [0-9]+ is: (?P<route>[0-9 ()>]+)$")
# maintain a list of deployed fighters, so we can calculate the nearest transwarp point for any given sector
clearFightersRe = re.compile("^\s*Deployed Fighter Scan")
saveFightersRe = re.compile("^ (?P<sector>[0-9 ]{4}[0-9])\s+[0-9]+\s+(?:Personal|Corp)\s+(?:Defensive|Offensive|Toll)")
# keep track of planet locations
planetListRe = re.compile("^\s*(?P<sector>[0-9 ]{4}[0-9])\s+T?\s+#(?P<id>[0-9]+)\s+(?P<name>.*?)\s+Class (?P<class>[A-Z]), .*(?P<citadel>No Citadel|Level [0-9])")
# auto-haggle triggers
portOperationRe = re.compile("^How many (?P<planetOrShip>units|holds) of .+ do you want to (?P<operation>buy|sell) \[[0-9,]+\]\?")
portUnitsRe = re.compile("Agreed, (?P<units>[0-9]+) units.")
portFinalOfferRe = re.compile("^Our final offer is [0-9,]+ credits.$")
portPromptRe = re.compile(r"^Your offer \[(?P<offer>[0-9,]+)\]\s{0,1}\?$")
# game information
maxSectorRe = re.compile("^\s+Maximum players [0-9]+, sectors (?P<maxSector>[0-9,]+), ports [0-9,]+, planets [0-9,]+\.")
stardockRe = re.compile("^\s*The StarDock is located in sector (?P<sector>[0-9,]+)\.$")
# from https://stackoverflow.com/questions/14693701/how-can-i-remove-the-ansi-escape-sequences-from-a-string-in-python
def strip_ansi(inString):
ansi_escape_8bit = re.compile(br'''
(?: # either 7-bit C1, two bytes, ESC Fe (omitting CSI)
\x1B
[@-Z\\-_]
| # or a single 8-bit byte Fe (omitting CSI)
[\x80-\x9A\x9C-\x9F]
| # or CSI + control codes
(?: # 7-bit CSI, ESC [
\x1B\[
| # 8-bit CSI, 9B
\x9B
)
[0-?]* # Parameter bytes
[ -/]* # Intermediate bytes
[@-~] # Final byte
)
''', re.VERBOSE)
return ansi_escape_8bit.sub(b'', inString)
def log(logLevel, msg):
global verbose
if(logLevel > verbose):
return
try:
msg = 'String={}, groups={}'.format(repr(msg.string), repr(msg.groupdict()))
except:
pass
print("[LogLevel {}]: {}".format(logLevel, msg), flush=True)
# function decorator that will pass off database write operations to the dedicated thread, if called from another thread
def dbWriteWrapper(func):
def func_dbWriteWrapper(*args):
if(threading.get_ident() != DB_THREAD_ID):
dbqueue.put((func, *args))
return
return func(*args)
return func_dbWriteWrapper
@dbWriteWrapper
def clear_fighter_locations():
global database
log(1, "clear_fighter_locations")
c = database.cursor()
c.execute('DELETE FROM fighters')
database.commit()
@dbWriteWrapper
def save_fighter_location(match):
global database
sector = int(match.group('sector').strip())
log(1, "save_fighter_location: {}".format(sector))
c = database.cursor()
c.execute('REPLACE INTO fighters (sector) VALUES(?)', (sector,))
database.commit()
@dbWriteWrapper
def save_setting(key,value):
global database
c = database.cursor()
c.execute('REPLACE INTO settings (key, value) VALUES(?, ?)', (key, value))
database.commit()
@dbWriteWrapper
def save_warp_list(match):
global database
sector = int(match.group('sector').strip())
warps = re.findall('[0-9]+', match.group('warps'))
log(1, "save_warp_list: {}, {}".format(sector, warps))
c = database.cursor()
c.execute('''
REPLACE into explored (sector)
VALUES(?)
''', (sector,))
for warp in warps:
c.execute('''
REPLACE INTO warps (source, destination)
VALUES(?, ?)
''', (sector, int(warp))
)
database.commit()
@dbWriteWrapper
def save_port_list(match):
global database
log(1, "save_port_list: {}".format(match.groups()))
port_class = (match.group('ore_bs') + match.group('org_bs') + match.group('equ_bs')).replace(' ', 'S').replace('-', 'B')
c = database.cursor()
c.execute('''
REPLACE INTO ports (sector, class, ore_amt, ore_pct, org_amt, org_pct, equ_amt, equ_pct, last_seen)
VALUES(?, ?, ?, ?, ?, ?, ?, ?, date('now'))
''', (
int(match.group('sector').strip()),
port_class,
int(match.group('ore_amt').strip()),
int(match.group('ore_pct').strip()),
int(match.group('org_amt').strip()),
int(match.group('org_pct').strip()),
int(match.group('equ_amt').strip()),
int(match.group('equ_pct').strip()),
)
)
database.commit()
@dbWriteWrapper
def save_planet_list(match):
global database
log(1, "save_planet_list: {}".format(match.groups()))
c = database.cursor()
citadel = match.group('citadel').strip()[-1]
if(citadel == 'l'): # "No Citadel"
citadel = '0'
c.execute('''
REPLACE INTO planets (sector, id, name, class, citadel)
VALUES(?, ?, ?, ?, ?)
''', (
int(match.group('sector').strip()),
int(match.group('id').strip()),
match.group('name').strip(),
match.group('class').strip(),
int(citadel)
)
)
database.commit()
@dbWriteWrapper
def save_route_list(match):
global database
route = re.findall('[0-9]+', match.group('route'))
log(1, "save_route_list: {}".format(route))
c = database.cursor()
for i in range(len(route)-1):
# print(route[i], route[i+1])
c.execute('''
REPLACE INTO warps (source, destination)
VALUES(?, ?)
''', (int(route[i]), int(route[i+1]))
)
database.commit()
def parse_partial_line(line):
global port_status
try:
strippedLine = strip_ansi(line).decode('utf-8').rstrip()
except:
return
log(3, "parse_partial_line: {}".format((strippedLine,)))
portPrompt = portPromptRe.match(strippedLine)
if(portPrompt):
their_offer = int(portPrompt.group('offer').replace(',',''))
log(1, portPrompt)
our_offer = their_offer
if(port_status.prev_their_offer == None):
if(port_status.operation == 'sell'):
if(port_status.source == 'planet'):
our_offer = math.ceil(our_offer / 0.94) - 1
else:
our_offer *= 1.07
else:
our_offer *= 0.95
elif(their_offer == port_status.prev_their_offer):
# clearly something has gone awry
return None
else:
mult = 0.3
if(port_status.final_offer):
mult = 0.5
delta = port_status.prev_our_offer - their_offer
our_offer = port_status.prev_our_offer - (delta * mult)
port_status.prev_their_offer = their_offer
port_status.prev_our_offer = our_offer
port_status.final_offer = False
return('{}'.format(int(our_offer)).encode('utf-8'))
bbsNameEntry = bbsNameEntryRe.match(strippedLine)
if(bbsNameEntry and 'twgs_name' in settings):
val = settings['twgs_name']
del settings['twgs_name']
return('{}'.format(val).encode('utf-8'))
gameSelect = gameSelectRe.match(strippedLine)
if(gameSelect and 'twgs_game' in settings):
val = settings['twgs_game']
del settings['twgs_game']
return('{}'.format(val).encode('utf-8'))
gamePass = gamePassRe.match(strippedLine)
if(gamePass and 'twgs_game_pass' in settings):
val = settings['twgs_game_pass']
del settings['twgs_game_pass']
return('{}'.format(val).encode('utf-8'))
def parse_complete_line(line):
global routeList
global port_status
try:
strippedLine = strip_ansi(line).decode('utf-8').rstrip()
except:
return
log(3, "parse_complete_line: {}".format((strippedLine,)))
workingSector = workingSectorRe.match(strippedLine)
if(workingSector):
settings['working_sector'] = int(workingSector.group('sector'))
return
stardock = stardockRe.match(strippedLine)
if(stardock):
log(2, "stardock: {}".format(stardock))
sd = int(stardock.group('sector').replace(',',''))
if(not 'stardock' in settings or settings['stardock'] != sd):
settings['stardock'] = sd
save_setting('stardock', sd)
return
maxSector = maxSectorRe.match(strippedLine)
if(maxSector):
log(2, "maxSector: {}".format(maxSector))
max_sector = int(maxSector.group('maxSector').replace(',',''))
if(not 'max_sector' in settings or settings['max_sector'] != max_sector):
settings['max_sector'] = max_sector
save_setting('max_sector', max_sector)
return
portOperation = portOperationRe.match(strippedLine)
if(portOperation):
log(2, "portOperation: {}".format(portOperation))
if(portOperation.group('planetOrShip') == 'units'):
port_status.source = 'planet'
port_status.operation = portOperation.group('operation')
port_status.prev_their_offer = None
return
portUnits = portUnitsRe.match(strippedLine)
if(portUnits):
log(2, "portUnits: {}".format(portUnits))
port_status.units = int(portUnits.group('units'))
return
portFinalOffer = portFinalOfferRe.match(strippedLine)
if(portFinalOffer):
log(2, "portFinalOffer: {}".format(portFinalOffer))
port_status.final_offer = True
return
clearFighters = clearFightersRe.match(strippedLine)
if(clearFighters):
log(2, "clearFighters: {}".format(clearFighters))
clear_fighter_locations()
return
saveFighters = saveFightersRe.match(strippedLine)
if(saveFighters):
log(2, "saveFighters: {}".format(saveFighters))
save_fighter_location(saveFighters)
warpList = warpListFromCIMRe.match(strippedLine)
if(warpList):
# print(strippedLine, warpList.groups())
save_warp_list(warpList)
return
warpList = warpListFromCIRe.match(strippedLine)
if(warpList):
save_warp_list(warpList)
return
portList = portListRe.match(strippedLine)
if(portList):
# print(strippedLine, portList.groups())
save_port_list(portList)
return
planetList = planetListRe.match(strippedLine)
if(planetList):
log(2, "planetList: {}".format(planetList.groups()))
save_planet_list(planetList)
if(routeList): # we've already seen the "FM" line, let's look for the rest of the message
if(len(strippedLine) == 0):
strippedLine = routeList
routeList = None
else:
if(routeListRestRe.match(strippedLine)):
routeList += " " + strippedLine
else:
routeList = None
routeListComplete = routeListCompleteCIMRe.match(strippedLine)
if(routeListComplete):
save_route_list(routeListComplete)
route_saved.set()
return
routeListComplete = routeListCompleteCFRe.match(strippedLine)
if(routeListComplete):
save_route_list(routeListComplete)
route_saved.set()
return
# route listings are multi-line. accumulate the lines, then we'll process it once it's complete
routeListFrom = routeListFromCIMRe.match(strippedLine)
if(routeListFrom):
routeList = strippedLine
return
routeListFrom = routeListFromCFRe.match(strippedLine)
if(routeListFrom):
routeList = strippedLine
return
def dbqueue_service(dbname):
global database
global dbqueue
global QUITTING_TIME
global DB_THREAD_ID
DB_THREAD_ID = threading.get_ident()
database = sqlite3.connect(dbname)
doFlash = False
didWork = 0
while(True):
try:
func, *args = dbqueue.get(block=False)
if(len(args)):
logStr = "dbqueue_service: {}({})".format(func.__name__, *args)
try:
logStr = "dbqueue_service: {}({})".format(func.__name__, ', '.join([repr(x.groupdict()) for x in args]))
except:
# traceback.print_exc()
pass
log(1, logStr)
func(*args)
else:
log(1, "dbqueue_service: {}()".format(func.__name__))
func()
didWork += 1
except queue.Empty:
if(didWork):
if(didWork > 1):
# if we had a queue, flash the screen to indicate that all database operations are complete
doFlash = True
didWork = 0
if(QUITTING_TIME):
break
time.sleep(1)
except Exception:
traceback.print_exc()
if(doFlash):
try:
# flash
print("\x1b[?5h\x1b[?5l", flush=True, end='')
except:
pass
doFlash = False
def dbqueue_monitor():
global dbqueue
global QUITTING_TIME
cnt = 0
while(True):
cnt += 1
if((cnt % 10) == 0):
log(1, "dbqueue_monitor: {} queued items".format(dbqueue.qsize()))
if(QUITTING_TIME):
break
time.sleep(1)
def database_connect(dbname):
initdb = sqlite3.connect(dbname)
cursor = initdb.cursor()
cursor.execute('''
CREATE TABLE IF NOT EXISTS ports (
sector INTEGER PRIMARY KEY,
class TEXT,
ore_amt INTEGER,
ore_pct INTEGER,
org_amt INTEGER,
org_pct INTEGER,
equ_amt INTEGER,
equ_pct INTEGER,
last_seen INTEGER
);
''')
cursor.execute('''
CREATE TABLE IF NOT EXISTS warps (
source INTEGER,
destination INTEGER,
PRIMARY KEY (source, destination)
);
''')
cursor.execute('''
CREATE TABLE IF NOT EXISTS explored (
sector INTEGER PRIMARY KEY
);
''')
cursor.execute('''
CREATE TABLE IF NOT EXISTS planets (
sector INTEGER,
id INTEGER PRIMARY KEY,
name TEXT,
class TEXT,
citadel INTEGER
);
''')
cursor.execute('''
CREATE TABLE IF NOT EXISTS fighters (
sector INTEGER PRIMARY KEY
);
''')
cursor.execute('''
CREATE TABLE IF NOT EXISTS settings (
key TEXT PRIMARY KEY,
value TEXT
);
''')
for k,v in cursor.execute('''
SELECT key, value
FROM settings
'''):
settings[k]=v
# print(settings)
cursor.close()
initdb.commit()
del cursor
del initdb
# pool = ThreadPool(processes=1)
# pool.apply_async(dbqueue_service, (dbname,))
# pool = ThreadPool(processes=1)
# pool.apply_async(dbqueue_monitor)
dbq_s = threading.Thread(target=dbqueue_service, args=(dbname,))
dbq_s.start()
dbq_m = threading.Thread(target=dbqueue_monitor)
dbq_m.start()
def quit():
global dbqueue
global QUITTING_TIME
if(dbqueue.qsize() > 0):
print("Parsing complete.\nWaiting for database writes to finish...")
QUITTING_TIME = True
if(__name__ == '__main__'):
try:
parser = argparse.ArgumentParser(description='A TW2002 log parsing utility. This tool will database ports, warps, and the locations of your fighters and planets for use with analytical tools.')
parser.add_argument('--database', '-d', dest='db', default=DEFAULT_DB_NAME, help='SQLite database file to use; default "{}"'.format(DEFAULT_DB_NAME))
parser.add_argument('--verbose', '-v', type=int, nargs='?', default=0, help='Verbose level for parser feedback (1-3)')
parser.add_argument('filename', nargs='+', type=argparse.FileType('rb'), help='Name of the game log file(s) to parse')
args = parser.parse_args()
verbose = args.verbose
database_connect(args.db)
for f in args.filename:
for line in f:
parse_complete_line(line)
finally:
quit()
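
# Example of inspecting the resulting database from a separate script (illustrative
# only; the table and column names match the CREATE TABLE statements in
# database_connect above):
#
#   import sqlite3
#   db = sqlite3.connect('tw2002.db')
#   for sector, port_class in db.execute('SELECT sector, class FROM ports ORDER BY sector'):
#       print(sector, port_class)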
|
dracode/tw2002-client
|
twparser.py
|
twparser.py
|
py
| 18,984 |
python
|
en
|
code
| 1 |
github-code
|
50
|
14041830885
|
# coding:utf-8
import re
iplist = []
portlist = []
isip = False
datafile = open("ip.txt", "r")
for line in datafile.readlines():
line = line.strip('\n')
result=re.findall(r"\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b", line);
if not result:
if isip:
portlist.append(line)
isip=False
else:
isip=True
iplist.append(result)
|
cheerforthemaster/python
|
agentIP/Proxy_ip.py
|
Proxy_ip.py
|
py
| 435 |
python
|
en
|
code
| 0 |
github-code
|
50
|
42568726628
|
stringVal = "The quick Brow Fox"
countUpper = 0
countLower = 0
stringVal = stringVal.replace(" ", "")
for charVal in stringVal:
    if charVal.islower():
        countLower += 1
    elif charVal.isupper():
        countUpper += 1
print("No. of Upper case characters :", countUpper)
print("No. of Lower case characters :", countLower)
|
AshutoshPrograms/PythonTraining
|
CountUpperLower.py
|
CountUpperLower.py
|
py
| 328 |
python
|
en
|
code
| 0 |
github-code
|
50
|